From fca9167883d611f4fd7c9ce4cbcc94e7b66fded0 Mon Sep 17 00:00:00 2001
From: j
Date: Sun, 15 Mar 2026 15:57:22 +1300
Subject: [PATCH] Add GPU passthrough and host networking to llamacpp-monitor

---
 llamacpp-monitor/start.sh | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/llamacpp-monitor/start.sh b/llamacpp-monitor/start.sh
index bc0c389..b1c2e7e 100755
--- a/llamacpp-monitor/start.sh
+++ b/llamacpp-monitor/start.sh
@@ -3,10 +3,22 @@
 source "${AGENT_PATH}/common.sh"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
 _check_required_env_vars "CONTAINER_NAME" "LLAMA_SERVER_URL" "MONITOR_PORT" "IMAGE_REGISTRY" "IMAGE_REPO" "IMAGE_TAG"
+# Build GPU device mounts if nvidia-smi is available on the host
+GPU_ARGS=""
+if command -v nvidia-smi &>/dev/null; then
+  GPU_ARGS="--device /dev/nvidia0:/dev/nvidia0 \
+    --device /dev/nvidiactl:/dev/nvidiactl \
+    --device /dev/nvidia-uvm:/dev/nvidia-uvm \
+    -v /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro \
+    -v /usr/lib/nvidia:/usr/lib/nvidia:ro \
+    -e LD_LIBRARY_PATH=/usr/lib/nvidia"
+fi
+
 DOCKER_RUN_CMD="docker run -d \
   --restart unless-stopped \
   --name ${CONTAINER_NAME} \
-  -p ${MONITOR_PORT}:${MONITOR_PORT} \
+  --network host \
+  ${GPU_ARGS} \
   -e LLAMA_SERVER_URL=${LLAMA_SERVER_URL} \
   -e MONITOR_PORT=${MONITOR_PORT} \
   -v ${SCRIPT_DIR}/monitor.py:/app/monitor.py:ro \