Add GPU passthrough and host networking to llamacpp-monitor
All checks were successful
Test and Publish Templates / test-and-publish (push) Successful in 9s

This commit is contained in:
j
2026-03-15 15:57:22 +13:00
parent 0d6c64d7fd
commit fca9167883

View File

@@ -3,10 +3,22 @@ source "${AGENT_PATH}/common.sh"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
_check_required_env_vars "CONTAINER_NAME" "LLAMA_SERVER_URL" "MONITOR_PORT" "IMAGE_REGISTRY" "IMAGE_REPO" "IMAGE_TAG"
# Build GPU device mounts if nvidia-smi is available on the host.
# GPU_ARGS is intentionally a flat, space-separated string: it is expanded
# unquoted inside the docker run command below so each flag word-splits
# into its own argument. Keep values free of spaces.
GPU_ARGS=""
if command -v nvidia-smi &>/dev/null; then
  # Mount every NVIDIA device node actually present. The previous version
  # hard-coded /dev/nvidia0 and /dev/nvidia-uvm, which broke on multi-GPU
  # hosts and failed `docker run` entirely when the uvm node was absent.
  for _gpu_dev in /dev/nvidia*; do
    # Guard against an unmatched glob expanding to the literal pattern.
    [ -e "${_gpu_dev}" ] && GPU_ARGS+=" --device ${_gpu_dev}:${_gpu_dev}"
  done
  # Expose the host's nvidia-smi binary and driver libraries read-only so
  # the container can query the GPU without bundling the driver userspace.
  GPU_ARGS+=" -v /usr/bin/nvidia-smi:/usr/bin/nvidia-smi:ro"
  GPU_ARGS+=" -v /usr/lib/nvidia:/usr/lib/nvidia:ro"
  GPU_ARGS+=" -e LD_LIBRARY_PATH=/usr/lib/nvidia"
  unset _gpu_dev
fi
DOCKER_RUN_CMD="docker run -d \
  --restart unless-stopped \
  --name ${CONTAINER_NAME} \
  --network host \
  ${GPU_ARGS} \
  -e LLAMA_SERVER_URL=${LLAMA_SERVER_URL} \
  -e MONITOR_PORT=${MONITOR_PORT} \
  -v ${SCRIPT_DIR}/monitor.py:/app/monitor.py:ro \