diff --git a/docker-compose.macmini.yml b/docker-compose.macmini.yml
index c7a0b689f..874cea160 100644
--- a/docker-compose.macmini.yml
+++ b/docker-compose.macmini.yml
@@ -952,10 +952,24 @@ services:
     depends_on:
       redis:
         condition: service_healthy
+    # Ollama lives on the Windows GPU box at 192.168.178.11:11434, but
+    # Colima containers can't reach the LAN range: the entire
+    # 192.168.178.0/24 subnet gets a synthesized RST from inside any
+    # container, even though the macOS host routes there fine. The
+    # gpu-proxy LaunchAgent on the Mac Mini host (com.mana.gpu-proxy,
+    # see /Users/mana/gpu-proxy.py) bridges 127.0.0.1:13434 → the GPU
+    # box's 11434, so we go through host.docker.internal:13434 to
+    # reach Ollama. Without this hop the local mana-llm starts
+    # cleanly but reports an empty model list, and every chat
+    # completion fails with "All connection attempts failed", which
+    # cascades into voice quick-add silently degrading to its no-LLM
+    # fallback for everyone hitting the local stack.
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
     environment:
       PORT: 3025
       LOG_LEVEL: info
-      OLLAMA_URL: ${OLLAMA_URL:-http://192.168.178.11:11434}
+      OLLAMA_URL: ${OLLAMA_URL:-http://host.docker.internal:13434}
       OLLAMA_DEFAULT_MODEL: ${OLLAMA_MODEL:-gemma3:12b}
       OLLAMA_TIMEOUT: 120
       REDIS_URL: redis://redis:6379
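
Reviewer note: /Users/mana/gpu-proxy.py itself is not part of this diff, so for context here is a minimal sketch of the kind of bridge the comment describes: a plain TCP forwarder from 127.0.0.1:13434 to the GPU box's 11434. The asyncio structure and the function names (`pipe`, `handle`) are illustrative assumptions, not the real script.

```python
#!/usr/bin/env python3
# Illustrative sketch only -- the real /Users/mana/gpu-proxy.py is not
# shown in this diff. Listens on 127.0.0.1:13434 and forwards raw TCP
# to Ollama on the GPU box (192.168.178.11:11434).
import asyncio

LISTEN_HOST, LISTEN_PORT = "127.0.0.1", 13434
TARGET_HOST, TARGET_PORT = "192.168.178.11", 11434


async def pipe(reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    # Copy bytes one direction until EOF, then close the far side.
    try:
        while data := await reader.read(65536):
            writer.write(data)
            await writer.drain()
    finally:
        writer.close()


async def handle(client_r: asyncio.StreamReader, client_w: asyncio.StreamWriter) -> None:
    # One upstream connection per inbound client; shuttle bytes both ways.
    try:
        remote_r, remote_w = await asyncio.open_connection(TARGET_HOST, TARGET_PORT)
    except OSError:
        client_w.close()
        return
    await asyncio.gather(
        pipe(client_r, remote_w),
        pipe(remote_r, client_w),
        return_exceptions=True,  # a reset on one side just ends the session
    )


async def main() -> None:
    server = await asyncio.start_server(handle, LISTEN_HOST, LISTEN_PORT)
    async with server:
        await server.serve_forever()


if __name__ == "__main__":
    asyncio.run(main())
```

To verify the hop end to end, hitting Ollama's /api/tags through the forwarded port from inside the container should return the model list instead of a connection error (assuming the service is named mana-llm in this file and its image ships curl):

```sh
docker compose -f docker-compose.macmini.yml exec mana-llm \
  curl -s http://host.docker.internal:13434/api/tags
```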