# Mana Mac Mini Configuration
# Domain: mana.how (via Cloudflare Tunnel)
#
# Port Schema:
#   3000-3099: Core Services & Backends
#   5000-5099: Web Frontends
#   5100-5199: Games
#   8000-8099: Monitoring Dashboards
#   9000-9199: Infrastructure & Exporters
#
# Naming Convention: mana-{category}-{service}
#   Categories: infra, core, app, mon, auto
#
# Memory Limits:
#   All containers have explicit mem_limit to prevent unbounded growth.
#   Total budget: ~9.8 GiB (fits in 12 GiB Colima VM with ~2 GiB for builds)
#   Run ./scripts/mac-mini/memory-baseline.sh to verify actual usage.
#   Limits are ceilings — actual usage is typically 50-70% of limits.

services:
  # ============================================
  # Tier 0: Infrastructure Services
  # ============================================

  postgres:
    image: postgres:16-alpine
    container_name: mana-infra-postgres
    restart: always
    mem_limit: 1024m
    environment:
      POSTGRES_DB: mana_platform
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-mana123}
    volumes:
      - /Volumes/ManaData/postgres:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s
    command: >
      postgres
      -c shared_buffers=512MB
      -c effective_cache_size=2GB
      -c work_mem=16MB
      -c maintenance_work_mem=128MB
      -c max_wal_size=1GB
      -c max_wal_senders=3
      -c log_min_duration_statement=1000
      -c log_checkpoints=on

  # PostgreSQL Backup — hourly pg_dumpall + daily pg_basebackup
  # Retention: 48 hourly dumps + 30 daily base backups, driven by the
  # RETENTION_HOURLY / RETENTION_DAILY env vars below.
  # Restore: see docs/POSTGRES_BACKUP.md
  postgres-backup:
    image: postgres:16-alpine
    container_name: mana-infra-postgres-backup
    restart: unless-stopped
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      PGHOST: postgres
      PGUSER: postgres
      PGPASSWORD: ${POSTGRES_PASSWORD:-mana123}
      BACKUP_DIR: /backups
      RETENTION_HOURLY: 48
      RETENTION_DAILY: 30
    volumes:
      - /Volumes/ManaData/backups/postgres:/backups
    entrypoint: >
      /bin/sh -c "
      echo 'PostgreSQL Backup Service started';
      echo 'Hourly: pg_dumpall (retention: 48)';
      echo 'Daily 03:00: pg_basebackup (retention: 30)';
      while true; do
      HOUR=$$(date +%H);
      TIMESTAMP=$$(date +%Y%m%d_%H%M%S);
      echo \"[$$TIMESTAMP] Running hourly backup...\";
      pg_dumpall -h postgres -U postgres | gzip > /backups/hourly_$$TIMESTAMP.sql.gz;
      if [ \"$$HOUR\" = '03' ]; then
      echo \"[$$TIMESTAMP] Running daily base backup...\";
      mkdir -p /backups/base_$$TIMESTAMP;
      pg_basebackup -h postgres -U postgres -D /backups/base_$$TIMESTAMP -Ft -z -P;
      fi;
      find /backups -name 'hourly_*.sql.gz' -mmin +$$(($$RETENTION_HOURLY*60)) -delete 2>/dev/null;
      find /backups -maxdepth 1 -name 'base_*' -type d -mtime +$$RETENTION_DAILY -exec rm -rf {} + 2>/dev/null;
      echo \"[$$TIMESTAMP] Backup complete.
      Sleeping 1h...\";
      sleep 3600;
      done
      "

  # Self-hosted Landing Pages (replaces Cloudflare Pages)
  # Serves all Astro landing page dist/ folders via Nginx
  # Build with: ./scripts/mac-mini/build-landings.sh
  landings:
    image: nginx:alpine
    container_name: mana-infra-landings
    restart: always
    mem_limit: 48m
    volumes:
      - ./docker/nginx:/etc/nginx/host-config:ro
      - /Volumes/ManaData/landings:/srv/landings:ro
    command: >
      sh -c "mkdir -p /etc/nginx/snippets &&
      cp /etc/nginx/host-config/landings.conf /etc/nginx/conf.d/default.conf &&
      cp /etc/nginx/host-config/snippets/* /etc/nginx/snippets/ &&
      nginx -g 'daemon off;'"
    ports:
      - "4400:80"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1/health"]
      interval: 30s
      timeout: 5s
      retries: 3

  redis:
    image: redis:7-alpine
    container_name: mana-infra-redis
    restart: always
    mem_limit: 384m
    # maxmemory ~80% of mem_limit so Redis evicts before the kernel OOM-kills.
    # allkeys-lru drops least-recently-used keys when full — safe for our
    # cache-style usage (rate-limit counters, sync hot-paths, no critical state).
    command: redis-server --requirepass ${REDIS_PASSWORD:-redis123} --maxmemory 320mb --maxmemory-policy allkeys-lru
    volumes:
      - redis_data:/data
    ports:
      - "6379:6379"
    healthcheck:
      # An unauthenticated `redis-cli incr ping` always fails with NOAUTH
      # once requirepass is set; authenticate and PING instead.
      test: ["CMD-SHELL", "redis-cli -a '${REDIS_PASSWORD:-redis123}' --no-auth-warning ping | grep -q PONG"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s

  minio:
    image: minio/minio:latest
    container_name: mana-infra-minio
    restart: always
    mem_limit: 256m
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY:-minioadmin}
      MINIO_API_CORS_ALLOW_ORIGIN: https://music.mana.how,https://mana.how,https://picture.mana.how,https://storage.mana.how,https://plants.mana.how,https://contacts.mana.how,https://chat.mana.how,https://food.mana.how,https://photos.mana.how
    volumes:
      - /Volumes/ManaData/minio:/data
    ports:
      - "9000:9000"
      - "9001:9001"
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 30s
      timeout: 20s
      retries: 3
      start_period: 15s

  # MinIO bucket initialization and lifecycle rules (runs once)
  minio-init:
    image: minio/mc:latest
    container_name: mana-infra-minio-init
    mem_limit: 64m
    depends_on:
      minio:
        condition: service_healthy
    environment:
      # The $$-escaped expansions in the entrypoint resolve inside the
      # container shell, so the keys must actually exist in its
      # environment; without this block they silently fell back to
      # 'minioadmin' even when .env.macmini set real credentials.
      MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
    entrypoint: >
      /bin/sh -c "
      mc alias set myminio http://minio:9000 $${MINIO_ACCESS_KEY:-minioadmin} $${MINIO_SECRET_KEY:-minioadmin};
      mc mb --ignore-existing myminio/mana-storage;
      mc mb --ignore-existing myminio/picture-storage;
      mc mb --ignore-existing myminio/chat-storage;
      mc mb --ignore-existing myminio/cards-storage;
      mc mb --ignore-existing myminio/presi-storage;
      mc mb --ignore-existing myminio/calendar-storage;
      mc mb --ignore-existing myminio/contacts-storage;
      mc mb --ignore-existing myminio/storage-storage;
      mc mb --ignore-existing myminio/inventory-storage;
      mc mb --ignore-existing myminio/music-storage;
      mc mb --ignore-existing myminio/plants-storage;
      mc mb --ignore-existing myminio/projectdoc-storage;
      mc mb --ignore-existing myminio/mail-storage;
      mc anonymous set download myminio/mana-storage;
      mc anonymous set download myminio/picture-storage;
      mc anonymous set download myminio/plants-storage;
      mc anonymous set download myminio/inventory-storage;
      mc ilm rule add --expire-days 90 myminio/chat-storage --prefix 'tmp/' 2>/dev/null || true;
      mc ilm rule add --expire-days 30 myminio/calendar-storage --prefix 'tmp/' 2>/dev/null || true;
      mc ilm rule add --expire-days 7 myminio/picture-storage --prefix 'tmp/' 2>/dev/null || true;
      echo 'Buckets and lifecycle rules created successfully';
      exit 0;
      "

  # ============================================
  # Tier 1: Platform Services (Auth, Credits, Billing)
  # ============================================

  mana-auth:
    build:
      context: ../mana
      dockerfile: services/mana-auth/Dockerfile
    image: mana-auth:local
    container_name: mana-auth
    restart: always
    mem_limit: 192m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      NODE_ENV: production
      PORT: 3001
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      BASE_URL: https://auth.mana.how
      COOKIE_DOMAIN: .mana.how
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_SUBSCRIPTIONS_URL: http://mana-subscriptions:3063
      # Points at the mana-sync DB where sync_changes lives — read by
      # the F4 bootstrap, the /api/v1/me/bootstrap-singletons endpoint,
      # and the GDPR data summary in user-data.ts. Previously pointed
      # at mana_platform which silently broke all three (F4 was
      # fire-and-forget so the error went unnoticed; the explicit
      # endpoint added in 099cac4a0 surfaced it as a 500).
      SYNC_DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_sync
      BETTER_AUTH_SECRET: ${BETTER_AUTH_SECRET:-${JWT_SECRET:-your-jwt-secret-change-me}}
      # KEK for the encryption-vault feature (Phase 9). Required in production
      # — generate with `openssl rand -base64 32`. See services/mana-auth/CLAUDE.md.
      MANA_AUTH_KEK: ${MANA_AUTH_KEK}
      # RSA-OAEP-2048 public key of the mana-ai runner. Used to wrap
      # per-mission data keys in POST /me/ai-mission-grant. Paired with
      # MANA_AI_PRIVATE_KEY_PEM on the mana-ai service. Absent → endpoint
      # returns 503 GRANT_NOT_CONFIGURED (graceful degrade).
      MANA_AI_PUBLIC_KEY_PEM: ${MANA_AI_PUBLIC_KEY_PEM:-}
      MANA_NOTIFY_URL: http://mana-notify:3013
      # Glitchtip error reporting (Phase 2d, 2026-05-07). DSN is the
      # mana-platform project on glitchtip.mana.how; per-service env so
      # each service ends up tagged with its serverName.
      GLITCHTIP_DSN: ${GLITCHTIP_DSN_MANA_PLATFORM:-}
      MAX_DAILY_SIGNUPS: ${MAX_DAILY_SIGNUPS:-0}
      # Must be a superset of TRUSTED_ORIGINS in
      # services/mana-auth/src/auth/better-auth.config.ts.
      # Enforced by services/mana-auth/src/auth/sso-config.spec.ts.
      # All productivity modules now live under mana.how (path-based) —
      # no per-module subdomain entries required here.
      CORS_ORIGINS: https://mana.how,https://auth.mana.how,https://whopxl.mana.how,https://cardecky.mana.how,https://cardecky-api.mana.how,https://memoro-app.mana.how,https://zitare.mana.how,https://zitare-api.mana.how,https://nutriphi.mana.how,https://nutriphi-api.mana.how
    ports:
      - "3001:3001"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3001/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ============================================
  # Tier 1a': AI Mission Runner (Hono + Bun)
  # Background ticker that plans due AI Missions and stages proposals
  # back to user devices via mana-sync. Opt-in decrypt of encrypted
  # inputs via the Mission Key-Grant flow (see services/mana-ai/CLAUDE.md
  # and docs/plans/ai-mission-key-grant.md).
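  # A minimal sketch of the wrap step, assuming the standard OpenSSL CLI
  # (file names hypothetical; the real flow lives in the grant endpoint
  # described above):
  #   openssl pkeyutl -encrypt -pubin -inkey runner_pub.pem \
  #     -pkeyopt rsa_padding_mode:oaep -pkeyopt rsa_oaep_md:sha256 \
  #     -in mission_data_key.bin -out wrapped_key.bin
  # Only the runner's MANA_AI_PRIVATE_KEY_PEM counterpart can unwrap it.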
  # ============================================

  mana-credits:
    build:
      context: ../mana
      dockerfile: services/mana-credits/Dockerfile
    image: mana-credits:local
    container_name: mana-credits
    restart: always
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      PORT: 3002
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY:-}
      STRIPE_WEBHOOK_SECRET: ${STRIPE_CREDITS_WEBHOOK_SECRET:-}
      BASE_URL: https://credits.mana.how
      CORS_ORIGINS: https://mana.how,https://chat.mana.how,https://picture.mana.how,https://todo.mana.how,https://quotes.mana.how,https://calendar.mana.how,https://clock.mana.how,https://contacts.mana.how,https://cardecky.mana.how,https://presi.mana.how,https://storage.mana.how,https://food.mana.how,https://plants.mana.how,https://music.mana.how,https://context.mana.how,https://photos.mana.how,https://questions.mana.how,https://calc.mana.how
    ports:
      - "3002:3002"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3002/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.mana-credits.rule=Host(`credits.mana.how`)"
      - "traefik.http.routers.mana-credits.tls=true"
      - "traefik.http.services.mana-credits.loadbalancer.server.port=3002"

  cards-server:
    # Cardecky Marketplace + Community backend. See
    # apps/cards/docs/MARKETPLACE_PLAN.md for the full design.
    build:
      context: .
      dockerfile: services/cards-server/Dockerfile
    image: cards-server:local
    container_name: mana-app-cards-server
    restart: always
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3072
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_LLM_URL: http://mana-llm:3025
      MANA_MEDIA_URL: http://mana-media:3011
      MANA_NOTIFY_URL: http://mana-notify:3013
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      CORS_ORIGINS: https://cardecky.mana.how,https://mana.how
      AUTHOR_PAYOUT_STANDARD_BPS: 8000
      AUTHOR_PAYOUT_VERIFIED_BPS: 9000
      COMMUNITY_VERIFY_STARS: 500
      COMMUNITY_VERIFY_FEATURED: 3
      COMMUNITY_VERIFY_SUBSCRIBERS: 200
    ports:
      - "3072:3072"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3072/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  # mana-research moved to GPU-Box on 2026-05-07 (Phase 2g). Source-of-truth
  # is now managarten/infrastructure/docker-compose.gpu-box.yml. DNS for
  # research.mana.how points at the mana-gpu-server tunnel; cross-LAN to
  # mana-credits + mana-search + postgres + redis on 192.168.178.131.

  mana-events:
    build:
      context: services/mana-events
      dockerfile: Dockerfile
    image: mana-events:local
    container_name: mana-events
    restart: always
    mem_limit: 128m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3065
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      # The public RSVP endpoints accept requests from anywhere — they're
      # designed to be hit by guests who only have the share link.
      # The host endpoints sit behind JWT auth so CORS is still scoped.
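      # E.g. a guest RSVP needs no cookie and no allow-listed Origin; a
      # plain POST works from anywhere (path and payload hypothetical,
      # see services/mana-events for the actual routes):
      #   curl -X POST https://events.mana.how/rsvp/<share-token> \
      #     -H 'Content-Type: application/json' -d '{"status":"yes"}'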
      CORS_ORIGINS: https://mana.how
    ports:
      - "3065:3065"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3065/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.mana-events.rule=Host(`events.mana.how`)"
      - "traefik.http.routers.mana-events.tls=true"
      - "traefik.http.services.mana-events.loadbalancer.server.port=3065"

  # ─── Geocoding ───────────────────────────────────────────
  # Thin Hono wrapper in front of self-hosted Photon (mana-gpu) with
  # public photon.komoot.io and Nominatim as last-resort fallbacks.
  # See services/mana-geocoding/CLAUDE.md for provider-chain details.
  # Internal-only: no traefik labels, not exposed via Cloudflare.

  # ─── Federation backbone (mana-share + mana-mcp) ──────────────
  # Built from the platform repo (../mana/services/...). Phase F
  # (federation), deployed after the Cardecky go-live on 2026-05-08;
  # previously only ran locally.
  mana-share:
    build:
      context: ../mana
      dockerfile: services/mana-share/Dockerfile
    image: mana-share:local
    container_name: mana-share
    restart: always
    mem_limit: 192m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3072
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      CORS_ORIGINS: https://mana.how,https://cardecky.mana.how
    ports:
      - "3072:3072"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3072/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  mana-mcp:
    build:
      context: ../mana
      dockerfile: services/mana-mcp/Dockerfile
    image: mana-mcp:local
    container_name: mana-mcp
    restart: always
    mem_limit: 192m
    depends_on:
      postgres: { condition: service_healthy }
      mana-share: { condition: service_started }
    environment:
      TZ: Europe/Berlin
      PORT: 3069
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      CORS_ORIGINS: https://mana.how,https://cardecky.mana.how
      MANA_SHARE_URL: http://mana-share:3072
    ports:
      - "3069:3069"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3069/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  mana-geocoding:
    build:
      context: services/mana-geocoding
      dockerfile: Dockerfile
    image: mana-geocoding:local
    container_name: mana-geocoding
    restart: always
    mem_limit: 128m
    environment:
      TZ: Europe/Berlin
      PORT: 3018
      # Self-hosted Photon. Runs as a Docker container on the GPU box
      # (mana-server-gpu, WSL2). Mini containers can't route the GPU
      # box's LAN IP directly (Colima NAT quirk), so we go via the
      # Cloudflare tunnel: photon.mana.how forwards straight to the
      # GPU tunnel.
      # Empty value = slot disabled.
      PHOTON_SELF_API_URL: ${PHOTON_SELF_API_URL:-https://photon.mana.how}
      # Cold-start cross-LAN fetches to photon-self consistently take
      # >10s on the first probe; the 8s default false-marked it unhealthy
      # on every cold path. 20s leaves headroom while still cutting off
      # actually-stuck connections.
      PROVIDER_TIMEOUT_MS: ${PROVIDER_TIMEOUT_MS:-20000}
      # Short public-API cache TTL so a transient photon-self blip can't
      # pin stale public-fallback answers in the LRU for days.
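      # 3600000 ms = 1 h, vs. the 24 h (86400000 ms) general cache
      # configured in CACHE_TTL_MS below.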
CACHE_PUBLIC_TTL_MS: ${CACHE_PUBLIC_TTL_MS:-3600000} CORS_ORIGINS: https://mana.how,http://localhost:5173 CACHE_MAX_ENTRIES: "5000" CACHE_TTL_MS: "86400000" ports: - "3018:3018" healthcheck: test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3018/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"] interval: 120s timeout: 10s retries: 3 start_period: 15s mana-user: build: context: services/mana-user dockerfile: Dockerfile image: mana-user:local container_name: mana-user restart: always mem_limit: 128m depends_on: postgres: { condition: service_healthy } environment: TZ: Europe/Berlin PORT: 3062 DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform MANA_AUTH_URL: http://mana-auth:3001 MANA_SERVICE_KEY: ${MANA_SERVICE_KEY} CORS_ORIGINS: https://mana.how,https://calc.mana.how,https://calendar.mana.how,https://chat.mana.how,https://clock.mana.how,https://contacts.mana.how,https://context.mana.how,https://cardecky.mana.how,https://music.mana.how,https://food.mana.how,https://photos.mana.how,https://picture.mana.how,https://plants.mana.how,https://presi.mana.how,https://questions.mana.how,https://storage.mana.how,https://todo.mana.how,https://quotes.mana.how ports: - "3062:3062" healthcheck: test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3062/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"] interval: 120s timeout: 10s retries: 3 start_period: 15s mana-subscriptions: build: context: services/mana-subscriptions dockerfile: Dockerfile image: mana-subscriptions:local container_name: mana-subscriptions restart: always mem_limit: 128m depends_on: postgres: { condition: service_healthy } environment: TZ: Europe/Berlin PORT: 3063 DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform MANA_AUTH_URL: http://mana-auth:3001 MANA_SERVICE_KEY: ${MANA_SERVICE_KEY} STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY:-} STRIPE_WEBHOOK_SECRET: ${STRIPE_SUBSCRIPTIONS_WEBHOOK_SECRET:-} BASE_URL: https://subscriptions.mana.how CORS_ORIGINS: https://mana.how ports: - "3063:3063" healthcheck: test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3063/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"] interval: 120s timeout: 10s retries: 3 start_period: 15s mana-analytics: build: context: . dockerfile: services/mana-analytics/Dockerfile image: mana-analytics:local container_name: mana-analytics restart: always mem_limit: 128m depends_on: postgres: { condition: service_healthy } environment: TZ: Europe/Berlin PORT: 3064 DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform MANA_AUTH_URL: http://mana-auth:3001 MANA_LLM_URL: http://mana-llm:3025 MANA_CREDITS_URL: http://mana-credits:3002 MANA_SERVICE_KEY: ${MANA_SERVICE_KEY:?missing in .env.macmini} # Seeds the per-user display-hash for the public-community pseudonym # ("Wachsame Eule #4528"). Set in .env.macmini — rotation re-keys # only future pseudonyms, existing rows keep the old hash/name. FEEDBACK_PSEUDONYM_SECRET: ${FEEDBACK_PSEUDONYM_SECRET:?missing in .env.macmini} # Comma-separated userIds that bypass community credit-grants # (otherwise founders self-reward when posting/shipping). 
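      # Example shape in .env.macmini (ids hypothetical):
      #   FEEDBACK_FOUNDER_USER_IDS=usr_01hx2k,usr_01hx9p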
FEEDBACK_FOUNDER_USER_IDS: ${FEEDBACK_FOUNDER_USER_IDS:-} CORS_ORIGINS: https://mana.how,https://feedback.mana.how ports: - "3064:3064" healthcheck: test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3064/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"] interval: 120s timeout: 10s retries: 3 start_period: 15s # ============================================ # Tier 2: Gateway & Search Services (Ports 3010-3029) # ============================================ api-gateway: build: context: . dockerfile: services/mana-api-gateway/Dockerfile image: mana-api-gateway:local container_name: mana-api-gateway restart: always mem_limit: 64m depends_on: postgres: condition: service_healthy redis: condition: service_healthy environment: TZ: Europe/Berlin PORT: 3016 DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform?sslmode=disable REDIS_HOST: redis REDIS_PORT: 6379 REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123} MANA_AUTH_URL: http://mana-auth:3001 SEARCH_SERVICE_URL: http://mana-search:3012 STT_SERVICE_URL: ${STT_SERVICE_URL:-http://192.168.178.11:3020} TTS_SERVICE_URL: ${TTS_SERVICE_URL:-http://192.168.178.11:3022} CORS_ORIGINS: https://api.mana.how,https://mana.how ADMIN_USER_IDS: ${ADMIN_USER_IDS:-} ports: - "3016:3016" healthcheck: test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3016/health"] interval: 60s timeout: 5s retries: 3 start_period: 5s searxng: image: searxng/searxng:latest container_name: mana-core-searxng restart: always mem_limit: 256m volumes: - ./services/mana-search/searxng:/mnt/searxng-config:ro entrypoint: ["sh", "-c", "cp /mnt/searxng-config/settings.yml /etc/searxng/settings.yml && cp /mnt/searxng-config/limiter.toml /etc/searxng/limiter.toml 2>/dev/null; exec /usr/local/searxng/entrypoint.sh"] environment: SEARXNG_BASE_URL: http://searxng:8080 SEARXNG_SECRET: ${SEARXNG_SECRET:-change-me-searxng-secret} # Internal only - no external port mapping healthcheck: test: ["CMD", "wget", "-q", "--spider", "http://localhost:8080/healthz"] interval: 120s timeout: 10s retries: 3 start_period: 15s mana-search: build: context: . dockerfile: services/mana-search/Dockerfile image: mana-search:local container_name: mana-core-search restart: always mem_limit: 64m depends_on: searxng: condition: service_healthy environment: PORT: 3012 SEARXNG_URL: http://searxng:8080 SEARXNG_TIMEOUT: 15000 SEARXNG_DEFAULT_LANGUAGE: de-DE REDIS_HOST: redis REDIS_PORT: 6379 REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123} CACHE_SEARCH_TTL: 3600 CACHE_EXTRACT_TTL: 86400 EXTRACT_TIMEOUT: 10000 EXTRACT_MAX_LENGTH: 50000 ports: - "3012:3012" healthcheck: test: ["CMD", "wget", "-q", "--spider", "http://localhost:3012/health"] interval: 120s timeout: 10s retries: 3 start_period: 5s mana-sync: build: context: . dockerfile: services/mana-sync/Dockerfile image: mana-sync:local container_name: mana-core-sync restart: always mem_limit: 64m depends_on: postgres: condition: service_healthy environment: PORT: 3010 DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_sync?sslmode=disable JWKS_URL: http://mana-auth:3001/api/v1/auth/jwks CORS_ORIGINS: "https://mana.how,https://*.mana.how" MANA_CREDITS_URL: http://mana-credits:3002 MANA_SERVICE_KEY: ${MANA_SERVICE_KEY} # Apps that bypass the sync-subscription billing gate. Cardecky # promises free Sync per its Phase-1 GUIDELINES. 
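      # Comma-separated app-ids as sent by each client to mana-sync;
      # e.g. (hypothetical second entry) "cards,uload" would exempt
      # uload as well.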
      BILLING_EXEMPT_APPS: cards
    ports:
      - "3010:3010"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3010/health"]
      interval: 120s
      timeout: 5s
      retries: 3
      start_period: 5s

  mana-notify:
    build:
      context: ../mana
      dockerfile: services/mana-notify/Dockerfile
    image: mana-notify:local
    container_name: mana-core-notify
    restart: always
    mem_limit: 64m
    depends_on:
      postgres:
        condition: service_healthy
      stalwart:
        condition: service_started
    environment:
      PORT: 3013
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform?sslmode=disable
      SERVICE_KEY: ${MANA_SERVICE_KEY:-dev-service-key}
      MANA_AUTH_URL: http://mana-auth:3001
      SMTP_HOST: ${SMTP_HOST:-stalwart}
      SMTP_PORT: ${SMTP_PORT:-587}
      SMTP_USER: ${SMTP_USER:-noreply}
      SMTP_PASSWORD: ${SMTP_PASSWORD:-ManaNoReply2026!}
      SMTP_FROM: "Mana <noreply@mana.how>"
      SMTP_INSECURE_TLS: "true"
      EXPO_ACCESS_TOKEN: ${EXPO_ACCESS_TOKEN:-}
    ports:
      - "3013:3013"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3013/health"]
      interval: 120s
      timeout: 5s
      retries: 3
      start_period: 5s

  stalwart:
    image: stalwartlabs/stalwart:latest
    container_name: mana-mail
    restart: always
    # Tier-3 right-size 2026-04-28: bootstrap-mode RSS ~11 MiB. Bumped
    # to 128m (not 64m) because once Stalwart finishes its initial setup
    # and starts handling real SMTP queues + IMAP sessions, RSS will
    # rise. 128m gives 10× current headroom without being wasteful.
    mem_limit: 128m
    ports:
      - "25:25"
      - "587:587"
      - "465:465"
      - "993:993"
      - "8443:8080"
    volumes:
      - stalwart_data:/opt/stalwart-mail
    environment:
      - STALWART_ADMIN_PASSWORD=${STALWART_ADMIN_PASSWORD:-ChangeMe123!}
    # Stalwart's official image is distroless — no wget, curl, nc, ss,
    # netstat. Any `docker exec`-based healthcheck just fails with
    # "executable file not found". Disable it and rely on docker's
    # `restart: always` if Stalwart's process actually crashes
    # (mana-monitoring will catch the SMTP-port-unreachable case via
    # blackbox-exporter from outside the container).
    healthcheck:
      disable: true

  mana-crawler:
    build:
      context: .
      dockerfile: services/mana-crawler/Dockerfile
    image: mana-crawler:local
    container_name: mana-crawler
    restart: always
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      PORT: 3014
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform?sslmode=disable
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
      CRAWLER_USER_AGENT: "ManaCrawler/1.0 (+https://mana.how/bot)"
      QUEUE_CONCURRENCY: 5
    ports:
      - "3014:3014"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3014/health"]
      interval: 60s
      timeout: 5s
      retries: 3
      start_period: 5s

  mana-media:
    build:
      context: ../mana/services/mana-media/apps/api
    image: mana-media:local
    container_name: mana-core-media
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~80 MiB (63%) — within
    # OOM range when image-thumb spikes hit. Bumped to 160m.
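    # Spot-check live usage before the next right-size pass:
    #   docker stats --no-stream --format '{{.Name}}\t{{.MemUsage}}' mana-core-media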
    mem_limit: 160m
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
      minio:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3011
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
      S3_ENDPOINT: minio
      S3_PORT: 9000
      S3_USE_SSL: "false"
      S3_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      S3_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
      S3_BUCKET: mana-media
      S3_PUBLIC_URL: https://media.mana.how
      PUBLIC_URL: https://media.mana.how/api/v1
      CORS_ORIGINS: https://mana.how,https://food.mana.how,https://contacts.mana.how,https://chat.mana.how,https://storage.mana.how,https://photos.mana.how
    ports:
      - "3011:3011"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3011/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 10s

  mana-landing-builder:
    build:
      context: .
      dockerfile: services/mana-landing-builder/Dockerfile
    image: mana-landing-builder:local
    container_name: mana-core-landing-builder
    restart: always
    mem_limit: 192m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3015
      MANA_AUTH_URL: http://mana-auth:3001
      CLOUDFLARE_API_TOKEN: ${CLOUDFLARE_API_TOKEN:-}
      CLOUDFLARE_ACCOUNT_ID: ${CLOUDFLARE_ACCOUNT_ID:-}
      ORG_LANDING_DOMAIN: mana.how
    ports:
      - "3015:3015"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3015/api/v1/health"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ============================================
  # Tier 3: App Compute Servers (Hono + Bun, ~120 LOC each)
  # CRUD is handled by mana-sync. These only handle server-side
  # compute (AI, file upload, external APIs).
  # All use FROM oven/bun:1, ~160MB images, ~30MB RAM, <50ms cold start.
  # Ports match the old NestJS backends for backward compatibility.
  # NOTE: These apps need Hono server Dockerfiles to be added.
  # For now they share the same pattern:
  #   build: { context: apps/{app}/apps/server, dockerfile: ../../Dockerfile.bun }
  # Requires: Dockerfile.bun in apps/ root (FROM oven/bun:1, COPY, CMD bun run src/index.ts)
  # ============================================

  # ============================================
  # Tier 5: Web Frontends (Ports 5000-5099)
  # ============================================

  mana-web:
    build:
      context: .
      dockerfile: apps/mana/apps/web/Dockerfile
      args:
        PUBLIC_SYNC_SERVER_URL: wss://sync.mana.how
    image: mana-web:local
    container_name: mana-app-web
    restart: always
    mem_limit: 256m
    depends_on:
      mana-auth:
        condition: service_healthy
      mana-api:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5000
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_SYNC_SERVER_URL: http://mana-sync:3010
      PUBLIC_SYNC_SERVER_URL_CLIENT: https://sync.mana.how
      # Unified Hono/Bun API server (apps/api) — hosts all 16 product
      # compute modules (calendar, todo, picture, … who) on port 3060.
      # Browser calls go through https://mana-api.mana.how (cloudflared
      # tunnel route to mana-api:3060). SSR calls inside the docker
      # network use the internal hostname.
      #
      # NOTE: api.mana.how is already in use for the Go mana-api-gateway
      # on port 3016 (different service, predates apps/api). The
      # mana-api.* subdomain is the unambiguous new home.
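      # Quick check that both halves of each URL pair resolve (assumes
      # the /health route the healthchecks in this file already use):
      #   curl -s https://mana-api.mana.how/health                          # browser path
      #   docker exec mana-app-web wget -qO- http://mana-api:3060/health    # SSR path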
      PUBLIC_MANA_API_URL: http://mana-api:3060
      PUBLIC_MANA_API_URL_CLIENT: https://mana-api.mana.how
      # Server-side same-origin proxy upstream for routes/api/v1/who/[...path].
      # Inside docker the SvelteKit handler reaches mana-api over the internal
      # network; the dev fallback in code is localhost:3060, so this env var
      # is what makes prod hit the right hostname.
      MANA_API_INTERNAL_URL: http://mana-api:3060
      PUBLIC_MANA_CREDITS_URL: http://mana-credits:3002
      PUBLIC_MANA_CREDITS_URL_CLIENT: https://credits.mana.how
      # Per-app HTTP backend URLs (todo-api, calendar-api, contacts-api,
      # chat-api, storage-api, cards-api, music-api, food-api,
      # picture-api, presi-api, quotes-api, clock-api, context-api) and
      # the standalone memoro-server URL were removed in the pre-launch
      # ghost-API cleanup — every product module talks to mana-sync
      # directly and the unified `memoro` module is fully local-first.
      # See docs/PRE_LAUNCH_CLEANUP.md for the full rationale.
      PUBLIC_ULOAD_SERVER_URL: http://uload-server:3070
      PUBLIC_ULOAD_SERVER_URL_CLIENT: https://uload-api.mana.how
      PUBLIC_MANA_MEDIA_URL: http://mana-media:3011
      PUBLIC_MANA_MEDIA_URL_CLIENT: https://media.mana.how
      PUBLIC_MANA_LLM_URL: http://mana-llm:3025
      PUBLIC_MANA_LLM_URL_CLIENT: https://llm.mana.how
      PUBLIC_MANA_EVENTS_URL: http://mana-events:3065
      PUBLIC_MANA_EVENTS_URL_CLIENT: https://events.mana.how
      # mana-research — async web-research provider orchestration.
      # Since the service moved to the GPU box (see the mana-research
      # note above), browser and SSR both go through the
      # research.mana.how tunnel; there is no docker-internal URL.
      # Without this pair, the SSR-injected
      # window.__PUBLIC_MANA_RESEARCH_URL__ is empty string and
      # research fetches fall back to the current origin (404).
      PUBLIC_MANA_RESEARCH_URL: https://research.mana.how
      PUBLIC_MANA_RESEARCH_URL_CLIENT: https://research.mana.how
      # mana-analytics — public-feedback hub. Browser hits the
      # /api/v1/(public/)feedback/* endpoints directly; SSR uses the
      # internal docker-network URL.
      PUBLIC_MANA_ANALYTICS_URL: http://mana-analytics:3064
      PUBLIC_MANA_ANALYTICS_URL_CLIENT: https://feedback.mana.how
      # mana-ai background Mission Runner. Browser calls the audit
      # endpoint (/api/v1/me/ai-audit) to render the Workbench
      # "Datenzugriff" tab. SSR doesn't hit this service directly.
      PUBLIC_MANA_AI_URL: http://mana-ai:3067
      PUBLIC_MANA_AI_URL_CLIENT: https://mana-ai.mana.how
      # Feature flag for the Mission Key-Grant consent UI. false → the
      # dialog + Workbench audit tab stay hidden even on missions with
      # encrypted inputs. Flip to "true" per deployment once the keypair
      # is provisioned (see docs/plans/ai-mission-key-grant.md).
      PUBLIC_AI_MISSION_GRANTS: ${PUBLIC_AI_MISSION_GRANTS:-false}
      # Analytics & Error Tracking
      PUBLIC_UMAMI_WEBSITE_ID: 32777167-e026-4618-933a-3429120b479b
      PUBLIC_GLITCHTIP_DSN: ${GLITCHTIP_DSN_MANA_WEB:-}
      # Speech-to-Text proxy: SvelteKit /api/v1/voice/transcribe forwards
      # to mana-stt via Cloudflare Tunnel. The browser never sees the API
      # key — it stays server-side.
      MANA_STT_URL: https://gpu-stt.mana.how
      MANA_STT_API_KEY: ${MANA_STT_API_KEY:-}
      # LLM proxy: /api/v1/voice/parse-task and /api/v1/voice/parse-habit
      # call mana-llm for structured extraction. Set WITHOUT the PUBLIC_
      # prefix because $env/dynamic/private explicitly excludes vars
      # that start with the public prefix — so the parse endpoints
      # would never see PUBLIC_MANA_LLM_URL even though it's right
      # there in the compose env. Both vars exist; the public one
      # is read by the browser-side playground and status page.
      MANA_LLM_URL: http://mana-llm:3025
      MANA_LLM_API_KEY: ${MANA_LLM_API_KEY:-}
    ports:
      - "5000:5000"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5000/health"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 20s

  # REMOVED standalone web containers — now served by unified mana-web container (mana.how):
  #   chat-web, todo-web, quotes-web, calendar-web, clock-web, contacts-web,
  #   storage-web, presi-web, cards-web, food-web, skilltree-web, photos-web,
  #   music-web, citycorners-web, picture-web, inventory-web, calc-web, times-web,
  #   uload-web, memoro-web
  #   (cards-web has since returned as the standalone Cardecky frontend below)
  # picture-backend: REMOVED — replaced by Hono server (apps/picture/apps/server)
  # arcade-web: REMOVED — extracted to standalone repo at ~/Documents/Code/arcade

  manavoxel-web:
    build:
      context: .
      dockerfile: apps/manavoxel/apps/web/Dockerfile
    image: manavoxel-web:local
    container_name: mana-app-manavoxel-web
    restart: always
    mem_limit: 128m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5028
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_SYNC_SERVER_URL: http://mana-sync:3010
      PUBLIC_SYNC_SERVER_URL_CLIENT: https://sync.mana.how
    ports:
      - "5028:5028"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5028/health"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 45s

  cards-web:
    # Standalone Cardecky frontend on cardecky.mana.how — separate SvelteKit
    # container that consumes the same mana-sync 'cards' app-id as the
    # in-mana cards module. See apps/cards/GUIDELINES.md.
    build:
      context: .
      dockerfile: apps/cards/apps/web/Dockerfile
    image: cards-web:local
    container_name: mana-app-cards-web
    restart: always
    mem_limit: 128m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5180
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_MANA_SYNC_URL: http://mana-sync:3010
      PUBLIC_MANA_SYNC_URL_CLIENT: https://sync.mana.how
      PUBLIC_MANA_LLM_URL_CLIENT: https://llm.mana.how
      PUBLIC_MANA_MEDIA_URL_CLIENT: https://media.mana.how
      PUBLIC_CARDS_API_URL_CLIENT: https://cardecky-api.mana.how
    ports:
      - "5180:5180"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5180/"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 45s

  uload-server:
    build:
      context: apps/uload/apps/server
      dockerfile: Dockerfile
    image: uload-server:local
    container_name: mana-app-uload-server
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~51 MiB (20%). 128m is
    # 2.5× headroom — enough for spikes during multi-file uploads.
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3070
      DATABASE_URL: postgresql://mana:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/mana_sync
      MANA_AUTH_URL: http://mana-auth:3001
      CORS_ORIGINS: http://mana-web:5000,https://mana.how,https://uload.mana.how,https://ulo.ad
    ports:
      - "3070:3070"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3070/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

  memoro-server:
    build:
      context: .
      dockerfile: docker/Dockerfile.hono-server
      args:
        APP: memoro
    image: memoro-server:local
    container_name: mana-app-memoro-server
    restart: always
    mem_limit: 256m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3015
      MANA_AUTH_URL: http://mana-auth:3001
      CORS_ORIGINS: http://mana-web:5000,https://mana.how,https://memoro.mana.how
      MEMORO_SUPABASE_URL: ${MEMORO_SUPABASE_URL}
      MEMORO_SUPABASE_SERVICE_KEY: ${MEMORO_SUPABASE_SERVICE_KEY}
      SERVICE_KEY: ${MEMORO_SERVICE_KEY}
      AUDIO_SERVER_URL: http://memoro-audio-server:3016
      MANA_LLM_URL: http://mana-llm:3025
      MANA_LLM_MODEL: ${MANA_LLM_MODEL:-ollama/gemma3:12b}
      GEMINI_API_KEY: ${GEMINI_API_KEY}
      AZURE_OPENAI_KEY: ${AZURE_OPENAI_KEY}
      AZURE_OPENAI_ENDPOINT: ${AZURE_OPENAI_ENDPOINT}
      AZURE_OPENAI_DEPLOYMENT: ${AZURE_OPENAI_DEPLOYMENT}
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_CREDITS_SERVICE_KEY: ${MANA_CREDITS_SERVICE_KEY}
    ports:
      - "3015:3015"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3015/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s

  memoro-audio-server:
    build:
      context: apps/memoro/apps/audio-server
      dockerfile: Dockerfile
    image: memoro-audio-server:local
    container_name: mana-app-memoro-audio-server
    restart: always
    mem_limit: 512m
    environment:
      NODE_ENV: production
      PORT: 3016
      SERVICE_KEY: ${MEMORO_SERVICE_KEY}
      MEMORO_SERVER_URL: http://memoro-server:3015
      MEMORO_SUPABASE_URL: ${MEMORO_SUPABASE_URL}
      MEMORO_SUPABASE_SERVICE_KEY: ${MEMORO_SUPABASE_SERVICE_KEY}
      AZURE_SPEECH_KEY_1: ${AZURE_SPEECH_KEY_1}
      AZURE_SPEECH_KEY_2: ${AZURE_SPEECH_KEY_2}
      AZURE_SPEECH_KEY_3: ${AZURE_SPEECH_KEY_3}
      AZURE_SPEECH_KEY_4: ${AZURE_SPEECH_KEY_4}
      AZURE_SPEECH_REGION: ${AZURE_SPEECH_REGION:-germanywestcentral}
      AZURE_SPEECH_ENDPOINT: ${AZURE_SPEECH_ENDPOINT}
      AZURE_STORAGE_ACCOUNT_NAME: ${AZURE_STORAGE_ACCOUNT_NAME}
      AZURE_STORAGE_ACCOUNT_KEY: ${AZURE_STORAGE_ACCOUNT_KEY}
      AZURE_STORAGE_CONTAINER: ${AZURE_STORAGE_CONTAINER:-memoro-batch-audio}
      MANA_STT_URL: http://host.docker.internal:3020
      MANA_STT_API_KEY: ${MANA_STT_API_KEY:-}
    ports:
      - "3016:3016"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3016/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s

  mana-llm:
    build:
      context: ../mana/services/mana-llm
      dockerfile: Dockerfile
    container_name: mana-service-llm
    restart: unless-stopped
    # Tier-3 right-size 2026-04-28: live RSS ~46 MiB (18%). The service
    # is a thin OpenAI-compatible router around the GPU-box Ollama —
    # all heavy LLM work happens upstream, this container just proxies.
    # 128m is 2.5× headroom for streaming response buffers.
    mem_limit: 128m
    depends_on:
      redis:
        condition: service_healthy
    # Ollama lives on the Windows GPU box at 192.168.178.11:11434, but
    # Colima containers can't reach the LAN range — the entire
    # 192.168.178.0/24 subnet gets synthesized RST from inside any
    # container, even though the macOS host routes there fine. The
    # gpu-proxy LaunchAgent on the Mac Mini host (com.mana.gpu-proxy,
    # see /Users/mana/gpu-proxy.py) bridges 127.0.0.1:13434 → GPU
    # box's 11434, so we go through host.docker.internal:13434 to
    # reach Ollama. Without this hop the local mana-llm starts
    # cleanly but reports an empty model list and every chat
    # completion fails with "All connection attempts failed", which
    # cascades into voice quick-add silently degrading to its no-LLM
    # fallback for everyone hitting the local stack.
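    # Verify the hop from the Mac host with Ollama's model-list endpoint
    # (an empty "models" array reproduces the silent-degrade case above):
    #   curl -s http://127.0.0.1:13434/api/tags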
extra_hosts: - "host.docker.internal:host-gateway" environment: PORT: 3025 LOG_LEVEL: info OLLAMA_URL: ${OLLAMA_URL:-http://host.docker.internal:13434} OLLAMA_DEFAULT_MODEL: ${OLLAMA_MODEL:-gemma3:12b} OLLAMA_TIMEOUT: 120 REDIS_URL: redis://redis:6379 OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-} GROQ_API_KEY: ${GROQ_API_KEY:-} TOGETHER_API_KEY: ${TOGETHER_API_KEY:-} GOOGLE_API_KEY: ${GOOGLE_API_KEY:-} GOOGLE_DEFAULT_MODEL: gemini-2.0-flash AUTO_FALLBACK_ENABLED: "true" OLLAMA_MAX_CONCURRENT: 5 CORS_ORIGINS: https://playground.mana.how,https://mana.how,https://chat.mana.how ports: - "3025:3025" healthcheck: test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:3025/health')"] interval: 120s timeout: 10s retries: 3 start_period: 30s llm-playground: build: context: . dockerfile: apps/playground/apps/web/Dockerfile container_name: mana-app-llm-playground restart: unless-stopped # Tier-3 right-size 2026-04-28: live RSS ~22 MiB (17%) — 64m is # plenty for a SvelteKit static-export demo page. mem_limit: 64m depends_on: mana-auth: condition: service_healthy mana-llm: condition: service_healthy environment: NODE_ENV: production PORT: 5050 PUBLIC_MANA_AUTH_URL: http://mana-auth:3001 PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how PUBLIC_MANA_LLM_URL: http://mana-llm:3025 PUBLIC_MANA_LLM_URL_CLIENT: https://llm.mana.how ports: - "5050:5050" healthcheck: test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5050/health"] interval: 180s timeout: 10s retries: 3 start_period: 45s labels: - "com.centurylinklabs.watchtower.enable=true" # ============================================ # Tier 7: Monitoring Dashboards (Ports 8000-8099) # ============================================ cadvisor: image: gcr.io/cadvisor/cadvisor:v0.49.1 container_name: mana-mon-cadvisor restart: always # Tier-3 right-size 2026-04-28: live RSS ~98 MiB (76%) — too close # to OOM during cgroup-stat bursts on a busy host. Bumped to 160m. 
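    # The bursts are visible in cAdvisor's own export, e.g.:
    #   curl -s http://127.0.0.1:9110/metrics | grep container_memory_working_set_bytes | head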
    mem_limit: 160m
    privileged: true
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    ports:
      - "9110:8080"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:8080/healthz"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 20s

  postgres-exporter:
    image: prometheuscommunity/postgres-exporter:v0.15.0
    container_name: mana-mon-postgres-exporter
    restart: always
    mem_limit: 48m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      DATA_SOURCE_NAME: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/postgres?sslmode=disable
    ports:
      - "9187:9187"

  redis-exporter:
    image: oliver006/redis_exporter:v1.58.0
    container_name: mana-mon-redis-exporter
    restart: always
    mem_limit: 32m
    depends_on:
      redis:
        condition: service_healthy
    environment:
      REDIS_ADDR: redis://redis:6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
    ports:
      - "9121:9121"

  node-exporter:
    image: prom/node-exporter:v1.7.0
    container_name: mana-mon-node-exporter
    restart: always
    mem_limit: 32m
    # macOS Docker runs in a Linux VM, so we can only monitor the VM's metrics.
    # For full host metrics on macOS, install node_exporter natively.
    command:
      - '--collector.disable-defaults'
      - '--collector.cpu'
      - '--collector.meminfo'
      - '--collector.loadavg'
      - '--collector.filesystem'
      - '--collector.netdev'
      - '--collector.time'
      - '--collector.uname'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    ports:
      - "9100:9100"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9100/metrics"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 20s

  watchtower:
    image: nickfedor/watchtower:latest
    container_name: mana-auto-watchtower
    restart: always
    mem_limit: 64m
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      TZ: Europe/Berlin
      DOCKER_API_VERSION: 1.45
      WATCHTOWER_POLL_INTERVAL: 300
      WATCHTOWER_CLEANUP: "true"
      WATCHTOWER_INCLUDE_STOPPED: "false"

  # ============================================
  # Unified API Server (apps/api, Port 3060)
  # ============================================

  mana-api:
    build:
      context: .
      dockerfile: apps/api/Dockerfile
      args:
        # Verdaccio token for pulling @mana/media-client (and future
        # platform packages). On the Mac Mini it comes from
        # `.env.macmini`, in CI from `secrets.NPM_AUTH_TOKEN`. Sets the
        # `.npmrc` variable `${NPM_TOKEN}` at build time.
        NPM_TOKEN: ${NPM_AUTH_TOKEN:-${NPM_TOKEN:-}}
    image: mana-api:local
    container_name: mana-api
    restart: always
    mem_limit: 384m
    depends_on:
      postgres:
        condition: service_healthy
      mana-auth:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      NODE_ENV: production
      PORT: 3060
      # Auth (JWT validation via JWKS)
      MANA_AUTH_URL: http://mana-auth:3001
      JWT_PUBLIC_KEY: ${JWT_PUBLIC_KEY:-}
      # Compute services apps/api orchestrates
      MANA_LLM_URL: http://mana-llm:3025
      MANA_SEARCH_URL: http://mana-search:3012
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_MEDIA_URL: http://mana-media:3011
      MANA_CRAWLER_URL: http://mana-crawler:3014
      MANA_LLM_DEFAULT_MODEL: ${MANA_LLM_DEFAULT_MODEL:-gemma3:4b}
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      # OpenAI — picture module gpt-image-2 path. Optional: without it,
      # /api/v1/picture/generate falls through to Replicate/local Flux.
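      # Smoke-test the generate path end-to-end (payload hypothetical,
      # needs a logged-in user's JWT):
      #   curl -X POST https://mana-api.mana.how/api/v1/picture/generate \
      #     -H 'Authorization: Bearer <jwt>' -H 'Content-Type: application/json' \
      #     -d '{"prompt":"test"}'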
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      # Replicate — fallback for Flux-schnell image generation
      REPLICATE_API_TOKEN: ${REPLICATE_API_TOKEN:-}
      APP_ID: mana-api
      # Database (used by modules that have server-side state — research,
      # presi share-links, traces guides). Same Postgres + schema split
      # as the rest of the platform.
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      # CORS — only the unified mana.how origin needs access today.
      # The manavoxel game frontend doesn't call apps/api.
      CORS_ORIGINS: https://mana.how
      # Structured-logger format
      LOGGER_FORMAT: json
    ports:
      - "3060:3060"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3060/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 60s
      timeout: 5s
      retries: 3
      start_period: 30s

  # ============================================
  # News Ingester
  # ============================================
  # services/news-ingester — pulls public RSS/JSON feeds into the
  # news.curated_articles pool every 15 min. The unified mana-api reads
  # from the same table to serve /api/v1/news/feed; user reading lists
  # remain client-side in the unified Mana app's local IndexedDB.

# ============================================
# Volumes (Naming: mana-{service}-data)
# ============================================
volumes:
  redis_data:
    name: mana-redis-data
  analytics_data:
    name: mana-analytics-data
  stalwart_data:
    name: mana-stalwart-data
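# ============================================
# Restore sketch for the hourly dumps above (the authoritative
# procedure is docs/POSTGRES_BACKUP.md; timestamp hypothetical):
#   gunzip -c /Volumes/ManaData/backups/postgres/hourly_20260507_120000.sql.gz \
#     | docker exec -i mana-infra-postgres psql -U postgres
# pg_dumpall output recreates roles and databases, so it is fed to
# psql against the default 'postgres' database on a fresh cluster.
# ============================================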