The compose mem_limits hadn't been revisited in months. Today's live `docker stats` snapshot revealed:

- 5 services using <25% of their limit (waste)
- 3 services using >70% of their limit (OOM risk during spikes)

Adjusted both directions, no container removal, no behaviour change. Each tweak carries a 1-line rationale in the file with the observed RSS that motivated it.

Bumped (tight → comfortable):

- mana-mon-cadvisor 128m → 160m (was 76% — bursts during stat collection)
- mana-mon-alert-notifier 32m → 48m (was 79% — alert bursts queue up)
- mana-core-media 128m → 160m (was 63% — image-thumb spikes)

Trimmed (over-provisioned):

- mana-research 256m → 128m (live ~57m, 22%)
- mana-mail 256m → 128m (live ~11m bootstrap; legitimate growth headroom)
- mana-app-uload-server 256m → 128m (live ~51m, 20%)
- mana-service-llm 256m → 128m (live ~46m, 18%; thin proxy to upstream Ollama)
- mana-app-llm-playground 128m → 64m (live ~22m, 17%; static-export demo)

Net delta: -496 MiB in compose limits — direct headroom for the mana-web Vite build that previously OOM'd on the same VM. Combined with the build-memory-headroom.sh wrapper (which still pauses the monitoring stack during heavy builds), the Vite OOM risk is gone on paper.

Containers will be recreated on the next CD pass through `docker compose up -d` (touched env or recipe). For the trimmed services, the new limit is well above current RSS, so nothing should OOM. For the bumped services, the old limit was the tight one, so this only relaxes.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
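For reference, a snapshot like the one above can be taken with a plain `docker stats` one-liner along these lines (standard Docker CLI format fields; a sketch, not necessarily the exact invocation used):

    docker stats --no-stream --format 'table {{.Name}}\t{{.MemUsage}}\t{{.MemPerc}}'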
# Mana Mac Mini Configuration
# Domain: mana.how (via Cloudflare Tunnel)
#
# Port Schema:
#   3000-3099: Core Services & Backends
#   5000-5099: Web Frontends
#   5100-5199: Games
#   8000-8099: Monitoring Dashboards
#   9000-9199: Infrastructure & Exporters
#
# Naming Convention: mana-{category}-{service}
# Categories: infra, core, app, mon, auto
#
# Memory Limits:
#   All containers have explicit mem_limit to prevent unbounded growth.
#   Total budget: ~9.8 GiB (fits in 12 GiB Colima VM with ~2 GiB for builds)
#   Run ./scripts/mac-mini/memory-baseline.sh to verify actual usage.
#   Limits are ceilings — actual usage is typically 50-70% of limits.

services:
  # ============================================
  # Tier 0: Infrastructure Services
  # ============================================

  postgres:
    image: postgres:16-alpine
    container_name: mana-infra-postgres
    restart: always
    mem_limit: 1024m
    environment:
      POSTGRES_DB: mana_platform
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-mana123}
    volumes:
      - /Volumes/ManaData/postgres:/var/lib/postgresql/data
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s
    command: >
      postgres
      -c shared_buffers=512MB
      -c effective_cache_size=2GB
      -c work_mem=16MB
      -c maintenance_work_mem=128MB
      -c max_wal_size=1GB
      -c max_wal_senders=3
      -c log_min_duration_statement=1000
      -c log_checkpoints=on

  # PostgreSQL Backup — hourly pg_dumpall + daily pg_basebackup
  # Retention: 48 hourly dumps + 30 daily base backups
  # Restore: see docs/POSTGRES_BACKUP.md
  postgres-backup:
    image: postgres:16-alpine
    container_name: mana-infra-postgres-backup
    restart: unless-stopped
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      PGHOST: postgres
      PGUSER: postgres
      PGPASSWORD: ${POSTGRES_PASSWORD:-mana123}
      BACKUP_DIR: /backups
      RETENTION_HOURLY: 48
      RETENTION_DAILY: 30
    volumes:
      - /Volumes/ManaData/backups/postgres:/backups
    entrypoint: >
      /bin/sh -c "
      echo 'PostgreSQL Backup Service started';
      echo 'Hourly: pg_dumpall (retention: 48)';
      echo 'Daily 03:00: pg_basebackup (retention: 30)';
      while true; do
        HOUR=$$(date +%H);
        TIMESTAMP=$$(date +%Y%m%d_%H%M%S);
        echo \"[$$TIMESTAMP] Running hourly backup...\";
        pg_dumpall -h postgres -U postgres | gzip > /backups/hourly_$$TIMESTAMP.sql.gz;
        if [ \"$$HOUR\" = '03' ]; then
          echo \"[$$TIMESTAMP] Running daily base backup...\";
          mkdir -p /backups/base_$$TIMESTAMP;
          pg_basebackup -h postgres -U postgres -D /backups/base_$$TIMESTAMP -Ft -z -P;
        fi;
        find /backups -name 'hourly_*.sql.gz' -mmin +$$((48*60)) -delete 2>/dev/null;
        find /backups -name 'base_*' -maxdepth 1 -type d -mtime +30 -exec rm -rf {} + 2>/dev/null;
        echo \"[$$TIMESTAMP] Backup complete. Sleeping 1h...\";
        sleep 3600;
      done
      "
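  # Restore sketch for the hourly dumps (authoritative steps live in
  # docs/POSTGRES_BACKUP.md; <timestamp> is whichever dump you pick):
  #   gunzip -c /backups/hourly_<timestamp>.sql.gz | psql -h postgres -U postgres postgres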
  # Self-hosted Landing Pages (replaces Cloudflare Pages)
  # Serves all Astro landing page dist/ folders via Nginx
  # Build with: ./scripts/mac-mini/build-landings.sh
  landings:
    image: nginx:alpine
    container_name: mana-infra-landings
    restart: always
    mem_limit: 48m
    volumes:
      - ./docker/nginx:/etc/nginx/host-config:ro
      - /Volumes/ManaData/landings:/srv/landings:ro
    command: >
      sh -c "mkdir -p /etc/nginx/snippets &&
      cp /etc/nginx/host-config/landings.conf /etc/nginx/conf.d/default.conf &&
      cp /etc/nginx/host-config/snippets/* /etc/nginx/snippets/ &&
      nginx -g 'daemon off;'"
    ports:
      - "4400:80"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://127.0.0.1/health"]
      interval: 30s
      timeout: 5s
      retries: 3

  redis:
    image: redis:7-alpine
    container_name: mana-infra-redis
    restart: always
    mem_limit: 384m
    # maxmemory ~80% of mem_limit so Redis evicts before the kernel OOM-kills.
    # allkeys-lru drops least-recently-used keys when full — safe for our
    # cache-style usage (rate-limit counters, sync hot-paths, no critical state).
    command:
      redis-server --requirepass ${REDIS_PASSWORD:-redis123} --maxmemory 320mb
      --maxmemory-policy allkeys-lru
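    # Spot-check that the eviction policy took effect (sketch; assumes the
    # default password, adjust -a for a real deployment):
    #   docker exec mana-infra-redis redis-cli -a redis123 config get maxmemory-policy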
    volumes:
      - redis_data:/data
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "--raw", "incr", "ping"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 10s

  minio:
    image: minio/minio:latest
    container_name: mana-infra-minio
    restart: always
    mem_limit: 256m
    command: server /data --console-address ":9001"
    environment:
      MINIO_ROOT_USER: ${MINIO_ACCESS_KEY:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_SECRET_KEY:-minioadmin}
      MINIO_API_CORS_ALLOW_ORIGIN: https://music.mana.how,https://mana.how,https://picture.mana.how,https://storage.mana.how,https://plants.mana.how,https://contacts.mana.how,https://chat.mana.how,https://food.mana.how,https://photos.mana.how
    volumes:
      - /Volumes/ManaData/minio:/data
    ports:
      - "9000:9000"
      - "9001:9001"
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 30s
      timeout: 20s
      retries: 3
      start_period: 15s

  # MinIO bucket initialization and lifecycle rules (runs once)
  minio-init:
    image: minio/mc:latest
    container_name: mana-infra-minio-init
    mem_limit: 64m
    depends_on:
      minio:
        condition: service_healthy
    entrypoint: >
      /bin/sh -c "
      mc alias set myminio http://minio:9000 $${MINIO_ACCESS_KEY:-minioadmin} $${MINIO_SECRET_KEY:-minioadmin};
      mc mb --ignore-existing myminio/mana-storage;
      mc mb --ignore-existing myminio/picture-storage;
      mc mb --ignore-existing myminio/chat-storage;
      mc mb --ignore-existing myminio/cards-storage;
      mc mb --ignore-existing myminio/presi-storage;
      mc mb --ignore-existing myminio/calendar-storage;
      mc mb --ignore-existing myminio/contacts-storage;
      mc mb --ignore-existing myminio/storage-storage;
      mc mb --ignore-existing myminio/inventory-storage;
      mc mb --ignore-existing myminio/music-storage;
      mc mb --ignore-existing myminio/plants-storage;
      mc mb --ignore-existing myminio/projectdoc-storage;
      mc mb --ignore-existing myminio/mail-storage;
      mc anonymous set download myminio/mana-storage;
      mc anonymous set download myminio/picture-storage;
      mc anonymous set download myminio/plants-storage;
      mc anonymous set download myminio/inventory-storage;
      mc ilm rule add --expire-days 90 myminio/chat-storage --prefix 'tmp/' 2>/dev/null || true;
      mc ilm rule add --expire-days 30 myminio/calendar-storage --prefix 'tmp/' 2>/dev/null || true;
      mc ilm rule add --expire-days 7 myminio/picture-storage --prefix 'tmp/' 2>/dev/null || true;
      echo 'Buckets and lifecycle rules created successfully';
      exit 0;
      "
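    # To verify a lifecycle rule landed, run mc with the alias above from
    # any machine (sketch):
    #   mc ilm rule ls myminio/chat-storage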

  # ============================================
  # Tier 0b: Forgejo (Git + CI/CD + Registry)
  # ============================================

  forgejo:
    image: codeberg.org/forgejo/forgejo:11
    container_name: mana-core-forgejo
    restart: always
    mem_limit: 512m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      USER_UID: 1000
      USER_GID: 1000
      FORGEJO__database__DB_TYPE: postgres
      FORGEJO__database__HOST: postgres:5432
      FORGEJO__database__NAME: forgejo
      FORGEJO__database__USER: postgres
      FORGEJO__database__PASSWD: ${POSTGRES_PASSWORD:-mana123}
      FORGEJO__server__DOMAIN: git.mana.how
      FORGEJO__server__SSH_DOMAIN: git.mana.how
      FORGEJO__server__ROOT_URL: https://git.mana.how/
      FORGEJO__server__HTTP_PORT: 3000
      FORGEJO__server__SSH_PORT: 2222
      FORGEJO__server__LFS_START_SERVER: "true"
      FORGEJO__service__DISABLE_REGISTRATION: "true"
      FORGEJO__service__REQUIRE_SIGNIN_VIEW: "false"
      FORGEJO__actions__ENABLED: "true"
      FORGEJO__actions__DEFAULT_ACTIONS_URL: https://code.forgejo.org
      FORGEJO__packages__ENABLED: "true"
      FORGEJO__ui__DEFAULT_THEME: forgejo-dark
      FORGEJO__ui__SHOW_USER_EMAIL: "false"
      FORGEJO__mailer__ENABLED: "false"
    volumes:
      - /Volumes/ManaData/forgejo:/data
    ports:
      - "3041:3000"
      - "2222:22"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3000/api/v1/version"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 30s

  # Forgejo runner removed — no macOS binary exists, Docker-based runner
  # can't access host filesystem/SSH for CD. GitHub CD handles deployment
  # via native self-hosted runner. Forgejo is kept as a mirror only.

  # ============================================
  # Tier 1: Core Auth Service (Port 3001)
  # ============================================

  mana-auth:
    build:
      context: .
      dockerfile: services/mana-auth/Dockerfile
    image: mana-auth:local
    container_name: mana-auth
    restart: always
    mem_limit: 192m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      NODE_ENV: production
      PORT: 3001
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      BASE_URL: https://auth.mana.how
      COOKIE_DOMAIN: .mana.how
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_SUBSCRIPTIONS_URL: http://mana-subscriptions:3063
      # Points at the mana-sync DB where sync_changes lives — read by
      # the F4 bootstrap, the /api/v1/me/bootstrap-singletons endpoint,
      # and the GDPR data summary in user-data.ts. Previously pointed
      # at mana_platform which silently broke all three (F4 was
      # fire-and-forget so the error went unnoticed; the explicit
      # endpoint added in 099cac4a0 surfaced it as a 500).
      SYNC_DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_sync
      BETTER_AUTH_SECRET: ${BETTER_AUTH_SECRET:-${JWT_SECRET:-your-jwt-secret-change-me}}
      # KEK for the encryption-vault feature (Phase 9). Required in production
      # — generate with `openssl rand -base64 32`. See services/mana-auth/CLAUDE.md.
      MANA_AUTH_KEK: ${MANA_AUTH_KEK}
      # RSA-OAEP-2048 public key of the mana-ai runner. Used to wrap
      # per-mission data keys in POST /me/ai-mission-grant. Paired with
      # MANA_AI_PRIVATE_KEY_PEM on the mana-ai service. Absent → endpoint
      # returns 503 GRANT_NOT_CONFIGURED (graceful degrade).
      MANA_AI_PUBLIC_KEY_PEM: ${MANA_AI_PUBLIC_KEY_PEM:-}
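      # Keypair generation sketch (hypothetical filenames; keep the private
      # half only on the mana-ai side):
      #   openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -out mana-ai.key
      #   openssl pkey -in mana-ai.key -pubout -out mana-ai.pub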
      MANA_NOTIFY_URL: http://mana-notify:3013
      MAX_DAILY_SIGNUPS: ${MAX_DAILY_SIGNUPS:-0}
      # Must be a superset of TRUSTED_ORIGINS in
      # services/mana-auth/src/auth/better-auth.config.ts.
      # Enforced by services/mana-auth/src/auth/sso-config.spec.ts.
      # All productivity modules now live under mana.how (path-based) —
      # no per-module subdomain entries required here.
      CORS_ORIGINS: https://mana.how,https://auth.mana.how,https://arcade.mana.how,https://whopxl.mana.how
    ports:
      - "3001:3001"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3001/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ============================================
  # Tier 1a': AI Mission Runner (Hono + Bun)
  # Background ticker that plans due AI Missions and stages proposals
  # back to user devices via mana-sync. Opt-in decrypt of encrypted
  # inputs via the Mission Key-Grant flow (see services/mana-ai/CLAUDE.md
  # and docs/plans/ai-mission-key-grant.md).
  # ============================================

  mana-ai:
    build:
      context: .
      dockerfile: services/mana-ai/Dockerfile
    image: mana-ai:local
    container_name: mana-ai
    restart: always
    mem_limit: 256m
    depends_on:
      postgres:
        condition: service_healthy
      mana-llm:
        condition: service_started
      mana-api:
        condition: service_healthy
      mana-research:
        condition: service_started
    environment:
      TZ: Europe/Berlin
      NODE_ENV: production
      PORT: 3067
      SYNC_DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_sync
      MANA_LLM_URL: http://mana-llm:3025
      MANA_API_URL: http://mana-api:3060
      MANA_RESEARCH_URL: http://mana-research:3068
      MANA_AI_DEEP_RESEARCH_ENABLED: ${MANA_AI_DEEP_RESEARCH_ENABLED:-false}
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      TICK_INTERVAL_MS: ${MANA_AI_TICK_INTERVAL_MS:-60000}
      TICK_ENABLED: ${MANA_AI_TICK_ENABLED:-true}
      # RSA-OAEP-2048 private key paired with MANA_AI_PUBLIC_KEY_PEM on
      # mana-auth. Used to unwrap per-mission data keys at tick time.
      # Absent → all grants skip silently with reason="not-configured".
      MANA_AI_PRIVATE_KEY_PEM: ${MANA_AI_PRIVATE_KEY_PEM:-}
      OTEL_EXPORTER_OTLP_ENDPOINT: http://tempo:4318
    ports:
      - "3067:3067"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3067/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ============================================
  # Tier 1b: Credits Service (Hono + Bun)
  # ============================================

  mana-credits:
    build:
      context: .
      dockerfile: services/mana-credits/Dockerfile
    image: mana-credits:local
    container_name: mana-credits
    restart: always
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      PORT: 3002
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY:-}
      STRIPE_WEBHOOK_SECRET: ${STRIPE_CREDITS_WEBHOOK_SECRET:-}
      BASE_URL: https://credits.mana.how
      CORS_ORIGINS: https://mana.how,https://chat.mana.how,https://picture.mana.how,https://todo.mana.how,https://quotes.mana.how,https://calendar.mana.how,https://clock.mana.how,https://contacts.mana.how,https://cards.mana.how,https://presi.mana.how,https://storage.mana.how,https://food.mana.how,https://plants.mana.how,https://music.mana.how,https://context.mana.how,https://photos.mana.how,https://questions.mana.how,https://calc.mana.how
    ports:
      - "3002:3002"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3002/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.mana-credits.rule=Host(`credits.mana.how`)"
      - "traefik.http.routers.mana-credits.tls=true"
      - "traefik.http.services.mana-credits.loadbalancer.server.port=3002"

  mana-research:
    build:
      context: .
      dockerfile: services/mana-research/Dockerfile
    image: mana-research:local
    container_name: mana-research
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~57 MiB, 4× headroom is enough.
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
      mana-credits:
        condition: service_healthy
      mana-search:
        condition: service_started
    environment:
      TZ: Europe/Berlin
      NODE_ENV: production
      PORT: 3068
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      # Redis requires auth (see redis service `--requirepass`). Without the
      # password here the cache layer degrades to every-request-missing with
      # NOAUTH log spam. Cache misses are not fatal (the executor just
      # proxies straight to the upstream provider), but the log noise
      # drowns out real errors in grafana/glitchtip.
      REDIS_URL: redis://:${REDIS_PASSWORD:-redis123}@redis:6379
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_LLM_URL: http://mana-llm:3025
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_SEARCH_URL: http://mana-search:3012
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      CACHE_TTL_SECONDS: 3600
      BRAVE_API_KEY: ${BRAVE_API_KEY:-}
      TAVILY_API_KEY: ${TAVILY_API_KEY:-}
      EXA_API_KEY: ${EXA_API_KEY:-}
      SERPER_API_KEY: ${SERPER_API_KEY:-}
      JINA_API_KEY: ${JINA_API_KEY:-}
      FIRECRAWL_API_KEY: ${FIRECRAWL_API_KEY:-}
      SCRAPINGBEE_API_KEY: ${SCRAPINGBEE_API_KEY:-}
      PERPLEXITY_API_KEY: ${PERPLEXITY_API_KEY:-}
      ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY:-}
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      GOOGLE_GENAI_API_KEY: ${GOOGLE_GENAI_API_KEY:-}
      CORS_ORIGINS: https://mana.how,https://chat.mana.how,https://research.mana.how
    ports:
      - "3068:3068"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3068/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.mana-research.rule=Host(`research.mana.how`)"
      - "traefik.http.routers.mana-research.tls=true"
      - "traefik.http.services.mana-research.loadbalancer.server.port=3068"

  mana-events:
    build:
      context: services/mana-events
      dockerfile: Dockerfile
    image: mana-events:local
    container_name: mana-events
    restart: always
    mem_limit: 128m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3065
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      # The public RSVP endpoints accept requests from anywhere — they're
      # designed to be hit by guests who only have the share link.
      # The host endpoints sit behind JWT auth so CORS is still scoped.
      CORS_ORIGINS: https://mana.how
    ports:
      - "3065:3065"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3065/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.mana-events.rule=Host(`events.mana.how`)"
      - "traefik.http.routers.mana-events.tls=true"
      - "traefik.http.services.mana-events.loadbalancer.server.port=3065"

  # ─── Geocoding ───────────────────────────────────────────
  # Thin Hono wrapper in front of a self-hosted Pelias stack.
  # Pelias itself (elasticsearch + api + libpostal) runs from a separate
  # compose file in services/mana-geocoding/pelias/ — see
  # services/mana-geocoding/CLAUDE.md for the initial import procedure.
  # Internal-only: no traefik labels, not exposed via Cloudflare.
  mana-geocoding:
    build:
      context: services/mana-geocoding
      dockerfile: Dockerfile
    image: mana-geocoding:local
    container_name: mana-geocoding
    restart: always
    mem_limit: 128m
    # Pelias runs on host network via its own compose, so the wrapper
    # reaches it via host.docker.internal (Pelias API at :4000).
    extra_hosts:
      - "host.docker.internal:host-gateway"
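    # Bridge smoke test (run on the macOS host; /v1/search is the standard
    # Pelias API endpoint — query text is an arbitrary example):
    #   curl 'http://localhost:4000/v1/search?text=Berlin'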
    environment:
      TZ: Europe/Berlin
      PORT: 3018
      PELIAS_API_URL: http://host.docker.internal:4000/v1
      CORS_ORIGINS: https://mana.how,http://localhost:5173
      CACHE_MAX_ENTRIES: "5000"
      CACHE_TTL_MS: "86400000"
    ports:
      - "3018:3018"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3018/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  mana-user:
    build:
      context: services/mana-user
      dockerfile: Dockerfile
    image: mana-user:local
    container_name: mana-user
    restart: always
    mem_limit: 128m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3062
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      CORS_ORIGINS: https://mana.how,https://calc.mana.how,https://calendar.mana.how,https://chat.mana.how,https://clock.mana.how,https://contacts.mana.how,https://context.mana.how,https://cards.mana.how,https://music.mana.how,https://food.mana.how,https://photos.mana.how,https://picture.mana.how,https://plants.mana.how,https://presi.mana.how,https://questions.mana.how,https://storage.mana.how,https://todo.mana.how,https://quotes.mana.how
    ports:
      - "3062:3062"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3062/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  mana-subscriptions:
    build:
      context: services/mana-subscriptions
      dockerfile: Dockerfile
    image: mana-subscriptions:local
    container_name: mana-subscriptions
    restart: always
    mem_limit: 128m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3063
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      STRIPE_SECRET_KEY: ${STRIPE_SECRET_KEY:-}
      STRIPE_WEBHOOK_SECRET: ${STRIPE_SUBSCRIPTIONS_WEBHOOK_SECRET:-}
      BASE_URL: https://subscriptions.mana.how
      CORS_ORIGINS: https://mana.how
    ports:
      - "3063:3063"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3063/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  mana-analytics:
    build:
      context: .
      dockerfile: services/mana-analytics/Dockerfile
    image: mana-analytics:local
    container_name: mana-analytics
    restart: always
    mem_limit: 128m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3064
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      MANA_AUTH_URL: http://mana-auth:3001
      MANA_LLM_URL: http://mana-llm:3025
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY:?missing in .env.macmini}
      # Seeds the per-user display-hash for the public-community pseudonym
      # ("Wachsame Eule #4528"). Set in .env.macmini — rotation re-keys
      # only future pseudonyms, existing rows keep the old hash/name.
      FEEDBACK_PSEUDONYM_SECRET: ${FEEDBACK_PSEUDONYM_SECRET:?missing in .env.macmini}
      # Comma-separated userIds that bypass community credit-grants
      # (otherwise founders self-reward when posting/shipping).
      FEEDBACK_FOUNDER_USER_IDS: ${FEEDBACK_FOUNDER_USER_IDS:-}
      CORS_ORIGINS: https://mana.how,https://feedback.mana.how
    ports:
      - "3064:3064"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3064/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  # ============================================
  # Tier 2: Gateway & Search Services (Ports 3010-3029)
  # ============================================

  api-gateway:
    build:
      context: .
      dockerfile: services/mana-api-gateway/Dockerfile
    image: mana-api-gateway:local
    container_name: mana-api-gateway
    restart: always
    mem_limit: 64m
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      PORT: 3016
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform?sslmode=disable
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
      MANA_AUTH_URL: http://mana-auth:3001
      SEARCH_SERVICE_URL: http://mana-search:3012
      STT_SERVICE_URL: ${STT_SERVICE_URL:-http://192.168.178.11:3020}
      TTS_SERVICE_URL: ${TTS_SERVICE_URL:-http://192.168.178.11:3022}
      CORS_ORIGINS: https://api.mana.how,https://mana.how
      ADMIN_USER_IDS: ${ADMIN_USER_IDS:-}
    ports:
      - "3016:3016"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3016/health"]
      interval: 60s
      timeout: 5s
      retries: 3
      start_period: 5s

  searxng:
    image: searxng/searxng:latest
    container_name: mana-core-searxng
    restart: always
    mem_limit: 256m
    volumes:
      - ./services/mana-search/searxng:/mnt/searxng-config:ro
    entrypoint: ["sh", "-c", "cp /mnt/searxng-config/settings.yml /etc/searxng/settings.yml && cp /mnt/searxng-config/limiter.toml /etc/searxng/limiter.toml 2>/dev/null; exec /usr/local/searxng/entrypoint.sh"]
    environment:
      SEARXNG_BASE_URL: http://searxng:8080
      SEARXNG_SECRET: ${SEARXNG_SECRET:-change-me-searxng-secret}
    # Internal only - no external port mapping
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:8080/healthz"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 15s

  mana-search:
    build:
      context: .
      dockerfile: services/mana-search/Dockerfile
    image: mana-search:local
    container_name: mana-core-search
    restart: always
    mem_limit: 64m
    depends_on:
      searxng:
        condition: service_healthy
    environment:
      PORT: 3012
      SEARXNG_URL: http://searxng:8080
      SEARXNG_TIMEOUT: 15000
      SEARXNG_DEFAULT_LANGUAGE: de-DE
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
      CACHE_SEARCH_TTL: 3600
      CACHE_EXTRACT_TTL: 86400
      EXTRACT_TIMEOUT: 10000
      EXTRACT_MAX_LENGTH: 50000
    ports:
      - "3012:3012"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3012/health"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 5s

  mana-sync:
    build:
      context: .
      dockerfile: services/mana-sync/Dockerfile
    image: mana-sync:local
    container_name: mana-core-sync
    restart: always
    mem_limit: 64m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      PORT: 3010
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_sync?sslmode=disable
      JWKS_URL: http://mana-auth:3001/api/v1/auth/jwks
      CORS_ORIGINS: "https://mana.how,https://*.mana.how"
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
    ports:
      - "3010:3010"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3010/health"]
      interval: 120s
      timeout: 5s
      retries: 3
      start_period: 5s

  mana-notify:
    build:
      context: .
      dockerfile: services/mana-notify/Dockerfile
    image: mana-notify:local
    container_name: mana-core-notify
    restart: always
    mem_limit: 64m
    depends_on:
      postgres:
        condition: service_healthy
      stalwart:
        condition: service_started
    environment:
      PORT: 3013
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform?sslmode=disable
      SERVICE_KEY: ${MANA_SERVICE_KEY:-dev-service-key}
      MANA_AUTH_URL: http://mana-auth:3001
      SMTP_HOST: ${SMTP_HOST:-stalwart}
      SMTP_PORT: ${SMTP_PORT:-587}
      SMTP_USER: ${SMTP_USER:-noreply}
      SMTP_PASSWORD: ${SMTP_PASSWORD:-ManaNoReply2026!}
      SMTP_FROM: "Mana <noreply@mana.how>"
      SMTP_INSECURE_TLS: "true"
      EXPO_ACCESS_TOKEN: ${EXPO_ACCESS_TOKEN:-}
    ports:
      - "3013:3013"
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:3013/health"]
      interval: 120s
      timeout: 5s
      retries: 3
      start_period: 5s

  stalwart:
    image: stalwartlabs/stalwart:latest
    container_name: mana-mail
    restart: always
    # Tier-3 right-size 2026-04-28: bootstrap-mode RSS ~11 MiB. Bumped
    # to 128m (not 64m) because once Stalwart finishes its initial setup
    # and starts handling real SMTP queues + IMAP sessions, RSS will
    # rise. 128m gives 10× current headroom without being wasteful.
    mem_limit: 128m
    ports:
      - "25:25"
      - "587:587"
      - "465:465"
      - "993:993"
      - "8443:8080"
    volumes:
      - stalwart_data:/opt/stalwart-mail
    environment:
      - STALWART_ADMIN_PASSWORD=${STALWART_ADMIN_PASSWORD:-ChangeMe123!}
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:8080/healthz"]
      interval: 120s
      timeout: 5s
      retries: 3
      start_period: 10s

  mana-crawler:
    build:
      context: .
      dockerfile: services/mana-crawler/Dockerfile
    image: mana-crawler:local
    container_name: mana-crawler
    restart: always
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      PORT: 3014
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform?sslmode=disable
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
      CRAWLER_USER_AGENT: "ManaCrawler/1.0 (+https://mana.how/bot)"
      QUEUE_CONCURRENCY: 5
    ports:
      - "3014:3014"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3014/health"]
      interval: 60s
      timeout: 5s
      retries: 3
      start_period: 5s

  mana-media:
    build:
      context: services/mana-media/apps/api
    image: mana-media:local
    container_name: mana-core-media
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~80 MiB (63%) — within
    # OOM range when image-thumb spikes hit. Bumped to 160m.
    mem_limit: 160m
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
      minio:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3011
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      REDIS_HOST: redis
      REDIS_PORT: 6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
      S3_ENDPOINT: minio
      S3_PORT: 9000
      S3_USE_SSL: "false"
      S3_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
      S3_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
      S3_BUCKET: mana-media
      S3_PUBLIC_URL: https://media.mana.how
      PUBLIC_URL: https://media.mana.how/api/v1
      CORS_ORIGINS: https://mana.how,https://food.mana.how,https://contacts.mana.how,https://chat.mana.how,https://storage.mana.how,https://photos.mana.how
    ports:
      - "3011:3011"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3011/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 10s

  mana-landing-builder:
    build:
      context: .
      dockerfile: services/mana-landing-builder/Dockerfile
    image: mana-landing-builder:local
    container_name: mana-core-landing-builder
    restart: always
    mem_limit: 192m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3015
      MANA_AUTH_URL: http://mana-auth:3001
      CLOUDFLARE_API_TOKEN: ${CLOUDFLARE_API_TOKEN:-}
      CLOUDFLARE_ACCOUNT_ID: ${CLOUDFLARE_ACCOUNT_ID:-}
      ORG_LANDING_DOMAIN: mana.how
    ports:
      - "3015:3015"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3015/api/v1/health"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 40s

  # ============================================
  # Tier 3: App Compute Servers (Hono + Bun)
  # CRUD is handled by mana-sync. These only handle
  # server-side compute (AI, file upload, external APIs).
  # ============================================

  # --- App Compute Servers (Hono + Bun, ~120 LOC each) ---
  # CRUD handled by mana-sync. These handle AI, uploads, external APIs.
  # All use FROM oven/bun:1, ~160MB images, ~30MB RAM, <50ms cold start.
  # Ports match the old NestJS backends for backward compatibility.

  # NOTE: These apps need Hono server Dockerfiles to be added.
  # For now they share the same pattern:
  #   build: { context: apps/{app}/apps/server, dockerfile: ../../Dockerfile.bun }
  # Requires: Dockerfile.bun in apps/ root (FROM oven/bun:1, COPY, CMD bun run src/index.ts)
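  # A minimal Dockerfile.bun matching the note above might look like this
  # (sketch — the COPY scope and install step depend on the workspace layout):
  #   FROM oven/bun:1
  #   WORKDIR /app
  #   COPY . .
  #   RUN bun install
  #   CMD ["bun", "run", "src/index.ts"]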

  # ============================================
  # Tier 5: Web Frontends (Ports 5000-5099)
  # ============================================

  mana-web:
    build:
      context: .
      dockerfile: apps/mana/apps/web/Dockerfile
      args:
        PUBLIC_SYNC_SERVER_URL: wss://sync.mana.how
    image: mana-web:local
    container_name: mana-app-web
    restart: always
    mem_limit: 256m
    depends_on:
      mana-auth:
        condition: service_healthy
      mana-api:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5000
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_SYNC_SERVER_URL: http://mana-sync:3010
      PUBLIC_SYNC_SERVER_URL_CLIENT: https://sync.mana.how
      # Unified Hono/Bun API server (apps/api) — hosts all 16 product
      # compute modules (calendar, todo, picture, … who) on port 3060.
      # Browser calls go through https://mana-api.mana.how (cloudflared
      # tunnel route to mana-api:3060). SSR calls inside the docker
      # network use the internal hostname.
      #
      # NOTE: api.mana.how is already in use for the Go mana-api-gateway
      # on port 3016 (different service, predates apps/api). The
      # mana-api.* subdomain is the unambiguous new home.
      PUBLIC_MANA_API_URL: http://mana-api:3060
      PUBLIC_MANA_API_URL_CLIENT: https://mana-api.mana.how
      PUBLIC_MANA_CREDITS_URL: http://mana-credits:3002
      PUBLIC_MANA_CREDITS_URL_CLIENT: https://credits.mana.how
      # Per-app HTTP backend URLs (todo-api, calendar-api, contacts-api,
      # chat-api, storage-api, cards-api, music-api, food-api,
      # picture-api, presi-api, quotes-api, clock-api, context-api) and
      # the standalone memoro-server URL were removed in the pre-launch
      # ghost-API cleanup — every product module talks to mana-sync
      # directly and the unified `memoro` module is fully local-first.
      # See docs/PRE_LAUNCH_CLEANUP.md for the full rationale.
      PUBLIC_ULOAD_SERVER_URL: http://uload-server:3070
      PUBLIC_ULOAD_SERVER_URL_CLIENT: https://uload-api.mana.how
      PUBLIC_MANA_MEDIA_URL: http://mana-media:3011
      PUBLIC_MANA_MEDIA_URL_CLIENT: https://media.mana.how
      PUBLIC_MANA_LLM_URL: http://mana-llm:3025
      PUBLIC_MANA_LLM_URL_CLIENT: https://llm.mana.how
      PUBLIC_MANA_EVENTS_URL: http://mana-events:3065
      PUBLIC_MANA_EVENTS_URL_CLIENT: https://events.mana.how
      # mana-research — async web-research provider orchestration.
      # Browser hits /research/* endpoints directly; SSR uses the
      # internal docker-network URL. Without this pair, the SSR-
      # injected window.__PUBLIC_MANA_RESEARCH_URL__ is empty string
      # and research fetches fall back to the current origin (404).
      PUBLIC_MANA_RESEARCH_URL: http://mana-research:3068
      PUBLIC_MANA_RESEARCH_URL_CLIENT: https://research.mana.how
      # mana-analytics — public-feedback hub. Browser hits the
      # /api/v1/(public/)feedback/* endpoints directly; SSR uses the
      # internal docker-network URL.
      PUBLIC_MANA_ANALYTICS_URL: http://mana-analytics:3064
      PUBLIC_MANA_ANALYTICS_URL_CLIENT: https://feedback.mana.how
      # mana-ai background Mission Runner. Browser calls the audit
      # endpoint (/api/v1/me/ai-audit) to render the Workbench
      # "Datenzugriff" tab. SSR doesn't hit this service directly.
      PUBLIC_MANA_AI_URL: http://mana-ai:3067
      PUBLIC_MANA_AI_URL_CLIENT: https://mana-ai.mana.how
      # Feature flag for the Mission Key-Grant consent UI. false → the
      # dialog + Workbench audit tab stay hidden even on missions with
      # encrypted inputs. Flip to "true" per deployment once the keypair
      # is provisioned (see docs/plans/ai-mission-key-grant.md).
      PUBLIC_AI_MISSION_GRANTS: ${PUBLIC_AI_MISSION_GRANTS:-false}
      # Analytics & Error Tracking
      PUBLIC_UMAMI_WEBSITE_ID: 32777167-e026-4618-933a-3429120b479b
      PUBLIC_GLITCHTIP_DSN: ${GLITCHTIP_DSN_MANA_WEB:-}
      # Speech-to-Text proxy: SvelteKit /api/v1/voice/transcribe forwards
      # to mana-stt via Cloudflare Tunnel. The browser never sees the API
      # key — it stays server-side.
      MANA_STT_URL: https://gpu-stt.mana.how
      MANA_STT_API_KEY: ${MANA_STT_API_KEY:-}
      # LLM proxy: /api/v1/voice/parse-task and /api/v1/voice/parse-habit
      # call mana-llm for structured extraction. Set WITHOUT the PUBLIC_
      # prefix because $env/dynamic/private explicitly excludes vars
      # that start with the public prefix — so the parse endpoints
      # would never see PUBLIC_MANA_LLM_URL even though it's right
      # there in the compose env. Both vars exist; the public one
      # is read by the browser-side playground and status page.
      MANA_LLM_URL: http://mana-llm:3025
      MANA_LLM_API_KEY: ${MANA_LLM_API_KEY:-}
    ports:
      - "5000:5000"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5000/health"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 20s

  # REMOVED standalone web containers — now served by unified mana-web container (mana.how):
  # chat-web, todo-web, quotes-web, calendar-web, clock-web, contacts-web,
  # storage-web, presi-web, cards-web, food-web, skilltree-web, photos-web,
  # music-web, citycorners-web, picture-web, inventory-web, calc-web, times-web,
  # uload-web, memoro-web

  # picture-backend: REMOVED — replaced by Hono server (apps/picture/apps/server)

  arcade-web:
    build:
      context: .
      dockerfile: games/arcade/apps/web/Dockerfile
    image: arcade-web:local
    container_name: mana-app-arcade-web
    restart: always
    mem_limit: 128m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5210
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_SYNC_SERVER_URL: http://mana-sync:3010
      PUBLIC_SYNC_SERVER_URL_CLIENT: https://sync.mana.how
    ports:
      - "5210:5210"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5210/health"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 20s

  manavoxel-web:
    build:
      context: .
      dockerfile: apps/manavoxel/apps/web/Dockerfile
    image: manavoxel-web:local
    container_name: mana-app-manavoxel-web
    restart: always
    mem_limit: 128m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5028
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_SYNC_SERVER_URL: http://mana-sync:3010
      PUBLIC_SYNC_SERVER_URL_CLIENT: https://sync.mana.how
    ports:
      - "5028:5028"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5028/health"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 45s

  uload-server:
    build:
      context: apps/uload/apps/server
      dockerfile: Dockerfile
    image: uload-server:local
    container_name: mana-app-uload-server
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~51 MiB (20%). 128m is
    # 2.5× headroom — enough for spikes during multi-file uploads.
    mem_limit: 128m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3070
      DATABASE_URL: postgresql://mana:${POSTGRES_PASSWORD:-devpassword}@postgres:5432/mana_sync
      MANA_AUTH_URL: http://mana-auth:3001
      CORS_ORIGINS: http://mana-web:5000,https://mana.how,https://uload.mana.how,https://ulo.ad
    ports:
      - "3070:3070"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3070/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

  memoro-server:
    build:
      context: .
      dockerfile: docker/Dockerfile.hono-server
      args:
        APP: memoro
    image: memoro-server:local
    container_name: mana-app-memoro-server
    restart: always
    mem_limit: 256m
    depends_on:
      mana-auth:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 3015
      MANA_AUTH_URL: http://mana-auth:3001
      CORS_ORIGINS: http://mana-web:5000,https://mana.how,https://memoro.mana.how
      MEMORO_SUPABASE_URL: ${MEMORO_SUPABASE_URL}
      MEMORO_SUPABASE_SERVICE_KEY: ${MEMORO_SUPABASE_SERVICE_KEY}
      SERVICE_KEY: ${MEMORO_SERVICE_KEY}
      AUDIO_SERVER_URL: http://memoro-audio-server:3016
      MANA_LLM_URL: http://mana-llm:3025
      MANA_LLM_MODEL: ${MANA_LLM_MODEL:-ollama/gemma3:12b}
      GEMINI_API_KEY: ${GEMINI_API_KEY}
      AZURE_OPENAI_KEY: ${AZURE_OPENAI_KEY}
      AZURE_OPENAI_ENDPOINT: ${AZURE_OPENAI_ENDPOINT}
      AZURE_OPENAI_DEPLOYMENT: ${AZURE_OPENAI_DEPLOYMENT}
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_CREDITS_SERVICE_KEY: ${MANA_CREDITS_SERVICE_KEY}
    ports:
      - "3015:3015"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3015/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s

  memoro-audio-server:
    build:
      context: apps/memoro/apps/audio-server
      dockerfile: Dockerfile
    image: memoro-audio-server:local
    container_name: mana-app-memoro-audio-server
    restart: always
    mem_limit: 512m
    environment:
      NODE_ENV: production
      PORT: 3016
      SERVICE_KEY: ${MEMORO_SERVICE_KEY}
      MEMORO_SERVER_URL: http://memoro-server:3015
      MEMORO_SUPABASE_URL: ${MEMORO_SUPABASE_URL}
      MEMORO_SUPABASE_SERVICE_KEY: ${MEMORO_SUPABASE_SERVICE_KEY}
      AZURE_SPEECH_KEY_1: ${AZURE_SPEECH_KEY_1}
      AZURE_SPEECH_KEY_2: ${AZURE_SPEECH_KEY_2}
      AZURE_SPEECH_KEY_3: ${AZURE_SPEECH_KEY_3}
      AZURE_SPEECH_KEY_4: ${AZURE_SPEECH_KEY_4}
      AZURE_SPEECH_REGION: ${AZURE_SPEECH_REGION:-germanywestcentral}
      AZURE_SPEECH_ENDPOINT: ${AZURE_SPEECH_ENDPOINT}
      AZURE_STORAGE_ACCOUNT_NAME: ${AZURE_STORAGE_ACCOUNT_NAME}
      AZURE_STORAGE_ACCOUNT_KEY: ${AZURE_STORAGE_ACCOUNT_KEY}
      AZURE_STORAGE_CONTAINER: ${AZURE_STORAGE_CONTAINER:-memoro-batch-audio}
      MANA_STT_URL: http://host.docker.internal:3020
      MANA_STT_API_KEY: ${MANA_STT_API_KEY:-}
    ports:
      - "3016:3016"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3016/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s

  mana-llm:
    build:
      context: ./services/mana-llm
      dockerfile: Dockerfile
    container_name: mana-service-llm
    restart: unless-stopped
    # Tier-3 right-size 2026-04-28: live RSS ~46 MiB (18%). The service
    # is a thin OpenAI-compatible router around the GPU-box Ollama —
    # all heavy LLM work happens upstream, this container just proxies.
    # 128m is 2.5× headroom for streaming response buffers.
    mem_limit: 128m
    depends_on:
      redis:
        condition: service_healthy
    # Ollama lives on the Windows GPU box at 192.168.178.11:11434, but
    # Colima containers can't reach the LAN range — the entire
    # 192.168.178.0/24 subnet gets synthesized RST from inside any
    # container, even though the macOS host routes there fine. The
    # gpu-proxy LaunchAgent on the Mac Mini host (com.mana.gpu-proxy,
    # see /Users/mana/gpu-proxy.py) bridges 127.0.0.1:13434 → GPU
    # box's 11434, so we go through host.docker.internal:13434 to
    # reach Ollama. Without this hop the local mana-llm starts
    # cleanly but reports an empty model list and every chat
    # completion fails with "All connection attempts failed", which
    # cascades into voice quick-add silently degrading to its no-LLM
    # fallback for everyone hitting the local stack.
    extra_hosts:
      - "host.docker.internal:host-gateway"
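    # If the model list comes back empty, test the bridge from the macOS
    # host first (/api/tags is Ollama's model-list endpoint):
    #   curl http://127.0.0.1:13434/api/tags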
    environment:
      PORT: 3025
      LOG_LEVEL: info
      OLLAMA_URL: ${OLLAMA_URL:-http://host.docker.internal:13434}
      OLLAMA_DEFAULT_MODEL: ${OLLAMA_MODEL:-gemma3:12b}
      OLLAMA_TIMEOUT: 120
      REDIS_URL: redis://:${REDIS_PASSWORD:-redis123}@redis:6379
      OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
      GROQ_API_KEY: ${GROQ_API_KEY:-}
      TOGETHER_API_KEY: ${TOGETHER_API_KEY:-}
      GOOGLE_API_KEY: ${GOOGLE_API_KEY:-}
      GOOGLE_DEFAULT_MODEL: gemini-2.0-flash
      AUTO_FALLBACK_ENABLED: "true"
      OLLAMA_MAX_CONCURRENT: 5
      CORS_ORIGINS: https://playground.mana.how,https://mana.how,https://chat.mana.how
    ports:
      - "3025:3025"
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:3025/health')"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 30s

  llm-playground:
    build:
      context: .
      dockerfile: apps/playground/apps/web/Dockerfile
    container_name: mana-app-llm-playground
    restart: unless-stopped
    # Tier-3 right-size 2026-04-28: live RSS ~22 MiB (17%) — 64m is
    # plenty for a SvelteKit static-export demo page.
    mem_limit: 64m
    depends_on:
      mana-auth:
        condition: service_healthy
      mana-llm:
        condition: service_healthy
    environment:
      NODE_ENV: production
      PORT: 5050
      PUBLIC_MANA_AUTH_URL: http://mana-auth:3001
      PUBLIC_MANA_AUTH_URL_CLIENT: https://auth.mana.how
      PUBLIC_MANA_LLM_URL: http://mana-llm:3025
      PUBLIC_MANA_LLM_URL_CLIENT: https://llm.mana.how
    ports:
      - "5050:5050"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:5050/health"]
      interval: 180s
      timeout: 10s
      retries: 3
      start_period: 45s
    labels:
      - "com.centurylinklabs.watchtower.enable=true"

  # ============================================
  # Tier 7: Monitoring Dashboards (Ports 8000-8099)
  # ============================================

  grafana:
    image: grafana/grafana:10.4.1
    container_name: mana-mon-grafana
    restart: always
    mem_limit: 192m
    depends_on:
      victoriametrics:
        condition: service_healthy
    environment:
      GF_SECURITY_ADMIN_USER: admin
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
      GF_USERS_ALLOW_SIGN_UP: "false"
      GF_AUTH_ANONYMOUS_ENABLED: "true"
      GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer
      GF_SERVER_ROOT_URL: https://grafana.mana.how
      GF_SERVER_HTTP_PORT: 8000
      GF_INSTALL_PLUGINS: yesoreyeram-infinity-datasource
      GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH: /var/lib/grafana/dashboards/master-overview.json
    volumes:
      - ./docker/grafana/provisioning:/etc/grafana/provisioning:ro
      - ./docker/grafana/dashboards:/var/lib/grafana/dashboards:ro
      - grafana_data:/var/lib/grafana
    ports:
      - "8000:8000"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:8000/api/health"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 10s

  umami:
    # Pinned away from postgresql-latest on 2026-04-23. The rolling
    # tag jumped to Umami 3.1.0 (Next.js 16) and started crashing the
    # container on every POST /api/send — page loaders hung on the
    # failing tracker request. v2.18.0 is the last known-stable v2.
    # Rolling back to v2 was safe here because the schema is shared
    # across 2.x. If you bump to v3 again, verify the DB migration
    # path and test /api/send with a real POST before committing.
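    # Smoke-test sketch for such a bump — a minimal Umami-v2-style send
    # payload using this deployment's website id (field names may differ
    # across versions; verify against the release notes):
    #   curl -X POST https://<umami-host>/api/send \
    #     -H 'Content-Type: application/json' -H 'User-Agent: Mozilla/5.0' \
    #     -d '{"type":"event","payload":{"website":"32777167-e026-4618-933a-3429120b479b","url":"/"}}'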
    image: ghcr.io/umami-software/umami:postgresql-v2.18.0
    container_name: mana-mon-umami
    restart: always
    mem_limit: 384m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/umami
      DATABASE_TYPE: postgresql
      APP_SECRET: ${UMAMI_APP_SECRET:-change-me-umami-secret}
      DISABLE_TELEMETRY: 1
    ports:
      - "8010:3000"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3000/api/heartbeat"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 30s

  # ============================================
  # Tier 8: Metrics & Exporters (Ports 9000-9199)
  # ============================================

  victoriametrics:
    image: victoriametrics/victoria-metrics:v1.99.0
    container_name: mana-mon-victoria
    restart: always
    mem_limit: 384m
    # Mount the host config dir read-only and point promscrape directly at it,
    # so edits to docker/prometheus/prometheus.yml are picked up by POST /-/reload
    # without a container restart. The previous setup baked a copy into
    # /etc/prometheus/ at startup, which silently drifted from the host file
    # whenever the container wasn't restarted (matrix removal incident, 2026-04-08).
    entrypoint: ["/victoria-metrics-prod", "-storageDataPath=/storage", "-retentionPeriod=2y", "-httpListenAddr=:9090", "-promscrape.config=/etc/prometheus/prometheus.yml", "-promscrape.config.strictParse=false", "-selfScrapeInterval=15s", "-search.latencyOffset=0s"]
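    # Reload sketch after editing docker/prometheus/prometheus.yml
    # (the /-/reload endpoint mentioned above, hit from the host):
    #   curl -X POST http://localhost:9090/-/reload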
    volumes:
      - ./docker/prometheus:/etc/prometheus:ro
      - victoriametrics_data:/storage
    ports:
      - "9090:9090"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9090/health"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 10s

  tempo:
    image: grafana/tempo:2.6.1
    container_name: mana-mon-tempo
    restart: always
    mem_limit: 256m
    command: ["-config.file=/etc/tempo/tempo.yaml"]
    volumes:
      - ./docker/tempo:/etc/tempo:ro
      - tempo_data:/var/tempo
    ports:
      - "4318:4318" # OTLP HTTP receiver
      - "3200:3200" # Tempo API (for Grafana)
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3200/ready"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 10s

  loki:
    image: grafana/loki:3.0.0
    container_name: mana-mon-loki
    restart: always
    mem_limit: 192m
    entrypoint: ["sh", "-c", "mkdir -p /etc/loki && cp /mnt/loki-config/*.yaml /etc/loki/ 2>/dev/null; exec /usr/bin/loki -config.file=/etc/loki/local-config.yaml"]
    volumes:
      - ./docker/loki:/mnt/loki-config:ro
      - loki_data:/loki
    ports:
      - "3100:3100"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3100/ready"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 15s

  promtail:
    image: grafana/promtail:3.0.0
    container_name: mana-mon-promtail
    restart: always
    mem_limit: 96m
    command: -config.file=/etc/promtail/config.yaml -config.expand-env=true
    volumes:
      - ./docker/promtail:/etc/promtail:ro
      - /var/run/docker.sock:/var/run/docker.sock:ro
    depends_on:
      loki:
        condition: service_started
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9080/ready"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 10s

  pushgateway:
    image: prom/pushgateway:v1.7.0
    container_name: mana-mon-pushgateway
    restart: always
    mem_limit: 48m
    ports:
      - "9091:9091"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9091/-/healthy"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 20s

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:v0.49.1
    container_name: mana-mon-cadvisor
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~98 MiB (76%) — too close
    # to OOM during cgroup-stat bursts on a busy host. Bumped to 160m.
    mem_limit: 160m
    privileged: true
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    ports:
      - "9110:8080"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:8080/healthz"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 20s

  postgres-exporter:
    image: prometheuscommunity/postgres-exporter:v0.15.0
    container_name: mana-mon-postgres-exporter
    restart: always
    mem_limit: 48m
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      DATA_SOURCE_NAME: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/postgres?sslmode=disable
    ports:
      - "9187:9187"

  redis-exporter:
    image: oliver006/redis_exporter:v1.58.0
    container_name: mana-mon-redis-exporter
    restart: always
    mem_limit: 32m
    depends_on:
      redis:
        condition: service_healthy
    environment:
      REDIS_ADDR: redis://redis:6379
      REDIS_PASSWORD: ${REDIS_PASSWORD:-redis123}
    ports:
      - "9121:9121"

  node-exporter:
    image: prom/node-exporter:v1.7.0
    container_name: mana-mon-node-exporter
    restart: always
    mem_limit: 32m
    # macOS Docker runs in a Linux VM, so we can only monitor the VM's metrics
    # For full host metrics on macOS, install node_exporter natively
    command:
      - '--collector.disable-defaults'
      - '--collector.cpu'
      - '--collector.meminfo'
      - '--collector.loadavg'
      - '--collector.filesystem'
      - '--collector.netdev'
      - '--collector.time'
      - '--collector.uname'
      - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)'
    ports:
      - "9100:9100"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9100/metrics"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 20s

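  # With --collector.disable-defaults only the allow-listed collectors above
  # should run. To confirm which ones are active (sketch):
  #   curl -s http://localhost:9100/metrics | grep '^node_scrape_collector_success'
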
  status-page-gen:
    image: alpine:3.20
    container_name: mana-status-gen
    restart: always
    mem_limit: 64m
    # host network: direct access to VictoriaMetrics via localhost:9090
    # (depends_on is not possible with network_mode: host)
    network_mode: host
    environment:
      VICTORIAMETRICS_URL: http://localhost:9090
      OUTPUT_FILE: /output/index.html
    volumes:
      - ./scripts/generate-status-page.sh:/generate.sh:ro
      - ./packages/shared-branding/src/mana-apps.ts:/mana-apps.ts:ro
      - /Volumes/ManaData/landings/status:/output
    command:
      - sh
      - -c
      - |
        apk add --no-cache curl jq || { echo "apk add failed, retrying in 10s"; sleep 10; exit 1; }
        mkdir -p /output
        while true; do
          # run from a copy so host edits to the ro-mounted script can't change it mid-run
          cp /generate.sh /tmp/generate.sh
          sh /tmp/generate.sh
          sleep 60
        done

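  # Freshness check from the macOS host (sketch; path from the volume mount
  # above; BSD stat syntax, use `stat -c '%y'` on Linux):
  #   stat -f '%Sm' /Volumes/ManaData/landings/status/index.html
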
  blackbox-exporter:
    image: prom/blackbox-exporter:v0.25.0
    container_name: mana-mon-blackbox
    restart: always
    mem_limit: 128m
    # Use Cloudflare + Google public resolvers instead of Docker's
    # embedded DNS (127.0.0.11). Docker DNS forwards to the host
    # resolver which forwards to the home router (FRITZ!Box), and the
    # router keeps a stale negative cache for hours after a hostname
    # first fails. New CNAMEs (e.g. fresh GPU public hostnames added
    # via the Cloudflare dashboard) appear as "no such host" to the
    # blackbox probes for the entire negative-cache TTL even though
    # they resolve fine via 1.1.1.1 directly.
    dns:
      - 1.1.1.1
      - 8.8.8.8
    command: ["--config.file=/etc/blackbox/blackbox.yml"]
    volumes:
      - ./docker/blackbox/blackbox.yml:/etc/blackbox/blackbox.yml:ro
    ports:
      - "9115:9115"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9115/"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 10s

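  # Manual probe, e.g. to verify the DNS workaround above (sketch; the module
  # name must exist in docker/blackbox/blackbox.yml, "http_2xx" is assumed):
  #   curl -s 'http://localhost:9115/probe?module=http_2xx&target=https://mana.how' \
  #     | grep '^probe_success'
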
  # ============================================
  # Alerting Stack (Ports 9093-9095)
  # ============================================

  vmalert:
    image: victoriametrics/vmalert:v1.99.0
    container_name: mana-mon-vmalert
    restart: always
    mem_limit: 64m
    depends_on:
      victoriametrics:
        condition: service_healthy
      alertmanager:
        condition: service_healthy
    # Same direct-mount pattern as victoriametrics above — see the comment
    # there for the rationale.
    entrypoint: ["/vmalert-prod", "-datasource.url=http://victoriametrics:9090", "-notifier.url=http://alertmanager:9093", "-remoteWrite.url=http://victoriametrics:9090", "-remoteRead.url=http://victoriametrics:9090", "-rule=/etc/alerts/alerts.yml", "-evaluationInterval=30s", "-httpListenAddr=:8880"]
    volumes:
      - ./docker/prometheus:/etc/alerts:ro
    ports:
      - "8880:8880"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:8880/health"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 25s

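  # Rule state after editing alerts.yml (sketch; vmalert serves a
  # Prometheus-compatible rules API):
  #   curl -s http://localhost:8880/api/v1/rules | jq '.data.groups[].rules[].state'
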
  alertmanager:
    image: prom/alertmanager:v0.27.0
    container_name: mana-mon-alertmanager
    restart: always
    mem_limit: 64m
    depends_on:
      alert-notifier:
        condition: service_healthy
    command: ["--config.file=/etc/alertmanager/alertmanager.yml", "--storage.path=/alertmanager", "--web.listen-address=:9093"]
    volumes:
      - ./docker/alertmanager/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
      - alertmanager_data:/alertmanager
    ports:
      - "9093:9093"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9093/-/healthy"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 25s

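  # End-to-end notification test (sketch; fires a synthetic alert through
  # Alertmanager's v2 API; whether it reaches alert-notifier and then
  # Telegram/ntfy depends on the routing in alertmanager.yml):
  #   curl -s -X POST http://localhost:9093/api/v2/alerts \
  #     -H 'Content-Type: application/json' \
  #     -d '[{"labels":{"alertname":"ManualTest","severity":"info"}}]'
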
  alert-notifier:
    build:
      context: ./docker/alert-notifier
      dockerfile: Dockerfile
    image: alert-notifier:local
    container_name: mana-mon-alert-notifier
    restart: always
    # Tier-3 right-size 2026-04-28: live RSS ~25 MiB (79%) — at OOM
    # risk during alert-burst when many alerts queue at once. Bumped
    # to 48m.
    mem_limit: 48m
    environment:
      PORT: 8080
      TELEGRAM_BOT_TOKEN: ${TELEGRAM_BOT_TOKEN:-}
      TELEGRAM_CHAT_ID: ${TELEGRAM_CHAT_ID:-}
      NTFY_TOPIC: ${NTFY_TOPIC:-}
    ports:
      - "9095:8080"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:8080/health"]
      interval: 300s
      timeout: 5s
      retries: 3
      start_period: 25s

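  # Note the port split: the healthcheck above hits 8080 inside the container;
  # from the host the same endpoint sits behind the 9095 mapping (sketch):
  #   curl -s http://localhost:9095/health
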
  # ============================================
  # Auto-Update (Watchtower)
  # ============================================

  watchtower:
    image: nickfedor/watchtower:latest
    container_name: mana-auto-watchtower
    restart: always
    mem_limit: 64m
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      TZ: Europe/Berlin
      DOCKER_API_VERSION: 1.45
      WATCHTOWER_POLL_INTERVAL: 300
      WATCHTOWER_CLEANUP: "true"
      WATCHTOWER_INCLUDE_STOPPED: "false"

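  # To pin a service against auto-updates, add the standard Watchtower opt-out
  # label to that service (sketch; assumed to be honored by this fork the same
  # way as upstream watchtower):
  #   labels:
  #     - "com.centurylinklabs.watchtower.enable=false"
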
  # ============================================
  # GlitchTip Error Tracking (Sentry-compatible)
  # ============================================

  glitchtip:
    image: glitchtip/glitchtip:latest
    container_name: mana-mon-glitchtip
    restart: always
    mem_limit: 384m
    environment:
      DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/glitchtip
      REDIS_URL: redis://:${REDIS_PASSWORD:-redis123}@redis:6379/1
      SECRET_KEY: ${GLITCHTIP_SECRET_KEY:-change-me-in-production}
      PORT: "8020"
      GLITCHTIP_DOMAIN: https://glitchtip.mana.how
      DEFAULT_FROM_EMAIL: glitchtip@mana.how
      CELERY_WORKER_AUTOSCALE: "1,3"
      ENABLE_USER_REGISTRATION: "true"
    ports:
      - "8020:8020"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8020/_health/')"]
      interval: 300s
      timeout: 10s
      retries: 3
      start_period: 30s

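  # DATABASE_URL above targets a "glitchtip" database, which the postgres
  # image does not create on its own (POSTGRES_DB is mana_platform).
  # One-time setup (sketch):
  #   docker exec mana-infra-postgres createdb -U postgres glitchtip
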
  glitchtip-worker:
    image: glitchtip/glitchtip:latest
    container_name: mana-mon-glitchtip-worker
    restart: always
    mem_limit: 192m
    command: ./bin/run-celery-with-beat.sh
    environment:
      DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/glitchtip
      REDIS_URL: redis://:${REDIS_PASSWORD:-redis123}@redis:6379/1
      SECRET_KEY: ${GLITCHTIP_SECRET_KEY:-change-me-in-production}
      GLITCHTIP_DOMAIN: https://glitchtip.mana.how
      CELERY_WORKER_AUTOSCALE: "1,3"
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy

  # ============================================
  # Unified API Server
  # ============================================
  # apps/api — Hono/Bun process that hosts all 17 product compute
  # modules (calendar, todo, chat, picture, planta, food, news,
  # traces, moodlit, presi, music, contacts, storage, context, guides,
  # research, who) on a single port. Replaces ~17 per-product backend
  # containers from the pre-consolidation era; the unified Mana web
  # app's compute calls all flow through here.

  mana-api:
    build:
      context: .
      dockerfile: apps/api/Dockerfile
    image: mana-api:local
    container_name: mana-api
    restart: always
    mem_limit: 384m
    depends_on:
      postgres:
        condition: service_healthy
      mana-auth:
        condition: service_healthy
    environment:
      TZ: Europe/Berlin
      NODE_ENV: production
      PORT: 3060
      # Auth (JWT validation via JWKS)
      MANA_AUTH_URL: http://mana-auth:3001
      JWT_PUBLIC_KEY: ${JWT_PUBLIC_KEY:-}
      # Compute services apps/api orchestrates
      MANA_LLM_URL: http://mana-llm:3025
      MANA_SEARCH_URL: http://mana-search:3012
      MANA_CREDITS_URL: http://mana-credits:3002
      MANA_MEDIA_URL: http://mana-media:3011
      MANA_CRAWLER_URL: http://mana-crawler:3014
      MANA_LLM_DEFAULT_MODEL: ${MANA_LLM_DEFAULT_MODEL:-gemma3:4b}
      MANA_SERVICE_KEY: ${MANA_SERVICE_KEY}
      # OpenAI — picture module gpt-image-2 path. Optional: without it,
      # /api/v1/picture/generate falls through to Replicate/local Flux.
      OPENAI_API_KEY: ${OPENAI_API_KEY:-}
      # Replicate — fallback for Flux-schnell image generation
      REPLICATE_API_TOKEN: ${REPLICATE_API_TOKEN:-}
      APP_ID: mana-api
      # Database (used by modules that have server-side state — research,
      # presi share-links, traces guides). Same Postgres + schema split
      # as the rest of the platform.
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      # CORS — only the unified mana.how origin needs access today.
      # The arcade + manavoxel game frontends don't call apps/api.
      CORS_ORIGINS: https://mana.how
      # Structured-logger format
      LOGGER_FORMAT: json
    ports:
      - "3060:3060"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3060/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 60s
      timeout: 5s
      retries: 3
      start_period: 30s

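  # Host-side smoke test (sketch; same endpoint the bun healthcheck above
  # polls, plus a check of the CORS allowlist; whether the header appears on
  # /health depends on how apps/api wires its CORS middleware):
  #   curl -s http://localhost:3060/health
  #   curl -s -i -H 'Origin: https://mana.how' http://localhost:3060/health | grep -i access-control
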
  # ============================================
  # News Ingester
  # ============================================
  # services/news-ingester — pulls public RSS/JSON feeds into the
  # news.curated_articles pool every 15 min. The unified mana-api reads
  # from the same table to serve /api/v1/news/feed; user reading lists
  # remain client-side in the unified Mana app's local IndexedDB.

  news-ingester:
    build:
      context: services/news-ingester
      dockerfile: Dockerfile
    image: news-ingester:local
    container_name: news-ingester
    restart: always
    mem_limit: 256m
    depends_on:
      postgres: { condition: service_healthy }
    environment:
      TZ: Europe/Berlin
      PORT: 3066
      DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/mana_platform
      TICK_INTERVAL_MS: "900000"  # 900,000 ms = 900 s = the 15-min cadence above
      RUN_ON_STARTUP: "true"
    ports:
      - "3066:3066"
    healthcheck:
      test: ["CMD", "bun", "-e", "fetch('http://127.0.0.1:3066/health').then(r=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))"]
      interval: 120s
      timeout: 10s
      retries: 3
      start_period: 30s

  # ============================================
  # Games
  # ============================================
  # whopixels was removed 2026-04-09 — its core mechanic (LLM-driven
  # historical figure guessing) now lives as the `who` module inside
  # apps/mana/apps/web. The standalone Phaser/Node container is gone;
  # see docs/WHO_MODULE.md for the migration rationale.

# ============================================
# Volumes (Naming: mana-{service}-data)
# ============================================
volumes:
  redis_data:
    name: mana-redis-data
  victoriametrics_data:
    name: mana-victoria-data
  alertmanager_data:
    name: mana-alertmanager-data
  grafana_data:
    name: mana-grafana-data
  analytics_data:
    name: mana-analytics-data
  loki_data:
    name: mana-loki-data
  stalwart_data:
    name: mana-stalwart-data
  tempo_data:
    name: mana-tempo-data