mirror of
https://github.com/Memo-2023/mana-monorepo.git
synced 2026-05-14 21:01:08 +02:00
This commit bundles two unrelated changes that were swept together by an
accidental `git add -A` in another working session. Documented here so the
history reflects what's actually inside.
═══════════════════════════════════════════════════════════════════════
1. fix(mana-auth): /api/v1/auth/login mints JWT via auth.handler instead
of api.signInEmail
═══════════════════════════════════════════════════════════════════════
Previous attempt (commit 55cc75e7d) tried to fix the broken JWT mint in
/api/v1/auth/login by switching the cookie name from `mana.session_token`
to `__Secure-mana.session_token` for production. That was necessary but
not sufficient: Better Auth's session cookie value isn't just the raw
session token, it's `<token>.<HMAC>` where the HMAC is derived from the
better-auth secret. Reconstructing the cookie from auth.api.signInEmail's
JSON response only gave us the raw token, so /api/auth/token's
get-session middleware still couldn't validate it and the JWT mint kept
silently failing.
Real fix: do the sign-in via auth.handler (the HTTP path) rather than
auth.api.signInEmail (the SDK path). The handler returns a real fetch
Response with a Set-Cookie header containing the fully signed cookie
envelope. We capture that header verbatim and forward it as the cookie
on the /api/auth/token request, which now passes validation and mints
the JWT correctly.
Verified end-to-end on auth.mana.how:
$ curl -X POST https://auth.mana.how/api/v1/auth/login \
-d '{"email":"...","password":"..."}'
{
"user": {...},
"token": "<session token>",
"accessToken": "eyJhbGciOiJFZERTQSI...", ← real JWT now
"refreshToken": "<session token>"
}
Side benefits:
- Email-not-verified path is now handled by checking
signInResponse.status === 403 directly, no more catching APIError
with the comment-noted async-stream footgun.
- X-Forwarded-For is forwarded explicitly so Better Auth's rate limiter
and our security log see the real client IP.
- The leftover catch block now only handles unexpected exceptions
(network errors etc); the FORBIDDEN-checking logic in it is dead but
harmless and left in for defense in depth.
═══════════════════════════════════════════════════════════════════════
2. chore: remove the entire self-hosted Matrix stack (Synapse, Element,
Manalink, mana-matrix-bot)
═══════════════════════════════════════════════════════════════════════
The Matrix subsystem ran parallel to the main Mana product without any
load-bearing integration: the unified web app never imported matrix-js-sdk,
the chat module uses mana-sync (local-first), and mana-matrix-bot's
plugins duplicated features the unified app already ships natively.
Keeping it alive cost a Synapse + Element + matrix-web + bot container
quartet, three Cloudflare routes, an OIDC provider plugin in mana-auth,
and a steady drip of devlog/dependency churn.
Removed:
- apps/matrix (Manalink web + mobile, ~150 files)
- services/mana-matrix-bot (Go bot with ~20 plugins)
- docker/matrix configs (Synapse + Element)
- synapse/element-web/matrix-web/mana-matrix-bot services in
docker-compose.macmini.yml
- matrix.mana.how/element.mana.how/link.mana.how Cloudflare tunnel routes
- OIDC provider plugin + matrix-synapse trustedClient + matrixUserLinks
table from mana-auth (oauth_* schema definitions also removed)
- MatrixService import path in mana-media (importFromMatrix endpoint)
- Matrix notification channel in mana-notify (worker, metrics, config,
channel_type enum, MatrixOptions handler)
- Matrix entries from shared-branding (mana-apps + app-icons),
notify-client, the i18n bundle, the observatory map, the credits
app-label list, the landing footer/apps page, the prometheus + alerts
+ promtail tier mappings, and the matrix-related deploy paths in
cd-macmini.yml + ci.yml
Devlog/manascore/blueprint entries that mention Matrix are left intact
as historical record. The oauth_* + matrix_user_links Postgres tables
stay on existing prod databases — code can no longer write to them, drop
them in a follow-up migration if you want them gone for real.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
153 lines
5.3 KiB
Bash
Executable file
#!/bin/bash
#
# Mana Mac Mini Startup Script
# Called by launchd on boot — starts Colima + all containers
#
# LaunchAgent: ~/Library/LaunchAgents/com.mana.docker-startup.plist

# NOTE(review): -e is intentionally absent here, it seems — later sections
# check critical failures explicitly (`docker info` after `colima start`,
# prerequisite file checks) and several steps are deliberately best-effort.
# Confirm before adding it: `-e` would abort on any non-critical hiccup.
set -uo pipefail

# launchd starts with a minimal PATH; make Homebrew tools (colima, docker) visible.
export PATH="/opt/homebrew/bin:/usr/local/bin:$PATH"

readonly LOG_FILE="/tmp/mana-startup.log"

# Resolve the repo root relative to this script (scripts/mac-mini/ → repo root).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
readonly SCRIPT_DIR
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
readonly PROJECT_ROOT

readonly COMPOSE_FILE="$PROJECT_ROOT/docker-compose.macmini.yml"
readonly ENV_FILE="$PROJECT_ROOT/.env.macmini"
#######################################
# Append a timestamped message to stdout and the startup log.
# Globals:   LOG_FILE (read)
# Arguments: $* - message text (callers pass a single string today;
#                 "$*" keeps that working and tolerates multiple args)
# Outputs:   "[YYYY-MM-DD HH:MM:SS] message" to stdout and $LOG_FILE
#######################################
log() {
  # printf instead of echo: message content is arbitrary (paths, arrows),
  # and echo's backslash/-n handling is non-portable.
  printf '[%s] %s\n' "$(date '+%Y-%m-%d %H:%M:%S')" "$*" | tee -a "$LOG_FILE"
}
log "=== Mana Startup Script ==="
log "Project root: $PROJECT_ROOT"

# ─── Kill Docker Desktop if it auto-started ───
# Everything below runs under Colima; shut Docker Desktop down first
# (presumably so the two runtimes don't fight over the docker context —
# confirm). Graceful quit via AppleScript, then force-kill stragglers.
stop_docker_desktop() {
  log "Docker Desktop detected, stopping..."
  osascript -e 'quit app "Docker"' 2>/dev/null
  sleep 5
  pkill -f "Docker.app" 2>/dev/null || true
  sleep 3
  log "Docker Desktop stopped"
}

if pgrep -f "Docker.app" >/dev/null 2>&1; then
  stop_docker_desktop
fi
# ─── Verify Colima datadisk symlink (must be on external SSD) ───
COLIMA_DISK_LINK="$HOME/.colima/_lima/_disks/colima"
EXPECTED_TARGET="/Volumes/ManaData/colima-disk"

# A plain directory here means the datadisk is sitting on the internal SSD
# instead of being a symlink to the external one — refuse to continue.
if [[ -e "$COLIMA_DISK_LINK" && ! -L "$COLIMA_DISK_LINK" ]]; then
  log "ERROR: Colima datadisk is a directory, not a symlink!"
  log "The datadisk must live on the external SSD to prevent filling the internal SSD."
  log "Run: scripts/mac-mini/move-colima-to-external-ssd.sh"
  exit 1
fi

# Symlink exists: confirm it points where we expect (warn-only on mismatch).
if [[ -L "$COLIMA_DISK_LINK" ]]; then
  actual_target=$(readlink "$COLIMA_DISK_LINK")
  if [[ "$actual_target" == "$EXPECTED_TARGET" ]]; then
    log "Colima datadisk symlink OK → $EXPECTED_TARGET"
  else
    log "WARNING: Colima datadisk symlink points to $actual_target (expected $EXPECTED_TARGET)"
  fi
fi

# The external SSD itself must be mounted before anything can start.
if [[ ! -d "/Volumes/ManaData" ]]; then
  log "ERROR: External SSD /Volumes/ManaData not mounted!"
  exit 1
fi
# ─── Start Colima ───
# `docker info` is the source of truth for "is the runtime usable" instead
# of `colima status`, which can mis-report and trigger a destructive restart.
if ! docker info >/dev/null 2>&1; then
  log "Colima not reachable, preparing fresh start..."

  # Reap zombie colima/limactl processes from previously failed starts.
  # These hold locks that prevent a clean start. Do NOT touch a running VM.
  stale_patterns=("colima stop" "limactl stop" "colima daemon" "limactl hostagent" "limactl usernet")
  for pattern in "${stale_patterns[@]}"; do
    stale_pids=$(pgrep -f "$pattern" || true)
    [[ -z "$stale_pids" ]] && continue
    log " reaping stale: $pattern ($stale_pids)"
    echo "$stale_pids" | xargs kill -9 2>/dev/null || true
  done
  sleep 1

  # Clear stale disk lock if no process actually owns it.
  # The lock is a symlink at /Volumes/ManaData/colima-disk/in_use_by → ~/.colima/_lima/colima
  # If the symlink exists but no limactl/vz process is running, the lock is stale.
  LOCK="/Volumes/ManaData/colima-disk/in_use_by"
  if [[ -L "$LOCK" ]] && ! pgrep -f "limactl hostagent" >/dev/null 2>&1; then
    log " removing stale disk lock: $LOCK"
    rm -f "$LOCK"
  fi

  log "Starting Colima..."
  colima start \
    --cpu 8 \
    --memory 12 \
    --disk 200 \
    --vm-type vz \
    --vz-rosetta \
    --mount-type virtiofs \
    --mount /Users/mana:w \
    --mount /Volumes/ManaData:w \
    2>&1 | tee -a "$LOG_FILE"

  # Verify with docker info, not colima status (more reliable)
  if ! docker info >/dev/null 2>&1; then
    log "ERROR: Colima failed to start (docker not reachable)"
    exit 1
  fi
  log "Colima started successfully"
else
  log "Colima already running (docker reachable)"
fi
# ─── Verify Docker CLI works ───
# Belt-and-braces re-check after the start path above.
docker info >/dev/null 2>&1 || {
  log "ERROR: Docker CLI not connected to Colima"
  exit 1
}
log "Docker CLI connected"
# ─── Restore named volumes if missing ───
# Migration safety net: if a named volume is gone (e.g. fresh Colima disk),
# re-seed it from the tarball backups taken during the Docker migration.
# Best-effort: a missing backup or failed restore is logged, never fatal.
BACKUP_DIR="/Volumes/ManaData/backups/docker-migration-20260328"
for vol in mana-redis-data mana-victoria-data mana-alertmanager-data mana-grafana-data mana-analytics-data mana-loki-data; do
  if ! docker volume inspect "$vol" >/dev/null 2>&1; then
    BACKUP_FILE="$BACKUP_DIR/${vol}.tar.gz"
    if [ -f "$BACKUP_FILE" ]; then
      log "Restoring volume: $vol"
      docker volume create "$vol" >/dev/null
      # Previously the tar exit status was discarded, so a corrupt backup
      # left a silently-empty volume. Surface the failure in the log but
      # keep going — restore stays best-effort.
      if docker run --rm -v "$vol":/target -v "$BACKUP_DIR":/backup:ro \
           alpine sh -c "tar xzf /backup/${vol}.tar.gz -C /target 2>/dev/null"; then
        log "Restored volume: $vol"
      else
        log "WARNING: restore of $vol from $BACKUP_FILE failed (volume left empty)"
      fi
    fi
  fi
done
# ─── Check prerequisites ───
# Both the compose file and its env file must exist before bringing
# containers up; bail out early with a logged error otherwise.
for required_file in "$COMPOSE_FILE" "$ENV_FILE"; do
  if [[ ! -f "$required_file" ]]; then
    log "ERROR: $required_file not found"
    exit 1
  fi
done
# ─── Start containers ───
log "Starting Docker containers..."
# Without `set -e`, a failed cd would previously fall through and run
# compose from the wrong directory — guard it explicitly.
cd "$PROJECT_ROOT" || {
  log "ERROR: cannot cd to $PROJECT_ROOT"
  exit 1
}
docker compose -f "$COMPOSE_FILE" --env-file "$ENV_FILE" up -d --no-build 2>&1 | tee -a "$LOG_FILE"

# ─── Wait and verify ───
# Fixed grace period; services do their own health-checking beyond this.
log "Waiting 45s for services to initialize..."
sleep 45

# Count running containers for the summary line at the end of the script.
RUNNING=$(docker ps -q | wc -l | tr -d ' ')
log "Containers running: $RUNNING"
# ─── Create missing databases ───
# Idempotent: CREATE DATABASE errors (already exists, postgres not ready)
# are deliberately swallowed — each run just ensures the set exists.
log "Ensuring databases exist..."
required_dbs=(mana_auth mana_credits chat todo calendar clock contacts storage umami)
for db in "${required_dbs[@]}"; do
  docker exec mana-infra-postgres psql -U postgres -c "CREATE DATABASE $db;" 2>/dev/null || true
done

log "=== Startup Complete ($RUNNING containers running) ==="