managarten/services/mana-notify/cmd/server/main.go
commit 8e8b6ac65f (Till JS)
fix(mana-auth) + chore: rewrite /api/v1/auth/login JWT mint, remove Matrix stack
This commit bundles two unrelated changes that were swept together by an
accidental `git add -A` in another working session. Documented here so the
history reflects what's actually inside.

═══════════════════════════════════════════════════════════════════════
1. fix(mana-auth): /api/v1/auth/login mints JWT via auth.handler instead
   of api.signInEmail
═══════════════════════════════════════════════════════════════════════

Previous attempt (commit 55cc75e7d) tried to fix the broken JWT mint in
/api/v1/auth/login by switching the cookie name from `mana.session_token`
to `__Secure-mana.session_token` for production. That was necessary but
not sufficient: Better Auth's session cookie value isn't just the raw
session token; it's `<token>.<HMAC>`, where the HMAC is derived from the
better-auth secret. Reconstructing the cookie from auth.api.signInEmail's
JSON response only gave us the raw token, so /api/auth/token's
get-session middleware still couldn't validate it and the JWT mint kept
silently failing.
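
Schematically, the check that kept rejecting the reconstructed cookie
looks like this (Go sketch; Better Auth's actual hash and encoding
choices are assumptions here, the point is that a bare token carries no
signature at all):

  // Sketch of a `<token>.<HMAC>` cookie check. Hash and encoding are
  // assumptions; a bare token fails the signature test either way.
  package sketch

  import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/base64"
    "strings"
  )

  func validCookie(value, secret string) bool {
    token, sig, ok := strings.Cut(value, ".")
    if !ok {
      return false // raw token, no signature: the reconstructed cookie
    }
    mac := hmac.New(sha256.New, []byte(secret))
    mac.Write([]byte(token))
    want := base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
    return hmac.Equal([]byte(sig), []byte(want))
  }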

Real fix: do the sign-in via auth.handler (the HTTP path) rather than
auth.api.signInEmail (the SDK path). The handler returns a real fetch
Response with a Set-Cookie header containing the fully signed cookie
envelope. We capture that header verbatim and forward it as the cookie
on the /api/auth/token request, which now passes validation and mints
the JWT correctly.
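
For illustration, here is the same hand-off as a standalone Go client
against the public HTTP surface. The sign-in path and the token
response shape follow Better Auth defaults and are assumptions here;
the shipped fix does this in-process via auth.handler:

  // Sketch only, not the shipped code. Shows the cookie hand-off:
  // sign in over HTTP, then forward the signed cookie verbatim.
  package sketch

  import (
    "encoding/json"
    "fmt"
    "net/http"
    "strings"
  )

  func mintJWT(base, email, password string) (string, error) {
    // Step 1: sign in through the HTTP handler so the response
    // carries the fully signed `<token>.<HMAC>` cookie envelope.
    body := strings.NewReader(fmt.Sprintf(`{"email":%q,"password":%q}`, email, password))
    signIn, err := http.Post(base+"/api/auth/sign-in/email", "application/json", body)
    if err != nil {
      return "", err
    }
    defer signIn.Body.Close()
    if signIn.StatusCode == http.StatusForbidden {
      return "", fmt.Errorf("email not verified")
    }

    // Step 2: forward the session cookie verbatim to the token endpoint.
    req, err := http.NewRequest(http.MethodGet, base+"/api/auth/token", nil)
    if err != nil {
      return "", err
    }
    for _, c := range signIn.Cookies() {
      req.AddCookie(c)
    }
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
      return "", err
    }
    defer resp.Body.Close()

    // Field name assumed; adjust to the endpoint's actual response shape.
    var out struct {
      Token string `json:"token"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
      return "", err
    }
    return out.Token, nil
  }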

Verified end-to-end on auth.mana.how:

  $ curl -X POST https://auth.mana.how/api/v1/auth/login \
      -H 'Content-Type: application/json' \
      -d '{"email":"...","password":"..."}'
  {
    "user": {...},
    "token": "<session token>",
    "accessToken": "eyJhbGciOiJFZERTQSI...",   ← real JWT now
    "refreshToken": "<session token>"
  }

Side benefits:
- The email-not-verified path is now handled by checking
  signInResponse.status === 403 directly; no more catching APIError and
  its comment-noted async-stream footgun.
- X-Forwarded-For is forwarded explicitly so Better Auth's rate limiter
  and our security log see the real client IP (sketched after this
  list).
- The leftover catch block now only handles unexpected exceptions
  (network errors, etc.); the FORBIDDEN-checking logic inside it is dead
  but harmless, left in as defense in depth.
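
The IP forwarding above, schematically (Go sketch with assumed names;
the shipped change lives in the login route itself):

  // Copy the caller's real IP onto the internal auth request so the
  // rate limiter and security log don't just see our own address.
  package sketch

  import (
    "net"
    "net/http"
  )

  func forwardClientIP(inbound, outbound *http.Request) {
    ip := inbound.Header.Get("X-Forwarded-For")
    if ip == "" {
      // No proxy header present: fall back to the socket address.
      if host, _, err := net.SplitHostPort(inbound.RemoteAddr); err == nil {
        ip = host
      }
    }
    outbound.Header.Set("X-Forwarded-For", ip)
  }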

═══════════════════════════════════════════════════════════════════════
2. chore: remove the entire self-hosted Matrix stack (Synapse, Element,
   Manalink, mana-matrix-bot)
═══════════════════════════════════════════════════════════════════════

The Matrix subsystem ran parallel to the main Mana product without any
load-bearing integration: the unified web app never imported matrix-js-sdk,
the chat module uses mana-sync (local-first), and mana-matrix-bot's
plugins duplicated features the unified app already ships natively.
Keeping it alive cost a Synapse + Element + matrix-web + bot container
quartet, three Cloudflare routes, an OIDC provider plugin in mana-auth,
and a steady drip of devlog/dependency churn.

Removed:
- apps/matrix (Manalink web + mobile, ~150 files)
- services/mana-matrix-bot (Go bot with ~20 plugins)
- docker/matrix configs (Synapse + Element)
- synapse/element-web/matrix-web/mana-matrix-bot services in
  docker-compose.macmini.yml
- matrix.mana.how/element.mana.how/link.mana.how Cloudflare tunnel routes
- OIDC provider plugin + matrix-synapse trustedClient + matrixUserLinks
  table from mana-auth (oauth_* schema definitions also removed)
- MatrixService import path in mana-media (importFromMatrix endpoint)
- Matrix notification channel in mana-notify (worker, metrics, config,
  channel_type enum, MatrixOptions handler)
- Matrix entries from shared-branding (mana-apps + app-icons),
  notify-client, the i18n bundle, the observatory map, the credits
  app-label list, the landing footer/apps page, the prometheus + alerts
  + promtail tier mappings, and the matrix-related deploy paths in
  cd-macmini.yml + ci.yml

Devlog/manascore/blueprint entries that mention Matrix are left intact
as historical record. The oauth_* + matrix_user_links Postgres tables
stay on existing prod databases; code can no longer write to them. Drop
them in a follow-up migration if you want them gone for real.
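
For whenever that follow-up lands, a minimal sketch (pgx assumed as the
driver; verify table names against prod first, and note the oauth_* set
is deliberately elided here):

  // Hypothetical follow-up migration: drop the orphaned Matrix-era
  // tables. matrix_user_links is named above; the oauth_* tables are
  // elided until their exact names are confirmed against prod.
  package main

  import (
    "context"
    "log"
    "os"

    "github.com/jackc/pgx/v5"
  )

  func main() {
    ctx := context.Background()
    conn, err := pgx.Connect(ctx, os.Getenv("DATABASE_URL"))
    if err != nil {
      log.Fatal(err)
    }
    defer conn.Close(ctx)

    if _, err := conn.Exec(ctx, `DROP TABLE IF EXISTS matrix_user_links`); err != nil {
      log.Fatal(err)
    }
  }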

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-08 16:32:13 +02:00


package main

import (
	"context"
	"fmt"
	"log/slog"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rs/cors"

	"github.com/mana/mana-notify/internal/auth"
	"github.com/mana/mana-notify/internal/channel"
	"github.com/mana/mana-notify/internal/config"
	"github.com/mana/mana-notify/internal/db"
	"github.com/mana/mana-notify/internal/handler"
	"github.com/mana/mana-notify/internal/metrics"
	"github.com/mana/mana-notify/internal/queue"
	tmpl "github.com/mana/mana-notify/internal/template"
)
func main() {
	slog.SetDefault(slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
		Level: slog.LevelInfo,
	})))

	cfg := config.Load()

	// Bound the initial database connection to 30s and release the timer
	// as soon as the pool is up.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	database, err := db.New(ctx, cfg.DatabaseURL)
	cancel()
	if err != nil {
		slog.Error("database init failed", "error", err)
		os.Exit(1)
	}
	defer database.Close()

	// Initialize services
	m := metrics.New()
	emailSvc := channel.NewEmailService(cfg)
	pushSvc := channel.NewPushService(cfg)
	webhookSvc := channel.NewWebhookService()
	engine := tmpl.NewEngine(database)

	// Seed default templates
	engine.SeedDefaults(context.Background())

	// Start worker pool
	workerPool := queue.NewWorkerPool(database, emailSvc, pushSvc, webhookSvc, m)
	workerPool.Start()
	defer workerPool.Stop()

	// Handlers
	notifHandler := handler.NewNotificationsHandler(database, workerPool, engine)
	tmplHandler := handler.NewTemplatesHandler(database, engine)
	devicesHandler := handler.NewDevicesHandler(database)
	prefsHandler := handler.NewPreferencesHandler(database)
	healthHandler := handler.NewHealthHandler(database)

	// Middleware
	serviceAuth := auth.ValidateServiceKey(cfg.ServiceKey)
	jwtAuth := auth.ValidateJWT(cfg.ManaAuthURL)
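
	// Routing note: the patterns below use Go 1.22+ ServeMux method and
	// wildcard syntax ("GET /health", "{id}"). Two auth layers apply:
	// internal services authenticate with a shared key (serviceAuth,
	// sent as X-Service-Key per the CORS allow-list below), end users
	// with a mana-auth JWT (jwtAuth).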

	mux := http.NewServeMux()

	// System endpoints (no auth)
	mux.HandleFunc("GET /health", healthHandler.Health)
	mux.Handle("GET /metrics", promhttp.Handler())

	// Notification endpoints (service key auth)
	mux.Handle("POST /api/v1/notifications/send", serviceAuth(http.HandlerFunc(notifHandler.Send)))
	mux.Handle("POST /api/v1/notifications/schedule", serviceAuth(http.HandlerFunc(notifHandler.Schedule)))
	mux.Handle("POST /api/v1/notifications/batch", serviceAuth(http.HandlerFunc(notifHandler.Batch)))
	mux.Handle("GET /api/v1/notifications/{id}", serviceAuth(http.HandlerFunc(notifHandler.GetNotification)))
	mux.Handle("DELETE /api/v1/notifications/{id}", serviceAuth(http.HandlerFunc(notifHandler.CancelNotification)))

	// Template endpoints (service key auth)
	mux.Handle("GET /api/v1/templates", serviceAuth(http.HandlerFunc(tmplHandler.List)))
	mux.Handle("POST /api/v1/templates", serviceAuth(http.HandlerFunc(tmplHandler.Create)))
	mux.Handle("POST /api/v1/templates/preview", serviceAuth(http.HandlerFunc(tmplHandler.PreviewCustom)))
	mux.Handle("GET /api/v1/templates/{slug}", serviceAuth(http.HandlerFunc(tmplHandler.Get)))
	mux.Handle("PUT /api/v1/templates/{slug}", serviceAuth(http.HandlerFunc(tmplHandler.Update)))
	mux.Handle("DELETE /api/v1/templates/{slug}", serviceAuth(http.HandlerFunc(tmplHandler.Delete)))
	mux.Handle("POST /api/v1/templates/{slug}/preview", serviceAuth(http.HandlerFunc(tmplHandler.Preview)))

	// Device endpoints (JWT auth)
	mux.Handle("POST /api/v1/devices/register", jwtAuth(http.HandlerFunc(devicesHandler.Register)))
	mux.Handle("GET /api/v1/devices", jwtAuth(http.HandlerFunc(devicesHandler.List)))
	mux.Handle("DELETE /api/v1/devices/{id}", jwtAuth(http.HandlerFunc(devicesHandler.Delete)))

	// Preference endpoints (JWT auth)
	mux.Handle("GET /api/v1/preferences", jwtAuth(http.HandlerFunc(prefsHandler.Get)))
	mux.Handle("PUT /api/v1/preferences", jwtAuth(http.HandlerFunc(prefsHandler.Update)))

	corsHandler := cors.New(cors.Options{
		AllowedOrigins:   cfg.CORSOrigins,
		AllowedMethods:   []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
		AllowedHeaders:   []string{"Content-Type", "Authorization", "X-Service-Key"},
		AllowCredentials: true,
	}).Handler(mux)

	server := &http.Server{
		Addr:           fmt.Sprintf(":%d", cfg.Port),
		Handler:        corsHandler,
		ReadTimeout:    30 * time.Second,
		WriteTimeout:   60 * time.Second,
		IdleTimeout:    120 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}

	// Serve in the background; block on SIGINT/SIGTERM, then drain
	// in-flight requests for up to 10s before exiting.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		slog.Info("mana-notify started", "port", cfg.Port)
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			slog.Error("server error", "error", err)
			os.Exit(1)
		}
	}()

	<-sigCh
	slog.Info("shutting down...")

	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		slog.Error("shutdown error", "error", err)
	}
	slog.Info("server stopped")
}