fix(events): production wiring + polling resilience (quick wins)

Four small follow-ups on Phase 1b:

- docker-compose.macmini.yml: add the mana-events container with the
  same shape as mana-credits, expose port 3065, add a Traefik route
  for events.mana.how, and inject PUBLIC_MANA_EVENTS_URL into the
  mana-web container so the SvelteKit SSR + browser both reach it
  (compose sketch after this list).
- mana-events: background sweeper that deletes rsvp_rate_buckets
  rows older than 2h every hour. Without it, long-published events
  accumulate one row per traffic-hour forever (FK cascade only fires
  on snapshot delete).
- PublicRsvpList: track consecutiveFailures and only show the error
  banner after two failures in a row, so a single mid-poll network
  hiccup doesn't flash a 30s error the user can't act on (polling
  sketch after this list).
- apps/mana/apps/web: declare postgres as a devDep (already imported
  by the e2e spec via pnpm hoisting, now explicit).
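A sketch of the compose shape the first bullet describes; the build paths, label
syntax, and exact service definitions are assumptions based on the bullet, not
the actual diff:

  # Hypothetical shape, mirroring the described mana-credits pattern.
  mana-events:
    build: ./apps/mana/apps/events
    ports:
      - "3065:3065"
    labels:
      - traefik.enable=true
      - traefik.http.routers.mana-events.rule=Host(`events.mana.how`)
      - traefik.http.services.mana-events.loadbalancer.server.port=3065

  mana-web:
    environment:
      - PUBLIC_MANA_EVENTS_URL=https://events.mana.how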
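The PublicRsvpList change isn't in the hunks shown below; a minimal sketch of the
failure gate, where everything except the consecutiveFailures name is assumed:

  // Hypothetical poll handler: one transient failure is swallowed; the
  // banner only appears once two polls in a row have failed.
  let consecutiveFailures = 0;
  let showError = false;
  let rsvps: unknown[] = [];

  async function poll(url: string) {
    try {
      const res = await fetch(url);
      if (!res.ok) throw new Error(`HTTP ${res.status}`);
      rsvps = await res.json();
      consecutiveFailures = 0;
      showError = false;
    } catch {
      consecutiveFailures += 1;
      showError = consecutiveFailures >= 2; // two strikes before the banner
    }
  }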
Till JS 2026-04-07 18:53:29 +02:00
parent 354cbcb176
commit 640242500e
5 changed files with 116 additions and 5 deletions


@@ -16,10 +16,15 @@ import { jwtAuth } from './middleware/jwt-auth';
import { healthRoutes } from './routes/health';
import { createEventsRoutes } from './routes/events';
import { createRsvpRoutes } from './routes/rsvp';
import { startRateBucketSweeper } from './lib/cleanup';

const config = loadConfig();
const db = getDb(config.databaseUrl);

// Background cleanup of stale rate-limit buckets so they don't accumulate
// for the lifetime of long-published events.
startRateBucketSweeper(db);

const app = new Hono();
app.onError(errorHandler);
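The boot wiring above ignores the returned stop function; if the service ever
needed a graceful shutdown path, the same handle could be kept around (a sketch,
not part of this diff):

  // Hypothetical: retain the sweeper's stop function and clear its timers
  // on SIGTERM so the process can exit cleanly.
  const stopSweeper = startRateBucketSweeper(db);
  process.on('SIGTERM', () => stopSweeper());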


@@ -0,0 +1,63 @@
/**
 * Periodic cleanup of stale rate-limit buckets.
 *
 * Each (token, hour-bucket) row is only useful for the hour it represents;
 * once that hour is over, the row is just dead weight in Postgres.
 * The FK cascade only fires when an event snapshot is deleted; long-lived
 * snapshots therefore accumulate one bucket row per traffic-hour forever.
 *
 * This sweeper deletes any bucket whose hour is more than KEEP_HOURS old.
 * The window is deliberately conservative, so we never delete a row that an
 * in-flight request could still read around the same hour boundary.
 */
import { lt } from 'drizzle-orm';
import type { Database } from '../db/connection';
import { rsvpRateBuckets } from '../db/schema/events';

const KEEP_HOURS = 2;

// Buckets are keyed as `YYYY-MM-DDTHH` in UTC, so plain string comparison
// sorts them chronologically and `lt` can apply the cutoff in SQL.
function cutoffBucket(): string {
  const d = new Date(Date.now() - KEEP_HOURS * 60 * 60 * 1000);
  const pad = (n: number) => n.toString().padStart(2, '0');
  return `${d.getUTCFullYear()}-${pad(d.getUTCMonth() + 1)}-${pad(d.getUTCDate())}T${pad(d.getUTCHours())}`;
}

export async function sweepRateBuckets(db: Database): Promise<number> {
  const cutoff = cutoffBucket();
  const result = await db
    .delete(rsvpRateBuckets)
    .where(lt(rsvpRateBuckets.hourBucket, cutoff))
    .returning({ token: rsvpRateBuckets.token });
  return result.length;
}

/**
 * Start a periodic sweep. Returns a stop function for tests.
 * Runs once shortly after boot, then on the configured interval.
 */
export function startRateBucketSweeper(
  db: Database,
  intervalMs = 60 * 60 * 1000 // 1h
): () => void {
  const tick = async () => {
    try {
      const removed = await sweepRateBuckets(db);
      if (removed > 0) {
        console.log(`[mana-events] swept ${removed} stale rate buckets`);
      }
    } catch (err) {
      console.error('[mana-events] rate bucket sweep failed:', err);
    }
  };

  // Fire once shortly after boot so we don't wait a full hour for the
  // first cleanup, but defer slightly so startup logs aren't interleaved.
  const bootTimer = setTimeout(tick, 5_000);
  const intervalTimer = setInterval(tick, intervalMs);

  return () => {
    clearTimeout(bootTimer);
    clearInterval(intervalTimer);
  };
}
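For reference, one way the returned stop function could be used in a test (a
sketch; the fast interval and surrounding harness are assumptions, not part of
this commit):

  // Hypothetical test usage: run the sweeper on a fast interval, assert
  // against rsvp_rate_buckets, then stop it so the timers don't keep the
  // process alive after the test ends.
  const stop = startRateBucketSweeper(db, 50); // 50 ms, test-only
  // ... insert stale buckets, await a tick, assert the rows are gone ...
  stop();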