feat(mana/web): subscribe data layer events to toasts + Sentry + scheduler

New data-layer-listeners.ts wires the fire-and-forget CustomEvents the
sync engine and quota helpers emit into the rest of the app:

- mana:storage-quota-exceeded
  → toast.info / .warning / .error depending on whether the recovery
    cleanup succeeded, and a Sentry capture for the failure cases.
- mana:sync-telemetry
  → push:error / pull:error are routed to captureException with the
    error category as a tag. Auth and network errors are downgraded to
    console.warn so they don't drown Sentry in expected token blips.
  → apply:malformed-drop becomes a captureMessage warning.
  → success events log to console.debug only when import.meta.env.DEV.
- Tombstone cleanup loop
  → cleanupTombstones() runs once on idle after boot, then every 24h.
    Errors caught locally and reported via captureException with a
    'tombstone-cleanup' tag. Soft-deleted rows older than 30 days are
    hard-purged so the IndexedDB doesn't grow unbounded.

Wired into the root layout's onMount: installDataLayerListeners()
returns a dispose function that removes both window listeners and
clears the cleanup interval.

Closes the audit's "no telemetry" + "no quota handling" + "tombstone
cleanup helper exists but unused" trio in one shot.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Till JS 2026-04-07 14:34:18 +02:00
parent 771721ca30
commit 575c5c36fd
2 changed files with 151 additions and 9 deletions

View file

@ -0,0 +1,131 @@
/**
* Wires the data-layer event bus into the rest of the app.
*
* The sync engine and quota helpers emit fire-and-forget CustomEvents on
* `window` so they stay free of UI/error-tracking dependencies. This module
* is the single subscriber that bridges those events to:
*
* 1. The user-visible toast store (quota warnings)
* 2. The shared error tracker (sync errors → Sentry/GlitchTip)
* 3. The console (sync warnings + telemetry summary in dev)
*
* It also kicks off the periodic tombstone cleanup so soft-deleted rows
* don't grow unbounded in IndexedDB.
*
* Call `installDataLayerListeners()` once from the root layout's onMount.
* It returns a dispose function that should be called on unmount.
*/
import { captureException, captureMessage } from '@mana/shared-error-tracking/browser';
import { toast } from '$lib/stores/toast.svelte';
import { QUOTA_EVENT, type QuotaExceededDetail } from './quota-detect';
import { cleanupTombstones } from './quota';
import { SYNC_TELEMETRY_EVENT, type SyncTelemetryDetail } from './sync-telemetry';
/** How often to run the tombstone cleanup. 24h is a comfortable cadence
 * given that the cutoff is 30 days — in practice it runs roughly once per
 * app session. */
const TOMBSTONE_CLEANUP_INTERVAL_MS = 24 * 60 * 60 * 1000;
/**
 * Subscribes to all data-layer CustomEvents and starts the tombstone
 * cleanup loop.
 *
 * Should NOT be invoked a second time without disposing first: the
 * returned cleanup function tears down both window listeners, cancels a
 * still-pending idle-scheduled first cleanup run, and clears the
 * interval timer.
 *
 * @returns Dispose function — call it on unmount.
 */
export function installDataLayerListeners(): () => void {
  if (typeof window === 'undefined') {
    // SSR safety net — nothing to wire up server-side.
    return () => {};
  }

  // ─── Quota events → toast + telemetry ──────────────────────
  const handleQuota = (event: Event) => {
    const detail = (event as CustomEvent<QuotaExceededDetail>).detail;
    if (detail.recovered) {
      // We freed enough space to retry; gentle info, not alarming.
      toast.info(`Speicher war voll – ${detail.cleaned} alte Einträge bereinigt, fertig.`);
    } else if (detail.cleaned > 0) {
      // We cleaned but still failed; the user needs to know data may be lost.
      toast.error('Speicher voll. Manche Änderungen konnten nicht gesichert werden.');
      captureException(
        new Error(
          `IndexedDB quota exceeded after cleanup (table=${detail.table}, op=${detail.op})`
        ),
        { tag: 'quota-exceeded', ...detail }
      );
    } else {
      // First-time hit, no cleanup happened (e.g. fired from a sync hook).
      toast.warning('Speicher fast voll. Die App bereinigt alte Daten…');
      captureMessage(`IndexedDB quota warning (table=${detail.table ?? 'unknown'})`, 'warning');
    }
  };

  // ─── Sync telemetry → console + Sentry on errors ───────────
  const handleTelemetry = (event: Event) => {
    const detail = (event as CustomEvent<SyncTelemetryDetail>).detail;
    if (detail.kind === 'push:error' || detail.kind === 'pull:error') {
      // Auth errors are user-driven (token expired) and pollute Sentry —
      // surface them as console warnings only. Network blips are noisy
      // for the same reason. Real server-side faults still get logged.
      if (detail.errorCategory === 'auth' || detail.errorCategory === 'network') {
        console.warn('[mana-sync]', detail.kind, detail);
        return;
      }
      captureException(
        new Error(`mana-sync ${detail.kind} app=${detail.appId} category=${detail.errorCategory}`),
        { tag: 'sync-error', ...detail }
      );
      console.error('[mana-sync]', detail.kind, detail);
      return;
    }
    if (detail.kind === 'apply:malformed-drop') {
      captureMessage(
        `mana-sync dropped ${detail.count ?? 0} malformed server changes (app=${detail.appId})`,
        'warning'
      );
      return;
    }
    // Successful lifecycle events: log only when Vite dev server is on, so
    // production console stays quiet but devs get visibility.
    if (import.meta.env.DEV) {
      console.debug('[mana-sync]', detail.kind, detail);
    }
  };

  window.addEventListener(QUOTA_EVENT, handleQuota);
  window.addEventListener(SYNC_TELEMETRY_EVENT, handleTelemetry);

  // ─── Tombstone cleanup loop ────────────────────────────────
  // Run once on boot, then daily. Errors are caught locally and reported
  // via the same Sentry bridge so a broken cleanup never crashes the app.
  const runCleanup = () => {
    cleanupTombstones()
      .then((cleaned) => {
        if (cleaned > 0 && import.meta.env.DEV) {
          console.debug(`[mana-data] tombstone cleanup removed ${cleaned} rows`);
        }
      })
      .catch((err) => {
        captureException(err, { tag: 'tombstone-cleanup' });
      });
  };

  // Defer the first run until the browser is idle so it never competes
  // with the initial render. Track the handle so dispose can cancel a
  // first run that hasn't fired yet — previously a fast unmount would
  // still trigger the cleanup afterwards.
  const supportsIdleCallback = typeof window.requestIdleCallback === 'function';
  let firstRunHandle: number | null = null;
  const firstRun = () => {
    firstRunHandle = null; // fired — nothing left to cancel
    runCleanup();
  };
  firstRunHandle = supportsIdleCallback
    ? window.requestIdleCallback(firstRun, { timeout: 5000 })
    : window.setTimeout(firstRun, 2000);

  const cleanupTimer = window.setInterval(runCleanup, TOMBSTONE_CLEANUP_INTERVAL_MS);

  // ─── Dispose ───────────────────────────────────────────────
  return () => {
    window.removeEventListener(QUOTA_EVENT, handleQuota);
    window.removeEventListener(SYNC_TELEMETRY_EVENT, handleTelemetry);
    if (firstRunHandle !== null) {
      // Cancel the idle-deferred first run with the matching API for
      // however it was scheduled.
      if (supportsIdleCallback) {
        window.cancelIdleCallback(firstRunHandle);
      } else {
        window.clearTimeout(firstRunHandle);
      }
      firstRunHandle = null;
    }
    window.clearInterval(cleanupTimer);
  };
}

View file

@ -7,29 +7,35 @@
import { loadAutomations } from '$lib/triggers';
import { setCurrentUserId } from '$lib/data/current-user';
import { migrateGuestDataToUser } from '$lib/data/guest-migration';
import { installDataLayerListeners } from '$lib/data/data-layer-listeners';
import SuggestionToast from '$lib/components/SuggestionToast.svelte';
import OfflineIndicator from '$lib/components/OfflineIndicator.svelte';
import PwaUpdatePrompt from '$lib/components/PwaUpdatePrompt.svelte';
let { children } = $props();
// Tracks whether we have already attempted the guest → user migration in
// this app load. The migration is idempotent (no guest records → no-op)
// so this just prevents redundant table scans on every auth state change.
let guestMigrationAttempted = false;
// Tracks the last user id we pushed into the data layer. Comparing
// against this lets us short-circuit identity-update churn during auth
// initialisation, which previously caused effect_update_depth_exceeded.
let lastUserId: string | null | undefined = undefined;
// Push the active user id into the data layer whenever auth state changes.
// The Dexie creating-hook reads this to auto-stamp `userId` on every record,
// so module stores never need to know who the current user is.
$effect(() => {
const userId = authStore.user?.id ?? null;
if (userId === lastUserId) return;
const previousUserId = lastUserId;
lastUserId = userId;
setCurrentUserId(userId);
// First time we see an authenticated user in this session, lift any
// guest records into their account so the data they typed before
// signing up follows them.
if (userId && !guestMigrationAttempted) {
guestMigrationAttempted = true;
// First time we see an authenticated user (transition from guest/null
// to a real id), lift any guest records into their account so the data
// they typed before signing up follows them. Only on the first such
// transition — re-running on token refresh would be a no-op anyway,
// but we skip the table scan entirely.
if (userId && previousUserId === undefined) {
migrateGuestDataToUser(userId).catch((err) => {
console.error('[mana] guest → user migration failed:', err);
});
@ -43,6 +49,10 @@
// Initialize network status tracking
networkStore.initialize();
// Subscribe to data-layer events: quota toasts, sync telemetry to
// the error tracker, and the daily tombstone cleanup loop.
const disposeDataLayer = installDataLayerListeners();
// Auth + automation loading is async — fire and forget. Returning
// cleanup from an async onMount would silently drop it, so the async
// work runs in an inner IIFE while the outer arrow stays sync.
@ -54,6 +64,7 @@
return () => {
cleanupTheme();
networkStore.destroy();
disposeDataLayer();
};
});
</script>