refactor: rename nutriphi module to food (Essen)

Complete rename across the entire monorepo pre-launch:
- Module, routes, API, i18n, standalone landing app directories
- All code identifiers, display names, logo component
- German user-facing label: "Essen" (English brand stays "Food")
- Dexie table nutriFavorites -> foodFavorites
- Infra configs (docker-compose, cloudflared, nginx, wrangler)

Zero residue of nutriphi remains. No data migration needed (pre-launch).

Follow-up: run pnpm install, update Cloudflare DNS
(food.mana.how), rename Cloudflare Pages project.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Till JS 2026-04-14 15:30:07 +02:00
parent f5cb833b04
commit 53b3746b98
196 changed files with 863 additions and 719 deletions

View file

@@ -121,7 +121,7 @@ const PROJECT_META: Record<string, { name: string; icon: string }> = {
zitare: { name: 'Zitare', icon: '✨' },
presi: { name: 'Presi', icon: '📊' },
inventory: { name: 'Inventar', icon: '📦' },
nutriphi: { name: 'Nutriphi', icon: '🥗' },
food: { name: 'Food', icon: '🥗' },
plants: { name: 'Plants', icon: '🌱' },
storage: { name: 'Storage', icon: '☁️' },
questions: { name: 'Questions', icon: '❓' },

View file

@@ -150,7 +150,7 @@ const customUrl = media.getTransformUrl(result.id, {
| id | UUID | Primary key |
| media_id | UUID | FK to media |
| user_id | UUID | Owner user ID |
| app | TEXT | Source app (nutriphi, contacts, etc.) |
| app | TEXT | Source app (food, contacts, etc.) |
| source_url | TEXT | Original source URL |
## Processing Pipeline

View file

@@ -82,7 +82,7 @@ export const mediaReferences = mediaSchema.table(
.notNull(),
// Owner user ID
userId: text('user_id').notNull(),
// Source app (nutriphi, contacts, chat, etc.)
// Source app (food, contacts, chat, etc.)
app: text('app').notNull(),
// Optional: reference to the source URL
sourceUrl: text('source_url'),

View file

@@ -210,4 +210,4 @@ services/mana-sync/
## Connected Apps (19)
Todo, Calendar, Clock, Contacts, Chat, Questions, Mukke, Context, Photos, Cards, Picture, Presi, Storage, Zitare, SkillTree, CityCorners, NutriPhi, Planta, Inventar, uLoad, Times, Calc
Todo, Calendar, Clock, Contacts, Chat, Questions, Mukke, Context, Photos, Cards, Picture, Presi, Storage, Zitare, SkillTree, CityCorners, Food, Planta, Inventar, uLoad, Times, Calc

View file

@@ -1,27 +1,53 @@
// Package backup implements the M1 thin-slice user-data backup endpoint.
// Package backup implements the user-data backup endpoint.
//
// Streams every sync_changes row owned by the authenticated user as JSON Lines
// (one Change per line). The body is the raw event stream from mana-sync —
// identical in shape to what live sync emits, so a future restore endpoint can
// replay it via the existing applyServerChanges() path on the client.
// Streams a .mana archive (zip container) to the authenticated user containing:
//
// Field-level ciphertext passes through untouched: the registry-encrypted
// fields are already encrypted when they reach this table, so the file is
// effectively encrypted at rest for sensitive fields.
// events.jsonl — one SyncChange per line, chronological
// manifest.json — header with userId, counts, integrity hash, format version
//
// Design notes:
//
// - The zip is built in a single DB pass. events.jsonl is written first
// while the body is teed through a sha256 hasher; manifest.json lands as
// a second zip entry after the stream closes, so the manifest can embed
// the final eventsSha256 without a second scan.
//
// - Ciphertext passes through untouched: fields encrypted by the client-
// side registry remain AES-GCM ciphertext, so the archive is effectively
// encrypted at rest for sensitive fields. Plaintext fields (IDs, sort
// keys, timestamps) are visible in the archive — this matches the GDPR
// data-portability expectation.
//
// - The route is wired outside billingMiddleware in main.go so users can
// always retrieve their data regardless of subscription status.
//
// - Signature over manifest.json is deferred to phase 2; the eventsSha256
// already catches accidental corruption during download/storage.
package backup
import (
"archive/zip"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"log/slog"
"net/http"
"sort"
"time"
"github.com/mana/mana-sync/internal/auth"
syncproto "github.com/mana/mana-sync/internal/sync"
"github.com/mana/mana-sync/internal/store"
)
// Handler serves the /backup/export endpoint.
// BackupFormatVersion is the container-format version (manifest.formatVersion).
// Distinct from syncproto.CurrentSchemaVersion — the container can change
// (signature added, different body encoding) without bumping every event.
const BackupFormatVersion = 1
// Handler serves GET /backup/export.
type Handler struct {
store *store.Store
validator *auth.Validator
@@ -32,9 +58,10 @@ func NewHandler(s *store.Store, v *auth.Validator) *Handler {
return &Handler{store: s, validator: v}
}
// exportLine is the on-wire shape of one row in the JSONL body. Field names
// mirror the sync-protocol Change shape as closely as possible; the restore
// side maps these back into SyncChange objects.
// exportLine is the on-wire shape of one row inside events.jsonl. Field
// names mirror the sync-protocol Change shape so the restore side can feed
// lines straight into applyServerChanges() after running them through the
// migration chain keyed on schemaVersion.
type exportLine struct {
EventID string `json:"eventId"`
SchemaVersion int `json:"schemaVersion"`
@@ -48,13 +75,23 @@ type exportLine struct {
CreatedAt string `json:"createdAt"`
}
// HandleExport streams the authenticated user's full sync_changes log as
// JSONL. This is the M1 thin slice of the backup/restore feature — no zip,
// no manifest, no signature yet. Those land in M3.
//
// GDPR-bypass for billing: the route is wired outside the billing middleware
// in main.go, so users can always export their data even if their sync
// subscription is inactive.
// manifestFile is the header object serialized as manifest.json. Kept small
// and declarative so tools can parse it without loading events.jsonl.
type manifestFile struct {
FormatVersion int `json:"formatVersion"`
SchemaVersion int `json:"schemaVersion"` // max event schemaVersion this server knows
UserID string `json:"userId"`
CreatedAt string `json:"createdAt"`
EventCount int `json:"eventCount"`
EventsSHA256 string `json:"eventsSha256"`
Apps []string `json:"apps"`
ProducedBy string `json:"producedBy"`
SchemaVersionMin int `json:"schemaVersionMin,omitempty"`
SchemaVersionMax int `json:"schemaVersionMax,omitempty"`
}
// HandleExport streams a .mana zip archive containing the user's full
// sync-event log plus a manifest with integrity hash.
func (h *Handler) HandleExport(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
@@ -67,24 +104,71 @@ func (h *Handler) HandleExport(w http.ResponseWriter, r *http.Request) {
return
}
filename := fmt.Sprintf("mana-backup-%s-%s.jsonl", userID, time.Now().UTC().Format("20060102-150405"))
createdAt := time.Now().UTC()
filename := fmt.Sprintf("mana-backup-%s-%s.mana", userID, createdAt.Format("20060102-150405"))
w.Header().Set("Content-Type", "application/x-ndjson")
w.Header().Set("Content-Type", "application/zip")
w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment; filename="%s"`, filename))
w.Header().Set("X-Content-Type-Options", "nosniff")
// Disable proxy buffering so the response streams as rows arrive.
w.Header().Set("X-Accel-Buffering", "no")
w.Header().Set("Cache-Control", "no-store")
flusher, _ := w.(http.Flusher)
encoder := json.NewEncoder(w)
zw := zip.NewWriter(w)
// Only close once — closing writes the central directory, which we need
// even if streaming errored partway so the file is at least a valid zip.
zipClosed := false
closeZip := func() {
if zipClosed {
return
}
zipClosed = true
if err := zw.Close(); err != nil {
slog.Error("backup: zip close failed", "user_id", userID, "error", err)
}
}
defer closeZip()
// ─── events.jsonl entry ──────────────────────────────────────
eventsWriter, err := zw.CreateHeader(&zip.FileHeader{
Name: "events.jsonl",
Method: zip.Deflate,
Modified: createdAt,
})
if err != nil {
slog.Error("backup: create events.jsonl entry", "user_id", userID, "error", err)
return
}
hasher := sha256.New()
// Tee so the deflate entry and the hash both see every byte — the hash
// is over the *decompressed* JSONL, which is what the restore side will
// re-hash after unzipping.
teed := io.MultiWriter(eventsWriter, hasher)
encoder := json.NewEncoder(teed)
var (
count int
appSet = make(map[string]struct{})
minVer int
maxVer int
)
var count int
streamErr := h.store.StreamAllUserChanges(r.Context(), userID, func(row store.ChangeRow) error {
sv := row.SchemaVersion
if sv <= 0 {
sv = 1
}
if count == 0 {
minVer = sv
maxVer = sv
} else {
if sv < minVer {
minVer = sv
}
if sv > maxVer {
maxVer = sv
}
}
line := exportLine{
EventID: row.ID,
SchemaVersion: sv,
@@ -100,24 +184,62 @@ func (h *Handler) HandleExport(w http.ResponseWriter, r *http.Request) {
if err := encoder.Encode(line); err != nil {
return err
}
appSet[row.AppID] = struct{}{}
count++
// Flush every ~500 rows so big exports show progress over the wire.
if flusher != nil && count%500 == 0 {
flusher.Flush()
}
return nil
})
if flusher != nil {
flusher.Flush()
}
if streamErr != nil {
// Headers are already sent, so we cannot change the status code.
// Log and let the client detect truncation via the row count it expected.
// (M3 will add a manifest with eventCount + sha256 for integrity checking.)
slog.Error("backup export stream failed", "user_id", userID, "written", count, "error", streamErr)
slog.Error("backup: stream failed", "user_id", userID, "written", count, "error", streamErr)
// Headers are flushed; best we can do is close the zip so the file
// isn't corrupt. The manifest won't land, and the absence of it is
// itself a signal to the importer that the export was truncated.
return
}
slog.Info("backup export ok", "user_id", userID, "rows", count)
// ─── manifest.json entry ─────────────────────────────────────
apps := make([]string, 0, len(appSet))
for a := range appSet {
apps = append(apps, a)
}
sort.Strings(apps)
manifest := manifestFile{
FormatVersion: BackupFormatVersion,
SchemaVersion: syncproto.CurrentSchemaVersion,
UserID: userID,
CreatedAt: createdAt.Format(time.RFC3339Nano),
EventCount: count,
EventsSHA256: hex.EncodeToString(hasher.Sum(nil)),
Apps: apps,
ProducedBy: "mana-sync",
SchemaVersionMin: minVer,
SchemaVersionMax: maxVer,
}
manifestBytes, err := json.MarshalIndent(manifest, "", " ")
if err != nil {
slog.Error("backup: marshal manifest", "user_id", userID, "error", err)
return
}
manifestWriter, err := zw.CreateHeader(&zip.FileHeader{
Name: "manifest.json",
Method: zip.Deflate,
Modified: createdAt,
})
if err != nil {
slog.Error("backup: create manifest entry", "user_id", userID, "error", err)
return
}
if _, err := manifestWriter.Write(manifestBytes); err != nil {
slog.Error("backup: write manifest", "user_id", userID, "error", err)
return
}
closeZip()
slog.Info("backup export ok",
"user_id", userID,
"rows", count,
"apps", len(apps),
"schema_min", minVer,
"schema_max", maxVer,
)
}