mirror of
https://github.com/Memo-2023/mana-monorepo.git
synced 2026-05-17 23:49:40 +02:00
generateObject() in the AI SDK falls back to a tool-call mode when the provider doesn't advertise structured-output support — and tool calling through Ollama isn't reliable enough that the schema-validation step passes. The response was failing with 'No object generated: response did not match schema' even though the underlying mana-llm + Ollama roundtrip works correctly when called with response_format directly (verified via curl). Set supportsStructuredOutputs:true on the createOpenAICompatible factory so the AI SDK uses response_format json_schema mode. mana-llm already routes that to Ollama's native format field thanks to the companion fix in services/mana-llm/src/providers/ollama.py — verified end-to-end with the MealAnalysisSchema and Gemma 3 4B. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
221 lines
7.6 KiB
TypeScript
221 lines
7.6 KiB
TypeScript
/**
|
|
* NutriPhi module — Meal analysis (Gemini Vision via mana-llm) + recommendations.
|
|
*
|
|
* CRUD for meals, goals, favorites is handled by mana-sync. This module
|
|
* owns the server-only operations: photo upload to mana-media, structured
|
|
* AI analysis using the Vercel AI SDK (`generateObject`) against the
|
|
* shared Zod schema in @mana/shared-types, and a small rule-based
|
|
* recommendation engine.
|
|
*
|
|
* Why generateObject + Zod instead of raw fetch?
|
|
* - Runtime validation of the AI response — if Gemini drifts on a
|
|
* field, we throw at the boundary instead of corrupting downstream
|
|
* state. The frontend never sees malformed data.
|
|
* - Provider-portable structured outputs: the AI SDK translates one
|
|
* Zod schema into OpenAI strict json_schema / Anthropic tool-use /
|
|
* Gemini response_schema depending on which backend mana-llm routes
|
|
* to. We don't have to know which.
|
|
* - Single source of truth: the same MealAnalysisSchema is consumed
|
|
* by the unified web app via `z.infer<typeof MealAnalysisSchema>`,
|
|
* so changes here propagate end-to-end without manual sync.
|
|
*/
|
|
|
|
import { Hono } from 'hono';
|
|
import { generateObject } from 'ai';
|
|
import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
|
|
import {
|
|
AI_SCHEMA_VERSION,
|
|
MealAnalysisSchema,
|
|
type AiResponseEnvelope,
|
|
type MealAnalysis,
|
|
} from '@mana/shared-types';
|
|
import { logger, type AuthVariables } from '@mana/shared-hono';
|
|
|
|
// Base URL of the mana-llm gateway (OpenAI-compatible proxy). `||` rather
// than `??` means an empty-string env var also falls back to the local
// default — presumably intentional for env configuration; confirm if an
// explicit empty override is ever needed.
const LLM_URL = process.env.MANA_LLM_URL || 'http://localhost:3025';

// mana-llm parses model strings as `provider/model` (router.py:_parse_model).
// Default to Gemma 3 (4B, multimodal) on the local Ollama instance — it
// runs on the GPU server (192.168.178.11) via the gpu-proxy bridge and
// supports vision out of the box. Override with VISION_MODEL=google/gemini-2.0-flash
// (or similar) once mana-llm has GOOGLE_API_KEY configured.
const VISION_MODEL = process.env.VISION_MODEL || 'ollama/gemma3:4b';
|
|
|
|
const llm = createOpenAICompatible({
|
|
name: 'mana-llm',
|
|
// mana-llm exposes /v1/chat/completions (see services/mana-llm/CLAUDE.md +
|
|
// src/main.py:125). The AI SDK's openai-compatible adapter appends
|
|
// /chat/completions to baseURL, so baseURL ends in /v1.
|
|
baseURL: `${LLM_URL}/v1`,
|
|
// Tell the AI SDK that mana-llm honours OpenAI-style strict
|
|
// json_schema response_format. Without this, generateObject() falls
|
|
// back to a tool-call mode that Ollama-backed models don't support
|
|
// reliably and the response fails to validate against the Zod schema.
|
|
// mana-llm's Ollama provider translates response_format → Ollama's
|
|
// native `format` field (services/mana-llm/src/providers/ollama.py)
|
|
// so this is honoured end-to-end.
|
|
supportsStructuredOutputs: true,
|
|
});
|
|
|
|
const ANALYSIS_PROMPT = `Du bist ein Ernährungsexperte. Analysiere die Mahlzeit und gib strukturierte Nährwertdaten zurück. Schätze realistische Portionsgrößen und Kalorien. Antworte auf Deutsch.`;
|
|
|
|
/**
|
|
* Provider hints attached to the system message. Forward-compat:
|
|
*
|
|
* - anthropic.cacheControl: ephemeral system-prompt caching. NO-OP today
|
|
* because (a) we route to Gemini via mana-llm and (b) the prompt is
|
|
* ~50 tokens — well under Anthropic's 1024-token cache minimum. Becomes
|
|
* active automatically when mana-llm routes to Claude AND the prompt
|
|
* grows (e.g. once we attach per-user dietary preferences as system
|
|
* context, which would push us past the threshold).
|
|
*
|
|
* Kept here so the day we flip the backend, we don't have to revisit
|
|
* every route to enable caching — it just starts working.
|
|
*/
|
|
const SYSTEM_CACHE_HINT = {
|
|
anthropic: { cacheControl: { type: 'ephemeral' as const } },
|
|
};
|
|
|
|
/** Wrap a validated AI object in the standard wire-format envelope. */
|
|
function envelope(data: MealAnalysis): AiResponseEnvelope<MealAnalysis> {
|
|
return { schemaVersion: AI_SCHEMA_VERSION, data };
|
|
}
|
|
|
|
// Route group for the NutriPhi server-only endpoints. AuthVariables types
// the per-request context (handlers read c.get('userId') — presumably set
// by auth middleware upstream; confirm in the app wiring).
const routes = new Hono<{ Variables: AuthVariables }>();
|
|
|
|
// ─── Photo Upload (server-only: S3 storage via mana-media) ───
|
|
|
|
routes.post('/photos/upload', async (c) => {
|
|
const userId = c.get('userId');
|
|
const formData = await c.req.formData();
|
|
const file = formData.get('file') as File | null;
|
|
|
|
if (!file) return c.json({ error: 'No file provided' }, 400);
|
|
if (file.size > 10 * 1024 * 1024) return c.json({ error: 'File too large (max 10MB)' }, 400);
|
|
|
|
try {
|
|
const { uploadImageToMedia } = await import('../../lib/media');
|
|
const buffer = await file.arrayBuffer();
|
|
const result = await uploadImageToMedia(buffer, file.name, { app: 'nutriphi', userId });
|
|
|
|
return c.json(
|
|
{
|
|
mediaId: result.id,
|
|
publicUrl: result.urls.original,
|
|
thumbnailUrl: result.urls.thumbnail || result.urls.original,
|
|
storagePath: result.id,
|
|
},
|
|
201
|
|
);
|
|
} catch (err) {
|
|
logger.error('nutriphi.upload_failed', {
|
|
error: err instanceof Error ? err.message : String(err),
|
|
});
|
|
return c.json({ error: 'Upload failed' }, 500);
|
|
}
|
|
});
|
|
|
|
// ─── Photo Analysis (Gemini Vision on uploaded URL) ──────────
|
|
|
|
routes.post('/analysis/photo', async (c) => {
|
|
const { photoUrl } = await c.req.json();
|
|
if (!photoUrl) return c.json({ error: 'photoUrl required' }, 400);
|
|
|
|
try {
|
|
const { object } = await generateObject({
|
|
model: llm(VISION_MODEL),
|
|
schema: MealAnalysisSchema,
|
|
messages: [
|
|
{
|
|
role: 'system',
|
|
content: ANALYSIS_PROMPT,
|
|
providerOptions: SYSTEM_CACHE_HINT,
|
|
},
|
|
{
|
|
role: 'user',
|
|
content: [
|
|
{ type: 'text', text: 'Analysiere diese Mahlzeit.' },
|
|
{ type: 'image', image: new URL(photoUrl) },
|
|
],
|
|
},
|
|
],
|
|
temperature: 0.3,
|
|
});
|
|
return c.json(envelope(object));
|
|
} catch (err) {
|
|
logger.error('nutriphi.photo_analysis_failed', {
|
|
error: err instanceof Error ? err.message : String(err),
|
|
});
|
|
return c.json({ error: 'Analysis failed' }, 500);
|
|
}
|
|
});
|
|
|
|
// ─── Text Analysis (Gemini on a free-text meal description) ──
|
|
|
|
routes.post('/analysis/text', async (c) => {
|
|
const { description } = await c.req.json();
|
|
if (!description) return c.json({ error: 'description required' }, 400);
|
|
|
|
try {
|
|
const { object } = await generateObject({
|
|
model: llm(VISION_MODEL),
|
|
schema: MealAnalysisSchema,
|
|
messages: [
|
|
{
|
|
role: 'system',
|
|
content: ANALYSIS_PROMPT,
|
|
providerOptions: SYSTEM_CACHE_HINT,
|
|
},
|
|
{
|
|
role: 'user',
|
|
content: `Analysiere diese Mahlzeit: ${description}`,
|
|
},
|
|
],
|
|
temperature: 0.3,
|
|
});
|
|
return c.json(envelope(object));
|
|
} catch (err) {
|
|
logger.error('nutriphi.text_analysis_failed', {
|
|
error: err instanceof Error ? err.message : String(err),
|
|
});
|
|
return c.json({ error: 'Analysis failed' }, 500);
|
|
}
|
|
});
|
|
|
|
// ─── Recommendations (server-only: rule engine) ──────────────
|
|
|
|
routes.post('/recommendations/generate', async (c) => {
|
|
const { dailyNutrition } = await c.req.json();
|
|
const hints: Array<{ type: string; priority: string; message: string; nutrient?: string }> = [];
|
|
|
|
if (dailyNutrition) {
|
|
if (dailyNutrition.protein < 25) {
|
|
hints.push({
|
|
type: 'hint',
|
|
priority: 'medium',
|
|
message:
|
|
'Deine Proteinzufuhr ist niedrig. Versuche Hülsenfrüchte, Eier oder Joghurt einzubauen.',
|
|
nutrient: 'protein',
|
|
});
|
|
}
|
|
if (dailyNutrition.fiber < 10) {
|
|
hints.push({
|
|
type: 'hint',
|
|
priority: 'medium',
|
|
message: 'Mehr Ballaststoffe! Vollkornprodukte, Gemüse und Obst helfen.',
|
|
nutrient: 'fiber',
|
|
});
|
|
}
|
|
if (dailyNutrition.sugar > 50) {
|
|
hints.push({
|
|
type: 'hint',
|
|
priority: 'high',
|
|
message:
|
|
'Dein Zuckerkonsum ist hoch. Achte auf versteckten Zucker in Getränken und Fertigprodukten.',
|
|
nutrient: 'sugar',
|
|
});
|
|
}
|
|
}
|
|
|
|
return c.json({ recommendations: hints });
|
|
});
|
|
|
|
// Named export for the app entry point — presumably mounted under a
// NutriPhi path prefix; confirm in the server wiring.
export { routes as nutriphiRoutes };
|