Mirror of https://github.com/Memo-2023/mana-monorepo.git, synced 2026-05-16 12:39:39 +02:00.
Final milestone of docs/plans/llm-fallback-aliases.md. Every backend
caller now requests models via the `mana/<class>` alias system instead
of hardcoded `ollama/...` strings. mana-llm resolves aliases through
`services/mana-llm/aliases.yaml` with health-aware fallback (M3) and
emits resolved-model + fallback metrics (M4).
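For orientation, an alias chain in aliases.yaml has roughly this shape (sketch only: the alias spelling follows the `mana/<class>` convention, the model IDs are placeholders, and the exact schema is whatever mana-llm's loader defines):

    # Placeholder values; the real chains live in services/mana-llm/aliases.yaml.
    mana/long-form:
      - ollama/primary-model    # tried first while healthy
      - ollama/backup-model     # health-aware fallback (M3)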
SSOT moved to `packages/shared-ai/src/llm-aliases.ts` so apps/api,
apps/mana/apps/web, and services/mana-ai all import the same
`MANA_LLM` constant via the existing `@mana/shared-ai` workspace
dependency. Three additional sites (memoro-server, mana-events,
mana-research) inline the alias string with an SSOT comment because
they don't pull @mana/shared-ai today.
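The SSOT itself is just a frozen map from class to alias string, roughly (alias spellings assumed from the `mana/<class>` convention; llm-aliases.ts holds the real values):

    // Sketch of packages/shared-ai/src/llm-aliases.ts
    export const MANA_LLM = {
      FAST_TEXT: 'mana/fast-text',
      STRUCTURED: 'mana/structured',
      LONG_FORM: 'mana/long-form',
      REASONING: 'mana/reasoning',
      VISION: 'mana/vision',
    } as const;

    export type ManaLlmAlias = (typeof MANA_LLM)[keyof typeof MANA_LLM];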
Migrated 14 sites across 10 files (a representative before/after follows the list):
- apps/api: writing(LONG_FORM), comic(STRUCTURED), context(FAST_TEXT),
food(VISION), plants(VISION), research orchestrator (3 tiers
collapsed to STRUCTURED+FAST_TEXT/LONG_FORM)
- apps/mana/apps/web: voice/parse-task + parse-habit (STRUCTURED)
- services/mana-ai: planner llm-client + tick.ts (REASONING)
- services/mana-events: website-extractor (STRUCTURED, inlined)
- services/mana-research: mana-llm client (FAST_TEXT, inlined)
- apps/memoro/apps/server: ai.ts (FAST_TEXT, inlined)
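Every site follows the same mechanical pattern; a representative (hypothetical) before/after:

    // Before: provider/model literal, tuned via env-var.
    const model = process.env.WRITING_MODEL ?? 'ollama/some-model';

    // After: class alias from the SSOT; mana-llm resolves the concrete model.
    import { MANA_LLM } from '@mana/shared-ai';
    const model = MANA_LLM.LONG_FORM;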
Legacy env-vars removed: WRITING_MODEL, COMIC_STORYBOARD_MODEL,
VISION_MODEL, MANA_LLM_DEFAULT_MODEL. The chain in aliases.yaml is
now the single tuning surface; SIGHUP reloads it without redeploys.
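The reload is the usual Node signal hook; a minimal sketch, assuming the 'yaml' package (keeping the last good chains on a bad reload is one reasonable choice here, not necessarily what mana-llm does):

    import { readFileSync } from 'node:fs';
    import { parse } from 'yaml';

    const ALIASES_PATH = 'services/mana-llm/aliases.yaml';
    let aliasChains = parse(readFileSync(ALIASES_PATH, 'utf8'));

    process.on('SIGHUP', () => {
      try {
        aliasChains = parse(readFileSync(ALIASES_PATH, 'utf8'));
      } catch (err) {
        // Keep serving the previous chains rather than crashing mid-flight.
        console.error('aliases.yaml reload failed, keeping previous chains', err);
      }
    });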
New `scripts/validate-llm-strings.mjs` regex-scans 2538 files for
hardcoded `<provider>/<model>` strings and fails the build if any
land outside the SSOT or the explicitly-allowed paths (image-gen
modules, model-inspector code, this validator itself, the registry).
Wired into `validate:all` next to the i18n + theme validators.
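The scan itself is a walk-plus-regex; a minimal sketch, assuming the 'glob' package (the real script's pattern and allowlist are broader than shown):

    // Sketch of the validate-llm-strings idea, not the script itself.
    import { readFileSync } from 'node:fs';
    import { globSync } from 'glob';

    const HARDCODED = /\b(?:ollama|openai|anthropic)\/[\w.:-]+/; // provider/model literals
    const ALLOWED = [/llm-aliases\.ts$/, /validate-llm-strings\.mjs$/]; // plus image-gen, registry, ...

    const offenders = globSync('**/*.{ts,tsx,svelte,mjs}', { ignore: ['**/node_modules/**'] })
      .filter((file) => !ALLOWED.some((re) => re.test(file)))
      .filter((file) => HARDCODED.test(readFileSync(file, 'utf8')));

    if (offenders.length > 0) {
      console.error('Hardcoded model strings outside the SSOT:', offenders);
      process.exit(1);
    }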
Verified: `pnpm validate:llm-strings` clean, `pnpm --filter @mana/api
type-check` clean, `pnpm --filter @mana/ai-service type-check`
clean. Web type-check has 2 pre-existing errors in
SettingsSidebar.svelte (i18n MessageFormatter type drift, last
touched in 988c17a67 — unrelated to this work).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
95 lines · 3 KiB · TypeScript
/**
 * Writing module — one-shot prose generation against mana-llm.
 *
 * M3 scope: the client sends a fully-built prompt (system + user), we
 * round-trip to mana-llm and return the raw completion text. Draft +
 * version bookkeeping stays entirely client-side — the browser writes
 * the returned text into a new LocalDraftVersion via the generations
 * store. This keeps the server stateless and lets the same endpoint
 * serve refinement calls later (shorten / expand / tone).
 *
 * Later milestones:
 * M6 — selection-refinement tools will call this same endpoint with
 *      different system/user prompts (shorten, expand, change tone).
 * M7 — long-form drafts flip to mana-ai missions with streaming; the
 *      sync endpoint here stays for short-form as a fast path.
 */

import { Hono } from 'hono';
import { llmText, LlmError } from '../../lib/llm';
import { MANA_LLM } from '@mana/shared-ai';
import { logger, type AuthVariables } from '@mana/shared-hono';

const DEFAULT_MODEL = MANA_LLM.LONG_FORM;

/** Hard cap so a runaway briefing can't burn unlimited tokens. */
const MAX_OUTPUT_TOKENS = 8000;

interface GenerationRequest {
  systemPrompt?: string;
  userPrompt: string;
  /** Kind discriminator — logged for observability, not used for routing. */
  kind?: string;
  /** Ghostwriter default 0.7; selection-refinements might want 0.3. */
  temperature?: number;
  /** Token ceiling. Server clamps to MAX_OUTPUT_TOKENS. */
  maxTokens?: number;
  /** Optional model override — most callers leave this unset. */
  model?: string;
}

const routes = new Hono<{ Variables: AuthVariables }>();

routes.post('/generations', async (c) => {
  const userId = c.get('userId');
  const body = (await c.req.json()) as Partial<GenerationRequest>;

  if (!body.userPrompt || typeof body.userPrompt !== 'string') {
    return c.json({ error: 'userPrompt required' }, 400);
  }

  const maxTokens = Math.min(MAX_OUTPUT_TOKENS, Math.max(64, body.maxTokens ?? 2000));
  const temperature =
    typeof body.temperature === 'number' ? Math.max(0, Math.min(1.2, body.temperature)) : 0.7;
  const model = body.model || DEFAULT_MODEL;

  const startedAt = Date.now();
  try {
    const result = await llmText({
      model,
      system: body.systemPrompt,
      user: body.userPrompt,
      temperature,
      maxTokens,
    });
    const durationMs = Date.now() - startedAt;
    logger.info('writing.generation_ok', {
      userId,
      kind: body.kind,
      model: result.model,
      outputChars: result.text.length,
      tokenUsage: result.tokenUsage,
      durationMs,
    });
    return c.json({
      output: result.text,
      model: result.model,
      tokenUsage: result.tokenUsage,
      durationMs,
    });
  } catch (err) {
    const durationMs = Date.now() - startedAt;
    const message = err instanceof Error ? err.message : String(err);
    logger.error('writing.generation_failed', {
      userId,
      kind: body.kind,
      model,
      error: message,
      status: err instanceof LlmError ? err.status : undefined,
      durationMs,
    });
    return c.json({ error: 'Generation failed', detail: message, durationMs }, 500);
  }
});

export { routes as writingRoutes };
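For reference, a client call against this route looks roughly like this (mount path and auth handling are assumptions; they depend on where writingRoutes is mounted and on the app's middleware):

    // Hypothetical caller. Assumes the router is mounted at /api/writing and
    // that auth middleware resolves userId before the handler runs.
    const res = await fetch('/api/writing/generations', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        userPrompt: 'Draft a 200-word intro about tidal energy.',
        kind: 'ghostwriter',
        // model left unset: the server falls back to MANA_LLM.LONG_FORM
      }),
    });
    if (!res.ok) throw new Error(`generation failed: ${res.status}`);
    const { output, model, tokenUsage, durationMs } = await res.json();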