mirror of
https://github.com/Memo-2023/mana-monorepo.git
synced 2026-05-14 21:41:09 +02:00
feat(llm-aliases): M5 — migrate consumers to MANA_LLM aliases
Final milestone of docs/plans/llm-fallback-aliases.md. Every backend
caller now requests models via the `mana/<class>` alias system instead
of hardcoded `ollama/...` strings. mana-llm resolves aliases through
`services/mana-llm/aliases.yaml` with health-aware fallback (M3) and
emits resolved-model + fallback metrics (M4).
SSOT moved to `packages/shared-ai/src/llm-aliases.ts` so apps/api,
apps/mana/apps/web, and services/mana-ai all import the same
`MANA_LLM` constant via the existing `@mana/shared-ai` workspace
dependency. Three additional sites (memoro-server, mana-events,
mana-research) inline the alias string with an SSOT comment because
they don't pull @mana/shared-ai today.
Migrated 14 sites across 10 files:
- apps/api: writing(LONG_FORM), comic(STRUCTURED), context(FAST_TEXT),
food(VISION), plants(VISION), research orchestrator (3 tiers
collapsed to STRUCTURED+FAST_TEXT/LONG_FORM)
- apps/mana/apps/web: voice/parse-task + parse-habit (STRUCTURED)
- services/mana-ai: planner llm-client + tick.ts (REASONING)
- services/mana-events: website-extractor (STRUCTURED, inlined)
- services/mana-research: mana-llm client (FAST_TEXT, inlined)
- apps/memoro/apps/server: ai.ts (FAST_TEXT, inlined)
Legacy env-vars removed: WRITING_MODEL, COMIC_STORYBOARD_MODEL,
VISION_MODEL, MANA_LLM_DEFAULT_MODEL. The chain in aliases.yaml is
now the single tuning surface; SIGHUP reloads it without redeploys.
New `scripts/validate-llm-strings.mjs` regex-scans 2538 files for
hardcoded `<provider>/<model>` strings and fails the build if any
land outside the SSOT or the explicitly-allowed paths (image-gen
modules, model-inspector code, this validator itself, the registry).
Wired into `validate:all` next to the i18n + theme validators.
Verified: `pnpm validate:llm-strings` clean, `pnpm --filter @mana/api
type-check` clean, `pnpm --filter @mana/ai-service type-check`
clean. Web type-check has 2 pre-existing errors in
SettingsSidebar.svelte (i18n MessageFormatter type drift, last
touched in 988c17a67 — unrelated to this work).
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
8a49e3ffd5
commit
fea3adf5fe
19 changed files with 299 additions and 50 deletions
|
|
@ -30,6 +30,7 @@ import { loadActiveAgents, refreshAgentSnapshots, type ServerAgent } from '../db
|
|||
import { appendServerIteration, planToIteration } from '../db/iteration-writer';
|
||||
import { refreshSnapshots } from '../db/snapshot-refresh';
|
||||
import { createServerLlmClient, ProviderCallError } from '../planner/llm-client';
|
||||
import { MANA_LLM } from '@mana/shared-ai';
|
||||
import { SERVER_TOOLS } from '../planner/tools';
|
||||
import {
|
||||
ticksTotal,
|
||||
|
|
@ -393,7 +394,7 @@ async function planOneMission(
|
|||
pretickUsage24h,
|
||||
});
|
||||
|
||||
const plannerModel = 'google/gemini-2.5-flash';
|
||||
const plannerModel = MANA_LLM.REASONING;
|
||||
|
||||
// Claude-Code wU2 pattern: fold the middle of messages into a structured
|
||||
// summary once cumulative tokens cross 92% of maxContextTokens.
|
||||
|
|
@ -493,7 +494,7 @@ async function planOneMission(
|
|||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
if (err instanceof ProviderCallError) {
|
||||
const provider = inferProviderFromModel('google/gemini-2.5-flash');
|
||||
const provider = inferProviderFromModel(MANA_LLM.REASONING);
|
||||
providerErrorsTotal.inc({ provider, kind: err.kind });
|
||||
}
|
||||
console.warn(`[mana-ai tick] mission=${m.id} planner loop failed: ${msg}`);
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ import type {
|
|||
LlmFinishReason,
|
||||
ToolCallRequest,
|
||||
} from '@mana/shared-ai';
|
||||
import { MANA_LLM } from '@mana/shared-ai';
|
||||
|
||||
/** Thrown when mana-llm returns a non-2xx status. ``kind`` mirrors the
|
||||
* structured ProviderError vocabulary (blocked / truncated / auth /
|
||||
|
|
@ -37,7 +38,7 @@ export interface ServerLlmClientOptions {
|
|||
readonly fetchTimeoutMs?: number;
|
||||
}
|
||||
|
||||
const DEFAULT_MODEL = 'google/gemini-2.5-flash';
|
||||
const DEFAULT_MODEL = MANA_LLM.REASONING;
|
||||
const DEFAULT_FETCH_TIMEOUT_MS = 120_000;
|
||||
|
||||
export function createServerLlmClient(opts: ServerLlmClientOptions): LlmClient {
|
||||
|
|
|
|||
|
|
@ -121,7 +121,10 @@ async function llmExtractEvents(
|
|||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: 'ollama/gemma3:4b',
|
||||
// JSON event extraction → STRUCTURED alias (resolved by mana-llm).
|
||||
// SSOT: packages/shared-ai/src/llm-aliases.ts. Inlined because
|
||||
// mana-events doesn't depend on @mana/shared-ai today.
|
||||
model: 'mana/structured',
|
||||
messages: [
|
||||
{ role: 'system', content: buildExtractionPrompt() },
|
||||
{ role: 'user', content: `Extrahiere Events von dieser Seite:\n\n${pageContent}` },
|
||||
|
|
|
|||
|
|
@ -19,7 +19,11 @@ export class ManaLlmClient {
|
|||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: opts.model ?? 'ollama/gemma3:4b',
|
||||
// Default to the FAST_TEXT alias — mana-research uses mana-llm
|
||||
// for query classification and short-form analysis. SSOT:
|
||||
// packages/shared-ai/src/llm-aliases.ts. Inlined because
|
||||
// mana-research doesn't depend on @mana/shared-ai today.
|
||||
model: opts.model ?? 'mana/fast-text',
|
||||
messages,
|
||||
max_tokens: opts.maxTokens ?? 256,
|
||||
temperature: opts.temperature ?? 0.2,
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue