Server:
- New llmText() helper in apps/api/src/lib/llm.ts for plain-text
(non-streaming) completions with token-usage reporting.
- POST /api/v1/writing/generations (Hono + requireTier('beta'))
accepts system+user prompts, forwards to mana-llm (default model
ollama/gemma3:4b), returns raw output + model + tokenUsage. The
endpoint is stateless — draft/version bookkeeping is entirely
client-side so the same route serves refinement calls later.
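A request/response round-trip against this endpoint looks roughly like the sketch below. Values are illustrative only; the real client lives in writing/api.ts (see Client below), and the field names mirror the GenerationRequest interface in the route file.

// Illustrative caller, not the actual writing/api.ts client.
const res = await fetch('/api/v1/writing/generations', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    Authorization: `Bearer ${token}`, // token acquisition not shown here
  },
  body: JSON.stringify({
    systemPrompt: 'You are a ghostwriter. No preamble, no sign-off.',
    userPrompt: 'Write a short announcement for ...',
    kind: 'draft',      // free-form discriminator, only logged
    temperature: 0.7,   // server clamps to [0, 1.2]
    maxTokens: 2000,    // server clamps to 8000 (MAX_OUTPUT_TOKENS)
  }),
});
// 200: { output, model, tokenUsage, durationMs }
// 500: { error: 'Generation failed', detail, durationMs }
const { output, model, tokenUsage } = await res.json();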
Client:
- writing/api.ts — Bearer-authed fetch client (follows the food/
news-research pattern).
- writing/utils/prompt-builder.ts — pure builder turning a briefing
(+ optional style preset / extracted principles) into a system+user
pair. Forbids preamble / sign-off / meta commentary so the output is
ready to paste into a version.
- writing/stores/generations.svelte.ts — orchestrates the full flow:
queued → running → call → new LocalDraftVersion → pointer flip →
succeeded. On failure leaves the current version untouched with the
error on the generation record. Emits WritingDraftGenerationStarted /
WritingDraftVersionCreated / WritingDraftGenerationFailed events.
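As a rough sketch of that flow: only LocalDraftVersion, currentVersionId and the three event names come from the module; every other type, helper and field below is a placeholder standing in for the real runes store.

// Placeholder types/helpers for illustration only.
type LocalDraftVersion = { id: string; content: string; model: string; createdAt: number };
type LocalDraft = { id: string; versions: LocalDraftVersion[]; currentVersionId?: string };
declare function buildPrompt(briefing: unknown): { systemPrompt: string; userPrompt: string };
declare function createGeneration(req: { systemPrompt: string; userPrompt: string }): Promise<{ output: string; model: string }>;
declare function emit(event: string, payload: Record<string, unknown>): void;

async function runGeneration(draft: LocalDraft, briefing: unknown) {
  const gen = { id: crypto.randomUUID(), status: 'queued', error: '' };
  emit('WritingDraftGenerationStarted', { draftId: draft.id, generationId: gen.id });
  gen.status = 'running';
  try {
    const { systemPrompt, userPrompt } = buildPrompt(briefing);                     // prompt-builder output
    const { output, model } = await createGeneration({ systemPrompt, userPrompt }); // writing/api.ts call
    const version: LocalDraftVersion = {
      id: crypto.randomUUID(),
      content: output,
      model,
      createdAt: Date.now(),
    };
    draft.versions.push(version);
    draft.currentVersionId = version.id; // pointer flip: the new version goes live
    gen.status = 'succeeded';
    emit('WritingDraftVersionCreated', { draftId: draft.id, versionId: version.id });
  } catch (e) {
    // On failure the current version stays untouched; the error sits on the generation record.
    gen.status = 'failed';
    gen.error = e instanceof Error ? e.message : String(e);
    emit('WritingDraftGenerationFailed', { draftId: draft.id, generationId: gen.id, error: gen.error });
  }
}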
UI:
- Generate button in DetailView.svelte (label flips "Generate" / "Neu
generieren" based on whether the draft already has content).
- GenerationStatus.svelte strip surfaces queued / running / failed with
model + duration badges; succeeded generations auto-disappear because
the new version is already live via the currentVersionId pointer.
M3 is synchronous and non-streaming by design. M7 adds mission-based
long-form with streaming + outline stage + reference injection. M6 will
reuse the same /generations endpoint for selection-refinement prompts.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
94 lines · 3 KiB · TypeScript
/**
 * Writing module — one-shot prose generation against mana-llm.
 *
 * M3 scope: the client sends a fully-built prompt (system + user), we
 * round-trip to mana-llm and return the raw completion text. Draft +
 * version bookkeeping stays entirely client-side — the browser writes
 * the returned text into a new LocalDraftVersion via the generations
 * store. This keeps the server stateless and lets the same endpoint
 * serve refinement calls later (shorten / expand / tone).
 *
 * Later milestones:
 *   M6 — selection-refinement tools will call this same endpoint with
 *   different system/user prompts (shorten, expand, change tone).
 *   M7 — long-form drafts flip to mana-ai missions with streaming; the
 *   sync endpoint here stays for short-form as a fast path.
 */

import { Hono } from 'hono';
import { llmText, LlmError } from '../../lib/llm';
import { logger, type AuthVariables } from '@mana/shared-hono';

const DEFAULT_MODEL = process.env.WRITING_MODEL || 'ollama/gemma3:4b';

/** Hard cap so a runaway briefing can't burn unlimited tokens. */
const MAX_OUTPUT_TOKENS = 8000;

interface GenerationRequest {
  systemPrompt?: string;
  userPrompt: string;
  /** Kind discriminator — logged for observability, not used for routing. */
  kind?: string;
  /** Ghostwriter default 0.7; selection-refinements might want 0.3. */
  temperature?: number;
  /** Token ceiling. Server clamps to MAX_OUTPUT_TOKENS. */
  maxTokens?: number;
  /** Optional model override — most callers leave this unset. */
  model?: string;
}

const routes = new Hono<{ Variables: AuthVariables }>();

routes.post('/generations', async (c) => {
  const userId = c.get('userId');
  const body = (await c.req.json()) as Partial<GenerationRequest>;

  if (!body.userPrompt || typeof body.userPrompt !== 'string') {
    return c.json({ error: 'userPrompt required' }, 400);
  }

  const maxTokens = Math.min(MAX_OUTPUT_TOKENS, Math.max(64, body.maxTokens ?? 2000));
  const temperature =
    typeof body.temperature === 'number' ? Math.max(0, Math.min(1.2, body.temperature)) : 0.7;
  const model = body.model || DEFAULT_MODEL;

  const startedAt = Date.now();
  try {
    const result = await llmText({
      model,
      system: body.systemPrompt,
      user: body.userPrompt,
      temperature,
      maxTokens,
    });
    const durationMs = Date.now() - startedAt;
    logger.info('writing.generation_ok', {
      userId,
      kind: body.kind,
      model: result.model,
      outputChars: result.text.length,
      tokenUsage: result.tokenUsage,
      durationMs,
    });
    return c.json({
      output: result.text,
      model: result.model,
      tokenUsage: result.tokenUsage,
      durationMs,
    });
  } catch (err) {
    const durationMs = Date.now() - startedAt;
    const message = err instanceof Error ? err.message : String(err);
    logger.error('writing.generation_failed', {
      userId,
      kind: body.kind,
      model,
      error: message,
      status: err instanceof LlmError ? err.status : undefined,
      durationMs,
    });
    return c.json({ error: 'Generation failed', detail: message, durationMs }, 500);
  }
});

export { routes as writingRoutes };
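For context, mounting these routes in the API app presumably looks something like the sketch below; the import paths and the requireTier wiring are assumptions taken from the commit message, not from this file.

// Hypothetical wiring in the API entry point (paths are assumptions).
import { Hono } from 'hono';
import { requireTier } from '@mana/shared-hono';               // assumed export location
import { writingRoutes } from './routes/writing/generations';  // assumed file path

const app = new Hono();
app.use('/api/v1/writing/*', requireTier('beta')); // beta-tier gate mentioned in the commit message
app.route('/api/v1/writing', writingRoutes);       // exposes POST /api/v1/writing/generations

export default app;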