feat(writing): M3 — one-shot prose generation via mana-llm

Server:
- New llmText() helper in apps/api/src/lib/llm.ts for plain-text
  (non-streaming) completions with token-usage reporting.
- POST /api/v1/writing/generations (Hono + requireTier('beta'))
  accepts system+user prompts, forwards to mana-llm (default model
  ollama/gemma3:4b), returns raw output + model + tokenUsage. The
  endpoint is stateless — draft/version bookkeeping is entirely
  client-side so the same route serves refinement calls later.
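
For reference, a sketch of the wire contract in TypeScript, transcribed from
the routes diff further down (the clamps and defaults are the server-side
values shown there; the type names are just labels for this sketch):

  // Request body for POST /api/v1/writing/generations (see routes diff below).
  interface GenerationRequestBody {
    userPrompt: string;      // required; the server answers 400 without it
    systemPrompt?: string;
    kind?: string;           // logged for observability, not used for routing
    temperature?: number;    // clamped to [0, 1.2], default 0.7
    maxTokens?: number;      // clamped to [64, 8000], default 2000
    model?: string;          // default ollama/gemma3:4b (WRITING_MODEL env)
  }

  // Success response; failures return { error, detail, durationMs } with status 500.
  interface GenerationResponseBody {
    output: string;
    model: string;
    tokenUsage?: { input: number; output: number };
    durationMs: number;
  }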

Client:
- writing/api.ts — Bearer-authed fetch client (follows the food/
  news-research pattern); see the flow sketch after this list.
- writing/utils/prompt-builder.ts — pure builder turning a briefing
  (+ optional style preset / extracted principles) into a system+user
  pair. Forbids preamble / sign-off / meta commentary so the output is
  ready to paste into a version.
- writing/stores/generations.svelte.ts — orchestrates the full flow:
  queued → running → call → new LocalDraftVersion → pointer flip →
  succeeded. On failure leaves the current version untouched with the
  error on the generation record. Emits WritingDraftGenerationStarted /
  WritingDraftVersionCreated / WritingDraftGenerationFailed events.
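
A minimal sketch of how these three client pieces fit together on the happy
path. The client modules are not part of the diff shown below, so every name
and shape here (generateIntoDraft, the LocalDraft fields, etc.) is an
assumption for illustration, not the actual store code:

  // Illustrative only -- the real prompt-builder / api / store modules are
  // not shown in this commit, so names and shapes here are assumed.
  interface LocalDraftVersion { id: string; content: string; model: string; createdAt: number }
  interface LocalDraft { versions: LocalDraftVersion[]; currentVersionId: string | null }

  async function generateIntoDraft(
    draft: LocalDraft,
    prompt: { system: string; user: string }, // output of the prompt builder
    token: string,                            // Bearer token, as in the other module clients
  ): Promise<void> {
    const res = await fetch('/api/v1/writing/generations', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify({ systemPrompt: prompt.system, userPrompt: prompt.user, kind: 'draft' }),
    });
    if (!res.ok) {
      // Failure: the current version stays untouched; the store keeps the
      // error on the generation record instead.
      throw new Error(`generation failed: ${res.status}`);
    }
    const data = (await res.json()) as { output: string; model: string };

    // Success: write the output into a new version, then flip the pointer so
    // the UI immediately shows the generated text.
    const version: LocalDraftVersion = {
      id: crypto.randomUUID(),
      content: data.output,
      model: data.model,
      createdAt: Date.now(),
    };
    draft.versions.push(version);
    draft.currentVersionId = version.id;
  }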

UI:
- Generate button in DetailView.svelte (label flips "Generate" / "Neu
  generieren", i.e. regenerate, based on whether the draft already has
  content).
- GenerationStatus.svelte strip surfaces queued / running / failed with
  model + duration badges; succeeded generations auto-disappear because
  the new version is already live via the currentVersionId pointer.
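
A rough sketch of the strip's selection logic as described above (the record
shape and function name are illustrative, not the actual component code):

  type GenerationStatus = 'queued' | 'running' | 'succeeded' | 'failed';

  interface GenerationRecord {
    status: GenerationStatus;
    model: string;
    durationMs?: number;
    error?: string;
  }

  // Succeeded generations need no strip entry: their output is already the
  // draft's current version after the currentVersionId pointer flip.
  function visibleGenerations(all: GenerationRecord[]): GenerationRecord[] {
    return all.filter((g) => g.status !== 'succeeded');
  }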

M3 is synchronous and non-streaming by design. M7 adds mission-based
long-form with streaming + outline stage + reference injection. M6 will
reuse the same /generations endpoint for selection-refinement prompts.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
Author: Till JS
Date: 2026-04-24 15:11:48 +02:00
Parent: 3c3b2ebbc7
Commit: d725a8df8b
9 changed files with 814 additions and 11 deletions


@@ -42,6 +42,7 @@ import { newsRoutes } from './modules/news/routes';
import { newsResearchRoutes } from './modules/news-research/routes';
import { articlesRoutes } from './modules/articles/routes';
import { tracesRoutes } from './modules/traces/routes';
import { writingRoutes } from './modules/writing/routes';
import { presiRoutes } from './modules/presi/routes';
import { researchRoutes } from './modules/research/routes';
import { whoRoutes } from './modules/who/routes';
@@ -96,6 +97,7 @@ const RESOURCE_MODULES = [
  'research',
  'traces',
  'who',
  'writing',
] as const;
for (const mod of RESOURCE_MODULES) {
  app.use(`/api/v1/${mod}/*`, requireTier('beta'));
@@ -131,6 +133,7 @@ app.route('/api/v1/presi', presiRoutes);
app.route('/api/v1/research', researchRoutes);
app.route('/api/v1/website', websiteRoutes);
app.route('/api/v1/who', whoRoutes);
app.route('/api/v1/writing', writingRoutes);
// ─── Server Info ────────────────────────────────────────────
console.log(`mana-api starting on port ${PORT}...`);

apps/api/src/lib/llm.ts

@@ -31,6 +31,15 @@ export interface LlmJsonOptions {
  maxTokens?: number;
}

export interface LlmTextOptions {
  model: string;
  system?: string;
  user: string;
  temperature?: number;
  maxTokens?: number;
  signal?: AbortSignal;
}

export interface LlmStreamOptions {
  model: string;
  system?: string;
@@ -101,6 +110,56 @@ export async function llmJson<T = unknown>(opts: LlmJsonOptions): Promise<T> {
  }
}

/**
 * Call the LLM and return the raw text content: no JSON parsing, no
 * streaming. Used when you want a finished prose artifact (a generated
 * draft, a summary, a translation) as one string. Includes token usage
 * when the provider reports it so generation records can store it.
 */
export interface LlmTextResult {
  text: string;
  tokenUsage?: { input: number; output: number };
  model: string;
}

export async function llmText(opts: LlmTextOptions): Promise<LlmTextResult> {
  const res = await fetch(`${LLM_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: opts.model,
      messages: buildMessages(opts.system, opts.user),
      temperature: opts.temperature ?? 0.7,
      max_tokens: opts.maxTokens ?? 2000,
    }),
    signal: opts.signal,
  });

  if (!res.ok) {
    const body = await res.text().catch(() => '');
    throw new LlmError(`mana-llm returned ${res.status}`, res.status, body);
  }

  const data = (await res.json()) as {
    choices?: Array<{ message?: { content?: string } }>;
    usage?: { prompt_tokens?: number; completion_tokens?: number };
    model?: string;
  };

  const text = data.choices?.[0]?.message?.content;
  if (!text) throw new LlmError('mana-llm response missing content');

  return {
    text: text.trim(),
    tokenUsage:
      data.usage && typeof data.usage.prompt_tokens === 'number'
        ? {
            input: data.usage.prompt_tokens ?? 0,
            output: data.usage.completion_tokens ?? 0,
          }
        : undefined,
    model: data.model ?? opts.model,
  };
}

/**
 * Call the LLM in streaming mode. Invokes onToken() for each delta and
 * returns the full concatenated text once the stream completes.

apps/api/src/modules/writing/routes.ts

@@ -0,0 +1,94 @@
/**
 * Writing module: one-shot prose generation against mana-llm.
 *
 * M3 scope: the client sends a fully-built prompt (system + user), we
 * round-trip to mana-llm and return the raw completion text. Draft +
 * version bookkeeping stays entirely client-side: the browser writes
 * the returned text into a new LocalDraftVersion via the generations
 * store. This keeps the server stateless and lets the same endpoint
 * serve refinement calls later (shorten / expand / tone).
 *
 * Later milestones:
 * M6: selection-refinement tools will call this same endpoint with
 *     different system/user prompts (shorten, expand, change tone).
 * M7: long-form drafts flip to mana-ai missions with streaming; the
 *     sync endpoint here stays for short-form as a fast path.
 */
import { Hono } from 'hono';
import { llmText, LlmError } from '../../lib/llm';
import { logger, type AuthVariables } from '@mana/shared-hono';

const DEFAULT_MODEL = process.env.WRITING_MODEL || 'ollama/gemma3:4b';

/** Hard cap so a runaway briefing can't burn unlimited tokens. */
const MAX_OUTPUT_TOKENS = 8000;

interface GenerationRequest {
  systemPrompt?: string;
  userPrompt: string;
  /** Kind discriminator — logged for observability, not used for routing. */
  kind?: string;
  /** Ghostwriter default 0.7; selection-refinements might want 0.3. */
  temperature?: number;
  /** Token ceiling. Server clamps to MAX_OUTPUT_TOKENS. */
  maxTokens?: number;
  /** Optional model override — most callers leave this unset. */
  model?: string;
}

const routes = new Hono<{ Variables: AuthVariables }>();

routes.post('/generations', async (c) => {
  const userId = c.get('userId');
  const body = (await c.req.json()) as Partial<GenerationRequest>;

  if (!body.userPrompt || typeof body.userPrompt !== 'string') {
    return c.json({ error: 'userPrompt required' }, 400);
  }

  const maxTokens = Math.min(MAX_OUTPUT_TOKENS, Math.max(64, body.maxTokens ?? 2000));
  const temperature =
    typeof body.temperature === 'number' ? Math.max(0, Math.min(1.2, body.temperature)) : 0.7;
  const model = body.model || DEFAULT_MODEL;
  const startedAt = Date.now();

  try {
    const result = await llmText({
      model,
      system: body.systemPrompt,
      user: body.userPrompt,
      temperature,
      maxTokens,
    });

    const durationMs = Date.now() - startedAt;
    logger.info('writing.generation_ok', {
      userId,
      kind: body.kind,
      model: result.model,
      outputChars: result.text.length,
      tokenUsage: result.tokenUsage,
      durationMs,
    });

    return c.json({
      output: result.text,
      model: result.model,
      tokenUsage: result.tokenUsage,
      durationMs,
    });
  } catch (err) {
    const durationMs = Date.now() - startedAt;
    const message = err instanceof Error ? err.message : String(err);
    logger.error('writing.generation_failed', {
      userId,
      kind: body.kind,
      model,
      error: message,
      status: err instanceof LlmError ? err.status : undefined,
      durationMs,
    });
    return c.json({ error: 'Generation failed', detail: message, durationMs }, 500);
  }
});

export { routes as writingRoutes };