From 63a91e36a2f63f698cb24d9e2bd0633c2261c144 Mon Sep 17 00:00:00 2001
From: Till JS
Date: Wed, 8 Apr 2026 22:37:07 +0200
Subject: [PATCH] fix(research): use /v1/chat/completions for mana-llm (not /api/v1/)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

End-to-end testing surfaced a 404 from the synth path. mana-llm
(services/mana-llm/src/main.py) mounts the OpenAI-compatible API at
/v1/* — there's no /api prefix.

The first quick-depth e2e run only worked because the planner is
skipped on quick (it just uses the question itself), so llmJson never
fired; only llmStream did. The streaming path used the wrong prefix as
well, but the test happened to land before this was caught.

The other apps/api modules (chat, guides, context, traces) all use the
wrong /api/v1/ path too — that's a separate, pre-existing bug to be
addressed in their own commits.

Verified by re-running a standard-depth research run end-to-end against
mana-llm pointed at the GPU server's ollama with gemma3:4b/12b: plan +
retrieve + extract + synth all succeed.

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 apps/api/src/lib/llm.ts | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/apps/api/src/lib/llm.ts b/apps/api/src/lib/llm.ts
index 0f879b525..6ae0b4954 100644
--- a/apps/api/src/lib/llm.ts
+++ b/apps/api/src/lib/llm.ts
@@ -9,7 +9,7 @@
  * llmStream() — streaming, calls onToken() for each delta and returns
  * the full concatenated text at the end. Used for synthesis.
  *
- * mana-llm exposes an OpenAI-compatible /api/v1/chat/completions endpoint
+ * mana-llm exposes an OpenAI-compatible /v1/chat/completions endpoint
  * (see services/mana-llm). Models are namespaced as `provider/model`, e.g.
  * `ollama/gemma3:4b`, `openrouter/meta-llama/llama-3.1-70b-instruct`.
  *
@@ -66,7 +66,7 @@ function buildMessages(system: string | undefined, user: string): LlmMessage[] {
  * Throws LlmError on transport/HTTP failure or if the body isn't valid JSON.
  */
 export async function llmJson<T>(opts: LlmJsonOptions): Promise<T> {
-  const res = await fetch(`${LLM_URL}/api/v1/chat/completions`, {
+  const res = await fetch(`${LLM_URL}/v1/chat/completions`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify({
@@ -109,7 +109,7 @@ export async function llmJson<T>(opts: LlmJsonOptions): Promise<T> {
  * sentinel `data: [DONE]`.
  */
 export async function llmStream(opts: LlmStreamOptions): Promise<string> {
-  const res = await fetch(`${LLM_URL}/api/v1/chat/completions`, {
+  const res = await fetch(`${LLM_URL}/v1/chat/completions`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json' },
     body: JSON.stringify({