feat(local-llm): add Gemma 2 + allow HF/MLC hosts in CSP

WebLLM was blocked by connect-src — model config and weight shards live
on huggingface.co (+ cdn-lfs.* for LFS), and the WebGPU model_lib WASM
comes from raw.githubusercontent.com (binary-mlc-llm-libs). Also wires
Gemma 2 2B/9B into the model registry so /llm-test picks them up.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Till JS 2026-04-08 18:00:57 +02:00
parent ed746297b5
commit 4fd5ff3199
2 changed files with 22 additions and 0 deletions


@@ -108,6 +108,14 @@ window.__PUBLIC_GLITCHTIP_DSN__ = ${JSON.stringify(PUBLIC_GLITCHTIP_DSN)};
PUBLIC_MANA_LLM_URL_CLIENT,
PUBLIC_MANA_EVENTS_URL_CLIENT,
'wss://sync.mana.how',
// @mana/local-llm (WebLLM) downloads model weights + config from
// the mlc-ai HuggingFace repos and the WebGPU model library WASM
// from the binary-mlc-llm-libs GitHub raw host.
'https://huggingface.co',
'https://*.huggingface.co',
'https://cdn-lfs.huggingface.co',
'https://cdn-lfs-us-1.huggingface.co',
'https://raw.githubusercontent.com',
// Allow all localhost ports in development
...(isDev ? ['http://localhost:*', 'ws://localhost:*'] : []),
].filter(Boolean),
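
For reference, a minimal sketch of the client-side load these entries unblock, assuming the standard @mlc-ai/web-llm API (CreateMLCEngine and its initProgressCallback option); the model ID matches the registry entry added below:

// WebLLM resolves the model config + weight shards from the mlc-ai
// HuggingFace repo and the model_lib WASM from raw.githubusercontent.com,
// i.e. exactly the hosts allowed in connect-src above.
import { CreateMLCEngine } from '@mlc-ai/web-llm';

const engine = await CreateMLCEngine('gemma-2-2b-it-q4f16_1-MLC', {
  initProgressCallback: (report) => console.log(report.text),
});

const reply = await engine.chat.completions.create({
  messages: [{ role: 'user', content: 'Say hi in five words.' }],
});
console.log(reply.choices[0]?.message.content);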


@@ -20,6 +20,20 @@ export const MODELS = {
downloadSizeMb: 400,
ramUsageMb: 800,
},
/** Google Gemma 2 — strong general-purpose model, similar size class to Qwen 1.5B */
'gemma-2-2b': {
modelId: 'gemma-2-2b-it-q4f16_1-MLC',
displayName: 'Gemma 2 2B',
downloadSizeMb: 1400,
ramUsageMb: 2200,
},
/** Google Gemma 2 9B — much higher quality, needs a beefy GPU (~6GB VRAM) */
'gemma-2-9b': {
modelId: 'gemma-2-9b-it-q4f16_1-MLC',
displayName: 'Gemma 2 9B',
downloadSizeMb: 5300,
ramUsageMb: 6500,
},
} as const satisfies Record<string, ModelConfig>;
export type ModelKey = keyof typeof MODELS;
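
The ModelConfig shape these entries satisfy is not shown in this diff; inferred from the fields above, it looks like the following, together with a hypothetical listModels helper of the kind /llm-test could use to enumerate the registry (the helper name is illustrative, not from this commit):

interface ModelConfig {
  modelId: string;        // MLC prebuilt model identifier
  displayName: string;    // human-readable label for pickers
  downloadSizeMb: number; // approximate weight download size
  ramUsageMb: number;     // approximate runtime memory footprint
}

// Illustrative helper: enumerate registry entries for a model picker.
function listModels(): Array<{ key: ModelKey; label: string }> {
  return (Object.keys(MODELS) as ModelKey[]).map((key) => ({
    key,
    label: `${MODELS[key].displayName} (~${MODELS[key].downloadSizeMb} MB)`,
  }));
}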