mirror of
https://github.com/Memo-2023/mana-monorepo.git
synced 2026-05-17 01:19:40 +02:00
Pre-launch theme system audit found multiple parallel layers in themes.css
(--theme-X full hsl() strings, --X partial shadcn aliases, --color-X populated
by the runtime store with raw channels), plus dead-code companion files. The
inconsistency caused light-mode regressions whenever scoped-CSS consumers
wrote `var(--color-X)` standalone: the variable holds raw HSL channels, which
are not a valid color value, so the browser fell back to the inherited color
(white).
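
A minimal sketch of the failure mode (variable name and channel values
illustrative):

```css
:root {
  --color-foreground: 0 0% 17%; /* raw HSL channels, not a color */
}

/* bug pattern: invalid at computed-value time, falls back to inherited (white) */
.widget { color: var(--color-foreground); }

/* fix: wrap the channels so a real color is produced */
.widget { color: hsl(var(--color-foreground)); }
```
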
Rewrite to one consistent layer (sketched after this list):
- Source of truth: --color-X defined as raw HSL channels (e.g.
`0 0% 17%`) in :root, .dark, and all variant [data-theme="..."]
blocks. Matches the format the runtime store
(@mana/shared-theme/src/utils.ts) writes, eliminating the
static-fallback-vs-runtime mismatch and the corresponding flash
of unstyled content on hydration.
- @theme inline uses self-reference + Tailwind v4 <alpha-value>
placeholder so utility classes generate correctly AND opacity
modifiers work: `text-foreground/50` → `hsl(var(--color-foreground) / 0.5)`.
- @layer components (.btn-primary, .card, .badge, etc.) wraps
var(--color-X) refs with hsl() — they were broken in light mode
too for the same reason.
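
Sketch of the consolidated file (one token shown; the .dark value and
selectors are illustrative, and the @theme wiring follows the description
above):

```css
:root {
  --color-foreground: 0 0% 17%; /* raw channels, same format the store writes */
}
.dark {
  --color-foreground: 0 0% 98%;
}

@theme inline {
  /* self-reference + <alpha-value>: text-foreground/50 → hsl(var(--color-foreground) / 0.5) */
  --color-foreground: hsl(var(--color-foreground) / <alpha-value>);
}

@layer components {
  .btn-primary {
    color: hsl(var(--color-foreground)); /* always wrapped in scoped CSS */
  }
}
```
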
Convention going forward (also documented in the file header):
1. Markup: use Tailwind utility classes (text-foreground, bg-card, …)
2. Scoped CSS: hsl(var(--color-X)) — always wrap with hsl()
3. NEVER raw var(--color-X) in CSS — that's the bug pattern
Net file: 692 → 580 LOC. Single source layer, no indirection.
Also delete dead companion files (zero imports anywhere):
- tailwind-v4.css (had broken self-reference, never imported)
- theme-variables.css (legacy hex-based palette)
- components.css (legacy component utilities)
- index.js / preset.js / colors.js (Tailwind v3 preset format,
irrelevant under Tailwind v4)
package.json exports map shrinks accordingly to just `./themes.css`.
Consumers using `hsl(var(--color-X))` (~379 files across mana-web,
manavoxel-web, arcade-web) keep working unchanged — the public API
name `--color-X` is preserved. Only the broken pattern `var(--color-X)`
(~61 files) needs a follow-up sweep, handled in a separate commit.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
129 lines
4 KiB
TypeScript
/**
 * Chat module — LLM completions (sync + streaming SSE)
 * Ported from apps/chat/apps/server
 *
 * CRUD for conversations/messages handled by mana-sync.
 * This module handles AI completions via mana-llm or OpenRouter.
 */

import { Hono } from 'hono';
import { streamSSE } from 'hono/streaming';
import { consumeCredits, validateCredits } from '@mana/shared-hono/credits';
import type { AuthVariables } from '@mana/shared-hono';

const LLM_URL = process.env.MANA_LLM_URL || 'http://localhost:3025';

const routes = new Hono<{ Variables: AuthVariables }>();

// ─── Chat Completion (sync) ──────────────────────────────────

routes.post('/completions', async (c) => {
  const userId = c.get('userId');
  const { messages, model, temperature, maxTokens } = await c.req.json();

  if (!messages?.length) return c.json({ error: 'messages required' }, 400);

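  // Unspecified or local (ollama/, local/) models are billed at 0.1 credit;
  // hosted models cost 5 credits per call.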
  const isLocal = !model || model.startsWith('ollama/') || model.startsWith('local/');
  const cost = isLocal ? 0.1 : 5;

  const validation = await validateCredits(userId, 'AI_CHAT', cost);
  if (!validation.hasCredits) {
    return c.json({ error: 'Insufficient credits', required: cost }, 402);
  }

  try {
    const llmRes = await fetch(`${LLM_URL}/api/v1/chat/completions`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        messages,
        model: model || 'gemma3:4b',
        // ?? (not ||) so an explicit temperature of 0 is respected
        temperature: temperature ?? 0.7,
        max_tokens: maxTokens ?? 2000,
      }),
    });

    if (!llmRes.ok) return c.json({ error: 'LLM request failed' }, 502);

    const data = await llmRes.json();
    await consumeCredits(userId, 'AI_CHAT', cost, `Chat: ${model || 'gemma3:4b'}`);

    return c.json(data);
  } catch (_err) {
    return c.json({ error: 'Chat completion failed' }, 500);
  }
});

// ─── Chat Completion (streaming SSE) ─────────────────────────

routes.post('/completions/stream', async (c) => {
  const userId = c.get('userId');
  const { messages, model, temperature, maxTokens } = await c.req.json();

  if (!messages?.length) return c.json({ error: 'messages required' }, 400);

  const isLocal = !model || model.startsWith('ollama/') || model.startsWith('local/');
  const cost = isLocal ? 0.1 : 5;

  const validation = await validateCredits(userId, 'AI_CHAT', cost);
  if (!validation.hasCredits) {
    return c.json({ error: 'Insufficient credits' }, 402);
  }

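  // Proxy the upstream SSE stream through to the client line by line.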
  return streamSSE(c, async (stream) => {
    try {
      const llmRes = await fetch(`${LLM_URL}/api/v1/chat/completions`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          messages,
          model: model || 'gemma3:4b',
          // ?? (not ||) so an explicit temperature of 0 is respected
          temperature: temperature ?? 0.7,
          max_tokens: maxTokens ?? 2000,
          stream: true,
        }),
      });

      if (!llmRes.ok || !llmRes.body) {
        await stream.writeSSE({ data: JSON.stringify({ error: 'LLM failed' }) });
        return;
      }

      const reader = llmRes.body.getReader();
      const decoder = new TextDecoder();
      // Buffer across reads: a network chunk can end mid-line, so only
      // complete lines (everything before the last '\n') are forwarded.
      let buffer = '';

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() ?? '';
        // Forward upstream SSE data lines directly, stripping the 'data: ' prefix
        for (const line of lines) {
          if (line.startsWith('data: ')) {
            await stream.writeSSE({ data: line.slice(6) });
          }
        }
      }

      // Flush a final line that arrived without a trailing newline
      if (buffer.startsWith('data: ')) {
        await stream.writeSSE({ data: buffer.slice(6) });
      }

      await stream.writeSSE({ data: '[DONE]' });
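      // Fire-and-forget: credit accounting must not block or fail the stream.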
      consumeCredits(userId, 'AI_CHAT', cost, `Chat stream: ${model || 'gemma3:4b'}`).catch(
        () => {}
      );
    } catch (_err) {
      await stream.writeSSE({ data: JSON.stringify({ error: 'Stream failed' }) });
    }
  });
});

// ─── Models List ─────────────────────────────────────────────

routes.get('/models', async (c) => {
  try {
    const res = await fetch(`${LLM_URL}/api/v1/models`);
    if (res.ok) return c.json(await res.json());
  } catch {
    // mana-llm unreachable; fall through to the empty list below
  }
  return c.json({ models: [] });
});

export { routes as chatRoutes };