diff --git a/services/llm-playground/.gitignore b/services/llm-playground/.gitignore
new file mode 100644
index 000000000..d56da0f1e
--- /dev/null
+++ b/services/llm-playground/.gitignore
@@ -0,0 +1,7 @@
+node_modules/
+.svelte-kit/
+.turbo/
+.env
+dist/
+build/
+*.log
diff --git a/services/llm-playground/package.json b/services/llm-playground/package.json
new file mode 100644
index 000000000..68ce25ffa
--- /dev/null
+++ b/services/llm-playground/package.json
@@ -0,0 +1,26 @@
+{
+ "name": "@mana-llm/playground",
+ "private": true,
+ "version": "0.1.0",
+ "type": "module",
+ "scripts": {
+ "dev": "vite dev --port 5190",
+ "build": "vite build",
+ "preview": "vite preview",
+ "type-check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json"
+ },
+ "devDependencies": {
+ "@sveltejs/adapter-node": "^5.4.0",
+ "@sveltejs/kit": "^2.47.1",
+ "@sveltejs/vite-plugin-svelte": "^6.2.0",
+ "@tailwindcss/vite": "^4.1.7",
+ "svelte": "^5.41.0",
+ "svelte-check": "^4.3.3",
+ "tailwindcss": "^4.1.17",
+ "typescript": "^5.9.3",
+ "vite": "^7.1.7"
+ },
+ "dependencies": {
+ "marked": "^17.0.0"
+ }
+}
diff --git a/services/llm-playground/src/app.css b/services/llm-playground/src/app.css
new file mode 100644
index 000000000..0bf12813a
--- /dev/null
+++ b/services/llm-playground/src/app.css
@@ -0,0 +1,103 @@
+@import 'tailwindcss';
+
+:root {
+ --color-bg: #0f0f0f;
+ --color-surface: #1a1a1a;
+ --color-surface-hover: #252525;
+ --color-border: #333;
+ --color-text: #e5e5e5;
+ --color-text-muted: #888;
+ --color-primary: #3b82f6;
+ --color-primary-hover: #2563eb;
+ --color-success: #22c55e;
+ --color-error: #ef4444;
+ --color-warning: #f59e0b;
+}
+
+html {
+ background-color: var(--color-bg);
+ color: var(--color-text);
+}
+
+body {
+ font-family:
+ 'Inter',
+ -apple-system,
+ BlinkMacSystemFont,
+ 'Segoe UI',
+ Roboto,
+ sans-serif;
+}
+
+/* Custom scrollbar */
+::-webkit-scrollbar {
+ width: 8px;
+ height: 8px;
+}
+
+::-webkit-scrollbar-track {
+ background: var(--color-surface);
+}
+
+::-webkit-scrollbar-thumb {
+ background: var(--color-border);
+ border-radius: 4px;
+}
+
+::-webkit-scrollbar-thumb:hover {
+ background: #444;
+}
+
+/* Prose styles for markdown */
+.prose {
+ line-height: 1.6;
+}
+
+.prose pre {
+ background: var(--color-bg);
+ padding: 1rem;
+ border-radius: 0.5rem;
+ overflow-x: auto;
+ margin: 0.5rem 0;
+}
+
+.prose code {
+ background: var(--color-bg);
+ padding: 0.125rem 0.375rem;
+ border-radius: 0.25rem;
+ font-size: 0.875em;
+}
+
+.prose pre code {
+ background: transparent;
+ padding: 0;
+}
+
+.prose p {
+ margin: 0.5rem 0;
+}
+
+.prose ul,
+.prose ol {
+ margin: 0.5rem 0;
+ padding-left: 1.5rem;
+}
+
+.prose li {
+ margin: 0.25rem 0;
+}
+
+.prose h1,
+.prose h2,
+.prose h3 {
+ margin-top: 1rem;
+ margin-bottom: 0.5rem;
+ font-weight: 600;
+}
+
+.prose blockquote {
+ border-left: 3px solid var(--color-border);
+ padding-left: 1rem;
+ margin: 0.5rem 0;
+ color: var(--color-text-muted);
+}
diff --git a/services/llm-playground/src/app.d.ts b/services/llm-playground/src/app.d.ts
new file mode 100644
index 000000000..db1aa5db0
--- /dev/null
+++ b/services/llm-playground/src/app.d.ts
@@ -0,0 +1,13 @@
+/// <reference types="@sveltejs/kit" />
+
+declare global {
+ namespace App {
+ // interface Error {}
+ // interface Locals {}
+ // interface PageData {}
+ // interface PageState {}
+ // interface Platform {}
+ }
+}
+
+export {};
diff --git a/services/llm-playground/src/app.html b/services/llm-playground/src/app.html
new file mode 100644
index 000000000..77a5ff52c
--- /dev/null
+++ b/services/llm-playground/src/app.html
@@ -0,0 +1,12 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="utf-8" />
+    <link rel="icon" href="%sveltekit.assets%/favicon.png" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+    %sveltekit.head%
+  </head>
+  <body data-sveltekit-preload-data="hover">
+    <div style="display: contents">%sveltekit.body%</div>
+  </body>
+</html>
diff --git a/services/llm-playground/src/lib/api/llm.ts b/services/llm-playground/src/lib/api/llm.ts
new file mode 100644
index 000000000..3c44b627e
--- /dev/null
+++ b/services/llm-playground/src/lib/api/llm.ts
@@ -0,0 +1,118 @@
+import type {
+ ChatCompletionRequest,
+ ChatCompletionResponse,
+ HealthResponse,
+ ModelsResponse,
+ StreamChunk,
+} from '$lib/types';
+import { env } from '$env/dynamic/public';
+
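+// Base URL of the mana-llm service; override with PUBLIC_MANA_LLM_URL.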
+const API_BASE = env.PUBLIC_MANA_LLM_URL || 'http://localhost:3025';
+
+export async function getHealth(): Promise<HealthResponse> {
+ const response = await fetch(`${API_BASE}/health`);
+ if (!response.ok) {
+ throw new Error(`Health check failed: ${response.statusText}`);
+ }
+ return response.json();
+}
+
+export async function getModels(): Promise<ModelsResponse> {
+ const response = await fetch(`${API_BASE}/v1/models`);
+ if (!response.ok) {
+ throw new Error(`Failed to fetch models: ${response.statusText}`);
+ }
+ return response.json();
+}
+
+export async function sendCompletion(
+ request: ChatCompletionRequest
+): Promise<ChatCompletionResponse> {
+ const response = await fetch(`${API_BASE}/v1/chat/completions`, {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({ ...request, stream: false }),
+ });
+ if (!response.ok) {
+ throw new Error(`Completion failed: ${response.statusText}`);
+ }
+ return response.json();
+}
+
+export async function* streamCompletion(
+  request: ChatCompletionRequest,
+  signal?: AbortSignal
+): AsyncGenerator<string> {
+  const response = await fetch(`${API_BASE}/v1/chat/completions`, {
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ ...request, stream: true }),
+    signal, // lets the chat store's Stop button cancel the in-flight request
+  });
+
+ if (!response.ok) {
+ const error = await response.text();
+ throw new Error(`Completion failed: ${response.statusText} - ${error}`);
+ }
+
+ if (!response.body) {
+ throw new Error('Response body is null');
+ }
+
+ const reader = response.body.getReader();
+ const decoder = new TextDecoder();
+ let buffer = '';
+
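+  // The endpoint emits Server-Sent Events: each `data: <json>` line carries
+  // one chunk and the stream terminates with `data: [DONE]`. Network reads
+  // can split mid-line, so partial lines are buffered until complete.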
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) break;
+
+ buffer += decoder.decode(value, { stream: true });
+ const lines = buffer.split('\n');
+ buffer = lines.pop() || '';
+
+ for (const line of lines) {
+ const trimmed = line.trim();
+ if (!trimmed || !trimmed.startsWith('data: ')) continue;
+
+ const data = trimmed.slice(6);
+ if (data === '[DONE]') return;
+
+ try {
+ const parsed: StreamChunk = JSON.parse(data);
+ const content = parsed.choices[0]?.delta?.content;
+ if (content) yield content;
+ } catch {
+ // Ignore malformed JSON chunks
+ }
+ }
+ }
+
+ // Process remaining buffer
+ if (buffer.trim()) {
+ const lines = buffer.split('\n');
+ for (const line of lines) {
+ const trimmed = line.trim();
+ if (!trimmed || !trimmed.startsWith('data: ')) continue;
+
+ const data = trimmed.slice(6);
+ if (data === '[DONE]') return;
+
+ try {
+ const parsed: StreamChunk = JSON.parse(data);
+ const content = parsed.choices[0]?.delta?.content;
+ if (content) yield content;
+ } catch {
+ // Ignore malformed JSON chunks
+ }
+ }
+ }
+ } finally {
+ reader.releaseLock();
+ }
+}
diff --git a/services/llm-playground/src/lib/components/chat/ChatInput.svelte b/services/llm-playground/src/lib/components/chat/ChatInput.svelte
new file mode 100644
index 000000000..4841d8cb3
--- /dev/null
+++ b/services/llm-playground/src/lib/components/chat/ChatInput.svelte
@@ -0,0 +1,77 @@
+
+
+
+
+
+
+ {#if chatStore.isStreaming}
+
+ {:else}
+
+ {/if}
+
+
diff --git a/services/llm-playground/src/lib/components/chat/MessageBubble.svelte b/services/llm-playground/src/lib/components/chat/MessageBubble.svelte
new file mode 100644
index 000000000..6fcf4c1cb
--- /dev/null
+++ b/services/llm-playground/src/lib/components/chat/MessageBubble.svelte
@@ -0,0 +1,62 @@
+
+
+
+
+ {#if message.role === 'assistant'}
+
+ {#if message.isStreaming && message.content === ''}
+
+
+
+
+
+ {:else}
+
+ {@html renderedContent}
+ {#if message.isStreaming}
+
+ {/if}
+ {/if}
+
+ {:else}
+
+ {message.content}
+ {/if}
+
+
+ {formatTime(message.timestamp)}
+ {#if message.model}
+ ·
+ {message.model.split('/').pop()}
+ {/if}
+
+
+
diff --git a/services/llm-playground/src/lib/components/chat/MessageList.svelte b/services/llm-playground/src/lib/components/chat/MessageList.svelte
new file mode 100644
index 000000000..19681dfc6
--- /dev/null
+++ b/services/llm-playground/src/lib/components/chat/MessageList.svelte
@@ -0,0 +1,43 @@
+
+
+
+ {#if chatStore.messages.length === 0}
+
+
+
+ Start a conversation
+
+ Select a model from the sidebar and send a message to begin testing the mana-llm service.
+
+
+ {:else}
+
+ {#each chatStore.messages as message (message.id)}
+
+ {/each}
+
+ {/if}
+
diff --git a/services/llm-playground/src/lib/components/layout/Header.svelte b/services/llm-playground/src/lib/components/layout/Header.svelte
new file mode 100644
index 000000000..a74c80320
--- /dev/null
+++ b/services/llm-playground/src/lib/components/layout/Header.svelte
@@ -0,0 +1,79 @@
+
+
+
+
+
+
+
+ LLM Playground
+
+ mana-llm Service
+
+
+
+
+
+ {#if healthStatus === 'loading'}
+
+
+ Checking...
+ {:else if healthStatus === 'healthy'}
+
+
+ Healthy
+ {#if healthDetails}
+
+ ({healthDetails})
+ {/if}
+ {:else}
+
+
+ Offline
+ {/if}
+
+
+
+
diff --git a/services/llm-playground/src/lib/components/layout/Sidebar.svelte b/services/llm-playground/src/lib/components/layout/Sidebar.svelte
new file mode 100644
index 000000000..81818fe0a
--- /dev/null
+++ b/services/llm-playground/src/lib/components/layout/Sidebar.svelte
@@ -0,0 +1,74 @@
+
+
+
diff --git a/services/llm-playground/src/lib/components/settings/ModelSelector.svelte b/services/llm-playground/src/lib/components/settings/ModelSelector.svelte
new file mode 100644
index 000000000..56c1639d6
--- /dev/null
+++ b/services/llm-playground/src/lib/components/settings/ModelSelector.svelte
@@ -0,0 +1,55 @@
+
+
+
+
+ {#if modelsStore.loading}
+
+ Loading models...
+
+ {:else if modelsStore.error}
+
+
+ {modelsStore.error}
+
+
+
+ {:else}
+
+
+ {modelsStore.models.length} models available
+
+ {/if}
+
diff --git a/services/llm-playground/src/lib/components/settings/ParameterPanel.svelte b/services/llm-playground/src/lib/components/settings/ParameterPanel.svelte
new file mode 100644
index 000000000..e40b0ffb5
--- /dev/null
+++ b/services/llm-playground/src/lib/components/settings/ParameterPanel.svelte
@@ -0,0 +1,87 @@
+
+
+
+
+
+
+ {settingsStore.temperature.toFixed(2)}
+
+
+
+ Precise
+ Creative
+
+
+
+
+
+
+ {settingsStore.maxTokens}
+
+
+
+ 256
+ 8192
+
+
+
+
+
+
+ {settingsStore.topP.toFixed(2)}
+
+
+
+ Focused
+ Diverse
+
+
+
diff --git a/services/llm-playground/src/lib/components/settings/SystemPromptEditor.svelte b/services/llm-playground/src/lib/components/settings/SystemPromptEditor.svelte
new file mode 100644
index 000000000..ad8d93ce5
--- /dev/null
+++ b/services/llm-playground/src/lib/components/settings/SystemPromptEditor.svelte
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+
+
+
+ {settingsStore.systemPrompt.length} characters
+
+
diff --git a/services/llm-playground/src/lib/stores/chat.svelte.ts b/services/llm-playground/src/lib/stores/chat.svelte.ts
new file mode 100644
index 000000000..327cf0615
--- /dev/null
+++ b/services/llm-playground/src/lib/stores/chat.svelte.ts
@@ -0,0 +1,148 @@
+import type { ChatMessage, Message } from '$lib/types';
+import { streamCompletion } from '$lib/api/llm';
+import { settingsStore } from './settings.svelte';
+
+function generateId(): string {
+ return crypto.randomUUID();
+}
+
+function createChatStore() {
+  let messages = $state<ChatMessage[]>([]);
+ let isStreaming = $state(false);
+  let abortController = $state<AbortController | null>(null);
+
+ return {
+ get messages() {
+ return messages;
+ },
+ get isStreaming() {
+ return isStreaming;
+ },
+
+ async sendMessage(content: string) {
+ if (isStreaming || !content.trim()) return;
+
+ // Add user message
+ const userMessage: ChatMessage = {
+ id: generateId(),
+ role: 'user',
+ content: content.trim(),
+ timestamp: new Date(),
+ };
+ messages.push(userMessage);
+
+ // Create assistant message placeholder
+ const assistantMessage: ChatMessage = {
+ id: generateId(),
+ role: 'assistant',
+ content: '',
+ timestamp: new Date(),
+ model: settingsStore.model,
+ isStreaming: true,
+ };
+ messages.push(assistantMessage);
+
+ // Build messages for API
+ const apiMessages: Message[] = [];
+
+ if (settingsStore.systemPrompt.trim()) {
+ apiMessages.push({
+ role: 'system',
+ content: settingsStore.systemPrompt,
+ });
+ }
+
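+      // Replay the visible conversation, skipping the assistant placeholder
+      // that is still streaming (it was pushed above with empty content).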
+ for (const msg of messages) {
+ if (msg.role === 'user' || (msg.role === 'assistant' && !msg.isStreaming)) {
+ apiMessages.push({
+ role: msg.role,
+ content: msg.content,
+ });
+ }
+ }
+
+ // Start streaming
+ isStreaming = true;
+ abortController = new AbortController();
+
+ try {
+ const stream = streamCompletion({
+ model: settingsStore.model,
+ messages: apiMessages,
+ temperature: settingsStore.temperature,
+ max_tokens: settingsStore.maxTokens,
+ top_p: settingsStore.topP,
+ stream: true,
+        }, abortController.signal);
+
+ for await (const chunk of stream) {
+ // Find and update the assistant message
+ const idx = messages.findIndex((m) => m.id === assistantMessage.id);
+ if (idx !== -1) {
+ messages[idx].content += chunk;
+ }
+ }
+
+ // Mark streaming complete
+ const idx = messages.findIndex((m) => m.id === assistantMessage.id);
+ if (idx !== -1) {
+ messages[idx].isStreaming = false;
+ messages[idx].timestamp = new Date();
+ }
+      } catch (error) {
+        const idx = messages.findIndex((m) => m.id === assistantMessage.id);
+        if (idx !== -1) {
+          if (error instanceof Error && error.name === 'AbortError') {
+            // Stopped via stopStreaming(): keep the partial content streamed so far
+            messages[idx].isStreaming = false;
+          } else {
+            // Surface the failure in the assistant bubble
+            messages[idx].content = `Error: ${error instanceof Error ? error.message : 'Unknown error'}`;
+            messages[idx].isStreaming = false;
+          }
+        }
+ } finally {
+ isStreaming = false;
+ abortController = null;
+ }
+ },
+
+ stopStreaming() {
+ if (abortController) {
+ abortController.abort();
+ }
+ },
+
+ clearMessages() {
+ messages = [];
+ isStreaming = false;
+ },
+
+ exportMessages(): string {
+ return JSON.stringify(
+ {
+ exported: new Date().toISOString(),
+ settings: {
+ model: settingsStore.model,
+ temperature: settingsStore.temperature,
+ maxTokens: settingsStore.maxTokens,
+ topP: settingsStore.topP,
+ systemPrompt: settingsStore.systemPrompt,
+ },
+ messages: messages.map((m) => ({
+ role: m.role,
+ content: m.content,
+ timestamp: m.timestamp,
+ model: m.model,
+ })),
+ },
+ null,
+ 2
+ );
+ },
+ };
+}
+
+export const chatStore = createChatStore();
diff --git a/services/llm-playground/src/lib/stores/models.svelte.ts b/services/llm-playground/src/lib/stores/models.svelte.ts
new file mode 100644
index 000000000..598bcc463
--- /dev/null
+++ b/services/llm-playground/src/lib/stores/models.svelte.ts
@@ -0,0 +1,90 @@
+import type { Model, Provider } from '$lib/types';
+import { getModels } from '$lib/api/llm';
+
+interface GroupedModels {
+ provider: Provider;
+ label: string;
+ models: Model[];
+}
+
+function createModelsStore() {
+  let models = $state<Model[]>([]);
+ let loading = $state(false);
+  let error = $state<string | null>(null);
+
+ const groupedModels = $derived.by(() => {
+    const groups: Record<Provider, Model[]> = {
+ ollama: [],
+ openrouter: [],
+ groq: [],
+ together: [],
+ };
+
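+    // Model ids are namespaced as `<provider>/<model>`, so a prefix match
+    // is enough to bucket them per provider.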
+ for (const model of models) {
+ const id = model.id;
+ if (id.startsWith('ollama/')) {
+ groups.ollama.push(model);
+ } else if (id.startsWith('openrouter/')) {
+ groups.openrouter.push(model);
+ } else if (id.startsWith('groq/')) {
+ groups.groq.push(model);
+ } else if (id.startsWith('together/')) {
+ groups.together.push(model);
+ }
+ }
+
+ const result: GroupedModels[] = [];
+    const labels: Record<Provider, string> = {
+ ollama: 'Ollama (Local)',
+ openrouter: 'OpenRouter',
+ groq: 'Groq',
+ together: 'Together AI',
+ };
+
+ for (const [provider, providerModels] of Object.entries(groups)) {
+ if (providerModels.length > 0) {
+ result.push({
+ provider: provider as Provider,
+ label: labels[provider as Provider],
+ models: providerModels.sort((a, b) => a.id.localeCompare(b.id)),
+ });
+ }
+ }
+
+ return result;
+ });
+
+ return {
+ get models() {
+ return models;
+ },
+ get loading() {
+ return loading;
+ },
+ get error() {
+ return error;
+ },
+ get groupedModels() {
+ return groupedModels;
+ },
+
+ async loadModels() {
+ loading = true;
+ error = null;
+
+ try {
+ const response = await getModels();
+ models = response.data;
+ } catch (e) {
+ error = e instanceof Error ? e.message : 'Failed to load models';
+ models = [];
+ } finally {
+ loading = false;
+ }
+ },
+ };
+}
+
+export const modelsStore = createModelsStore();
diff --git a/services/llm-playground/src/lib/stores/settings.svelte.ts b/services/llm-playground/src/lib/stores/settings.svelte.ts
new file mode 100644
index 000000000..c2f0abf26
--- /dev/null
+++ b/services/llm-playground/src/lib/stores/settings.svelte.ts
@@ -0,0 +1,87 @@
+import type { Settings } from '$lib/types';
+import { browser } from '$app/environment';
+
+const STORAGE_KEY = 'llm-playground-settings';
+
+const defaultSettings: Settings = {
+ model: 'ollama/llama3.2:3b',
+ temperature: 0.7,
+ maxTokens: 2048,
+ topP: 1.0,
+ systemPrompt: 'You are a helpful AI assistant.',
+};
+
+function loadSettings(): Settings {
+ if (!browser) return defaultSettings;
+
+ try {
+ const stored = localStorage.getItem(STORAGE_KEY);
+ if (stored) {
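+      // Spread over defaults so fields added in newer versions still get
+      // sensible values for users with an older stored payload.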
+ return { ...defaultSettings, ...JSON.parse(stored) };
+ }
+ } catch {
+ // Ignore parse errors
+ }
+ return defaultSettings;
+}
+
+function saveSettings(settings: Settings): void {
+ if (!browser) return;
+ localStorage.setItem(STORAGE_KEY, JSON.stringify(settings));
+}
+
+function createSettingsStore() {
+ let settings = $state(loadSettings());
+
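+  // Every setter writes straight through to localStorage so settings survive reloads.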
+ return {
+ get model() {
+ return settings.model;
+ },
+ set model(value: string) {
+ settings.model = value;
+ saveSettings(settings);
+ },
+
+ get temperature() {
+ return settings.temperature;
+ },
+ set temperature(value: number) {
+ settings.temperature = value;
+ saveSettings(settings);
+ },
+
+ get maxTokens() {
+ return settings.maxTokens;
+ },
+ set maxTokens(value: number) {
+ settings.maxTokens = value;
+ saveSettings(settings);
+ },
+
+ get topP() {
+ return settings.topP;
+ },
+ set topP(value: number) {
+ settings.topP = value;
+ saveSettings(settings);
+ },
+
+ get systemPrompt() {
+ return settings.systemPrompt;
+ },
+ set systemPrompt(value: string) {
+ settings.systemPrompt = value;
+ saveSettings(settings);
+ },
+
+ reset() {
+ settings = { ...defaultSettings };
+ saveSettings(settings);
+ },
+ };
+}
+
+export const settingsStore = createSettingsStore();
diff --git a/services/llm-playground/src/lib/types/index.ts b/services/llm-playground/src/lib/types/index.ts
new file mode 100644
index 000000000..f5591c517
--- /dev/null
+++ b/services/llm-playground/src/lib/types/index.ts
@@ -0,0 +1,91 @@
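+// Request and response shapes for the OpenAI-compatible API exposed by the
+// mana-llm service (/health, /v1/models, /v1/chat/completions).
+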
+export interface Message {
+ role: 'system' | 'user' | 'assistant';
+ content: string;
+}
+
+export interface ChatCompletionRequest {
+ model: string;
+ messages: Message[];
+ temperature?: number;
+ max_tokens?: number;
+ top_p?: number;
+ stream?: boolean;
+}
+
+export interface ChatCompletionChoice {
+ index: number;
+ message: Message;
+ finish_reason: string | null;
+}
+
+export interface ChatCompletionResponse {
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ choices: ChatCompletionChoice[];
+ usage?: {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ };
+}
+
+export interface StreamDelta {
+ role?: string;
+ content?: string;
+}
+
+export interface StreamChoice {
+ index: number;
+ delta: StreamDelta;
+ finish_reason: string | null;
+}
+
+export interface StreamChunk {
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ choices: StreamChoice[];
+}
+
+export interface Model {
+ id: string;
+ object: string;
+ created: number;
+ owned_by: string;
+}
+
+export interface ModelsResponse {
+ object: string;
+ data: Model[];
+}
+
+export interface HealthResponse {
+ status: string;
+ timestamp: string;
+ version?: string;
+}
+
+export interface ChatMessage {
+ id: string;
+ role: 'user' | 'assistant';
+ content: string;
+ timestamp: Date;
+ model?: string;
+ isStreaming?: boolean;
+}
+
+export interface Settings {
+ model: string;
+ temperature: number;
+ maxTokens: number;
+ topP: number;
+ systemPrompt: string;
+}
+
+export type Provider = 'ollama' | 'openrouter' | 'groq' | 'together';
diff --git a/services/llm-playground/src/routes/+layout.svelte b/services/llm-playground/src/routes/+layout.svelte
new file mode 100644
index 000000000..0b8a3f4c7
--- /dev/null
+++ b/services/llm-playground/src/routes/+layout.svelte
@@ -0,0 +1,13 @@
+
+
+
+ {@render children()}
+
diff --git a/services/llm-playground/src/routes/+page.svelte b/services/llm-playground/src/routes/+page.svelte
new file mode 100644
index 000000000..fc758c74f
--- /dev/null
+++ b/services/llm-playground/src/routes/+page.svelte
@@ -0,0 +1,21 @@
+
+
+
+ LLM Playground | mana-llm
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/services/llm-playground/static/favicon.png b/services/llm-playground/static/favicon.png
new file mode 100644
index 000000000..d831d66d7
Binary files /dev/null and b/services/llm-playground/static/favicon.png differ
diff --git a/services/llm-playground/svelte.config.js b/services/llm-playground/svelte.config.js
new file mode 100644
index 000000000..fc92816a8
--- /dev/null
+++ b/services/llm-playground/svelte.config.js
@@ -0,0 +1,12 @@
+import adapter from '@sveltejs/adapter-node';
+import { vitePreprocess } from '@sveltejs/vite-plugin-svelte';
+
+/** @type {import('@sveltejs/kit').Config} */
+const config = {
+ preprocess: vitePreprocess(),
+ kit: {
+ adapter: adapter(),
+ },
+};
+
+export default config;
diff --git a/services/llm-playground/tsconfig.json b/services/llm-playground/tsconfig.json
new file mode 100644
index 000000000..a8f10c8e3
--- /dev/null
+++ b/services/llm-playground/tsconfig.json
@@ -0,0 +1,14 @@
+{
+ "extends": "./.svelte-kit/tsconfig.json",
+ "compilerOptions": {
+ "allowJs": true,
+ "checkJs": true,
+ "esModuleInterop": true,
+ "forceConsistentCasingInFileNames": true,
+ "resolveJsonModule": true,
+ "skipLibCheck": true,
+ "sourceMap": true,
+ "strict": true,
+ "moduleResolution": "bundler"
+ }
+}
diff --git a/services/llm-playground/vite.config.ts b/services/llm-playground/vite.config.ts
new file mode 100644
index 000000000..138c229a6
--- /dev/null
+++ b/services/llm-playground/vite.config.ts
@@ -0,0 +1,7 @@
+import tailwindcss from '@tailwindcss/vite';
+import { sveltekit } from '@sveltejs/kit/vite';
+import { defineConfig } from 'vite';
+
+export default defineConfig({
+ plugins: [tailwindcss(), sveltekit()],
+});