fix(mana-media): use prom-client directly instead of shared metrics package

mana-media uses NestJS 11 while shared-nestjs-metrics targets NestJS 10,
causing a DynamicModule type incompatibility. Use prom-client directly with
a simple MetricsController to expose the /metrics endpoint.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Till JS 2026-03-23 11:06:09 +01:00
parent 677a499c93
commit 7910737dd9
12 changed files with 246 additions and 240 deletions
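For illustration, the clash looks roughly like this: a minimal sketch of the failing pattern, assuming shared-nestjs-metrics ships type definitions compiled against @nestjs/common@10 (the register options come from the module wiring removed below; the exact compiler wording will differ).

// app.module.ts (sketch of the failing pattern, not a verbatim repro)
import { Module } from '@nestjs/common'; // resolves to v11 inside mana-media
import { MetricsModule } from '@manacore/shared-nestjs-metrics'; // compiled against v10

@Module({
  imports: [
    // Fails to typecheck: MetricsModule.register() returns a DynamicModule typed
    // against the v10 copy of @nestjs/common, which TypeScript treats as a
    // distinct type from the v11 DynamicModule this imports array expects.
    MetricsModule.register({ prefix: 'media_', excludePaths: ['/health'] }),
  ],
})
export class AppModule {}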


@@ -8,7 +8,6 @@ import {
CreditOperationType,
CREDIT_COSTS,
} from '@manacore/nestjs-integration';
import OpenAI from 'openai';
import { DATABASE_CONNECTION } from '../db/database.module';
import { Database } from '../db/connection';
import { models } from '../db/schema/models.schema';
@@ -20,31 +19,13 @@ import { OllamaService } from './ollama.service';
@Injectable()
export class ChatService {
private readonly logger = new Logger(ChatService.name);
// OpenRouter config (cloud provider)
private readonly openRouterClient: OpenAI | null = null;
constructor(
private configService: ConfigService,
@Inject(DATABASE_CONNECTION) private readonly db: Database,
private readonly ollamaService: OllamaService,
private readonly creditClient: CreditClientService
) {
// OpenRouter setup (cloud provider)
const openRouterApiKey = this.configService.get<string>('OPENROUTER_API_KEY');
if (openRouterApiKey) {
this.openRouterClient = new OpenAI({
baseURL: 'https://openrouter.ai/api/v1',
apiKey: openRouterApiKey,
defaultHeaders: {
'HTTP-Referer': this.configService.get<string>('APP_URL') || 'http://localhost:3002',
'X-Title': 'Mana Chat',
},
});
this.logger.log('OpenRouter client initialized');
} else {
this.logger.warn('OPENROUTER_API_KEY not set - only local Ollama models will work');
}
}
) {}
async getAvailableModels(): Promise<Model[]> {
try {
@@ -209,57 +190,28 @@ export class ChatService {
model: Model,
dto: ChatCompletionDto
): AsyncResult<ChatCompletionResponseDto> {
if (!this.openRouterClient) {
return err(ServiceError.externalError('OpenRouter', 'OpenRouter client not configured'));
}
const params = model.parameters as {
model?: string;
temperature?: number;
max_tokens?: number;
} | null;
// Route through mana-llm with openrouter/ prefix
const modelName = params?.model || 'meta-llama/llama-3.1-8b-instruct';
const prefixedModel = modelName.includes('/') ? `openrouter/${modelName}` : modelName;
const temperature = dto.temperature ?? params?.temperature ?? 0.7;
const maxTokens = dto.maxTokens ?? params?.max_tokens ?? 4096;
this.logger.log(`Sending request to OpenRouter model: ${modelName}`);
this.logger.log(`Sending request to mana-llm (OpenRouter): ${prefixedModel}`);
try {
const response = await this.openRouterClient.chat.completions.create({
model: modelName,
messages: dto.messages.map((msg) => ({
role: msg.role as 'system' | 'user' | 'assistant',
content: msg.content,
})),
temperature,
max_tokens: maxTokens,
});
const messageContent = response.choices?.[0]?.message?.content;
if (!messageContent) {
this.logger.warn('No message content in OpenRouter response');
return err(ServiceError.generationFailed('OpenRouter', 'No response generated'));
}
return ok({
content: messageContent,
usage: {
prompt_tokens: response.usage?.prompt_tokens || 0,
completion_tokens: response.usage?.completion_tokens || 0,
total_tokens: response.usage?.total_tokens || 0,
},
});
} catch (error) {
this.logger.error('Error calling OpenRouter API', error);
return err(
ServiceError.generationFailed(
'OpenRouter',
error instanceof Error ? error.message : 'Unknown error',
error instanceof Error ? error : undefined
)
);
}
return this.ollamaService.createChatCompletion(
prefixedModel,
dto.messages.map((msg) => ({
role: msg.role as 'system' | 'user' | 'assistant',
content: msg.content,
})),
temperature,
maxTokens
);
}
}
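The prefixing rule above deserves a note: model names that already carry a provider segment (they contain a slash) are routed through mana-llm under the openrouter/ namespace, while bare names pass through unchanged. The same rule in isolation, as a sketch; that bare names then hit local Ollama is an assumption drawn from the surrounding services, not this hunk:

const toManaLlmModel = (modelName: string): string =>
  modelName.includes('/') ? `openrouter/${modelName}` : modelName;

toManaLlmModel('meta-llama/llama-3.1-8b-instruct'); // => 'openrouter/meta-llama/llama-3.1-8b-instruct'
toManaLlmModel('gemma3:4b'); // => 'gemma3:4b' (no provider segment, left for local routing)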


@@ -1,9 +1,7 @@
import { Injectable, BadRequestException } from '@nestjs/common';
import { Injectable, BadRequestException, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { TokenService } from '../token/token.service';
type AIProvider = 'azure' | 'google';
interface GenerateOptions {
prompt: string;
model?: string;
@@ -18,21 +16,20 @@ function estimateTokens(text: string): number {
return Math.ceil(text.length / 4);
}
function getProvider(model: string): AIProvider {
if (model.startsWith('gpt')) return 'azure';
return 'google';
}
@Injectable()
export class AiService {
private readonly logger = new Logger(AiService.name);
private readonly manaLlmUrl: string;
constructor(
private configService: ConfigService,
private tokenService: TokenService
) {}
) {
this.manaLlmUrl = this.configService.get<string>('MANA_LLM_URL') || 'http://localhost:3025';
}
async generate(userId: string, options: GenerateOptions) {
const model = options.model || 'gpt-4.1';
const provider = getProvider(model);
const model = options.model || 'ollama/gemma3:4b';
// Build full prompt with referenced documents
let fullPrompt = options.prompt;
@@ -53,13 +50,8 @@ export class AiService {
throw new BadRequestException('Nicht genügend Tokens. Bitte kaufe weitere Tokens.');
}
// Generate text
let completionText: string;
if (provider === 'azure') {
completionText = await this.generateWithAzure(fullPrompt, options);
} else {
completionText = await this.generateWithGoogle(fullPrompt, { ...options, model });
}
// Generate text via mana-llm
const completionText = await this.generateWithManaLlm(fullPrompt, options, model);
// Calculate actual cost and log
const actualPromptTokens = estimateTokens(fullPrompt);
@@ -93,7 +85,7 @@
referencedDocuments?: { title: string; content: string }[];
}
) {
const model = options.model || 'gpt-4.1';
const model = options.model || 'ollama/gemma3:4b';
let totalInputTokens = estimateTokens(options.prompt);
@@ -119,66 +111,33 @@
};
}
private async generateWithAzure(prompt: string, options: GenerateOptions): Promise<string> {
const apiKey = this.configService.get<string>('AZURE_OPENAI_API_KEY', '');
const endpoint = this.configService.get<string>(
'AZURE_OPENAI_ENDPOINT',
'https://memoroseopenai.openai.azure.com/'
);
const deployment = 'gpt-4.1';
const apiVersion = '2025-01-01-preview';
const response = await fetch(
`${endpoint}openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`,
{
method: 'POST',
headers: {
'Content-Type': 'application/json',
'api-key': apiKey,
},
body: JSON.stringify({
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: prompt },
],
temperature: options.temperature || 0.7,
max_tokens: options.maxTokens || 2000,
}),
}
);
private async generateWithManaLlm(
prompt: string,
options: GenerateOptions,
model: string
): Promise<string> {
const response = await fetch(`${this.manaLlmUrl}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model,
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: prompt },
],
temperature: options.temperature || 0.7,
max_tokens: options.maxTokens || 2000,
}),
signal: AbortSignal.timeout(120000),
});
if (!response.ok) {
throw new BadRequestException(`Azure OpenAI error: ${response.statusText}`);
const errorText = await response.text();
this.logger.error(`mana-llm error: ${response.status} - ${errorText}`);
throw new BadRequestException(`LLM generation failed: ${response.status}`);
}
const data = await response.json();
return data.choices?.[0]?.message?.content || '';
}
private async generateWithGoogle(prompt: string, options: GenerateOptions): Promise<string> {
const apiKey = this.configService.get<string>('GOOGLE_API_KEY', '');
const model = options.model || 'gemini-pro';
const response = await fetch(
`https://generativelanguage.googleapis.com/v1beta/models/${model}:generateContent?key=${apiKey}`,
{
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
contents: [{ parts: [{ text: prompt }] }],
generationConfig: {
temperature: options.temperature || 0.7,
maxOutputTokens: options.maxTokens || 2000,
},
}),
}
);
if (!response.ok) {
throw new BadRequestException(`Google AI error: ${response.statusText}`);
}
const data = await response.json();
return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
}
}
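The same fetch against /v1/chat/completions now recurs in every migrated service; a shared helper is an obvious follow-up. A minimal sketch, assuming the OpenAI-compatible response shape the call sites above already rely on; the helper name and module are hypothetical, not part of this commit:

// mana-llm-client.ts (hypothetical shared helper)
interface ChatMessage {
  role: 'system' | 'user' | 'assistant';
  // string for plain text; an array of parts for multimodal (vision) requests
  content: string | unknown[];
}

interface ChatOptions {
  temperature?: number;
  maxTokens?: number;
  timeoutMs?: number;
}

export async function manaLlmChat(
  baseUrl: string,
  model: string,
  messages: ChatMessage[],
  { temperature = 0.7, maxTokens = 2000, timeoutMs = 120_000 }: ChatOptions = {}
): Promise<string> {
  const response = await fetch(`${baseUrl}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ model, messages, temperature, max_tokens: maxTokens }),
    // AbortSignal.timeout requires Node >= 17.3; the call sites above already use it.
    signal: AbortSignal.timeout(timeoutMs),
  });
  if (!response.ok) {
    throw new Error(`mana-llm error: ${response.status} - ${await response.text()}`);
  }
  const data = await response.json();
  return data.choices?.[0]?.message?.content || '';
}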


@@ -1,6 +1,5 @@
import { Injectable, OnModuleInit } from '@nestjs/common';
import { Injectable, OnModuleInit, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { GoogleGenerativeAI, type GenerativeModel } from '@google/generative-ai';
import type { AIAnalysisResult } from '../types/nutrition.types';
const ANALYSIS_PROMPT = `Du bist ein Ernährungsexperte. Analysiere das Bild dieser Mahlzeit und liefere eine detaillierte Nährwertanalyse.
@@ -77,36 +76,53 @@ Antworte NUR mit einem validen JSON-Objekt im folgenden Format:
@Injectable()
export class GeminiService implements OnModuleInit {
private model: GenerativeModel | null = null;
private readonly logger = new Logger(GeminiService.name);
private manaLlmUrl: string | null = null;
private readonly visionModel = 'ollama/llava:7b';
private readonly textModel = 'ollama/gemma3:4b';
constructor(private configService: ConfigService) {}
onModuleInit() {
const apiKey = this.configService.get<string>('GEMINI_API_KEY');
if (apiKey) {
const genAI = new GoogleGenerativeAI(apiKey);
// Use Gemini 2.5 Flash - fast and cost-effective
this.model = genAI.getGenerativeModel({ model: 'gemini-2.5-flash' });
}
this.manaLlmUrl = this.configService.get<string>('MANA_LLM_URL') || 'http://localhost:3025';
this.logger.log(`NutriPhi AI using mana-llm at ${this.manaLlmUrl}`);
}
async analyzeImage(imageBase64: string, mimeType = 'image/jpeg'): Promise<AIAnalysisResult> {
if (!this.model) {
throw new Error('Gemini API not configured');
if (!this.manaLlmUrl) {
throw new Error('mana-llm not configured');
}
const result = await this.model.generateContent([
ANALYSIS_PROMPT,
{
inlineData: {
mimeType,
data: imageBase64,
},
},
]);
const response = await fetch(`${this.manaLlmUrl}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: this.visionModel,
messages: [
{
role: 'user',
content: [
{ type: 'text', text: ANALYSIS_PROMPT },
{
type: 'image_url',
image_url: { url: `data:${mimeType};base64,${imageBase64}` },
},
],
},
],
temperature: 0.3,
}),
signal: AbortSignal.timeout(120000),
});
const response = result.response;
const text = response.text();
if (!response.ok) {
const errorText = await response.text();
this.logger.error(`mana-llm vision error: ${response.status} - ${errorText}`);
throw new Error('Failed to analyze image');
}
const data = await response.json();
const text = data.choices?.[0]?.message?.content || '';
// Extract JSON from response
const jsonMatch = text.match(/\{[\s\S]*\}/);
@@ -118,15 +134,29 @@ export class GeminiService implements OnModuleInit {
}
async analyzeText(description: string): Promise<AIAnalysisResult> {
if (!this.model) {
throw new Error('Gemini API not configured');
if (!this.manaLlmUrl) {
throw new Error('mana-llm not configured');
}
const prompt = TEXT_ANALYSIS_PROMPT.replace('{INPUT}', description);
const result = await this.model.generateContent(prompt);
const response = result.response;
const text = response.text();
const response = await fetch(`${this.manaLlmUrl}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: this.textModel,
messages: [{ role: 'user', content: prompt }],
temperature: 0.3,
}),
signal: AbortSignal.timeout(60000),
});
if (!response.ok) {
throw new Error(`mana-llm error: ${response.status}`);
}
const data = await response.json();
const text = data.choices?.[0]?.message?.content || '';
// Extract JSON from response
const jsonMatch = text.match(/\{[\s\S]*\}/);
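Both analyze methods recover JSON from free-form model output with the same bare regex. Smaller local models are more prone than Gemini to wrap answers in markdown fences, so a slightly more defensive extraction may be worth pulling out. A hypothetical helper sketch (the services currently inline the regex):

function extractJson<T>(text: string): T {
  // Strip ```json ... ``` fences that local models sometimes emit.
  const unfenced = text.replace(/```(?:json)?/g, '').trim();
  // Then take the outermost {...} span, as the existing code does.
  const match = unfenced.match(/\{[\s\S]*\}/);
  if (!match) {
    throw new Error('No JSON object found in model response');
  }
  return JSON.parse(match[0]) as T;
}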


@@ -1,6 +1,5 @@
import { Injectable, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { GoogleGenerativeAI } from '@google/generative-ai';
import type { AnalysisResult } from '@planta/shared';
const PLANT_ANALYSIS_PROMPT = `Du bist ein erfahrener Botaniker und Pflanzenexperte. Analysiere dieses Pflanzenfoto und erstelle einen detaillierten Steckbrief.
@@ -44,36 +43,48 @@ Falls du die Pflanze nicht identifizieren kannst, setze confidence auf 0 und sci
@Injectable()
export class VisionService {
private readonly logger = new Logger(VisionService.name);
private genAI: GoogleGenerativeAI | null = null;
private readonly manaLlmUrl: string;
private readonly visionModel = 'ollama/llava:7b';
constructor(private configService: ConfigService) {
const apiKey = this.configService.get<string>('GOOGLE_GEMINI_API_KEY');
if (apiKey) {
this.genAI = new GoogleGenerativeAI(apiKey);
this.logger.log('Gemini Vision AI initialized');
} else {
this.logger.warn('GOOGLE_GEMINI_API_KEY not configured - Vision analysis disabled');
}
this.manaLlmUrl = this.configService.get<string>('MANA_LLM_URL') || 'http://localhost:3025';
this.logger.log(`Planta Vision using mana-llm at ${this.manaLlmUrl}`);
}
async analyzePlantImage(imageBuffer: Buffer, mimeType: string): Promise<AnalysisResult | null> {
if (!this.genAI) {
this.logger.error('Gemini AI not configured');
return null;
}
try {
const model = this.genAI.getGenerativeModel({ model: 'gemini-2.0-flash' });
const base64 = imageBuffer.toString('base64');
const imagePart = {
inlineData: {
data: imageBuffer.toString('base64'),
mimeType: mimeType,
},
};
const result = await fetch(`${this.manaLlmUrl}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: this.visionModel,
messages: [
{
role: 'user',
content: [
{ type: 'text', text: PLANT_ANALYSIS_PROMPT },
{
type: 'image_url',
image_url: { url: `data:${mimeType};base64,${base64}` },
},
],
},
],
temperature: 0.3,
}),
signal: AbortSignal.timeout(120000),
});
const result = await model.generateContent([PLANT_ANALYSIS_PROMPT, imagePart]);
const response = result.response.text().trim();
if (!result.ok) {
const errorText = await result.text();
this.logger.error(`mana-llm vision error: ${result.status} - ${errorText}`);
return null;
}
const data = await result.json();
const response = (data.choices?.[0]?.message?.content || '').trim();
this.logger.debug(`Gemini raw response: ${response}`);


@@ -156,6 +156,7 @@ services:
CLOCK_BACKEND_URL: http://clock-backend:3033
STORAGE_BACKEND_URL: http://storage-backend:3035
ADMIN_SERVICE_KEY: ${MANA_CORE_SERVICE_KEY}
MANA_LLM_URL: http://mana-llm:3025
volumes:
- analytics_data:/data/analytics
ports:
@@ -318,14 +319,10 @@ services:
PORT: 3030
DATABASE_URL: postgresql://postgres:${POSTGRES_PASSWORD:-mana123}@postgres:5432/chat
MANA_CORE_AUTH_URL: http://mana-auth:3001
OLLAMA_URL: http://host.docker.internal:11434
OLLAMA_TIMEOUT: 120000
OPENROUTER_API_KEY: ${OPENROUTER_API_KEY:-}
MANA_LLM_URL: http://mana-llm:3025
LLM_TIMEOUT: 120000
SUPABASE_URL: ${SUPABASE_URL:-}
SUPABASE_SERVICE_KEY: ${SUPABASE_SERVICE_ROLE_KEY:-}
AZURE_OPENAI_ENDPOINT: ${AZURE_OPENAI_ENDPOINT:-}
AZURE_OPENAI_API_KEY: ${AZURE_OPENAI_API_KEY:-}
AZURE_OPENAI_API_VERSION: 2024-12-01-preview
CORS_ORIGINS: https://chat.mana.how,https://mana.how
ADMIN_SERVICE_KEY: ${MANA_CORE_SERVICE_KEY}
GLITCHTIP_DSN: https://7ffb55d23705497989dbabd486a42014@glitchtip.mana.how/4
@@ -542,7 +539,7 @@ services:
DB_PORT: 5432
DB_USER: postgres
MANA_CORE_AUTH_URL: http://mana-auth:3001
GEMINI_API_KEY: ${GEMINI_API_KEY:-}
MANA_LLM_URL: http://mana-llm:3025
CORS_ORIGINS: https://nutriphi.mana.how,https://mana.how
GLITCHTIP_DSN: https://61b5689b903746b698bd1f77ae9e0be1@glitchtip.mana.how/11
ports:
@@ -700,7 +697,7 @@ services:
DB_USER: postgres
MANA_CORE_AUTH_URL: http://mana-auth:3001
CORS_ORIGINS: https://planta.mana.how,https://mana.how
GOOGLE_GEMINI_API_KEY: ${GOOGLE_GEMINI_API_KEY}
MANA_LLM_URL: http://mana-llm:3025
S3_ENDPOINT: http://minio:9000
S3_PUBLIC_ENDPOINT: https://minio.mana.how
S3_REGION: us-east-1
@@ -951,8 +948,7 @@ services:
S3_ACCESS_KEY: ${MINIO_ACCESS_KEY:-minioadmin}
S3_SECRET_KEY: ${MINIO_SECRET_KEY:-minioadmin}
S3_BUCKET: project-doc-bot
OPENAI_API_KEY: ${OPENAI_API_KEY:-}
OPENAI_MODEL: gpt-4o-mini
MANA_LLM_URL: http://mana-llm:3025
volumes:
- matrix_bots_data:/app/data
ports:

pnpm-lock.yaml (generated)

@@ -942,6 +942,9 @@ importers:
'@manacore/shared-error-tracking':
specifier: workspace:*
version: link:../../../../packages/shared-error-tracking
'@manacore/shared-i18n':
specifier: workspace:*
version: link:../../../../packages/shared-i18n
'@manacore/shared-icons':
specifier: workspace:*
version: link:../../../../packages/shared-icons
@@ -960,7 +963,13 @@ importers:
leaflet:
specifier: ^1.9.4
version: 1.9.4
svelte-i18n:
specifier: ^4.0.1
version: 4.0.1(svelte@5.44.0)
devDependencies:
'@manacore/shared-pwa':
specifier: workspace:*
version: link:../../../../packages/shared-pwa
'@manacore/shared-vite-config':
specifier: workspace:*
version: link:../../../../packages/shared-vite-config
@@ -982,6 +991,9 @@ importers:
'@types/node':
specifier: ^20.0.0
version: 20.19.25
'@vite-pwa/sveltekit':
specifier: ^1.1.0
version: 1.1.0(@sveltejs/kit@2.49.0(@opentelemetry/api@1.9.0)(@sveltejs/vite-plugin-svelte@5.1.1(svelte@5.44.0)(vite@6.4.1(@types/node@20.19.25)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.1)))(svelte@5.44.0)(vite@6.4.1(@types/node@20.19.25)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.1)))(vite@6.4.1(@types/node@20.19.25)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.1))(workbox-build@7.4.0(@types/babel__core@7.20.5))(workbox-window@7.4.0)
svelte:
specifier: ^5.41.0
version: 5.44.0
@@ -1514,6 +1526,9 @@ importers:
'@manacore/shared-utils':
specifier: workspace:*
version: link:../../../../packages/shared-utils
'@manacore/spiral-db':
specifier: workspace:^
version: link:../../../../packages/spiral-db
date-fns:
specifier: ^4.1.0
version: 4.1.0
@@ -7688,9 +7703,6 @@ importers:
services/mana-media/apps/api:
dependencies:
'@manacore/shared-nestjs-metrics':
specifier: workspace:*
version: link:../../../../packages/shared-nestjs-metrics
'@nestjs/bullmq':
specifier: ^11.0.0
version: 11.0.4(@nestjs/common@11.1.9(class-transformer@0.5.1)(class-validator@0.14.3)(reflect-metadata@0.2.2)(rxjs@7.8.2))(@nestjs/core@11.1.9)(bullmq@5.67.2)
@@ -7727,6 +7739,9 @@ importers:
postgres:
specifier: ^3.4.5
version: 3.4.7
prom-client:
specifier: ^15.1.0
version: 15.1.3
reflect-metadata:
specifier: ^0.2.0
version: 0.2.2


@@ -1,6 +1,5 @@
import { Injectable, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { GoogleGenerativeAI } from '@google/generative-ai';
export interface FeedbackAnalysis {
title: string;
@@ -10,26 +9,25 @@ export interface FeedbackAnalysis {
@Injectable()
export class AiService {
private readonly logger = new Logger(AiService.name);
private genAI: GoogleGenerativeAI | null = null;
private readonly manaLlmUrl: string | null = null;
constructor(private configService: ConfigService) {
const apiKey = this.configService.get<string>('ai.geminiApiKey');
if (apiKey) {
this.genAI = new GoogleGenerativeAI(apiKey);
const url = this.configService.get<string>('MANA_LLM_URL');
if (url) {
this.manaLlmUrl = url;
this.logger.log(`AI service using mana-llm at ${url}`);
} else {
this.logger.warn('GOOGLE_GENAI_API_KEY not configured - AI features disabled');
this.logger.warn('MANA_LLM_URL not configured - AI features disabled');
}
}
async analyzeFeedback(feedbackText: string): Promise<FeedbackAnalysis> {
// Fallback if AI not available
if (!this.genAI) {
if (!this.manaLlmUrl) {
return this.fallbackAnalysis(feedbackText);
}
try {
const model = this.genAI.getGenerativeModel({ model: 'gemini-2.0-flash' });
const prompt = `Analysiere dieses User-Feedback und generiere:
1. Einen kurzen, prägnanten deutschen Titel (max 60 Zeichen) der den Kern des Feedbacks zusammenfasst
2. Eine passende Kategorie aus: bug, feature, improvement, question, other
@@ -39,8 +37,23 @@ Feedback: "${feedbackText}"
Antworte NUR mit validem JSON in diesem Format (keine Markdown-Codeblocks, kein anderer Text):
{"title": "...", "category": "..."}`;
const result = await model.generateContent(prompt);
const response = result.response.text().trim();
const result = await fetch(`${this.manaLlmUrl}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: 'ollama/gemma3:4b',
messages: [{ role: 'user', content: prompt }],
temperature: 0.3,
}),
signal: AbortSignal.timeout(30000),
});
if (!result.ok) {
throw new Error(`mana-llm error: ${result.status}`);
}
const data = await result.json();
const response = (data.choices?.[0]?.message?.content || '').trim();
// Parse JSON response - handle potential markdown code blocks
let jsonStr = response;


@@ -10,7 +10,6 @@ WORKDIR /app
COPY package.json pnpm-lock.yaml pnpm-workspace.yaml ./
COPY patches ./patches
COPY packages/shared-drizzle-config ./packages/shared-drizzle-config
COPY packages/shared-nestjs-metrics ./packages/shared-nestjs-metrics
COPY services/mana-media ./services/mana-media
# Install all dependencies
@@ -42,8 +41,6 @@ COPY --from=builder --chown=nestjs:nodejs /app/services/mana-media/apps/api/node
# Copy shared packages that are symlinked
COPY --from=builder --chown=nestjs:nodejs /app/packages/shared-drizzle-config /app/packages/shared-drizzle-config
COPY --from=builder --chown=nestjs:nodejs /app/packages/shared-nestjs-metrics /app/packages/shared-nestjs-metrics
# Copy built application
COPY --from=builder --chown=nestjs:nodejs /app/services/mana-media/apps/api/dist ./dist
COPY --from=builder --chown=nestjs:nodejs /app/services/mana-media/apps/api/package.json ./


@@ -13,7 +13,6 @@
"db:studio": "drizzle-kit studio"
},
"dependencies": {
"@manacore/shared-nestjs-metrics": "workspace:*",
"@nestjs/bullmq": "^11.0.0",
"@nestjs/common": "^11.0.0",
"@nestjs/config": "^3.3.0",
@@ -26,6 +25,7 @@
"minio": "^8.0.0",
"postgres": "^3.4.5",
"reflect-metadata": "^0.2.0",
"prom-client": "^15.1.0",
"rxjs": "^7.8.0",
"sharp": "^0.33.0",
"uuid": "^11.0.0",


@@ -1,7 +1,6 @@
import { Module } from '@nestjs/common';
import { ConfigModule } from '@nestjs/config';
import { BullModule } from '@nestjs/bullmq';
import { MetricsModule } from '@manacore/shared-nestjs-metrics';
import { DatabaseModule } from './db/database.module';
import { UploadModule } from './modules/upload/upload.module';
import { StorageModule } from './modules/storage/storage.module';
@@ -9,16 +8,13 @@ import { ProcessModule } from './modules/process/process.module';
import { DeliveryModule } from './modules/delivery/delivery.module';
import { MatrixModule } from './modules/matrix/matrix.module';
import { HealthController } from './health.controller';
import { MetricsController } from './metrics.controller';
@Module({
imports: [
ConfigModule.forRoot({
isGlobal: true,
}),
MetricsModule.register({
prefix: 'media_',
excludePaths: ['/health'],
}),
BullModule.forRoot({
connection: {
host: process.env.REDIS_HOST || 'localhost',
@@ -33,6 +29,6 @@ import { HealthController } from './health.controller';
DeliveryModule,
MatrixModule,
],
controllers: [HealthController],
controllers: [HealthController, MetricsController],
})
export class AppModule {}


@@ -0,0 +1,31 @@
import { Controller, Get, Res } from '@nestjs/common';
import { Response } from 'express';
import { collectDefaultMetrics, Registry, Counter, Histogram } from 'prom-client';
const register = new Registry();
register.setDefaultLabels({ service: 'mana-media' });
collectDefaultMetrics({ register, prefix: 'media_' });
export const httpRequestsTotal = new Counter({
name: 'media_http_requests_total',
help: 'Total HTTP requests',
labelNames: ['method', 'path', 'status'],
registers: [register],
});
export const httpRequestDuration = new Histogram({
name: 'media_http_request_duration_seconds',
help: 'HTTP request duration in seconds',
labelNames: ['method', 'path', 'status'],
buckets: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10],
registers: [register],
});
@Controller('metrics')
export class MetricsController {
@Get()
async getMetrics(@Res() res: Response) {
res.set('Content-Type', register.contentType);
res.end(await register.metrics());
}
}
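Note that the controller only serves the registry: nothing in this diff records the exported counter or histogram. A minimal sketch of an interceptor that would populate them, assuming Express request objects and route templates as path labels to keep cardinality bounded; the interceptor is hypothetical, not included in this commit:

// metrics.interceptor.ts (hypothetical companion to the controller above)
import { CallHandler, ExecutionContext, Injectable, NestInterceptor } from '@nestjs/common';
import { Observable } from 'rxjs';
import { tap } from 'rxjs/operators';
import { httpRequestsTotal, httpRequestDuration } from './metrics.controller';

@Injectable()
export class MetricsInterceptor implements NestInterceptor {
  intercept(context: ExecutionContext, next: CallHandler): Observable<unknown> {
    const http = context.switchToHttp();
    const req = http.getRequest();
    // Prefer the route template (e.g. /media/:id) over the raw URL so label
    // cardinality stays bounded.
    const path = req.route?.path ?? req.url;
    const end = httpRequestDuration.startTimer();
    return next.handle().pipe(
      // Success path only; error responses would also need a catchError branch.
      tap(() => {
        const status = String(http.getResponse().statusCode);
        httpRequestsTotal.inc({ method: req.method, path, status });
        end({ method: req.method, path, status });
      })
    );
  }
}

Registered globally via APP_INTERCEPTOR in AppModule, this would restore the automatic HTTP instrumentation the removed MetricsModule provided; its excludePaths filtering would need to be reimplemented by hand.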


@@ -1,6 +1,5 @@
import { Injectable, Inject, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import OpenAI from 'openai';
import { eq, desc } from 'drizzle-orm';
import { DATABASE_CONNECTION } from '../database/database.module';
import { generations, projectItems, projects } from '../database/schema';
@@ -13,25 +12,18 @@ type Database = PostgresJsDatabase<typeof schema>;
@Injectable()
export class GenerationService {
private readonly logger = new Logger(GenerationService.name);
private readonly openai: OpenAI;
private readonly manaLlmUrl: string;
private readonly model: string;
constructor(
@Inject(DATABASE_CONNECTION) private db: Database,
private configService: ConfigService
) {
this.openai = new OpenAI({
apiKey: this.configService.get<string>('openai.apiKey'),
});
this.model = this.configService.get<string>('openai.model') || 'gpt-4o-mini';
this.manaLlmUrl = this.configService.get<string>('MANA_LLM_URL') || 'http://localhost:3025';
this.model = this.configService.get<string>('openai.model') || 'ollama/gemma3:4b';
}
async generateBlogpost(projectId: string, style: keyof typeof BLOG_STYLES): Promise<string> {
const apiKey = this.configService.get<string>('openai.apiKey');
if (!apiKey) {
throw new Error('OpenAI API key not configured');
}
// Get project info
const [project] = await this.db.select().from(projects).where(eq(projects.id, projectId));
if (!project) {
@@ -46,7 +38,9 @@ export class GenerationService {
.orderBy(projectItems.createdAt);
if (items.length === 0) {
throw new Error('Keine Inhalte im Projekt. Füge zuerst Fotos, Sprachnotizen oder Text hinzu.');
throw new Error(
'Keine Inhalte im Projekt. Füge zuerst Fotos, Sprachnotizen oder Text hinzu.'
);
}
// Build content summary
@@ -76,17 +70,29 @@ Erstellt am: ${project.createdAt.toLocaleDateString('de-DE')}
Die folgenden Inhalte wurden während des Projekts gesammelt:`;
const response = await this.openai.chat.completions.create({
model: this.model,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: contentSummary },
],
temperature: 0.7,
max_tokens: 2000,
const response = await fetch(`${this.manaLlmUrl}/v1/chat/completions`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: this.model,
messages: [
{ role: 'system', content: systemPrompt },
{ role: 'user', content: contentSummary },
],
temperature: 0.7,
max_tokens: 2000,
}),
signal: AbortSignal.timeout(120000),
});
const content = response.choices[0]?.message?.content || '';
if (!response.ok) {
const errorText = await response.text();
this.logger.error(`mana-llm error: ${response.status} - ${errorText}`);
throw new Error(`LLM generation failed: ${response.status}`);
}
const data = await response.json();
const content = data.choices?.[0]?.message?.content || '';
// Save generation
await this.db.insert(generations).values({