diff --git a/apps/memoro/apps/audio-server/src/services/transcription.ts b/apps/memoro/apps/audio-server/src/services/transcription.ts index a93905e47..13973368d 100644 --- a/apps/memoro/apps/audio-server/src/services/transcription.ts +++ b/apps/memoro/apps/audio-server/src/services/transcription.ts @@ -1,451 +1,677 @@ -import { getAvailableSpeechServices, pickRandomService, type SpeechServiceConfig } from '../lib/azure.ts'; +import { + getAvailableSpeechServices, + pickRandomService, + type SpeechServiceConfig, +} from '../lib/azure.ts'; import { convertToAzureWav } from './ffmpeg.ts'; import { BatchTranscriptionService } from './batch.ts'; import * as path from 'path'; const CANDIDATE_LOCALES = [ - 'de-DE', - 'en-GB', - 'fr-FR', - 'it-IT', - 'es-ES', - 'sv-SE', - 'ru-RU', - 'nl-NL', - 'tr-TR', - 'pt-PT', + 'de-DE', + 'en-GB', + 'fr-FR', + 'it-IT', + 'es-ES', + 'sv-SE', + 'ru-RU', + 'nl-NL', + 'tr-TR', + 'pt-PT', ]; const TOTAL_TIMEOUT_MS = 1_200_000; // 20 minutes const FAST_TIMEOUT_MS = 1_200_000; // 20 minutes +// Self-hosted mana-stt service (WhisperX on GPU server) +const MANA_STT_URL = process.env.MANA_STT_URL || ''; +const MANA_STT_API_KEY = process.env.MANA_STT_API_KEY || ''; + interface TranscriptionResult { - transcript: string; - utterances: Array<{ - speaker: number; - text: string; - offset: number; - duration: number; - }>; - speakers: Record; - speakerMap: Record; - languages: string[]; - primary_language: string; + transcript: string; + utterances: Array<{ + speaker: number; + text: string; + offset: number; + duration: number; + }>; + speakers: Record; + speakerMap: Record; + languages: string[]; + primary_language: string; } interface TranscribeParams { - audioBuffer: Buffer; - audioPath: string; - memoId: string; - userId: string; - spaceId?: string; - recordingLanguages?: string[]; - enableDiarization?: boolean; - isAppend?: boolean; - recordingIndex?: number; - serviceKey: string; - serverUrl: string; + audioBuffer: Buffer; + audioPath: 
string; + memoId: string; + userId: string; + spaceId?: string; + recordingLanguages?: string[]; + enableDiarization?: boolean; + isAppend?: boolean; + recordingIndex?: number; + serviceKey: string; + serverUrl: string; } export class TranscriptionService { - private readonly batchService = new BatchTranscriptionService(); + private readonly batchService = new BatchTranscriptionService(); - async transcribeWithFallback(params: TranscribeParams): Promise { - const { audioBuffer, audioPath, memoId, userId, recordingLanguages, enableDiarization, isAppend, recordingIndex, serviceKey, serverUrl } = params; - const startTime = Date.now(); + async transcribeWithFallback(params: TranscribeParams): Promise { + const { + audioBuffer, + audioPath, + memoId, + userId, + recordingLanguages, + enableDiarization, + isAppend, + recordingIndex, + serviceKey, + serverUrl, + } = params; + const startTime = Date.now(); - const checkTimeout = (stage: string): void => { - const elapsed = Date.now() - startTime; - if (elapsed > TOTAL_TIMEOUT_MS) { - throw new Error(`Fallback chain timeout exceeded after ${elapsed}ms in stage: ${stage}`); - } - }; + const checkTimeout = (stage: string): void => { + const elapsed = Date.now() - startTime; + if (elapsed > TOTAL_TIMEOUT_MS) { + throw new Error(`Fallback chain timeout exceeded after ${elapsed}ms in stage: ${stage}`); + } + }; - const withTimeout = (promise: Promise, timeoutMs: number, label: string): Promise => { - return Promise.race([ - promise, - new Promise((_, reject) => - setTimeout(() => reject(new Error(`${label} timeout after ${timeoutMs}ms`)), timeoutMs), - ), - ]); - }; + const withTimeout = (promise: Promise, timeoutMs: number, label: string): Promise => { + return Promise.race([ + promise, + new Promise((_, reject) => + setTimeout(() => reject(new Error(`${label} timeout after ${timeoutMs}ms`)), timeoutMs) + ), + ]); + }; - try { - console.log(`[Transcription] Starting fallback chain for memo ${memoId} (${audioPath})`); + try { + 
console.log(`[Transcription] Starting fallback chain for memo ${memoId} (${audioPath})`); - // Attempt 1: Fast realtime transcription - try { - checkTimeout('initial-fast'); - const services = getAvailableSpeechServices(); - const service = pickRandomService(services); + // Attempt 0: Self-hosted mana-stt (WhisperX on GPU server) — primary + if (MANA_STT_URL) { + try { + checkTimeout('mana-stt'); + console.log(`[Transcription] Trying mana-stt (WhisperX) at ${MANA_STT_URL}`); - const wavBuffer = await convertToAzureWav(audioBuffer, path.extname(audioPath) || '.m4a'); + const result = await withTimeout( + this.performManaSTTTranscription( + audioBuffer, + audioPath, + recordingLanguages, + enableDiarization + ), + FAST_TIMEOUT_MS, + 'mana-stt transcription' + ); - const result = await withTimeout( - this.performRealtimeTranscription(wavBuffer, service, recordingLanguages, enableDiarization), - FAST_TIMEOUT_MS, - 'Fast transcription', - ); + await this.notifyServer( + memoId, + userId, + result, + 'fast', + serviceKey, + serverUrl, + isAppend, + recordingIndex + ); + console.log(`[Transcription] mana-stt (WhisperX) succeeded for memo ${memoId}`); + return; + } catch (manaSttError: unknown) { + const msg = manaSttError instanceof Error ? manaSttError.message : String(manaSttError); + console.warn(`[Transcription] mana-stt failed, falling back to Azure: ${msg}`); + } + } - await this.notifyServer(memoId, userId, result, 'fast', serviceKey, serverUrl, isAppend, recordingIndex); - console.log(`[Transcription] Fast transcription succeeded for memo ${memoId}`); - return; - } catch (fastError: unknown) { - const fastErrMsg = fastError instanceof Error ? 
fastError.message : String(fastError); - console.warn(`[Transcription] Fast route failed: ${fastErrMsg}`); + // Attempt 1: Fast realtime transcription (Azure) + try { + checkTimeout('initial-fast'); + const services = getAvailableSpeechServices(); + const service = pickRandomService(services); - // Attempt 2: Service retry with different Azure key (429 rate limit) - if (this.shouldRetryWithDifferentService(fastErrMsg)) { - try { - checkTimeout('service-retry'); - console.log(`[Transcription] Retrying with different Azure service key`); + const wavBuffer = await convertToAzureWav(audioBuffer, path.extname(audioPath) || '.m4a'); - const services = getAvailableSpeechServices(); - if (services.length > 1) { - const service = pickRandomService(services); - const wavBuffer = await convertToAzureWav(audioBuffer, path.extname(audioPath) || '.m4a'); - const result = await withTimeout( - this.performRealtimeTranscription(wavBuffer, service, recordingLanguages, enableDiarization), - FAST_TIMEOUT_MS, - 'Service retry transcription', - ); - await this.notifyServer(memoId, userId, result, 'fast', serviceKey, serverUrl, isAppend, recordingIndex); - console.log(`[Transcription] Service retry succeeded for memo ${memoId}`); - return; - } else { - console.warn(`[Transcription] Only one Azure service configured, skipping service retry`); - } - } catch (serviceRetryError: unknown) { - const msg = serviceRetryError instanceof Error ? 
serviceRetryError.message : String(serviceRetryError); - console.warn(`[Transcription] Service retry failed: ${msg}`); - } - } + const result = await withTimeout( + this.performRealtimeTranscription( + wavBuffer, + service, + recordingLanguages, + enableDiarization + ), + FAST_TIMEOUT_MS, + 'Fast transcription' + ); - // Attempt 3: FFmpeg conversion + retry (422 / format errors) - if (this.shouldRetryWithConversion(fastErrMsg)) { - try { - checkTimeout('conversion-retry'); - console.log(`[Transcription] Retrying with enhanced audio conversion`); + await this.notifyServer( + memoId, + userId, + result, + 'fast', + serviceKey, + serverUrl, + isAppend, + recordingIndex + ); + console.log(`[Transcription] Fast transcription succeeded for memo ${memoId}`); + return; + } catch (fastError: unknown) { + const fastErrMsg = fastError instanceof Error ? fastError.message : String(fastError); + console.warn(`[Transcription] Fast route failed: ${fastErrMsg}`); - const services = getAvailableSpeechServices(); - const service = pickRandomService(services); + // Attempt 2: Service retry with different Azure key (429 rate limit) + if (this.shouldRetryWithDifferentService(fastErrMsg)) { + try { + checkTimeout('service-retry'); + console.log(`[Transcription] Retrying with different Azure service key`); - // Force conversion even if already attempted — use explicit wav extension - const wavBuffer = await convertToAzureWav(audioBuffer, '.wav'); + const services = getAvailableSpeechServices(); + if (services.length > 1) { + const service = pickRandomService(services); + const wavBuffer = await convertToAzureWav( + audioBuffer, + path.extname(audioPath) || '.m4a' + ); + const result = await withTimeout( + this.performRealtimeTranscription( + wavBuffer, + service, + recordingLanguages, + enableDiarization + ), + FAST_TIMEOUT_MS, + 'Service retry transcription' + ); + await this.notifyServer( + memoId, + userId, + result, + 'fast', + serviceKey, + serverUrl, + isAppend, + recordingIndex + 
); + console.log(`[Transcription] Service retry succeeded for memo ${memoId}`); + return; + } else { + console.warn( + `[Transcription] Only one Azure service configured, skipping service retry` + ); + } + } catch (serviceRetryError: unknown) { + const msg = + serviceRetryError instanceof Error + ? serviceRetryError.message + : String(serviceRetryError); + console.warn(`[Transcription] Service retry failed: ${msg}`); + } + } - const result = await withTimeout( - this.performRealtimeTranscription(wavBuffer, service, recordingLanguages, enableDiarization), - FAST_TIMEOUT_MS, - 'Conversion retry transcription', - ); - await this.notifyServer(memoId, userId, result, 'fast', serviceKey, serverUrl, isAppend, recordingIndex); - console.log(`[Transcription] Conversion retry succeeded for memo ${memoId}`); - return; - } catch (conversionError: unknown) { - const msg = conversionError instanceof Error ? conversionError.message : String(conversionError); - console.warn(`[Transcription] Conversion retry failed: ${msg}. 
Falling back to batch.`); - } - } + // Attempt 3: FFmpeg conversion + retry (422 / format errors) + if (this.shouldRetryWithConversion(fastErrMsg)) { + try { + checkTimeout('conversion-retry'); + console.log(`[Transcription] Retrying with enhanced audio conversion`); - // Attempt 4: Azure batch transcription fallback - checkTimeout('batch-fallback'); - console.log(`[Transcription] Falling back to Azure Batch transcription for memo ${memoId}`); + const services = getAvailableSpeechServices(); + const service = pickRandomService(services); - try { - const services = getAvailableSpeechServices(); - const service = pickRandomService(services); - const batchResult = await this.batchService.createBatchJob( - audioBuffer, - userId, - service, - recordingLanguages, - enableDiarization, - ); - console.log(`[Transcription] Batch job created: ${batchResult.jobId} for memo ${memoId}`); - // Batch jobs complete asynchronously via webhook — no immediate notify here - return; - } catch (batchError: unknown) { - const msg = batchError instanceof Error ? batchError.message : String(batchError); - throw new Error(`All transcription methods failed. Batch error: ${msg}`); - } - } - } catch (error: unknown) { - const errorMsg = error instanceof Error ? 
error.message : String(error); - console.error(`[Transcription] All fallback attempts failed for memo ${memoId}: ${errorMsg}`); + // Force conversion even if already attempted — use explicit wav extension + const wavBuffer = await convertToAzureWav(audioBuffer, '.wav'); - await this.notifyServerError(memoId, userId, errorMsg, serviceKey, serverUrl); - } - } + const result = await withTimeout( + this.performRealtimeTranscription( + wavBuffer, + service, + recordingLanguages, + enableDiarization + ), + FAST_TIMEOUT_MS, + 'Conversion retry transcription' + ); + await this.notifyServer( + memoId, + userId, + result, + 'fast', + serviceKey, + serverUrl, + isAppend, + recordingIndex + ); + console.log(`[Transcription] Conversion retry succeeded for memo ${memoId}`); + return; + } catch (conversionError: unknown) { + const msg = + conversionError instanceof Error ? conversionError.message : String(conversionError); + console.warn(`[Transcription] Conversion retry failed: ${msg}. Falling back to batch.`); + } + } - async performRealtimeTranscription( - audioBuffer: Buffer, - speechService: SpeechServiceConfig, - languages?: string[], - diarization?: boolean, - ): Promise { - const definition: Record = { - wordLevelTimestampsEnabled: true, - punctuationMode: 'Automatic', - profanityFilterMode: 'None', - }; + // Attempt 4: Azure batch transcription fallback + checkTimeout('batch-fallback'); + console.log(`[Transcription] Falling back to Azure Batch transcription for memo ${memoId}`); - if (diarization !== false) { - definition['diarization'] = { - enabled: true, - maxSpeakers: 10, - }; - } + try { + const services = getAvailableSpeechServices(); + const service = pickRandomService(services); + const batchResult = await this.batchService.createBatchJob( + audioBuffer, + userId, + service, + recordingLanguages, + enableDiarization + ); + console.log(`[Transcription] Batch job created: ${batchResult.jobId} for memo ${memoId}`); + // Batch jobs complete asynchronously via 
webhook — no immediate notify here + return; + } catch (batchError: unknown) { + const msg = batchError instanceof Error ? batchError.message : String(batchError); + throw new Error(`All transcription methods failed. Batch error: ${msg}`); + } + } + } catch (error: unknown) { + const errorMsg = error instanceof Error ? error.message : String(error); + console.error(`[Transcription] All fallback attempts failed for memo ${memoId}: ${errorMsg}`); - const candidateLocales = - languages && languages.length > 0 ? languages : CANDIDATE_LOCALES; + await this.notifyServerError(memoId, userId, errorMsg, serviceKey, serverUrl); + } + } - definition['languageIdentification'] = { - candidateLocales, - }; + async performRealtimeTranscription( + audioBuffer: Buffer, + speechService: SpeechServiceConfig, + languages?: string[], + diarization?: boolean + ): Promise { + const definition: Record = { + wordLevelTimestampsEnabled: true, + punctuationMode: 'Automatic', + profanityFilterMode: 'None', + }; - console.log(`[Azure] Sending realtime transcription request to ${speechService.name}`); - console.log(`[Azure] Definition: ${JSON.stringify(definition)}`); + if (diarization !== false) { + definition['diarization'] = { + enabled: true, + maxSpeakers: 10, + }; + } - const formData = new FormData(); - formData.append('definition', JSON.stringify(definition)); + const candidateLocales = languages && languages.length > 0 ? 
languages : CANDIDATE_LOCALES; - const audioBlob = new Blob([audioBuffer], { type: 'audio/wav' }); - formData.append('audio', audioBlob, 'audio.wav'); + definition['languageIdentification'] = { + candidateLocales, + }; - const response = await fetch(`${speechService.endpoint}?api-version=2024-11-15`, { - method: 'POST', - headers: { - 'Ocp-Apim-Subscription-Key': speechService.key, - Accept: 'application/json', - }, - body: formData, - }); + console.log(`[Azure] Sending realtime transcription request to ${speechService.name}`); + console.log(`[Azure] Definition: ${JSON.stringify(definition)}`); - if (!response.ok) { - const errorText = await response.text(); + const formData = new FormData(); + formData.append('definition', JSON.stringify(definition)); - if (response.status === 429) { - const retryAfter = response.headers.get('retry-after') ?? 'n/a'; - const requestId = response.headers.get('x-ms-request-id') ?? 'n/a'; - const quotaReason = response.headers.get('x-ms-service-quota-reason') ?? 'n/a'; - console.error( - `[AZURE_429_ERROR] Rate limited on ${speechService.name} — retry-after: ${retryAfter}, request-id: ${requestId}, quota-reason: ${quotaReason}`, - ); - console.error(`[AZURE_429_ERROR] Body: ${errorText}`); - throw new Error(`[AZURE_429_ERROR] Azure Speech API rate limited (429): ${errorText}`); - } + const audioBlob = new Blob([audioBuffer], { type: 'audio/wav' }); + formData.append('audio', audioBlob, 'audio.wav'); - if (response.status === 422) { - const requestId = response.headers.get('x-ms-request-id') ?? 
'n/a'; - console.error( - `[AZURE_422_ERROR] Format error on ${speechService.name} — request-id: ${requestId}`, - ); - console.error(`[AZURE_422_ERROR] Body: ${errorText}`); - throw new Error(`[AZURE_422_ERROR] Azure Speech API format error (422): ${errorText}`); - } + const response = await fetch(`${speechService.endpoint}?api-version=2024-11-15`, { + method: 'POST', + headers: { + 'Ocp-Apim-Subscription-Key': speechService.key, + Accept: 'application/json', + }, + body: formData, + }); - throw new Error(`Azure Speech API error: ${response.status} - ${errorText}`); - } + if (!response.ok) { + const errorText = await response.text(); - const azureResult = (await response.json()) as Parameters[0]; - console.log(`[Azure] Transcription response received from ${speechService.name}`); - console.log(`[Azure] Phrase count: ${azureResult?.phrases?.length ?? 0}`); + if (response.status === 429) { + const retryAfter = response.headers.get('retry-after') ?? 'n/a'; + const requestId = response.headers.get('x-ms-request-id') ?? 'n/a'; + const quotaReason = response.headers.get('x-ms-service-quota-reason') ?? 'n/a'; + console.error( + `[AZURE_429_ERROR] Rate limited on ${speechService.name} — retry-after: ${retryAfter}, request-id: ${requestId}, quota-reason: ${quotaReason}` + ); + console.error(`[AZURE_429_ERROR] Body: ${errorText}`); + throw new Error(`[AZURE_429_ERROR] Azure Speech API rate limited (429): ${errorText}`); + } - return this.processTranscriptionResult(azureResult); - } + if (response.status === 422) { + const requestId = response.headers.get('x-ms-request-id') ?? 
'n/a'; + console.error( + `[AZURE_422_ERROR] Format error on ${speechService.name} — request-id: ${requestId}` + ); + console.error(`[AZURE_422_ERROR] Body: ${errorText}`); + throw new Error(`[AZURE_422_ERROR] Azure Speech API format error (422): ${errorText}`); + } - processTranscriptionResult(azureResult: { - phrases?: Array<{ - text?: string; - speaker?: number; - offsetMilliseconds?: number; - durationMilliseconds?: number; - locale?: string; - words?: unknown[]; - }>; - combinedPhrases?: Array<{ text?: string }>; - locale?: string; - }): TranscriptionResult { - let transcript = ''; - let primary_language = 'de-DE'; - let languages: string[] = ['de-DE']; + throw new Error(`Azure Speech API error: ${response.status} - ${errorText}`); + } - // Determine languages from phrase-level locale analysis (more accurate than top-level) - if (azureResult.phrases && azureResult.phrases.length > 0) { - const phraseCounts: Record = {}; - const charCounts: Record = {}; + const azureResult = (await response.json()) as Parameters< + typeof this.processTranscriptionResult + >[0]; + console.log(`[Azure] Transcription response received from ${speechService.name}`); + console.log(`[Azure] Phrase count: ${azureResult?.phrases?.length ?? 0}`); - for (const phrase of azureResult.phrases) { - if (phrase.locale) { - phraseCounts[phrase.locale] = (phraseCounts[phrase.locale] ?? 0) + 1; - charCounts[phrase.locale] = (charCounts[phrase.locale] ?? 0) + (phrase.text?.length ?? 0); - } - } + return this.processTranscriptionResult(azureResult); + } - const uniqueLanguages = Object.keys(phraseCounts); - if (uniqueLanguages.length > 0) { - // Pick primary by character count — more accurate than phrase count - primary_language = uniqueLanguages.reduce((best, lang) => - (charCounts[lang] ?? 0) > (charCounts[best] ?? 0) ? 
lang : best, - ); - languages = uniqueLanguages; - console.log(`[Transcription] Language detection: ${JSON.stringify(charCounts)}, primary: ${primary_language}`); - } - } else if (azureResult.locale) { - primary_language = azureResult.locale; - languages = [azureResult.locale]; - } + processTranscriptionResult(azureResult: { + phrases?: Array<{ + text?: string; + speaker?: number; + offsetMilliseconds?: number; + durationMilliseconds?: number; + locale?: string; + words?: unknown[]; + }>; + combinedPhrases?: Array<{ text?: string }>; + locale?: string; + }): TranscriptionResult { + let transcript = ''; + let primary_language = 'de-DE'; + let languages: string[] = ['de-DE']; - // Build transcript text - if (azureResult.combinedPhrases && azureResult.combinedPhrases.length > 0) { - transcript = azureResult.combinedPhrases[0]?.text ?? ''; - } else if (azureResult.phrases && azureResult.phrases.length > 0) { - transcript = azureResult.phrases.map((p) => p.text ?? '').join(' '); - } + // Determine languages from phrase-level locale analysis (more accurate than top-level) + if (azureResult.phrases && azureResult.phrases.length > 0) { + const phraseCounts: Record = {}; + const charCounts: Record = {}; - // Build utterances and speaker maps - const utterances: TranscriptionResult['utterances'] = []; - const speakerIdSet = new Set(); + for (const phrase of azureResult.phrases) { + if (phrase.locale) { + phraseCounts[phrase.locale] = (phraseCounts[phrase.locale] ?? 0) + 1; + charCounts[phrase.locale] = (charCounts[phrase.locale] ?? 0) + (phrase.text?.length ?? 0); + } + } - if (azureResult.phrases) { - for (const phrase of azureResult.phrases) { - if (phrase.speaker !== undefined && phrase.text) { - utterances.push({ - speaker: phrase.speaker, - text: phrase.text, - offset: phrase.offsetMilliseconds ?? 0, - duration: phrase.durationMilliseconds ?? 
0, - }); - speakerIdSet.add(phrase.speaker); - } - } - } + const uniqueLanguages = Object.keys(phraseCounts); + if (uniqueLanguages.length > 0) { + // Pick primary by character count — more accurate than phrase count + primary_language = uniqueLanguages.reduce((best, lang) => + (charCounts[lang] ?? 0) > (charCounts[best] ?? 0) ? lang : best + ); + languages = uniqueLanguages; + console.log( + `[Transcription] Language detection: ${JSON.stringify(charCounts)}, primary: ${primary_language}` + ); + } + } else if (azureResult.locale) { + primary_language = azureResult.locale; + languages = [azureResult.locale]; + } - // Sort by time - utterances.sort((a, b) => a.offset - b.offset); + // Build transcript text + if (azureResult.combinedPhrases && azureResult.combinedPhrases.length > 0) { + transcript = azureResult.combinedPhrases[0]?.text ?? ''; + } else if (azureResult.phrases && azureResult.phrases.length > 0) { + transcript = azureResult.phrases.map((p) => p.text ?? '').join(' '); + } - // Build speaker label maps - const speakers: Record = {}; - const speakerMap: Record = {}; + // Build utterances and speaker maps + const utterances: TranscriptionResult['utterances'] = []; + const speakerIdSet = new Set(); - for (const speakerId of speakerIdSet) { - const label = `Speaker ${speakerId}`; - speakers[String(speakerId)] = label; - speakerMap[label] = speakerId; - } + if (azureResult.phrases) { + for (const phrase of azureResult.phrases) { + if (phrase.speaker !== undefined && phrase.text) { + utterances.push({ + speaker: phrase.speaker, + text: phrase.text, + offset: phrase.offsetMilliseconds ?? 0, + duration: phrase.durationMilliseconds ?? 
0, + }); + speakerIdSet.add(phrase.speaker); + } + } + } - console.log( - `[Transcription] Processed: ${transcript.length} chars, ${utterances.length} utterances, ${speakerIdSet.size} speakers, lang: ${primary_language}`, - ); + // Sort by time + utterances.sort((a, b) => a.offset - b.offset); - return { transcript, utterances, speakers, speakerMap, languages, primary_language }; - } + // Build speaker label maps + const speakers: Record = {}; + const speakerMap: Record = {}; - shouldRetryWithDifferentService(errorMsg: string): boolean { - const has429 = /429|AZURE_429_ERROR|rate.?limit|too many requests/i.test(errorMsg); - console.log(`[Transcription] shouldRetryWithDifferentService: ${has429} (${errorMsg.substring(0, 100)})`); - return has429; - } + for (const speakerId of speakerIdSet) { + const label = `Speaker ${speakerId}`; + speakers[String(speakerId)] = label; + speakerMap[label] = speakerId; + } - shouldRetryWithConversion(errorMsg: string): boolean { - const patterns = [ - /422/, - /AZURE_422_ERROR/, - /audio.?format/i, - /InvalidAudioFormat/i, - /audio\/x-m4a/i, - /unsupported.*format/i, - /invalid.*audio/i, - /codec.*not.*supported/i, - /content.*type.*unsupported/i, - /bitrate.*not.*supported/i, - /sample.*rate.*invalid/i, - /media.*type.*not.*supported/i, - ]; - const matches = patterns.some((p) => p.test(errorMsg)); - console.log(`[Transcription] shouldRetryWithConversion: ${matches} (${errorMsg.substring(0, 100)})`); - return matches; - } + console.log( + `[Transcription] Processed: ${transcript.length} chars, ${utterances.length} utterances, ${speakerIdSet.size} speakers, lang: ${primary_language}` + ); - async notifyServer( - memoId: string, - userId: string, - result: TranscriptionResult, - route: 'fast' | 'batch', - serviceKey: string, - serverUrl: string, - isAppend?: boolean, - recordingIndex?: number, - ): Promise { - const endpoint = isAppend - ? 
`${serverUrl}/api/v1/internal/append-transcription-completed` - : `${serverUrl}/api/v1/internal/transcription-completed`; + return { transcript, utterances, speakers, speakerMap, languages, primary_language }; + } - const body: Record = { - memoId, - userId, - transcriptionResult: result, - route, - success: true, - }; + /** + * Transcribe via self-hosted mana-stt service (WhisperX on GPU server). + * Uses the /transcribe/whisperx endpoint which returns rich data with + * diarization, word alignment, and utterances. + */ + async performManaSTTTranscription( + audioBuffer: Buffer, + audioPath: string, + languages?: string[], + diarization?: boolean + ): Promise { + const ext = path.extname(audioPath) || '.m4a'; + const mimeTypes: Record = { + '.m4a': 'audio/mp4', + '.mp3': 'audio/mpeg', + '.wav': 'audio/wav', + '.flac': 'audio/flac', + '.ogg': 'audio/ogg', + '.webm': 'audio/webm', + '.mp4': 'audio/mp4', + }; + const mimeType = mimeTypes[ext] || 'audio/wav'; - if (isAppend) { - body['recordingIndex'] = recordingIndex; - } + // Determine language hint from recording languages (e.g., 'de-DE' → 'de') + const langHint = languages?.[0]?.split('-')[0] || null; - console.log(`[Callback] Notifying server at ${endpoint} for memo ${memoId}`); + const formData = new FormData(); + const audioBlob = new Blob([audioBuffer], { type: mimeType }); + formData.append('file', audioBlob, `audio${ext}`); + if (langHint) formData.append('language', langHint); + formData.append('diarization', String(diarization !== false)); + formData.append('alignment', 'true'); - const response = await fetch(endpoint, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-Service-Key': serviceKey, - }, - body: JSON.stringify(body), - }); + const headers: Record = { + Accept: 'application/json', + }; + if (MANA_STT_API_KEY) { + headers['X-API-Key'] = MANA_STT_API_KEY; + } - if (!response.ok) { - const errorText = await response.text(); - throw new Error(`Server callback failed: 
${response.status} - ${errorText}`); - } + console.log( + `[mana-stt] Sending WhisperX request (${audioBuffer.length} bytes, lang=${langHint})` + ); - console.log(`[Callback] Server notified successfully for memo ${memoId}`); - } + const response = await fetch(`${MANA_STT_URL}/transcribe/whisperx`, { + method: 'POST', + headers, + body: formData, + }); - async notifyServerError( - memoId: string, - userId: string, - errorMsg: string, - serviceKey: string, - serverUrl: string, - ): Promise { - const endpoint = `${serverUrl}/api/v1/internal/transcription-completed`; + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`mana-stt error: ${response.status} - ${errorText}`); + } - console.error(`[Callback] Notifying server of transcription error for memo ${memoId}: ${errorMsg}`); + const sttResult = (await response.json()) as { + text: string; + language?: string; + duration_seconds?: number; + utterances?: Array<{ + speaker: number; + text: string; + offset: number; + duration: number; + }>; + speakers?: Record; + speaker_map?: Record; + languages?: string[]; + primary_language?: string; + }; - try { - const response = await fetch(endpoint, { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - 'X-Service-Key': serviceKey, - }, - body: JSON.stringify({ - memoId, - userId, - error: errorMsg, - success: false, - timestamp: new Date().toISOString(), - }), - }); + console.log( + `[mana-stt] Response: ${sttResult.text.length} chars, ` + + `${sttResult.utterances?.length ?? 0} utterances, ` + + `lang=${sttResult.primary_language ?? sttResult.language}` + ); - if (!response.ok) { - const text = await response.text(); - console.error(`[Callback] Error notification failed: ${response.status} - ${text}`); - } - } catch (notifyErr: unknown) { - const msg = notifyErr instanceof Error ? 
notifyErr.message : String(notifyErr); - console.error(`[Callback] Failed to notify server of error: ${msg}`); - } - } + // Map mana-stt language codes to locale format (e.g., 'de' → 'de-DE') + const localeMap: Record = { + de: 'de-DE', + en: 'en-GB', + fr: 'fr-FR', + it: 'it-IT', + es: 'es-ES', + sv: 'sv-SE', + ru: 'ru-RU', + nl: 'nl-NL', + tr: 'tr-TR', + pt: 'pt-PT', + ja: 'ja-JP', + ko: 'ko-KR', + zh: 'zh-CN', + ar: 'ar-SA', + hi: 'hi-IN', + }; + + const rawLang = sttResult.primary_language ?? sttResult.language ?? 'de'; + const primaryLocale = localeMap[rawLang] ?? `${rawLang}-${rawLang.toUpperCase()}`; + const detectedLanguages = (sttResult.languages ?? [rawLang]).map( + (l) => localeMap[l] ?? `${l}-${l.toUpperCase()}` + ); + + return { + transcript: sttResult.text, + utterances: sttResult.utterances ?? [], + speakers: sttResult.speakers ?? {}, + speakerMap: sttResult.speaker_map ?? {}, + languages: detectedLanguages, + primary_language: primaryLocale, + }; + } + + shouldRetryWithDifferentService(errorMsg: string): boolean { + const has429 = /429|AZURE_429_ERROR|rate.?limit|too many requests/i.test(errorMsg); + console.log( + `[Transcription] shouldRetryWithDifferentService: ${has429} (${errorMsg.substring(0, 100)})` + ); + return has429; + } + + shouldRetryWithConversion(errorMsg: string): boolean { + const patterns = [ + /422/, + /AZURE_422_ERROR/, + /audio.?format/i, + /InvalidAudioFormat/i, + /audio\/x-m4a/i, + /unsupported.*format/i, + /invalid.*audio/i, + /codec.*not.*supported/i, + /content.*type.*unsupported/i, + /bitrate.*not.*supported/i, + /sample.*rate.*invalid/i, + /media.*type.*not.*supported/i, + ]; + const matches = patterns.some((p) => p.test(errorMsg)); + console.log( + `[Transcription] shouldRetryWithConversion: ${matches} (${errorMsg.substring(0, 100)})` + ); + return matches; + } + + async notifyServer( + memoId: string, + userId: string, + result: TranscriptionResult, + route: 'fast' | 'batch', + serviceKey: string, + serverUrl: string, + 
isAppend?: boolean, + recordingIndex?: number + ): Promise { + const endpoint = isAppend + ? `${serverUrl}/api/v1/internal/append-transcription-completed` + : `${serverUrl}/api/v1/internal/transcription-completed`; + + const body: Record = { + memoId, + userId, + transcriptionResult: result, + route, + success: true, + }; + + if (isAppend) { + body['recordingIndex'] = recordingIndex; + } + + console.log(`[Callback] Notifying server at ${endpoint} for memo ${memoId}`); + + const response = await fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Service-Key': serviceKey, + }, + body: JSON.stringify(body), + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error(`Server callback failed: ${response.status} - ${errorText}`); + } + + console.log(`[Callback] Server notified successfully for memo ${memoId}`); + } + + async notifyServerError( + memoId: string, + userId: string, + errorMsg: string, + serviceKey: string, + serverUrl: string + ): Promise { + const endpoint = `${serverUrl}/api/v1/internal/transcription-completed`; + + console.error( + `[Callback] Notifying server of transcription error for memo ${memoId}: ${errorMsg}` + ); + + try { + const response = await fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'X-Service-Key': serviceKey, + }, + body: JSON.stringify({ + memoId, + userId, + error: errorMsg, + success: false, + timestamp: new Date().toISOString(), + }), + }); + + if (!response.ok) { + const text = await response.text(); + console.error(`[Callback] Error notification failed: ${response.status} - ${text}`); + } + } catch (notifyErr: unknown) { + const msg = notifyErr instanceof Error ? 
notifyErr.message : String(notifyErr); + console.error(`[Callback] Failed to notify server of error: ${msg}`); + } + } } diff --git a/apps/memoro/apps/server/src/index.ts b/apps/memoro/apps/server/src/index.ts index 75552e1d1..152d55a07 100644 --- a/apps/memoro/apps/server/src/index.ts +++ b/apps/memoro/apps/server/src/index.ts @@ -20,6 +20,7 @@ import { cleanupRoutes } from './routes/cleanup'; import { meetingRoutes } from './routes/meetings'; import { meetingWebhookRoutes } from './routes/meetings-webhooks'; import { COSTS } from './lib/credits'; +import { rateLimiter } from './middleware/rate-limiter'; const app = new Hono(); @@ -46,6 +47,16 @@ app.use( }) ); +// ── Rate limiting ───────────────────────────────────────────────────────────── + +app.use( + '/api/v1/*', + rateLimiter({ + windowMs: 60_000, + max: 100, + }) +); + // ── Health check ─────────────────────────────────────────────────────────────── app.get('/health', (c) => diff --git a/apps/memoro/apps/server/src/lib/ai.ts b/apps/memoro/apps/server/src/lib/ai.ts index a4b56994f..aab359640 100644 --- a/apps/memoro/apps/server/src/lib/ai.ts +++ b/apps/memoro/apps/server/src/lib/ai.ts @@ -1,17 +1,25 @@ /** - * AI text generation with Gemini (primary) → Azure OpenAI (fallback). + * AI text generation with mana-llm (primary) → Gemini → Azure OpenAI (fallbacks). * - * Mirrors the NestJS AiService without the DI framework. + * Fallback chain: + * 1. mana-llm (self-hosted, OpenAI-compatible API on port 3025) + * 2. Gemini (Google Cloud) + * 3. 
Azure OpenAI (Microsoft Cloud) */ +// Self-hosted mana-llm service +const MANA_LLM_URL = process.env.MANA_LLM_URL || ''; +const MANA_LLM_MODEL = process.env.MANA_LLM_MODEL || 'ollama/gemma3:4b'; + +// Gemini (cloud fallback) const GEMINI_ENDPOINT = 'https://generativelanguage.googleapis.com/v1beta/models'; const GEMINI_MODEL = 'gemini-2.0-flash-001'; -const GEMINI_DEFAULT_TEMPERATURE = 0.7; -const GEMINI_DEFAULT_MAX_TOKENS = 1024; +// Azure OpenAI (cloud fallback) const AZURE_API_VERSION = '2024-02-01'; -const AZURE_DEFAULT_TEMPERATURE = 0.7; -const AZURE_DEFAULT_MAX_TOKENS = 1024; + +const DEFAULT_TEMPERATURE = 0.7; +const DEFAULT_MAX_TOKENS = 1024; export interface GenerateOptions { temperature?: number; @@ -20,28 +28,79 @@ export interface GenerateOptions { } /** - * Generate text using Gemini with Azure OpenAI as fallback. + * Generate text using mana-llm → Gemini → Azure OpenAI fallback chain. */ export async function generateText(prompt: string, options?: GenerateOptions): Promise { - const geminiKey = process.env.GEMINI_API_KEY; + // Attempt 1: Self-hosted mana-llm + if (MANA_LLM_URL) { + const result = await callManaLLM(prompt, options); + if (result !== null) return result; + console.warn('[ai] mana-llm failed, falling back to Gemini'); + } + // Attempt 2: Gemini + const geminiKey = process.env.GEMINI_API_KEY; if (geminiKey) { const result = await callGemini(prompt, geminiKey, options); if (result !== null) return result; console.warn('[ai] Gemini failed, falling back to Azure OpenAI'); - } else { - console.warn('[ai] No GEMINI_API_KEY, using Azure OpenAI directly'); } + // Attempt 3: Azure OpenAI const azureKey = process.env.AZURE_OPENAI_KEY; - if (!azureKey) { - throw new Error('No AI provider available: both GEMINI_API_KEY and AZURE_OPENAI_KEY are missing'); + if (azureKey) { + const result = await callAzure(prompt, azureKey, options); + if (result !== null) return result; } - const result = await callAzure(prompt, azureKey, options); - if (result !== 
null) return result; + throw new Error('All AI providers failed (mana-llm, Gemini, Azure OpenAI)'); +} - throw new Error('All AI providers failed'); +/** + * Call self-hosted mana-llm service (OpenAI-compatible API). + */ +async function callManaLLM(prompt: string, options?: GenerateOptions): Promise { + const temperature = options?.temperature ?? DEFAULT_TEMPERATURE; + const maxTokens = options?.maxTokens ?? DEFAULT_MAX_TOKENS; + + try { + const url = `${MANA_LLM_URL}/v1/chat/completions`; + const start = Date.now(); + + const messages: Array<{ role: string; content: string }> = []; + if (options?.systemInstruction) { + messages.push({ role: 'system', content: options.systemInstruction }); + } + messages.push({ role: 'user', content: prompt }); + + const response = await fetch(url, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ + model: MANA_LLM_MODEL, + messages, + temperature, + max_tokens: maxTokens, + stream: false, + }), + }); + + if (!response.ok) { + const errorText = await response.text(); + console.error(`[ai] mana-llm error (${response.status}): ${errorText}`); + return null; + } + + const data = (await response.json()) as { + choices?: Array<{ message?: { content?: string } }>; + }; + const content = data.choices?.[0]?.message?.content?.trim() ?? ''; + console.debug(`[ai] mana-llm responded in ${Date.now() - start}ms (${content.length} chars)`); + return content || null; + } catch (error) { + console.error(`[ai] mana-llm call failed: ${error instanceof Error ? error.message : error}`); + return null; + } } async function callGemini( @@ -49,8 +108,8 @@ async function callGemini( apiKey: string, options?: GenerateOptions ): Promise { - const temperature = options?.temperature ?? GEMINI_DEFAULT_TEMPERATURE; - const maxOutputTokens = options?.maxTokens ?? GEMINI_DEFAULT_MAX_TOKENS; + const temperature = options?.temperature ?? DEFAULT_TEMPERATURE; + const maxOutputTokens = options?.maxTokens ?? 
DEFAULT_MAX_TOKENS; try { const url = `${GEMINI_ENDPOINT}/${GEMINI_MODEL}:generateContent?key=${apiKey}`; @@ -102,8 +161,8 @@ async function callAzure( return null; } - const temperature = options?.temperature ?? AZURE_DEFAULT_TEMPERATURE; - const maxTokens = options?.maxTokens ?? AZURE_DEFAULT_MAX_TOKENS; + const temperature = options?.temperature ?? DEFAULT_TEMPERATURE; + const maxTokens = options?.maxTokens ?? DEFAULT_MAX_TOKENS; try { const url = `${endpoint}/openai/deployments/${deployment}/chat/completions?api-version=${AZURE_API_VERSION}`; diff --git a/apps/memoro/apps/server/src/middleware/rate-limiter.ts b/apps/memoro/apps/server/src/middleware/rate-limiter.ts new file mode 100644 index 000000000..d1d0782e5 --- /dev/null +++ b/apps/memoro/apps/server/src/middleware/rate-limiter.ts @@ -0,0 +1,63 @@ +import type { MiddlewareHandler } from 'hono'; + +interface RateLimiterOptions { + /** Time window in milliseconds (default: 60000 = 1 minute) */ + windowMs?: number; + /** Max requests per window per IP (default: 100) */ + max?: number; +} + +interface RateLimitEntry { + count: number; + resetAt: number; +} + +/** + * Simple in-memory rate limiter middleware for Hono. + * Limits requests per IP address within a sliding time window. + */ +export function rateLimiter(options: RateLimiterOptions = {}): MiddlewareHandler { + const windowMs = options.windowMs ?? 60_000; + const max = options.max ?? 
100; + const store = new Map(); + + // Periodic cleanup of expired entries every 5 minutes + setInterval(() => { + const now = Date.now(); + for (const [key, entry] of store) { + if (now >= entry.resetAt) { + store.delete(key); + } + } + }, 5 * 60_000); + + return async (c, next) => { + const ip = + c.req.header('x-forwarded-for')?.split(',')[0]?.trim() || + c.req.header('x-real-ip') || + 'unknown'; + + const now = Date.now(); + let entry = store.get(ip); + + if (!entry || now >= entry.resetAt) { + entry = { count: 0, resetAt: now + windowMs }; + store.set(ip, entry); + } + + entry.count++; + + c.header('X-RateLimit-Limit', String(max)); + c.header('X-RateLimit-Remaining', String(Math.max(0, max - entry.count))); + c.header('X-RateLimit-Reset', String(Math.ceil(entry.resetAt / 1000))); + + if (entry.count > max) { + return c.json( + { error: 'Too many requests', retryAfter: Math.ceil((entry.resetAt - now) / 1000) }, + 429 + ); + } + + await next(); + }; +} diff --git a/apps/memoro/apps/web/.env.example b/apps/memoro/apps/web/.env.example index 7e256a9e5..fa49d28a4 100644 --- a/apps/memoro/apps/web/.env.example +++ b/apps/memoro/apps/web/.env.example @@ -33,6 +33,5 @@ PUBLIC_APPLE_REDIRECT_URI=http://localhost:5173/auth/apple-callback # Change to PUBLIC_POSTHOG_KEY=your-posthog-key PUBLIC_POSTHOG_HOST=https://eu.i.posthog.com -# Sentry Error Tracking (optional) -# SENTRY_AUTH_TOKEN=your-sentry-auth-token -# PUBLIC_SENTRY_DSN=https://YOUR_DSN@sentry.io/PROJECT_ID +# GlitchTip Error Tracking (self-hosted, Sentry-compatible) +PUBLIC_GLITCHTIP_DSN= diff --git a/apps/memoro/apps/web/src/app.d.ts b/apps/memoro/apps/web/src/app.d.ts index 3460dd08a..e5044fb70 100644 --- a/apps/memoro/apps/web/src/app.d.ts +++ b/apps/memoro/apps/web/src/app.d.ts @@ -27,11 +27,7 @@ declare module '$env/static/public' { export const PUBLIC_APPLE_REDIRECT_URI: string; export const PUBLIC_POSTHOG_KEY: string; export const PUBLIC_POSTHOG_HOST: string; - export const PUBLIC_SENTRY_DSN: string; -} 
- -declare module '$env/static/private' { - export const SENTRY_AUTH_TOKEN: string; + export const PUBLIC_GLITCHTIP_DSN: string; } export {}; diff --git a/apps/memoro/apps/web/src/hooks.client.ts b/apps/memoro/apps/web/src/hooks.client.ts new file mode 100644 index 000000000..34a4ea4c8 --- /dev/null +++ b/apps/memoro/apps/web/src/hooks.client.ts @@ -0,0 +1,12 @@ +import { initErrorTracking, handleSvelteError } from '@manacore/shared-error-tracking/browser'; +import type { HandleClientError } from '@sveltejs/kit'; + +initErrorTracking({ + serviceName: 'memoro-web', + dsn: (window as any).__PUBLIC_GLITCHTIP_DSN__, + environment: import.meta.env.MODE, +}); + +export const handleError: HandleClientError = ({ error }) => { + handleSvelteError(error); +}; diff --git a/apps/memoro/apps/web/src/hooks.server.ts b/apps/memoro/apps/web/src/hooks.server.ts index 0de08954c..1e2c805f1 100644 --- a/apps/memoro/apps/web/src/hooks.server.ts +++ b/apps/memoro/apps/web/src/hooks.server.ts @@ -1,26 +1,32 @@ /** * Server-side hooks for SvelteKit - * Implements custom CSRF protection that allows OAuth callbacks + * - Injects runtime environment variables for client-side use + * - Custom CSRF protection that allows OAuth callbacks + * - GlitchTip error tracking DSN injection */ import type { Handle } from '@sveltejs/kit'; +import { injectUmamiAnalytics } from '@manacore/shared-utils/analytics-server'; +import { setSecurityHeaders } from '@manacore/shared-utils/security-headers'; + +// Get client-side URLs from environment (Docker runtime) +const PUBLIC_MANA_CORE_AUTH_URL_CLIENT = + process.env.PUBLIC_MANA_CORE_AUTH_URL_CLIENT || process.env.PUBLIC_MANA_CORE_AUTH_URL || ''; +const PUBLIC_MEMORO_SERVER_URL = process.env.PUBLIC_MEMORO_SERVER_URL || ''; +const PUBLIC_GLITCHTIP_DSN = process.env.PUBLIC_GLITCHTIP_DSN || ''; // Routes that are allowed to receive cross-origin POST requests // (OAuth callbacks from external providers) const ALLOWED_PATHS = [ - '/auth/apple-callback-handler', // 
Apple Sign-In OAuth callback (server endpoint)
-  '/auth/apple-callback', // Apple Sign-In OAuth callback (legacy/fallback)
-  '/auth/google-callback', // Google Sign-In OAuth callback (if needed)
+  '/auth/apple-callback-handler',
+  '/auth/apple-callback',
+  '/auth/google-callback',
 ];
 
-/**
- * Custom CSRF protection that allows specific OAuth callback routes
- * while protecting all other routes
- */
 export const handle: Handle = async ({ event, resolve }) => {
   const { request, url } = event;
 
-  // Only check POST, PATCH, PUT, DELETE requests
+  // CSRF protection: block cross-origin mutations except OAuth callbacks
   if (['POST', 'PATCH', 'PUT', 'DELETE'].includes(request.method)) {
     const origin = request.headers.get('origin');
     const forbidden =
@@ -29,20 +35,35 @@ export const handle: Handle = async ({ event, resolve }) => {
       !ALLOWED_PATHS.some((path) => url.pathname === path);
 
     if (forbidden) {
-      // Log the blocked request for debugging
       console.warn('CSRF: Blocked cross-origin request:', {
         method: request.method,
         path: url.pathname,
         origin: origin,
         expectedOrigin: url.origin,
       });
-
-      return new Response('Cross-site POST form submissions are forbidden', {
-        status: 403,
-      });
+      return new Response('Cross-site POST form submissions are forbidden', { status: 403 });
     }
   }
 
-  // Allow the request to proceed
-  return resolve(event);
+  const response = await resolve(event, {
+    transformPageChunk: ({ html }) => {
+      const envScript = `<script>window.__PUBLIC_MANA_CORE_AUTH_URL_CLIENT__ = ${JSON.stringify(PUBLIC_MANA_CORE_AUTH_URL_CLIENT)}; window.__PUBLIC_MEMORO_SERVER_URL__ = ${JSON.stringify(PUBLIC_MEMORO_SERVER_URL)}; window.__PUBLIC_GLITCHTIP_DSN__ = ${JSON.stringify(PUBLIC_GLITCHTIP_DSN)};<\/script>`;
+      return injectUmamiAnalytics(html.replace('</head>', `${envScript}</head>`));
+    },
+  });
+
+  setSecurityHeaders(response, {
+    connectSrc: [
+      PUBLIC_MANA_CORE_AUTH_URL_CLIENT || 'http://localhost:3001',
+      PUBLIC_MEMORO_SERVER_URL || 'http://localhost:3015',
+      PUBLIC_GLITCHTIP_DSN ?
new URL(PUBLIC_GLITCHTIP_DSN).origin : '', + 'http://localhost:3050', // mana-sync server + ].filter(Boolean), + }); + + return response; }; diff --git a/apps/memoro/apps/web/src/lib/config/env.ts b/apps/memoro/apps/web/src/lib/config/env.ts index 1d375eac3..665efd97f 100644 --- a/apps/memoro/apps/web/src/lib/config/env.ts +++ b/apps/memoro/apps/web/src/lib/config/env.ts @@ -13,7 +13,7 @@ import { PUBLIC_APPLE_REDIRECT_URI, PUBLIC_POSTHOG_KEY, PUBLIC_POSTHOG_HOST, - PUBLIC_SENTRY_DSN, + PUBLIC_GLITCHTIP_DSN, } from '$env/static/public'; export const env = { @@ -48,16 +48,16 @@ export const env = { }, }, - // Error tracking (optional) - sentry: { - dsn: PUBLIC_SENTRY_DSN || '', + // Error tracking (GlitchTip — Sentry-compatible, self-hosted) + glitchtip: { + dsn: PUBLIC_GLITCHTIP_DSN || '', }, } as const; // Helper to check if optional features are enabled export const features = { hasPosthog: !!PUBLIC_POSTHOG_KEY, - hasSentry: !!PUBLIC_SENTRY_DSN, + hasGlitchtip: !!PUBLIC_GLITCHTIP_DSN, } as const; // Log environment configuration on startup (useful for debugging deployment issues) @@ -69,7 +69,7 @@ if (typeof window !== 'undefined') { appleRedirectUri: env.oauth.appleRedirectUri || '❌ NOT SET', googleOAuth: !!env.oauth.googleClientId ? '✅ Configured' : '❌ Missing', posthog: features.hasPosthog ? '✅ Enabled' : '⚪ Disabled', - sentry: features.hasSentry ? '✅ Enabled' : '⚪ Disabled', + glitchtip: features.hasGlitchtip ? '✅ Enabled' : '⚪ Disabled', }); // Specific warning for Apple Sign-In if not configured diff --git a/apps/memoro/apps/web/src/routes/+error.svelte b/apps/memoro/apps/web/src/routes/+error.svelte new file mode 100644 index 000000000..6f5f81b46 --- /dev/null +++ b/apps/memoro/apps/web/src/routes/+error.svelte @@ -0,0 +1,10 @@ + + +
+<script lang="ts">
+  import { page } from '$app/stores';
+  import { _ } from 'svelte-i18n';
+</script>
+
+<div class="error-page">
+  <h1>{$page.status}</h1>
+  <p>{$page.error?.message || $_('error.notFound')}</p>
+  <a href="/">{$_('error.backToHome')}</a>
+</div>