managarten/apps/context/apps/mobile/services/aiService.ts
Till JS 5bd967900f refactor(context-mobile): migrate from Supabase to backend API + mana-core-auth
Complete migration of Context mobile app from direct Supabase access
to NestJS backend API with mana-core-auth authentication.

New files:
- context/AuthProvider.tsx: mana-core-auth integration via @manacore/shared-auth
- services/backendApi.ts: Backend API client for spaces, documents, AI, tokens

Rewritten services (same exports, backend implementation):
- supabaseService.ts: Now thin wrapper around backendApi
- aiService.ts: Uses backendApi for auth token
- tokenCountingService.ts: Model prices from backend API
- tokenTransactionService.ts: All token ops via backend API
- revenueCatService.ts: Token balance via backend API

Updated 16 consumer files (auth forms, token components, AI toolbars)

Deleted:
- utils/supabase.ts, context/AuthContext.tsx
- services/spaceService.ts, services/spaceServiceDirect.ts

Dependencies:
- Added: @manacore/shared-auth, expo-secure-store
- Removed: @supabase/supabase-js, @google/generative-ai, openai, @azure/openai

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-23 12:01:58 +01:00

99 lines
2.4 KiB
TypeScript

import { aiApi } from './backendApi';
// Type definitions
/**
 * Provider identifier kept from the pre-migration API surface.
 * NOTE(review): actual routing now happens server-side via mana-llm (see
 * availableModels below); confirm whether 'google' is still reachable.
 */
export type AIProvider = 'azure' | 'google';
/** A selectable model entry for UI pickers. */
export type AIModelOption = {
  // Human-readable name shown to the user.
  label: string;
  // Backend model identifier (e.g. 'ollama/gemma3:4b').
  value: string;
  provider: AIProvider;
};
/** Optional settings forwarded to the backend generate endpoint. */
export type AIGenerationOptions = {
  model?: string;
  temperature?: number;
  maxTokens?: number;
  prompt?: string;
  // Document the generation is associated with, if any.
  documentId?: string;
  // Extra documents supplied as context for the generation.
  referencedDocuments?: { title: string; content: string }[];
};
/** Result of a successful backend text generation, including token accounting. */
export type AIGenerationResult = {
  // The generated text.
  text: string;
  tokenInfo: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
    // Tokens deducted from the user's balance for this request.
    tokensUsed: number;
    // Balance remaining after the deduction.
    remainingTokens: number;
  };
};
// Available models (routed through mana-llm on the backend)
export const availableModels: AIModelOption[] = [
  // NOTE(review): provider is 'azure' even though the values point at
  // ollama/openrouter backends — presumably a legacy field the UI no longer
  // keys on (getProviderForModel also hard-codes 'azure'); confirm before
  // relying on it.
  { label: 'Gemma 3 4B (Lokal)', value: 'ollama/gemma3:4b', provider: 'azure' },
  {
    label: 'Llama 3.1 8B',
    value: 'openrouter/meta-llama/llama-3.1-8b-instruct',
    provider: 'azure',
  },
];
/**
 * Checks whether the user has enough tokens for a request.
 *
 * The estimate is computed entirely by the backend; this is a thin
 * pass-through around `aiApi.estimate`.
 *
 * @param prompt - The prompt whose token cost should be estimated.
 * @param model - Backend model identifier.
 * @param estimatedCompletionLength - Expected completion length used for the
 *   estimate (defaults to 500).
 * @param referencedDocuments - Optional context documents included in the cost.
 * @returns Whether the balance suffices, the raw estimate, and the balance.
 */
export const checkTokenBalance = async (
  prompt: string,
  model: string,
  estimatedCompletionLength: number = 500,
  referencedDocuments?: { title: string; content: string }[]
): Promise<{ hasEnough: boolean; estimate: any; balance: number }> => {
  const request = {
    prompt,
    model,
    estimatedCompletionLength,
    referencedDocuments,
  };
  return aiApi.estimate(request);
};
/**
 * Generates text via the backend (which uses mana-llm).
 *
 * @param prompt - The prompt to send to the model.
 * @param _provider - Ignored; routing is handled by the backend. Kept for
 *   backward compatibility with existing callers.
 * @param options - Optional generation settings; `model` falls back to
 *   'ollama/gemma3:4b' when unset.
 * @returns The generated text plus the backend's token accounting.
 * @throws Error when the backend call fails; the underlying message is
 *   embedded in the thrown error's message.
 */
export const generateText = async (
  prompt: string,
  _provider: AIProvider = 'azure',
  options: AIGenerationOptions = {}
): Promise<AIGenerationResult> => {
  try {
    const result = await aiApi.generate({
      prompt,
      model: options.model || 'ollama/gemma3:4b',
      temperature: options.temperature,
      maxTokens: options.maxTokens,
      documentId: options.documentId,
      referencedDocuments: options.referencedDocuments,
    });
    return {
      text: result.text,
      tokenInfo: result.tokenInfo,
    };
  } catch (error: unknown) {
    // Fix: catch as `unknown` and narrow. The previous `catch (error: any)`
    // interpolated `error.message` unchecked, producing
    // "… fehlgeschlagen: undefined" for non-Error throws (e.g. rejected
    // promises carrying strings or plain objects).
    const message = error instanceof Error ? error.message : String(error);
    console.error('Fehler bei der Textgenerierung:', error);
    throw new Error(`Textgenerierung fehlgeschlagen: ${message}`);
  }
};
/**
 * Returns the model options for a given provider.
 *
 * Every model is returned regardless of provider — routing is handled by the
 * backend — so the parameter exists only for API compatibility with callers
 * written against the pre-migration interface.
 */
export const getModelsByProvider = (_provider: AIProvider): AIModelOption[] =>
  availableModels;
/**
 * Returns the provider for a given model.
 *
 * Always 'azure': provider selection is now done server-side by mana-llm, so
 * this helper only keeps the pre-migration call sites compiling.
 */
export const getProviderForModel = (_modelValue: string): AIProvider => 'azure';