managarten/packages/shared-llm/src/llm.module.ts
Till JS · e2f144962c
feat: add unified @manacore/shared-llm package and migrate all backends
Create a shared LLM client package that provides a unified interface
to the mana-llm service, replacing 9 individual fetch-based integrations
with consistent error handling, retry logic, and JSON extraction.

Package (@manacore/shared-llm):
- LlmModule with forRoot/forRootAsync (NestJS dynamic module)
- LlmClientService: chat, json, vision, visionJson, embed, stream
- LlmClient standalone class for non-NestJS consumers
- extractJson utility (consolidates 3 markdown-stripping implementations; sketched below)
- retryFetch with exponential backoff on 429 responses, 5xx responses, and network errors (sketched below)
- 44 unit tests (json-extractor, retry, llm-client)
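
Minimal sketches of the two utilities named above, under assumed signatures — the commit lists only the names, so the parameter shapes and option defaults here are hypothetical:

// Hypothetical extractJson sketch: strips a markdown code fence if
// present, then parses. The real utility may handle more edge cases.
export function extractJson<T = unknown>(raw: string): T {
  const fenced = raw.match(/```(?:json)?\s*([\s\S]*?)```/);
  const body = fenced ? fenced[1] : raw;
  return JSON.parse(body.trim()) as T;
}

// Hypothetical retryFetch sketch: retries 429s, 5xx responses, and
// network errors with exponential backoff. Names are assumptions.
export async function retryFetch(
  url: string,
  init: RequestInit = {},
  retries = 3,
  baseDelayMs = 500,
): Promise<Response> {
  let lastError: unknown;
  for (let attempt = 0; attempt <= retries; attempt++) {
    try {
      const res = await fetch(url, init);
      // Retry on rate limiting and server errors; return anything else.
      if (res.status !== 429 && res.status < 500) return res;
      lastError = new Error(`HTTP ${res.status}`);
    } catch (err) {
      lastError = err; // network failure: also retried
    }
    if (attempt < retries) {
      // Exponential backoff: 500 ms, 1 s, 2 s, ... between attempts
      await new Promise((r) => setTimeout(r, baseDelayMs * 2 ** attempt));
    }
  }
  throw lastError;
}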

Migrated backends (a before/after sketch follows the list):
- mana-core-auth: raw fetch → llm.json()
- planta: raw fetch + vision → llm.visionJson()
- nutriphi: raw fetch + regex → llm.visionJson() + llm.json()
- chat: custom OllamaService (175 LOC) → llm.chatMessages()
- context: raw fetch → llm.chat() (keeps token tracking)
- traces: 2x raw fetch → llm.chat()
- manadeck: @google/genai SDK → llm.json() + llm.visionJson()
- bot-services: raw Ollama API → LlmClient standalone
- matrix-ollama-bot: raw fetch → llm.chatMessages() + llm.vision()
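
A hedged before/after for the first migration above. The endpoint path, response field, and llm.json() signature are assumptions; the commit confirms only the method names:

import { Injectable } from '@nestjs/common';
import { LlmClientService } from '@manacore/shared-llm';

// Before (hypothetical shape): hand-rolled fetch, no retries,
// manual JSON parsing that breaks on markdown-fenced model output.
async function classifyRaw(prompt: string): Promise<unknown> {
  const res = await fetch(`${process.env.LLM_URL}/api/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt }),
  });
  const body = await res.json();
  return JSON.parse(body.response);
}

// After: the shared client supplies retry logic and JSON extraction.
@Injectable()
export class AuthLlmConsumer {
  constructor(private readonly llm: LlmClientService) {}

  classify(prompt: string) {
    return this.llm.json(prompt);
  }
}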

New credit operations (a config sketch follows the list):
- AI_PLANT_ANALYSIS (2 credits, planta)
- AI_GUIDE_GENERATION (5 credits, traces)
- AI_CONTEXT_GENERATION (2 credits, context)
- AI_BOT_CHAT (0.1 credits, matrix)
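
The commit gives only the operation names and costs; a plausible shape for the registry, with the enum and record names being hypothetical:

export enum CreditOperation {
  AI_PLANT_ANALYSIS = 'AI_PLANT_ANALYSIS',
  AI_GUIDE_GENERATION = 'AI_GUIDE_GENERATION',
  AI_CONTEXT_GENERATION = 'AI_CONTEXT_GENERATION',
  AI_BOT_CHAT = 'AI_BOT_CHAT',
}

export const CREDIT_COSTS: Record<CreditOperation, number> = {
  [CreditOperation.AI_PLANT_ANALYSIS]: 2, // planta
  [CreditOperation.AI_GUIDE_GENERATION]: 5, // traces
  [CreditOperation.AI_CONTEXT_GENERATION]: 2, // context
  [CreditOperation.AI_BOT_CHAT]: 0.1, // matrix
};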

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-23 22:06:30 +01:00


import { DynamicModule, Module, Global, Provider } from '@nestjs/common';
import type {
  LlmModuleOptions,
  LlmModuleAsyncOptions,
  LlmOptionsFactory,
} from './interfaces/llm-options.interface';
import { LlmClientService } from './llm-client.service';
import { LLM_MODULE_OPTIONS } from './llm.constants';

/**
 * Dynamic module exposing LlmClientService application-wide.
 * Marked @Global() so consumers register it once at the root and
 * inject the client anywhere without re-importing the module.
 */
@Global()
@Module({})
export class LlmModule {
  /** Register with static, eagerly available options. */
  static forRoot(options: LlmModuleOptions): DynamicModule {
    return {
      module: LlmModule,
      providers: [
        {
          provide: LLM_MODULE_OPTIONS,
          useValue: options,
        },
        LlmClientService,
      ],
      exports: [LLM_MODULE_OPTIONS, LlmClientService],
    };
  }

  /** Register with options resolved asynchronously (factory or class). */
  static forRootAsync(options: LlmModuleAsyncOptions): DynamicModule {
    const asyncProviders = this.createAsyncProviders(options);
    return {
      module: LlmModule,
      imports: options.imports || [],
      providers: [...asyncProviders, LlmClientService],
      exports: [LLM_MODULE_OPTIONS, LlmClientService],
    };
  }

  private static createAsyncProviders(options: LlmModuleAsyncOptions): Provider[] {
    // useFactory: build the options directly from injected dependencies.
    if (options.useFactory) {
      return [
        {
          provide: LLM_MODULE_OPTIONS,
          useFactory: options.useFactory,
          inject: options.inject || [],
        },
      ];
    }
    const useClass = options.useClass;
    const useExisting = options.useExisting;
    // useClass: instantiate the factory class here, then ask it for options.
    if (useClass) {
      return [
        {
          provide: LLM_MODULE_OPTIONS,
          useFactory: async (optionsFactory: LlmOptionsFactory) =>
            await optionsFactory.createLlmOptions(),
          inject: [useClass],
        },
        {
          provide: useClass,
          useClass,
        },
      ];
    }
    // useExisting: reuse a factory provider registered elsewhere.
    if (useExisting) {
      return [
        {
          provide: LLM_MODULE_OPTIONS,
          useFactory: async (optionsFactory: LlmOptionsFactory) =>
            await optionsFactory.createLlmOptions(),
          inject: [useExisting],
        },
      ];
    }
    // No strategy supplied: no options provider is registered.
    return [];
  }
}
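
Consumer wiring follows standard NestJS dynamic-module usage. The option fields below (baseUrl, model) are assumptions, since LlmModuleOptions is defined in a file not shown here:

import { Module } from '@nestjs/common';
import { ConfigModule, ConfigService } from '@nestjs/config';
import { LlmModule } from '@manacore/shared-llm';

// Async registration via useFactory, matching the forRootAsync
// branch above; ConfigService supplies the runtime values.
@Module({
  imports: [
    LlmModule.forRootAsync({
      imports: [ConfigModule],
      useFactory: (config: ConfigService) => ({
        baseUrl: config.get<string>('LLM_URL', 'http://localhost:11434'),
        model: config.get<string>('LLM_MODEL', 'llama3'),
      }),
      inject: [ConfigService],
    }),
  ],
})
export class AppModule {}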