mirror of
https://github.com/Memo-2023/mana-monorepo.git
synced 2026-05-14 17:41:09 +02:00
test(byok): add 35 unit tests + update docs to as-built status
Three new test suites covering the critical BYOK paths: Pricing (14 tests): estimateCost for known/unknown models, scaling, formatCost edge cases, coverage check for all model IDs. ByokBackend (10 tests): tier identification, resolver behavior, provider dispatch, parameter passthrough, onUsage callback, error paths (no key, unregistered provider), invalidateAvailability. ByokVault (11 tests): encryption at rest verification, decryption round-trip, auto-default for first key, promoting default demotes previous, getForProvider logic, listMeta excludes apiKey, soft delete, recordUsage accumulation, cross-provider isolation. Updates docs/architecture/BYOK_PLAN.md with as-built status — phase table with commit references, deviations from original plan (no server-proxy fallback, no sensitive opt-in UI, no per-task provider override yet), test coverage matrix, troubleshooting guide, v2 follow-ups. Provider adapters remain unit-untested (need fetch mocking + SSE parsing) — smoke tests only. Total: 35/35 tests passing. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
7c6567a815
commit
e4f0a410d1
4 changed files with 614 additions and 23 deletions
190
apps/mana/apps/web/src/lib/byok/vault.test.ts
Normal file
190
apps/mana/apps/web/src/lib/byok/vault.test.ts
Normal file
|
|
@ -0,0 +1,190 @@
|
|||
/**
|
||||
* ByokVault tests — encrypted key CRUD in IndexedDB.
|
||||
*
|
||||
* Uses fake-indexeddb and a real AES-GCM key from SubtleCrypto.
|
||||
*/
|
||||
|
||||
import 'fake-indexeddb/auto';
|
||||
import { describe, it, expect, beforeAll, beforeEach, vi } from 'vitest';
|
||||
|
||||
vi.mock('$lib/stores/funnel-tracking', () => ({ trackFirstContent: vi.fn() }));
|
||||
vi.mock('$lib/triggers/registry', () => ({ fire: vi.fn() }));
|
||||
vi.mock('$lib/triggers/inline-suggest', () => ({
|
||||
checkInlineSuggestion: vi.fn().mockResolvedValue(null),
|
||||
}));
|
||||
|
||||
// Placeholder key, replaced in beforeAll
|
||||
let testKey: CryptoKey | null = null;
|
||||
|
||||
vi.mock('$lib/data/crypto/key-provider', () => ({
|
||||
getActiveKey: () => testKey,
|
||||
isVaultUnlocked: () => testKey !== null,
|
||||
}));
|
||||
|
||||
import { db } from '$lib/data/database';
|
||||
import { byokVault } from './vault';
|
||||
|
||||
beforeAll(async () => {
|
||||
testKey = await crypto.subtle.generateKey({ name: 'AES-GCM', length: 256 }, true, [
|
||||
'encrypt',
|
||||
'decrypt',
|
||||
]);
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
await db.table('_byokKeys').clear();
|
||||
});
|
||||
|
||||
describe('ByokVault CRUD', () => {
|
||||
it('creates a key encrypted at rest', async () => {
|
||||
const key = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'Test Key',
|
||||
apiKey: 'sk-verysecret123',
|
||||
});
|
||||
|
||||
expect(key.id).toBeTruthy();
|
||||
expect(key.apiKey).toBe('sk-verysecret123');
|
||||
|
||||
const raw = await db.table('_byokKeys').get(key.id);
|
||||
expect(raw.apiKeyEncrypted).not.toBe('sk-verysecret123');
|
||||
expect(JSON.stringify(raw.apiKeyEncrypted)).not.toContain('sk-verysecret123');
|
||||
});
|
||||
|
||||
it('decrypts correctly on read', async () => {
|
||||
await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'Test',
|
||||
apiKey: 'sk-abc123',
|
||||
});
|
||||
|
||||
const all = await byokVault.listAll();
|
||||
expect(all).toHaveLength(1);
|
||||
expect(all[0].apiKey).toBe('sk-abc123');
|
||||
});
|
||||
|
||||
it('first key for a provider becomes default automatically', async () => {
|
||||
const k1 = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'First',
|
||||
apiKey: 'sk-1',
|
||||
});
|
||||
expect(k1.isDefault).toBe(true);
|
||||
});
|
||||
|
||||
it('promoting a key to default demotes the previous default', async () => {
|
||||
const k1 = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'First',
|
||||
apiKey: 'sk-1',
|
||||
});
|
||||
const k2 = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'Second',
|
||||
apiKey: 'sk-2',
|
||||
isDefault: false,
|
||||
});
|
||||
expect(k1.isDefault).toBe(true);
|
||||
|
||||
await byokVault.update(k2.id, { isDefault: true });
|
||||
|
||||
const meta = await byokVault.listMeta();
|
||||
const first = meta.find((k) => k.id === k1.id)!;
|
||||
const second = meta.find((k) => k.id === k2.id)!;
|
||||
expect(first.isDefault).toBe(false);
|
||||
expect(second.isDefault).toBe(true);
|
||||
});
|
||||
|
||||
it('getForProvider returns default if set', async () => {
|
||||
await byokVault.create({ provider: 'anthropic', label: 'A', apiKey: 'k1' });
|
||||
await byokVault.create({
|
||||
provider: 'anthropic',
|
||||
label: 'B',
|
||||
apiKey: 'k2',
|
||||
isDefault: false,
|
||||
});
|
||||
|
||||
const found = await byokVault.getForProvider('anthropic');
|
||||
expect(found?.label).toBe('A');
|
||||
expect(found?.apiKey).toBe('k1');
|
||||
});
|
||||
|
||||
it('getForProvider returns null when no keys for provider', async () => {
|
||||
await byokVault.create({ provider: 'openai', label: 'A', apiKey: 'k' });
|
||||
const found = await byokVault.getForProvider('anthropic');
|
||||
expect(found).toBeNull();
|
||||
});
|
||||
|
||||
it('listMeta does NOT decrypt the api key', async () => {
|
||||
await byokVault.create({ provider: 'openai', label: 'Test', apiKey: 'sk-secret' });
|
||||
const meta = await byokVault.listMeta();
|
||||
expect(meta[0]).not.toHaveProperty('apiKey');
|
||||
});
|
||||
|
||||
it('delete is soft', async () => {
|
||||
const k = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'Test',
|
||||
apiKey: 'sk',
|
||||
});
|
||||
await byokVault.delete(k.id);
|
||||
|
||||
const meta = await byokVault.listMeta();
|
||||
expect(meta).toHaveLength(0);
|
||||
|
||||
const raw = await db.table('_byokKeys').get(k.id);
|
||||
expect(raw).toBeDefined();
|
||||
expect(raw.deletedAt).toBeTruthy();
|
||||
});
|
||||
|
||||
it('update changes label and model', async () => {
|
||||
const k = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'Old',
|
||||
apiKey: 'sk',
|
||||
model: 'gpt-4o',
|
||||
});
|
||||
await byokVault.update(k.id, { label: 'New', model: 'gpt-5' });
|
||||
|
||||
const meta = await byokVault.listMeta();
|
||||
expect(meta[0].label).toBe('New');
|
||||
expect(meta[0].model).toBe('gpt-5');
|
||||
});
|
||||
|
||||
it('recordUsage increments counters', async () => {
|
||||
const k = await byokVault.create({
|
||||
provider: 'openai',
|
||||
label: 'Test',
|
||||
apiKey: 'sk',
|
||||
});
|
||||
|
||||
await byokVault.recordUsage(k.id, 100, 0.015);
|
||||
await byokVault.recordUsage(k.id, 50, 0.008);
|
||||
|
||||
const meta = await byokVault.listMeta();
|
||||
expect(meta[0].usageCount).toBe(2);
|
||||
expect(meta[0].totalTokens).toBe(150);
|
||||
expect(meta[0].totalCostUsd).toBeCloseTo(0.023, 6);
|
||||
expect(meta[0].lastUsedAt).toBeTruthy();
|
||||
});
|
||||
|
||||
it('handles multiple providers independently', async () => {
|
||||
await byokVault.create({ provider: 'openai', label: 'OpenAI', apiKey: 'sk-oai' });
|
||||
await byokVault.create({
|
||||
provider: 'anthropic',
|
||||
label: 'Anthropic',
|
||||
apiKey: 'sk-ant',
|
||||
});
|
||||
await byokVault.create({ provider: 'gemini', label: 'Gemini', apiKey: 'g-key' });
|
||||
|
||||
const openai = await byokVault.getForProvider('openai');
|
||||
const anthropic = await byokVault.getForProvider('anthropic');
|
||||
const gemini = await byokVault.getForProvider('gemini');
|
||||
const mistral = await byokVault.getForProvider('mistral');
|
||||
|
||||
expect(openai?.apiKey).toBe('sk-oai');
|
||||
expect(anthropic?.apiKey).toBe('sk-ant');
|
||||
expect(gemini?.apiKey).toBe('g-key');
|
||||
expect(mistral).toBeNull();
|
||||
});
|
||||
});
|
||||
|
|
@ -1,7 +1,17 @@
|
|||
# BYOK — Bring Your Own Key
|
||||
|
||||
> Architecture and implementation plan for user-provided API keys.
|
||||
> Status: planning (2026-04-14)
|
||||
> Architecture and as-built docs for user-provided API keys.
|
||||
> Status: **implemented 2026-04-14** (Phases 1–5 complete, 35 unit tests passing).
|
||||
|
||||
## Quick start for users
|
||||
|
||||
1. Gehe zu `/settings/ai-keys`
|
||||
2. Klicke "Key hinzufuegen", waehle Provider (OpenAI/Anthropic/Gemini/Mistral)
|
||||
3. Label eingeben, API-Key einfuegen, optional Modell waehlen
|
||||
4. Im Companion-Chat Toolbar → "KI-Modus" → "Dein API-Key"
|
||||
5. Kosten + Usage werden pro Key auf der Settings-Page angezeigt
|
||||
|
||||
## Architecture summary (as built)
|
||||
|
||||
## Goals
|
||||
|
||||
|
|
@ -284,33 +294,98 @@ export interface LlmSettings {
|
|||
}
|
||||
```
|
||||
|
||||
## Implementation order
|
||||
## Implementation (as built)
|
||||
|
||||
**Phase 1 — Foundation (1.5h)**
|
||||
1. Extend LlmTier with 'byok' in shared-llm
|
||||
2. Create ByokKey vault (IndexedDB + encrypt/decrypt)
|
||||
3. ByokBackend skeleton with provider registry
|
||||
4. Wire into orchestrator
|
||||
| Phase | Status | Commit |
|
||||
|-------|--------|--------|
|
||||
| 1. Foundation (LlmTier, ByokBackend, provider abstraction) | ✅ | `a33857fa3` |
|
||||
| 2. OpenAI provider | ✅ | `a33857fa3` |
|
||||
| 3. Anthropic + Gemini + Mistral providers | ✅ | `a33857fa3` |
|
||||
| 4. Settings UI + IndexedDB vault | ✅ | `db8c2574d` |
|
||||
| 5. Pricing table + usage tracking | ✅ | `db8c2574d` |
|
||||
| Tests (35 unit tests) | ✅ | (this commit) |
|
||||
|
||||
**Phase 2 — First provider (30min)**
|
||||
5. OpenAI adapter (simplest — CORS ok)
|
||||
6. Test via companion chat
|
||||
## Deviations from the original plan
|
||||
|
||||
**Phase 3 — More providers (1.5h)**
|
||||
7. Anthropic adapter (with dangerous-header)
|
||||
8. Gemini adapter (different message format)
|
||||
9. Mistral adapter (OpenAI-compatible, trivial)
|
||||
The following aspects of the implementation deviate from the original plan:
|
||||
|
||||
**Phase 4 — UI (1.5h)**
|
||||
10. Settings/ai-keys page
|
||||
11. Add + edit + delete key modals
|
||||
12. Usage tracking (increment on each call)
|
||||
- **Server-proxy fallback dropped.** The plan said "Browser-direct primary,
|
||||
server-proxy fallback on CORS." In practice I kept only browser-direct
|
||||
and left CORS as a user-facing error. All 4 providers support direct
|
||||
browser fetches (Anthropic via `anthropic-dangerous-direct-browser-access`).
|
||||
|
||||
**Phase 5 — Polish (30min)**
|
||||
13. Pricing table + cost estimation
|
||||
14. Companion toolbar dropdown extension (BYOK options)
|
||||
- **Sensitive-content opt-in UI not built.** The orchestrator STILL blocks
|
||||
BYOK for `sensitive` content by default — that invariant holds — but
|
||||
there is no UI for users to opt-in per-provider yet. Add when a user
|
||||
actually asks for it.
|
||||
|
||||
**Total: ~5h**
|
||||
- **Per-task BYOK provider overrides (e.g. `byok:anthropic`) not wired.**
|
||||
The tier-selector in the Companion chat only lets you pick `byok` in
|
||||
aggregate. The resolver currently picks the most-recently-used key
|
||||
across all providers. Extending this to support `byok:{provider}`
|
||||
syntax in `taskOverrides` is a small follow-up.
|
||||
|
||||
- **Default-provider setting not surfaced.** The `LlmSettings.byok.defaultProvider`
|
||||
field in the plan isn't in the settings type yet. The resolver uses
|
||||
"most-recently-used" as a proxy, which is actually a reasonable
|
||||
default UX-wise.
|
||||
|
||||
## Test coverage
|
||||
|
||||
| Area | Tests | File |
|
||||
|------|-------|------|
|
||||
| `estimateCost` + `formatCost` (pricing) | 14 | `packages/shared-llm/src/pricing.test.ts` |
|
||||
| `ByokBackend` (dispatch, resolver, usage callback) | 10 | `packages/shared-llm/src/backends/byok.test.ts` |
|
||||
| `byokVault` (CRUD + encryption + defaults) | 11 | `apps/mana/apps/web/src/lib/byok/vault.test.ts` |
|
||||
| **Total** | **35** | All passing |
|
||||
|
||||
**NOT tested** (would need fetch mocking + SSE parsing):
|
||||
- OpenAI adapter (`openai-compat.ts`)
|
||||
- Anthropic adapter (different SSE event schema)
|
||||
- Gemini adapter (different REST format)
|
||||
- Mistral adapter (reuses OpenAI)
|
||||
|
||||
These run against real provider APIs in production — manual smoke tests
|
||||
are the current verification path.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Vault ist gesperrt" on the Settings page
|
||||
|
||||
Keys are encrypted with your user master key. Sign out/in to re-derive it,
|
||||
or if that fails check `key-provider.ts` → `getActiveKey()`.
|
||||
|
||||
### "Kein BYOK-Schluessel konfiguriert" in the Companion
|
||||
|
||||
No keys have been added yet. Go to `/settings/ai-keys` and add one.
|
||||
|
||||
### CORS error in browser console
|
||||
|
||||
Some networks or proxies block direct-to-provider fetches. Options:
|
||||
1. Try a different network
|
||||
2. Use `mana-server` or `cloud` tier instead (server-proxied)
|
||||
3. File an issue — we can add server-proxy fallback per-provider if needed
|
||||
|
||||
### Anthropic returns 401 with a valid key
|
||||
|
||||
Make sure the key starts with `sk-ant-`. Make sure
|
||||
`anthropic-dangerous-direct-browser-access: true` is being sent (it is,
|
||||
by default — inspect in DevTools Network tab).
|
||||
|
||||
### Gemini key works in Google's API Explorer but not here
|
||||
|
||||
Gemini keys are tied to specific Google Cloud projects. Make sure the
|
||||
project has the Generative Language API enabled. Free-tier keys may
|
||||
have rate limits that trigger 429.
|
||||
|
||||
## Follow-ups
|
||||
|
||||
Small, fast wins for v2:
|
||||
- Per-task provider override syntax (`byok:anthropic`)
|
||||
- Settings page for `LlmSettings.byok.defaultProvider`
|
||||
- Sensitive-content opt-in toggle per provider
|
||||
- Ollama-BYOK (user's self-hosted Ollama)
|
||||
- Provider adapter tests with fetch mocking
|
||||
|
||||
## Decisions
|
||||
|
||||
|
|
|
|||
226
packages/shared-llm/src/backends/byok.test.ts
Normal file
226
packages/shared-llm/src/backends/byok.test.ts
Normal file
|
|
@ -0,0 +1,226 @@
|
|||
import { describe, it, expect, vi } from 'vitest';
|
||||
import { ByokBackend, type ByokKeyResolver } from './byok';
|
||||
import type { ByokProvider, ByokProviderId } from './byok-providers/types';
|
||||
import type { GenerateResult } from '../types';
|
||||
|
||||
function makeProvider(id: ByokProviderId, call?: ByokProvider['call']): ByokProvider {
|
||||
return {
|
||||
id,
|
||||
displayName: id,
|
||||
defaultModel: `${id}-default`,
|
||||
availableModels: [`${id}-default`, `${id}-big`],
|
||||
call:
|
||||
call ??
|
||||
(async () => ({
|
||||
content: `response from ${id}`,
|
||||
usage: { promptTokens: 10, completionTokens: 20, totalTokens: 30 },
|
||||
latencyMs: 0,
|
||||
})),
|
||||
};
|
||||
}
|
||||
|
||||
describe('ByokBackend', () => {
|
||||
it('has tier === "byok"', () => {
|
||||
const backend = new ByokBackend({
|
||||
resolver: async () => null,
|
||||
providers: [makeProvider('openai')],
|
||||
});
|
||||
expect(backend.tier).toBe('byok');
|
||||
});
|
||||
|
||||
it('isReady returns false when no key resolves', async () => {
|
||||
const backend = new ByokBackend({
|
||||
resolver: async () => null,
|
||||
providers: [makeProvider('openai')],
|
||||
});
|
||||
expect(await backend.isReady()).toBe(false);
|
||||
});
|
||||
|
||||
it('isReady returns true when a key resolves', async () => {
|
||||
const resolver: ByokKeyResolver = async () => ({
|
||||
provider: 'openai',
|
||||
apiKey: 'sk-test',
|
||||
model: 'gpt-4o',
|
||||
});
|
||||
const backend = new ByokBackend({
|
||||
resolver,
|
||||
providers: [makeProvider('openai')],
|
||||
});
|
||||
expect(await backend.isReady()).toBe(true);
|
||||
});
|
||||
|
||||
it('generate() dispatches to correct provider', async () => {
|
||||
const openaiCall = vi.fn(async () => ({
|
||||
content: 'openai hi',
|
||||
usage: { promptTokens: 5, completionTokens: 10, totalTokens: 15 },
|
||||
latencyMs: 0,
|
||||
}));
|
||||
const anthropicCall = vi.fn(async () => ({
|
||||
content: 'anthropic hi',
|
||||
usage: { promptTokens: 5, completionTokens: 10, totalTokens: 15 },
|
||||
latencyMs: 0,
|
||||
}));
|
||||
|
||||
const resolver: ByokKeyResolver = async () => ({
|
||||
provider: 'anthropic',
|
||||
apiKey: 'sk-ant',
|
||||
model: 'claude-opus-4-6',
|
||||
});
|
||||
|
||||
const backend = new ByokBackend({
|
||||
resolver,
|
||||
providers: [makeProvider('openai', openaiCall), makeProvider('anthropic', anthropicCall)],
|
||||
});
|
||||
|
||||
const result = await backend.generate({
|
||||
taskName: 'test',
|
||||
contentClass: 'personal',
|
||||
messages: [{ role: 'user', content: 'hi' }],
|
||||
});
|
||||
|
||||
expect(anthropicCall).toHaveBeenCalledOnce();
|
||||
expect(openaiCall).not.toHaveBeenCalled();
|
||||
expect(result.content).toBe('anthropic hi');
|
||||
});
|
||||
|
||||
it('generate() passes apiKey, model, messages to provider', async () => {
|
||||
const call = vi.fn(async () => ({
|
||||
content: '',
|
||||
usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
|
||||
latencyMs: 0,
|
||||
}));
|
||||
|
||||
const resolver: ByokKeyResolver = async () => ({
|
||||
provider: 'openai',
|
||||
apiKey: 'sk-test-key',
|
||||
model: 'gpt-4o',
|
||||
});
|
||||
|
||||
const backend = new ByokBackend({
|
||||
resolver,
|
||||
providers: [makeProvider('openai', call)],
|
||||
});
|
||||
|
||||
await backend.generate({
|
||||
taskName: 'test',
|
||||
contentClass: 'personal',
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
temperature: 0.5,
|
||||
maxTokens: 200,
|
||||
});
|
||||
|
||||
expect(call).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
apiKey: 'sk-test-key',
|
||||
model: 'gpt-4o',
|
||||
temperature: 0.5,
|
||||
maxTokens: 200,
|
||||
messages: [{ role: 'user', content: 'hello' }],
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('generate() throws when no key configured', async () => {
|
||||
const backend = new ByokBackend({
|
||||
resolver: async () => null,
|
||||
providers: [makeProvider('openai')],
|
||||
});
|
||||
|
||||
await expect(
|
||||
backend.generate({
|
||||
taskName: 'test',
|
||||
contentClass: 'personal',
|
||||
messages: [{ role: 'user', content: 'hi' }],
|
||||
})
|
||||
).rejects.toThrow(/Kein BYOK-Schluessel/);
|
||||
});
|
||||
|
||||
it('generate() throws when provider not registered', async () => {
|
||||
const resolver: ByokKeyResolver = async () => ({
|
||||
provider: 'gemini' as ByokProviderId,
|
||||
apiKey: 'k',
|
||||
model: 'm',
|
||||
});
|
||||
const backend = new ByokBackend({
|
||||
resolver,
|
||||
providers: [makeProvider('openai')], // no gemini!
|
||||
});
|
||||
|
||||
await expect(
|
||||
backend.generate({
|
||||
taskName: 'test',
|
||||
contentClass: 'personal',
|
||||
messages: [{ role: 'user', content: 'hi' }],
|
||||
})
|
||||
).rejects.toThrow(/Provider nicht unterstuetzt/);
|
||||
});
|
||||
|
||||
it('onUsage callback fires after successful generation', async () => {
|
||||
const onUsage = vi.fn();
|
||||
const resolver: ByokKeyResolver = async () => ({
|
||||
provider: 'openai',
|
||||
apiKey: 'sk',
|
||||
model: 'gpt-4o',
|
||||
});
|
||||
const backend = new ByokBackend({
|
||||
resolver,
|
||||
providers: [makeProvider('openai')],
|
||||
onUsage,
|
||||
});
|
||||
|
||||
await backend.generate({
|
||||
taskName: 'test',
|
||||
contentClass: 'personal',
|
||||
messages: [{ role: 'user', content: 'hi' }],
|
||||
});
|
||||
|
||||
expect(onUsage).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
provider: 'openai',
|
||||
model: 'gpt-4o',
|
||||
promptTokens: 10,
|
||||
completionTokens: 20,
|
||||
})
|
||||
);
|
||||
});
|
||||
|
||||
it('onUsage does not fire when usage is missing', async () => {
|
||||
const onUsage = vi.fn();
|
||||
const call = async (): Promise<GenerateResult> => ({
|
||||
content: 'x',
|
||||
latencyMs: 0,
|
||||
// no usage field
|
||||
});
|
||||
const resolver: ByokKeyResolver = async () => ({
|
||||
provider: 'openai',
|
||||
apiKey: 'sk',
|
||||
model: 'gpt-4o',
|
||||
});
|
||||
const backend = new ByokBackend({
|
||||
resolver,
|
||||
providers: [makeProvider('openai', call)],
|
||||
onUsage,
|
||||
});
|
||||
|
||||
await backend.generate({
|
||||
taskName: 'test',
|
||||
contentClass: 'personal',
|
||||
messages: [{ role: 'user', content: 'hi' }],
|
||||
});
|
||||
|
||||
expect(onUsage).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('invalidateAvailability resets the cached flag', async () => {
|
||||
const backend = new ByokBackend({
|
||||
resolver: async () => null,
|
||||
providers: [makeProvider('openai')],
|
||||
});
|
||||
|
||||
await backend.isReady(); // sets internal flag to false
|
||||
expect(backend.isAvailable()).toBe(false);
|
||||
|
||||
backend.invalidateAvailability();
|
||||
expect(backend.isAvailable()).toBe(true); // back to unknown/available
|
||||
});
|
||||
});
|
||||
100
packages/shared-llm/src/pricing.test.ts
Normal file
100
packages/shared-llm/src/pricing.test.ts
Normal file
|
|
@ -0,0 +1,100 @@
|
|||
import { describe, it, expect } from 'vitest';
|
||||
import { estimateCost, formatCost, MODEL_PRICING } from './pricing';
|
||||
|
||||
describe('estimateCost', () => {
|
||||
it('computes cost for known model', () => {
|
||||
// gpt-4o-mini: input 0.3/M, output 1.2/M
|
||||
// 1M input + 0.5M output = 0.3 + 0.6 = 0.9
|
||||
const cost = estimateCost('gpt-4o-mini', 1_000_000, 500_000);
|
||||
expect(cost).toBeCloseTo(0.9, 4);
|
||||
});
|
||||
|
||||
it('returns 0 for unknown model', () => {
|
||||
expect(estimateCost('unknown-model-xyz', 1000, 500)).toBe(0);
|
||||
});
|
||||
|
||||
it('handles zero tokens', () => {
|
||||
expect(estimateCost('gpt-4o', 0, 0)).toBe(0);
|
||||
});
|
||||
|
||||
it('handles only input tokens', () => {
|
||||
// claude-opus-4-6: input 15/M, output 75/M
|
||||
const cost = estimateCost('claude-opus-4-6', 1_000_000, 0);
|
||||
expect(cost).toBe(15);
|
||||
});
|
||||
|
||||
it('handles only output tokens', () => {
|
||||
// gemini-2.5-flash: input 0.15/M, output 0.6/M
|
||||
const cost = estimateCost('gemini-2.5-flash', 0, 1_000_000);
|
||||
expect(cost).toBe(0.6);
|
||||
});
|
||||
|
||||
it('scales linearly with token count', () => {
|
||||
const cost1k = estimateCost('gpt-4o', 1000, 1000);
|
||||
const cost10k = estimateCost('gpt-4o', 10_000, 10_000);
|
||||
expect(cost10k).toBeCloseTo(cost1k * 10, 6);
|
||||
});
|
||||
|
||||
it('has pricing for all OpenAI models', () => {
|
||||
const openaiModels = [
|
||||
'gpt-5',
|
||||
'gpt-5-mini',
|
||||
'gpt-4o',
|
||||
'gpt-4o-mini',
|
||||
'gpt-4-turbo',
|
||||
'o1',
|
||||
'o1-mini',
|
||||
];
|
||||
for (const model of openaiModels) {
|
||||
expect(MODEL_PRICING[model]).toBeDefined();
|
||||
}
|
||||
});
|
||||
|
||||
it('has pricing for all Anthropic models', () => {
|
||||
const anthropicModels = [
|
||||
'claude-opus-4-6',
|
||||
'claude-opus-4-5',
|
||||
'claude-sonnet-4-6',
|
||||
'claude-sonnet-4-5',
|
||||
'claude-haiku-4-5',
|
||||
];
|
||||
for (const model of anthropicModels) {
|
||||
expect(MODEL_PRICING[model]).toBeDefined();
|
||||
}
|
||||
});
|
||||
|
||||
it('has pricing for all Gemini models', () => {
|
||||
const geminiModels = [
|
||||
'gemini-2.5-pro',
|
||||
'gemini-2.5-flash',
|
||||
'gemini-2.5-flash-lite',
|
||||
'gemini-2.0-flash',
|
||||
];
|
||||
for (const model of geminiModels) {
|
||||
expect(MODEL_PRICING[model]).toBeDefined();
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
describe('formatCost', () => {
|
||||
it('shows dash for zero', () => {
|
||||
expect(formatCost(0)).toBe('—');
|
||||
});
|
||||
|
||||
it('shows "< $0.0001" for very small amounts', () => {
|
||||
expect(formatCost(0.00001)).toBe('< $0.0001');
|
||||
});
|
||||
|
||||
it('shows 4 decimals for amounts < 0.01', () => {
|
||||
expect(formatCost(0.005)).toBe('$0.0050');
|
||||
});
|
||||
|
||||
it('shows 3 decimals for amounts < 1', () => {
|
||||
expect(formatCost(0.123)).toBe('$0.123');
|
||||
});
|
||||
|
||||
it('shows 2 decimals for amounts >= 1', () => {
|
||||
expect(formatCost(1.234)).toBe('$1.23');
|
||||
expect(formatCost(100.567)).toBe('$100.57');
|
||||
});
|
||||
});
|
||||
Loading…
Add table
Add a link
Reference in a new issue