chore(cloud-tier): upgrade default model gemini-2.0-flash → gemini-2.5-flash

gemini-2.0-flash is deprecated on June 1, 2026. gemini-2.5-flash has been
stable since Q1 2026 at similar pricing ($0.15/$0.60 per 1M input/output
tokens vs $0.10/$0.40 — the pricing table already had the entry).
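
For scale, a rough per-request delta (a sketch only; the 10k-in/2k-out
token counts are made up, prices as quoted above per 1M tokens):

    # Hypothetical request: 10k input tokens, 2k output tokens
    old = 10_000 / 1e6 * 0.10 + 2_000 / 1e6 * 0.40  # gemini-2.0-flash: $0.0018
    new = 10_000 / 1e6 * 0.15 + 2_000 / 1e6 * 0.60  # gemini-2.5-flash: $0.0027
    # delta: $0.0009 per request, i.e. 1.5x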

Three files touched:
- packages/shared-llm/src/backends/cloud.ts — client default
- services/mana-llm/src/config.py — server default
- services/mana-llm/src/providers/google.py — Ollama→Gemini fallback
  map + constructor default + deduplicated model list

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Till JS 2026-04-16 12:32:03 +02:00
parent 95e65bbdcb
commit 8a0bf93699
3 changed files with 12 additions and 13 deletions

packages/shared-llm/src/backends/cloud.ts

@@ -27,7 +27,7 @@ export class CloudBackend implements LlmBackend {
   private readonly defaultModel: string;
 
   constructor(opts: CloudBackendOptions = {}) {
-    this.defaultModel = opts.defaultModel ?? 'google/gemini-2.0-flash';
+    this.defaultModel = opts.defaultModel ?? 'google/gemini-2.5-flash';
   }
 
   isAvailable(): boolean {

services/mana-llm/src/config.py

@@ -30,7 +30,7 @@ class Settings(BaseSettings):
 
     # Google Gemini (Fallback provider)
     google_api_key: str | None = None
-    google_default_model: str = "gemini-2.0-flash"
+    google_default_model: str = "gemini-2.5-flash"
 
     # Auto-fallback: Ollama → Google when Ollama is overloaded/down
     auto_fallback_enabled: bool = True
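
Since Settings extends BaseSettings, deployments can still pin a different
model via the environment without code changes — a minimal sketch, assuming
standard pydantic-settings env mapping with no env_prefix (the import path
is a guess from the file location):

    import os
    os.environ["GOOGLE_DEFAULT_MODEL"] = "gemini-2.5-pro"  # per-deploy override

    from config import Settings  # services/mana-llm/src/config.py
    assert Settings().google_default_model == "gemini-2.5-pro"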

services/mana-llm/src/providers/google.py

@@ -29,16 +29,16 @@ logger = logging.getLogger(__name__)
 
 # Model mapping: Ollama model → Google Gemini equivalent
 OLLAMA_TO_GEMINI: dict[str, str] = {
-    "gemma3:4b": "gemini-2.0-flash",
-    "gemma3:12b": "gemini-2.0-flash",
+    "gemma3:4b": "gemini-2.5-flash",
+    "gemma3:12b": "gemini-2.5-flash",
     "gemma3:27b": "gemini-2.5-pro",
-    "llava:7b": "gemini-2.0-flash",  # Gemini has native vision
-    "qwen3-vl:4b": "gemini-2.0-flash",  # vision fallback
-    "qwen2.5-coder:7b": "gemini-2.0-flash",
+    "llava:7b": "gemini-2.5-flash",  # Gemini has native vision
+    "qwen3-vl:4b": "gemini-2.5-flash",  # vision fallback
+    "qwen2.5-coder:7b": "gemini-2.5-flash",
     "qwen2.5-coder:14b": "gemini-2.5-pro",
-    "phi3.5:latest": "gemini-2.0-flash",
-    "ministral-3:3b": "gemini-2.0-flash",
-    "deepseek-ocr:latest": "gemini-2.0-flash",
+    "phi3.5:latest": "gemini-2.5-flash",
+    "ministral-3:3b": "gemini-2.5-flash",
+    "deepseek-ocr:latest": "gemini-2.5-flash",
 }
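
For context, a sketch of how this map is presumably consulted when requests
fall back from Ollama (resolve_fallback_model is a hypothetical helper, not
code from this commit; it only wraps the OLLAMA_TO_GEMINI dict above):

    def resolve_fallback_model(ollama_model: str, default: str = "gemini-2.5-flash") -> str:
        # Known local models map to a Gemini equivalent; anything else
        # falls through to the configured default.
        return OLLAMA_TO_GEMINI.get(ollama_model, default)

    resolve_fallback_model("gemma3:27b")  # -> "gemini-2.5-pro"
    resolve_fallback_model("unknown:1b")  # -> "gemini-2.5-flash"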
@@ -47,7 +47,7 @@ class GoogleProvider(LLMProvider):
     name = "google"
 
-    def __init__(self, api_key: str, default_model: str = "gemini-2.0-flash"):
+    def __init__(self, api_key: str, default_model: str = "gemini-2.5-flash"):
         self.api_key = api_key
         self.default_model = default_model
         self.client = genai.Client(api_key=api_key)
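
Call sites that construct the provider without an explicit model now get
2.5 Flash; pinned call sites are unaffected. A quick sanity check, assuming
the constructor exactly as diffed above:

    provider = GoogleProvider(api_key="...")
    assert provider.default_model == "gemini-2.5-flash"

    pinned = GoogleProvider(api_key="...", default_model="gemini-2.5-pro")
    assert pinned.default_model == "gemini-2.5-pro"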
@@ -219,9 +219,8 @@ class GoogleProvider(LLMProvider):
         """List available Google Gemini models."""
         # Return a static list of commonly used models
         return [
-            ModelInfo(id="google/gemini-2.0-flash", owned_by="google"),
-            ModelInfo(id="google/gemini-2.5-pro", owned_by="google"),
             ModelInfo(id="google/gemini-2.5-flash", owned_by="google"),
+            ModelInfo(id="google/gemini-2.5-pro", owned_by="google"),
         ]
 
     async def embeddings(