mirror of
https://github.com/Memo-2023/mana-monorepo.git
synced 2026-05-15 22:39:40 +02:00
Python/FastAPI service providing unified OpenAI-compatible API for Ollama and cloud LLM providers (OpenRouter, Groq, Together). Features: - Chat completions with streaming (SSE) - Vision/multimodal support - Embeddings generation - Multi-provider routing (provider/model format) - Prometheus metrics - Optional Redis caching
38 lines
784 B
TOML
# Build backend: hatchling (PEP 517). Listed first per conventional
# pyproject.toml section order: [build-system], [project], [project.*], [tool.*].
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project]
name = "mana-llm"
version = "0.1.0"
description = "Central LLM abstraction service for Ollama and OpenAI-compatible APIs"
requires-python = ">=3.11"
# Runtime dependencies (PEP 508 specifiers), sorted alphabetically.
dependencies = [
    "fastapi>=0.115.0",
    "httpx>=0.28.0",
    "prometheus-client>=0.21.0",
    "pydantic>=2.10.0",
    "pydantic-settings>=2.6.0",
    "redis>=5.2.0",
    "sse-starlette>=2.2.0",
    "uvicorn[standard]>=0.32.0",
]

[project.optional-dependencies]
# Development/test tooling: install with `pip install -e ".[dev]"`.
dev = [
    "pytest>=8.3.0",
    "pytest-asyncio>=0.24.0",
    "pytest-httpx>=0.35.0",
    "ruff>=0.8.0",
]

[tool.ruff]
line-length = 100
# NOTE(review): keep target-version aligned with requires-python above.
target-version = "py311"

[tool.ruff.lint]
# E/W = pycodestyle, F = pyflakes, I = isort import ordering.
select = ["E", "F", "I", "W"]

[tool.pytest.ini_options]
# auto mode: async test functions are collected without per-test markers.
asyncio_mode = "auto"
testpaths = ["tests"]