The Windows GPU server has been the actual production home for these
services for some time, and the running code there has drifted ahead of
the repo. This sync pulls the live versions back into the repo so the
Windows box is no longer the only place those changes exist.
Pulled from C:\mana\services\* on mana-server-gpu (192.168.178.11):
mana-llm:
- src/main.py, src/config.py — small fixes (auth wiring, config tweaks)
- src/api_auth.py — NEW (cross-service GPU_API_KEY validator; a rough
  sketch follows the list below)
- service.pyw — Windows runner used by the ManaLLM scheduled task
(sets up logging redirect, loads .env, calls uvicorn)
mana-stt:
- app/main.py — substantial cleanup (684→392 lines), drops the
whisperx-as-separate-backend branching now that whisper_service.py
rolls whisperx in directly
- app/whisper_service.py — full CUDA + whisperx rewrite (158→358 lines)
- app/auth.py + external_auth.py — significantly expanded auth
- app/vram_manager.py — NEW (shared VRAM accounting helper)
- service.pyw — Windows runner with CUDA pre-init, FFmpeg PATH
injection, .env loading
- removed: app/whisper_service_cuda.py (folded into whisper_service.py)
- removed: app/whisperx_service.py (folded into whisper_service.py)
mana-tts:
- app/auth.py, external_auth.py — same auth expansion as stt
- app/f5_service.py, kokoro_service.py — Windows tweaks
- app/vram_manager.py — NEW (same shared helper as stt)
- service.pyw — Windows runner
mana-video-gen:
- service.pyw — Windows runner (no other changes; the .py code on the
GPU box is byte-identical to what's already in the repo)
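The api_auth.py validator itself isn't reproduced here; as a rough sketch
of the shape it takes (assuming the FastAPI/uvicorn stack these services
already run on, with hypothetical names), a cross-service key check is
just a dependency that compares a request header against the shared
GPU_API_KEY env var:

    # Hypothetical sketch, not the actual api_auth.py. Assumes a FastAPI
    # app and a GPU_API_KEY environment variable shared across services.
    import os
    import secrets

    from fastapi import Header, HTTPException

    GPU_API_KEY = os.getenv("GPU_API_KEY", "")

    async def require_gpu_api_key(x_api_key: str = Header(default="")):
        # compare_digest avoids leaking key contents via timing
        if not GPU_API_KEY or not secrets.compare_digest(x_api_key, GPU_API_KEY):
            raise HTTPException(status_code=401, detail="Invalid or missing API key")

Each service would then attach this via Depends(require_gpu_api_key) on
its protected routes.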
The service.pyw files contain absolute Windows paths
(C:\mana\services\<svc>) and a hardcoded FFmpeg PATH for the tills user
profile. Kept as-is intentionally — they exist to be deployed to that
one machine and any abstraction layer would just hide what's actually
happening. Anyone redeploying to a different layout will need to edit
the path strings, which is a known and obvious change.
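For orientation, the runner pattern the service.pyw files follow is
roughly this (a minimal sketch of the behavior described above, not the
literal file contents; the path, module string, and port are
illustrative):

    # Sketch of the service.pyw pattern; paths, module path, and port
    # are illustrative, not the real values.
    import os
    import sys

    SERVICE_DIR = r"C:\mana\services\mana-llm"  # hardcoded per machine, as noted
    os.chdir(SERVICE_DIR)
    sys.path.insert(0, SERVICE_DIR)

    # Logging redirect: pythonw.exe has no console, so send output to a file
    log = open(os.path.join(SERVICE_DIR, "service.log"), "a", buffering=1)
    sys.stdout = sys.stderr = log

    # Load .env before the app imports its config
    from dotenv import load_dotenv
    load_dotenv(os.path.join(SERVICE_DIR, ".env"))

    import uvicorn
    uvicorn.run("src.main:app", host="0.0.0.0", port=8000)

    # (the mana-stt/tts variants additionally prepend an FFmpeg directory
    # to PATH and pre-initialize CUDA before starting uvicorn)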
Mac-Mini infrastructure for these services (launchd plists, install
scripts, scripts/mac-mini/setup-{stt,tts}.sh, the Mac-flux2c image-gen
implementation) is still on disk and will be removed in a follow-up
commit, along with replacing mana-image-gen with the Windows
diffusers+CUDA implementation. This commit is just the live-code sync.
app/vram_manager.py (114 lines, Python):
"""
|
|
VRAM Manager — Automatic model unloading after idle timeout.
|
|
|
|
Tracks last usage time per model and unloads after configurable timeout.
|
|
Designed for shared GPU environments (multiple services on one RTX 3090).
|
|
|
|
Usage in a service:
|
|
from vram_manager import VramManager
|
|
|
|
vram = VramManager(idle_timeout=300) # 5 min
|
|
|
|
# Before using a model
|
|
vram.touch()
|
|
|
|
# Call periodically (e.g., from health check or background task)
|
|
vram.check_idle(unload_fn=my_unload_function)
|
|
"""
|
|
|
|
import os
|
|
import time
|
|
import logging
|
|
import threading
|
|
from typing import Optional, Callable
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
DEFAULT_IDLE_TIMEOUT = int(os.getenv("VRAM_IDLE_TIMEOUT", "300")) # 5 minutes
|
|
|
|
|
|
class VramManager:
|
|
def __init__(self, idle_timeout: int = DEFAULT_IDLE_TIMEOUT, service_name: str = "unknown"):
|
|
self.idle_timeout = idle_timeout
|
|
self.service_name = service_name
|
|
self.last_used: float = 0.0
|
|
self.model_loaded: bool = False
|
|
self._lock = threading.Lock()
|
|
self._timer: Optional[threading.Timer] = None
|
|
|
|
def touch(self):
|
|
"""Mark the model as recently used. Call before/after each inference."""
|
|
with self._lock:
|
|
self.last_used = time.time()
|
|
self.model_loaded = True
|
|
self._schedule_check()
|
|
|
|
def mark_loaded(self):
|
|
"""Mark that a model has been loaded into VRAM."""
|
|
with self._lock:
|
|
self.model_loaded = True
|
|
self.last_used = time.time()
|
|
self._schedule_check()
|
|
logger.info(f"[{self.service_name}] Model loaded, idle timeout: {self.idle_timeout}s")
|
|
|
|
def mark_unloaded(self):
|
|
"""Mark that a model has been unloaded from VRAM."""
|
|
with self._lock:
|
|
self.model_loaded = False
|
|
if self._timer:
|
|
self._timer.cancel()
|
|
self._timer = None
|
|
logger.info(f"[{self.service_name}] Model unloaded, VRAM freed")
|
|
|
|
def is_idle(self) -> bool:
|
|
"""Check if the model has been idle longer than the timeout."""
|
|
if not self.model_loaded:
|
|
return False
|
|
return (time.time() - self.last_used) > self.idle_timeout
|
|
|
|
def seconds_until_unload(self) -> Optional[float]:
|
|
"""Seconds until the model will be unloaded, or None if not loaded."""
|
|
if not self.model_loaded:
|
|
return None
|
|
remaining = self.idle_timeout - (time.time() - self.last_used)
|
|
return max(0, remaining)
|
|
|
|
def check_and_unload(self, unload_fn: Callable[[], None]) -> bool:
|
|
"""Check if idle and unload if so. Returns True if unloaded."""
|
|
if self.is_idle():
|
|
logger.info(f"[{self.service_name}] Idle for >{self.idle_timeout}s, unloading model...")
|
|
try:
|
|
unload_fn()
|
|
self.mark_unloaded()
|
|
return True
|
|
except Exception as e:
|
|
logger.error(f"[{self.service_name}] Failed to unload: {e}")
|
|
return False
|
|
|
|
def _schedule_check(self):
|
|
"""Schedule an idle check after the timeout period."""
|
|
if self._timer:
|
|
self._timer.cancel()
|
|
|
|
self._timer = threading.Timer(
|
|
self.idle_timeout + 5, # Small buffer
|
|
self._auto_check,
|
|
)
|
|
self._timer.daemon = True
|
|
self._timer.start()
|
|
|
|
def _auto_check(self):
|
|
"""Auto-triggered idle check (called by timer)."""
|
|
# This is just a log — actual unloading needs the unload_fn
|
|
# which depends on the service. The service should call check_and_unload.
|
|
if self.is_idle():
|
|
logger.info(f"[{self.service_name}] Model idle for >{self.idle_timeout}s — ready to unload")
|
|
|
|
def status(self) -> dict:
|
|
"""Get current VRAM manager status."""
|
|
return {
|
|
"model_loaded": self.model_loaded,
|
|
"idle_seconds": round(time.time() - self.last_used, 1) if self.model_loaded else None,
|
|
"idle_timeout": self.idle_timeout,
|
|
"seconds_until_unload": round(self.seconds_until_unload(), 1) if self.model_loaded else None,
|
|
}
|
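As the _auto_check comment says, the timer only logs; the owning service
still has to call check_and_unload with its own unload_fn. A minimal
wiring sketch (hypothetical names, assuming a FastAPI service like the
ones above):

    # Hypothetical wiring sketch, not part of vram_manager.py.
    import asyncio
    from contextlib import asynccontextmanager

    from fastapi import FastAPI
    from vram_manager import VramManager

    vram = VramManager(idle_timeout=300, service_name="mana-stt")

    def unload_model():
        # Service-specific: drop model references and free the CUDA cache here
        ...

    async def idle_watchdog():
        while True:
            await asyncio.sleep(30)  # poll well below the idle timeout
            vram.check_and_unload(unload_fn=unload_model)

    @asynccontextmanager
    async def lifespan(app: FastAPI):
        task = asyncio.create_task(idle_watchdog())
        yield
        task.cancel()

    app = FastAPI(lifespan=lifespan)

    @app.get("/health")
    def health():
        # status() doubles as the health-check payload
        return vram.status()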