From b6486a8a4699c9247ea56fa60639db041e3f413f Mon Sep 17 00:00:00 2001 From: Till JS Date: Wed, 8 Apr 2026 11:59:40 +0200 Subject: [PATCH] =?UTF-8?q?fix(mana-video-gen):=20typo=20in=20get=5Fmodel?= =?UTF-8?q?=5Finfo=20=E2=80=94=20total=5Fmem=20=E2=86=92=20total=5Fmemory?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PyTorch's `torch.cuda.get_device_properties(0)` returns a `_CudaDeviceProperties` object whose memory attribute is `total_memory` (bytes), not `total_mem`. The typo crashed the service immediately at startup because `get_model_info()` is called from the FastAPI lifespan handler, not lazily — uvicorn logged "Application startup failed" before any request could land. Found while installing mana-video-gen on the Windows GPU box (192.168.178.11:3026) for the gpu-video.mana.how Cloudflare route. After the fix the service starts cleanly under the ManaVideoGen scheduled task and responds 200 on /health both on the LAN and via the Cloudflare tunnel. status.mana.how now reports 42/42 — first time ever. Co-Authored-By: Claude Opus 4.6 (1M context) --- services/mana-video-gen/app/ltx_service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/services/mana-video-gen/app/ltx_service.py b/services/mana-video-gen/app/ltx_service.py index 26d3f856f..d411da8c8 100644 --- a/services/mana-video-gen/app/ltx_service.py +++ b/services/mana-video-gen/app/ltx_service.py @@ -68,7 +68,7 @@ def get_model_info() -> dict: gpu_name = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "N/A" vram_gb = ( - round(torch.cuda.get_device_properties(0).total_mem / 1e9, 1) + round(torch.cuda.get_device_properties(0).total_memory / 1e9, 1) if torch.cuda.is_available() else 0 )