from fastapi import APIRouter
import torch
from schemas import GPUStatus

# All routes in this module are mounted under /status and grouped under the
# "status" tag in the generated OpenAPI docs.
router = APIRouter(prefix="/status", tags=["status"])

@router.get("/gpu", response_model=GPUStatus)
async def get_gpu_status():
    """Report whether the first CUDA GPU appears busy.

    Busyness is a simple heuristic: any CUDA memory currently allocated by
    this process counts as "busy". When CUDA is unavailable, reports an
    idle CPU instead.

    Returns:
        GPUStatus: isGpuBusy flag, device name, and a human-readable status
        string ("运行中" = running, "空闲" = idle — these are API-visible
        response values and must not be changed casually).
    """
    # Query availability once; the original called it twice, which is
    # redundant and could in principle disagree between the two calls.
    cuda_available = torch.cuda.is_available()
    if cuda_available:
        # NOTE(review): memory_allocated() reads the *current* device while
        # get_device_name(0) reads device 0 — assumed equivalent on
        # single-GPU hosts; confirm for multi-GPU deployments.
        is_busy = torch.cuda.memory_allocated() > 0
        device_name = torch.cuda.get_device_name(0)
    else:
        is_busy = False
        device_name = "CPU"
    status = "运行中" if is_busy else "空闲"
    return GPUStatus(isGpuBusy=is_busy, deviceName=device_name, status=status)


