import os
import numpy as np
import cv2
from fastapi import FastAPI, APIRouter, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from contextlib import asynccontextmanager
from models.inference import TRTInference
import logging

logger = logging.getLogger(__name__)
router = APIRouter(tags=['首页'])

# Module-global holding the loaded TensorRT model instance; populated by the
# lifespan handler at startup and remains None until the engine is loaded.
model: "TRTInference | None" = None

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan handler.

    On startup, loads the serialized TensorRT engine into the module-global
    ``model``; on shutdown, releases the reference so the engine can be freed.

    Raises:
        FileNotFoundError: if the engine file is missing on disk.
        Exception: any error raised by ``TRTInference`` during engine load.
    """
    global model
    # Engine file lives next to this package's models directory.
    engine_path = os.path.join(os.path.dirname(__file__), "../models/best.engine")
    try:
        if not os.path.exists(engine_path):
            raise FileNotFoundError(f"引擎文件未找到: {engine_path}")
        model = TRTInference(engine_path)
        logger.info("模型加载成功")
    except Exception as e:
        # Only startup failures belong here. In the original code the `yield`
        # sat inside this try, so runtime errors were mislabelled as load
        # failures — keep the running phase in its own try below.
        logger.error(f"模型加载失败: {str(e)}")
        raise

    try:
        yield
    finally:
        # Shutdown cleanup: rebind to None instead of `del` so later
        # `model is None` checks don't raise NameError on a deleted global.
        model = None
        logger.info("应用关闭")

@router.get("/")
async def root():
    return {"message": "YOLO TensorRT Inference API is running!"}

@router.get("/health")
async def health_check():
    """健康检查"""
    if model is None:
        raise HTTPException(status_code=500, detail="模型未加载")
    return {"status": "healthy"}

@router.post("/predict/", response_model=dict)
async def predict(file: UploadFile = File(...), 
                  conf_threshold: float = 0.25, 
                  iou_threshold: float = 0.45):
    """
    图像目标检测
    
    - **file**: 上传的图像文件 (jpg, png等)
    - **conf_threshold**: 置信度阈值 (0.0 - 1.0)
    - **iou_threshold**: IOU阈值用于NMS (0.0 - 1.0)
    """
    # 验证文件类型
    if not file.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="文件必须是图像")
    
    try:
        # 读取图像
        contents = await file.read()
        nparr = np.frombuffer(contents, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        
        if image is None:
            raise HTTPException(status_code=400, detail="无法解码图像")
        
        # 执行推理
        results = model.infer(image)
        
        return {
            "success": True,
            "filename": file.filename,
            "detections": results,
            "count": len(results)
        }
        
    except Exception as e:
        logger.error(f"推理错误: {str(e)}")
        raise HTTPException(status_code=500, detail=f"推理失败: {str(e)}")