#!/usr/bin/env python3
"""
YOLO推理服务器
基于FastAPI和Ultralytics YOLO实现的高性能目标检测服务
支持YOLO 11 n和s模型的自动下载和推理
"""

import asyncio
import logging
import time
import os
import json
import base64
from typing import Dict, List, Any, Optional, Union
from pathlib import Path
import hashlib
import aiohttp
import aiofiles
from collections import deque, defaultdict
from datetime import datetime, timedelta
import threading

import numpy as np
import cv2
from PIL import Image
import torch
from ultralytics import YOLO
from fastapi import FastAPI, HTTPException, UploadFile, File, BackgroundTasks, Form
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
import uvicorn
import queue
import uuid
from typing import Generator

# Configure root logging for the whole service.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Dedicated performance logger writing to its own file.
# NOTE(review): the FileHandler opens yolo_performance.log at import time,
# and perf_logger is not referenced in the visible portion of this file —
# confirm it is actually used elsewhere before relying on it.
perf_logger = logging.getLogger("performance")
perf_handler = logging.FileHandler("yolo_performance.log")
perf_formatter = logging.Formatter('%(asctime)s - %(message)s')
perf_handler.setFormatter(perf_formatter)
perf_logger.addHandler(perf_handler)
perf_logger.setLevel(logging.INFO)

# Registry of downloadable YOLO models: name -> download URL, local filename,
# size tier and human-readable description. Used by ModelDownloader and
# YOLOInferenceEngine to locate/fetch weight files.
YOLO_MODELS = {
    "yolo11n": {
        "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n.pt",
        "filename": "yolo11n.pt",
        "size": "nano",
        "description": "YOLO 11 Nano - 最小模型，速度最快"
    },
    "yolo11s": {
        "url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt",
        "filename": "yolo11s.pt",
        "size": "small",
        "description": "YOLO 11 Small - 小型模型，平衡速度和精度"
    }
}

# COCO dataset class names (80 classes); list index corresponds to the
# class id produced by the model, see the cls_id lookup in infer().
COCO_CLASSES = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
    'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
    'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
    'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
    'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
    'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
    'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
    'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
    'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
    'toothbrush'
]

# Request / response schema models
class DetectionRequest(BaseModel):
    """检测请求模型"""
    model_name: str = Field("yolo11n", description="模型名称: yolo11n 或 yolo11s")
    image_data: Optional[str] = Field(None, description="Base64编码的图像数据")
    confidence_threshold: float = Field(0.5, description="置信度阈值")
    iou_threshold: float = Field(0.4, description="IoU阈值")
    max_detections: int = Field(100, description="最大检测数量")
    class_filter: Optional[List[str]] = Field(None, description="类别过滤器，只返回指定类别")
    return_crops: bool = Field(False, description="是否返回检测区域的裁剪图像")
    draw_results: bool = Field(False, description="是否在图像上绘制检测结果")

class DetectionResult(BaseModel):
    """A single detected object, as built in the engine's post-processing."""
    class_id: int  # numeric class id emitted by the model
    class_name: str  # looked up in COCO_CLASSES; "class_<id>" when out of range
    confidence: float  # detection confidence score
    bbox: List[float]  # [x1, y1, x2, y2] pixel coordinates in the input image
    area: float  # bbox area: (x2 - x1) * (y2 - y1)
    crop_image: Optional[str] = None  # Base64 crop of the bbox region (only when return_crops is set)

class InferenceResponse(BaseModel):
    """Response schema mirroring the dicts built by the inference engine.

    All timing fields are wall-clock seconds measured inside the engine.
    """
    success: bool
    message: str
    model_name: str
    results: List[DetectionResult]
    inference_time: float  # model forward pass only
    preprocessing_time: float
    postprocessing_time: float
    total_time: float  # end-to-end time for the request
    image_size: List[int]  # [width, height] of the input image
    result_image: Optional[str] = None  # Base64 JPEG with detections drawn (draw_results=True)

class ModelInfo(BaseModel):
    """Model status entry for model-listing responses.

    NOTE(review): list_models() returns plain dicts that also carry extra
    keys (active/loading/usage_count) — confirm where this schema is
    actually applied.
    """
    name: str
    filename: str
    size: str  # size tier label, e.g. "nano" / "small" (see YOLO_MODELS)
    description: str
    loaded: bool
    download_progress: float  # percent, 0.0 - 100.0
    file_size: Optional[int] = None  # weight file size in bytes, when known
    last_used: Optional[float] = None  # epoch seconds of last use, when known


class RTSPStreamProcessor:
    """Consumes RTSP video streams and runs YOLO inference on sampled frames.

    One daemon thread per stream pulls frames with OpenCV, runs inference
    on every Nth frame (derived from the stream FPS and the requested
    target rate), and buffers the result dicts in a bounded queue that API
    callers drain via get_stream_results().
    """

    def __init__(self):
        # stream_id -> {'cap', 'result_queue', 'thread', 'stop_event', 'info'}
        self.active_streams = {}
        # Guards every structural change to active_streams.
        self.stream_lock = threading.Lock()

    def start_stream_processing(self, stream_id: str, rtsp_url: str,
                               inference_engine, model_name: str = "yolo11n",
                               confidence_threshold: float = 0.5,
                               fps_target: int = 1) -> bool:
        """Open the RTSP source and spawn the processing thread.

        Args:
            stream_id: caller-chosen unique id for this stream.
            rtsp_url: RTSP (or any OpenCV-supported) source URL.
            inference_engine: object exposing infer_sync(...).
            model_name: model name forwarded to the engine.
            confidence_threshold: confidence forwarded to the engine.
            fps_target: desired processed frames per second.

        Returns:
            True when the stream was opened and the worker started.
        """
        try:
            logger.info(f"开始处理RTSP流: {stream_id}, URL: {rtsp_url}")

            cap = cv2.VideoCapture(rtsp_url)
            if not cap.isOpened():
                logger.error(f"无法打开RTSP流: {rtsp_url}")
                return False

            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            logger.info(f"视频流信息 - FPS: {fps}, 分辨率: {width}x{height}")

            # Process one frame out of every `frame_interval`; fall back to
            # a fixed interval when the container does not report FPS.
            frame_interval = max(1, int(fps / fps_target)) if fps > 0 else 30

            result_queue = queue.Queue(maxsize=100)

            processing_thread = threading.Thread(
                target=self._process_stream,
                args=(stream_id, inference_engine),
                daemon=True
            )

            # BUG FIX: register the COMPLETE entry (thread included) under
            # the lock BEFORE starting the worker. The original assigned
            # 'thread' outside the lock after start(), so a concurrent
            # stop_stream_processing() could observe a half-built entry or
            # the assignment could KeyError after a racing removal.
            with self.stream_lock:
                self.active_streams[stream_id] = {
                    'cap': cap,
                    'result_queue': result_queue,
                    'thread': processing_thread,
                    'stop_event': threading.Event(),
                    'info': {
                        'rtsp_url': rtsp_url,
                        'fps': fps,
                        'width': width,
                        'height': height,
                        'frame_interval': frame_interval,
                        'model_name': model_name,
                        'confidence_threshold': confidence_threshold
                    }
                }

            processing_thread.start()

            logger.info(f"RTSP流处理已启动: {stream_id}")
            return True

        except Exception as e:
            logger.error(f"启动RTSP流处理失败: {e}")
            return False

    def _process_stream(self, stream_id: str, inference_engine):
        """Worker loop: read frames, run inference on every Nth frame and
        push timestamped result dicts into the stream's bounded queue."""
        cap = None
        try:
            stream_info = self.active_streams.get(stream_id)
            if stream_info is None:
                return  # stream was stopped before the worker got going
            cap = stream_info['cap']
            result_queue = stream_info['result_queue']
            stop_event = stream_info['stop_event']
            info = stream_info['info']

            frame_count = 0
            last_process_time = time.time()

            while not stop_event.is_set():
                ret, frame = cap.read()
                if not ret:
                    logger.warning(f"无法读取帧，流可能已断开: {stream_id}")
                    break

                frame_count += 1

                # Only every Nth frame is pushed through the model.
                if frame_count % info['frame_interval'] == 0:
                    current_time = time.time()

                    try:
                        # Synchronous inference — we are already on a
                        # dedicated worker thread.
                        result = inference_engine.infer_sync(
                            image=frame,
                            model_name=info['model_name'],
                            confidence_threshold=info['confidence_threshold'],
                            iou_threshold=0.4,
                            max_detections=100,
                            draw_results=False,
                            return_crops=False
                        )

                        # Attach stream/frame metadata to the result.
                        result['timestamp'] = current_time
                        result['frame_number'] = frame_count
                        result['stream_id'] = stream_id
                        result['processing_interval'] = current_time - last_process_time

                        # Bounded queue: when full, drop the oldest entry so
                        # consumers always see the freshest results.
                        try:
                            result_queue.put_nowait(result)
                        except queue.Full:
                            try:
                                result_queue.get_nowait()
                                result_queue.put_nowait(result)
                            except queue.Empty:
                                pass

                        last_process_time = current_time

                        logger.info(f"流 {stream_id} 处理帧 {frame_count}, "
                                   f"检测到 {len(result.get('results', []))} 个对象")

                    except Exception as e:
                        logger.error(f"处理帧失败 {stream_id}: {e}")

                # Small sleep keeps the read loop from pegging a core.
                time.sleep(0.01)

        except Exception as e:
            logger.error(f"视频流处理线程异常 {stream_id}: {e}")
        finally:
            # Release the capture only while our entry still exists;
            # stop_stream_processing() releases it itself after removal.
            if cap is not None and stream_id in self.active_streams:
                cap.release()
                logger.info(f"视频流处理已停止: {stream_id}")

    def stop_stream_processing(self, stream_id: str) -> bool:
        """Signal the worker to stop, wait briefly, then free resources.

        Returns:
            True when the stream existed and was stopped, False otherwise.
        """
        try:
            with self.stream_lock:
                stream_info = self.active_streams.get(stream_id)
                if stream_info is None:
                    return False
                stream_info['stop_event'].set()
                worker = stream_info['thread']

            # BUG FIX: join OUTSIDE the lock. The original held the lock
            # during the 5s join, blocking every other stream operation
            # (including the worker's own cleanup path) meanwhile.
            if worker and worker.is_alive():
                worker.join(timeout=5)

            with self.stream_lock:
                stream_info = self.active_streams.pop(stream_id, None)

            # Release the capture after removal (VideoCapture.release is
            # safe to call even if the worker already released it).
            if stream_info is not None:
                stream_info['cap'].release()

            logger.info(f"已停止RTSP流处理: {stream_id}")
            return True

        except Exception as e:
            logger.error(f"停止RTSP流处理失败: {e}")
            return False

    def get_stream_results(self, stream_id: str, max_results: int = 10) -> List[Dict]:
        """Drain up to max_results pending results for a stream (FIFO order)."""
        # Look up the entry under the lock (the original read the dict
        # unlocked, racing against stop/removal).
        with self.stream_lock:
            stream_info = self.active_streams.get(stream_id)
        if stream_info is None:
            return []

        result_queue = stream_info['result_queue']
        results = []
        for _ in range(max_results):
            try:
                results.append(result_queue.get_nowait())
            except queue.Empty:
                break
        return results

    def get_active_streams(self) -> Dict:
        """Return a snapshot of each registered stream's config and status."""
        with self.stream_lock:
            return {
                stream_id: {
                    'info': stream_info['info'],
                    'queue_size': stream_info['result_queue'].qsize(),
                    'is_active': not stream_info['stop_event'].is_set()
                }
                for stream_id, stream_info in self.active_streams.items()
            }


class PerformanceMonitor:
    """Tracks per-model and global inference statistics.

    A single lock guards every counter so the monitor can be shared
    between HTTP handlers and RTSP worker threads.
    """

    def __init__(self):
        self.model_stats = defaultdict(lambda: {
            'total_requests': 0,
            'total_inference_time': 0.0,
            'total_processing_time': 0.0,
            'error_count': 0,
            'last_used': None
        })
        self.global_stats = {
            'total_requests': 0,
            'total_errors': 0,
            'start_time': time.time(),
            'last_request_time': None
        }
        self.lock = threading.Lock()

    def record_inference(self, model_name: str, inference_time: float,
                        total_time: float, success: bool = True,
                        detections_count: int = 0):
        """Record one inference attempt (simplified: counters only, no
        per-request history is kept, which avoids unbounded memory growth)."""
        try:
            now = time.time()
            with self.lock:
                # Global counters.
                g = self.global_stats
                g['total_requests'] += 1
                g['last_request_time'] = now

                # Per-model counters.
                stats = self.model_stats[model_name]
                stats['total_requests'] += 1
                stats['last_used'] = now

                if success:
                    stats['total_inference_time'] += inference_time
                    stats['total_processing_time'] += total_time
                else:
                    g['total_errors'] += 1
                    stats['error_count'] += 1

            # Lightweight textual trace of every request.
            status = "成功" if success else "失败"
            logger.info(f"推理记录 - 模型: {model_name}, 状态: {status}, "
                       f"推理时间: {inference_time:.3f}s, 总时间: {total_time:.3f}s, "
                       f"检测数: {detections_count}")
        except Exception as e:
            logger.error(f"性能记录失败: {e}")

    def get_simple_stats(self) -> Dict:
        """Return a small summary: request/error totals, uptime, error rate."""
        try:
            with self.lock:
                g = self.global_stats
                requests = g['total_requests']
                errors = g['total_errors']
                return {
                    'total_requests': requests,
                    'total_errors': errors,
                    'uptime_seconds': time.time() - g['start_time'],
                    # max(1, ...) avoids division by zero before any request.
                    'error_rate': errors / max(1, requests)
                }
        except Exception as e:
            logger.error(f"获取统计数据失败: {e}")
            return {'total_requests': 0, 'total_errors': 0, 'uptime_seconds': 0, 'error_rate': 0}










class ModelDownloader:
    """Downloads YOLO weight files listed in YOLO_MODELS to a local directory."""

    def __init__(self, models_dir: str = "./models"):
        # Directory holding the downloaded .pt files (created on demand).
        self.models_dir = Path(models_dir)
        self.models_dir.mkdir(parents=True, exist_ok=True)
        # model_name -> last reported download percentage (0.0 - 100.0).
        self.download_progress = {}

    async def download_model(self, model_name: str, progress_callback=None) -> bool:
        """Download a model's weight file if it is not already present.

        Args:
            model_name: key into YOLO_MODELS.
            progress_callback: optional async callable(model_name, percent).

        Returns:
            True on success or when the file already exists; False on any
            failure (unknown model, HTTP error, exception). Partial files
            are removed on failure.
        """
        if model_name not in YOLO_MODELS:
            logger.error(f"未知模型: {model_name}")
            return False

        model_config = YOLO_MODELS[model_name]
        model_path = self.models_dir / model_config["filename"]

        # Already downloaded: nothing to do.
        if model_path.exists():
            logger.info(f"模型 {model_name} 已存在: {model_path}")
            return True

        logger.info(f"开始下载模型 {model_name} 从 {model_config['url']}")

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(model_config["url"]) as response:
                    if response.status != 200:
                        logger.error(f"下载失败，HTTP状态码: {response.status}")
                        return False

                    total_size = int(response.headers.get('content-length', 0))
                    downloaded_size = 0
                    last_logged_mb = 0  # last whole-MB mark we logged at

                    async with aiofiles.open(model_path, 'wb') as f:
                        async for chunk in response.content.iter_chunked(8192):
                            await f.write(chunk)
                            downloaded_size += len(chunk)

                            if total_size > 0:
                                progress = (downloaded_size / total_size) * 100
                                self.download_progress[model_name] = progress

                                if progress_callback:
                                    await progress_callback(model_name, progress)

                                # BUG FIX: the original tested
                                # `downloaded_size % (1024 * 1024) == 0`, which
                                # almost never holds for variable-size chunks,
                                # so progress was effectively never logged.
                                # Log whenever a new MB boundary is crossed.
                                current_mb = downloaded_size // (1024 * 1024)
                                if current_mb > last_logged_mb:
                                    last_logged_mb = current_mb
                                    logger.info(f"下载进度 {model_name}: {progress:.1f}%")

            logger.info(f"模型 {model_name} 下载完成: {model_path}")
            self.download_progress[model_name] = 100.0
            return True

        except Exception as e:
            logger.error(f"下载模型 {model_name} 失败: {e}")
            # Remove the incomplete file so a retry starts clean.
            if model_path.exists():
                model_path.unlink()
            return False

    def get_download_progress(self, model_name: str) -> float:
        """Return the last known download progress (percent) for a model."""
        return self.download_progress.get(model_name, 0.0)

    def is_model_available(self, model_name: str) -> bool:
        """Check whether the model is known and its weight file exists on disk."""
        if model_name not in YOLO_MODELS:
            return False
        model_config = YOLO_MODELS[model_name]
        model_path = self.models_dir / model_config["filename"]
        return model_path.exists()


class YOLOInferenceEngine:
    """YOLO推理引擎 - 支持热更新"""

    def __init__(self, models_dir: str = "./models"):
        """Create an engine serving the weight files stored in *models_dir*."""
        self.models_dir = Path(models_dir)
        self.performance_monitor = PerformanceMonitor()

        # Model registries (kept consistent under model_lock).
        self.models: Dict[str, YOLO] = {}
        self.model_info: Dict[str, Dict] = {}

        # Hot-swap bookkeeping.
        self.model_lock = threading.RLock()  # re-entrant: load paths may nest
        self.active_models: Dict[str, YOLO] = {}  # models currently serving
        self.loading_models: Dict[str, threading.Event] = {}  # in-flight loads
        self.model_usage_count: Dict[str, int] = {}  # per-model request counts
        self.default_model = "yolo11n"  # fallback model name
        
    def load_model(self, model_name: str, hot_swap: bool = True) -> bool:
        """Load a YOLO model, supporting hot (no-interruption) swapping.

        Exactly one thread performs the actual load; concurrent callers
        wait on a shared Event and report the loader's outcome.

        Args:
            model_name: model name (key into YOLO_MODELS).
            hot_swap: keep serving the old instance until the new one is
                ready, then retire the old one after a short grace period.

        Returns:
            True when the model ends up active, False otherwise.

        Fixes two races in the original implementation: `loading_event`
        could be referenced unbound when another thread registered a load
        between the two membership checks, and two threads could both
        proceed to load the same model because the loader/waiter decision
        was not made atomically.
        """
        if model_name not in YOLO_MODELS:
            logger.error(f"未知模型: {model_name}")
            return False

        # Decide atomically whether this thread is the loader or a waiter.
        with self.model_lock:
            if model_name in self.active_models:
                logger.info(f"模型 {model_name} 已经加载")
                return True

            loading_event = self.loading_models.get(model_name)
            if loading_event is None:
                # This thread becomes the single loader.
                loading_event = threading.Event()
                self.loading_models[model_name] = loading_event
                is_loader = True
            else:
                is_loader = False

        if not is_loader:
            # Another thread is loading; wait (bounded) and report its result.
            logger.info(f"模型 {model_name} 正在加载中，等待完成...")
            loading_event.wait(timeout=60)
            with self.model_lock:
                return model_name in self.active_models

        model_config = YOLO_MODELS[model_name]
        model_path = self.models_dir / model_config["filename"]

        if not model_path.exists():
            logger.error(f"模型文件不存在: {model_path}")
            # Unregister the load and release any waiters (they see failure).
            with self.model_lock:
                self.loading_models.pop(model_name, None)
            loading_event.set()
            return False

        try:
            logger.info(f"开始加载模型 {model_name}...")
            start_time = time.time()

            # Load outside the lock so inference on other models continues.
            new_model = YOLO(str(model_path))
            load_time = time.time() - start_time

            # Atomically publish the new model.
            with self.model_lock:
                # With hot_swap we keep a handle to the old instance so it
                # can be retired gracefully below.
                old_model = self.active_models.get(model_name) if hot_swap else None

                self.active_models[model_name] = new_model
                self.models[model_name] = new_model  # legacy alias

                self.model_info[model_name] = {
                    "loaded": True,
                    "load_time": load_time,
                    "last_used": time.time(),
                    "file_size": model_path.stat().st_size,
                    "hot_swapped": hot_swap
                }

                self.model_usage_count[model_name] = 0
                self.loading_models.pop(model_name, None)

            # Release waiters.
            loading_event.set()

            logger.info(f"模型 {model_name} 热更新加载成功，耗时: {load_time:.3f}s")

            # Retire the old instance after a grace period so in-flight
            # inferences can finish.
            if old_model and hot_swap:
                def cleanup_old_model():
                    time.sleep(2)
                    logger.info(f"清理旧版本模型 {model_name}")

                threading.Thread(target=cleanup_old_model, daemon=True).start()

            return True

        except Exception as e:
            logger.error(f"加载模型 {model_name} 失败: {e}")

            with self.model_lock:
                self.loading_models.pop(model_name, None)

            loading_event.set()  # wake waiters; they will observe failure
            return False
    
    def unload_model(self, model_name: str) -> bool:
        """卸载模型 - 支持热更新安全卸载"""
        with self.model_lock:
            unloaded = False

            # 从活跃模型中移除
            if model_name in self.active_models:
                del self.active_models[model_name]
                unloaded = True

            # 从模型缓存中移除
            if model_name in self.models:
                del self.models[model_name]
                unloaded = True

            # 更新模型信息
            if model_name in self.model_info:
                self.model_info[model_name]["loaded"] = False

            # 清理使用计数
            if model_name in self.model_usage_count:
                del self.model_usage_count[model_name]

            if unloaded:
                logger.info(f"模型 {model_name} 卸载成功")
                return True

        return False
    
    def get_model(self, model_name: str, auto_load: bool = True) -> Optional[YOLO]:
        """Fetch a model instance, with optional on-demand loading.

        Args:
            model_name: model name to look up.
            auto_load: when True and the weight file exists, load the model
                on demand.

        Returns:
            The YOLO instance, the default model as a fallback, or None
            when nothing could be loaded.
        """
        with self.model_lock:
            # Fast path: the model is already active; bump usage stats.
            if model_name in self.active_models:
                self.model_info[model_name]["last_used"] = time.time()
                self.model_usage_count[model_name] = self.model_usage_count.get(model_name, 0) + 1
                return self.active_models[model_name]

            # When auto-load is on and the weight file exists, only announce
            # the load here — the load itself runs below, outside this lock,
            # because load_model() takes the same (re-entrant) lock.
            if auto_load and model_name in YOLO_MODELS:
                model_config = YOLO_MODELS[model_name]
                model_path = self.models_dir / model_config["filename"]

                if model_path.exists():
                    logger.info(f"自动加载模型 {model_name}")
                    # Load happens outside the lock to avoid long lock holds.

        # Perform the load outside the lock, then re-check under the lock.
        if auto_load and model_name in YOLO_MODELS:
            if self.load_model(model_name, hot_swap=True):
                with self.model_lock:
                    if model_name in self.active_models:
                        self.model_info[model_name]["last_used"] = time.time()
                        self.model_usage_count[model_name] = self.model_usage_count.get(model_name, 0) + 1
                        return self.active_models[model_name]

        # Fall back to the default model; recurses at most once because the
        # recursive call passes self.default_model itself.
        if model_name != self.default_model:
            logger.warning(f"模型 {model_name} 不可用，回退到默认模型 {self.default_model}")
            return self.get_model(self.default_model, auto_load=auto_load)

        return None

    def list_models(self) -> List[Dict]:
        """Build a status entry for every model known to YOLO_MODELS."""
        models_info = []
        for model_name, config in YOLO_MODELS.items():
            # Snapshot the mutable state under the lock.
            with self.model_lock:
                is_active = model_name in self.active_models
                is_loading = model_name in self.loading_models
                usage_count = self.model_usage_count.get(model_name, 0)

            available = self.is_model_available(model_name)
            entry = {
                "name": model_name,
                "filename": config["filename"],
                "size": config["size"],
                "description": config["description"],
                "loaded": model_name in self.models,
                "active": is_active,
                "loading": is_loading,
                "usage_count": usage_count,
                "download_progress": 100.0 if available else 0.0,
                "file_size": None,
                "last_used": None
            }

            # Overlay detailed load metadata when we have it.
            extra = self.model_info.get(model_name)
            if extra is not None:
                entry.update(extra)

            models_info.append(entry)
        return models_info

    def is_model_available(self, model_name: str) -> bool:
        """True when the model is known and its weight file exists on disk."""
        config = YOLO_MODELS.get(model_name)
        if config is None:
            return False
        return (self.models_dir / config["filename"]).exists()

    async def infer(self, model_name: str, image: np.ndarray,
                   confidence_threshold: float = 0.5,
                   iou_threshold: float = 0.4,
                   max_detections: int = 100,
                   class_filter: Optional[List[str]] = None,
                   return_crops: bool = False,
                   draw_results: bool = False) -> Dict:
        """Run one detection pass and return an InferenceResponse-shaped dict.

        NOTE(review): despite being declared async, the body contains no
        awaits — the model call runs synchronously on the event loop
        thread; consider run_in_executor if this blocks the server.

        Args:
            model_name: key into YOLO_MODELS; get_model() may auto-load it
                or fall back to the default model.
            image: image array (at least H x W); assumed BGR as produced by
                cv2 — TODO confirm at call sites.
            confidence_threshold: minimum confidence (conf) for the model.
            iou_threshold: NMS IoU threshold (iou) for the model.
            max_detections: maximum detections (max_det) for the model.
            class_filter: when set, keep only detections with these names.
            return_crops: attach Base64 crops of each detection region.
            draw_results: attach a Base64 JPEG with detections drawn on it.

        Returns:
            Dict matching InferenceResponse; "success" is False with an
            explanatory "message" when the model is missing or the model
            call raises.
        """
        start_time = time.time()

        # Resolve the model (may auto-load or fall back to the default).
        model = self.get_model(model_name)
        if not model:
            return {
                "success": False,
                "message": f"模型 {model_name} 未加载",
                "model_name": model_name,
                "results": [],
                "inference_time": 0,
                "preprocessing_time": 0,
                "postprocessing_time": 0,
                "total_time": 0,
                "image_size": [0, 0],
                "result_image": None
            }

        try:
            # Preprocessing (currently only records the original size).
            preprocessing_start = time.time()
            original_height, original_width = image.shape[:2]
            preprocessing_time = time.time() - preprocessing_start

            # Inference: forward the thresholds straight to Ultralytics.
            inference_start = time.time()
            logger.info(f"调用YOLO模型进行推理，图像尺寸: {image.shape}")
            try:
                results = model(image,
                              conf=confidence_threshold,
                              iou=iou_threshold,
                              max_det=max_detections,
                              verbose=False)
                inference_time = time.time() - inference_start
                logger.info(f"YOLO推理完成，耗时: {inference_time:.3f}s")
            except Exception as e:
                logger.error(f"YOLO推理失败: {e}")
                raise

            # Postprocessing: convert raw boxes into response dicts.
            postprocessing_start = time.time()
            detections = []

            if results and len(results) > 0:
                result = results[0]  # single-image call: take the first result

                if result.boxes is not None and len(result.boxes) > 0:
                    boxes = result.boxes.xyxy.cpu().numpy()  # [x1, y1, x2, y2]
                    confidences = result.boxes.conf.cpu().numpy()
                    class_ids = result.boxes.cls.cpu().numpy().astype(int)

                    for i, (box, conf, cls_id) in enumerate(zip(boxes, confidences, class_ids)):
                        # Map the class id to a COCO name, with a fallback
                        # for ids outside the COCO range.
                        if cls_id < len(COCO_CLASSES):
                            class_name = COCO_CLASSES[cls_id]
                        else:
                            class_name = f"class_{cls_id}"

                        # Apply the class-name filter, when given.
                        if class_filter and class_name not in class_filter:
                            continue

                        x1, y1, x2, y2 = box
                        area = (x2 - x1) * (y2 - y1)

                        detection = {
                            "class_id": int(cls_id),
                            "class_name": class_name,
                            "confidence": float(conf),
                            "bbox": [float(x1), float(y1), float(x2), float(y2)],
                            "area": float(area),
                            "crop_image": None
                        }

                        # Attach the cropped region (helper defined
                        # elsewhere in this class — not shown here).
                        if return_crops:
                            crop_image = self._crop_image_region(image, detection["bbox"])
                            detection["crop_image"] = crop_image

                        detections.append(detection)

            postprocessing_time = time.time() - postprocessing_start

            # Optionally render detections onto the image; note nothing is
            # rendered when there are no detections.
            result_image_base64 = None
            if draw_results and detections:
                result_image = self._draw_detection_results(image, detections)
                _, buffer = cv2.imencode('.jpg', result_image)
                result_image_base64 = base64.b64encode(buffer).decode('utf-8')

            total_time = time.time() - start_time

            # Record performance metrics.
            self.performance_monitor.record_inference(
                model_name=model_name,
                inference_time=inference_time,
                total_time=total_time,
                success=True,
                detections_count=len(detections)
            )

            return {
                "success": True,
                "message": "推理完成",
                "model_name": model_name,
                "results": detections,
                "inference_time": inference_time,
                "preprocessing_time": preprocessing_time,
                "postprocessing_time": postprocessing_time,
                "total_time": total_time,
                "image_size": [original_width, original_height],
                "result_image": result_image_base64
            }

        except Exception as e:
            total_time = time.time() - start_time
            logger.error(f"推理失败: {e}")

            # Record the failed attempt as well.
            self.performance_monitor.record_inference(
                model_name=model_name,
                inference_time=0,
                total_time=total_time,
                success=False,
                detections_count=0
            )

            return {
                "success": False,
                "message": f"推理失败: {str(e)}",
                "model_name": model_name,
                "results": [],
                "inference_time": 0,
                "preprocessing_time": 0,
                "postprocessing_time": 0,
                "total_time": total_time,
                "image_size": [0, 0],
                "result_image": None
            }

    def infer_sync(self, image: np.ndarray, model_name: str = "yolo11n",
                   confidence_threshold: float = 0.5, iou_threshold: float = 0.4,
                   max_detections: int = 100, draw_results: bool = False,
                   return_crops: bool = False,
                   class_filter: Optional[List[str]] = None) -> Dict:
        """Synchronous inference entry point (used by the RTSP stream pipeline).

        Args:
            image: BGR image array of shape (H, W, C).
            model_name: Key into ``self.models``; the model must already be loaded.
            confidence_threshold: Minimum confidence for kept detections.
            iou_threshold: IoU threshold used by NMS.
            max_detections: Upper bound on boxes the model may return.
            draw_results: When True, attach an annotated JPEG (base64) to the result.
            return_crops: When True, attach a base64 JPEG crop per detection.
            class_filter: Optional whitelist of class names; other detections
                are dropped after inference.

        Returns:
            Dict with ``success``, ``message``, ``results`` (detection dicts),
            per-stage timings, ``image_size`` as [width, height] and
            ``result_image`` (base64 or None). On failure ``success`` is False
            and the per-stage timings are zeroed.
        """
        # Taken outside the try-block so the except handler can always compute
        # total_time even if the very first statement inside raises.
        start_time = time.time()
        try:
            # Fail fast when the requested model was never loaded.
            if model_name not in self.models:
                raise ValueError(f"模型 {model_name} 未加载")

            model = self.models[model_name]

            # Build an O(1) membership set once instead of scanning the
            # filter list for every detection.
            allowed = set(class_filter) if class_filter else None

            # Preprocessing (currently only records the original size).
            preprocessing_start = time.time()
            original_height, original_width = image.shape[:2]
            preprocessing_time = time.time() - preprocessing_start

            # Inference.
            inference_start = time.time()
            logger.info(f"调用YOLO模型进行推理，图像尺寸: {image.shape}")
            try:
                results = model(image, conf=confidence_threshold, iou=iou_threshold,
                                max_det=max_detections, verbose=False)
                inference_time = time.time() - inference_start
                logger.info(f"YOLO推理完成，耗时: {inference_time:.3f}s")
            except Exception as e:
                logger.error(f"YOLO推理失败: {e}")
                raise

            # Postprocessing: convert model boxes into plain-Python dicts.
            postprocessing_start = time.time()
            detections = []

            if results and len(results) > 0:
                result = results[0]

                if result.boxes is not None and len(result.boxes) > 0:
                    boxes = result.boxes.xyxy.cpu().numpy()
                    confidences = result.boxes.conf.cpu().numpy()
                    class_ids = result.boxes.cls.cpu().numpy().astype(int)

                    for box, conf, cls_id in zip(boxes, confidences, class_ids):
                        # Map COCO ids to names; synthesize a name for unknown ids.
                        if cls_id < len(COCO_CLASSES):
                            class_name = COCO_CLASSES[cls_id]
                        else:
                            class_name = f"class_{cls_id}"

                        # Apply the optional class whitelist.
                        if allowed is not None and class_name not in allowed:
                            continue

                        x1, y1, x2, y2 = box
                        detection = {
                            "class_id": int(cls_id),
                            "class_name": class_name,
                            "confidence": float(conf),
                            "bbox": [float(x1), float(y1), float(x2), float(y2)],
                            "area": float((x2 - x1) * (y2 - y1))
                        }

                        # Optionally attach a base64 crop of the detected region.
                        if return_crops:
                            crop_base64 = self._crop_image_region(image, box)
                            if crop_base64:
                                detection["crop_image"] = crop_base64

                        detections.append(detection)

            postprocessing_time = time.time() - postprocessing_start

            # Optionally draw boxes/labels and encode the annotated image.
            result_image_base64 = None
            if draw_results and detections:
                result_image = self._draw_detection_results(image, detections)
                _, buffer = cv2.imencode('.jpg', result_image)
                result_image_base64 = base64.b64encode(buffer).decode('utf-8')

            total_time = time.time() - start_time

            # Record metrics for the successful run.
            self.performance_monitor.record_inference(
                model_name=model_name,
                inference_time=inference_time,
                total_time=total_time,
                success=True,
                detections_count=len(detections)
            )

            return {
                "success": True,
                "message": "推理完成",
                "model_name": model_name,
                "results": detections,
                "inference_time": inference_time,
                "preprocessing_time": preprocessing_time,
                "postprocessing_time": postprocessing_time,
                "total_time": total_time,
                "image_size": [original_width, original_height],
                "result_image": result_image_base64
            }

        except Exception as e:
            total_time = time.time() - start_time
            logger.error(f"同步推理失败: {e}")

            # Record the failed run so error rates stay accurate.
            self.performance_monitor.record_inference(
                model_name=model_name,
                inference_time=0,
                total_time=total_time,
                success=False,
                detections_count=0
            )

            return {
                "success": False,
                "message": f"推理失败: {str(e)}",
                "model_name": model_name,
                "results": [],
                "inference_time": 0,
                "preprocessing_time": 0,
                "postprocessing_time": 0,
                "total_time": total_time,
                "image_size": [0, 0],
                "result_image": None
            }

    def _crop_image_region(self, image: np.ndarray, bbox: List[float]) -> Optional[str]:
        """Crop the bbox region out of *image* and return it as a base64 JPEG.

        Returns None when the clamped box is degenerate or encoding fails.
        """
        try:
            height, width = image.shape[:2]
            left, top, right, bottom = (int(v) for v in bbox)

            # Clamp every coordinate to the image bounds.
            left = min(max(left, 0), width)
            right = min(max(right, 0), width)
            top = min(max(top, 0), height)
            bottom = min(max(bottom, 0), height)

            # Reject empty or inverted boxes.
            if right <= left or bottom <= top:
                return None

            region = image[top:bottom, left:right]

            # JPEG-encode and return as base64 text.
            buffer = cv2.imencode('.jpg', region)[1]
            return base64.b64encode(buffer).decode('utf-8')

        except Exception as e:
            logger.error(f"裁剪图像失败: {e}")
            return None

    def _draw_detection_results(self, image: np.ndarray, detections: List[Dict]) -> np.ndarray:
        """Return a copy of *image* with a box and label drawn per detection."""
        annotated = image.copy()

        try:
            for det in detections:
                class_name = det['class_name']
                confidence = det['confidence']
                x1, y1, x2, y2 = (int(c) for c in det['bbox'])

                # Green bounding box.
                cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)

                # Filled label background, then black text on top of it.
                label = f"{class_name}: {confidence:.2f}"
                (text_w, text_h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
                cv2.rectangle(annotated, (x1, y1 - text_h - 10),
                              (x1 + text_w, y1), (0, 255, 0), -1)
                cv2.putText(annotated, label, (x1, y1 - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)

        except Exception as e:
            logger.error(f"绘制结果失败: {e}")

        return annotated


# Global singletons shared by every request handler below.
downloader = ModelDownloader()            # downloads and tracks model weight files
inference_engine = YOLOInferenceEngine()  # loads models and runs inference
rtsp_processor = RTSPStreamProcessor()    # manages background RTSP stream workers

# FastAPI application instance.
app = FastAPI(
    title="YOLO推理服务",
    description="基于Ultralytics YOLO的高性能目标检测服务",
    version="1.0.0"
)

# CORS middleware.
# NOTE(review): wildcard origins combined with allow_credentials=True is
# generally rejected by browsers/Starlette for credentialed requests —
# restrict origins to known domains in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow all origins; lock down for production
    allow_credentials=True,
    allow_methods=["*"],  # allow every HTTP method
    allow_headers=["*"],  # allow every request header
)

def decode_base64_image(image_data: str) -> np.ndarray:
    """Decode a base64 string (optionally a data-URL) into a BGR image.

    Raises:
        HTTPException: 400 when the payload cannot be decoded to an image.
    """
    try:
        # Strip a "data:image/...;base64," prefix when present.
        payload = image_data.split(',')[1] if ',' in image_data else image_data

        # base64 text -> raw bytes -> numpy buffer -> decoded BGR image.
        raw = base64.b64decode(payload)
        pixels = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)

        if pixels is None:
            raise ValueError("无法解码图像数据")

        return pixels

    except Exception as e:
        raise HTTPException(status_code=400, detail=f"图像解码失败: {str(e)}")

@app.get("/")
async def root():
    """Landing endpoint: service name, version, status and model keys."""
    payload = {"message": "YOLO推理服务", "version": "1.0.0"}
    payload["status"] = "running"
    payload["supported_models"] = list(YOLO_MODELS)
    return payload

@app.get("/health")
async def health_check():
    """Liveness probe: current time plus the model inventory."""
    now = time.time()
    inventory = inference_engine.list_models()
    return {"status": "healthy", "timestamp": now, "models": inventory}

@app.get("/models")
async def list_models():
    """Inventory of all configured models with their runtime state."""
    inventory = inference_engine.list_models()
    return {"models": inventory, "total": len(YOLO_MODELS)}

@app.get("/models/{model_name}")
async def get_model_info(model_name: str):
    """Return the inventory entry for a single model; 404 when unknown."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")

    # Find the matching entry in the engine's inventory, if any.
    match = next(
        (info for info in inference_engine.list_models() if info["name"] == model_name),
        None,
    )
    if match is not None:
        return match

    raise HTTPException(status_code=404, detail=f"模型 {model_name} 信息未找到")

@app.post("/models/{model_name}/download")
async def download_model(model_name: str, background_tasks: BackgroundTasks):
    """Kick off a background download unless the weight file already exists."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")

    # Nothing to do when the file is already on disk.
    if downloader.is_model_available(model_name):
        return {"message": f"模型 {model_name} 已存在"}

    # Download runs after the response is sent.
    background_tasks.add_task(downloader.download_model, model_name)
    return {"message": f"开始下载模型 {model_name}"}

@app.get("/models/{model_name}/download/progress")
async def get_download_progress(model_name: str):
    """Report download progress for a model plus a derived status string."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")

    done = downloader.is_model_available(model_name)
    progress = downloader.get_download_progress(model_name)

    # Derive a coarse status from availability and progress.
    if done:
        status = "completed"
    elif progress > 0:
        status = "downloading"
    else:
        status = "not_started"

    return {
        "model_name": model_name,
        "progress": progress,
        "completed": done,
        "status": status,
    }

@app.post("/models/{model_name}/load")
async def load_model(model_name: str):
    """Cold-load a downloaded model into the inference engine."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")
    if not downloader.is_model_available(model_name):
        raise HTTPException(status_code=400, detail=f"模型 {model_name} 文件不存在，请先下载")

    if not inference_engine.load_model(model_name, hot_swap=False):
        raise HTTPException(status_code=500, detail=f"模型 {model_name} 加载失败")
    return {"message": f"模型 {model_name} 加载成功"}

@app.post("/models/{model_name}/hot-swap")
async def hot_swap_model(model_name: str):
    """Swap the named model in without interrupting in-flight requests."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")
    if not downloader.is_model_available(model_name):
        raise HTTPException(status_code=400, detail=f"模型 {model_name} 文件不存在，请先下载")

    # Time the swap itself so the caller can see how long it took.
    swap_started = time.time()
    swapped = inference_engine.load_model(model_name, hot_swap=True)
    elapsed = time.time() - swap_started

    if not swapped:
        raise HTTPException(status_code=500, detail=f"模型 {model_name} 热更新失败")

    return {
        "message": f"模型 {model_name} 热更新成功",
        "swap_time": round(elapsed, 3),
        "status": "active",
        "hot_swapped": True
    }

@app.get("/models/{model_name}/status")
async def get_model_status(model_name: str):
    """Detailed runtime status for one model (load state, usage, availability)."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")

    # Snapshot engine state under the lock to avoid racing a hot-swap.
    with inference_engine.model_lock:
        active = model_name in inference_engine.active_models
        loading = model_name in inference_engine.loading_models
        uses = inference_engine.model_usage_count.get(model_name, 0)

    info = inference_engine.model_info.get(model_name, {})

    return {
        "name": model_name,
        "active": active,
        "loading": loading,
        "loaded": model_name in inference_engine.models,
        "usage_count": uses,
        "last_used": info.get("last_used"),
        "load_time": info.get("load_time"),
        "hot_swapped": info.get("hot_swapped", False),
        "file_size": info.get("file_size"),
        "available": downloader.is_model_available(model_name)
    }

@app.delete("/models/{model_name}")
async def unload_model(model_name: str):
    """Unload a model from memory; 404 when it was not loaded."""
    if not inference_engine.unload_model(model_name):
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 未加载")
    return {"message": f"模型 {model_name} 卸载成功"}

@app.post("/detect", response_model=InferenceResponse)
async def detect_objects(request: DetectionRequest):
    """Object detection on a base64-encoded image carried in the request body."""
    if not request.image_data:
        raise HTTPException(status_code=400, detail="缺少图像数据")

    try:
        # Decode the payload into a BGR image.
        frame = decode_base64_image(request.image_data)

        # Run inference with the caller's parameters.
        outcome = await inference_engine.infer(
            model_name=request.model_name,
            image=frame,
            confidence_threshold=request.confidence_threshold,
            iou_threshold=request.iou_threshold,
            max_detections=request.max_detections,
            class_filter=request.class_filter,
            return_crops=request.return_crops,
            draw_results=request.draw_results
        )

        # Re-shape raw detection dicts into the response schema.
        detections = [DetectionResult(**item) for item in outcome['results']]

        return InferenceResponse(
            success=outcome['success'],
            message=outcome['message'],
            model_name=outcome['model_name'],
            results=detections,
            inference_time=outcome['inference_time'],
            preprocessing_time=outcome['preprocessing_time'],
            postprocessing_time=outcome['postprocessing_time'],
            total_time=outcome['total_time'],
            image_size=outcome['image_size'],
            result_image=outcome.get('result_image')
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"检测请求处理失败: {e}")
        raise HTTPException(status_code=500, detail=f"检测失败: {str(e)}")

@app.post("/detect/file")
async def detect_objects_file(
    model_name: str = "yolo11n",
    file: UploadFile = File(...),
    confidence_threshold: float = 0.5,
    iou_threshold: float = 0.4,
    max_detections: int = 100,
    class_filter: Optional[str] = None,
    return_crops: bool = False,
    draw_results: bool = False
):
    """Object detection on an uploaded image file.

    Args:
        model_name: Model key to run.
        file: Uploaded image (any format cv2.imdecode understands).
        confidence_threshold / iou_threshold / max_detections: Inference knobs.
        class_filter: Comma-separated class names to keep (optional).
        return_crops: Attach base64 crops per detection.
        draw_results: Attach an annotated base64 image.

    Returns:
        The inference result dict; falls back to a trimmed payload when the
        full result is not JSON-serializable.
    """
    try:
        logger.info(f"收到文件检测请求: {file.filename}, 大小: {file.size if hasattr(file, 'size') else 'unknown'} bytes")
        logger.info(f"检测参数: model={model_name}, conf={confidence_threshold}, iou={iou_threshold}, max={max_detections}")
        logger.info(f"高级参数: class_filter={class_filter}, return_crops={return_crops}, draw_results={draw_results}")

        # Read the upload fully into memory and decode it.
        contents = await file.read()
        logger.info(f"文件读取完成，实际大小: {len(contents)} bytes")

        nparr = np.frombuffer(contents, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        logger.info(f"图像解码结果: {image.shape if image is not None else 'None'}")

        if image is None:
            raise HTTPException(status_code=400, detail="无法解码上传的图像文件")

        # Comma-separated class names -> list (None means "keep everything").
        class_filter_list = None
        if class_filter:
            class_filter_list = [c.strip() for c in class_filter.split(',')]

        logger.info(f"开始执行推理...")
        result = await inference_engine.infer(
            model_name=model_name,
            image=image,
            confidence_threshold=confidence_threshold,
            iou_threshold=iou_threshold,
            max_detections=max_detections,
            class_filter=class_filter_list,
            return_crops=return_crops,
            draw_results=draw_results
        )

        logger.info(f"推理完成，检测到 {len(result.get('results', []))} 个对象")
        logger.info(f"推理时间: {result.get('inference_time', 0):.3f}s, 总时间: {result.get('total_time', 0):.3f}s")
        logger.info("开始序列化响应数据...")

        try:
            # Serialize once up front: proves the payload is JSON-safe and
            # yields an accurate size without a separate str() pass over the
            # (potentially huge, base64-laden) result. Uses the module-level
            # json import.
            json_str = json.dumps(result)
            logger.info(f"响应数据序列化成功，JSON大小: {len(json_str)} 字符")
        except Exception as e:
            logger.error(f"响应数据序列化失败: {e}")
            # Fall back to a trimmed payload the client can still use.
            result = {
                "success": True,
                "message": "推理完成（简化结果）",
                "model_name": result.get('model_name', 'unknown'),
                "results": result.get('results', [])[:10],  # first 10 detections only
                "inference_time": result.get('inference_time', 0),
                "total_time": result.get('total_time', 0),
                "image_size": result.get('image_size', [0, 0]),
                "result_image": None  # drop the potentially large image data
            }
            logger.info("返回简化的结果数据")

        logger.info("准备发送HTTP响应...")
        return result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"文件检测失败: {e}")
        raise HTTPException(status_code=500, detail=f"检测失败: {str(e)}")

@app.post("/batch_detect")
async def batch_detect(
    model_name: str,
    images: List[str],  # list of base64-encoded images
    confidence_threshold: float = 0.5,
    iou_threshold: float = 0.4,
    max_detections: int = 100,
    class_filter: Optional[List[str]] = None
):
    """Run detection over a list of base64 images, one at a time.

    Each entry of the response's ``results`` carries the source ``index`` and
    a per-image ``result`` dict. A failing image is reported in-line (with an
    ``error`` field) instead of aborting the whole batch, and its result dict
    carries the same keys as the success path so clients see one schema.
    """
    try:
        results = []

        for i, image_data in enumerate(images):
            try:
                # Decode, then run inference with the shared parameters.
                image = decode_base64_image(image_data)

                result = await inference_engine.infer(
                    model_name=model_name,
                    image=image,
                    confidence_threshold=confidence_threshold,
                    iou_threshold=iou_threshold,
                    max_detections=max_detections,
                    class_filter=class_filter
                )

                results.append({
                    "index": i,
                    "result": result
                })

            except Exception as e:
                # Mirror the success-path keys so the error payload shape is
                # consistent across entries.
                results.append({
                    "index": i,
                    "error": str(e),
                    "result": {
                        "success": False,
                        "message": f"处理第{i}张图像失败: {str(e)}",
                        "model_name": model_name,
                        "results": [],
                        "inference_time": 0,
                        "preprocessing_time": 0,
                        "postprocessing_time": 0,
                        "total_time": 0,
                        "image_size": [0, 0],
                        "result_image": None
                    }
                })

        return {
            "batch_size": len(images),
            "results": results,
            "success_count": sum(1 for r in results if r["result"]["success"]),
            "error_count": sum(1 for r in results if not r["result"]["success"])
        }

    except Exception as e:
        logger.error(f"批量检测失败: {e}")
        raise HTTPException(status_code=500, detail=f"批量检测失败: {str(e)}")

@app.get("/classes")
async def get_classes():
    """List the COCO class names this service can detect."""
    return {"classes": COCO_CLASSES, "total": len(COCO_CLASSES)}

@app.get("/performance")
async def get_performance_stats():
    """Aggregate performance summary across all models."""
    monitor = inference_engine.performance_monitor
    return monitor.get_performance_summary()

@app.get("/performance/models/{model_name}")
async def get_model_performance(model_name: str):
    """Per-model performance statistics; empty stats when none recorded yet."""
    if model_name not in YOLO_MODELS:
        raise HTTPException(status_code=404, detail=f"模型 {model_name} 不存在")

    stats = inference_engine.performance_monitor.get_model_stats(model_name)
    if stats:
        return {"model_name": model_name, "stats": stats}

    # No inference recorded for this model yet.
    return {
        "model_name": model_name,
        "message": "暂无性能数据",
        "stats": {}
    }

@app.get("/performance/rate")
async def get_current_rate():
    """Instantaneous throughput and mean latency of the engine."""
    monitor = inference_engine.performance_monitor
    return {
        "current_rate_per_second": monitor.get_current_rate(),
        "average_inference_time_ms": monitor.get_average_inference_time() * 1000,
        "timestamp": time.time()
    }

@app.post("/performance/log")
async def log_performance_summary():
    """Force a summary of the request counters into the application log."""
    # Pull the lightweight counters and write them as one log record.
    stats = inference_engine.performance_monitor.get_simple_stats()
    logger.info(f"性能统计 - 总请求: {stats['total_requests']}, "
               f"错误: {stats['total_errors']}, "
               f"运行时间: {stats['uptime_seconds']:.1f}s")
    return {"message": "性能统计已记录到日志", "stats": stats}

# RTSP视频流处理接口
@app.post("/rtsp/start")
async def start_rtsp_stream(
    rtsp_url: str = Form(..., description="RTSP流地址"),
    model_name: str = Form("yolo11n", description="使用的模型名称"),
    confidence_threshold: float = Form(0.5, description="置信度阈值"),
    fps_target: int = Form(1, description="目标处理帧率（每秒处理帧数）")
):
    """启动RTSP视频流处理"""
    try:
        # 生成唯一的流ID
        stream_id = str(uuid.uuid4())

        logger.info(f"收到RTSP流处理请求: {rtsp_url}")
        logger.info(f"处理参数: model={model_name}, conf={confidence_threshold}, fps={fps_target}")

        # 验证模型是否可用
        if model_name not in inference_engine.models:
            raise HTTPException(status_code=400, detail=f"模型 {model_name} 不可用")

        # 启动流处理
        success = rtsp_processor.start_stream_processing(
            stream_id=stream_id,
            rtsp_url=rtsp_url,
            inference_engine=inference_engine,
            model_name=model_name,
            confidence_threshold=confidence_threshold,
            fps_target=fps_target
        )

        if not success:
            raise HTTPException(status_code=400, detail="无法启动RTSP流处理")

        return {
            "success": True,
            "message": "RTSP流处理已启动",
            "stream_id": stream_id,
            "rtsp_url": rtsp_url,
            "model_name": model_name,
            "confidence_threshold": confidence_threshold,
            "fps_target": fps_target
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"启动RTSP流处理失败: {e}")
        raise HTTPException(status_code=500, detail=f"启动RTSP流处理失败: {str(e)}")

@app.post("/rtsp/stop/{stream_id}")
async def stop_rtsp_stream(stream_id: str):
    """Stop the background worker for the given RTSP stream; 404 when unknown."""
    try:
        logger.info(f"收到停止RTSP流请求: {stream_id}")

        if not rtsp_processor.stop_stream_processing(stream_id):
            raise HTTPException(status_code=404, detail=f"流 {stream_id} 不存在或已停止")

        return {
            "success": True,
            "message": f"RTSP流处理已停止",
            "stream_id": stream_id
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"停止RTSP流处理失败: {e}")
        raise HTTPException(status_code=500, detail=f"停止RTSP流处理失败: {str(e)}")

@app.get("/rtsp/results/{stream_id}")
async def get_rtsp_results(stream_id: str, max_results: int = 10):
    """Fetch up to *max_results* recent detection results for a stream."""
    try:
        logger.info(f"获取流处理结果: {stream_id}, 最大结果数: {max_results}")

        batch = rtsp_processor.get_stream_results(stream_id, max_results)

        return {
            "success": True,
            "stream_id": stream_id,
            "results_count": len(batch),
            "results": batch
        }

    except Exception as e:
        logger.error(f"获取RTSP流结果失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取流结果失败: {str(e)}")

@app.get("/rtsp/streams")
async def get_active_streams():
    """List every currently active RTSP stream and its info."""
    try:
        registry = rtsp_processor.get_active_streams()

        return {
            "success": True,
            "active_streams_count": len(registry),
            "streams": registry
        }

    except Exception as e:
        logger.error(f"获取活跃流信息失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取活跃流信息失败: {str(e)}")

@app.get("/rtsp/stream/{stream_id}/status")
async def get_stream_status(stream_id: str):
    """Status details for one RTSP stream; 404 when it is not active."""
    try:
        registry = rtsp_processor.get_active_streams()

        if stream_id not in registry:
            raise HTTPException(status_code=404, detail=f"流 {stream_id} 不存在")

        return {
            "success": True,
            "stream_id": stream_id,
            "status": registry[stream_id]
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"获取流状态失败: {e}")
        raise HTTPException(status_code=500, detail=f"获取流状态失败: {str(e)}")

# 启动事件
# Startup hook
@app.on_event("startup")
async def startup_event():
    """On startup: ensure the default models are downloaded, then load them."""
    logger.info("YOLO推理服务启动中...")

    logger.info("性能监控已启用（简化模式）")

    # Sequentially prepare each default model: download if missing, then load.
    for model_name in ("yolo11n", "yolo11s"):
        if not downloader.is_model_available(model_name):
            logger.info(f"开始下载模型 {model_name}...")
            if await downloader.download_model(model_name):
                logger.info(f"模型 {model_name} 下载完成")
            else:
                logger.error(f"模型 {model_name} 下载失败")

        # Load whenever the weight file is (now) present on disk.
        if downloader.is_model_available(model_name):
            if inference_engine.load_model(model_name):
                logger.info(f"模型 {model_name} 加载成功")
            else:
                logger.error(f"模型 {model_name} 加载失败")

    logger.info("YOLO推理服务启动完成")
    logger.info("性能日志将记录到: yolo_performance.log")

@app.on_event("shutdown")
async def shutdown_event():
    """On shutdown: log that the service is going down (no explicit cleanup)."""
    logger.info("YOLO推理服务关闭中...")

if __name__ == "__main__":
    import argparse

    # CLI options for running the server directly.
    parser = argparse.ArgumentParser(description="YOLO推理服务器")
    parser.add_argument("--host", default="0.0.0.0", help="服务器主机地址")
    parser.add_argument("--port", type=int, default=8000, help="服务器端口")
    parser.add_argument("--models-dir", default="./models", help="模型文件目录")
    parser.add_argument("--workers", type=int, default=1, help="工作进程数")
    parser.add_argument("--reload", action="store_true", help="启用自动重载")

    args = parser.parse_args()

    # Point both the downloader and the engine at the requested models dir.
    downloader.models_dir = Path(args.models_dir)
    downloader.models_dir.mkdir(parents=True, exist_ok=True)
    inference_engine.models_dir = Path(args.models_dir)

    logger.info(f"启动YOLO推理服务器 - 主机: {args.host}, 端口: {args.port}")
    logger.info(f"模型目录: {args.models_dir}")

    # NOTE(review): uvicorn does not combine --reload with multiple workers,
    # and the import string below assumes this file is named
    # yolo_inference_server.py — confirm against the actual filename.
    uvicorn.run(
        "yolo_inference_server:app",
        host=args.host,
        port=args.port,
        workers=args.workers,
        reload=args.reload
    )