"""
AI推理批处理优化器
实现批量处理、并行推理和资源优化
"""

import asyncio
import time
import logging
from typing import List, Dict, Any, Optional, Callable, Tuple, AsyncGenerator
from dataclasses import dataclass, field
from enum import Enum
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import queue
import threading
from collections import defaultdict, deque
import numpy as np
import cv2
from datetime import datetime
import uuid

logger = logging.getLogger(__name__)


class TaskPriority(Enum):
    """Task priority levels; CRITICAL queues are drained before lower ones."""
    LOW = 1
    NORMAL = 2
    HIGH = 3
    CRITICAL = 4


@dataclass
class BatchTask:
    """A single unit of work submitted to the batch processor."""
    task_id: str  # unique task identifier
    task_type: str  # handler key, e.g. "person_detection"
    input_data: Any  # payload passed to the type-specific handler
    priority: TaskPriority = TaskPriority.NORMAL
    callback: Optional[Callable] = None  # invoked with the BatchResult
    metadata: Dict[str, Any] = field(default_factory=dict)
    created_at: float = field(default_factory=time.time)  # submission time (epoch seconds)
    timeout: float = 30.0  # NOTE(review): not enforced anywhere in this file — confirm intent


@dataclass
class VideoFrameTask:
    """A single video frame queued for one or more AI algorithms."""
    task_id: str  # unique task identifier
    camera_id: str  # source camera
    frame: np.ndarray  # BGR image data (presumably uint8 — cv2 convention; verify at call sites)
    frame_timestamp: datetime  # capture time of the frame
    ai_algorithms: List[str]  # algorithm keys to run on this frame
    priority: TaskPriority = TaskPriority.NORMAL
    callback: Optional[Callable] = None  # invoked with the list of result dicts
    metadata: Dict[str, Any] = field(default_factory=dict)
    created_at: float = field(default_factory=time.time)  # submission time (epoch seconds)


@dataclass
class BatchResult:
    """Outcome of one processed BatchTask, delivered via the task callback."""
    task_id: str  # id of the originating task
    result: Any  # handler output, or None on failure
    success: bool  # True when the handler completed without raising
    error_message: Optional[str] = None  # stringified exception on failure
    processing_time: float = 0.0  # seconds (amortized per task for batches)
    completed_at: float = field(default_factory=time.time)  # completion time (epoch seconds)


class AIBatchProcessor:
    """Batching/parallel executor for AI inference work.

    Keeps priority queues for generic tasks and for video-frame tasks,
    groups work into batches via a scheduler thread, and runs it on a
    thread pool (optionally a process pool). Aggregate statistics live
    in ``self.stats``.
    """
    
    def __init__(self, config: Dict[str, Any]):
        """Read tuning knobs from ``config``, build queues/pools, load models.

        Recognized keys: batch_size, max_wait_time, max_workers, enable_gpu,
        enable_multiprocessing, enable_person_detection,
        enable_behavior_analysis, enable_object_detection,
        frame_preprocessing.
        """
        self.config = config
        self.batch_size = config.get("batch_size", 8)
        self.max_wait_time = config.get("max_wait_time", 1.0)  # max seconds a buffered task may wait
        self.max_workers = config.get("max_workers", 4)
        self.enable_gpu = config.get("enable_gpu", True)
        
        # One priority queue per priority level for regular tasks.
        self.task_queues = {
            TaskPriority.CRITICAL: queue.PriorityQueue(),
            TaskPriority.HIGH: queue.PriorityQueue(),
            TaskPriority.NORMAL: queue.PriorityQueue(),
            TaskPriority.LOW: queue.PriorityQueue()
        }
        
        # Per-task-type buffers flushed by the batch scheduler.
        self.batch_buffers = defaultdict(list)
        self.buffer_timers = {}
        
        # Worker pools; the process pool is optional.
        self.thread_pool = ThreadPoolExecutor(max_workers=self.max_workers)
        self.process_pool = ProcessPoolExecutor(max_workers=2) if config.get("enable_multiprocessing") else None
        
        # Aggregate statistics, updated by _update_stats.
        self.stats = {
            "total_tasks": 0,
            "completed_tasks": 0,
            "failed_tasks": 0,
            "avg_processing_time": 0.0,
            "batch_efficiency": 0.0
        }
        
        # Run state and worker-thread bookkeeping.
        self.is_running = False
        self.worker_threads = []
        
        # Loaded AI models keyed by algorithm name.
        self.model_cache = {}
        
        # Per-priority deques for video-frame tasks.
        self.frame_queues = {
            TaskPriority.CRITICAL: deque(),
            TaskPriority.HIGH: deque(),
            TaskPriority.NORMAL: deque(),
            TaskPriority.LOW: deque()
        }
        self.frame_batch_buffers = defaultdict(list)  # camera_id -> frame_list
        self.video_frame_processors = {}  # camera_id -> processor_info
        
        # Optional GPU resource manager (set via set_gpu_manager).
        self.gpu_manager = None
        
        # Optional AI service router (set via set_ai_service_router).
        self.ai_service_router = None
        
        # Load the enabled AI models into model_cache.
        self._initialize_models()
    
    def _initialize_models(self):
        """Load every enabled AI model into the in-memory cache.

        Each model is guarded by a config flag that defaults to enabled;
        loading errors are logged and swallowed (best-effort startup).
        """
        # model key -> (config flag, loader callable); insertion order preserved.
        loaders = {
            "person_detection": ("enable_person_detection", self._load_person_detection_model),
            "behavior_analysis": ("enable_behavior_analysis", self._load_behavior_analysis_model),
            "object_detection": ("enable_object_detection", self._load_object_detection_model),
        }

        try:
            for model_key, (flag, loader) in loaders.items():
                if self.config.get(flag, True):
                    self.model_cache[model_key] = loader()

            logger.info("AI模型初始化完成")

        except Exception as e:
            logger.error(f"AI模型初始化失败: {e}")
    
    def _load_person_detection_model(self):
        """Build the person detector (placeholder: OpenCV HOG + default people SVM)."""
        detector = cv2.HOGDescriptor()
        detector.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
        return detector
    
    def _load_behavior_analysis_model(self):
        """Return a stand-in for the real behavior-analysis model."""
        return {"model": "behavior_analysis_placeholder"}
    
    def _load_object_detection_model(self):
        """Return a stand-in for the real object-detection model."""
        return {"model": "object_detection_placeholder"}
    
    def submit_task(self, task: BatchTask) -> str:
        """Enqueue a task on its priority queue and return its task_id.

        The queue entry key is ``(created_at, task_id)``: with the bare
        ``(created_at, task)`` key, two tasks created in the same clock tick
        would fall through to comparing BatchTask objects, which are not
        orderable, and PriorityQueue.put/get would raise TypeError.
        The 2-tuple ``(key, task)`` shape expected by _get_next_task is kept.
        """
        self.stats["total_tasks"] += 1
        
        # FIFO within a priority level; task_id breaks timestamp ties.
        priority_queue = self.task_queues[task.priority]
        priority_queue.put(((task.created_at, task.task_id), task))
        
        logger.debug(f"任务已提交: {task.task_id}, 类型: {task.task_type}, 优先级: {task.priority}")
        
        return task.task_id
    
    def start_processing(self):
        """Spin up the worker threads and the batch scheduler (idempotent)."""
        if self.is_running:
            return
        
        self.is_running = True
        
        # One worker per configured slot; daemons so shutdown cannot hang.
        for index in range(self.max_workers):
            worker = threading.Thread(
                target=self._worker_loop,
                name=f"BatchWorker-{index}",
                daemon=True
            )
            worker.start()
            self.worker_threads.append(worker)
        
        # A single scheduler thread flushes the batch buffers.
        scheduler = threading.Thread(
            target=self._batch_scheduler,
            name="BatchScheduler",
            daemon=True
        )
        scheduler.start()
        self.worker_threads.append(scheduler)
        
        logger.info("批处理器已启动")
    
    def stop_processing(self):
        """Signal all threads to stop, wait for them, and close the pools."""
        self.is_running = False
        
        # Bounded wait per thread so a stuck worker cannot block shutdown.
        for thread in self.worker_threads:
            thread.join(timeout=5.0)
        
        self.thread_pool.shutdown(wait=True)
        if self.process_pool:
            self.process_pool.shutdown(wait=True)
        
        logger.info("批处理器已停止")
    
    def _worker_loop(self):
        """Main loop for one worker thread.

        Video-frame tasks take precedence over regular tasks; when neither
        queue has work the thread sleeps briefly instead of busy-waiting.
        """
        while self.is_running:
            try:
                # Frame tasks first (latency-sensitive).
                frame_task = self._get_next_frame_task()
                if frame_task:
                    # Each frame task runs in its own short-lived event loop.
                    asyncio.run(self._process_single_frame_task(frame_task))
                    continue
                
                # Fall back to regular tasks.
                task = self._get_next_task()
                if task:
                    self._process_single_task(task)
                else:
                    time.sleep(0.1)  # idle: nothing queued
                    
            except Exception as e:
                logger.error(f"工作线程异常: {e}")
                # Back off briefly so a persistent failure cannot spin this
                # thread at 100% CPU while flooding the log.
                time.sleep(0.1)
    
    def _get_next_task(self) -> Optional[BatchTask]:
        """Pop the oldest task from the highest-priority non-empty queue."""
        for priority in (TaskPriority.CRITICAL, TaskPriority.HIGH,
                         TaskPriority.NORMAL, TaskPriority.LOW):
            try:
                _, task = self.task_queues[priority].get_nowait()
            except queue.Empty:
                continue
            return task
        
        return None
    
    def _get_next_frame_task(self) -> Optional[VideoFrameTask]:
        """Pop the oldest frame task from the highest-priority non-empty deque."""
        for priority in (TaskPriority.CRITICAL, TaskPriority.HIGH,
                         TaskPriority.NORMAL, TaskPriority.LOW):
            pending = self.frame_queues[priority]
            if pending:
                return pending.popleft()
        
        return None
    
    async def _process_single_frame_task(self, frame_task: VideoFrameTask):
        """Run every requested algorithm on one frame and report via callback.

        On failure the callback (if any) receives a single error record in
        place of the algorithm results, and the task counts as failed.
        """
        started = time.time()
        
        try:
            outcomes = []
            for algorithm in frame_task.ai_algorithms:
                outcomes.extend(
                    await self._process_algorithm_batch(algorithm, [frame_task])
                )
            
            # Timing excludes the callback itself.
            elapsed = time.time() - started
            
            if frame_task.callback:
                frame_task.callback(outcomes)
            
            self._update_stats(elapsed, True)
            
        except Exception as e:
            elapsed = time.time() - started
            
            # Report the failure through the same callback channel.
            if frame_task.callback:
                frame_task.callback([{
                    "task_id": frame_task.task_id,
                    "camera_id": frame_task.camera_id,
                    "error": str(e),
                    "processing_timestamp": datetime.now().isoformat()
                }])
            
            self._update_stats(elapsed, False)
            
            logger.error(f"视频帧任务处理失败 {frame_task.task_id}: {e}")
    
    def _batch_scheduler(self):
        """Periodic loop that flushes the task and frame batch buffers."""
        while self.is_running:
            try:
                self._check_batch_conditions()        # regular task batches
                self._check_frame_batch_conditions()  # per-camera frame batches
            except Exception as e:
                logger.error(f"批处理调度器异常: {e}")
            else:
                time.sleep(0.1)  # poll interval when the pass succeeded
    
    def _check_frame_batch_conditions(self):
        """Check per-camera frame batching conditions and dispatch batches.

        A camera's pending frames are flushed when either the batch size is
        reached or the first snapshotted frame has waited longer than
        max_wait_time. NOTE(review): the snapshot/remove sequence races with
        worker threads popping the same deques; the ValueError guard below
        tolerates a task that was already taken.
        """
        current_time = time.time()
        
        # Snapshot all queued frame tasks across every priority level.
        all_frame_tasks = []
        for priority_queue in self.frame_queues.values():
            all_frame_tasks.extend(list(priority_queue))
        
        if not all_frame_tasks:
            return
        
        # Group pending frames by source camera.
        camera_groups = defaultdict(list)
        for frame_task in all_frame_tasks:
            camera_groups[frame_task.camera_id].append(frame_task)
        
        for camera_id, camera_frames in camera_groups.items():
            should_process = (
                len(camera_frames) >= self.batch_size or  # batch is full
                (current_time - camera_frames[0].created_at) > self.max_wait_time  # oldest frame waited too long
            )
            
            if should_process:
                batch_frames = camera_frames[:self.batch_size]
                
                # Remove the dispatched tasks from whichever deque holds them.
                for frame_task in batch_frames:
                    for priority_queue in self.frame_queues.values():
                        try:
                            priority_queue.remove(frame_task)
                        except ValueError:
                            pass  # already taken by a worker thread
                
                # Bind batch_frames as a default argument: a plain closure
                # captures the loop variable by reference, so every queued
                # lambda could end up processing the *last* camera's batch
                # by the time the thread pool actually runs it.
                self.thread_pool.submit(
                    lambda frames=batch_frames: asyncio.run(self.batch_process_frames(frames))
                )
    
    def _check_batch_conditions(self):
        """Flush any regular-task buffer that is full or has waited too long."""
        now = time.time()
        
        for task_type, pending in self.batch_buffers.items():
            if not pending:
                continue
            
            full = len(pending) >= self.batch_size
            stale = (now - pending[0].created_at) > self.max_wait_time
            
            if full or stale:
                # Slice off one batch and keep the remainder buffered.
                batch_tasks = pending[:self.batch_size]
                self.batch_buffers[task_type] = pending[self.batch_size:]
                
                self.thread_pool.submit(self._process_batch, batch_tasks)
    def _process_single_task(self, task: BatchTask):
        """Execute one task, deliver a BatchResult through its callback and
        record timing statistics (success or failure)."""
        handlers = {
            "person_detection": self._process_person_detection,
            "behavior_analysis": self._process_behavior_analysis,
            "object_detection": self._process_object_detection,
        }
        
        started = time.time()
        
        try:
            handler = handlers.get(task.task_type)
            if handler is None:
                raise ValueError(f"未知任务类型: {task.task_type}")
            result = handler(task.input_data)
            
            elapsed = time.time() - started
            
            outcome = BatchResult(
                task_id=task.task_id,
                result=result,
                success=True,
                processing_time=elapsed
            )
            
            if task.callback:
                task.callback(outcome)
            
            self._update_stats(elapsed, True)
            
        except Exception as e:
            elapsed = time.time() - started
            
            outcome = BatchResult(
                task_id=task.task_id,
                result=None,
                success=False,
                error_message=str(e),
                processing_time=elapsed
            )
            
            if task.callback:
                task.callback(outcome)
            
            self._update_stats(elapsed, False)
            
            logger.error(f"任务处理失败 {task.task_id}: {e}")
    
    def _process_batch(self, tasks: List[BatchTask]):
        """Execute a homogeneous batch of tasks and fan the results out to
        the per-task callbacks; every task shares the first task's type."""
        if not tasks:
            return
        
        batch_handlers = {
            "person_detection": self._batch_person_detection,
            "behavior_analysis": self._batch_behavior_analysis,
            "object_detection": self._batch_object_detection,
        }
        
        started = time.time()
        task_type = tasks[0].task_type
        
        try:
            handler = batch_handlers.get(task_type)
            if handler is None:
                raise ValueError(f"未知批处理任务类型: {task_type}")
            results = handler([task.input_data for task in tasks])
            
            processing_time = time.time() - started
            per_task_time = processing_time / len(tasks)  # amortized per-task cost
            
            for task, result in zip(tasks, results):
                outcome = BatchResult(
                    task_id=task.task_id,
                    result=result,
                    success=True,
                    processing_time=per_task_time
                )
                
                if task.callback:
                    task.callback(outcome)
                
                self._update_stats(per_task_time, True)
            
            logger.debug(f"批处理完成: {len(tasks)}个{task_type}任务, 耗时: {processing_time:.3f}s")
            
        except Exception as e:
            processing_time = time.time() - started
            per_task_time = processing_time / len(tasks)
            
            # Report the shared failure to every task in the batch.
            for task in tasks:
                outcome = BatchResult(
                    task_id=task.task_id,
                    result=None,
                    success=False,
                    error_message=str(e),
                    processing_time=per_task_time
                )
                
                if task.callback:
                    task.callback(outcome)
                
                self._update_stats(per_task_time, False)
            
            logger.error(f"批处理失败: {e}")
    
    def _process_person_detection(self, image_data: np.ndarray) -> List[Dict[str, Any]]:
        """Detect people in one image with the cached HOG model.

        Returns one dict per detection with bbox [x, y, w, h], confidence
        and class. Raises RuntimeError when the model was never loaded.
        """
        model = self.model_cache.get("person_detection")
        if not model:
            raise RuntimeError("人员检测模型未加载")
        
        boxes, weights = model.detectMultiScale(image_data, winStride=(8, 8))
        
        return [
            {
                "bbox": [int(x), int(y), int(w), int(h)],
                "confidence": float(weight),
                "class": "person"
            }
            for (x, y, w, h), weight in zip(boxes, weights)
        ]
    
    def _batch_person_detection(self, image_batch: List[np.ndarray]) -> List[List[Dict[str, Any]]]:
        """Run person detection over a list of images, one result list each."""
        if not self.model_cache.get("person_detection"):
            raise RuntimeError("人员检测模型未加载")
        
        # Sequential per-image detection; HOG has no native batch API.
        return [self._process_person_detection(image) for image in image_batch]
    
    def _process_behavior_analysis(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Placeholder behavior analysis: always reports 'normal'."""
        return {
            "behavior_type": "normal",
            "confidence": 0.95,
            "anomaly_score": 0.1
        }
    
    def _batch_behavior_analysis(self, input_batch: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Run behavior analysis over a batch of inputs."""
        return [self._process_behavior_analysis(item) for item in input_batch]
    
    def _process_object_detection(self, image_data: np.ndarray) -> List[Dict[str, Any]]:
        """Placeholder object detection: returns one hard-coded 'chair' box."""
        return [
            {
                "bbox": [100, 100, 50, 50],
                "confidence": 0.9,
                "class": "chair"
            }
        ]
    
    def _batch_object_detection(self, image_batch: List[np.ndarray]) -> List[List[Dict[str, Any]]]:
        """Run object detection over a batch of images."""
        return [self._process_object_detection(image) for image in image_batch]
    
    def _update_stats(self, processing_time: float, success: bool):
        """Update task counters, the running mean latency and the efficiency.

        The mean covers *successful* tasks only: the previous version also
        folded failed tasks' durations into the mean while weighting by the
        success count, which skewed avg_processing_time.
        NOTE(review): called from several worker threads without a lock, so
        counters may drift slightly under contention.
        """
        if success:
            self.stats["completed_tasks"] += 1
            # Incremental running mean: new = old + (t - old) / n
            total_completed = self.stats["completed_tasks"]
            current_avg = self.stats["avg_processing_time"]
            self.stats["avg_processing_time"] = (
                current_avg + (processing_time - current_avg) / total_completed
            )
        else:
            self.stats["failed_tasks"] += 1
        
        # Efficiency = completed / submitted.
        total_tasks = self.stats["total_tasks"]
        if total_tasks > 0:
            self.stats["batch_efficiency"] = self.stats["completed_tasks"] / total_tasks
    
    async def process_video_frame(self, camera_id: str, frame: np.ndarray, 
                                 ai_algorithms: List[str],
                                 frame_timestamp: Optional[datetime] = None) -> str:
        """Preprocess one frame, enqueue it as a VideoFrameTask, return its id.

        ``frame_timestamp`` defaults to "now"; the annotation is Optional to
        match that. Preprocessing happens before the task is built:
        _preprocess_video_frame already copies the frame internally, so the
        previous ``frame.copy()`` passed to the task was immediately
        discarded and only wasted memory. (On a preprocessing error the
        original frame object is kept, matching the previous behavior.)
        """
        if frame_timestamp is None:
            frame_timestamp = datetime.now()
        
        processed_frame = await self._preprocess_video_frame(frame, camera_id)
        
        task_id = str(uuid.uuid4())
        frame_task = VideoFrameTask(
            task_id=task_id,
            camera_id=camera_id,
            frame=processed_frame,
            frame_timestamp=frame_timestamp,
            ai_algorithms=ai_algorithms,
            priority=TaskPriority.NORMAL
        )
        
        # Enqueue on the deque matching the task's priority.
        self.frame_queues[frame_task.priority].append(frame_task)
        
        logger.debug(f"视频帧任务已提交: {task_id}, 摄像头: {camera_id}, 算法: {ai_algorithms}")
        
        return task_id
    
    async def batch_process_frames(self, frame_batch: List[VideoFrameTask]) -> List[Dict[str, Any]]:
        """Process a batch of frame tasks, one concurrent sub-batch per camera.

        Per-camera exceptions are logged and skipped; returns the merged
        result list (empty on total failure or empty input).
        """
        if not frame_batch:
            return []
        
        started = time.time()
        merged = []
        
        try:
            # Partition the batch by source camera.
            by_camera = defaultdict(list)
            for frame_task in frame_batch:
                by_camera[frame_task.camera_id].append(frame_task)
            
            # One concurrent processing task per camera.
            pending = [
                asyncio.create_task(self._process_camera_frame_batch(camera_id, frames))
                for camera_id, frames in by_camera.items()
            ]
            
            outcomes = await asyncio.gather(*pending, return_exceptions=True)
            
            # Merge the per-camera result lists, skipping failures.
            for outcome in outcomes:
                if isinstance(outcome, Exception):
                    logger.error(f"批处理帧时出错: {outcome}")
                else:
                    merged.extend(outcome)
            
            logger.debug(f"批量处理完成: {len(frame_batch)}帧, 耗时: {time.time() - started:.3f}s")
            
            return merged
            
        except Exception as e:
            logger.error(f"批量处理视频帧失败: {e}")
            return []
    
    async def _preprocess_video_frame(self, frame: np.ndarray, camera_id: str) -> np.ndarray:
        """Apply the configured preprocessing pipeline to one frame.

        Steps (all optional, driven by config["frame_preprocessing"]):
        resize -> denoise -> contrast enhancement -> normalize.
        Normalization is applied LAST: it converts the image to float32 in
        [0, 1], while OpenCV's fastNlMeansDenoisingColored and CLAHE require
        8-bit input — with the old order (normalize first) enabling
        normalize together with denoise/enhance_contrast always raised and
        silently fell back to the raw frame. On any error the original frame
        is returned unchanged.
        """
        try:
            preprocess_config = self.config.get("frame_preprocessing", {})
            
            processed_frame = frame.copy()
            
            # Resize to the configured (width, height).
            target_size = preprocess_config.get("target_size")
            if target_size:
                processed_frame = cv2.resize(processed_frame, tuple(target_size))
            
            # Denoise (requires uint8 input).
            if preprocess_config.get("denoise", False):
                processed_frame = cv2.fastNlMeansDenoisingColored(processed_frame)
            
            # Contrast enhancement: CLAHE on the L channel (uint8 input).
            if preprocess_config.get("enhance_contrast", False):
                lab = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2LAB)
                l, a, b = cv2.split(lab)
                clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
                l = clahe.apply(l)
                processed_frame = cv2.merge([l, a, b])
                processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_LAB2BGR)
            
            # Normalize to float32 in [0, 1] as the final step.
            if preprocess_config.get("normalize", False):
                processed_frame = processed_frame.astype(np.float32) / 255.0
            
            return processed_frame
            
        except Exception as e:
            logger.error(f"视频帧预处理失败: {e}")
            return frame
    
    async def _process_camera_frame_batch(self, camera_id: str, frame_tasks: List[VideoFrameTask]) -> List[Dict[str, Any]]:
        """Run every requested algorithm over one camera's frame batch.

        When a GPU manager is attached, memory proportional to the batch
        size is allocated first and — unlike the previous version, which
        leaked the allocation on any exception — released in a ``finally``
        block, success or failure. Returns one result dict per
        (frame, algorithm) pair, or an empty list on error.
        """
        results = []
        gpu_allocation = None
        
        try:
            # Reserve GPU memory for the batch.
            if self.gpu_manager and self.enable_gpu:
                from schemas.gpu_resource import GPUResourceRequest
                
                memory_required = len(frame_tasks) * 100  # rough estimate: ~100MB per frame
                gpu_request = GPUResourceRequest(
                    memory_required=memory_required,
                    max_duration=60,
                    exclusive=False
                )
                gpu_allocation = await self.gpu_manager.allocate_resource(gpu_request)
            
            # Group tasks by algorithm so each model runs once per batch.
            algorithm_groups = defaultdict(list)
            for frame_task in frame_tasks:
                for algorithm in frame_task.ai_algorithms:
                    algorithm_groups[algorithm].append(frame_task)
            
            for algorithm, tasks in algorithm_groups.items():
                algorithm_results = await self._process_algorithm_batch(algorithm, tasks)
                results.extend(algorithm_results)
            
            return results
            
        except Exception as e:
            logger.error(f"处理摄像头{camera_id}帧批次失败: {e}")
            return []
        finally:
            # Always release the GPU allocation, even when processing failed.
            if gpu_allocation and self.gpu_manager:
                await self.gpu_manager.release_resource(gpu_allocation.allocation_id)
    
    async def _process_algorithm_batch(self, algorithm: str, frame_tasks: List[VideoFrameTask]) -> List[Dict[str, Any]]:
        """Run one algorithm over a batch of frame tasks.

        When an AI service router is attached, each frame is routed to the
        external services and only successful results tagged with this
        algorithm are kept; otherwise the built-in batch implementations are
        used. Returns one result dict per frame (empty list on error).
        """
        results = []
        
        try:
            # Prefer the external AI service router when one is attached.
            if self.ai_service_router:
                for frame_task in frame_tasks:
                    routing_results = await self.ai_service_router.route_frame_to_services(
                        frame_task.camera_id, 
                        frame_task.frame, 
                        frame_task.frame_timestamp
                    )
                    
                    # Keep only successful, dict-like results produced by
                    # this algorithm (hasattr 'get' screens out non-mappings).
                    for routing_result in routing_results:
                        if routing_result.success and hasattr(routing_result.result, 'get'):
                            if routing_result.result.get('algorithm') == algorithm:
                                result = {
                                    "task_id": frame_task.task_id,
                                    "camera_id": frame_task.camera_id,
                                    "algorithm": algorithm,
                                    "frame_timestamp": frame_task.frame_timestamp.isoformat(),
                                    "result": routing_result.result,
                                    "processing_timestamp": datetime.now().isoformat(),
                                    "service_name": routing_result.service_name,
                                    "processing_time": routing_result.processing_time
                                }
                                results.append(result)
                
                return results
            
            # Fall back to the built-in processing methods.
            frames = [task.frame for task in frame_tasks]
            
            # Dispatch to the batch implementation for this algorithm.
            if algorithm == "person_detection":
                algorithm_results = await self._batch_person_detection_async(frames)
            elif algorithm == "behavior_analysis":
                algorithm_results = await self._batch_behavior_analysis_async(frames)
            elif algorithm == "object_detection":
                algorithm_results = await self._batch_object_detection_async(frames)
            elif algorithm == "crowd_density":
                algorithm_results = await self._batch_crowd_density_async(frames)
            elif algorithm == "waste_detection":
                algorithm_results = await self._batch_waste_detection_async(frames)
            else:
                logger.warning(f"未知算法类型: {algorithm}")
                algorithm_results = [{"error": f"未知算法: {algorithm}"}] * len(frames)
            
            # Pair each frame task with its algorithm result.
            for frame_task, algorithm_result in zip(frame_tasks, algorithm_results):
                result = {
                    "task_id": frame_task.task_id,
                    "camera_id": frame_task.camera_id,
                    "algorithm": algorithm,
                    "frame_timestamp": frame_task.frame_timestamp.isoformat(),
                    "result": algorithm_result,
                    "processing_timestamp": datetime.now().isoformat()
                }
                results.append(result)
            
            return results
            
        except Exception as e:
            logger.error(f"处理算法{algorithm}批次失败: {e}")
            return []
    
    async def _batch_person_detection_async(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Run HOG person detection on a frame batch in the thread pool."""
        # get_running_loop() is the correct call inside a coroutine;
        # get_event_loop() is deprecated there and unreliable on Python 3.12+.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(self.thread_pool, self._batch_person_detection, frames)
    
    async def _batch_behavior_analysis_async(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Run behavior analysis on a frame batch in the thread pool."""
        loop = asyncio.get_running_loop()
        # Wrap each frame in the dict shape the behavior handler expects.
        input_data = [{"frame": frame, "timestamp": datetime.now()} for frame in frames]
        return await loop.run_in_executor(self.thread_pool, self._batch_behavior_analysis, input_data)
    
    async def _batch_object_detection_async(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Run object detection on a frame batch in the thread pool."""
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(self.thread_pool, self._batch_object_detection, frames)
    
    async def _batch_crowd_density_async(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Estimate crowd density per frame in the thread pool.

        Placeholder estimator: mean grayscale intensity serves as the
        density proxy and is scaled into a rough head count.
        """
        loop = asyncio.get_running_loop()
        
        def _process_crowd_density_batch(frame_batch):
            results = []
            for frame in frame_batch:
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                density_score = np.mean(gray) / 255.0  # simplistic density proxy
                
                result = {
                    "density_level": "high" if density_score > 0.7 else "medium" if density_score > 0.4 else "low",
                    "density_score": float(density_score),
                    "estimated_count": int(density_score * 50)  # crude head-count estimate
                }
                results.append(result)
            return results
        
        return await loop.run_in_executor(self.thread_pool, _process_crowd_density_batch, frames)
    
    async def _batch_waste_detection_async(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Placeholder waste detection: always reports 'nothing detected'.

        A trained waste-detection model should replace this stub.
        """
        loop = asyncio.get_running_loop()
        
        def _process_waste_detection_batch(frame_batch):
            return [
                {
                    "waste_detected": False,
                    "waste_type": None,
                    "confidence": 0.0,
                    "bounding_boxes": []
                }
                for _ in frame_batch
            ]
        
        return await loop.run_in_executor(self.thread_pool, _process_waste_detection_batch, frames)
    
    def set_gpu_manager(self, gpu_manager):
        """Attach the GPU resource manager used for per-batch allocations."""
        self.gpu_manager = gpu_manager
        logger.info("GPU资源管理器已设置")
    
    def set_ai_service_router(self, ai_service_router):
        """Attach the AI service router; once set it takes precedence over
        the built-in algorithm implementations in _process_algorithm_batch."""
        self.ai_service_router = ai_service_router
        logger.info("AI服务路由器已设置")
    
    def get_frame_processing_stats(self) -> Dict[str, Any]:
        """Snapshot of frame-queue depths, per-camera buffers and GPU state."""
        queue_sizes = {}
        for priority, pending in self.frame_queues.items():
            queue_sizes[priority.name] = len(pending)
        
        buffer_sizes = {}
        for camera_id, frames in self.frame_batch_buffers.items():
            buffer_sizes[camera_id] = len(frames)
        
        return {
            "frame_queue_sizes": queue_sizes,
            "frame_buffer_sizes": buffer_sizes,
            "active_processors": len(self.video_frame_processors),
            "gpu_manager_available": self.gpu_manager is not None
        }

    def get_stats(self) -> Dict[str, Any]:
        """Combined processing statistics, queue/buffer depths and run state."""
        snapshot = dict(self.stats)
        snapshot["queue_sizes"] = {
            priority.name: pending.qsize()
            for priority, pending in self.task_queues.items()
        }
        snapshot["buffer_sizes"] = {
            task_type: len(pending)
            for task_type, pending in self.batch_buffers.items()
        }
        snapshot["model_cache_size"] = len(self.model_cache)
        snapshot["is_running"] = self.is_running
        
        # Fold in the video-frame statistics.
        snapshot.update(self.get_frame_processing_stats())
        
        return snapshot
    
    def optimize_batch_size(self):
        """Nudge batch_size up or down based on observed latency/efficiency."""
        avg_time = self.stats["avg_processing_time"]
        efficiency = self.stats["batch_efficiency"]
        
        if avg_time > 2.0 and self.batch_size > 2:
            # Batches take too long: shrink (never below 2).
            self.batch_size = max(2, self.batch_size - 1)
            logger.info(f"批处理大小调整为: {self.batch_size}")
        elif avg_time < 0.5 and efficiency > 0.9 and self.batch_size < 16:
            # Fast and efficient: grow (never above 16).
            self.batch_size = min(16, self.batch_size + 1)
            logger.info(f"批处理大小调整为: {self.batch_size}")


class GPUResourceManager:
    """Simple bookkeeping for a single GPU's memory budget.

    Probes GPU availability via torch (falling back to tensorflow), reads
    the device's total memory, and tracks allocations as a plain counter.
    NOTE: counters are not synchronized across threads.
    """
    
    def __init__(self):
        # Probe availability first; the memory limit depends on it.
        self.gpu_available = self._check_gpu_availability()
        self.gpu_memory_limit = self._get_gpu_memory_limit()
        self.current_usage = 0
    
    def _check_gpu_availability(self) -> bool:
        """Return True when torch (or, failing that, tensorflow) sees a GPU."""
        try:
            import torch
            return torch.cuda.is_available()
        except ImportError:
            pass
        try:
            import tensorflow as tf
            return len(tf.config.list_physical_devices('GPU')) > 0
        except ImportError:
            return False
    
    def _get_gpu_memory_limit(self) -> int:
        """Return total GPU memory in MB (0 without a GPU, 4096 as fallback)."""
        if not self.gpu_available:
            return 0
        
        try:
            import torch
            if torch.cuda.is_available():
                return torch.cuda.get_device_properties(0).total_memory // (1024 * 1024)  # bytes -> MB
        except ImportError:
            pass
        
        return 4096  # assume 4GB when torch cannot report a figure
    
    def allocate_gpu_memory(self, required_mb: int) -> bool:
        """Reserve required_mb; False when no GPU or the budget is exceeded."""
        if not self.gpu_available:
            return False
        if self.current_usage + required_mb > self.gpu_memory_limit:
            return False
        self.current_usage += required_mb
        return True
    
    def release_gpu_memory(self, released_mb: int):
        """Return released_mb to the pool, never dropping below zero."""
        self.current_usage = max(0, self.current_usage - released_mb)
    
    def get_gpu_status(self) -> Dict[str, Any]:
        """Snapshot of availability, limit, usage and usage percentage."""
        usage_pct = (self.current_usage / max(self.gpu_memory_limit, 1)) * 100
        return {
            "available": self.gpu_available,
            "memory_limit_mb": self.gpu_memory_limit,
            "current_usage_mb": self.current_usage,
            "usage_percentage": usage_pct
        }


# Module-level singletons: the batch processor is created lazily through
# initialize_batch_processor(); the GPU manager is created eagerly at import.
batch_processor = None
gpu_manager = GPUResourceManager()


def initialize_batch_processor(config: Dict[str, Any]):
    """Create the global batch processor from ``config`` and start its threads."""
    global batch_processor
    batch_processor = AIBatchProcessor(config)
    batch_processor.start_processing()
    logger.info("AI批处理器已初始化")


def get_batch_processor() -> Optional[AIBatchProcessor]:
    """Return the global batch processor, or None when not initialized."""
    return batch_processor


def shutdown_batch_processor():
    """Stop the global batch processor and clear the singleton."""
    global batch_processor
    if batch_processor:
        batch_processor.stop_processing()
        batch_processor = None
        logger.info("AI批处理器已关闭")