"""
多摄像头并发处理架构
扩展现有ThreadPoolExecutor支持500路并发处理
实现负载分配和GPU资源动态分配
"""
import asyncio
import logging
from typing import Dict, List, Optional, Any, Set, Tuple
from concurrent.futures import ThreadPoolExecutor, as_completed
from collections import defaultdict, deque
import time
import threading
from datetime import datetime
import numpy as np
from dataclasses import dataclass
from enum import Enum

from services.ai_infrastructure.ai_batch_processor import AIBatchProcessor, VideoFrameTask, TaskPriority
from services.ai_infrastructure.gpu_resource_manager import gpu_resource_manager
from schemas.gpu_resource import GPUResourceRequest
from core.config import get_settings

logger = logging.getLogger(__name__)
settings = get_settings()


class WorkerStatus(Enum):
    """Lifecycle states of a stream worker (declared here; not referenced elsewhere in this file)."""
    IDLE = "idle"
    RUNNING = "running"
    OVERLOADED = "overloaded"
    ERROR = "error"
    STOPPING = "stopping"


@dataclass
class CameraGroup:
    """A set of cameras processed together by one worker loop."""
    group_id: str                               # unique identifier of this group
    camera_ids: Set[str]                        # cameras currently assigned to the group
    priority_level: int                         # TaskPriority value this group serves
    max_cameras: int                            # capacity before the group counts as full
    current_load: float                         # len(camera_ids) / max_cameras
    worker_thread_id: Optional[str] = None      # reserved; never set in this file
    last_processed: Optional[datetime] = None   # timestamp of the last processed batch
    frames_processed: int = 0                   # cumulative frames handled by the group
    error_count: int = 0                        # cumulative failed batches


@dataclass
class WorkerMetrics:
    """Performance metrics for one worker (one record per camera group)."""
    worker_id: str              # mirrors the group_id it describes
    cameras_assigned: int       # number of cameras in the group
    frames_per_second: float    # throughput estimate (see _update_worker_metrics)
    gpu_utilization: float      # last sampled GPU utilization for the group
    memory_usage: float         # not updated anywhere in this file (stays 0.0)
    error_rate: float           # error_count / frames_processed of the group
    uptime: float               # not updated anywhere in this file (stays 0.0)
    last_activity: datetime     # last time the worker touched this record


class ConcurrentStreamProcessor:
    """并发流处理器"""
    
    def __init__(self, max_concurrent_cameras: int = 500):
        """Size the processor for *max_concurrent_cameras* simultaneous streams."""
        self.max_concurrent_cameras = max_concurrent_cameras
        self.cameras_per_worker = 100  # each worker group handles up to 100 cameras
        self.max_workers = max(1, max_concurrent_cameras // self.cameras_per_worker)
        
        # Shared thread pool: one thread per worker group for frame processing
        self.thread_pool = ThreadPoolExecutor(
            max_workers=self.max_workers,
            thread_name_prefix="StreamWorker"
        )
        
        # Camera-group bookkeeping (group_id -> group, camera_id -> group_id)
        self.camera_groups: Dict[str, CameraGroup] = {}
        self.camera_to_group: Dict[str, str] = {}
        
        # Per-group worker state: metrics, asyncio task, and frame queue
        self.worker_metrics: Dict[str, WorkerMetrics] = {}
        self.worker_tasks: Dict[str, asyncio.Task] = {}
        self.worker_queues: Dict[str, deque] = {}
        
        # Priority queues feeding the frame scheduler loop
        self.priority_queues = {
            TaskPriority.CRITICAL: deque(),
            TaskPriority.HIGH: deque(),
            TaskPriority.NORMAL: deque(),
            TaskPriority.LOW: deque()
        }
        
        # GPU resource pool (group_id -> allocation handle)
        self.gpu_allocations: Dict[str, Any] = {}
        self.gpu_allocation_lock = asyncio.Lock()
        
        # Load balancer (rebalances cameras across groups)
        self.load_balancer = LoadBalancer(self)
        
        # Performance monitor (aggregates worker metrics)
        self.performance_monitor = PerformanceMonitor(self)
        
        # Run state and global aggregate statistics
        self.is_running = False
        self.processing_stats = {
            "total_frames_processed": 0,
            "frames_per_second": 0.0,
            "average_latency": 0.0,
            "error_rate": 0.0,
            "gpu_utilization": 0.0
        }
        
        # Background management tasks: scheduler / monitor / balancer
        self._scheduler_task: Optional[asyncio.Task] = None
        self._monitor_task: Optional[asyncio.Task] = None
        self._balancer_task: Optional[asyncio.Task] = None
    
    async def initialize(self):
        """Prepare groups, worker metrics and GPU resources before start()."""
        try:
            logger.info(f"初始化并发流处理器，支持 {self.max_concurrent_cameras} 路摄像头")

            # Build the per-priority camera groups, then their bookkeeping.
            await self._create_initial_camera_groups()
            await self._initialize_worker_metrics()
            await self._initialize_gpu_resources()

            logger.info(f"并发流处理器初始化完成，创建了 {len(self.camera_groups)} 个摄像头组")

        except Exception as e:
            logger.error(f"初始化并发流处理器失败: {e}")
            raise
    
    async def start(self):
        """Launch the management loops and one processing task per group."""
        if self.is_running:
            logger.warning("并发流处理器已在运行")
            return

        try:
            logger.info("启动并发流处理器")
            self.is_running = True

            # Background management loops: scheduler, monitor, balancer.
            self._scheduler_task = asyncio.create_task(self._frame_scheduler_loop())
            self._monitor_task = asyncio.create_task(self.performance_monitor.monitor_loop())
            self._balancer_task = asyncio.create_task(self.load_balancer.balance_loop())

            # Spin up the processing loop of every existing group.
            for gid in list(self.camera_groups):
                await self._start_group_processing(gid)

            logger.info("并发流处理器启动成功")

        except Exception as e:
            logger.error(f"启动并发流处理器失败: {e}")
            self.is_running = False
            raise
    
    async def stop(self):
        """Stop the processor: cancel loops, stop workers, release GPUs, close the pool."""
        if not self.is_running:
            return
        
        try:
            logger.info("停止并发流处理器")
            
            self.is_running = False
            
            # Cancel the scheduler / monitor / balancer management loops first.
            for task in [self._scheduler_task, self._monitor_task, self._balancer_task]:
                if task and not task.done():
                    task.cancel()
                    try:
                        await task
                    except asyncio.CancelledError:
                        pass
            
            # Then cancel every per-group worker task.
            for group_id in list(self.worker_tasks.keys()):
                await self._stop_group_processing(group_id)
            
            # Return all pre-allocated GPU slices.
            await self._cleanup_gpu_resources()
            
            # Finally drain and close the shared thread pool (blocks until done).
            self.thread_pool.shutdown(wait=True)
            
            logger.info("并发流处理器已停止")
            
        except Exception as e:
            logger.error(f"停止并发流处理器失败: {e}")
    
    async def assign_camera_to_group(self, camera_id: str, priority: int = 5) -> bool:
        """Place *camera_id* into the least-loaded compatible group.

        Creates a new group when no compatible group has spare capacity.
        Returns True on success, False otherwise.
        """
        try:
            target = await self._find_best_group_for_camera(camera_id, priority)
            if target is None:
                # No compatible group with capacity: try to create one.
                target = await self._create_new_camera_group(priority)

            if target is None:
                return False

            target.camera_ids.add(camera_id)
            self.camera_to_group[camera_id] = target.group_id

            # Recompute the group's load fraction.
            target.current_load = len(target.camera_ids) / target.max_cameras

            logger.info(f"摄像头 {camera_id} 分配到组 {target.group_id}")
            return True

        except Exception as e:
            logger.error(f"分配摄像头 {camera_id} 到组失败: {e}")
            return False
    
    async def remove_camera_from_group(self, camera_id: str) -> bool:
        """Detach *camera_id* from its group; groups left empty are cleaned up."""
        try:
            group_id = self.camera_to_group.get(camera_id)
            if group_id is None:
                # Camera was never assigned: nothing to do.
                return True

            group = self.camera_groups.get(group_id)
            if group is not None:
                group.camera_ids.discard(camera_id)
                group.current_load = len(group.camera_ids) / group.max_cameras

                # Last camera gone: release the group's resources.
                if not group.camera_ids:
                    await self._cleanup_empty_group(group_id)

            del self.camera_to_group[camera_id]

            logger.info(f"摄像头 {camera_id} 从组 {group_id} 中移除")
            return True

        except Exception as e:
            logger.error(f"从组中移除摄像头 {camera_id} 失败: {e}")
            return False
    
    async def process_frame_batch(self, frame_batch: List[VideoFrameTask]) -> List[Dict[str, Any]]:
        """Route frames to their owning groups and process all groups concurrently.

        Cameras not yet assigned to a group are assigned on the fly; frames
        for cameras that still cannot be assigned are silently skipped.
        """
        if not frame_batch:
            return []

        try:
            # Bucket tasks by owning group.
            group_batches: Dict[str, List[VideoFrameTask]] = defaultdict(list)
            for task in frame_batch:
                gid = self.camera_to_group.get(task.camera_id)
                if gid is None:
                    # Unassigned camera: try an ad-hoc assignment.
                    await self.assign_camera_to_group(task.camera_id, task.priority.value)
                    gid = self.camera_to_group.get(task.camera_id)
                if gid is not None:
                    group_batches[gid].append(task)

            # Fan out one coroutine per group.
            pending = [
                asyncio.create_task(self._process_group_frame_batch(gid, tasks))
                for gid, tasks in group_batches.items()
            ]

            # Collect results, logging (not raising) per-group failures.
            results: List[Dict[str, Any]] = []
            for outcome in await asyncio.gather(*pending, return_exceptions=True):
                if isinstance(outcome, Exception):
                    logger.error(f"处理组帧批次时出错: {outcome}")
                else:
                    results.extend(outcome)

            self.processing_stats["total_frames_processed"] += len(frame_batch)
            return results

        except Exception as e:
            logger.error(f"处理帧批次失败: {e}")
            return []
    
    async def _create_initial_camera_groups(self):
        """Create one empty camera group per task-priority level."""
        try:
            priorities = [TaskPriority.CRITICAL, TaskPriority.HIGH, TaskPriority.NORMAL, TaskPriority.LOW]

            for idx, prio in enumerate(priorities):
                gid = f"group_{prio.name.lower()}_{idx}"
                self.camera_groups[gid] = CameraGroup(
                    group_id=gid,
                    camera_ids=set(),
                    priority_level=prio.value,
                    max_cameras=self.cameras_per_worker,
                    current_load=0.0
                )
                # Each group gets its own dedicated frame queue.
                self.worker_queues[gid] = deque()

            logger.info(f"创建了 {len(self.camera_groups)} 个初始摄像头组")

        except Exception as e:
            logger.error(f"创建初始摄像头分组失败: {e}")
            raise
    
    async def _initialize_worker_metrics(self):
        """Create a zeroed WorkerMetrics record for every existing group."""
        try:
            for gid in self.camera_groups:
                self.worker_metrics[gid] = WorkerMetrics(
                    worker_id=gid,
                    cameras_assigned=0,
                    frames_per_second=0.0,
                    gpu_utilization=0.0,
                    memory_usage=0.0,
                    error_rate=0.0,
                    uptime=0.0,
                    last_activity=datetime.now()
                )

        except Exception as e:
            logger.error(f"初始化工作器指标失败: {e}")
            raise
    
    async def _initialize_gpu_resources(self):
        """Pre-allocate a GPU slice for each camera group.

        Groups are spread evenly across the available GPUs. Allocation
        failures are logged only; a group without an allocation simply
        processes frames without a pre-allocated GPU.
        """
        try:
            gpu_info = await gpu_resource_manager.get_gpu_info()
            if not gpu_info:
                return

            gpu_count = len(gpu_info)
            groups_per_gpu = max(1, len(self.camera_groups) // gpu_count)

            for i, group_id in enumerate(self.camera_groups.keys()):
                # BUGFIX: clamp so the preferred id never exceeds the last GPU.
                # The original `i // groups_per_gpu` overflows gpu_count when
                # the group count is not an exact multiple of the GPU count
                # (e.g. 4 groups on 3 GPUs yielded preferred_gpu_id == 3).
                gpu_id = min(i // groups_per_gpu, gpu_count - 1)

                # Pre-allocate a non-exclusive slice for this group.
                gpu_request = GPUResourceRequest(
                    memory_required=2048,  # 2GB per group
                    max_duration=3600,     # 1 hour
                    exclusive=False,
                    preferred_gpu_id=gpu_id
                )

                allocation = await gpu_resource_manager.allocate_resource(gpu_request)
                if allocation:
                    self.gpu_allocations[group_id] = allocation
                    logger.info(f"为组 {group_id} 分配GPU资源: GPU {gpu_id}")

        except Exception as e:
            logger.error(f"初始化GPU资源失败: {e}")
    
    async def _find_best_group_for_camera(self, camera_id: str, priority: int) -> Optional[CameraGroup]:
        """Return the least-loaded group within one priority level of *priority*.

        Only groups with spare capacity qualify; returns None when none match.
        """
        try:
            # Candidates: priority within +/-1 and room for another camera.
            candidates = (
                g for g in self.camera_groups.values()
                if abs(g.priority_level - priority) <= 1
                and len(g.camera_ids) < g.max_cameras
            )

            best: Optional[CameraGroup] = None
            lowest = float('inf')
            for g in candidates:
                if g.current_load < lowest:
                    lowest = g.current_load
                    best = g
            return best

        except Exception as e:
            logger.error(f"为摄像头 {camera_id} 寻找最佳组失败: {e}")
            return None
    
    async def _create_new_camera_group(self, priority: int) -> Optional[CameraGroup]:
        """Create a dynamic group at *priority*; start its loop if already running.

        Returns None when the worker limit is reached or creation fails.
        """
        try:
            if len(self.camera_groups) >= self.max_workers:
                logger.warning("已达到最大工作器数量，无法创建新组")
                return None

            gid = f"group_dynamic_{len(self.camera_groups)}"
            new_group = CameraGroup(
                group_id=gid,
                camera_ids=set(),
                priority_level=priority,
                max_cameras=self.cameras_per_worker,
                current_load=0.0
            )
            self.camera_groups[gid] = new_group
            self.worker_queues[gid] = deque()

            # Fresh, zeroed metrics record for the new worker.
            self.worker_metrics[gid] = WorkerMetrics(
                worker_id=gid,
                cameras_assigned=0,
                frames_per_second=0.0,
                gpu_utilization=0.0,
                memory_usage=0.0,
                error_rate=0.0,
                uptime=0.0,
                last_activity=datetime.now()
            )

            # If the processor is already live, spin up this group's loop now.
            if self.is_running:
                await self._start_group_processing(gid)

            logger.info(f"创建新摄像头组: {gid}")
            return new_group

        except Exception as e:
            logger.error(f"创建新摄像头组失败: {e}")
            return None
    
    async def _cleanup_empty_group(self, group_id: str):
        """Tear down a group that no longer has cameras assigned.

        Stops its processing task, releases its GPU allocation and removes
        all bookkeeping. No-op if the group is missing or not empty.
        """
        try:
            if group_id not in self.camera_groups:
                return
            
            group = self.camera_groups[group_id]
            if group.camera_ids:
                return  # group is not empty
            
            # Cancel the group's processing loop first.
            await self._stop_group_processing(group_id)
            
            # Return the group's pre-allocated GPU slice, if any.
            if group_id in self.gpu_allocations:
                allocation = self.gpu_allocations[group_id]
                await gpu_resource_manager.release_resource(allocation.allocation_id)
                del self.gpu_allocations[group_id]
            
            # Drop all bookkeeping for the group.
            del self.camera_groups[group_id]
            if group_id in self.worker_metrics:
                del self.worker_metrics[group_id]
            if group_id in self.worker_queues:
                del self.worker_queues[group_id]
            
            logger.info(f"清理空摄像头组: {group_id}")
            
        except Exception as e:
            logger.error(f"清理空摄像头组 {group_id} 失败: {e}")
    
    async def _start_group_processing(self, group_id: str):
        """Spawn the asyncio processing loop for *group_id* (warns if it exists)."""
        try:
            if group_id in self.worker_tasks:
                logger.warning(f"组 {group_id} 处理任务已存在")
                return

            self.worker_tasks[group_id] = asyncio.create_task(
                self._group_processing_loop(group_id)
            )
            logger.info(f"启动组 {group_id} 处理任务")

        except Exception as e:
            logger.error(f"启动组 {group_id} 处理任务失败: {e}")
    
    async def _stop_group_processing(self, group_id: str):
        """Cancel and await the processing loop of *group_id*, if one exists."""
        try:
            task = self.worker_tasks.get(group_id)
            if task is None:
                return

            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass  # cancellation is the expected outcome here

            del self.worker_tasks[group_id]
            logger.info(f"停止组 {group_id} 处理任务")

        except Exception as e:
            logger.error(f"停止组 {group_id} 处理任务失败: {e}")
    
    async def _group_processing_loop(self, group_id: str):
        """Per-group worker loop: drain the group's queue in batches until stopped.

        Runs as an asyncio task created by _start_group_processing; exits when
        is_running turns false or the task is cancelled.
        """
        try:
            logger.info(f"组 {group_id} 开始处理循环")
            
            while self.is_running:
                try:
                    # Pull from this group's dedicated queue.
                    queue = self.worker_queues[group_id]
                    
                    if queue:
                        # Process queued frames in batches of at most 32.
                        batch_size = min(32, len(queue))
                        frame_batch = []
                        
                        for _ in range(batch_size):
                            if queue:
                                frame_batch.append(queue.popleft())
                        
                        if frame_batch:
                            await self._process_group_frame_batch(group_id, frame_batch)
                    
                    # Refresh this worker's metrics record.
                    await self._update_worker_metrics(group_id)
                    
                    # Yield to the event loop; avoids a busy spin when idle.
                    await asyncio.sleep(0.01)
                    
                except Exception as e:
                    logger.error(f"组 {group_id} 处理循环异常: {e}")
                    await asyncio.sleep(1)
                    
        except asyncio.CancelledError:
            logger.info(f"组 {group_id} 处理循环被取消")
        except Exception as e:
            logger.error(f"组 {group_id} 处理循环异常退出: {e}")
        finally:
            logger.info(f"组 {group_id} 处理循环已退出")
    
    async def _process_group_frame_batch(self, group_id: str, frame_batch: List[VideoFrameTask]) -> List[Dict[str, Any]]:
        """Run one batch of frames for *group_id* on the shared thread pool.

        Returns the per-frame result dicts; on failure the group's error
        counter is incremented and an empty list is returned.
        """
        if not frame_batch:
            return []
        
        try:
            start_time = time.time()
            
            # GPU allocation handle for this group, if one was pre-allocated.
            gpu_allocation = self.gpu_allocations.get(group_id)
            
            # BUGFIX: use get_running_loop() — asyncio.get_event_loop() is
            # deprecated inside coroutines and can bind the wrong loop.
            loop = asyncio.get_running_loop()
            results = await loop.run_in_executor(
                self.thread_pool,
                self._process_frames_in_thread,
                group_id,
                frame_batch,
                gpu_allocation
            )
            
            processing_time = time.time() - start_time
            
            # Update group statistics.
            group = self.camera_groups[group_id]
            group.frames_processed += len(frame_batch)
            group.last_processed = datetime.now()
            
            # Touch the worker's activity timestamp.
            metrics = self.worker_metrics[group_id]
            metrics.last_activity = datetime.now()
            
            logger.debug(f"组 {group_id} 处理 {len(frame_batch)} 帧，耗时: {processing_time:.3f}s")
            
            return results
            
        except Exception as e:
            logger.error(f"处理组 {group_id} 帧批次失败: {e}")
            
            # Count the failed batch against the group.
            group = self.camera_groups[group_id]
            group.error_count += 1
            
            return []
    
    def _process_frames_in_thread(self, group_id: str, frame_batch: List[VideoFrameTask], 
                                 gpu_allocation: Any) -> List[Dict[str, Any]]:
        """Synchronous worker body: bucket tasks by algorithm and run each batch.

        Executed on the shared ThreadPoolExecutor, not on the event loop.
        """
        try:
            # A task appears once per algorithm it requests.
            by_algorithm: Dict[str, List[VideoFrameTask]] = defaultdict(list)
            for task in frame_batch:
                for algo in task.ai_algorithms:
                    by_algorithm[algo].append(task)

            output: List[Dict[str, Any]] = []
            for algo, tasks in by_algorithm.items():
                output.extend(self._process_algorithm_sync(algo, tasks, gpu_allocation))
            return output

        except Exception as e:
            logger.error(f"线程中处理帧失败: {e}")
            return []
    
    def _process_algorithm_sync(self, algorithm: str, frame_tasks: List[VideoFrameTask], 
                               gpu_allocation: Any) -> List[Dict[str, Any]]:
        """Run one algorithm over a batch and wrap each result with task metadata."""
        try:
            frames = [t.frame for t in frame_tasks]

            # Dispatch table keeps algorithm routing in one place.
            handlers = {
                "person_detection": self._batch_person_detection_sync,
                "behavior_analysis": self._batch_behavior_analysis_sync,
                "crowd_density": self._batch_crowd_density_sync,
                "waste_detection": self._batch_waste_detection_sync,
            }
            handler = handlers.get(algorithm)
            if handler is not None:
                algorithm_results = handler(frames)
            else:
                logger.warning(f"未知算法类型: {algorithm}")
                algorithm_results = [{"error": f"未知算法: {algorithm}"}] * len(frames)

            # Pair each task with its result and attach identifying metadata.
            return [
                {
                    "task_id": task.task_id,
                    "camera_id": task.camera_id,
                    "algorithm": algorithm,
                    "frame_timestamp": task.frame_timestamp.isoformat(),
                    "result": res,
                    "processing_timestamp": datetime.now().isoformat()
                }
                for task, res in zip(frame_tasks, algorithm_results)
            ]

        except Exception as e:
            logger.error(f"同步处理算法 {algorithm} 失败: {e}")
            return []
    
    def _batch_person_detection_sync(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Stubbed batch person detection: a fixed single detection per frame."""
        return [
            {
                "detections": [
                    {"bbox": [100, 100, 50, 80], "confidence": 0.9, "class": "person"}
                ],
                "count": 1
            }
            for _ in frames
        ]
    
    def _batch_behavior_analysis_sync(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Stubbed batch behavior analysis: a fixed 'normal' verdict per frame."""
        return [
            {
                "behavior_type": "normal",
                "confidence": 0.95,
                "anomaly_score": 0.1
            }
            for _ in frames
        ]
    
    def _batch_crowd_density_sync(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Stubbed batch crowd-density analysis: a fixed medium density per frame."""
        return [
            {
                "density_level": "medium",
                "person_count": 5,
                "density_score": 0.6
            }
            for _ in frames
        ]
    
    def _batch_waste_detection_sync(self, frames: List[np.ndarray]) -> List[Dict[str, Any]]:
        """Stubbed batch waste detection: a fixed negative result per frame."""
        return [
            {
                "waste_detected": False,
                "waste_type": None,
                "confidence": 0.0
            }
            for _ in frames
        ]
    
    async def _frame_scheduler_loop(self):
        """Continuously move frames from the priority queues to worker queues."""
        try:
            logger.info("启动帧调度循环")

            while self.is_running:
                try:
                    await self._distribute_frames_to_workers()
                    # Yield briefly; avoids a busy loop when queues are empty.
                    await asyncio.sleep(0.01)

                except Exception as e:
                    logger.error(f"帧调度循环异常: {e}")
                    await asyncio.sleep(1)

        except asyncio.CancelledError:
            logger.info("帧调度循环被取消")
        except Exception as e:
            logger.error(f"帧调度循环异常退出: {e}")
    
    async def _distribute_frames_to_workers(self):
        """Drain every priority queue (highest first) into per-group queues.

        Frames whose camera cannot be assigned to any group are dropped.
        """
        try:
            for priority in (TaskPriority.CRITICAL, TaskPriority.HIGH, TaskPriority.NORMAL, TaskPriority.LOW):
                pending = self.priority_queues[priority]

                while pending:
                    frame_task = pending.popleft()
                    cam = frame_task.camera_id

                    gid = self.camera_to_group.get(cam)
                    if gid is None or gid not in self.worker_queues:
                        # Camera not (or no longer) assigned — try to place it now.
                        await self.assign_camera_to_group(cam, priority.value)
                        gid = self.camera_to_group.get(cam)

                    if gid is not None and gid in self.worker_queues:
                        self.worker_queues[gid].append(frame_task)
                    else:
                        # Assignment failed: drop the frame rather than stall.
                        logger.warning(f"无法分配摄像头 {cam}，丢弃帧")

        except Exception as e:
            logger.error(f"分发帧到工作器失败: {e}")
    
    async def _update_worker_metrics(self, group_id: str):
        """Refresh the WorkerMetrics record for *group_id*.

        Updates camera count, throughput estimate, GPU utilization (when an
        allocation exists), error rate and the last-activity timestamp.
        """
        try:
            if group_id not in self.worker_metrics:
                return
            
            metrics = self.worker_metrics[group_id]
            group = self.camera_groups[group_id]
            
            metrics.cameras_assigned = len(group.camera_ids)
            
            # NOTE(review): this divides the *cumulative* frame count by the
            # time since the last batch, which overstates FPS as the group
            # ages; a true rate needs per-interval deltas — TODO confirm the
            # intended metric before changing the formula.
            if group.last_processed:
                time_diff = (datetime.now() - group.last_processed).total_seconds()
                if time_diff > 0:
                    metrics.frames_per_second = group.frames_processed / time_diff
            
            # GPU utilization (best effort).
            if group_id in self.gpu_allocations:
                try:
                    gpu_info = await gpu_resource_manager.get_gpu_info()
                    if gpu_info:
                        allocation = self.gpu_allocations[group_id]
                        gpu_id = getattr(allocation, 'gpu_id', 0)
                        if gpu_id in gpu_info:
                            metrics.gpu_utilization = gpu_info[gpu_id].utilization
                except Exception as gpu_err:
                    # BUGFIX: was a bare `except: pass`, which also swallowed
                    # CancelledError/SystemExit; stay best-effort but narrow
                    # the catch and leave a trace for debugging.
                    logger.debug(f"获取组 {group_id} GPU利用率失败: {gpu_err}")
            
            # Error rate over the group's lifetime.
            if group.frames_processed > 0:
                metrics.error_rate = group.error_count / group.frames_processed
            
            metrics.last_activity = datetime.now()
            
        except Exception as e:
            logger.error(f"更新工作器 {group_id} 指标失败: {e}")
    
    async def _cleanup_gpu_resources(self):
        """Release every pre-allocated GPU slice and clear the allocation pool."""
        try:
            # Snapshot the items: we delete entries while iterating.
            for gid, allocation in list(self.gpu_allocations.items()):
                await gpu_resource_manager.release_resource(allocation.allocation_id)
                del self.gpu_allocations[gid]

            logger.info("GPU资源清理完成")

        except Exception as e:
            logger.error(f"清理GPU资源失败: {e}")
    
    def get_processing_stats(self) -> Dict[str, Any]:
        """Return a snapshot of camera counts, load, FPS and global stats."""
        try:
            groups = list(self.camera_groups.values())
            metrics = list(self.worker_metrics.values())

            total_cameras = sum(len(g.camera_ids) for g in groups)

            # Mean load and FPS across groups/workers (0.0 when none exist).
            avg_load = sum(g.current_load for g in groups) / len(groups) if groups else 0.0
            avg_fps = sum(m.frames_per_second for m in metrics) / len(metrics) if metrics else 0.0

            return {
                "is_running": self.is_running,
                "total_cameras": total_cameras,
                "total_groups": len(groups),
                "max_concurrent_cameras": self.max_concurrent_cameras,
                "cameras_per_worker": self.cameras_per_worker,
                "average_load": avg_load,
                "average_fps": avg_fps,
                "processing_stats": self.processing_stats.copy(),
                "worker_count": len(metrics),
                "gpu_allocations": len(self.gpu_allocations)
            }

        except Exception as e:
            logger.error(f"获取处理统计信息失败: {e}")
            return {"error": str(e)}


class LoadBalancer:
    """Periodically evens out camera counts across processing groups."""

    def __init__(self, processor: ConcurrentStreamProcessor):
        # Back-reference to the processor whose groups we rebalance.
        self.processor = processor

    async def balance_loop(self):
        """Run one rebalance pass per minute while the processor is live."""
        try:
            logger.info("启动负载均衡循环")

            while self.processor.is_running:
                try:
                    await self._rebalance_camera_groups()
                    await asyncio.sleep(60)  # one pass per minute

                except Exception as e:
                    logger.error(f"负载均衡循环异常: {e}")
                    await asyncio.sleep(30)

        except asyncio.CancelledError:
            logger.info("负载均衡循环被取消")
        except Exception as e:
            logger.error(f"负载均衡循环异常退出: {e}")

    async def _rebalance_camera_groups(self):
        """Move up to two cameras from the busiest group to the idlest one.

        Only acts when the load gap exceeds 0.3 and the target has capacity.
        """
        try:
            groups = list(self.processor.camera_groups.values())
            if len(groups) < 2:
                return

            busiest = max(groups, key=lambda g: g.current_load)
            idlest = min(groups, key=lambda g: g.current_load)

            # Only act on a significant load gap.
            if busiest.current_load - idlest.current_load <= 0.3:
                return

            for camera_id in list(busiest.camera_ids)[:2]:
                if len(idlest.camera_ids) >= idlest.max_cameras:
                    continue  # target group is full

                busiest.camera_ids.discard(camera_id)
                idlest.camera_ids.add(camera_id)
                self.processor.camera_to_group[camera_id] = idlest.group_id

                # Keep the load fractions consistent after the move.
                busiest.current_load = len(busiest.camera_ids) / busiest.max_cameras
                idlest.current_load = len(idlest.camera_ids) / idlest.max_cameras

                logger.info(f"摄像头 {camera_id} 从组 {busiest.group_id} 移动到 {idlest.group_id}")

        except Exception as e:
            logger.error(f"重新平衡摄像头组失败: {e}")


class PerformanceMonitor:
    """Collects worker metrics and warns when performance thresholds are crossed."""

    def __init__(self, processor: ConcurrentStreamProcessor):
        # Processor whose workers this monitor observes.
        self.processor = processor

    async def monitor_loop(self):
        """Collect metrics and check thresholds every 30 seconds while running."""
        try:
            logger.info("启动性能监控循环")

            while self.processor.is_running:
                try:
                    await self._collect_performance_metrics()
                    await self._check_performance_thresholds()
                    await asyncio.sleep(30)  # sampling interval

                except Exception as e:
                    logger.error(f"性能监控循环异常: {e}")
                    await asyncio.sleep(15)

        except asyncio.CancelledError:
            logger.info("性能监控循环被取消")
        except Exception as e:
            logger.error(f"性能监控循环异常退出: {e}")

    async def _collect_performance_metrics(self):
        """Refresh every worker's metrics and roll them up into global stats."""
        try:
            for gid in self.processor.camera_groups:
                await self.processor._update_worker_metrics(gid)

            workers = self.processor.worker_metrics
            total_fps = sum(m.frames_per_second for m in workers.values())

            avg_gpu_util = 0.0
            if workers:
                avg_gpu_util = sum(m.gpu_utilization for m in workers.values()) / len(workers)

            # Publish the aggregates on the shared stats dict.
            self.processor.processing_stats.update({
                "frames_per_second": total_fps,
                "gpu_utilization": avg_gpu_util
            })

        except Exception as e:
            logger.error(f"收集性能指标失败: {e}")

    async def _check_performance_thresholds(self):
        """Log warnings when GPU load, error rate or FPS cross their limits."""
        try:
            stats = self.processor.processing_stats

            if stats["gpu_utilization"] > 90:
                logger.warning(f"GPU利用率过高: {stats['gpu_utilization']:.1f}%")

            if stats["error_rate"] > 0.1:
                logger.warning(f"错误率过高: {stats['error_rate']:.2f}")

            if stats["frames_per_second"] < 10:
                logger.warning(f"处理帧率过低: {stats['frames_per_second']:.1f} FPS")

        except Exception as e:
            logger.error(f"检查性能阈值失败: {e}")


# Module-level singleton instance (created lazily by get_concurrent_processor)
_concurrent_processor: Optional[ConcurrentStreamProcessor] = None


def get_concurrent_processor() -> ConcurrentStreamProcessor:
    """Return the process-wide ConcurrentStreamProcessor, creating it lazily.

    Uses the default capacity (500 cameras) on first access.
    """
    global _concurrent_processor

    if _concurrent_processor is None:
        # First access: build the singleton with default sizing.
        _concurrent_processor = ConcurrentStreamProcessor()

    return _concurrent_processor