"""
实时结果聚合器
负责聚合AI分析结果，进行高效存储和实时推送
"""
import asyncio
import logging
import json
from datetime import datetime, timedelta
from typing import List, Dict, Optional, Any, Set
from collections import defaultdict, deque
from dataclasses import dataclass, field
from uuid import uuid4
import threading
from concurrent.futures import ThreadPoolExecutor
import time

from sqlalchemy.orm import Session
from sqlalchemy import and_, or_
from sqlalchemy.exc import SQLAlchemyError

from core.database import get_db, SessionLocal
from models.ai_result import AIResult
from models.camera import Camera
from schemas.ai_result import (
    RealTimeAIResult, AIResultCreate, AIResultResponse,
    BatchAIResultCreate
)
from services.alerts.intelligent_alert_service import intelligent_alert_service
from services.alerts.alert_notification_service import AlertNotificationService

logger = logging.getLogger(__name__)


@dataclass
class ResultBatch:
    """A buffered batch of AI results awaiting a database flush."""
    batch_id: str
    results: List[RealTimeAIResult] = field(default_factory=list)
    created_at: datetime = field(default_factory=datetime.now)
    max_size: int = 50
    max_age_seconds: int = 5

    def is_full(self) -> bool:
        """Return True once the batch holds at least max_size results."""
        return len(self.results) >= self.max_size

    def is_expired(self) -> bool:
        """Return True once the batch is older than max_age_seconds."""
        elapsed = (datetime.now() - self.created_at).total_seconds()
        return elapsed >= self.max_age_seconds

    def should_flush(self) -> bool:
        """Return True when the batch is ready to be written out."""
        return self.is_expired() or self.is_full()


@dataclass
class CameraResultCache:
    """Per-camera rolling cache of recent AI results and derived metrics."""
    camera_id: int
    # Most recent results, bounded to the last 100 (used for duplicate detection).
    recent_results: deque = field(default_factory=lambda: deque(maxlen=100))
    # Last alert timestamp per "<algorithm>_<result_type>" key (cooldown tracking).
    last_alert_time: Dict[str, datetime] = field(default_factory=dict)
    # Per-algorithm running metrics (counts and incremental averages).
    performance_metrics: Dict[str, Any] = field(default_factory=dict)

    def add_result(self, result: "RealTimeAIResult"):
        """Cache a result and update the per-algorithm metrics."""
        self.recent_results.append(result)
        self._update_performance_metrics(result)

    def _update_performance_metrics(self, result: "RealTimeAIResult"):
        """Update the running metrics for the result's algorithm.

        Fix: the confidence average keeps its own sample count
        ('confidence_count') instead of reusing 'total_count', so results
        that carry no confidence value no longer skew the average.
        """
        algorithm = result.algorithm_name

        if algorithm not in self.performance_metrics:
            self.performance_metrics[algorithm] = {
                'total_count': 0,
                'avg_latency': 0.0,
                'avg_confidence': 0.0,
                'confidence_count': 0,
                'alert_count': 0,
                'last_result_time': None
            }

        metrics = self.performance_metrics[algorithm]
        metrics['total_count'] += 1
        metrics['last_result_time'] = result.frame_timestamp

        # Incremental mean of processing latency over all results.
        n = metrics['total_count']
        metrics['avg_latency'] += (result.processing_latency - metrics['avg_latency']) / n

        # Incremental mean of confidence over only the results that have one.
        if result.confidence is not None:
            metrics['confidence_count'] += 1
            c = metrics['confidence_count']
            metrics['avg_confidence'] += (result.confidence - metrics['avg_confidence']) / c

        # Count results that were flagged as alerts by the producer.
        if result.alert_triggered:
            metrics['alert_count'] += 1


class RealTimeResultAggregator:
    """Real-time AI result aggregator.

    Consumes AI analysis results from an asyncio queue, groups them into
    size/age-bounded batches that are written to the database on a thread
    pool, maintains per-camera caches for duplicate detection and
    performance metrics, and dispatches alerts subject to a cooldown.
    """

    def __init__(self, max_workers: int = 4):
        """Initialize the aggregator.

        Args:
            max_workers: Size of the thread pool used for blocking
                database writes and queries.
        """
        self.max_workers = max_workers
        self.executor = ThreadPoolExecutor(max_workers=max_workers)

        # Batching state: batch_id -> pending ResultBatch.
        # threading.Lock (not asyncio) because batches are also touched
        # from executor threads.
        self.result_batches: Dict[str, "ResultBatch"] = {}
        self.batch_lock = threading.Lock()

        # Per-camera caches (recent results, cooldowns, metrics).
        self.camera_caches: Dict[int, "CameraResultCache"] = {}
        self.cache_lock = threading.Lock()

        # Work queues consumed by the background tasks.
        self.result_queue: asyncio.Queue = asyncio.Queue(maxsize=1000)
        self.batch_flush_queue: asyncio.Queue = asyncio.Queue(maxsize=100)

        # Service lifecycle state.
        self._running = False
        self._tasks: List[asyncio.Task] = []

        # Aggregate statistics exposed via get_statistics().
        self.stats = {
            'total_results_processed': 0,
            'total_batches_flushed': 0,
            'total_alerts_triggered': 0,
            'avg_processing_latency': 0.0,
            'cache_hit_rate': 0.0,
            'db_write_errors': 0,
            'last_flush_time': None
        }

        # Tunable configuration; see update_config().
        self.config = {
            'batch_size': 50,
            'batch_timeout_seconds': 5,
            'cache_ttl_seconds': 300,
            'alert_cooldown_seconds': 60,
            'enable_duplicate_detection': True,
            'max_queue_size': 1000
        }

        # Notification service used for real-time alert pushes.
        self.alert_notification_service = AlertNotificationService()

    async def start_service(self):
        """Start the background processing tasks (idempotent)."""
        if self._running:
            return

        self._running = True

        # Long-lived background tasks; references are kept in self._tasks
        # so they are not garbage-collected and can be cancelled on stop.
        self._tasks = [
            asyncio.create_task(self._process_results()),
            asyncio.create_task(self._flush_batches()),
            asyncio.create_task(self._cleanup_expired_data()),
            asyncio.create_task(self._update_statistics())
        ]

        logger.info("Real-time result aggregator service started")

    async def stop_service(self):
        """Stop the service, flushing any pending batches before shutdown."""
        self._running = False

        # Cancel the background tasks and wait for them to unwind.
        for task in self._tasks:
            task.cancel()
        await asyncio.gather(*self._tasks, return_exceptions=True)

        # Persist whatever is still buffered.
        await self._flush_all_batches()

        # Release the worker threads.
        self.executor.shutdown(wait=True)

        logger.info("Real-time result aggregator service stopped")

    async def add_result(self, result: "RealTimeAIResult") -> bool:
        """Enqueue a single AI result for aggregation.

        Returns:
            True if the result was queued; False if it was dropped because
            the queue is full (back-pressure by dropping, never blocking).
        """
        try:
            if self.result_queue.qsize() >= self.config['max_queue_size']:
                logger.warning("Result queue is full, dropping result")
                return False

            # put_nowait rather than `await put`: the size check above is
            # inherently racy, and a blocking put could stall the producer.
            self.result_queue.put_nowait(result)
            return True

        except asyncio.QueueFull:
            logger.warning("Result queue is full, dropping result")
            return False
        except Exception as e:
            logger.error(f"Failed to add result to queue: {e}")
            return False

    async def add_batch_results(self, results: List["RealTimeAIResult"]) -> bool:
        """Enqueue a list of AI results.

        Returns:
            True only if every result was queued successfully.
        """
        try:
            success_count = 0

            for result in results:
                if await self.add_result(result):
                    success_count += 1

            logger.info(f"Added {success_count}/{len(results)} results to queue")
            return success_count == len(results)

        except Exception as e:
            logger.error(f"Failed to add batch results: {e}")
            return False

    async def _process_results(self):
        """Background task: drain the result queue and handle each result."""
        while self._running:
            try:
                # Wake at least once per second so an idle queue cannot
                # keep the loop from noticing a stop request.
                result = await asyncio.wait_for(
                    self.result_queue.get(),
                    timeout=1.0
                )

                await self._handle_single_result(result)

            except asyncio.TimeoutError:
                continue
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error processing result: {e}")

    async def _handle_single_result(self, result: "RealTimeAIResult"):
        """Cache, deduplicate, batch and (possibly) alert on one result."""
        try:
            # Update the per-camera cache/metrics first so every result is
            # counted, including ones dropped as duplicates below.
            self._update_camera_cache(result)

            # Drop duplicates (same algorithm + frame already cached).
            if self.config['enable_duplicate_detection'] and self._is_duplicate_result(result):
                logger.debug(f"Duplicate result detected for camera {result.camera_id}")
                return

            # Buffer into the per-algorithm time-window batch.
            batch_id = self._get_batch_id(result)
            self._add_to_batch(batch_id, result)

            # Alert if the producer flagged it, or if our own rules fire.
            if result.alert_triggered or self._should_trigger_alert(result):
                await self._trigger_alert(result)

            self.stats['total_results_processed'] += 1

        except Exception as e:
            logger.error(f"Error handling single result: {e}")

    def _update_camera_cache(self, result: "RealTimeAIResult"):
        """Record the result in its camera's cache, creating it on demand."""
        with self.cache_lock:
            if result.camera_id not in self.camera_caches:
                self.camera_caches[result.camera_id] = CameraResultCache(result.camera_id)

            self.camera_caches[result.camera_id].add_result(result)

    def _is_duplicate_result(self, result: "RealTimeAIResult") -> bool:
        """Return True if an equivalent result was recently cached.

        Fix: _update_camera_cache has already appended this result to the
        cache by the time this check runs, so the identical object must be
        skipped — otherwise every result matches its own cache entry and
        is dropped as a duplicate.
        """
        with self.cache_lock:
            cache = self.camera_caches.get(result.camera_id)
            if not cache:
                return False

            # Only the 5 most recent cached results are inspected.
            for recent_result in list(cache.recent_results)[-5:]:
                if recent_result is result:
                    continue  # skip the result's own cache entry
                if (recent_result.algorithm_name == result.algorithm_name and
                    recent_result.frame_id == result.frame_id):
                    return True

            return False

    def _get_batch_id(self, result: "RealTimeAIResult") -> str:
        """Derive the batch key: algorithm name + flush-interval time window."""
        time_window = int(time.time() / self.config['batch_timeout_seconds'])
        return f"{result.algorithm_name}_{time_window}"

    def _add_to_batch(self, batch_id: str, result: "RealTimeAIResult"):
        """Append the result to its batch; schedule a flush when ready."""
        with self.batch_lock:
            if batch_id not in self.result_batches:
                self.result_batches[batch_id] = ResultBatch(
                    batch_id=batch_id,
                    max_size=self.config['batch_size'],
                    max_age_seconds=self.config['batch_timeout_seconds']
                )

            batch = self.result_batches[batch_id]
            batch.results.append(result)
            needs_flush = batch.should_flush()

        if needs_flush:
            # put_nowait instead of a fire-and-forget create_task: a task
            # held by no reference may be garbage-collected before it runs.
            # If the flush queue is full, the periodic expired-batch sweep
            # in _flush_batches() will pick this batch up instead.
            try:
                self.batch_flush_queue.put_nowait(batch_id)
            except asyncio.QueueFull:
                logger.warning(f"Flush queue full, deferring flush of batch {batch_id}")

    async def _schedule_batch_flush(self, batch_id: str):
        """Request an asynchronous flush of the given batch."""
        try:
            await self.batch_flush_queue.put(batch_id)
        except Exception as e:
            logger.error(f"Failed to schedule batch flush: {e}")

    async def _flush_batches(self):
        """Background task: write requested and expired batches to the DB."""
        while self._running:
            try:
                batch_id = await asyncio.wait_for(
                    self.batch_flush_queue.get(),
                    timeout=1.0
                )

                await self._flush_single_batch(batch_id)

            except asyncio.TimeoutError:
                # Idle: sweep for batches that aged out without filling up.
                await self._flush_expired_batches()
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error flushing batches: {e}")

    async def _flush_single_batch(self, batch_id: str):
        """Detach one batch and persist it on the thread pool.

        NOTE(review): if the DB write fails the batch is lost; only the
        db_write_errors counter records the loss (pre-existing behavior).
        """
        try:
            with self.batch_lock:
                batch = self.result_batches.pop(batch_id, None)

            if not batch or not batch.results:
                return

            # Blocking SQLAlchemy work runs on the executor, off the loop.
            # get_running_loop() replaces the deprecated get_event_loop().
            await asyncio.get_running_loop().run_in_executor(
                self.executor,
                self._write_batch_to_db,
                batch
            )

            self.stats['total_batches_flushed'] += 1
            self.stats['last_flush_time'] = datetime.now()

            logger.debug(f"Flushed batch {batch_id} with {len(batch.results)} results")

        except Exception as e:
            logger.error(f"Error flushing batch {batch_id}: {e}")
            self.stats['db_write_errors'] += 1

    def _write_batch_to_db(self, batch: "ResultBatch"):
        """Persist a batch of results in one transaction (executor thread).

        Raises:
            Exception: re-raised after rollback so the caller can count
                the failure.
        """
        db = SessionLocal()
        try:
            ai_results = []

            for result in batch.results:
                ai_result = AIResult(
                    camera_id=result.camera_id,
                    algorithm_name=result.algorithm_name,
                    result_type=result.result_type.value,
                    result_data=result.result_data,
                    confidence=result.confidence,
                    bounding_boxes=result.bounding_boxes,
                    timestamp=result.frame_timestamp,
                    processed_at=datetime.now(),

                    # Real-time processing fields
                    frame_timestamp=result.frame_timestamp,
                    processing_latency=result.processing_latency,
                    frame_id=result.frame_id,
                    stream_info={
                        'worker_id': result.worker_id,
                        'gpu_id': result.gpu_id
                    },
                    is_real_time=True,
                    batch_id=batch.batch_id,
                    worker_id=result.worker_id,
                    gpu_id=result.gpu_id,
                    frame_size=None,  # could be derived from result.result_data
                    alert_triggered=result.alert_triggered
                )
                ai_results.append(ai_result)

            # One bulk insert + commit for the whole batch.
            db.add_all(ai_results)
            db.commit()

            logger.debug(f"Successfully wrote {len(ai_results)} results to database")

        except SQLAlchemyError as e:
            db.rollback()
            logger.error(f"Database error writing batch: {e}")
            raise
        except Exception as e:
            db.rollback()
            logger.error(f"Error writing batch to database: {e}")
            raise
        finally:
            db.close()

    async def _flush_expired_batches(self):
        """Flush every batch that has exceeded its maximum age."""
        with self.batch_lock:
            expired_batch_ids = [
                batch_id for batch_id, batch in self.result_batches.items()
                if batch.is_expired()
            ]

        for batch_id in expired_batch_ids:
            await self._flush_single_batch(batch_id)

    async def _flush_all_batches(self):
        """Flush every pending batch (used during shutdown)."""
        with self.batch_lock:
            batch_ids = list(self.result_batches.keys())

        for batch_id in batch_ids:
            await self._flush_single_batch(batch_id)

    def _should_trigger_alert(self, result: "RealTimeAIResult") -> bool:
        """Decide whether a result warrants an alert.

        Applies, in order: the per-camera cooldown, a minimum-confidence
        threshold, then algorithm-specific rules.
        """
        # Cooldown: suppress repeats of the same algorithm/result type.
        with self.cache_lock:
            cache = self.camera_caches.get(result.camera_id)
            if not cache:
                return False

            alert_key = f"{result.algorithm_name}_{result.result_type.value}"
            last_alert_time = cache.last_alert_time.get(alert_key)

            if last_alert_time:
                cooldown = timedelta(seconds=self.config['alert_cooldown_seconds'])
                if datetime.now() - last_alert_time < cooldown:
                    return False

        # Confidence gate. `is not None` (not truthiness) so that a 0.0
        # confidence is correctly rejected instead of slipping through.
        if result.confidence is not None and result.confidence < 0.6:
            return False

        # Algorithm-specific alert rules.
        alert_conditions = {
            'behavior_analysis': self._check_behavior_alert,
            'crowd_density': self._check_crowd_alert,
            'waste_detection': self._check_waste_alert,
            'conflict_detection': self._check_conflict_alert
        }

        check_func = alert_conditions.get(result.algorithm_name)
        if check_func:
            return check_func(result)

        return False

    def _check_behavior_alert(self, result: "RealTimeAIResult") -> bool:
        """Alert on dangerous behavior types."""
        dangerous_behaviors = ['fighting', 'running', 'falling', 'intrusion']
        behavior_type = result.result_data.get('behavior_type', '').lower()
        return behavior_type in dangerous_behaviors

    def _check_crowd_alert(self, result: "RealTimeAIResult") -> bool:
        """Alert on high crowd density or head count."""
        density = result.result_data.get('density', 0)
        person_count = result.result_data.get('person_count', 0)
        return density > 0.8 or person_count > 20

    def _check_waste_alert(self, result: "RealTimeAIResult") -> bool:
        """Alert on excessive detected waste."""
        waste_count = result.result_data.get('waste_count', 0)
        waste_area_ratio = result.result_data.get('waste_area_ratio', 0)
        return waste_count > 5 or waste_area_ratio > 0.3

    def _check_conflict_alert(self, result: "RealTimeAIResult") -> bool:
        """Alert when a conflict was detected."""
        return bool(result.result_data.get('conflict_detected', False))

    async def _trigger_alert(self, result: "RealTimeAIResult"):
        """Record the cooldown start and fan the alert out to the services."""
        try:
            # Start the cooldown window for this algorithm/result type.
            with self.cache_lock:
                cache = self.camera_caches.get(result.camera_id)
                if cache:
                    alert_key = f"{result.algorithm_name}_{result.result_type.value}"
                    cache.last_alert_time[alert_key] = datetime.now()

            alert_data = {
                'camera_id': result.camera_id,
                'algorithm_name': result.algorithm_name,
                'result_type': result.result_type.value,
                'confidence': result.confidence,
                'timestamp': result.frame_timestamp,
                'result_data': result.result_data,
                'frame_id': result.frame_id,
                'worker_id': result.worker_id
            }

            # NOTE(review): this reaches into the alert service's private
            # queue; a public enqueue API on that service would be cleaner.
            await intelligent_alert_service._event_queue.put(alert_data)

            # Push to subscribers in real time.
            await self.alert_notification_service.send_real_time_alert(alert_data)

            self.stats['total_alerts_triggered'] += 1

            logger.info(f"Alert triggered for camera {result.camera_id}, algorithm {result.algorithm_name}")

        except Exception as e:
            logger.error(f"Error triggering alert: {e}")

    async def _cleanup_expired_data(self):
        """Background task: drop cooldown entries older than the cache TTL."""
        while self._running:
            try:
                await asyncio.sleep(60)  # sweep once per minute

                current_time = datetime.now()
                ttl = timedelta(seconds=self.config['cache_ttl_seconds'])

                with self.cache_lock:
                    for camera_id, cache in list(self.camera_caches.items()):
                        # Collect first, then delete, to avoid mutating the
                        # dict while iterating it.
                        expired_keys = [
                            alert_key
                            for alert_key, alert_time in cache.last_alert_time.items()
                            if current_time - alert_time > ttl
                        ]
                        for key in expired_keys:
                            del cache.last_alert_time[key]

                logger.debug("Expired data cleanup completed")

            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error during cleanup: {e}")

    async def _update_statistics(self):
        """Background task: recompute the overall average processing latency."""
        while self._running:
            try:
                await asyncio.sleep(30)  # refresh every 30 seconds

                # Weighted average across all cameras and algorithms.
                total_latency = 0
                total_count = 0

                with self.cache_lock:
                    for cache in self.camera_caches.values():
                        for metrics in cache.performance_metrics.values():
                            if metrics['total_count'] > 0:
                                total_latency += metrics['avg_latency'] * metrics['total_count']
                                total_count += metrics['total_count']

                if total_count > 0:
                    self.stats['avg_processing_latency'] = total_latency / total_count

                logger.debug("Statistics updated")

            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(f"Error updating statistics: {e}")

    def get_statistics(self) -> Dict[str, Any]:
        """Return a snapshot of service, per-camera, batch and queue stats."""
        with self.cache_lock:
            camera_stats = {}
            for camera_id, cache in self.camera_caches.items():
                camera_stats[camera_id] = {
                    'recent_results_count': len(cache.recent_results),
                    'performance_metrics': cache.performance_metrics.copy()
                }

        with self.batch_lock:
            batch_stats = {
                'active_batches': len(self.result_batches),
                'total_pending_results': sum(len(batch.results) for batch in self.result_batches.values())
            }

        return {
            'service_stats': self.stats.copy(),
            'camera_stats': camera_stats,
            'batch_stats': batch_stats,
            'queue_size': self.result_queue.qsize(),
            'config': self.config.copy()
        }

    def update_config(self, new_config: Dict[str, Any]):
        """Apply configuration overrides; unknown keys are ignored."""
        for key, value in new_config.items():
            if key in self.config:
                self.config[key] = value
                logger.info(f"Updated config {key} = {value}")

    async def get_camera_results(
        self,
        camera_id: int,
        algorithm_name: Optional[str] = None,
        limit: int = 100
    ) -> List["AIResultResponse"]:
        """Fetch recent real-time AI results for a camera.

        The blocking SQLAlchemy query runs on the thread pool so it does
        not stall the event loop. Returns [] on any error.
        """
        try:
            return await asyncio.get_running_loop().run_in_executor(
                self.executor,
                self._query_camera_results,
                camera_id,
                algorithm_name,
                limit
            )
        except Exception as e:
            logger.error(f"Error getting camera results: {e}")
            return []

    def _query_camera_results(
        self,
        camera_id: int,
        algorithm_name: Optional[str],
        limit: int
    ) -> List["AIResultResponse"]:
        """Blocking query for a camera's real-time results (executor thread)."""
        db = SessionLocal()
        try:
            query = db.query(AIResult).filter(
                AIResult.camera_id == camera_id,
                AIResult.is_real_time == True
            )

            if algorithm_name:
                query = query.filter(AIResult.algorithm_name == algorithm_name)

            results = query.order_by(AIResult.created_at.desc()).limit(limit).all()

            # Validate before the session closes so attributes still resolve.
            return [AIResultResponse.model_validate(result) for result in results]
        finally:
            db.close()


# Module-level singleton: the shared real-time result aggregator instance.
real_time_result_aggregator = RealTimeResultAggregator()