"""
延迟监控模块 - 监控关键延迟指标

这个模块实现了关键延迟指标的监控：
- 任务排队延迟
- 任务分配延迟
- 任务处理延迟
- 通知延迟
- 性能统计和分析
"""

import asyncio
import logging
import time
import statistics
from collections import defaultdict, deque
from dataclasses import dataclass, field
from datetime import datetime, timezone, timedelta
from typing import Any, Dict, List, Optional, Tuple
from enum import Enum

from event_bus import EventBus, EventType, EventHandler, Event, get_event_bus
from distributed_tracing import get_task_tracer

logger = logging.getLogger(__name__)


class LatencyMetric(Enum):
    """Types of latency metrics tracked by the monitor.

    Each metric measures the wall-clock gap between two task lifecycle
    events (see LatencyMonitor._calculate_latency_metrics).
    """
    TASK_QUEUE_DELAY = "task_queue_delay"  # assignment -> start of processing
    TASK_ASSIGNMENT_DELAY = "task_assignment_delay"  # creation -> assignment
    TASK_PROCESSING_DELAY = "task_processing_delay"  # start -> completed/failed/timeout
    NOTIFICATION_DELAY = "notification_delay"  # completion/failure -> webhook sent
    TOTAL_DELAY = "total_delay"  # creation -> completed/failed/timeout


@dataclass
class LatencyMeasurement:
    """A single latency measurement taken for one task."""
    metric: LatencyMetric
    value: float  # latency in seconds
    timestamp: datetime  # UTC time the measurement was recorded
    task_id: str
    server_id: Optional[str] = None  # server that handled the task, if known
    metadata: Dict[str, Any] = field(default_factory=dict)


@dataclass
class LatencyStatistics:
    """Aggregated statistics for one latency metric over a time window."""
    metric: LatencyMetric
    count: int  # number of samples in the window
    min_value: float
    max_value: float
    mean_value: float
    median_value: float
    p95_value: float  # 95th percentile
    p99_value: float  # 99th percentile
    std_dev: float  # sample standard deviation (0.0 for a single sample)
    time_window: timedelta  # window the samples were taken from


class LatencyMonitor(EventHandler):
    """Latency monitor for task lifecycle events.

    Subscribes to the event bus, records a wall-clock timestamp for every
    task lifecycle event it sees, derives latency metrics from consecutive
    timestamps (created -> assigned -> started -> terminal -> notified),
    and exposes per-metric and per-server statistics over a sliding
    time window.
    """

    def __init__(self,
                 event_bus: Optional[EventBus] = None,
                 max_samples: int = 10000,
                 time_window_minutes: int = 60):
        """Initialize the monitor and subscribe it to the event bus.

        Args:
            event_bus: Bus to subscribe to; defaults to the global bus.
            max_samples: Maximum measurements kept per metric (ring buffer).
            time_window_minutes: Sliding window used for statistics and for
                expiring old data.
        """
        self.event_bus = event_bus or get_event_bus()
        self.max_samples = max_samples
        self.time_window = timedelta(minutes=time_window_minutes)

        # Per-metric ring buffer of LatencyMeasurement; the oldest entries
        # are dropped automatically once max_samples is reached.
        self.latency_data: Dict[LatencyMetric, deque] = defaultdict(
            lambda: deque(maxlen=max_samples)
        )

        # task_id -> {event_type_value: time.time() when the event was seen}
        self.task_timestamps: Dict[str, Dict[str, float]] = {}

        # Bookkeeping for diagnostics and periodic cleanup.
        self.total_measurements = 0
        self.last_cleanup_time = time.time()

        # Subscribe to the task lifecycle events listed in can_handle().
        self.event_bus.subscribe(self, priority=1)

        self.logger = logging.getLogger(__name__)
        self.logger.info("延迟监控器已初始化")

    def can_handle(self, event_type: EventType) -> bool:
        """Return True for the event types this monitor consumes."""
        return event_type in {
            EventType.TASK_CREATED,
            EventType.TASK_ASSIGNED,
            EventType.TASK_STARTED,
            EventType.TASK_COMPLETED,
            EventType.TASK_FAILED,
            EventType.TASK_CANCELLED,
            EventType.TASK_TIMEOUT,
            EventType.WEBHOOK_SENT,
            EventType.WEBHOOK_FAILED
        }

    async def handle(self, event: Event) -> None:
        """Record the event's timestamp and update latency metrics.

        Events without a task_id are ignored. Expired data is cleaned up at
        most once every five minutes, piggybacked on event handling. All
        errors are caught and logged so the event bus is never disrupted.
        """
        try:
            task_id = event.data.get("task_id")
            if not task_id:
                return

            current_time = time.time()

            # Store the timestamp under the event type's string value.
            # NOTE(review): the delay calculations below assume these values
            # are strings like "task_created" — verify against EventType.
            self.task_timestamps.setdefault(task_id, {})[event.type.value] = current_time

            # Derive any latency metrics this event completes.
            await self._calculate_latency_metrics(event, current_time)

            # Periodic cleanup of expired data (every 5 minutes).
            if current_time - self.last_cleanup_time > 300:
                await self._cleanup_expired_data()
                self.last_cleanup_time = current_time

        except Exception as e:
            self.logger.error(f"处理延迟监控事件时出错: {e}", exc_info=True)

    async def _calculate_latency_metrics(self, event: Event, current_time: float) -> None:
        """Derive the latency metrics that *event* completes.

        Each branch pairs the current event with an earlier recorded
        timestamp for the same task; if the earlier timestamp is missing
        (e.g. the monitor started mid-flight), no metric is recorded.
        """
        task_id = event.data.get("task_id")
        timestamps = self.task_timestamps.get(task_id, {})
        server_id = event.data.get("server_id")

        if event.type == EventType.TASK_ASSIGNED:
            # Assignment delay: creation -> assignment.
            created = timestamps.get("task_created")
            if created is not None:
                await self._record_latency(
                    LatencyMetric.TASK_ASSIGNMENT_DELAY,
                    current_time - created,
                    task_id,
                    server_id
                )

        elif event.type == EventType.TASK_STARTED:
            # Queue delay: assignment -> start of processing.
            assigned = timestamps.get("task_assigned")
            if assigned is not None:
                await self._record_latency(
                    LatencyMetric.TASK_QUEUE_DELAY,
                    current_time - assigned,
                    task_id,
                    server_id
                )

        elif event.type in {EventType.TASK_COMPLETED, EventType.TASK_FAILED, EventType.TASK_TIMEOUT}:
            # Processing delay: start -> terminal state.
            started = timestamps.get("task_started")
            if started is not None:
                await self._record_latency(
                    LatencyMetric.TASK_PROCESSING_DELAY,
                    current_time - started,
                    task_id,
                    server_id
                )

            # Total delay: creation -> terminal state.
            created = timestamps.get("task_created")
            if created is not None:
                await self._record_latency(
                    LatencyMetric.TOTAL_DELAY,
                    current_time - created,
                    task_id,
                    server_id
                )

        elif event.type == EventType.WEBHOOK_SENT:
            # Notification delay: completion/failure -> webhook sent.
            # Explicit `is None` checks instead of `or`, so a falsy (0.0)
            # completion timestamp is not silently skipped.
            completion_time = timestamps.get("task_completed")
            if completion_time is None:
                completion_time = timestamps.get("task_failed")
            if completion_time is not None:
                await self._record_latency(
                    LatencyMetric.NOTIFICATION_DELAY,
                    current_time - completion_time,
                    task_id,
                    server_id
                )

    async def _record_latency(self,
                             metric: LatencyMetric,
                             value: float,
                             task_id: str,
                             server_id: Optional[str] = None) -> None:
        """Store one latency measurement and mirror it onto the active span.

        Args:
            metric: Which latency metric the value belongs to.
            value: Latency in seconds.
            task_id: Task the measurement was taken for.
            server_id: Server that handled the task, if known.
        """
        measurement = LatencyMeasurement(
            metric=metric,
            value=value,
            timestamp=datetime.now(timezone.utc),
            task_id=task_id,
            server_id=server_id
        )

        self.latency_data[metric].append(measurement)
        self.total_measurements += 1

        # Attach the latency to the current distributed-tracing span, if any.
        task_tracer = get_task_tracer()
        current_span = task_tracer.distributed_tracer.get_current_span()
        if current_span:
            current_span.set_attribute(f"latency.{metric.value}", value)

        self.logger.debug(f"记录延迟指标: {metric.value} = {value:.3f}s, 任务: {task_id}")

    async def _cleanup_expired_data(self) -> None:
        """Drop measurements and task timestamps older than the time window."""
        cutoff_time = datetime.now(timezone.utc) - self.time_window
        cleaned_count = 0

        # Measurements are appended in time order, so popping from the left
        # until the head is inside the window removes exactly the stale ones.
        for metric, measurements in self.latency_data.items():
            while measurements and measurements[0].timestamp < cutoff_time:
                measurements.popleft()
                cleaned_count += 1

        # A task's timestamps are dropped only once ALL of them have fallen
        # outside the window, so in-flight tasks keep their history.
        current_time = time.time()
        window_seconds = self.time_window.total_seconds()
        expired_tasks = [
            task_id
            for task_id, timestamps in self.task_timestamps.items()
            if all(current_time - ts > window_seconds for ts in timestamps.values())
        ]

        for task_id in expired_tasks:
            del self.task_timestamps[task_id]

        if cleaned_count > 0 or expired_tasks:
            self.logger.info(f"清理过期数据: {cleaned_count} 个测量, {len(expired_tasks)} 个任务")

    def _build_statistics(self,
                          metric: LatencyMetric,
                          values: List[float],
                          window: timedelta) -> LatencyStatistics:
        """Build a LatencyStatistics record from raw latency values.

        Shared by get_latency_statistics() and get_server_statistics() so the
        aggregation logic lives in one place. *values* must be non-empty.
        """
        return LatencyStatistics(
            metric=metric,
            count=len(values),
            min_value=min(values),
            max_value=max(values),
            mean_value=statistics.mean(values),
            median_value=statistics.median(values),
            p95_value=self._percentile(values, 95),
            p99_value=self._percentile(values, 99),
            # stdev() requires at least two samples.
            std_dev=statistics.stdev(values) if len(values) > 1 else 0.0,
            time_window=window
        )

    def get_latency_statistics(self,
                              metric: LatencyMetric,
                              time_window: Optional[timedelta] = None) -> Optional[LatencyStatistics]:
        """Return statistics for *metric* within the window, or None if empty.

        Args:
            metric: Metric to aggregate.
            time_window: Window to filter by; defaults to the monitor's window.
        """
        if metric not in self.latency_data:
            return None

        measurements = self.latency_data[metric]
        if not measurements:
            return None

        window = time_window or self.time_window
        cutoff_time = datetime.now(timezone.utc) - window

        values = [m.value for m in measurements if m.timestamp >= cutoff_time]
        if not values:
            return None

        return self._build_statistics(metric, values, window)

    def _percentile(self, values: List[float], percentile: int) -> float:
        """Return the nearest-rank percentile of *values* (0.0 if empty).

        Uses the nearest-rank definition: the smallest value such that at
        least ``percentile`` percent of the data is <= it.
        """
        if not values:
            return 0.0

        sorted_values = sorted(values)
        n = len(sorted_values)
        # ceil(percentile/100 * n) via integer arithmetic, then 0-based.
        # The previous int() truncation selected one rank too high
        # (e.g. the 96th of 100 samples for P95 instead of the 95th).
        rank = -(-percentile * n // 100)
        index = min(max(rank - 1, 0), n - 1)
        return sorted_values[index]

    def get_all_statistics(self,
                          time_window: Optional[timedelta] = None) -> Dict[LatencyMetric, LatencyStatistics]:
        """Return statistics for every metric that has data in the window."""
        statistics_dict = {}

        for metric in LatencyMetric:
            stats = self.get_latency_statistics(metric, time_window)
            if stats:
                statistics_dict[metric] = stats

        return statistics_dict

    def get_server_statistics(self,
                             server_id: str,
                             time_window: Optional[timedelta] = None) -> Dict[LatencyMetric, LatencyStatistics]:
        """Return per-metric statistics restricted to one server.

        Args:
            server_id: Only measurements tagged with this server are counted.
            time_window: Window to filter by; defaults to the monitor's window.
        """
        window = time_window or self.time_window
        cutoff_time = datetime.now(timezone.utc) - window

        server_statistics = {}

        for metric, measurements in self.latency_data.items():
            values = [
                m.value for m in measurements
                if m.server_id == server_id and m.timestamp >= cutoff_time
            ]
            if not values:
                continue

            server_statistics[metric] = self._build_statistics(metric, values, window)

        return server_statistics

    def get_recent_measurements(self,
                               metric: LatencyMetric,
                               limit: int = 100) -> List[LatencyMeasurement]:
        """Return up to *limit* most recent measurements for *metric*."""
        if metric not in self.latency_data:
            return []

        return list(self.latency_data[metric])[-limit:]

    def get_performance_summary(self) -> Dict[str, Any]:
        """Return a compact, JSON-friendly summary of all current statistics."""
        all_stats = self.get_all_statistics()

        summary = {
            "total_measurements": self.total_measurements,
            "active_metrics": len(all_stats),
            "time_window_minutes": self.time_window.total_seconds() / 60,
            "metrics": {}
        }

        for metric, stats in all_stats.items():
            summary["metrics"][metric.value] = {
                "count": stats.count,
                "mean_seconds": round(stats.mean_value, 3),
                "p95_seconds": round(stats.p95_value, 3),
                "p99_seconds": round(stats.p99_value, 3),
                "max_seconds": round(stats.max_value, 3)
            }

        return summary

    def check_performance_thresholds(self,
                                   thresholds: Dict[LatencyMetric, float]) -> Dict[LatencyMetric, bool]:
        """Check P95 latency against per-metric thresholds.

        Args:
            thresholds: Mapping of metric -> maximum acceptable P95 seconds.

        Returns:
            Mapping of metric -> True when its P95 EXCEEDS the threshold
            (i.e. True means a violation); False when within bounds or when
            no data is available for the metric.
        """
        results = {}
        all_stats = self.get_all_statistics()

        for metric, threshold in thresholds.items():
            if metric in all_stats:
                # P95 is used as the comparison value, not the mean.
                results[metric] = all_stats[metric].p95_value > threshold
            else:
                results[metric] = False

        return results

    async def export_metrics(self) -> Dict[str, Any]:
        """Export flat metric values for monitoring-system integration."""
        all_stats = self.get_all_statistics()

        metrics = {}
        for metric, stats in all_stats.items():
            metric_name = metric.value
            metrics[f"{metric_name}_count"] = stats.count
            metrics[f"{metric_name}_mean"] = stats.mean_value
            metrics[f"{metric_name}_p95"] = stats.p95_value
            metrics[f"{metric_name}_p99"] = stats.p99_value
            metrics[f"{metric_name}_max"] = stats.max_value
            metrics[f"{metric_name}_std_dev"] = stats.std_dev

        return {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "metrics": metrics,
            "summary": self.get_performance_summary()
        }
