"""
模型监控模块
专门用于监控模型性能、资源使用和请求统计
"""
import time
import asyncio
import threading
from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from collections import deque, defaultdict
import psutil
from utils.logger import global_logger
from config.monitoring_config import ModelMonitorConfig, get_monitoring_config


@dataclass
class ModelPerformanceMetrics:
    """Point-in-time performance metrics for one (model, backend) pair."""
    model_name: str
    backend: str
    inference_latency: float = 0.0  # smoothed inference latency (ms); updated via EMA in end_request
    memory_usage: float = 0.0  # process memory usage (MB)
    cpu_usage: float = 0.0  # process CPU usage (%)
    gpu_usage: Optional[float] = None  # GPU usage (%); None when not collected
    tokens_per_second: float = 0.0  # smoothed generated-tokens-per-second (EMA)
    request_count: int = 0  # total completed requests
    error_count: int = 0  # total failed requests
    success_rate: float = 100.0  # success rate (%), derived from the two counters
    timestamp: float = 0.0  # epoch seconds of the last metrics refresh


@dataclass
class ModelRequestRecord:
    """Lifecycle record of a single model request (start through completion)."""
    request_id: str
    model_name: str
    backend: str
    start_time: float  # epoch seconds when the request was registered
    end_time: Optional[float] = None  # epoch seconds at completion; None while in flight
    success: Optional[bool] = None  # outcome flag; None while in flight
    error_message: Optional[str] = None  # populated only on failure
    input_tokens: int = 0  # prompt token count reported at end_request
    output_tokens: int = 0  # generated token count reported at end_request


class ModelMonitor:
    """
    Singleton model monitor.

    Collects, aggregates and reports per-model performance metrics
    (inference latency, token throughput, success rate) together with
    process resource usage, and raises alerts when configured thresholds
    are exceeded.  All shared state is guarded by a reentrant class-level
    lock, so the public API is safe to call from multiple threads while
    the background collection thread runs.
    """

    _instance: Optional['ModelMonitor'] = None
    _lock = threading.RLock()

    # In-flight requests older than this many seconds are assumed leaked
    # (end_request was never called) and are dropped by the cleanup pass.
    _REQUEST_TIMEOUT_SECONDS = 3600

    def __new__(cls):
        # Create the singleton under the lock so concurrent first calls
        # still observe exactly one instance.
        with cls._lock:
            if cls._instance is None:
                cls._instance = super(ModelMonitor, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        with self._lock:
            # __init__ runs on every ModelMonitor() call because __new__
            # always returns the same object; initialize state only once.
            if hasattr(self, '_initialized'):
                return
            self._initialized = True

            # Load monitoring configuration.
            config = get_monitoring_config()
            self.config = config.model_monitor

            # Data stores: bounded per-model history (last 1000 samples),
            # in-flight request records, and current per-model metrics.
            self._metrics_history: Dict[str, deque] = defaultdict(lambda: deque(maxlen=1000))
            self._active_requests: Dict[str, ModelRequestRecord] = {}
            self._model_metrics: Dict[str, ModelPerformanceMetrics] = {}

            # Global counters across all models.
            self._total_requests = 0
            self._total_errors = 0

            # Background-thread lifecycle state.
            self._running = False
            self._thread: Optional[threading.Thread] = None
            self._stop_event = threading.Event()

            global_logger.info("模型监控器初始化完成")

    def start(self):
        """
        Start the background monitoring thread.

        No-op when monitoring is disabled in the configuration or when
        the monitor is already running.
        """
        if not self.config.enabled:
            global_logger.warning("模型监控已禁用")
            return

        with self._lock:
            if self._running:
                global_logger.warning("模型监控器已经在运行")
                return

            self._running = True
            self._stop_event.clear()
            self._thread = threading.Thread(target=self._monitor_loop, daemon=True)
            self._thread.start()
            global_logger.info("模型监控器已启动")

    def stop(self):
        """
        Stop the background monitoring thread, waiting up to 5 seconds
        for it to exit.  No-op when the monitor is not running.
        """
        with self._lock:
            if not self._running:
                return
            self._stop_event.set()
            self._running = False
            thread, self._thread = self._thread, None

        # Join OUTSIDE the lock: the monitor loop acquires the same lock
        # while collecting, so joining with the lock held could stall the
        # loop thread and block the join until its timeout.
        if thread:
            thread.join(timeout=5.0)

        global_logger.info("模型监控器已停止")

    def _monitor_loop(self):
        """
        Background loop: periodically refresh metrics, evaluate alert
        conditions and clean up leaked request records.
        """
        while not self._stop_event.is_set():
            try:
                # Refresh per-model performance metrics.
                self._update_model_metrics()

                # Evaluate alert thresholds.
                self._check_alarms()

                # Drop stale in-flight request records.
                self._cleanup_old_requests()

                # Event.wait doubles as the inter-cycle sleep and returns
                # immediately when stop() sets the event, so shutdown is
                # prompt instead of waiting out a full check interval.
                self._stop_event.wait(self.config.check_interval)

            except Exception as e:
                global_logger.error(f"模型监控循环出错: {e}")
                self._stop_event.wait(5)  # back off after an error

    def _update_model_metrics(self):
        """
        Refresh success rate, process resource usage and the bounded
        history for every tracked model.
        """
        current_time = time.time()

        # Snapshot under the lock: start_request() may insert entries from
        # other threads while we iterate, which would otherwise raise
        # "dictionary changed size during iteration".
        with self._lock:
            snapshot = list(self._model_metrics.items())

        if not snapshot:
            return

        # Sample the process once per cycle instead of once per model:
        # cpu_percent(interval=0.1) blocks for the sampling window, so a
        # per-model call would stall the loop 0.1s per tracked model.
        process = psutil.Process()
        memory_mb = process.memory_info().rss / (1024 * 1024)  # bytes -> MB
        cpu_percent = process.cpu_percent(interval=0.1)

        for model_key, metrics in snapshot:
            try:
                # Derive success rate from the request/error counters.
                if metrics.request_count > 0:
                    metrics.success_rate = ((metrics.request_count - metrics.error_count) / metrics.request_count) * 100

                metrics.memory_usage = memory_mb
                metrics.cpu_usage = cpu_percent
                metrics.timestamp = current_time

                # Append a point-in-time copy to the bounded history.
                with self._lock:
                    self._metrics_history[model_key].append((current_time, metrics.__dict__.copy()))

            except Exception as e:
                global_logger.error(f"更新模型 {model_key} 指标出错: {e}")

    def _check_alarms(self):
        """
        Evaluate alert thresholds (inference latency, system memory,
        error rate) for every tracked model.
        """
        # Snapshot under the lock for the same iterate-vs-mutate reason
        # as _update_model_metrics.
        with self._lock:
            snapshot = list(self._model_metrics.items())

        if not snapshot:
            return

        # System-wide reading and derived threshold, computed once per
        # cycle rather than once per model.
        system_memory = psutil.virtual_memory()
        latency_threshold_ms = self.config.inference_latency_threshold * 1000  # s -> ms

        for model_key, metrics in snapshot:
            try:
                # Inference latency threshold.
                if metrics.inference_latency > latency_threshold_ms:
                    self._trigger_alert(
                        "model_inference_latency",
                        "warning",
                        f"模型 {model_key} 推理延迟过高: {metrics.inference_latency:.2f}ms",
                        model_key=model_key,
                        latency=metrics.inference_latency,
                        threshold=latency_threshold_ms
                    )

                # System memory threshold.
                if system_memory.percent > self.config.memory_usage_threshold:
                    self._trigger_alert(
                        "model_memory_usage",
                        "warning",
                        f"系统内存使用率过高: {system_memory.percent:.2f}%",
                        model_key=model_key,
                        memory_usage=system_memory.percent,
                        threshold=self.config.memory_usage_threshold
                    )

                # Error-rate threshold.
                if metrics.request_count > 0:
                    error_rate = (metrics.error_count / metrics.request_count) * 100
                    if error_rate > self.config.error_rate_threshold:
                        self._trigger_alert(
                            "model_error_rate",
                            "warning",
                            f"模型 {model_key} 错误率过高: {error_rate:.2f}%",
                            model_key=model_key,
                            error_rate=error_rate,
                            threshold=self.config.error_rate_threshold
                        )

            except Exception as e:
                global_logger.error(f"检查模型 {model_key} 告警条件出错: {e}")

    def _trigger_alert(self, alert_type: str, level: str, message: str, **kwargs):
        """
        Emit an alert to the log and forward it to the global alert
        manager when one is available.

        Args:
            alert_type: Alert type identifier.
            level: Alert severity level.
            message: Human-readable alert message.
            **kwargs: Extra detail fields attached to the alert.
        """
        global_logger.warning(f"[告警] {message}")

        # Best-effort forwarding to the global AlertManager; a failure
        # here must never break the monitoring loop.  Imported lazily to
        # avoid a circular import with performance_monitor.
        try:
            from core.monitoring.performance_monitor import get_alert_manager
            alert_manager = get_alert_manager()
            if alert_manager:
                alert_manager.add_alert(
                    name=alert_type,
                    level=level,
                    message=message,
                    details=kwargs
                )
        except Exception as e:
            global_logger.error(f"发送告警失败: {e}")

    def _cleanup_old_requests(self):
        """
        Drop in-flight request records older than _REQUEST_TIMEOUT_SECONDS;
        their end_request() was presumably never called.
        """
        current_time = time.time()

        # Select and delete under the lock; end_request() mutates the
        # same dict from request threads.
        with self._lock:
            expired_ids = [
                request_id
                for request_id, record in self._active_requests.items()
                if record.end_time is None
                and (current_time - record.start_time) > self._REQUEST_TIMEOUT_SECONDS
            ]
            for request_id in expired_ids:
                del self._active_requests[request_id]

        # Log outside the lock to keep the critical section short.
        for request_id in expired_ids:
            global_logger.warning(f"清理超时请求: {request_id}")

    def start_request(self, request_id: str, model_name: str, backend: str) -> None:
        """
        Register the start of a model request.

        Args:
            request_id: Unique request identifier.
            model_name: Model name.
            backend: Backend type.
        """
        if not self.config.enabled:
            return

        record = ModelRequestRecord(
            request_id=request_id,
            model_name=model_name,
            backend=backend,
            start_time=time.time()
        )

        with self._lock:
            self._active_requests[request_id] = record
            self._total_requests += 1

            # Lazily create the metrics slot for this (model, backend) pair.
            model_key = f"{model_name}_{backend}"
            if model_key not in self._model_metrics:
                self._model_metrics[model_key] = ModelPerformanceMetrics(
                    model_name=model_name,
                    backend=backend
                )

    def end_request(self, request_id: str, success: bool = True, error_message: Optional[str] = None,
                   input_tokens: int = 0, output_tokens: int = 0) -> None:
        """
        Register the completion of a model request and fold its latency
        and throughput into the model's running metrics.

        Args:
            request_id: Unique request identifier (from start_request).
            success: Whether the request succeeded.
            error_message: Error description, for failed requests.
            input_tokens: Prompt token count.
            output_tokens: Generated token count.
        """
        if not self.config.enabled:
            return

        with self._lock:
            if request_id not in self._active_requests:
                global_logger.warning(f"找不到请求记录: {request_id}")
                return

            record = self._active_requests[request_id]
            record.end_time = time.time()
            record.success = success
            record.error_message = error_message
            record.input_tokens = input_tokens
            record.output_tokens = output_tokens

            # Wall-clock inference latency in milliseconds.
            latency = (record.end_time - record.start_time) * 1000

            model_key = f"{record.model_name}_{record.backend}"
            if model_key in self._model_metrics:
                metrics = self._model_metrics[model_key]
                metrics.request_count += 1

                # Exponential moving average (alpha = 0.1) smooths spikes.
                metrics.inference_latency = (metrics.inference_latency * 0.9 + latency * 0.1)

                # Same EMA for token generation rate (tokens/second).
                if output_tokens > 0 and latency > 0:
                    metrics.tokens_per_second = (metrics.tokens_per_second * 0.9 +
                                               (output_tokens / (latency / 1000)) * 0.1)

                if not success:
                    metrics.error_count += 1
                    self._total_errors += 1

            # Request is finished; forget it.
            del self._active_requests[request_id]

    def get_model_metrics(self, model_name: Optional[str] = None,
                         backend: Optional[str] = None) -> Dict[str, ModelPerformanceMetrics]:
        """
        Return the current per-model metrics, optionally filtered.

        Args:
            model_name: Optional model-name filter.
            backend: Optional backend filter.

        Returns:
            Mapping of "model_backend" key to its metrics object.
        """
        with self._lock:
            if model_name or backend:
                filtered_metrics = {}
                for key, metrics in self._model_metrics.items():
                    if model_name and metrics.model_name != model_name:
                        continue
                    if backend and metrics.backend != backend:
                        continue
                    filtered_metrics[key] = metrics
                return filtered_metrics

            return dict(self._model_metrics)

    def get_request_stats(self) -> Dict[str, Any]:
        """
        Return aggregate request statistics across all models.

        Returns:
            Dict with total/success/error counts, success rate (%) and
            the number of currently active requests.
        """
        with self._lock:
            total_success = self._total_requests - self._total_errors
            # Report 100% when no request has been seen yet.
            success_rate = (total_success / self._total_requests * 100) if self._total_requests > 0 else 100

            return {
                "total_requests": self._total_requests,
                "total_errors": self._total_errors,
                "total_success": total_success,
                "success_rate": success_rate,
                "active_requests": len(self._active_requests)
            }

    def get_metrics_history(self, model_key: str, time_range: int = 3600) -> List[Dict[str, Any]]:
        """
        Return historical metric samples for one model.

        Args:
            model_key: Model key ("model_name_backend" format).
            time_range: Look-back window in seconds (default 1 hour).

        Returns:
            List of metric snapshots within the window, each tagged
            with its sample timestamp.
        """
        with self._lock:
            if model_key not in self._metrics_history:
                return []

            current_time = time.time()
            filtered_history = []

            for timestamp, metrics in self._metrics_history[model_key]:
                if current_time - timestamp <= time_range:
                    filtered_history.append({
                        "timestamp": timestamp,
                        **metrics
                    })

            return filtered_history

    def reset_metrics(self) -> None:
        """
        Reset all collected metrics, counters and request records.
        """
        with self._lock:
            self._metrics_history.clear()
            self._active_requests.clear()
            self._model_metrics.clear()
            self._total_requests = 0
            self._total_errors = 0
            global_logger.info("模型监控指标已重置")


# Process-wide model monitor instance, created lazily on first access.
_model_monitor_instance: Optional[ModelMonitor] = None


def get_model_monitor() -> Optional[ModelMonitor]:
    """
    Return the process-wide model monitor, creating it on first use.

    Returns:
        The shared ModelMonitor instance.
    """
    global _model_monitor_instance

    monitor = _model_monitor_instance
    if monitor is None:
        monitor = ModelMonitor()
        _model_monitor_instance = monitor
    return monitor


def start_model_monitor() -> None:
    """Start the global model monitor, creating it if necessary."""
    get_model_monitor().start()


def stop_model_monitor() -> None:
    """Stop the global model monitor if one has been created."""
    get_model_monitor().stop()