"""
模型监控和优化服务
提供实时模型性能监控、自动优化和预警功能
"""

import functools
import json
import logging
import os
import threading
import time
from collections import defaultdict, deque
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple

import numpy as np
import psutil
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

@dataclass
class ModelMetrics:
    """A single snapshot of model-quality and system-resource metrics."""
    timestamp: datetime        # when the snapshot was collected
    accuracy: float            # estimated model accuracy (0-1)
    precision: float           # estimated precision (0-1)
    recall: float              # estimated recall (0-1)
    f1_score: float            # harmonic mean of precision and recall
    response_time: float       # average response time in seconds
    memory_usage: float        # system memory usage as a fraction (0-1)
    cpu_usage: float           # system CPU usage as a fraction (0-1)
    error_rate: float          # fraction of recorded requests that failed (0-1)
    user_satisfaction: float   # mean user-satisfaction score

class ModelMonitor:
    """Model performance monitor.

    Collects quality and resource metrics on a background thread, compares
    them against configurable thresholds, fires registered alert callbacks
    on violations, and can trigger automatic retraining.
    """

    def __init__(self, config_file: str = "config/model_monitor.json"):
        """Initialize the monitor.

        Args:
            config_file: Path to an optional JSON config file; values found
                there are deep-merged over the built-in defaults.
        """
        self.config_file = config_file
        self.config = self._load_config()
        self.metrics_history = deque(maxlen=1000)  # rolling window of ModelMetrics
        self.current_metrics = None                # most recent ModelMetrics snapshot
        self.is_monitoring = False
        self.monitor_thread = None
        self.alert_callbacks = []                  # callables invoked with the alert message
        self.thresholds = self.config.get('thresholds', {})

        # Rolling performance statistics, fed by record_response().
        self.response_times = deque(maxlen=100)
        self.error_counts = defaultdict(int)       # 'success' / 'error' counters
        self.satisfaction_scores = deque(maxlen=100)

        # Bookkeeping cache (e.g. 'last_retrain_time').
        self.model_performance_cache = {}

        logging.info("模型监控器初始化完成")

    def _load_config(self) -> Dict[str, Any]:
        """Load the monitoring config, deep-merging the file over defaults.

        A recursive merge is used so that a config file overriding only part
        of a nested section (e.g. ``thresholds.min_accuracy``) keeps the
        remaining default values; a flat ``dict.update`` would discard them.

        Returns:
            The effective configuration dictionary.
        """
        default_config = {
            "monitoring_interval": 60,  # seconds between collections
            "thresholds": {
                "min_accuracy": 0.75,
                "max_response_time": 2.0,  # seconds
                "max_error_rate": 0.05,
                "min_user_satisfaction": 0.7,
                "max_memory_usage": 0.8,  # 80%
                "max_cpu_usage": 0.9  # 90%
            },
            "alerts": {
                "enable_email": False,
                "enable_webhook": False,
                "email_recipients": [],
                "webhook_url": ""
            },
            "optimization": {
                "auto_retrain": True,
                "retrain_threshold": 0.7,
                "min_training_data": 100,
                "retrain_interval": 86400  # 24 hours
            }
        }

        if os.path.exists(self.config_file):
            try:
                with open(self.config_file, 'r', encoding='utf-8') as f:
                    user_config = json.load(f)
                self._deep_merge(default_config, user_config)
            except Exception as e:
                logging.error(f"加载监控配置失败: {str(e)}")

        return default_config

    @staticmethod
    def _deep_merge(base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
        """Recursively merge ``override`` into ``base`` in place and return it."""
        for key, value in override.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                ModelMonitor._deep_merge(base[key], value)
            else:
                base[key] = value
        return base

    def start_monitoring(self):
        """Start the background monitoring thread (no-op when already running)."""
        if not self.is_monitoring:
            self.is_monitoring = True
            self.monitor_thread = threading.Thread(target=self._monitoring_loop, daemon=True)
            self.monitor_thread.start()
            logging.info("模型监控已启动")

    def stop_monitoring(self):
        """Signal the monitoring loop to stop and wait briefly for the thread."""
        self.is_monitoring = False
        if self.monitor_thread:
            # The loop may be mid-sleep; a bounded join avoids blocking the caller.
            self.monitor_thread.join(timeout=5)
        logging.info("模型监控已停止")

    def _monitoring_loop(self):
        """Background loop: collect metrics, check thresholds, maybe retrain."""
        interval = self.config.get('monitoring_interval', 60)

        while self.is_monitoring:
            try:
                metrics = self.collect_metrics()
                self.metrics_history.append(metrics)
                self.current_metrics = metrics

                self._check_thresholds(metrics)

                if self.config.get('optimization', {}).get('auto_retrain', False):
                    self._check_retrain_condition()

                time.sleep(interval)

            except Exception as e:
                # Keep the monitor alive even if a single iteration fails.
                logging.error(f"监控循环错误: {str(e)}")
                time.sleep(interval)

    def collect_metrics(self) -> ModelMetrics:
        """Collect a fresh metrics snapshot.

        NOTE: ``psutil.cpu_percent(interval=1)`` blocks for ~1 second; this is
        acceptable because collection runs on the background thread.

        Returns:
            A ModelMetrics snapshot timestamped with the current time.
        """
        accuracy = self._calculate_current_accuracy()
        precision, recall, f1 = self._calculate_precision_recall_f1()
        response_time = self._calculate_avg_response_time()
        memory_usage = psutil.virtual_memory().percent / 100
        cpu_usage = psutil.cpu_percent(interval=1) / 100
        error_rate = self._calculate_error_rate()
        user_satisfaction = self._calculate_user_satisfaction()

        return ModelMetrics(
            timestamp=datetime.now(),
            accuracy=accuracy,
            precision=precision,
            recall=recall,
            f1_score=f1,
            response_time=response_time,
            memory_usage=memory_usage,
            cpu_usage=cpu_usage,
            error_rate=error_rate,
            user_satisfaction=user_satisfaction
        )

    def _mean_satisfaction(self, default: float) -> float:
        """Mean of recorded satisfaction scores, or ``default`` when none exist."""
        if not self.satisfaction_scores:
            return default
        return np.mean(list(self.satisfaction_scores))

    def _calculate_current_accuracy(self) -> float:
        """Estimate current accuracy (default 0.8 when no feedback recorded).

        NOTE(review): accuracy is proxied by the mean user-satisfaction score;
        replace with ground-truth labels when they become available.
        """
        return self._mean_satisfaction(default=0.8)

    def _calculate_precision_recall_f1(self) -> Tuple[float, float, float]:
        """Derive precision/recall/F1 heuristically from estimated accuracy.

        Simplified placeholder — real values should come from labelled data.
        """
        accuracy = self._calculate_current_accuracy()

        # Heuristic: assume precision and recall track accuracy.
        precision = accuracy * 0.95
        recall = accuracy * 0.9
        f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0

        return precision, recall, f1

    def _calculate_avg_response_time(self) -> float:
        """Mean recorded response time in seconds (default 0.5 when empty)."""
        if not self.response_times:
            return 0.5
        return np.mean(list(self.response_times))

    def _calculate_error_rate(self) -> float:
        """Fraction of recorded requests that failed (0.0 when none recorded)."""
        total_requests = sum(self.error_counts.values())
        if total_requests == 0:
            return 0.0
        return self.error_counts.get('error', 0) / total_requests

    def _calculate_user_satisfaction(self) -> float:
        """Mean recent user-satisfaction score (default 0.75 when empty)."""
        return self._mean_satisfaction(default=0.75)

    def _check_thresholds(self, metrics: ModelMetrics):
        """Compare a snapshot against configured thresholds and raise alerts."""
        alerts = []

        if metrics.accuracy < self.thresholds.get('min_accuracy', 0.75):
            alerts.append(f"准确率低于阈值: {metrics.accuracy:.2f}")

        if metrics.response_time > self.thresholds.get('max_response_time', 2.0):
            alerts.append(f"响应时间超过阈值: {metrics.response_time:.2f}s")

        if metrics.error_rate > self.thresholds.get('max_error_rate', 0.05):
            alerts.append(f"错误率超过阈值: {metrics.error_rate:.2f}")

        if metrics.user_satisfaction < self.thresholds.get('min_user_satisfaction', 0.7):
            alerts.append(f"用户满意度低于阈值: {metrics.user_satisfaction:.2f}")

        if metrics.memory_usage > self.thresholds.get('max_memory_usage', 0.8):
            alerts.append(f"内存使用率超过阈值: {metrics.memory_usage:.2%}")

        if metrics.cpu_usage > self.thresholds.get('max_cpu_usage', 0.9):
            alerts.append(f"CPU使用率超过阈值: {metrics.cpu_usage:.2%}")

        for alert in alerts:
            self._trigger_alert(alert)

    def _trigger_alert(self, message: str):
        """Log an alert and fan it out to every registered callback."""
        logging.warning(f"模型警报: {message}")

        for callback in self.alert_callbacks:
            try:
                callback(message)
            except Exception as e:
                # One failing callback must not block the others.
                logging.error(f"警报回调错误: {str(e)}")

    def _check_retrain_condition(self):
        """Trigger retraining when accuracy drops or the retrain interval elapsed."""
        optimization_config = self.config.get('optimization', {})

        if not optimization_config.get('auto_retrain', False):
            return

        # Accuracy-based trigger.
        if self.current_metrics and self.current_metrics.accuracy < optimization_config.get('retrain_threshold', 0.7):
            logging.info("触发自动重训练：准确率低于阈值")
            self.trigger_retrain()
            return

        # Interval-based trigger.
        # NOTE(review): 'last_retrain_time' is unset before the first retrain,
        # so the interval-based trigger never fires then — confirm intended.
        last_retrain = self.model_performance_cache.get('last_retrain_time')
        if last_retrain:
            time_since_retrain = (datetime.now() - last_retrain).total_seconds()
            if time_since_retrain >= optimization_config.get('retrain_interval', 86400):
                logging.info("触发自动重训练：达到时间间隔")
                self.trigger_retrain()

    def trigger_retrain(self):
        """Kick off retraining via the project's AutoLearningManager (best effort)."""
        try:
            # Local import avoids a circular dependency at module load time.
            from api.utils.auto_learning import AutoLearningManager

            auto_learner = AutoLearningManager()
            auto_learner.trigger_training()

            self.model_performance_cache['last_retrain_time'] = datetime.now()
            logging.info("自动重训练已触发")

        except Exception as e:
            logging.error(f"触发重训练失败: {str(e)}")

    def record_response(self, response_time: float, success: bool = True, satisfaction: Optional[float] = None):
        """Record the outcome of one request.

        Args:
            response_time: Duration of the request in seconds.
            success: Whether the request succeeded.
            satisfaction: Optional user-satisfaction score for the request.
        """
        self.response_times.append(response_time)
        self.error_counts['success' if success else 'error'] += 1

        if satisfaction is not None:
            self.satisfaction_scores.append(satisfaction)

    def get_performance_summary(self) -> Dict[str, Any]:
        """Summarize recent performance (last 10 snapshots) with trends and tips."""
        if not self.metrics_history:
            return {"status": "no_data"}

        recent_metrics = list(self.metrics_history)[-10:]

        return {
            "status": "active",
            "total_records": len(self.metrics_history),
            "current_metrics": self.current_metrics.__dict__ if self.current_metrics else None,
            "average_metrics": {
                "accuracy": np.mean([m.accuracy for m in recent_metrics]),
                "response_time": np.mean([m.response_time for m in recent_metrics]),
                "error_rate": np.mean([m.error_rate for m in recent_metrics]),
                "user_satisfaction": np.mean([m.user_satisfaction for m in recent_metrics])
            },
            "trend": self._calculate_trend(),
            "recommendations": self._generate_recommendations()
        }

    def _calculate_trend(self) -> Dict[str, str]:
        """Compare the last 5 snapshots against the 5 before them."""
        if len(self.metrics_history) < 5:
            return {"accuracy": "insufficient_data", "response_time": "insufficient_data"}

        recent = list(self.metrics_history)[-5:]
        older = list(self.metrics_history)[-10:-5]

        if not older:
            return {"accuracy": "insufficient_data", "response_time": "insufficient_data"}

        recent_accuracy = np.mean([m.accuracy for m in recent])
        older_accuracy = np.mean([m.accuracy for m in older])

        recent_response = np.mean([m.response_time for m in recent])
        older_response = np.mean([m.response_time for m in older])

        return {
            "accuracy": "improving" if recent_accuracy > older_accuracy else "declining",
            # Lower response time is better, hence the inverted comparison.
            "response_time": "improving" if recent_response < older_response else "declining"
        }

    def _generate_recommendations(self) -> List[str]:
        """Produce optimization suggestions based on the latest snapshot."""
        recommendations = []

        if not self.current_metrics:
            return recommendations

        if self.current_metrics.accuracy < 0.8:
            recommendations.append("建议收集更多高质量训练数据")

        if self.current_metrics.response_time > 1.5:
            recommendations.append("建议优化模型推理速度，可考虑模型量化")

        if self.current_metrics.error_rate > 0.03:
            recommendations.append("建议检查错误日志并修复常见问题")

        if self.current_metrics.memory_usage > 0.7:
            recommendations.append("建议优化内存使用，考虑清理缓存")

        return recommendations

    def export_metrics(self, filename: Optional[str] = None) -> str:
        """Dump all recorded metrics plus a summary to a JSON file.

        Args:
            filename: Target path; a timestamped name is generated when omitted.

        Returns:
            The path of the written file.
        """
        if not filename:
            filename = f"model_metrics_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"

        metrics_data = {
            "export_time": datetime.now().isoformat(),
            "total_records": len(self.metrics_history),
            "metrics": [m.__dict__ for m in self.metrics_history],
            "summary": self.get_performance_summary()
        }

        with open(filename, 'w', encoding='utf-8') as f:
            # default=str serializes datetime (and any other non-JSON) values.
            json.dump(metrics_data, f, ensure_ascii=False, indent=2, default=str)

        return filename

    def add_alert_callback(self, callback):
        """Register a callable to be invoked with each alert message."""
        self.alert_callbacks.append(callback)

    def get_real_time_dashboard(self) -> Dict[str, Any]:
        """Assemble the live dashboard payload."""
        return {
            "timestamp": datetime.now().isoformat(),
            "is_monitoring": self.is_monitoring,
            "current_metrics": self.current_metrics.__dict__ if self.current_metrics else None,
            "performance_summary": self.get_performance_summary(),
            "recent_alerts": self._get_recent_alerts(),
            "system_status": self._get_system_status()
        }

    def _get_recent_alerts(self) -> List[Dict[str, Any]]:
        """Recent alerts (placeholder; should be read from logs or a database)."""
        return []

    def _get_system_status(self) -> Dict[str, str]:
        """Coarse system health derived from live psutil readings."""
        return {
            "cpu_status": "normal" if psutil.cpu_percent() < 80 else "high",
            "memory_status": "normal" if psutil.virtual_memory().percent < 80 else "high",
            "disk_status": "normal" if psutil.disk_usage('/').percent < 90 else "low",
            "model_status": "running" if self.is_monitoring else "stopped"
        }

# Global monitor instance (module-level singleton, created at import time)
model_monitor = ModelMonitor()

# Decorator for automatically recording response times
def monitor_performance(func):
    """Decorator that reports call latency and outcome to ``model_monitor``.

    Every call records its wall-clock duration and success/failure; when the
    result is a dict containing a ``'satisfaction'`` key, that score is
    forwarded as well. ``functools.wraps`` preserves the wrapped function's
    metadata (``__name__``, ``__doc__``, …) so introspection keeps working.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        try:
            result = func(*args, **kwargs)
        except Exception:
            model_monitor.record_response(time.time() - start_time, success=False)
            # Bare `raise` re-raises with the original traceback intact
            # (unlike `raise e`, which restarts the traceback here).
            raise

        response_time = time.time() - start_time

        # Extract the satisfaction score when the callee provides one.
        satisfaction = None
        if isinstance(result, dict) and 'satisfaction' in result:
            satisfaction = result['satisfaction']

        model_monitor.record_response(response_time, success=True, satisfaction=satisfaction)
        return result

    return wrapper