"""
模型评估接口
提供模型性能评估、对比分析、评估报告等功能
"""
from fastapi import APIRouter, HTTPException, Depends
from typing import List, Dict, Any, Optional
from datetime import datetime, timedelta
import json
import os
from pathlib import Path
import random
import math

from backend.config.database import get_db
from backend.utils.exceptions import PredictionFailedException as PredictionError
from backend.entities.model_eval import ModelEval
from backend.entities.model_training_log import ModelTrainingLog
from backend.entities.feat_imp import FeatImp
from sqlalchemy.orm import Session

router = APIRouter(prefix="/model-evaluation", tags=["model-evaluation"])

# Process-wide evaluator instance, created lazily on first request.
model_evaluator = None

def get_model_evaluator():
    """Return the shared ModelEvaluator, constructing it on first use."""
    global model_evaluator
    if model_evaluator is not None:
        return model_evaluator
    model_evaluator = ModelEvaluator()
    return model_evaluator

class ModelEvaluator:
    """Aggregate model evaluation data and derive rankings, comparisons and reports.

    Data is read from the database tables (model_eval, model_training_log,
    feat_imp) when a SQLAlchemy session is supplied; otherwise it falls back
    to ``training_results_*.json`` files under ``data/`` or to hard-coded
    demo data. Several methods return simulated data (sample curves,
    anomalies) for demonstration purposes.
    """
    
    def __init__(self):
        # Heavy dependencies are deliberately not loaded at construction time
        # to avoid import errors when the module is imported.
        self.trainer = None  # reserved; never assigned elsewhere in this file
        self.error_analyzer = None  # reserved; never assigned elsewhere in this file
        self.models_dir = Path("backend/models/saved_models")  # saved-model dir (unused in this file)
        self.results_dir = Path("data")  # where training_results_*.json files live
        self.db = None  # unused; a Session is passed into each method instead
    
    def load_evaluation_data_from_db(self, db: Session):
        """Load the latest evaluation metrics per model from the model_eval table.

        Returns a dict keyed by model name with mae/rmse/mape/r2/max_error and
        timestamps; {} when there are no rows or the query fails.
        """
        try:
            # Newest evaluations first, so the first row seen per model below
            # is that model's latest record.
            model_evals = db.query(ModelEval).order_by(ModelEval.created_at.desc()).all()
            
            if not model_evals:
                return {}
            
            # Group by model, keeping only the most recent record of each.
            model_data = {}
            for eval_record in model_evals:
                model_name = eval_record.model
                if model_name not in model_data:
                    model_data[model_name] = {
                        # NOTE(review): `if value else 0` collapses NULL *and* a
                        # legitimate 0 metric to 0 — confirm that is intended.
                        'mae': float(eval_record.mae) if eval_record.mae else 0,
                        'rmse': float(eval_record.rmse) if eval_record.rmse else 0,
                        'mape': float(eval_record.mape) if eval_record.mape else 0,
                        'r2': float(eval_record.r2) if eval_record.r2 else 0,
                        'max_error': float(eval_record.max_err) if eval_record.max_err else 0,
                        'train_date': eval_record.train_dt.isoformat() if eval_record.train_dt else None,
                        'created_at': eval_record.created_at.isoformat() if eval_record.created_at else None
                    }
            
            return model_data
            
        except Exception as e:
            # Best-effort: report and degrade to "no data" rather than raising.
            print(f"从数据库加载评估数据失败: {str(e)}")
            return {}
    
    def load_training_logs_from_db(self, db: Session):
        """Load the latest training log per model from model_training_log.

        Returns a dict keyed by model name with status, timestamps,
        hyper-parameters, losses/accuracies and a derived ``training_time``
        in seconds; {} on failure.
        """
        try:
            # Newest runs first; the first row per model is its latest run.
            training_logs = db.query(ModelTrainingLog).order_by(
                ModelTrainingLog.train_start.desc()
            ).all()
            
            training_data = {}
            for log in training_logs:
                model_name = log.model
                if model_name not in training_data:
                    training_data[model_name] = {
                        'status': log.status,
                        'train_start': log.train_start.isoformat() if log.train_start else None,
                        'train_end': log.train_end.isoformat() if log.train_end else None,
                        'epochs': log.epochs,
                        'batch_size': log.batch_size,
                        'learning_rate': float(log.learning_rate) if log.learning_rate else None,
                        'train_loss': float(log.train_loss) if log.train_loss else None,
                        'val_loss': float(log.val_loss) if log.val_loss else None,
                        'train_acc': float(log.train_acc) if log.train_acc else None,
                        'val_acc': float(log.val_acc) if log.val_acc else None,
                        'progress': float(log.progress) if log.progress else 0,
                        'training_time': 0  # filled in below when both timestamps exist
                    }
                    
                    # Wall-clock training duration in seconds.
                    if log.train_start and log.train_end:
                        time_diff = log.train_end - log.train_start
                        training_data[model_name]['training_time'] = time_diff.total_seconds()
            
            return training_data
            
        except Exception as e:
            print(f"从数据库加载训练日志失败: {str(e)}")
            return {}
    
    def load_feature_importance_from_db(self, db: Session):
        """Load feature-importance scores from feat_imp, grouped by model.

        Returns ``{model: {feature: importance}}``; {} on failure.
        """
        try:
            # Ordered newest-first by training date.
            feat_imps = db.query(FeatImp).order_by(FeatImp.train_dt.desc()).all()
            
            feature_data = {}
            for feat_imp in feat_imps:
                model_name = feat_imp.model
                if model_name not in feature_data:
                    feature_data[model_name] = {}
                
                # NOTE(review): if the same (model, feature) pair exists for several
                # training dates, later (i.e. older) rows overwrite newer values
                # here — confirm whether only the latest run should be kept.
                feature_data[model_name][feat_imp.feat] = float(feat_imp.imp) if feat_imp.imp else 0
            
            return feature_data
            
        except Exception as e:
            print(f"从数据库加载特征重要性数据失败: {str(e)}")
            return {}
        
    def load_evaluation_data(self):
        """Load evaluation data from the newest training_results_*.json file.

        Falls back to hard-coded demo data when no file exists or parsing fails.
        """
        try:
            # Result files are produced by the training pipeline.
            training_files = list(self.results_dir.glob("training_results_*.json"))
            if not training_files:
                # No result files yet — serve demo data instead.
                return self.get_fallback_data()
            
            # Pick the most recently modified file.
            latest_file = max(training_files, key=lambda x: x.stat().st_mtime)
            with open(latest_file, 'r', encoding='utf-8') as f:
                training_data = json.load(f)
            
            # The file layout has varied between pipeline versions; accept all shapes.
            if 'training_results' in training_data and 'results' in training_data['training_results']:
                return training_data['training_results']['results']
            elif 'results' in training_data:
                return training_data['results']
            else:
                return training_data
        except Exception as e:
            # Any read/parse problem degrades to demo data.
            return self.get_fallback_data()
    
    def get_fallback_data(self):
        """Return hard-coded demo metrics for three models (for demo purposes)."""
        return {
            'lightgbm': {
                'mae': 85.6,
                'rmse': 124.3,
                'mape': 3.2,
                'r2': 0.892,
                'max_err': 235.8,
                'train_date': '2025-07-19',
                'created_at': '2025-07-19T10:30:00'
            },
            'xgboost': {
                'mae': 92.1,
                'rmse': 138.7,
                'mape': 3.8,
                'r2': 0.875,
                'max_err': 268.4,
                'train_date': '2025-07-19',
                'created_at': '2025-07-19T10:30:00'
            },
            'ensemble': {
                'mae': 78.3,
                'rmse': 115.9,
                'mape': 2.9,
                'r2': 0.906,
                'max_err': 198.7,
                'train_date': '2025-07-19',
                'created_at': '2025-07-19T10:30:00'
            }
        }
    
    def evaluate_all_models(self, db: Session = None):
        """Collect per-model metrics, derive an accuracy score and training time.

        Prefers database data when ``db`` is given; otherwise reads result
        files (with demo fallback). Returns ``{model: metrics_dict}``;
        {} on any failure.
        """
        try:
            # Prefer live database data over files.
            if db:
                model_data = self.load_evaluation_data_from_db(db)
                training_logs = self.load_training_logs_from_db(db)
            else:
                model_data = self.load_evaluation_data()
                training_logs = {}
            
            # Normalise each model's raw record into a complete metrics dict.
            model_metrics = {}
            for model_name, model_info in model_data.items():
                if isinstance(model_info, dict):
                    # Raw metrics for this model.
                    metrics = model_info
                    # Accuracy is a composite of R² and relative error (MAPE).
                    mape = metrics.get('mape', 0)
                    r2 = metrics.get('r2', 0)
                    
                    # Prefer a precomputed accuracy when one is present.
                    if 'accuracy' in metrics:
                        # The evaluation result already includes accuracy — use it directly.
                        accuracy = metrics['accuracy']
                    else:
                        # accuracy = r2 * (1 - MAPE/100), clamped at 0, as a percentage.
                        relative_error = mape / 100.0  # MAPE percent -> fraction
                        accuracy = max(0.0, r2 * (1 - relative_error)) * 100  # as a percentage
                    
                    # Training time: prefer the training log, fall back to the metrics record.
                    training_time = 0
                    if model_name in training_logs:
                        training_time = training_logs[model_name].get('training_time', 0)
                    else:
                        training_time = metrics.get('training_time', 0)
                    
                    model_metrics[model_name] = {
                        'mae': metrics.get('mae', 0),
                        'rmse': metrics.get('rmse', 0),
                        'mape': mape,
                        'smape': metrics.get('smape', mape),  # fall back to MAPE when SMAPE is absent
                        'bias': metrics.get('bias', 0),
                        'r2': metrics.get('r2', 0),
                        'max_error': metrics.get('max_error', metrics.get('max_err', 0)),  # db uses 'max_error', file fallback uses 'max_err'
                        'p90_error': metrics.get('p90_error', metrics.get('rmse', 0) * 1.5),  # rough estimate from RMSE
                        'p95_error': metrics.get('p95_error', metrics.get('rmse', 0) * 1.8),  # rough estimate from RMSE
                        'inference_time': metrics.get('inference_time', 1.0),
                        'model_size': metrics.get('model_size', 2.0),  # estimated size (units unspecified — presumably MB, confirm)
                        'accuracy': accuracy,  # derived accuracy (percent)
                        'training_time': training_time,  # used by report generation
                        'train_dt': metrics.get('train_date'),  # source key is 'train_date'
                        'created_at': metrics.get('created_at')
                    }
                    
                    # Debug output.
                    print(f"Model {model_name}: MAPE={mape}, Accuracy={accuracy}")
            
            return model_metrics
        except Exception as e:
            # Swallow everything: callers treat {} as "no models available".
            return {}
    
    def get_fallback_data_with_metrics(self):
        """Placeholder for demo data with full metrics; currently returns {}."""
        return {}
    
    def rank_models(self, db: Session = None):
        """Rank all models by derived accuracy, highest first.

        Returns a list of ``{name, displayName, metrics, accuracy}``;
        [] on failure.
        """
        try:
            model_metrics = self.evaluate_all_models(db)
            
            ranked_models = []
            for name, metrics in model_metrics.items():
                # Accuracy is the sole ranking criterion.
                accuracy = metrics.get('accuracy', 0)
                
                ranked_models.append({
                    'name': name,
                    'displayName': name.upper(),
                    'metrics': metrics,
                    'accuracy': accuracy
                })
            
            # Highest accuracy first.
            ranked_models.sort(key=lambda x: x['accuracy'], reverse=True)
            
            return ranked_models
        except Exception as e:
            print(f"模型排名失败: {str(e)}")
            return []
    
    def get_model_comparison_data(self, db: Session = None):
        """Return a flat list of per-model metric rows for side-by-side comparison."""
        try:
            model_metrics = self.evaluate_all_models(db)
            
            comparison_data = []
            for model_name, metrics in model_metrics.items():
                comparison_data.append({
                    'model': model_name.upper(),
                    **metrics
                })
            
            return comparison_data
        except Exception as e:
            print(f"获取模型对比数据失败: {str(e)}")
            return []
    
    def generate_evaluation_report(self, db: Session = None):
        """Assemble a full evaluation report.

        Includes best model, rankings, comparison table and averaged summary
        statistics. On failure returns a zeroed report carrying the error text.
        """
        try:
            model_metrics = self.evaluate_all_models(db)
            ranked_models = self.rank_models(db)
            
            # The ranking list is sorted best-first.
            best_model = ranked_models[0] if ranked_models else None
            
            # Mean accuracy across all models (0 when there are none).
            avg_accuracy = sum(m['accuracy'] for m in model_metrics.values()) / len(model_metrics) if model_metrics else 0
            
            # Debug output.
            print(f"Model metrics: {model_metrics}")
            print(f"Average accuracy calculation: {avg_accuracy}")
            
            # Assemble the report payload.
            report = {
                'timestamp': datetime.now().isoformat(),
                'total_models': len(model_metrics),
                'best_model': best_model['name'] if best_model else None,
                'best_model_metrics': best_model['metrics'] if best_model else None,
                'model_rankings': ranked_models,
                'comparison_data': self.get_model_comparison_data(db),
                'evaluation_summary': {
                    'avg_mae': sum(m['mae'] for m in model_metrics.values()) / len(model_metrics) if model_metrics else 0,
                    'avg_rmse': sum(m['rmse'] for m in model_metrics.values()) / len(model_metrics) if model_metrics else 0,
                    'avg_r2': sum(m['r2'] for m in model_metrics.values()) / len(model_metrics) if model_metrics else 0,
                    'avg_accuracy': avg_accuracy,
                    'avg_training_time': sum(m['training_time'] for m in model_metrics.values()) / len(model_metrics) if model_metrics else 0,
                    'performance_variance': 0  # deliberately simplified — not computed
                }
            }
            
            return report
        except Exception as e:
            print(f"生成评估报告失败: {str(e)}")
            return {
                'timestamp': datetime.now().isoformat(),
                'total_models': 0,
                'best_model': None,
                'error': str(e),
                'model_rankings': [],
                'comparison_data': [],
                'evaluation_summary': {
                    'avg_mae': 0,
                    'avg_rmse': 0,
                    'avg_r2': 0,
                    'avg_accuracy': 0,
                    'avg_training_time': 0,
                    'performance_variance': 0
                }
            }
    
    def get_sample_analysis(self, sample_index: int = 0):
        """Generate a simulated 24-hour load curve for one sample and score it.

        Values are synthetic; the RNG is seeded by ``sample_index`` so the
        same index always yields the same curve. Raises PredictionError
        (PredictionFailedException) when generation fails.
        """
        try:
            # Seed per sample for reproducible results.
            random.seed(42 + sample_index)
            
            # 24 hours at 15-minute resolution.
            time_points = 96
            
            # Ground truth: a smooth daily load shape plus Gaussian noise.
            true_values = []
            for i in range(time_points):
                t = i / time_points * 24  # time of day in hours
                base_load = 0.6 + 0.3 * math.sin(2 * math.pi * t / 24) + 0.1 * math.sin(4 * math.pi * t / 24)
                noise = 0.05 * random.gauss(0, 1)
                true_values.append(base_load + noise)
            
            # Predictions: ground truth plus smaller prediction noise.
            predicted_values = []
            for i in range(time_points):
                prediction_noise = 0.03 * random.gauss(0, 1)
                predicted_values.append(true_values[i] + prediction_noise)
            
            # Signed point-wise errors (prediction minus truth).
            errors = [pred - true for pred, true in zip(predicted_values, true_values)]
            
            # Standard error metrics.
            mae = sum(abs(e) for e in errors) / len(errors)
            mse = sum(e**2 for e in errors) / len(errors)
            rmse = math.sqrt(mse)
            # NOTE(review): zero targets are skipped in the numerator but the
            # divisor stays len(true_values), slightly underestimating MAPE.
            mape = sum(abs(e / t) for e, t in zip(errors, true_values) if t != 0) / len(true_values) * 100
            
            # Coefficient of determination (R²).
            mean_true = sum(true_values) / len(true_values)
            ss_tot = sum((t - mean_true)**2 for t in true_values)
            ss_res = sum(e**2 for e in errors)
            r2 = 1 - (ss_res / ss_tot) if ss_tot != 0 else 0
            
            # Accuracy as a percentage, floored at 0.
            accuracy = max(0, (1 - mape / 100) * 100)
            
            return {
                'sample_index': sample_index,
                'true_values': true_values,
                'predicted_values': predicted_values,
                'errors': errors,
                'metrics': {
                    'mae': mae,
                    'mse': mse,
                    'rmse': rmse,
                    'mape': mape,
                    'r2': r2,
                    'accuracy': accuracy
                }
            }
        except Exception as e:
            raise PredictionError(f"获取样本分析失败: {str(e)}")
    
    def get_anomaly_samples(self, threshold: float = 0.95):
        """Scan simulated samples and report those below the accuracy threshold.

        ``threshold`` is a fraction (0.95 means 95% accuracy). Returns the
        anomalies, their count and an anomaly rate that assumes a total
        population of 100 samples.
        """
        try:
            anomalies = []
            
            # Probe 10 simulated samples (indices 100-109).
            for i in range(10):
                sample = self.get_sample_analysis(sample_index=i + 100)
                
                # Flag a sample as anomalous when its accuracy falls below the threshold.
                if sample['metrics']['accuracy'] < threshold * 100:
                    anomalies.append({
                        'index': i + 100,
                        'accuracy': sample['metrics']['accuracy'] / 100,  # back to a fraction
                        'mse': sample['metrics']['mse'],
                        'mae': sample['metrics']['mae'],
                        'mape': sample['metrics']['mape'],
                        'r2': sample['metrics']['r2']
                    })
            
            return {
                'anomalies': anomalies,
                'total_count': len(anomalies),
                'anomaly_rate': len(anomalies) / 100 * 100  # assumes 100 samples in total
            }
        except Exception as e:
            # Degrade to an empty result instead of propagating the failure.
            return {
                'anomalies': [],
                'total_count': 0,
                'anomaly_rate': 0
            }

@router.get("/metrics")
async def get_model_metrics(db: Session = Depends(get_db)):
    """获取所有模型的性能指标"""
    try:
        evaluator = get_model_evaluator()
        model_metrics = evaluator.evaluate_all_models(db)
        
        return {
            "success": True,
            "data": model_metrics,
            "message": "模型指标获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {},
            "message": f"模型指标获取失败: {str(e)}"
        }

@router.get("/comparison")
async def get_model_comparison(db: Session = Depends(get_db)):
    """获取模型对比数据"""
    try:
        evaluator = get_model_evaluator()
        comparison_data = evaluator.get_model_comparison_data(db)
        
        return {
            "success": True,
            "data": comparison_data,
            "message": "模型对比数据获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": [],
            "message": f"模型对比数据获取失败: {str(e)}"
        }

@router.get("/ranking")
async def get_model_ranking(db: Session = Depends(get_db)):
    """获取模型排名"""
    try:
        evaluator = get_model_evaluator()
        ranked_models = evaluator.rank_models(db)
        
        return {
            "success": True,
            "data": ranked_models,
            "message": "模型排名获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": [],
            "message": f"模型排名获取失败: {str(e)}"
        }

@router.get("/report")
async def generate_evaluation_report(db: Session = Depends(get_db)):
    """生成模型评估报告"""
    try:
        evaluator = get_model_evaluator()
        report = evaluator.generate_evaluation_report(db)
        
        return {
            "success": True,
            "data": report,
            "message": "评估报告生成成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {
                'timestamp': datetime.now().isoformat(),
                'total_models': 0,
                'best_model': None,
                'error': str(e)
            },
            "message": f"评估报告生成失败: {str(e)}"
        }

@router.get("/sample/{sample_index}")
async def get_sample_details(sample_index: int):
    """获取样本详细分析"""
    try:
        evaluator = get_model_evaluator()
        sample_data = evaluator.get_sample_analysis(sample_index)
        
        return {
            "success": True,
            "data": sample_data,
            "message": f"样本 {sample_index} 分析数据获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {},
            "message": f"样本分析获取失败: {str(e)}"
        }

@router.get("/anomalies")
async def get_anomaly_samples(threshold: float = 0.95):
    """获取异常样本"""
    try:
        evaluator = get_model_evaluator()
        anomaly_data = evaluator.get_anomaly_samples(threshold)
        
        return {
            "success": True,
            "data": anomaly_data,
            "message": "异常样本获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {
                'anomalies': [],
                'total_count': 0,
                'anomaly_rate': 0
            },
            "message": f"异常样本获取失败: {str(e)}"
        }

@router.get("/performance-trends")
async def get_performance_trends(db: Session = Depends(get_db)):
    """获取性能趋势数据"""
    try:
        evaluator = get_model_evaluator()
        
        # 生成模拟趋势数据
        dates = []
        metrics_trend = {}
        
        for i in range(30):  # 30天的数据
            date = (datetime.now() - timedelta(days=29-i)).strftime("%Y-%m-%d")
            dates.append(date)
        
        model_metrics = evaluator.evaluate_all_models(db)
        for model_name in model_metrics.keys():
            # 使用Python内置random生成模拟趋势数据
            metrics_trend[model_name] = {
                'mae': [model_metrics[model_name]['mae'] * (1 + 0.1 * random.gauss(0, 1)) for _ in range(30)],
                'rmse': [model_metrics[model_name]['rmse'] * (1 + 0.1 * random.gauss(0, 1)) for _ in range(30)],
                'r2': [model_metrics[model_name]['r2'] * (1 + 0.05 * random.gauss(0, 1)) for _ in range(30)]
            }
        
        trend_data = {
            'dates': dates,
            'metrics': metrics_trend
        }
        
        return {
            "success": True,
            "data": trend_data,
            "message": "性能趋势数据获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {
                'dates': [],
                'metrics': {}
            },
            "message": f"性能趋势数据获取失败: {str(e)}"
        }

@router.post("/evaluate")
async def evaluate_models(db: Session = Depends(get_db)):
    """重新评估所有模型"""
    try:
        evaluator = get_model_evaluator()
        
        # 执行模型评估
        model_metrics = evaluator.evaluate_all_models(db)
        ranking = evaluator.rank_models(db)
        
        evaluation_result = {
            'metrics': model_metrics,
            'ranking': ranking,
            'timestamp': datetime.now().isoformat()
        }
        
        return {
            "success": True,
            "data": evaluation_result,
            "message": "模型评估完成"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {
                'metrics': {},
                'ranking': [],
                'timestamp': datetime.now().isoformat()
            },
            "message": f"模型评估失败: {str(e)}"
        }

@router.get("/best-model")
async def get_best_model(db: Session = Depends(get_db)):
    """获取最佳模型信息"""
    try:
        evaluator = get_model_evaluator()
        ranked_models = evaluator.rank_models(db)
        
        if not ranked_models:
            return {
                "success": False,
                "data": None,
                "message": "未找到可用模型"
            }
        
        best_model = ranked_models[0]
        
        return {
            "success": True,
            "data": best_model,
            "message": "最佳模型信息获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": None,
            "message": f"最佳模型信息获取失败: {str(e)}"
        }

@router.get("/feature-importance")
async def get_feature_importance(db: Session = Depends(get_db)):
    """获取特征重要性数据"""
    try:
        evaluator = get_model_evaluator()
        feature_data = evaluator.load_feature_importance_from_db(db)
        
        return {
            "success": True,
            "data": feature_data,
            "message": "特征重要性数据获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {},
            "message": f"特征重要性数据获取失败: {str(e)}"
        }

@router.get("/training-logs")
async def get_training_logs(db: Session = Depends(get_db)):
    """获取训练日志数据"""
    try:
        evaluator = get_model_evaluator()
        training_data = evaluator.load_training_logs_from_db(db)
        
        return {
            "success": True,
            "data": training_data,
            "message": "训练日志数据获取成功"
        }
    except Exception as e:
        return {
            "success": False,
            "data": {},
            "message": f"训练日志数据获取失败: {str(e)}"
        }