"""
Evaluate模块
分为评估器(Evaluator)和日志工具(Logger)两类方法
支持11分类任务的详细评估分析
"""
import yaml
import numpy as np
import pandas as pd
import json
import os
from typing import Dict, Any, List, Optional
import logging
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from models import load_models_config, load_paths_config
from sklearn.metrics import (
    classification_report, confusion_matrix, accuracy_score,
    precision_score, recall_score, f1_score
)

# NOTE(review): import-time side effect — these YAML loads run whenever the
# module is imported, with paths relative to the current working directory
# ('../config/...'), so importing from the wrong CWD raises at import time.
# The resulting module-level `config`/`paths` are not used by the classes
# below (each receives its config via its constructor). TODO: confirm no
# other module relies on `evaluate.config` / `evaluate.paths` before removing.
config = load_models_config('../config/models.yaml')
paths = load_paths_config('../config/paths.yaml')

class ModelEvaluator:
    """Model evaluator - focused on computing model performance metrics.

    Stateless apart from `best_scores`, which remembers the best monitored
    score seen so far for each model name (used by `track_best_model`).
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.best_scores = {}  # model_name -> best monitored score seen so far

    def evaluate_model_performance(self, model, X_test: np.ndarray, y_test: np.ndarray, 
                                 model_name: str) -> Dict[str, float]:
        """Evaluate a fitted model on the held-out test set.

        Args:
            model: any estimator exposing ``predict(X)``.
            X_test: test features.
            y_test: true labels.
            model_name: identifier (unused here; kept for interface symmetry).

        Returns:
            Dict with 'accuracy' plus precision/recall/F1 under macro, micro
            and weighted averaging. ``zero_division=0`` prevents warnings and
            NaNs for classes that receive no predictions.
        """
        # 1. Predict once, reuse for every metric
        y_pred = model.predict(X_test)

        # 2. Accuracy first, then the three averaging schemes in the same
        #    key order callers have always seen (macro, micro, weighted).
        metrics = {'accuracy': accuracy_score(y_test, y_pred)}
        for avg in ('macro', 'micro', 'weighted'):
            metrics[f'precision_{avg}'] = precision_score(y_test, y_pred, average=avg, zero_division=0)
            metrics[f'recall_{avg}'] = recall_score(y_test, y_pred, average=avg, zero_division=0)
            metrics[f'f1_{avg}'] = f1_score(y_test, y_pred, average=avg, zero_division=0)

        return metrics

    def generate_classification_report(self, y_true: np.ndarray, y_pred: np.ndarray, 
                                     target_names: Optional[List[str]] = None) -> str:
        """Return sklearn's text classification report.

        When `target_names` is omitted, synthesizes "Class_i" names from the
        distinct labels present in `y_true`.
        """
        if target_names is None:
            target_names = [f"Class_{i}" for i in range(len(np.unique(y_true)))]

        report = classification_report(
            y_true, y_pred, 
            target_names=target_names,
            digits=4,
            zero_division=0
        )

        return report

    def generate_confusion_matrix(self, y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        """Return the confusion matrix (rows = true labels, cols = predicted)."""
        return confusion_matrix(y_true, y_pred)

    def calculate_per_class_metrics(self, y_true: np.ndarray, y_pred: np.ndarray) -> Dict[str, List[float]]:
        """Return per-class precision/recall/F1 as plain Python lists.

        Lists are indexed by sorted label order, matching sklearn's
        ``average=None`` output.
        """
        precision_per_class = precision_score(y_true, y_pred, average=None, zero_division=0)
        recall_per_class = recall_score(y_true, y_pred, average=None, zero_division=0)
        f1_per_class = f1_score(y_true, y_pred, average=None, zero_division=0)

        return {
            'precision_per_class': precision_per_class.tolist(),
            'recall_per_class': recall_per_class.tolist(),
            'f1_per_class': f1_per_class.tolist()
        }

    def track_best_model(self, model, model_name: str, metrics: Dict[str, float]) -> bool:
        """Record whether `metrics` is the best result seen for `model_name`.

        The monitored metric name comes from
        ``config['training']['early_stopping_monitor']`` (default 'f1_macro').

        Fix: a configured monitor metric that is missing from `metrics` no
        longer raises KeyError — it falls back to 'f1_macro', then 0.0.

        `model` is unused but kept for interface compatibility with callers.
        """
        monitor_metric = self.config.get('training', {}).get('early_stopping_monitor', 'f1_macro')
        current_score = metrics.get(monitor_metric, metrics.get('f1_macro', 0.0))

        is_best = model_name not in self.best_scores or current_score > self.best_scores[model_name]
        if is_best:
            self.best_scores[model_name] = current_score

        return is_best

    def compare_models(self, models_results: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
        """Compare several models' test metrics and pick the best by f1_macro.

        Entries without a 'test_metrics' key are skipped.

        Fix: the first model with results is now always selected as a
        candidate, so a best model is reported even when every f1_macro is
        0.0 (previously `best_score` started at 0 and a 0.0 score never won,
        leaving `best_model` as None).
        """
        comparison_data = []
        best_model = None
        best_score = 0.0

        for model_name, results in models_results.items():
            if 'test_metrics' not in results:
                continue  # model was never evaluated; nothing to compare
            metrics = results['test_metrics']
            comparison_data.append({
                'model_name': model_name,
                'accuracy': metrics['accuracy'],
                'precision_macro': metrics['precision_macro'],
                'recall_macro': metrics['recall_macro'],
                'f1_macro': metrics['f1_macro'],
                'f1_micro': metrics['f1_micro'],
                'f1_weighted': metrics['f1_weighted'],
                'training_time': results.get('training_time', 0)
            })

            if best_model is None or metrics['f1_macro'] > best_score:
                best_score = metrics['f1_macro']
                best_model = model_name

        return {
            'models_comparison': comparison_data,
            'best_model': best_model,
            'best_f1_score': best_score,
            'comparison_time': datetime.now().isoformat()
        }


class EvaluationLogger:
    """Evaluation logging utility - handles log output and file persistence.

    Owns a dedicated logger (file + console handlers) and persists
    classification reports, confusion matrices (PNG plot and JSON data),
    arbitrary result dicts, and best-model checkpoints under the configured
    results/logs/models directories.
    """
    
    def __init__(self, paths: Dict[str, Any]):
        # Nested path config, e.g. paths['results']['base_path']; every
        # accessor below falls back to 'results'/'logs'/'models' defaults.
        self.paths = paths
        self.logger = self._setup_logger()
        
        # Make sure the results/logs directory tree exists up front
        self._create_directories()
    
    def _setup_logger(self) -> logging.Logger:
        """Build the 'EvaluationLogger' logger with file + console handlers."""
        logger = logging.getLogger('EvaluationLogger')
        logger.setLevel(logging.INFO)
        
        # logging.getLogger returns a cached instance, so guard against
        # attaching duplicate handlers when this class is constructed again
        if not logger.handlers:
            # File handler writing to <logs base_path>/evaluation.log
            log_dir = self.paths.get('logs', {}).get('base_path', 'logs')
            os.makedirs(log_dir, exist_ok=True)
            
            file_handler = logging.FileHandler(
                os.path.join(log_dir, 'evaluation.log'),
                encoding='utf-8'
            )
            file_handler.setLevel(logging.INFO)
            
            # Console handler (stderr by default)
            console_handler = logging.StreamHandler()
            console_handler.setLevel(logging.INFO)
            
            # Shared timestamped format for both handlers
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            file_handler.setFormatter(formatter)
            console_handler.setFormatter(formatter)
            
            # Attach both handlers
            logger.addHandler(file_handler)
            logger.addHandler(console_handler)
        
        return logger
    
    def _create_directories(self) -> None:
        """Create the required results/logs directory structure (idempotent)."""
        dirs_to_create = [
            self.paths.get('results', {}).get('base_path', 'results'),
            self.paths.get('logs', {}).get('base_path', 'logs'),
            os.path.join(self.paths.get('results', {}).get('base_path', 'results'), 'plots'),
            os.path.join(self.paths.get('results', {}).get('base_path', 'results'), 'reports')
        ]
        
        for dir_path in dirs_to_create:
            os.makedirs(dir_path, exist_ok=True)
    
    def log_evaluation_result(self, model_name: str, metrics: Dict[str, float], 
                            is_best: bool = False, additional_info: Optional[Dict[str, Any]] = None) -> None:
        """Log one model's evaluation metrics.

        Args:
            model_name: display name of the evaluated model.
            metrics: must contain 'accuracy', 'precision_macro', 'recall_macro',
                'f1_macro', 'f1_micro' and 'f1_weighted' (KeyError otherwise).
            is_best: prefixes a trophy marker when this is the best model so far.
            additional_info: extra key/value pairs logged one per line.
        """
        best_marker = "🏆 " if is_best else ""
        self.logger.info(f"📊 {best_marker}{model_name} 评估结果:")
        self.logger.info(f"   准确率: {metrics['accuracy']:.4f}")
        self.logger.info(f"   精确率(macro): {metrics['precision_macro']:.4f}")
        self.logger.info(f"   召回率(macro): {metrics['recall_macro']:.4f}")
        self.logger.info(f"   F1分数(macro): {metrics['f1_macro']:.4f}")
        self.logger.info(f"   F1分数(micro): {metrics['f1_micro']:.4f}")
        self.logger.info(f"   F1分数(weighted): {metrics['f1_weighted']:.4f}")
        
        # Any extra context supplied by the caller, one line per key
        if additional_info:
            for key, value in additional_info.items():
                self.logger.info(f"   {key}: {value}")
    
    def save_classification_report(self, report: str, model_name: str) -> str:
        """Write the text classification report under <results>/reports/.

        Returns the path of the written file.
        """
        report_path = os.path.join(
            self.paths.get('results', {}).get('base_path', 'results'),
            'reports',
            f'{model_name}_classification_report.txt'
        )
        
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(f"{model_name} 分类报告\n")
            f.write("=" * 60 + "\n")
            f.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write("=" * 60 + "\n\n")
            f.write(report)
        
        self.logger.info(f"分类报告已保存到: {report_path}")
        return report_path
    
    def save_confusion_matrix_plot(self, cm: np.ndarray, model_name: str, 
                                 target_names: Optional[List[str]] = None) -> str:
        """Render the confusion matrix as a seaborn heatmap PNG.

        Saved to <results>/plots/<model_name>_confusion_matrix.png; returns
        the path of the written image.

        NOTE(review): title/labels are Chinese — rendering assumes a
        CJK-capable matplotlib font is configured; verify in the target
        environment.
        """
        plt.figure(figsize=(12, 10))
        
        # Fall back to synthetic class names sized to the matrix
        if target_names is None:
            target_names = [f"Class_{i}" for i in range(cm.shape[0])]
        
        # Annotated heatmap: integer cell counts on a blue scale
        sns.heatmap(
            cm, 
            annot=True, 
            fmt='d', 
            cmap='Blues',
            xticklabels=target_names,
            yticklabels=target_names,
            cbar_kws={'label': '样本数量'}
        )
        
        plt.title(f'{model_name} 混淆矩阵', fontsize=16, pad=20)
        plt.xlabel('预测标签', fontsize=12)
        plt.ylabel('真实标签', fontsize=12)
        plt.xticks(rotation=45, ha='right')
        plt.yticks(rotation=0)
        
        # Write the PNG and close the figure to release matplotlib memory
        plot_path = os.path.join(
            self.paths.get('results', {}).get('base_path', 'results'),
            'plots',
            f'{model_name}_confusion_matrix.png'
        )
        
        plt.tight_layout()
        plt.savefig(plot_path, dpi=300, bbox_inches='tight')
        plt.close()
        
        self.logger.info(f"混淆矩阵图片已保存到: {plot_path}")
        return plot_path
    
    def save_confusion_matrix_data(self, cm: np.ndarray, model_name: str, 
                                 target_names: Optional[List[str]] = None) -> str:
        """Persist the confusion matrix as JSON under <results>/reports/.

        Returns the path of the written JSON file.
        """
        matrix_path = os.path.join(
            self.paths.get('results', {}).get('base_path', 'results'),
            'reports',
            f'{model_name}_confusion_matrix.json'
        )
        
        matrix_data = {
            'confusion_matrix': cm.tolist(),
            'target_names': target_names or [f"Class_{i}" for i in range(cm.shape[0])],
            'model_name': model_name,
            'generated_at': datetime.now().isoformat()
        }
        
        with open(matrix_path, 'w', encoding='utf-8') as f:
            json.dump(matrix_data, f, ensure_ascii=False, indent=2)
        
        self.logger.info(f"混淆矩阵数据已保存到: {matrix_path}")
        return matrix_path
    
    def save_evaluation_results(self, results: Dict[str, Any], filename: str) -> str:
        """Dump an arbitrary results dict as JSON under <results>/.

        default=str stringifies anything json can't serialize natively
        (e.g. numpy scalars, datetimes). Returns the written file path.
        """
        filepath = os.path.join(
            self.paths.get('results', {}).get('base_path', 'results'),
            filename
        )
        
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(results, f, ensure_ascii=False, indent=2, default=str)
        
        self.logger.info(f"评估结果已保存到: {filepath}")
        return filepath
    
    def save_best_model(self, model, model_name: str) -> str:
        """Persist the best model under <models>/best_models/<name>_best.pkl.

        NOTE(review): assumes `model` exposes a .save(path) method — confirm
        against the project's model wrapper interface.
        """
        best_model_dir = os.path.join(
            self.paths.get('models', {}).get('base_path', 'models'),
            'best_models'
        )
        os.makedirs(best_model_dir, exist_ok=True)
        
        best_model_path = os.path.join(best_model_dir, f'{model_name}_best.pkl')
        model.save(best_model_path)
        
        self.logger.info(f"💾 最佳模型已保存: {best_model_path}")
        return best_model_path


class EvaluationOperation:
    """Evaluation orchestrator - wires the evaluator and the logging utility.

    High-level entry points used by training code: evaluate-and-track a
    model, generate a comprehensive report (classification report +
    confusion matrix), compare several models, and analyse per-label
    performance.
    """

    def __init__(self, config: Dict[str, Any], paths: Dict[str, str]):
        self.config = config
        self.paths = paths

        # Composed collaborators: metric computation vs. logging/persistence
        self.evaluator = ModelEvaluator(config)
        self.logger_tool = EvaluationLogger(paths)

    def evaluate_and_track_operator(self, model, X_test: np.ndarray, 
                                  y_test: np.ndarray, model_name: str) -> Dict[str, float]:
        """Evaluate a model, log the result, and persist it if it is the best so far.

        Returns the metrics dict from ModelEvaluator.evaluate_model_performance.
        """
        # 1. Compute test-set metrics
        metrics = self.evaluator.evaluate_model_performance(model, X_test, y_test, model_name)

        # 2. Does this run beat the best monitored score for this model name?
        is_best = self.evaluator.track_best_model(model, model_name, metrics)

        # 3. Log the outcome (with best-model marker when applicable)
        self.logger_tool.log_evaluation_result(model_name, metrics, is_best)

        # 4. Only persist when it is the new best
        if is_best:
            self.logger_tool.save_best_model(model, model_name)

        return metrics

    def generate_comprehensive_report_operator(self, model, X_test: np.ndarray, y_test: np.ndarray,
                                            model_name: str, target_names: Optional[List[str]] = None) -> Dict[str, str]:
        """Generate and persist a classification report plus confusion matrix.

        Writes three artifacts (text report, PNG heatmap, JSON matrix) and
        returns their file paths.
        """
        # 1. Predict once, reuse for both report and matrix
        y_pred = model.predict(X_test)

        # 2. Text classification report
        classification_report_text = self.evaluator.generate_classification_report(
            y_test, y_pred, target_names
        )

        # 3. Confusion matrix
        confusion_matrix_array = self.evaluator.generate_confusion_matrix(y_test, y_pred)

        # 4. Persist all three artifacts
        report_path = self.logger_tool.save_classification_report(classification_report_text, model_name)
        plot_path = self.logger_tool.save_confusion_matrix_plot(confusion_matrix_array, model_name, target_names)
        matrix_path = self.logger_tool.save_confusion_matrix_data(confusion_matrix_array, model_name, target_names)

        return {
            'classification_report_path': report_path,
            'confusion_matrix_plot_path': plot_path,
            'confusion_matrix_data_path': matrix_path
        }

    def compare_models_operator(self, models_results: Dict[str, Dict[str, Any]]) -> Dict[str, Any]:
        """Compare several models' results, log the winner, and save the comparison."""
        # 1. Delegate the comparison to the evaluator
        comparison_result = self.evaluator.compare_models(models_results)

        # 2. Log the outcome
        self.logger_tool.logger.info("📊 模型性能对比完成")
        self.logger_tool.logger.info(f"🏆 最佳模型: {comparison_result['best_model']} (F1分数: {comparison_result['best_f1_score']:.4f})")

        # 3. Persist the comparison for later inspection
        self.logger_tool.save_evaluation_results(comparison_result, 'model_comparison.json')

        return comparison_result

    def evaluate_label_specific_performance(self, model, X_test: np.ndarray, y_test: np.ndarray,
                                          model_name: str, target_labels: Optional[List[int]] = None) -> Dict[str, Any]:
        """Evaluate performance for specific labels (XGBoost label-boosting analysis).

        For each label in `target_labels` that occurs in `y_test`, reports the
        accuracy restricted to that label's samples, alongside overall and
        per-class metrics. Results are logged and saved as JSON.
        """
        y_pred = model.predict(X_test)

        # 1. Overall metrics
        overall_metrics = self.evaluator.evaluate_model_performance(model, X_test, y_test, model_name)

        # 2. Per-class precision/recall/F1
        per_class_metrics = self.evaluator.calculate_per_class_metrics(y_test, y_pred)

        # 3. Per-target-label accuracy (labels absent from y_test are skipped)
        label_specific_analysis = {}
        if target_labels:
            for label in target_labels:
                mask = (y_test == label)
                if mask.sum() > 0:
                    label_y_true = y_test[mask]
                    label_y_pred = y_pred[mask]

                    label_accuracy = accuracy_score(label_y_true, label_y_pred)
                    # Fix: coerce numpy scalars to native int/float so the
                    # saved JSON contains numbers — previously json.dump's
                    # default=str silently stringified np.int64 counts.
                    label_specific_analysis[f'label_{label}'] = {
                        'accuracy': float(label_accuracy),
                        'sample_count': int(mask.sum()),
                        'correct_predictions': int((label_y_true == label_y_pred).sum())
                    }

        # 4. Log overall metrics plus the label breakdown
        self.logger_tool.log_evaluation_result(
            model_name, overall_metrics, 
            additional_info={'label_specific_analysis': label_specific_analysis}
        )

        results = {
            'overall_metrics': overall_metrics,
            'per_class_metrics': per_class_metrics,
            'label_specific_analysis': label_specific_analysis,
            'model_name': model_name,
            'evaluation_time': datetime.now().isoformat()
        }

        # 5. Persist the detailed analysis
        self.logger_tool.save_evaluation_results(results, f'{model_name}_label_analysis.json')

        return results


def create_evaluation_operation(config_path: str = 'config/models.yaml', 
                              paths_path: str = 'config/paths.yaml') -> EvaluationOperation:
    """Factory: build an EvaluationOperation from the two YAML config files.

    Args:
        config_path: models config YAML (sections consumed by ModelEvaluator).
        paths_path: paths config YAML (directories used by EvaluationLogger).

    Raises:
        FileNotFoundError: with a user-facing message when either file is missing.

    Fixes: removed the redundant function-local ``import yaml`` (the module
    already imports yaml at the top) and deduplicated the two identical
    load/error-handling branches into one local helper.
    """
    def _load_yaml(path, err_msg):
        # Shared loader: both config files use identical error handling
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return yaml.safe_load(f)
        except FileNotFoundError:
            raise FileNotFoundError(err_msg)

    config = _load_yaml(config_path, f"配置文件不存在: {config_path}，请确保config/models.yaml存在")
    paths = _load_yaml(paths_path, f"路径配置文件不存在: {paths_path}，请确保config/paths.yaml存在")

    return EvaluationOperation(config, paths)


if __name__ == "__main__":
    # Smoke test: build the orchestrator from the YAML files in config/
    try:
        op = create_evaluation_operation()
        print("✅ 评估编排器创建成功（使用YAML配置）")
        print(f"加载的配置节: {list(op.config.keys())}")
        print(f"加载的路径配置: {list(op.paths.keys())}")

        print(f"\n📋 模块结构:")
        print(f"  评估器类: {type(op.evaluator).__name__}")
        print(f"  日志工具类: {type(op.logger_tool).__name__}")

    except FileNotFoundError as err:
        print(f"❌ {err}")
        print("请确保在项目根目录运行，且config/目录下有models.yaml和paths.yaml文件")