import torch
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import (classification_report, confusion_matrix,
                             ConfusionMatrixDisplay, roc_auc_score, roc_curve,
                             f1_score)
from sklearn.metrics import auc as calculate_auc
import os
from typing import Tuple, Dict, List, Any

from src.config_loader import ConfigLoader

# Configure matplotlib for Chinese (CJK) text rendering:
# use the SimHei font and keep the minus sign renderable with it.
plt.rcParams["font.family"] = "SimHei"
plt.rcParams.update({"axes.unicode_minus": False})


class Evaluator:
    """Evaluation component: computes test-set metrics, renders diagnostic
    plots (ROC, confusion matrix, training curves) and runs a feature-level
    misclassification analysis, saving all artifacts to configured directories.
    """

    def __init__(self, config: ConfigLoader, model_manager, data_processor, logger):
        """
        Args:
            config: project configuration loader (dot-path ``get`` interface).
            model_manager: owns the trained model, its device, and — presumably —
                a ``history`` dict of per-epoch metrics (used by
                ``plot_training_curves``; confirm against the training loop).
            data_processor: provides the fitted ``scaler`` used to invert
                feature scaling back to the original value range.
            logger: logging sink for metrics and progress messages.
        """
        self.config = config
        self.model_manager = model_manager
        self.data_processor = data_processor
        self.logger = logger
        self.metric_dir = config.get('system.artifacts.metric_dir')
        self.analysis_dir = config.get('system.artifacts.analysis_dir')

        # Ensure output directories exist before any plot/CSV is written.
        os.makedirs(self.metric_dir, exist_ok=True)
        os.makedirs(self.analysis_dir, exist_ok=True)

    def evaluate(self, test_set: TensorDataset, y_true_all: np.ndarray,
                 feature_names: List[str]) -> Tuple[float, float]:
        """Evaluate the model on the test set.

        Runs a full forward pass over ``test_set``, logs accuracy, macro
        one-vs-rest AUC and a classification report, and (per config)
        produces plots and a misclassification analysis.

        Args:
            test_set: test split wrapped as (features, labels) tensors.
            y_true_all: unused here; kept for interface compatibility.
            feature_names: column names matching the feature tensor's width.

        Returns:
            Tuple of (accuracy, macro-averaged OvR AUC).
        """
        self.model_manager.model.eval()
        dataloader = DataLoader(test_set, batch_size=64, shuffle=False)

        all_preds = []
        all_labels = []
        all_probs = []
        all_features = []

        with torch.no_grad():
            for x, y in dataloader:
                x, y = x.to(self.model_manager.device), y.to(self.model_manager.device)
                logits = self.model_manager.model(x)
                probs = torch.softmax(logits, dim=1)
                preds = torch.argmax(logits, dim=1)

                # Collect per-batch results on CPU.
                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(y.cpu().numpy())
                all_probs.extend(probs.cpu().numpy())
                all_features.extend(x.cpu().numpy())

        # Basic metrics.
        accuracy = float(np.mean(np.array(all_preds) == np.array(all_labels)))
        auc = roc_auc_score(
            all_labels,
            all_probs,
            multi_class='ovr',
            average='macro'
        )

        self.logger.info(f"测试集准确率: {accuracy:.4f}")
        self.logger.info(f"测试集宏平均AUC: {auc:.4f}")

        # Classification report.
        # FIX: the original additionally built an unused output_dict=True
        # report; compute the text report once and log it.
        self.logger.info("\n分类报告:")
        self.logger.info(classification_report(
            all_labels, all_preds,
            target_names=[f'{i}' for i in np.unique(all_labels)],
            zero_division=0
        ))

        # Visualization (each plot is opt-in via config).
        if self.config.get('evaluation.visualize.enable', True):
            plots = self.config.get('evaluation.visualize.plots', [])
            if 'roc' in plots:
                self.plot_roc_curve(all_labels, all_probs, len(np.unique(all_labels)), "测试集ROC曲线")
            if 'confusion_matrix' in plots:
                self.plot_confusion_matrix(all_labels, all_preds)
            if 'metrics_curve' in plots:
                self.plot_training_curves()

        # Misclassification analysis.
        if self.config.get('evaluation.analysis.misclassification.enable', True):
            self.analyze_misclassified(
                np.array(all_features),
                np.array(all_labels),
                np.array(all_preds),
                feature_names
            )

        return accuracy, auc

    def plot_roc_curve(self, y_true: np.ndarray, y_probs: np.ndarray, n_classes: int, title: str) -> None:
        """Plot per-class one-vs-rest ROC curves and save them as a PNG.

        Args:
            y_true: integer class labels, shape (n_samples,).
            y_probs: per-class probabilities; list or array of shape
                (n_samples, n_classes).
            n_classes: kept for interface compatibility; the effective class
                count is taken from ``y_probs.shape[1]`` (matches original
                behavior).
            title: plot title, also used (space→underscore) as the file name.
        """
        from sklearn.preprocessing import label_binarize

        # FIX: the original binarized y_true and allocated fpr/tpr/roc_auc
        # dicts, then immediately recomputed n_classes and y_true_bin —
        # do the conversion and binarization exactly once.
        y_probs = np.asarray(y_probs)  # ensure 2-D (n_samples, n_classes)
        n_classes = y_probs.shape[1]
        y_true_bin = label_binarize(y_true, classes=range(n_classes))

        plt.figure(figsize=(10, 8))
        for i in range(n_classes):
            fpr, tpr, _ = roc_curve(y_true_bin[:, i], y_probs[:, i])
            class_auc = calculate_auc(fpr, tpr)
            plt.plot(fpr, tpr, lw=2, label=f'类别 {i} (AUC = {class_auc:.3f})')

        # Chance-level baseline.
        plt.plot([0, 1], [0, 1], 'k--', lw=2)

        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('假正例率 (FPR)')
        plt.ylabel('真正例率 (TPR)')
        plt.title(title)
        plt.legend(loc="lower right")
        plt.grid(True)

        save_path = os.path.join(self.metric_dir, f'{title.replace(" ", "_")}.png')
        plt.savefig(save_path)
        plt.close()
        self.logger.info(f"ROC曲线已保存至: {save_path}")

    def plot_confusion_matrix(self, y_true: np.ndarray, y_pred: np.ndarray) -> None:
        """Plot the confusion matrix and save it as a PNG."""
        cm = confusion_matrix(y_true, y_pred)
        disp = ConfusionMatrixDisplay(
            confusion_matrix=cm,
            display_labels=[f'{i}' for i in np.unique(y_true)]
        )

        # FIX: the original called plt.figure() and then disp.plot() without
        # an axes — disp.plot() creates its OWN figure when ax is None, so the
        # first (empty) figure was leaked. Create the figure once and hand its
        # axes to the display.
        fig, ax = plt.subplots(figsize=(10, 8))
        disp.plot(cmap=plt.cm.Blues, ax=ax)
        ax.set_title('混淆矩阵')
        save_path = os.path.join(self.metric_dir, 'confusion_matrix.png')
        fig.savefig(save_path)
        plt.close(fig)
        self.logger.info(f"混淆矩阵已保存至: {save_path}")

    def plot_training_curves(self) -> None:
        """Plot accuracy, loss and AUC curves from the training history."""
        history = self.model_manager.history

        # Accuracy curve.
        self._plot_metric_curve(
            history['train_acc'],
            history['val_acc'],
            '准确率',
            '训练准确率',
            '验证准确率'
        )

        # Loss curve.
        self._plot_metric_curve(
            history['train_loss'],
            history['val_loss'],
            '损失'
        )

        # AUC curve (training AUC is not tracked, so a zero series is plotted
        # as a placeholder alongside the validation AUC).
        self._plot_metric_curve(
            [0] * len(history['val_auc']),
            history['val_auc'],
            'AUC'
        )

    def _plot_metric_curve(self, train_vals: List[float], val_vals: List[float],
                           metric_name: str, train_label: str = None, val_label: str = None) -> None:
        """Plot one train-vs-validation metric curve and save it as a PNG.

        Args:
            train_vals: per-epoch training values.
            val_vals: per-epoch validation values.
            metric_name: display name; also used in the output file name.
            train_label: legend label for the training series
                (defaults to '训练' + metric_name).
            val_label: legend label for the validation series
                (defaults to '验证' + metric_name).
        """
        plt.figure(figsize=(10, 6))
        train_label = train_label or f'训练{metric_name}'
        val_label = val_label or f'验证{metric_name}'

        plt.plot(train_vals, label=train_label)
        plt.plot(val_vals, label=val_label)
        plt.title(f'训练和验证{metric_name}曲线')
        plt.xlabel('轮次')
        plt.ylabel(metric_name)
        plt.legend()
        plt.grid(True)

        save_path = os.path.join(self.metric_dir, f'{metric_name}_curve.png')
        plt.savefig(save_path)
        plt.close()

    def analyze_misclassified(self, features: np.ndarray, true_labels: np.ndarray,
                              pred_labels: np.ndarray, feature_names: List[str]) -> None:
        """Analyze misclassified samples: log overall stats and per-pattern
        (true→predicted) counts, then run a detailed feature analysis for
        every pattern with at least ``min_samples`` occurrences.
        """
        self.logger.info("\n===== 误分类样本分析 =====")

        # FIX: guard against an empty label array — the original divided by
        # zero when computing the misclassification rate.
        total = len(true_labels)
        if total == 0:
            self.logger.info("总样本数: 0, 误分类样本数: 0, 误分类率: 0.0000")
            return

        # FIX: the original also sliced out mis_features/mis_true/mis_pred
        # here but never used them — dead code removed.
        is_misclassified = (true_labels != pred_labels)
        mis_count = int(np.sum(is_misclassified))
        self.logger.info(f"总样本数: {total}, 误分类样本数: {mis_count}, 误分类率: {mis_count / total:.4f}")

        # Count each (true class → predicted class) confusion pattern.
        unique_classes = np.unique(true_labels)
        patterns = {}
        for t_cls in unique_classes:
            for p_cls in unique_classes:
                if t_cls != p_cls:
                    count = np.sum((true_labels == t_cls) & (pred_labels == p_cls))
                    if count > 0:
                        patterns[(t_cls, p_cls)] = count

        self.logger.info("\n误分类模式（真实类别→预测类别）:")
        for (t, p), cnt in patterns.items():
            self.logger.info(f"类别 {t} → 类别 {p}: {cnt} 个样本 ({cnt / np.sum(true_labels == t) * 100:.1f}%)")

        # Detailed analysis only for patterns large enough to be meaningful.
        min_samples = self.config.get('evaluation.analysis.misclassification.min_samples', 5)
        for (t_cls, p_cls), cnt in patterns.items():
            if cnt < min_samples:
                continue

            self._analyze_pattern(t_cls, p_cls, features, true_labels, pred_labels, feature_names)

    def _analyze_pattern(self, true_cls: int, pred_cls: int, features: np.ndarray,
                         true_labels: np.ndarray, pred_labels: np.ndarray, feature_names: List[str]) -> None:
        """Analyze one confusion pattern (``true_cls`` predicted as ``pred_cls``):
        compare feature statistics of the misclassified samples against the
        correctly classified samples of both classes, save a CSV, and plot
        feature distributions.
        """
        # Samples exhibiting this confusion pattern.
        mask = (true_labels == true_cls) & (pred_labels == pred_cls)
        pattern_features = features[mask]

        # Correctly classified reference samples for both classes.
        true_correct_mask = (true_labels == true_cls) & (pred_labels == true_cls)
        pred_correct_mask = (true_labels == pred_cls) & (pred_labels == pred_cls)

        true_correct_feats = features[true_correct_mask]
        pred_correct_feats = features[pred_correct_mask]

        # Invert scaling so statistics are reported in the original units.
        pattern_orig = self.data_processor.scaler.inverse_transform(pattern_features)
        true_orig = self.data_processor.scaler.inverse_transform(true_correct_feats) if len(
            true_correct_feats) > 0 else None
        pred_orig = self.data_processor.scaler.inverse_transform(pred_correct_feats) if len(
            pred_correct_feats) > 0 else None

        # Per-feature difference analysis.
        analysis = []
        for i, fname in enumerate(feature_names):
            # Statistics of the misclassified samples.
            mis_mean = np.mean(pattern_orig[:, i])
            mis_std = np.std(pattern_orig[:, i])

            # Statistics of the correctly classified reference samples
            # (0 when a class has no correct samples, matching original behavior).
            true_mean = np.mean(true_orig[:, i]) if true_orig is not None and len(true_orig) > 0 else 0
            pred_mean = np.mean(pred_orig[:, i]) if pred_orig is not None and len(pred_orig) > 0 else 0

            # Absolute distance of the misclassified mean to each class mean.
            diff_true = abs(mis_mean - true_mean)
            diff_pred = abs(mis_mean - pred_mean)

            analysis.append({
                '特征名称': fname,
                '误分类均值': mis_mean,
                '误分类标准差': mis_std,
                f'正确类别{true_cls}均值': true_mean,
                f'正确类别{pred_cls}均值': pred_mean,
                f'与类别{true_cls}差异': diff_true,
                f'与类别{pred_cls}差异': diff_pred,
                '更接近类别': pred_cls if diff_pred < diff_true else true_cls
            })

        # Persist the analysis as CSV, sorted by distance to the true class.
        df = pd.DataFrame(analysis)
        df = df.sort_values(f'与类别{true_cls}差异', ascending=False)
        save_path = os.path.join(self.analysis_dir, f'misclass_{true_cls}_to_{pred_cls}.csv')
        df.to_csv(save_path, index=False)
        self.logger.info(f"类别 {true_cls}→{pred_cls} 分析结果已保存至: {save_path}")

        # Visual comparison of feature distributions.
        self._plot_feature_distributions(
            pattern_orig, true_orig, pred_orig,
            feature_names, true_cls, pred_cls
        )

    def _plot_feature_distributions(self, mis_features: np.ndarray, true_features: np.ndarray,
                                    pred_features: np.ndarray, feature_names: List[str],
                                    true_cls: int, pred_cls: int) -> None:
        """Plot distribution histograms (at most the first 3 features) comparing
        misclassified samples against correctly classified samples of both classes.
        ``true_features`` / ``pred_features`` may be None when a class had no
        correctly classified samples.
        """
        n_feats = min(3, len(feature_names))
        plt.figure(figsize=(15, 5 * n_feats))

        for i in range(n_feats):
            plt.subplot(n_feats, 1, i + 1)

            if true_features is not None and len(true_features) > 0:
                sns.histplot(
                    true_features[:, i], kde=True,
                    label=f'正确分类的类别{true_cls}',
                    color='green', alpha=0.5
                )

            if pred_features is not None and len(pred_features) > 0:
                sns.histplot(
                    pred_features[:, i], kde=True,
                    label=f'正确分类的类别{pred_cls}',
                    color='blue', alpha=0.5
                )

            sns.histplot(
                mis_features[:, i], kde=True,
                label=f'被误分为{pred_cls}的类别{true_cls}',
                color='red', alpha=0.5
            )

            plt.title(f'特征 "{feature_names[i]}" 的分布对比')
            plt.xlabel(feature_names[i])
            plt.ylabel('频率')
            plt.legend()
            plt.grid(True, alpha=0.3)

        plt.tight_layout()
        save_path = os.path.join(self.analysis_dir, f'feat_dist_{true_cls}_to_{pred_cls}.png')
        plt.savefig(save_path)
        plt.close()