import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
from pathlib import Path
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, f1_score, roc_auc_score,
    confusion_matrix, classification_report, mean_squared_error, mean_absolute_error, r2_score
)


class Evaluator:
    """Evaluate trained model pipelines on a held-out test set.

    Loads pickled pipelines from the configured model directory, computes
    task-appropriate metrics (regression / binary / multiclass), writes the
    metric table to CSV under the result directory, and renders a
    confusion-matrix heatmap per model for classification tasks.
    """

    def __init__(self, config, logger, task_type):
        """
        Args:
            config: configuration object exposing ``get(dotted_key)``
                (reads ``paths.result_dir``, ``paths.model_dir``,
                ``paths.eval_csv``, ``global_params.fig_dpi``).
            logger: ``logging``-style logger used for progress and errors.
            task_type: one of ``"regression"``, ``"binary"``, ``"multiclass"``.
        """
        self.config = config
        self.logger = logger
        self.task_type = task_type
        self.result_dir = Path(config.get("paths.result_dir"))
        # parents=True so a missing intermediate directory does not raise.
        self.result_dir.mkdir(parents=True, exist_ok=True)
        self.fig_dpi = config.get("global_params.fig_dpi")

        # Configure matplotlib so Chinese labels and minus signs render.
        plt.rcParams["font.family"] = ["SimHei"]
        plt.rcParams['axes.unicode_minus'] = False

    def evaluate(self, X_test, y_test, models: pd.DataFrame):
        """Evaluate every listed model and persist metrics + plots.

        Args:
            X_test: test features accepted by each pipeline's ``predict``.
            y_test: ground-truth labels (classification) or targets (regression).
            models: DataFrame with a ``"模型名称"`` column; each name maps to
                ``<model_dir>/<name>_最优模型.pkl``.

        Models that fail to load or predict are logged and skipped, so a
        single broken artifact does not abort the whole evaluation.
        """
        self.logger.info("\n====== 开始模型评估 ======")
        y_preds = {}  # model name -> predicted classes / values
        y_probs = {}  # model name -> positive-class probabilities (binary task only)

        # Hoisted out of the loop: the model directory is loop-invariant.
        model_dir = Path(self.config.get("paths.model_dir"))
        for model_name in models["模型名称"]:
            model_path = model_dir / f"{model_name}_最优模型.pkl"
            try:
                # NOTE: pickle.load is only safe on trusted, locally
                # produced artifacts — never on external input.
                with open(model_path, "rb") as f:
                    pipeline = pickle.load(f)

                y_preds[model_name] = pipeline.predict(X_test)

                # Only the binary task consumes probabilities (for AUC);
                # multiclass never reads y_probs, so store nothing for it.
                if self.task_type == "binary":
                    y_probs[model_name] = pipeline.predict_proba(X_test)[:, 1]

                self.logger.info(f"完成{model_name}预测")
            except Exception as e:
                # Log and continue with the remaining models.
                self.logger.error(f"加载{model_name}失败: {str(e)}", exc_info=True)
                continue

        eval_results = self._calculate_metrics(y_test, y_preds, y_probs)

        # Persist the metric table and echo it into the log.
        eval_path = self.result_dir.joinpath(self.config.get("paths.eval_csv"))
        eval_df = pd.DataFrame(eval_results)
        eval_df.to_csv(eval_path, index=False)
        self.logger.info(f"评估结果:\n{eval_df.to_string()}")
        self.logger.info(f"评估指标已保存至: {eval_path}")

        # Confusion matrices are meaningful only for classification tasks.
        if self.task_type != "regression":
            for model_name, y_pred in y_preds.items():
                self._plot_confusion_matrix(y_test, y_pred, model_name)

    def _calculate_metrics(self, y_true, y_preds, y_probs) -> list:
        """Compute the metric rows appropriate for ``self.task_type``.

        Args:
            y_true: ground-truth labels/targets.
            y_preds: mapping of model name -> predictions.
            y_probs: mapping of model name -> positive-class probabilities
                (populated for the binary task only).

        Returns:
            A list of dicts, one per model, each keyed by ``"模型名称"``
            plus the task's metric names; values rounded to 4 decimals.
        """
        metrics = []
        for model_name, y_pred in y_preds.items():
            res = {"模型名称": model_name}

            if self.task_type == "regression":
                res["MSE"] = round(mean_squared_error(y_true, y_pred), 4)
                res["MAE"] = round(mean_absolute_error(y_true, y_pred), 4)
                res["R2"] = round(r2_score(y_true, y_pred), 4)

            elif self.task_type == "binary":
                res["准确率"] = round(accuracy_score(y_true, y_pred), 4)
                res["精确率"] = round(precision_score(y_true, y_pred), 4)
                res["召回率"] = round(recall_score(y_true, y_pred), 4)
                res["F1分数"] = round(f1_score(y_true, y_pred), 4)
                # AUC needs probabilities, not hard predictions.
                res["AUC"] = round(roc_auc_score(y_true, y_probs[model_name]), 4)

            elif self.task_type == "multiclass":
                res["准确率"] = round(accuracy_score(y_true, y_pred), 4)
                # Macro average: every class weighted equally.
                res["精确率"] = round(precision_score(y_true, y_pred, average="macro"), 4)
                res["召回率"] = round(recall_score(y_true, y_pred, average="macro"), 4)
                res["F1分数"] = round(f1_score(y_true, y_pred, average="macro"), 4)

            metrics.append(res)
        return metrics

    def _plot_confusion_matrix(self, y_true, y_pred, model_name):
        """Render and save a confusion-matrix heatmap for one model.

        The label list is the sorted union of true and predicted classes,
        passed to ``confusion_matrix(labels=...)`` so the tick labels always
        match the matrix dimensions even when a prediction contains a class
        absent from ``y_true``. Using ``set(...)`` also works for plain
        arrays, not just pandas Series.
        """
        labels = sorted(set(y_true) | set(y_pred))
        cm = confusion_matrix(y_true, y_pred, labels=labels)
        tick_labels = [f"类别{i}" for i in labels]
        plt.figure(figsize=(8, 6))
        sns.heatmap(
            cm, annot=True, fmt="d", cmap="Blues",
            xticklabels=tick_labels,
            yticklabels=tick_labels
        )
        plt.title(f"{model_name}混淆矩阵")
        plt.xlabel("预测标签")
        plt.ylabel("实际标签")
        plt.tight_layout()

        cm_path = self.result_dir / f"{model_name}_混淆矩阵.png"
        plt.savefig(cm_path, dpi=self.fig_dpi)
        plt.close()  # release the figure so repeated calls don't accumulate memory
        self.logger.info(f"已保存混淆矩阵: {cm_path}")