# utils/evaluator.py
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report, precision_recall_curve, roc_curve, auc
from config import Config


class ModelEvaluator:
    """Model evaluator.

    Plots training curves and confusion matrices, and prints per-class and
    fall-detection metrics. Relies on ``Config`` for:

    - ``NUM_CLASSES``: number of activity classes,
    - ``ACTIVITY_LABELS``: class index -> human-readable name,
    - ``ACTIVITY_MAPPING``: activity name -> class index (must contain
      'falling').
    """

    def __init__(self):
        self.config = Config()
        # Use matplotlib's default style with a seaborn palette for all plots.
        plt.style.use('default')
        sns.set_palette("husl")

    def plot_training_history(self, history):
        """Plot accuracy and loss curves from a Keras-style History object.

        Args:
            history: object exposing a ``history`` dict with at least
                'accuracy' and 'loss'; validation curves are drawn only when
                'val_accuracy' / 'val_loss' are present.
        """
        fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(15, 5))

        # One panel per metric: (train key, val key, axis, title, y-label,
        # train legend, val legend). Keeps both panels styled identically.
        panels = [
            ('accuracy', 'val_accuracy', ax_acc, 'Model Accuracy', 'Accuracy',
             'Training Accuracy', 'Validation Accuracy'),
            ('loss', 'val_loss', ax_loss, 'Model Loss', 'Loss',
             'Training Loss', 'Validation Loss'),
        ]
        for key, val_key, ax, title, ylabel, train_label, val_label in panels:
            ax.plot(history.history[key], label=train_label, linewidth=2)
            if val_key in history.history:
                ax.plot(history.history[val_key], label=val_label, linewidth=2)
            ax.set_title(title, fontsize=14)
            ax.set_xlabel('Epoch')
            ax.set_ylabel(ylabel)
            ax.legend()
            ax.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    def plot_confusion_matrix(self, y_true, y_pred, normalize=False):
        """Plot a confusion matrix and print a classification report.

        Args:
            y_true: 1-D integer array of ground-truth class indices.
            y_pred: 1-D integer array of predicted class indices.
            normalize: if True, normalize each row by its true-class count.
        """
        num_classes = self.config.NUM_CLASSES

        # Keep only pairs whose labels lie inside [0, NUM_CLASSES).
        # Fix: the original only checked the upper bound, so negative labels
        # would slip through and corrupt the matrix.
        valid_indices = ((y_true >= 0) & (y_true < num_classes) &
                         (y_pred >= 0) & (y_pred < num_classes))
        y_true_valid = y_true[valid_indices]
        y_pred_valid = y_pred[valid_indices]

        if len(y_true_valid) == 0:
            print("没有有效的预测结果可绘制混淆矩阵")
            return

        cm = confusion_matrix(y_true_valid, y_pred_valid, labels=range(num_classes))

        if normalize:
            # Fix: a class absent from y_true_valid has row sum 0; dividing
            # by it produced NaN rows. Divide by 1 instead so such rows
            # render as zeros.
            row_sums = cm.sum(axis=1, keepdims=True)
            cm = cm.astype('float') / np.where(row_sums == 0, 1, row_sums)
            fmt = '.2f'
        else:
            fmt = 'd'

        class_names = [self.config.ACTIVITY_LABELS[i] for i in range(num_classes)]

        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt=fmt, cmap='Blues',
                    xticklabels=class_names,
                    yticklabels=class_names)
        plt.title('Confusion Matrix', fontsize=16)
        plt.ylabel('True Label')
        plt.xlabel('Predicted Label')
        plt.xticks(rotation=45)
        plt.yticks(rotation=0)
        plt.tight_layout()
        plt.show()

        # Per-class precision/recall/F1 summary on the same filtered labels.
        print("\nClassification Report:")
        print(classification_report(y_true_valid, y_pred_valid,
                                    target_names=class_names,
                                    zero_division=0))

    def analyze_predictions(self, y_true, y_pred, y_pred_proba=None):
        """Print per-class accuracy and a dedicated fall-detection report.

        Args:
            y_true: 1-D integer array of ground-truth class indices.
            y_pred: 1-D integer array of predicted class indices.
            y_pred_proba: unused; kept for backward interface compatibility.

        Returns:
            dict with:
                'per_class_accuracy': class index -> accuracy, for every
                    class that appears in ``y_true``;
                'fall': dict of counts / precision / recall / f1 for the
                    'falling' class, or None when ``y_true`` contains no
                    fall samples.
            (Generalization: the original printed and returned None; callers
            that ignore the return value are unaffected.)
        """
        print("\n预测结果分析:")

        # Per-class accuracy, reported only for classes present in y_true.
        per_class_accuracy = {}
        for i in range(self.config.NUM_CLASSES):
            class_mask = y_true == i
            if np.sum(class_mask) > 0:
                acc = float(np.mean(y_pred[class_mask] == i))
                per_class_accuracy[i] = acc
                print(f"{self.config.ACTIVITY_LABELS[i]}: 准确率 = {acc:.3f}")

        # Fall detection treated as a binary problem (falling vs. rest).
        fall_idx = self.config.ACTIVITY_MAPPING['falling']
        fall_metrics = None

        if np.sum(y_true == fall_idx) > 0:
            # True positives: predicted fall and actually fall.
            tp = int(np.sum((y_pred == fall_idx) & (y_true == fall_idx)))
            # False positives: predicted fall but actually not fall.
            fp = int(np.sum((y_pred == fall_idx) & (y_true != fall_idx)))
            # False negatives: actually fall but predicted otherwise.
            fn = int(np.sum((y_pred != fall_idx) & (y_true == fall_idx)))

            # Guard each ratio against a zero denominator.
            precision = tp / (tp + fp) if (tp + fp) > 0 else 0
            recall = tp / (tp + fn) if (tp + fn) > 0 else 0
            f1 = (2 * (precision * recall) / (precision + recall)
                  if (precision + recall) > 0 else 0)

            fall_metrics = {
                'true_positives': tp,
                'false_positives': fp,
                'false_negatives': fn,
                'precision': precision,
                'recall': recall,
                'f1': f1,
            }

            print(f"\n摔倒检测专项分析:")
            print(f"真正例: {tp}")
            print(f"假正例: {fp}")
            print(f"假反例: {fn}")
            print(f"精确率: {precision:.3f}")
            print(f"召回率: {recall:.3f}")
            print(f"F1分数: {f1:.3f}")
        else:
            print("\n测试集中没有摔倒样本")

        return {'per_class_accuracy': per_class_accuracy, 'fall': fall_metrics}