# model_evaluation.py
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import json
import os
from basic_preprocessing import extract_basic_features
from improved_preprocessing import extract_detailed_features

# Configure matplotlib fonts so Chinese (CJK) labels render correctly
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False  # draw minus signs correctly under CJK fonts

def load_data():
    """Load recorded gesture sequences and extract both feature sets.

    Scans ``sequence_data/<gesture>/*.npy`` for each known gesture class and
    runs both feature extractors on every sequence found.

    Returns:
        Tuple ``(X_basic, X_improved, y, sequences)`` where the first two
        arrays hold the basic and detailed feature vectors, ``y`` holds the
        integer class labels, and ``sequences`` lists the gesture names in
        label order.
    """
    sequences = ['swipe_down', 'swipe_up', 'grab']
    X_basic, X_improved, y = [], [], []

    for label, seq_name in enumerate(sequences):
        seq_dir = f'sequence_data/{seq_name}'
        if not os.path.exists(seq_dir):
            continue  # missing class directory: simply contributes no samples
        for file in os.listdir(seq_dir):
            if not file.endswith('.npy'):
                continue
            sequence_data = np.load(f'{seq_dir}/{file}')
            X_basic.append(extract_basic_features(sequence_data))
            X_improved.append(extract_detailed_features(sequence_data))
            y.append(label)

    return np.array(X_basic), np.array(X_improved), np.array(y), sequences

def evaluate_model(model_path, X_test, y_test, sequences, model_name):
    """Load a saved Keras model and score it on a held-out test set.

    Args:
        model_path: Path to a ``.keras`` model file.
        X_test: Feature matrix for the test split.
        y_test: Integer ground-truth labels for the test split.
        sequences: Class names, in label order, for the report.
        model_name: Display name (kept for interface compatibility; not used
            inside this function).

    Returns:
        Tuple ``(accuracy, report, cm, predicted)``: scalar accuracy, the
        ``classification_report`` dict, the confusion matrix, and the
        predicted class indices.
    """
    model = tf.keras.models.load_model(model_path)

    # Class probabilities -> hard class predictions.
    probabilities = model.predict(X_test, verbose=0)
    predicted = np.argmax(probabilities, axis=1)

    accuracy = (predicted == y_test).mean()
    report = classification_report(y_test, predicted, target_names=sequences, output_dict=True)
    cm = confusion_matrix(y_test, predicted)

    return accuracy, report, cm, predicted

def plot_training_history(history, model_name):
    """Plot the accuracy and loss curves of a Keras training run.

    Saves the figure to ``<model_name>_training_history.png`` and shows it.

    Args:
        history: A Keras ``History`` object with 'accuracy'/'val_accuracy'
            and 'loss'/'val_loss' entries.
        model_name: Display name used in titles and the output filename.
    """
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))

    # (train key, val key, train label, val label, title, y-axis label)
    panels = [
        ('accuracy', 'val_accuracy', '训练准确率', '验证准确率',
         f'{model_name} - 准确率', 'Accuracy'),
        ('loss', 'val_loss', '训练损失', '验证损失',
         f'{model_name} - 损失', 'Loss'),
    ]
    for ax, (train_key, val_key, train_label, val_label, title, ylabel) in zip(axes, panels):
        ax.plot(history.history[train_key], label=train_label)
        ax.plot(history.history[val_key], label=val_label)
        ax.set_title(title)
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.legend()
        ax.grid(True)

    plt.tight_layout()
    plt.savefig(f'{model_name}_training_history.png', dpi=300, bbox_inches='tight')
    plt.show()

def plot_confusion_matrix(cm, sequences, model_name):
    """Render a labelled confusion-matrix heatmap and save it as a PNG.

    Args:
        cm: Confusion matrix (true labels in rows, predictions in columns).
        sequences: Class names used as tick labels.
        model_name: Display name used in the title and output filename.
    """
    plt.figure(figsize=(8, 6))
    ax = sns.heatmap(
        cm,
        annot=True,
        fmt='d',
        cmap='Blues',
        xticklabels=sequences,
        yticklabels=sequences,
    )
    ax.set_title(f'{model_name} - 混淆矩阵')
    ax.set_xlabel('预测标签')
    ax.set_ylabel('真实标签')
    plt.tight_layout()
    plt.savefig(f'{model_name}_confusion_matrix.png', dpi=300, bbox_inches='tight')
    plt.show()

# Classification-report keys that are summary rows rather than per-class rows.
_SUMMARY_KEYS = ('accuracy', 'macro avg', 'weighted avg')

# Gesture display names in label order; used to build performance-table keys.
_GESTURE_NAMES = ('向下滑动', '向上滑动', '抓取动作')


def _evaluate_and_plot(model_path, X_test, y_test, sequences, label):
    """Evaluate one saved model, print its metrics, plot its confusion matrix.

    Args:
        model_path: Path to the ``.keras`` model file.
        X_test / y_test: Held-out features and integer labels.
        sequences: Class names in label order.
        label: Display name used in printed headers and plot titles.

    Returns:
        ``(accuracy, report, predictions)``; ``(0, None, None)`` when the
        model file does not exist.
    """
    if not os.path.exists(model_path):
        return 0, None, None
    accuracy, report, cm, pred = evaluate_model(model_path, X_test, y_test, sequences, label)
    print(f"{label}测试准确率: {accuracy:.4f}")
    print(f"{label}分类报告:")
    print(classification_report(y_test, pred, target_names=sequences))
    plot_confusion_matrix(cm, sequences, label)
    return accuracy, report, pred


def _performance_row(model_label, accuracy, report):
    """Build one row of the performance table from a classification report.

    Returns None when the report is missing or contains fewer than three
    per-class entries (report keys may be encoded class names, so they are
    discovered positionally rather than by name).
    """
    if not report:
        return None
    class_keys = [k for k in report if k not in _SUMMARY_KEYS]
    if len(class_keys) < 3:
        return None
    row = {'模型': model_label, '总体准确率': accuracy}
    for gesture, key in zip(_GESTURE_NAMES, class_keys):
        row[f'{gesture}_精确率'] = report[key]['precision']
        row[f'{gesture}_召回率'] = report[key]['recall']
        row[f'{gesture}_F1'] = report[key]['f1-score']
    return row


def _plot_accuracy_comparison(results):
    """Draw a bar chart comparing model accuracies.

    Args:
        results: Iterable of ``(model_label, accuracy)`` pairs; entries with
            accuracy 0 (i.e. models that were never evaluated) are skipped.
    """
    shown = [(name, acc) for name, acc in results if acc > 0]
    if not shown:
        return
    models = [name for name, _ in shown]
    accuracies = [acc for _, acc in shown]

    plt.figure(figsize=(8, 6))
    bars = plt.bar(models, accuracies, color=['skyblue', 'lightcoral'])
    plt.ylabel('准确率')
    plt.title('模型准确率对比')
    plt.ylim(0, 1)

    # Annotate each bar with its numeric accuracy.
    for bar, acc in zip(bars, accuracies):
        plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.01,
                f'{acc:.4f}', ha='center', va='bottom')

    plt.tight_layout()
    plt.savefig('model_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()


def compare_models():
    """Compare the basic and improved gesture models on matching test splits.

    Loads all recorded sequences, standardizes each feature set, evaluates
    whichever saved models exist on disk, plots confusion matrices and an
    accuracy bar chart, and prints a per-class performance table.
    """
    X_basic, X_improved, y, sequences = load_data()

    if len(X_basic) == 0:
        print("未找到训练数据")
        return

    print(f"数据集大小: 基础特征={X_basic.shape}, 改进特征={X_improved.shape}, 标签={y.shape}")

    # Standardize each feature set independently.
    X_basic_scaled = StandardScaler().fit_transform(X_basic)
    X_improved_scaled = StandardScaler().fit_transform(X_improved)

    # Same random_state + stratify => both splits select identical sample
    # indices, so the two models are scored on the same test samples.
    X_basic_train, X_basic_test, y_basic_train, y_basic_test = train_test_split(
        X_basic_scaled, y, test_size=0.2, random_state=42, stratify=y
    )
    X_improved_train, X_improved_test, y_improved_train, y_improved_test = train_test_split(
        X_improved_scaled, y, test_size=0.2, random_state=42, stratify=y
    )

    # Evaluate whichever models exist (accuracy 0 / report None when absent).
    basic_accuracy, basic_report, _ = _evaluate_and_plot(
        'basic_gesture_model.keras', X_basic_test, y_basic_test, sequences, '基础模型'
    )
    improved_accuracy, improved_report, _ = _evaluate_and_plot(
        'improved_gesture_classifier.keras', X_improved_test, y_improved_test, sequences, '改进模型'
    )

    _plot_accuracy_comparison([('基础模型', basic_accuracy), ('改进模型', improved_accuracy)])

    # Assemble the per-class performance table, skipping unevaluated models.
    performance_data = [
        row for row in (
            _performance_row('基础模型', basic_accuracy, basic_report),
            _performance_row('改进模型', improved_accuracy, improved_report),
        ) if row is not None
    ]

    print("\n模型性能对比表:")
    print("-" * 80)
    for data in performance_data:
        print(f"模型: {data['模型']}")
        print(f"  总体准确率: {data['总体准确率']:.4f}")
        print(f"  向下滑动 - 精确率: {data['向下滑动_精确率']:.4f}, 召回率: {data['向下滑动_召回率']:.4f}, F1: {data['向下滑动_F1']:.4f}")
        print(f"  向上滑动 - 精确率: {data['向上滑动_精确率']:.4f}, 召回率: {data['向上滑动_召回率']:.4f}, F1: {data['向上滑动_F1']:.4f}")
        print(f"  抓取动作 - 精确率: {data['抓取动作_精确率']:.4f}, 召回率: {data['抓取动作_召回率']:.4f}, F1: {data['抓取动作_F1']:.4f}")
        print("-" * 80)

if __name__ == "__main__":
    compare_models()
