"""
模型评估模块
负责模型性能评估和结果分析
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, f1_score,
    confusion_matrix, classification_report, roc_curve, auc
)


def evaluate_model(model, X_test, y_test):
    """
    Evaluate a fitted binary classifier on the test set.

    Parameters:
        model: fitted estimator with a ``predict`` method (and optionally
            ``predict_proba``, used for AUC)
        X_test (pd.DataFrame): test-set features
        y_test (pd.Series): test-set labels

    Returns:
        dict: ``accuracy``, ``precision``, ``recall``, ``f1`` scores, plus
        ``auc`` when the model supports probability predictions
    """
    # Predictions; positive-class probabilities only when available
    y_pred = model.predict(X_test)
    y_pred_proba = model.predict_proba(X_test)[:, 1] if hasattr(model, 'predict_proba') else None

    # zero_division=0 keeps the same score (0) when the model predicts no
    # positive samples, but suppresses sklearn's UndefinedMetricWarning
    metrics = {
        'accuracy': accuracy_score(y_test, y_pred),
        'precision': precision_score(y_test, y_pred, zero_division=0),
        'recall': recall_score(y_test, y_pred, zero_division=0),
        'f1': f1_score(y_test, y_pred, zero_division=0)
    }

    # AUC requires probability scores, not hard labels
    if y_pred_proba is not None:
        fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
        metrics['auc'] = auc(fpr, tpr)

    # Console report (user-facing strings intentionally unchanged)
    print("模型评估结果:")
    print(f"准确率: {metrics['accuracy']:.4f}")
    print(f"精确率: {metrics['precision']:.4f}")
    print(f"召回率: {metrics['recall']:.4f}")
    print(f"F1分数: {metrics['f1']:.4f}")
    if 'auc' in metrics:
        print(f"AUC: {metrics['auc']:.4f}")

    print("分类报告:")
    print(classification_report(y_test, y_pred))

    return metrics


def plot_confusion_matrix(y_true, y_pred, class_names=None, normalize=False, title=None, cmap=plt.cm.Blues):
    """
    Plot a confusion matrix as an annotated heatmap.

    Parameters:
        y_true (array): ground-truth labels
        y_pred (array): predicted labels
        class_names (list): axis tick labels; defaults to the Titanic labels
            for binary problems, class indices otherwise
        normalize (bool): if True, show per-row (true-class) proportions
        title (str): optional figure title
        cmap: matplotlib colormap

    Returns:
        matplotlib.figure.Figure: the created figure
    """
    # Raw count matrix
    cm = confusion_matrix(y_true, y_pred)

    # Row-normalize so each true class sums to 1
    if normalize:
        row_sums = cm.sum(axis=1)[:, np.newaxis]
        # Guard against all-zero rows (a class absent from y_true) to avoid
        # a divide-by-zero warning and NaNs in the heatmap
        cm = cm.astype('float') / np.where(row_sums == 0, 1, row_sums)

    if class_names is None:
        # Keep the historical Titanic defaults for the binary case, but fall
        # back to class indices so multiclass matrices still render
        if cm.shape[0] == 2:
            class_names = ['Not Survived', 'Survived']
        else:
            class_names = [str(i) for i in range(cm.shape[0])]

    fig, ax = plt.subplots(figsize=(8, 6))

    # Integer annotations for counts, two decimals for proportions
    sns.heatmap(cm, annot=True, fmt='.2f' if normalize else 'd',
                cmap=cmap, ax=ax, xticklabels=class_names, yticklabels=class_names)

    if title:
        ax.set_title(title)
    ax.set_xlabel('Predicted Label')
    ax.set_ylabel('True Label')

    plt.tight_layout()

    return fig


def plot_roc_curve(y_true, y_pred_proba, title=None):
    """
    Draw the ROC curve for a binary classifier.

    Parameters:
        y_true (array): ground-truth labels
        y_pred_proba (array): predicted probability of the positive class
        title (str): optional figure title

    Returns:
        matplotlib.figure.Figure: the created figure
    """
    # ROC points and the area under the curve
    false_pos, true_pos, _ = roc_curve(y_true, y_pred_proba)
    area = auc(false_pos, true_pos)

    fig, axis = plt.subplots(figsize=(8, 6))

    # Model curve plus the diagonal chance line for reference
    axis.plot(false_pos, true_pos, lw=2, label=f'ROC curve (area = {area:.2f})')
    axis.plot([0, 1], [0, 1], 'k--', lw=2)

    axis.set_xlim([0.0, 1.0])
    axis.set_ylim([0.0, 1.05])
    axis.set_xlabel('False Positive Rate')
    axis.set_ylabel('True Positive Rate')
    axis.set_title(title if title else 'Receiver Operating Characteristic')
    axis.legend(loc="lower right")

    plt.tight_layout()

    return fig


def plot_feature_importance(model, feature_names, top_n=10, title=None):
    """
    Visualize the most important features of a fitted model.

    Parameters:
        model: fitted estimator exposing ``feature_importances_``
        feature_names (list): names matching the training columns
        top_n (int): number of top-ranked features to display
        title (str): optional figure title

    Returns:
        matplotlib.figure.Figure: the created figure, or None when the
        model has no ``feature_importances_`` attribute
    """
    # Only tree-style models expose per-feature importances
    if not hasattr(model, 'feature_importances_'):
        print("该模型不支持特征重要性可视化")
        return None

    # Rank all features by importance and keep the strongest top_n
    ranked = (
        pd.DataFrame({'feature': feature_names,
                      'importance': model.feature_importances_})
        .sort_values('importance', ascending=False)
        .head(top_n)
    )

    fig, ax = plt.subplots(figsize=(10, 6))

    # Horizontal bars: one row per feature, ordered most to least important
    sns.barplot(x='importance', y='feature', data=ranked, ax=ax)

    ax.set_title(title if title else f'Top {top_n} Feature Importances')
    ax.set_xlabel('Importance')

    plt.tight_layout()

    return fig


def compare_models(evaluations, metric='f1'):
    """
    Compare several models on one metric with a grouped bar chart.

    Parameters:
        evaluations (dict): per-model results; each entry holds
            ``{metric: {'mean': ..., 'std': ...}}`` and optionally a
            ``'validation'`` mapping with a plain score per metric
        metric (str): metric key used for the comparison

    Returns:
        matplotlib.figure.Figure: the created figure
    """
    # Cross-validation mean and spread for every model
    names = list(evaluations.keys())
    cv_means = [evaluations[name][metric]['mean'] for name in names]
    cv_stds = [evaluations[name][metric]['std'] for name in names]

    # Validation bars are drawn only when every model reports one
    show_val = all('validation' in evaluations[name] for name in names)
    val_scores = [evaluations[name]['validation'][metric] for name in names] if show_val else None

    fig, ax = plt.subplots(figsize=(10, 6))

    positions = np.arange(len(names))
    width = 0.35

    # CV bars on the left half of each slot, with std error bars
    cv_bars = ax.bar(positions - width/2, cv_means, width, yerr=cv_stds,
                     label='Cross Validation', capsize=5)

    if show_val:
        val_bars = ax.bar(positions + width/2, val_scores, width,
                          label='Validation', capsize=5)

    ax.set_ylabel(f'{metric.capitalize()} Score')
    ax.set_title(f'Model Comparison by {metric.capitalize()}')
    ax.set_xticks(positions)
    ax.set_xticklabels(names)
    ax.legend()

    def annotate_heights(bars):
        # Print each bar's value just above its top edge
        for rect in bars:
            h = rect.get_height()
            ax.annotate(f'{h:.3f}',
                        xy=(rect.get_x() + rect.get_width() / 2, h),
                        xytext=(0, 3),
                        textcoords="offset points",
                        ha='center', va='bottom')

    annotate_heights(cv_bars)
    if show_val:
        annotate_heights(val_bars)

    plt.tight_layout()

    return fig


def analyze_errors(model, X_test, y_test, feature_names):
    """
    Inspect the test samples the model misclassified.

    Parameters:
        model: fitted classifier
        X_test (pd.DataFrame): test-set features
        y_test (pd.Series): test-set labels
        feature_names (list): columns to summarize in the error report

    Returns:
        tuple: (DataFrame of misclassified rows with added True_Label /
        Predicted_Label columns, dict of describe() tables for the false
        positives and false negatives)
    """
    predictions = model.predict(X_test)

    # Collect every misclassified row together with both labels
    wrong = predictions != y_test
    error_samples = X_test[wrong].copy()
    error_samples['True_Label'] = y_test[wrong]
    error_samples['Predicted_Label'] = predictions[wrong]

    # Split errors by direction: among errors, predicted-1 rows are false
    # positives and predicted-0 rows are false negatives
    false_positives = error_samples[error_samples['Predicted_Label'] == 1]
    false_negatives = error_samples[error_samples['Predicted_Label'] == 0]

    # Console summary (user-facing strings intentionally unchanged)
    print(f"总错误样本数: {len(error_samples)} ({len(error_samples)/len(X_test)*100:.2f}%)")
    print(f"假阳性 (预测为幸存，实际未幸存): {len(false_positives)} ({len(false_positives)/len(X_test)*100:.2f}%)")
    print(f"假阴性 (预测为未幸存，实际幸存): {len(false_negatives)} ({len(false_negatives)/len(X_test)*100:.2f}%)")

    # Per-direction feature statistics for the selected columns
    error_analysis = {
        'false_positives': false_positives[feature_names].describe(),
        'false_negatives': false_negatives[feature_names].describe()
    }

    return error_samples, error_analysis
