import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score,
                             roc_auc_score, roc_curve, confusion_matrix, classification_report)

# Configure CJK-capable fonts so the Chinese axis labels and titles render,
# and keep the minus sign displayable with these fonts.
plt.rcParams.update({
    "font.family": ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"],
    "axes.unicode_minus": False,
})

def _evaluate_models(all_models, X_test, y_test):
    """Compute classification metrics for every trained model.

    Parameters
    ----------
    all_models : dict
        Maps model name -> info dict containing at least a fitted
        estimator under the ``'model'`` key.
    X_test, y_test
        Held-out features and binary labels.

    Returns
    -------
    dict
        Maps model name -> metrics dict (accuracy, precision, recall, f1,
        auc, confusion matrix, text classification report, plus the raw
        predictions/probabilities/labels used to compute them).
    """
    evaluation_results = {}
    for name, model_info in all_models.items():
        model = model_info['model']
        y_pred = model.predict(X_test)
        # Probability of the positive class (column 1) — required for ROC/AUC.
        y_pred_proba = model.predict_proba(X_test)[:, 1]
        evaluation_results[name] = {
            'accuracy': accuracy_score(y_test, y_pred),
            'precision': precision_score(y_test, y_pred),
            'recall': recall_score(y_test, y_pred),
            'f1': f1_score(y_test, y_pred),
            'auc': roc_auc_score(y_test, y_pred_proba),
            'confusion_matrix': confusion_matrix(y_test, y_pred),
            'classification_report': classification_report(y_test, y_pred),
            # Keep raw arrays so downstream plotting needs no re-prediction.
            'y_pred': y_pred,
            'y_pred_proba': y_pred_proba,
            'y_test': y_test,
        }
    return evaluation_results


def _plot_roc_curves(evaluation_results):
    """Overlay the ROC curve of every evaluated model on one figure."""
    plt.figure(figsize=(10, 8))
    for name, metrics in evaluation_results.items():
        fpr, tpr, _ = roc_curve(metrics['y_test'], metrics['y_pred_proba'])
        plt.plot(fpr, tpr, label=f"{name} (AUC = {metrics['auc']:.4f})")
    # Diagonal = random-guess baseline.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('假阳性率')
    plt.ylabel('真阳性率')
    plt.title('各模型ROC曲线')
    plt.legend(loc="lower right")
    plt.savefig('ROC曲线.png', dpi=300, bbox_inches='tight')
    plt.show()


def _plot_confusion_matrix(model_name, cm):
    """Render a heatmap of *cm* (2x2 confusion matrix) for *model_name*."""
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=['正常', '异常'],
                yticklabels=['正常', '异常'])
    plt.title(f'{model_name}混淆矩阵')
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.savefig('混淆矩阵.png', dpi=300, bbox_inches='tight')
    plt.show()


def _plot_feature_importance(model_name, model, feature_names, top_n=15):
    """Bar chart of the top-*top_n* feature importances.

    Silently skips models that do not expose ``feature_importances_``
    (e.g. non-tree estimators), matching the original conditional.
    """
    if not hasattr(model, 'feature_importances_'):
        return
    importances = model.feature_importances_
    # Indices of features sorted by importance, descending.
    indices = np.argsort(importances)[::-1]
    n_show = min(top_n, len(importances))  # hoisted: computed once, used thrice
    plt.figure(figsize=(12, 8))
    plt.bar(range(n_show), importances[indices[:n_show]])
    plt.xticks(range(n_show),
               [feature_names[i] for i in indices[:n_show]],
               rotation=90)
    plt.title(f'{model_name}特征重要性')
    plt.tight_layout()
    plt.savefig('特征重要性.png', dpi=300, bbox_inches='tight')
    plt.show()


def _write_report(evaluation_results):
    """Write a human-readable per-model evaluation report (UTF-8 text)."""
    with open('模型评估报告.txt', 'w', encoding='utf-8') as f:
        f.write("=== 模型评估报告 ===\n\n")
        for name, metrics in evaluation_results.items():
            f.write(f"--- {name} ---\n")
            f.write(f"准确率: {metrics['accuracy']:.4f}\n")
            f.write(f"精确率: {metrics['precision']:.4f}\n")
            f.write(f"召回率: {metrics['recall']:.4f}\n")
            f.write(f"F1分数: {metrics['f1']:.4f}\n")
            f.write(f"AUC: {metrics['auc']:.4f}\n\n")
            f.write("混淆矩阵:\n")
            f.write(f"{metrics['confusion_matrix']}\n\n")
            f.write("分类报告:\n")
            f.write(f"{metrics['classification_report']}\n\n")


def main():
    """Load trained models and test data, evaluate every model, and emit
    the pickled results, plots (ROC, confusion matrix, feature importance),
    and a text report."""
    # 1. Load models and data.
    # Raw string: '\数' / '\训' in a normal string are invalid escape
    # sequences (SyntaxWarning today, an error in future Python).
    # NOTE(security): pickle.load is only safe because this file is
    # produced by our own training script — never load untrusted pickles.
    with open(r'D:\数学建模代码\训练好的模型.pkl', 'rb') as f:
        model_data = pickle.load(f)

    all_models = model_data['all_models']
    best_model_name = model_data['best_model_name']
    best_model = model_data['best_model']
    feature_names = model_data['feature_names']
    X_test = model_data['X_test']
    y_test = model_data['y_test']

    # 2. Compute evaluation metrics for every model.
    evaluation_results = _evaluate_models(all_models, X_test, y_test)

    # Persist the raw evaluation results for later reuse.
    with open('模型评估结果.pkl', 'wb') as f:
        pickle.dump(evaluation_results, f)

    # 3. Visualize and report.
    _plot_roc_curves(evaluation_results)
    _plot_confusion_matrix(best_model_name,
                           evaluation_results[best_model_name]['confusion_matrix'])
    _plot_feature_importance(best_model_name, best_model, feature_names)
    _write_report(evaluation_results)

    print("模型评估完成，已生成评估报告和可视化图表")

# Run the evaluation pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
