# ==============================================================================
# 文件: evaluator.py
# 作用: 提供一个统一的评估函数，用于计算各种性能指标并生成可视化图表，
#      以避免在多个文件中重复编写评估代码。
# ==============================================================================
import torch
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np # Added for np.mean

import config

def evaluate(model, embeddings, true_labels, model_name):
    """
    Evaluate a trained binary classifier and save a confusion-matrix plot.

    :param model: Trained torch module; given embeddings it emits one raw
                  logit per sample (sigmoid is applied here).
    :param embeddings: Tensor of embedding vectors to evaluate on.
    :param true_labels: Tensor of binary ground-truth labels (0/1).
    :param model_name: str, model name used in printed reports and the plot filename.
    :return: dict mapping metric names to values (accuracy, precision, recall,
             F1, AUC-ROC, true/estimated priors, and their difference P-bias).
    """
    print(f"\n--- 正在评估模型: {model_name} ---")
    model.eval()  # inference mode: freeze dropout / batch-norm statistics
    model.to(config.DEVICE)

    embeddings = embeddings.to(config.DEVICE)
    # Flatten to 1-D so sklearn metrics receive consistent shapes even when
    # the labels/outputs are (N, 1); roc_auc_score rejects 2-D score arrays.
    true_labels_np = true_labels.cpu().numpy().flatten()

    with torch.no_grad():
        outputs = model(embeddings)
        probs = torch.sigmoid(outputs).cpu().numpy().flatten()
        preds = (probs > 0.5).astype(int)

    # True vs. estimated positive-class priors (mean label / mean probability).
    true_prior = np.mean(true_labels_np)
    estimated_prior = np.mean(probs)

    # Compute metrics; zero_division=0 avoids warnings when a class is never predicted.
    metrics = {
        'Accuracy': accuracy_score(true_labels_np, preds),
        'Precision': precision_score(true_labels_np, preds, zero_division=0),
        'Recall': recall_score(true_labels_np, preds, zero_division=0),
        'F1-Score': f1_score(true_labels_np, preds, zero_division=0),
        'AUC-ROC': roc_auc_score(true_labels_np, probs),
        'Estimated Prior': estimated_prior,
        'True Prior': true_prior,
        'P-bias': true_prior - estimated_prior
    }

    # Print metrics
    for name, value in metrics.items():
        print(f"  {name}: {value:.4f}")

    # Plot the confusion matrix (rows = actual class, columns = predicted class).
    cm = confusion_matrix(true_labels_np, preds)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=['Predicted Negative', 'Predicted Positive'],
                yticklabels=['Actual Negative', 'Actual Positive'])
    plt.title(f'Confusion Matrix for {model_name}')
    plt.ylabel('Actual Label')
    plt.xlabel('Predicted Label')

    # Save the figure; create the output directory first so savefig cannot
    # fail on a fresh run directory.
    plot_path = config.RESULT_DIR / "plots_and_metrics" / f"cm_{model_name.replace(' ', '_')}_{config.RUN_ID}.png"
    plot_path.parent.mkdir(parents=True, exist_ok=True)
    plt.savefig(plot_path)
    print(f"  混淆矩阵已保存到: {plot_path}")
    plt.close()  # release the figure so it is not re-displayed in notebooks

    return metrics

def evaluate_zero_shot(predictions, true_labels):
    """Evaluate hard (label-only) zero-shot predictions.

    Zero-shot classification yields no scores/probabilities, so AUC-ROC is
    reported as NaN; the estimated prior is simply the fraction of samples
    predicted positive.
    """
    print(f"\n--- 正在评估模型: Zero-Shot Classification ---")

    # Positive-class priors: actual from the labels, estimated from the predictions.
    prior_actual = np.mean(true_labels)
    prior_predicted = np.mean(predictions)

    scorers = (
        ('Accuracy', lambda y, p: accuracy_score(y, p)),
        ('Precision', lambda y, p: precision_score(y, p, zero_division=0)),
        ('Recall', lambda y, p: recall_score(y, p, zero_division=0)),
        ('F1-Score', lambda y, p: f1_score(y, p, zero_division=0)),
    )
    metrics = {label: fn(true_labels, predictions) for label, fn in scorers}
    metrics['AUC-ROC'] = float('nan')  # not computable without scores
    metrics['Estimated Prior'] = prior_predicted
    metrics['True Prior'] = prior_actual
    metrics['P-bias'] = prior_actual - prior_predicted

    for label, value in metrics.items():
        print(f"  {label}: {value:.4f}")
    return metrics

def save_summary_table(all_results):
    """Write every experiment's metrics to a single CSV summary table.

    :param all_results: dict mapping experiment/model name to its metrics dict;
                        each outer key becomes one row of the table.
    """
    summary = pd.DataFrame.from_dict(all_results, orient='index')
    summary.to_csv(config.SUMMARY_TABLE_PATH)
    print(f"\n所有实验结果汇总表已保存到: {config.SUMMARY_TABLE_PATH}")
    print(summary)