# test.py - 模型评估测试模块
import torch
from MyDate import MyDataset
from torch.utils.data import DataLoader
from nete import Model
from transformers import BertTokenizer
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from sklearn.metrics import confusion_matrix, classification_report
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import numpy as np
import os

# ===== Configure matplotlib to render Chinese text =====
matplotlib.rcParams['font.family'] = 'SimHei'  # SimHei font supplies CJK glyphs
matplotlib.rcParams['axes.unicode_minus'] = False  # keep the minus sign rendering correctly with a CJK font

# Select the compute device (GPU if available, otherwise CPU)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the vocabulary/tokenizer from a local snapshot of bert-base-chinese
token = BertTokenizer.from_pretrained(
    r"D:\computer_soft\Microsoft_VS_Code\python_project\my_app\AI大模型应用开发\聚客AI\第五期\L2\day05-自定义微调训练BERT模型效果测试\demo_05\model\bert-base-chinese\models--bert-base-chinese\snapshots\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f")


# 将传入的字符串进行编码
def collate_fn(data):
    """Batch-encode a list of (text, label) pairs for BERT.

    Args:
        data: list of (sentence, label) tuples as yielded by the Dataset.

    Returns:
        Tuple of (input_ids, attention_mask, token_type_ids, labels) tensors;
        token tensors are padded/truncated to a fixed length of 128.
    """
    sents = [item[0] for item in data]
    labels = [item[1] for item in data]
    # Tokenize the whole batch at once; fixed-length padding keeps tensor shapes uniform.
    # NOTE: do not reassign the `data` parameter — keep the encoded result separate.
    encoded = token.batch_encode_plus(
        batch_text_or_text_pairs=sents,
        truncation=True,
        max_length=128,     # capped at 128 tokens (BERT itself supports up to 512)
        padding="max_length",
        return_tensors="pt",
        return_length=True
    )
    input_ids = encoded["input_ids"]
    attention_mask = encoded["attention_mask"]
    token_type_ids = encoded["token_type_ids"]
    labels = torch.LongTensor(labels)
    return input_ids, attention_mask, token_type_ids, labels


def evaluate_model(model, test_loader, device):
    """Run inference over test_loader and compute classification metrics.

    Args:
        model: trained classifier; forward(input_ids, attention_mask, token_type_ids) -> logits.
        test_loader: DataLoader yielding (input_ids, attention_mask, token_type_ids, labels)
            batches (labels stay on CPU; only model inputs are moved to `device`).
        device: torch.device to run inference on.

    Returns:
        dict with accuracy, macro/weighted precision, recall and F1, the raw
        confusion matrix (ndarray) and a formatted classification report (str).
    """
    model.eval()
    all_preds, all_labels = [], []

    # Disable autograd for the entire loop: inference only, saves memory and time.
    with torch.no_grad():
        for input_ids, attention_mask, token_type_ids, labels in test_loader:
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)
            token_type_ids = token_type_ids.to(device)

            outputs = model(input_ids, attention_mask, token_type_ids)
            preds = torch.argmax(outputs, dim=1)

            all_preds.extend(preds.cpu().numpy())
            # Labels are only used for CPU-side metric computation; no device round-trip needed.
            all_labels.extend(labels.numpy())

    # zero_division=0 yields the same value sklearn's default produces for classes
    # absent from the predictions, but without emitting UndefinedMetricWarning.
    metrics = {
        'accuracy': accuracy_score(all_labels, all_preds),
        'precision_macro': precision_score(all_labels, all_preds, average='macro', zero_division=0),
        'recall_macro': recall_score(all_labels, all_preds, average='macro', zero_division=0),
        'f1_macro': f1_score(all_labels, all_preds, average='macro', zero_division=0),
        'precision_weighted': precision_score(all_labels, all_preds, average='weighted', zero_division=0),
        'recall_weighted': recall_score(all_labels, all_preds, average='weighted', zero_division=0),
        'f1_weighted': f1_score(all_labels, all_preds, average='weighted', zero_division=0),
        'confusion_matrix': confusion_matrix(all_labels, all_preds),
        'classification_report': classification_report(all_labels, all_preds, digits=4)
    }
    return metrics


def plot_confusion_matrix(cm, class_names, save_path=None):
    """Render the confusion matrix as an annotated heatmap.

    Args:
        cm: 2-D array of counts, shape (n_classes, n_classes).
        class_names: tick labels, ordered to match cm's rows/columns.
        save_path: if given, the figure is written there before being shown.
    """
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=class_names, yticklabels=class_names)
    plt.xlabel('预测标签')
    plt.ylabel('真实标签')
    plt.title('混淆矩阵')

    if save_path:
        plt.savefig(save_path, bbox_inches='tight')
        print(f"混淆矩阵已保存至: {save_path}")
    plt.show()
    # Close the figure so repeated calls don't accumulate open figures / leak memory.
    plt.close()


def save_metrics_to_file(metrics, save_path):
    """Write a human-readable evaluation report (UTF-8 text) to save_path.

    Args:
        metrics: dict produced by evaluate_model (scalar metrics, report
            string and the confusion-matrix ndarray).
        save_path: destination text-file path.
    """
    # Assemble all textual parts first, then emit them in a single writelines call.
    report_parts = [
        "模型评估报告\n",
        "=" * 50 + "\n",
        f"准确率 (Accuracy): {metrics['accuracy']:.4f}\n\n",
        "宏平均指标 (Macro-average):\n",
        f"  精确率 (Precision): {metrics['precision_macro']:.4f}\n",
        f"  召回率 (Recall): {metrics['recall_macro']:.4f}\n",
        f"  F1分数 (F1 Score): {metrics['f1_macro']:.4f}\n\n",
        "加权平均指标 (Weighted-average):\n",
        f"  精确率 (Precision): {metrics['precision_weighted']:.4f}\n",
        f"  召回率 (Recall): {metrics['recall_weighted']:.4f}\n",
        f"  F1分数 (F1 Score): {metrics['f1_weighted']:.4f}\n\n",
        "分类报告 (Classification Report):\n",
        metrics['classification_report'],
        "\n\n混淆矩阵 (Confusion Matrix):\n",
    ]
    with open(save_path, 'w', encoding='utf-8') as report:
        report.writelines(report_parts)
        # savetxt appends one space-separated integer row per matrix row.
        np.savetxt(report, metrics['confusion_matrix'], fmt='%d')

    print(f"评估报告已保存至: {save_path}")


if __name__ == '__main__':
    # Build the test split and its loader (no shuffling: order is irrelevant for metrics).
    test_dataset = MyDataset("test")
    test_loader = DataLoader(
        dataset=test_dataset,
        batch_size=100,
        shuffle=False,
        drop_last=False,
        collate_fn=collate_fn
    )

    print(f"使用设备: {DEVICE}")
    model = Model().to(DEVICE)

    model_path = "D:/computer_soft/Microsoft_VS_Code/python_project/my_app/AI大模型应用开发/聚客AI\第五期/L2/day05-自定义微调训练BERT模型效果测试/person_test3/params/best_bert.pth"
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"模型参数文件不存在: {model_path}")

    # map_location lets a checkpoint saved on GPU load on a CPU-only machine
    # (plain torch.load would raise if CUDA is unavailable).
    model.load_state_dict(torch.load(model_path, map_location=DEVICE))

    metrics = evaluate_model(model, test_loader, DEVICE)

    print("\n" + "=" * 50)
    print(f"准确率 (Accuracy): {metrics['accuracy']:.4f}")
    print("\n宏平均指标 (Macro-average):")
    print(f"  精确率 (Precision): {metrics['precision_macro']:.4f}")
    print(f"  召回率 (Recall): {metrics['recall_macro']:.4f}")
    print(f"  F1分数 (F1 Score): {metrics['f1_macro']:.4f}")

    print("\n加权平均指标 (Weighted-average):")
    print(f"  精确率 (Precision): {metrics['precision_weighted']:.4f}")
    print(f"  召回率 (Recall): {metrics['recall_weighted']:.4f}")
    print(f"  F1分数 (F1 Score): {metrics['f1_weighted']:.4f}")

    print("\n分类报告 (Classification Report):")
    print(metrics['classification_report'])

    # Replace with the actual class label names of your dataset (order must match label ids).
    class_names = ["非常差", "差", "一般", "中性", "还行", "满意", "很好", "非常好"]
    plot_confusion_matrix(metrics['confusion_matrix'], class_names, "confusion_matrix.png")

    save_metrics_to_file(metrics, "evaluation_report.txt")

    print("评估完成!")