import torch
import numpy as np
import yaml
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from models.resnet_se import ResNetSE
import os


def load_config(path="configs/default.yaml"):
    """Load and parse a YAML configuration file.

    Args:
        path: Path to the YAML file. Defaults to the project config, so
            existing no-argument callers keep working unchanged.

    Returns:
        The parsed YAML document (a dict for this project's config).
    """
    # Explicit UTF-8 so non-ASCII config values don't depend on the OS locale.
    with open(path, encoding="utf-8") as f:
        return yaml.safe_load(f)


# The 91 herb class labels (pinyin), one per test-set folder.
# NOTE(review): the position of each name defines its label index everywhere
# in this script (report rows, confusion-matrix axes, error log). This order
# must match the ordering used at training time — ImageFolder sorts folder
# names alphabetically, and this list appears to be alphabetical, but confirm
# against the training pipeline before trusting the metrics.
class_names = [
    'aiye', 'ajiao', 'baibiandou', 'baibu', 'baifan', 'baihe', 'baihuasheshecao',
    'baikou', 'baimaogen', 'baishao', 'baitouweng', 'baizhu', 'baiziren', 'bajitian',
    'banlangen', 'banxia', 'beishashenkuai', 'beishashentiao', 'biejia', 'cangzhu',
    'caoguo', 'caokou', 'cebaiye', 'chaihu', 'chantui', 'chenpi', 'chenxiang',
    'chishao', 'chishizhi', 'chongcao', 'chuanshanjia', 'chuanxinlian', 'cishi',
    'dafupi', 'dangshen', 'danshen', 'daqingye', 'daxueteng', 'digupi', 'dilong',
    'diyu', 'duzhong', 'fangfeng', 'foshou', 'fuling', 'fupenzi', 'fuzi', 'gancao',
    'ganjiang', 'gegen', 'gouqizi', 'gouteng', 'guanzhong', 'guya', 'hehuanpi',
    'heshouwu', 'honghua', 'hongkou', 'houpu', 'huaihua', 'huangbo', 'huangjing',
    'huangqin', 'huomaren', 'huzhang', 'jiangcan', 'jianghuang', 'jineijin',
    'jingjie', 'jinqiancao', 'jinyinhua', 'jixueteng', 'juemingzi', 'kushen',
    'laifuzi', 'lianqiao', 'lianzixin', 'lingzhi', 'lizhihe', 'longgu', 'lulutong',
    'luohanguo', 'luoshiteng', 'maidong', 'maiya', 'mohanlian', 'mudanpi', 'muli',
    'muxiang', 'niuxi', 'nvzhenzi'
]


def evaluate_model(model, test_loader, device):
    """Run *model* over *test_loader* and collect per-sample results.

    Args:
        model: Trained classifier producing per-class logits.
        test_loader: DataLoader yielding (images, labels) batches.
        device: Device to run inference on.

    Returns:
        Tuple of numpy arrays: (true labels, predicted labels,
        softmax probabilities of shape [n_samples, n_classes]).
    """
    model.eval()
    collected_labels, collected_preds, collected_probs = [], [], []

    with torch.no_grad():
        for batch_images, batch_labels in tqdm(test_loader, desc="评估进度"):
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)

            logits = model(batch_images)
            batch_probs = torch.softmax(logits, dim=1)
            _, batch_preds = torch.max(batch_probs, 1)

            collected_labels.extend(batch_labels.cpu().numpy())
            collected_preds.extend(batch_preds.cpu().numpy())
            collected_probs.extend(batch_probs.cpu().numpy())

    return (np.array(collected_labels),
            np.array(collected_preds),
            np.array(collected_probs))


def generate_confusion_matrix(cm, output_path="confusion_matrix.png"):
    """Render the confusion matrix *cm* as an annotated heatmap and save it.

    Args:
        cm: Square integer matrix indexed by `class_names` order.
        output_path: Destination image file.
    """
    # NOTE(review): the Chinese title/axis labels render correctly only if a
    # CJK-capable font is configured in matplotlib — confirm the environment.
    plt.figure(figsize=(40, 35))
    sns.set(font_scale=1.2)
    ax = sns.heatmap(
        cm,
        annot=True,
        fmt="d",
        cmap="Blues",
        xticklabels=class_names,
        yticklabels=class_names,
        annot_kws={"size": 8},
    )
    ax.set_title("混淆矩阵", fontsize=24)
    ax.set_xlabel("预测标签", fontsize=18)
    ax.set_ylabel("真实标签", fontsize=18)
    plt.xticks(rotation=90, fontsize=8)
    plt.yticks(fontsize=8)
    plt.tight_layout()
    plt.savefig(output_path)
    plt.close()


def main():
    """Evaluate the trained ResNet-SE checkpoint on the held-out test set.

    Prints a per-class classification report and macro ROC-AUC, saves the
    confusion-matrix heatmap, and logs up to 100 misclassified samples.
    """
    config = load_config()
    device = torch.device(config['training']['device'] if torch.cuda.is_available() else "cpu")

    # Preprocessing must match what was used at training time.
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    # Load the test set.
    test_dataset = datasets.ImageFolder(
        root="data/tcm_images/test",
        transform=transform
    )

    # Force label indices to follow class_names so report rows, matrix axes
    # and predictions all share one index space.
    class_to_idx = {name: i for i, name in enumerate(class_names)}
    test_dataset.class_to_idx = class_to_idx
    test_dataset.samples = [(p, class_to_idx[os.path.basename(os.path.dirname(p))])
                            for p, _ in test_dataset.samples]
    # Fix: keep the parallel `targets` list consistent with the remapped
    # samples (ImageFolder maintains both; leaving them out of sync would
    # break anything that reads dataset.targets).
    test_dataset.targets = [label for _, label in test_dataset.samples]

    test_loader = DataLoader(
        test_dataset,
        batch_size=32,
        shuffle=False,  # order must be stable so indices map back to samples
        num_workers=4
    )

    # Initialize the model and load trained weights.
    model = ResNetSE(num_classes=len(class_names))
    model.load_state_dict(torch.load("tcm_resnet_se.pth", map_location=device))
    model = model.to(device)

    # Run evaluation.
    true_labels, pred_labels, pred_probs = evaluate_model(model, test_loader, device)

    # Classification metrics.
    print("\n========== 分类报告 ==========")
    print(classification_report(true_labels, pred_labels, target_names=class_names, digits=4))

    # Fix: macro one-vs-rest AUC was never computed even though the
    # probabilities (and the roc_auc_score import) were already in place.
    try:
        auc = roc_auc_score(true_labels, pred_probs, multi_class="ovr")
        print(f"Macro ROC-AUC (OVR): {auc:.4f}")
    except ValueError as e:
        # roc_auc_score raises when a class is absent from the test set.
        print(f"ROC-AUC unavailable: {e}")

    # Fix: generate_confusion_matrix was defined but never called, so the
    # heatmap was never produced.
    cm = confusion_matrix(true_labels, pred_labels)
    generate_confusion_matrix(cm)

    # Save misclassified samples.
    error_indices = np.where(true_labels != pred_labels)[0]
    print(f"\n错误预测样本数: {len(error_indices)}/{len(true_labels)}")
    # Fix: explicit UTF-8 — the file contains Chinese text and the default
    # encoding is locale-dependent (would corrupt/fail on some systems).
    with open("error_predictions.txt", "w", encoding="utf-8") as f:
        for idx in error_indices[:100]:  # save at most 100 error samples
            img_path = test_dataset.samples[idx][0]
            true_name = class_names[true_labels[idx]]
            pred_name = class_names[pred_labels[idx]]
            f.write(f"{img_path} | 真实: {true_name} | 预测: {pred_name}\n")


if __name__ == "__main__":
    main()