#!/usr/bin/env python3
"""
主训练脚本 - 增量学习
第一阶段：在前50类上训练
第二阶段：增量学习10个新类（每类50个样本）
"""

import torch
from torch.utils.data import DataLoader, ConcatDataset
import copy
import numpy as np

# from config import Config
from config import cfg
from dataset import CIFAR100Dataset, IncrementalCIFAR100Dataset, get_transforms
from model import VisionTransformerOOD
from trainer import Trainer, IncrementalTrainer, create_optimizer, create_scheduler
from utils import set_seed, save_checkpoint, load_checkpoint
from evaluate import evaluate_incremental_model


def train_base_model():
    """第一阶段：训练基础模型（前50类）"""
    print("\n" + "=" * 80)
    print(f"Phase 1: Training Base Model on First {cfg.NUM_KNOWN_CLASSES} Classes")
    print("=" * 80)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"Device: {device}")

    # 数据集
    print("\nLoading datasets...")
    train_transform, test_transform = get_transforms()

    train_dataset = CIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=True,
        known_classes=list(range(cfg.NUM_KNOWN_CLASSES)),
        transform=train_transform,
    )

    test_dataset = IncrementalCIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=False,
        class_range=[0, cfg.NUM_KNOWN_CLASSES],
        transform=test_transform,
    )

    train_loader = DataLoader(
        train_dataset,
        batch_size=cfg.BATCH_SIZE,
        shuffle=True,
        num_workers=cfg.NUM_WORKERS,
        pin_memory=True,
        drop_last=True,
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=cfg.BATCH_SIZE,
        shuffle=False,
        num_workers=cfg.NUM_WORKERS,
        pin_memory=True,
    )

    print(f"Train dataset: {len(train_dataset)} samples")
    print(f"Test dataset: {len(test_dataset)} samples")

    # 模型
    print("\nCreating model...")
    model = VisionTransformerOOD(
        num_classes=cfg.NUM_KNOWN_CLASSES,
        model_path=cfg.MODEL_PATH,
        freeze_layers=cfg.FREEZE_LAYERS,
    )
    model = model.to(device)

    # 优化器和调度器
    optimizer = create_optimizer(model, incremental=False)
    steps_per_epoch = len(train_loader) // cfg.GRAD_ACCUMULATION_STEPS
    total_steps = steps_per_epoch * cfg.EPOCHS
    scheduler = create_scheduler(optimizer, total_steps)

    # 训练器
    trainer = Trainer(
        model=model,
        optimizer=optimizer,
        scheduler=scheduler,
        device=device,
    )

    # 训练循环
    print("\nStarting base training...")
    best_accuracy = 0

    for epoch in range(cfg.EPOCHS):
        print(f"\nEpoch [{epoch + 1}/{cfg.EPOCHS}]")

        # 训练
        train_loss, train_acc = trainer.train_epoch(train_loader, epoch)
        print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%")

        # 评估
        if (epoch + 1) % cfg.EVAL_INTERVAL == 0:
            with torch.no_grad():
                model.eval()
                correct = 0
                total = 0

                for inputs, targets in test_loader:
                    inputs, targets = inputs.to(device), targets.to(device)
                    outputs = model(inputs)
                    _, predicted = outputs.max(1)
                    total += targets.size(0)
                    correct += predicted.eq(targets).sum().item()

                accuracy = 100.0 * correct / total
                print(f"Test Accuracy: {accuracy:.2f}%")

                if accuracy > best_accuracy:
                    best_accuracy = accuracy
                    save_checkpoint(
                        model=model,
                        optimizer=optimizer,
                        scheduler=scheduler,
                        epoch=epoch,
                        auroc=0,  # 不计算AUROC
                        accuracy=accuracy,
                        filepath=cfg.CHECKPOINT_DIR / "base_model.pth",
                    )
                    print(f"Saved best base model with accuracy: {best_accuracy:.2f}%")

    # 记录基础模型的最佳准确率
    base_accuracy = best_accuracy

    print(f"\nPhase 1 completed! Best accuracy on {cfg.NUM_KNOWN_CLASSES} classes: {best_accuracy:.2f}%")
    return model, base_accuracy


def incremental_learning(base_model: VisionTransformerOOD, base_accuracy):
    """Phase 2: incrementally learn cfg.NUM_NEW_CLASSES new classes.

    Keeps a frozen deep copy of the pre-increment model as a distillation
    teacher, expands the classifier head in place, and trains on the union
    of a few-shot new-class set and a small old-class replay buffer. The
    best overall-accuracy checkpoint is written to
    cfg.CHECKPOINT_DIR / "incremental_model.pth", and per-epoch results to
    cfg.OUTPUT_DIR / "incremental_results.txt".

    Args:
        base_model: model trained on the first cfg.NUM_KNOWN_CLASSES classes;
            mutated in place via increment_classes().
        base_accuracy: best test accuracy (%) of the base model, used to
            report forgetting (base_accuracy - old-class accuracy).

    Returns:
        The model in its final-epoch state (the best checkpoint is on disk).
    """
    print("\n" + "=" * 80)
    print(f"Phase 2: Incremental Learning of {cfg.NUM_NEW_CLASSES} New Classes")
    print("=" * 80)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Frozen copy of the old model, used as the knowledge-distillation teacher.
    old_model = copy.deepcopy(base_model)
    old_model = old_model.to(device)  # make sure the copy is on the right device
    old_model.eval()
    for param in old_model.parameters():
        param.requires_grad = False

    # Expand the classifier head to cover the new classes.
    base_model.increment_classes(cfg.NUM_NEW_CLASSES)
    base_model = base_model.to(device)

    # Datasets
    print("\nLoading incremental datasets...")
    train_transform, test_transform = get_transforms()

    # New-class data (few-shot: cfg.NEW_CLASS_SAMPLES per class).
    new_train_dataset = IncrementalCIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=True,
        class_range=[
            cfg.NUM_KNOWN_CLASSES,
            cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES,
        ],
        samples_per_class=cfg.NEW_CLASS_SAMPLES,
        transform=train_transform,
    )

    # Old-class replay buffer, split evenly across the known classes.
    old_train_dataset = IncrementalCIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=True,
        class_range=[0, cfg.NUM_KNOWN_CLASSES],
        samples_per_class=cfg.REPLAY_BUFFER_SIZE // cfg.NUM_KNOWN_CLASSES,
        transform=train_transform,
    )

    # Train jointly on replay + few-shot data.
    combined_dataset = ConcatDataset([old_train_dataset, new_train_dataset])

    train_loader = DataLoader(
        combined_dataset,
        batch_size=cfg.BATCH_SIZE,
        shuffle=True,
        num_workers=cfg.NUM_WORKERS,
        pin_memory=True,
        drop_last=True,
    )

    # Test data covering all cfg.NUM_TOTAL_CLASSES classes (old + new).
    test_dataset = IncrementalCIFAR100Dataset(
        root=cfg.DATA_ROOT,
        train=False,
        class_range=[0, cfg.NUM_TOTAL_CLASSES],
        transform=test_transform,
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=cfg.BATCH_SIZE,
        shuffle=False,
        num_workers=cfg.NUM_WORKERS,
        pin_memory=True,
    )

    print(
        f"New class samples: {len(new_train_dataset)} ({cfg.NEW_CLASS_SAMPLES} per class)"
    )
    print(f"Old class replay samples: {len(old_train_dataset)}")
    print(f"Total training samples: {len(combined_dataset)}")
    print(f"Test dataset: {len(test_dataset)} samples")

    # Show the per-class sample distribution of the new classes.
    print("\nNew class distribution:")
    for cls, count in sorted(new_train_dataset.class_counts.items()):
        print(f"  Class {cls}: {count} samples")

    # Show the incremental-learning configuration.
    print("\nIncremental learning configuration:")
    print(f"  Base learning rate: {cfg.INCREMENTAL_LR}")
    print(f"  Backbone LR: {cfg.INCREMENTAL_LR * 0.1}")
    print(f"  Classifier LR: {cfg.INCREMENTAL_LR}")
    print(f"  Classification weight: {cfg.CLASSIFICATION_WEIGHT}")
    print(f"  Distillation weight: {cfg.DISTILLATION_WEIGHT}")
    print(f"  Epochs: {cfg.INCREMENTAL_EPOCHS}")
    print(f"  Gradient accumulation: {cfg.GRAD_ACCUMULATION_STEPS}")
    print(f"  Effective batch size: {cfg.BATCH_SIZE * cfg.GRAD_ACCUMULATION_STEPS}")

    # Optimizer with the (smaller) incremental learning rates.
    optimizer = create_optimizer(base_model, incremental=True)

    # Per-epoch cosine annealing with a short linear warmup. Note: this
    # LambdaLR is stepped once per EPOCH (scheduler.step() below), so its
    # argument counts epochs, not optimizer steps.
    def warmup_cosine_annealing(epoch):
        warmup_epochs = 2
        if epoch < warmup_epochs:
            # Warmup: scale the LR factor linearly from 0.1 up to 1.0.
            return 0.1 + (1.0 - 0.1) * epoch / warmup_epochs
        else:
            # Cosine annealing from 1.0 down to 0.0 over the remaining epochs.
            # max(1, ...) guards against division by zero when
            # cfg.INCREMENTAL_EPOCHS <= warmup_epochs.
            progress = (epoch - warmup_epochs) / max(
                1, cfg.INCREMENTAL_EPOCHS - warmup_epochs
            )
            return 0.5 * (1.0 + np.cos(np.pi * progress))

    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=warmup_cosine_annealing
    )

    # Incremental trainer (handles distillation against old_model).
    trainer = IncrementalTrainer(
        model=base_model,
        optimizer=optimizer,
        scheduler=scheduler,
        device=device,
        old_model=old_model,
    )

    # Training loop.
    print("\nStarting incremental learning...")
    print(
        "Note: First 3 epochs will focus more on new classes (higher weights, less distillation)"
    )
    best_avg_accuracy = 0
    best_new_accuracy = 0
    # Defaults so the final report cannot NameError if the loop never runs.
    all_acc = 0.0
    old_acc = 0.0
    new_acc = 0.0

    for epoch in range(cfg.INCREMENTAL_EPOCHS):
        print(f"\nIncremental Epoch [{epoch + 1}/{cfg.INCREMENTAL_EPOCHS}]")
        current_lr = trainer.get_learning_rate()
        print(f"Current learning rate: {current_lr:.6f}")

        # Informational hint about the current training strategy.
        # NOTE(review): these messages should mirror the epoch-dependent
        # weighting implemented in IncrementalTrainer — confirm they match.
        if epoch < 2:
            print(
                "Strategy: Warmup phase - Focus on new classes (5x weight, 5% distillation)"
            )
        elif epoch < 4:
            print("Strategy: High focus on new classes (3x weight, 15% distillation)")
        elif epoch < 6:
            print("Strategy: Balanced learning (2x weight, 25% distillation)")
        else:
            print("Strategy: Fine-tuning (1.5x weight, 40% distillation)")

        # Train one epoch.
        train_loss, train_acc = trainer.train_incremental_epoch(
            train_loader, epoch, cfg.NUM_KNOWN_CLASSES
        )
        print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_acc:.2f}%")

        # Evaluate overall / old-class / new-class accuracy.
        all_acc, old_acc, new_acc = trainer.evaluate_incremental(
            test_loader, cfg.NUM_KNOWN_CLASSES
        )

        print("Test Results:")
        print(
            f"  Overall Accuracy ({cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES} classes): {all_acc:.2f}%"
        )
        print(f"  Old Classes Accuracy (1-{cfg.NUM_KNOWN_CLASSES}): {old_acc:.2f}%")
        print(
            f"  New Classes Accuracy ({cfg.NUM_KNOWN_CLASSES + 1}-{cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES}): {new_acc:.2f}%"
        )

        # Advance the per-epoch LR schedule.
        scheduler.step()

        # Checkpoint whenever the overall accuracy improves.
        if all_acc > best_avg_accuracy:
            best_avg_accuracy = all_acc
            save_checkpoint(
                model=base_model,
                optimizer=optimizer,
                scheduler=scheduler,
                epoch=epoch,
                auroc=0,
                accuracy=all_acc,
                filepath=cfg.CHECKPOINT_DIR / "incremental_model.pth",
            )
            print(
                f"Saved best incremental model with overall accuracy: {best_avg_accuracy:.2f}%"
            )

        if new_acc > best_new_accuracy:
            best_new_accuracy = new_acc

        # Rewrite the results file each epoch so it always holds the latest
        # detailed numbers ("w" mode intentionally overwrites).
        with open(cfg.OUTPUT_DIR / "incremental_results.txt", "w") as f:
            f.write(f"Incremental Learning Results (Epoch {epoch + 1})\n")
            f.write("=================================\n")
            f.write(
                f"Overall Accuracy ({cfg.NUM_NEW_CLASSES + cfg.NUM_KNOWN_CLASSES} classes): {all_acc:.2f}%\n"
            )
            f.write(
                f"Old Classes Accuracy (1-{cfg.NUM_KNOWN_CLASSES}): {old_acc:.2f}%\n"
            )
            f.write(
                f"New Classes Accuracy ({cfg.NUM_KNOWN_CLASSES + 1}-{cfg.NUM_KNOWN_CLASSES + cfg.NUM_NEW_CLASSES}): {new_acc:.2f}%\n"
            )
            f.write(f"Forgetting: {base_accuracy - old_acc:.2f}%\n")
            f.write(f"Best New Class Accuracy: {best_new_accuracy:.2f}%\n")

        # Early stopping: overall accuracy >= 94%, or new classes >= 90%
        # with old classes held at >= 93% (after a minimum number of epochs).
        if all_acc >= 94.0 and epoch >= 4:
            print(f"\nExcellent performance achieved! Overall accuracy: {all_acc:.2f}%")
            print(f"New classes: {new_acc:.2f}%, Old classes: {old_acc:.2f}%")
            break
        elif new_acc >= 90.0 and old_acc >= 93.0 and epoch >= 5:
            print("\nVery good performance achieved!")
            print(f"New classes: {new_acc:.2f}%, Old classes: {old_acc:.2f}%")
            break

    print("\nPhase 2 completed!")
    print("Final Results:")
    print(f"  Overall Accuracy: {best_avg_accuracy:.2f}%")
    print(f"  Best New Class Accuracy: {best_new_accuracy:.2f}%")
    print(f"  Final Old Classes: {old_acc:.2f}% (was {base_accuracy:.2f}%)")
    print(f"  Final New Classes: {new_acc:.2f}%")
    print(f"  Forgetting: {base_accuracy - old_acc:.2f}%")

    return base_model


def main():
    """Entry point: run base training (or reuse an existing checkpoint),
    then incremental learning, then the final evaluation."""
    # Fix RNG seeds for reproducibility.
    set_seed(42)

    print("=" * 80)
    print("Incremental Learning with Vision Transformer Large (ViT-L/16)")
    print(f"Phase 1: Train on {cfg.NUM_KNOWN_CLASSES} classes")
    print(
        f"Phase 2: Incrementally learn {cfg.NUM_NEW_CLASSES} new classes ({cfg.NEW_CLASS_SAMPLES} samples each)"
    )
    print("=" * 80)

    # Reuse an existing base checkpoint if one is present.
    base_model_path = cfg.CHECKPOINT_DIR / "base_model.pth"

    if base_model_path.exists():
        print(f"\nFound existing base model at {base_model_path}")
        print("Loading base model...")

        # Create the model and load the checkpointed weights into it.
        model = VisionTransformerOOD(
            num_classes=cfg.NUM_KNOWN_CLASSES,
            model_path=cfg.MODEL_PATH,
            freeze_layers=cfg.FREEZE_LAYERS,
        )
        checkpoint = load_checkpoint(str(base_model_path), model)
        # Only apply the ":.2f" format when the stored accuracy is actually
        # numeric — formatting the 'N/A' fallback would raise ValueError.
        ckpt_accuracy = checkpoint.get("accuracy")
        accuracy_str = (
            f"{ckpt_accuracy:.2f}%"
            if isinstance(ckpt_accuracy, (int, float))
            else "N/A"
        )
        print(
            f"Loaded base model from epoch {checkpoint.get('epoch', 'N/A')} "
            f"with accuracy {accuracy_str}"
        )

        # Let the user decide whether to retrain from scratch.
        response = input("\nDo you want to retrain the base model? (y/n): ").lower()
        if response == "y":
            model, base_accuracy = train_base_model()
        else:
            print("Using existing base model.")
            base_accuracy = checkpoint.get("accuracy", 0)
    else:
        # No checkpoint found: train the base model from scratch.
        model, base_accuracy = train_base_model()

    # Phase 2: incremental learning (mutates `model` in place; the best
    # checkpoint is saved to disk and evaluated below).
    incremental_learning(model, base_accuracy)

    print("\nTraining completed successfully!")
    print("\nEvaluating incremental model...")

    checkpoint_path = cfg.CHECKPOINT_DIR / "incremental_model.pth"
    accuracy, auroc = evaluate_incremental_model(checkpoint_path, phase="incremental")
    print("\nFinal Evaluation Results:")
    print(f"Classification Accuracy: {accuracy:.2f}%")
    print(f"OOD Detection AUROC: {auroc:.2f}%")

if __name__ == "__main__":
    main()
