if __name__ == '__main__':
    import torch
    import torchvision
    import torchvision.transforms as transforms
    import torch.nn as nn
    import torch.nn.functional as F
    from torch.utils.data import DataLoader
    import torch.optim as optim
    from conv_next import Conv_Next
    from resnet_50 import ResNet_50
    import time
    import os
    import matplotlib.pyplot as plt
    import numpy as np
    import math

    # Allow duplicate OpenMP runtimes — common workaround for the libiomp5
    # conflict when torch and matplotlib are loaded in the same process.
    os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

    # Train on GPU when available, otherwise fall back to CPU.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'Using device: {device}')

    def record_training_stats(model_name):
        """Create a fresh, empty statistics container for one training run.

        Note: ``model_name`` is accepted for call-site symmetry but is not
        used by this function.
        """
        tracked_metrics = (
            'train_loss_per_batch',
            'train_loss_per_epoch',
            'test_accuracy_per_epoch',
            'train_time_per_epoch',
            'learning_rates',
        )
        # One independent list per metric (comprehension avoids shared lists).
        return {metric: [] for metric in tracked_metrics}

    def save_training_stats(stats, model_name):
        """Persist training statistics and a 2x2 summary figure to disk.

        Writes ``{model_name}_stats.npy`` — a pickled dict, so reload it with
        ``np.load(path, allow_pickle=True).item()`` — and
        ``{model_name}_comprehensive_stats.png``.

        Args:
            stats: dict produced by ``record_training_stats`` and filled by
                ``train_model``.
            model_name: label used in filenames and plot titles.
        """
        np.save(f'{model_name}_stats.npy', stats)

        fig = plt.figure(figsize=(15, 10))

        # (1) per-epoch training loss
        plt.subplot(2, 2, 1)
        plt.plot(stats['train_loss_per_epoch'], 'b-', label='Train Loss')
        plt.title(f'{model_name} Training Loss per Epoch')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)

        # (2) per-epoch test accuracy
        plt.subplot(2, 2, 2)
        plt.plot(stats['test_accuracy_per_epoch'], 'g-', label='Test Accuracy')
        plt.title(f'{model_name} Test Accuracy per Epoch')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy (%)')
        plt.legend()
        plt.grid(True)

        # (3) learning-rate schedule (skipped if nothing was recorded)
        plt.subplot(2, 2, 3)
        if len(stats['learning_rates']) > 0:
            plt.plot(stats['learning_rates'], 'r-', label='Learning Rate')
            plt.title(f'{model_name} Learning Rate Schedule')
            plt.xlabel('Epoch')
            plt.ylabel('Learning Rate')
            plt.legend()
            plt.grid(True)

        # (4) per-batch loss, subsampled to ~100 points for readability
        plt.subplot(2, 2, 4)
        if len(stats['train_loss_per_batch']) > 100:
            step = len(stats['train_loss_per_batch']) // 100
            batch_losses = stats['train_loss_per_batch'][::step]
            plt.plot(batch_losses, 'orange', alpha=0.7, label='Train Loss per Batch')
            plt.title(f'{model_name} Training Loss per Batch (Sampled)')
            plt.xlabel('Batch (Sampled)')
            plt.ylabel('Loss')
            plt.legend()
            plt.grid(True)

        plt.tight_layout()
        plt.savefig(f'{model_name}_comprehensive_stats.png', dpi=300, bbox_inches='tight')
        # Fix: close the figure so repeated calls (one per model) do not
        # accumulate open figures and leak memory.
        plt.close(fig)

        print(f'Training statistics for {model_name} saved successfully.')

    def get_transforms():
        """Build the torchvision preprocessing pipelines for CIFAR-100.

        Returns:
            A 3-tuple ``(convnext_train, resnet_train, test)``: a heavy
            augmentation pipeline for ConvNeXt, a light classic pipeline for
            ResNet, and the shared normalization-only test pipeline.
        """
        # CIFAR-100 per-channel mean/std, shared by all three pipelines.
        mean = (0.5071, 0.4867, 0.4408)
        std = (0.2675, 0.2565, 0.2761)

        # Heavy augmentation recipe for the ConvNeXt run.
        convnext_train = transforms.Compose([
            transforms.RandomResizedCrop(32, scale=(0.75, 1.0), ratio=(0.8, 1.2)),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomRotation(20),
            transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.15),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
            transforms.RandomErasing(p=0.3, scale=(0.02, 0.4), ratio=(0.3, 3.3)),
        ])

        # Classic light augmentation for the ResNet baseline.
        resnet_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

        # Test time: tensor conversion plus normalization only.
        test_pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

        return convnext_train, resnet_train, test_pipeline

    class CosineAnnealingWarmup:
        """Linear warmup followed by cosine annealing, stepped once per epoch.

        All parameter groups of the wrapped optimizer receive the same LR,
        derived from the first group's initial LR.

        Args:
            optimizer: the wrapped optimizer.
            T_max: total number of scheduler steps (epochs).
            T_warmup: number of linear warmup steps at the start.
            eta_min: final (minimum) learning rate of the cosine phase.
            last_epoch: index of the last completed step (-1 = fresh start).
        """
        def __init__(self, optimizer, T_max, T_warmup=10, eta_min=0, last_epoch=-1):
            self.optimizer = optimizer
            self.T_max = T_max
            self.T_warmup = T_warmup
            self.eta_min = eta_min
            self.last_epoch = last_epoch
            # Snapshot the initial LRs; base_lrs[0] drives the whole schedule.
            self.base_lrs = [group['lr'] for group in optimizer.param_groups]

        def get_last_lr(self):
            """Return the LRs currently set on the optimizer (PyTorch-style API)."""
            return [group['lr'] for group in self.optimizer.param_groups]

        def step(self):
            """Advance one epoch, update every param group, return the new LR."""
            self.last_epoch += 1
            if self.last_epoch < self.T_warmup:
                # Linear warmup: ramps from base_lr/T_warmup up to base_lr.
                lr = self.base_lrs[0] * (self.last_epoch + 1) / self.T_warmup
            else:
                # Guard against T_max <= T_warmup, which would divide by zero.
                span = max(self.T_max - self.T_warmup, 1)
                progress = (self.last_epoch - self.T_warmup) / span
                lr = self.eta_min + (self.base_lrs[0] - self.eta_min) * \
                     0.5 * (1 + math.cos(math.pi * progress))

            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr
            return lr

    # ---------------------------------------------------------------
    # Hyper-parameters and checkpoint paths
    # ---------------------------------------------------------------
    batch_size = 128
    train_epochs = 50 
    r_PATH = './resnet.pth'       # best ResNet-50 weights
    cn_PATH = './conv_next.pth'   # best ConvNeXt weights

    # Each model trains with its own augmentation pipeline; the test
    # pipeline is shared.
    transform_train_convnext, transform_train_resnet, transform_test = get_transforms()

    # CIFAR-100 datasets (downloaded into ./data on first run).
    train_dataset_resnet = torchvision.datasets.CIFAR100(
        root='./data', train=True, download=True, transform=transform_train_resnet)
    train_dataset_convnext = torchvision.datasets.CIFAR100(
        root='./data', train=True, download=True, transform=transform_train_convnext)
    test_dataset = torchvision.datasets.CIFAR100(
        root='./data', train=False, download=True, transform=transform_test)

    train_loader_resnet = DataLoader(train_dataset_resnet, batch_size=batch_size, shuffle=True, num_workers=4)
    train_loader_convnext = DataLoader(train_dataset_convnext, batch_size=batch_size, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)

    # 100 class-name strings, in label order.
    classes = train_dataset_resnet.classes

    # Models under comparison (project-local definitions).
    resnet = ResNet_50(len(classes)).to(device)
    conv_next = Conv_Next(
        num_classes=len(classes),
        depths=[3, 3, 9, 3], 
        dims=[96, 192, 384, 768],
        drop_path_rate=0.15, 
        layer_scale_init_value=1e-6
    ).to(device)

    # Plain cross-entropy for ResNet; label smoothing for ConvNeXt.
    criterion_r = nn.CrossEntropyLoss()
    criterion_cn = nn.CrossEntropyLoss(label_smoothing=0.15) 

    # Classic SGD + step decay for ResNet; AdamW + warmup-cosine for ConvNeXt.
    optimizer_r = optim.SGD(resnet.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
    optimizer_cn = optim.AdamW(conv_next.parameters(), lr=4e-3, weight_decay=0.05, 
                              betas=(0.9, 0.999), eps=1e-8)

    scheduler_r = optim.lr_scheduler.MultiStepLR(optimizer_r, milestones=[30, 40, 45], gamma=0.1)
    scheduler_cn = CosineAnnealingWarmup(optimizer_cn, T_max=train_epochs, T_warmup=5, eta_min=1e-6)

    # Fresh statistics containers, one per run.
    resnet_stats = record_training_stats('ResNet-50')
    convnext_stats = record_training_stats('ConvNeXt')

    def train_model(model, train_loader, optimizer, criterion, scheduler, stats, model_name, epochs):
        """Train ``model`` for ``epochs`` epochs, evaluating after each one.

        Side effects: fills ``stats`` (per-batch/per-epoch losses, epoch
        wall-times, learning rates, test accuracies), checkpoints the
        best-accuracy weights to ``r_PATH``/``cn_PATH``, and writes the plots
        via ``save_training_stats``. Relies on module-level ``device`` and
        ``test_loader``.

        Returns:
            The best test accuracy (percent) observed across all epochs.
        """
        print(f'Begin Training: {model_name}')
        print('-' * 50)
        
        best_acc = 0.0
        
        for epoch in range(epochs):
            start_time = time.time()
            model.train()
            running_loss = 0.0   # loss over the current 200-batch logging window
            epoch_loss = 0.0     # loss accumulated over the whole epoch
            correct = 0          # hits in the current logging window
            total = 0            # samples in the current logging window
            batch_count = 0
            
            for i, data in enumerate(train_loader, 0):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)
                
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                
                # Gradient clipping is applied only to the ConvNeXt run.
                if model_name == 'ConvNeXt':
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                
                optimizer.step()
                
                running_loss += loss.item()
                epoch_loss += loss.item()
                batch_count += 1
                stats['train_loss_per_batch'].append(loss.item())
                
                _, predictions = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predictions == labels).sum().item()
                
                # Print (and reset) the windowed stats every 200 batches;
                # the 200 divisor matches the window length.
                if i % 200 == 199:
                    accuracy = 100 * correct / total
                    print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 200:.3f} | accuracy: {accuracy:.2f}%')
                    running_loss = 0.0
                    correct = 0
                    total = 0
                    
            stats['train_loss_per_epoch'].append(epoch_loss / batch_count)
            epoch_time = time.time() - start_time
            stats['train_time_per_epoch'].append(epoch_time)
            
            # The two schedulers expose different APIs: MultiStepLR.step()
            # returns None (LR read back via get_last_lr), while the custom
            # CosineAnnealingWarmup.step() returns the new LR directly.
            if model_name == 'ResNet-50':
                scheduler.step()
                current_lr = scheduler.get_last_lr()[0]
            else:  # ConvNeXt
                current_lr = scheduler.step()
            stats['learning_rates'].append(current_lr)
                    
            # ---- Per-epoch evaluation on the held-out test set ----
            model.eval()
            test_correct = 0
            test_total = 0
            test_loss = 0.0  # NOTE(review): accumulated but never reported
            with torch.no_grad():
                for data in test_loader:
                    images, labels = data
                    images, labels = images.to(device), labels.to(device)
                    outputs = model(images)
                    loss = criterion(outputs, labels)
                    test_loss += loss.item()
                    _, predictions = torch.max(outputs.data, 1)
                    test_total += labels.size(0)
                    test_correct += (predictions == labels).sum().item()
                    
            test_accuracy = 100 * test_correct / test_total
            stats['test_accuracy_per_epoch'].append(test_accuracy)
            
            # Checkpoint whenever the test accuracy improves.
            if test_accuracy > best_acc:
                best_acc = test_accuracy
                if model_name == 'ResNet-50':
                    torch.save(model.state_dict(), r_PATH)
                else:
                    torch.save(model.state_dict(), cn_PATH)
            
            print(f'Epoch {epoch+1}/{epochs} | Time: {epoch_time:.2f}s | '
                  f'Test Acc: {test_accuracy:.2f}% | Best: {best_acc:.2f}% | LR: {current_lr:.2e}')
                    
        save_training_stats(stats, model_name)            
        print(f'Finished {model_name} Training | Best Accuracy: {best_acc:.2f}%')
        print('-' * 50)
        return best_acc

    # Train both models sequentially, each with its own loader/optimizer/
    # scheduler recipe; each call returns the best test accuracy reached.
    resnet_best = train_model(resnet, train_loader_resnet, optimizer_r, criterion_r, 
                             scheduler_r, resnet_stats, 'ResNet-50', train_epochs)
    
    convnext_best = train_model(conv_next, train_loader_convnext, optimizer_cn, criterion_cn, 
                               scheduler_cn, convnext_stats, 'ConvNeXt', train_epochs)

    # Console summary of best accuracies.
    print("\n" + "="*60)
    print("FINAL RESULTS COMPARISON")
    print("="*60)
    print(f"ResNet-50 Best Accuracy:  {resnet_best:.2f}%")
    print(f"ConvNeXt Best Accuracy:   {convnext_best:.2f}%")
    print(f"Improvement: {convnext_best - resnet_best:+.2f}%")
    if convnext_best > resnet_best:
        print("✅ ConvNeXt outperforms ResNet-50!")
    else:
        print("⚠️  ConvNeXt needs more tuning")
    print("="*60)

    def evaluate_per_class(model, model_name):
        """Print and return the test-set accuracy for every CIFAR-100 class.

        Uses the module-level ``test_loader``, ``classes`` and ``device``.

        Returns:
            List of per-class accuracies (percent), in ``classes`` order.
        """
        model.eval()
        # Per-class hit and sample counters, keyed by class name.
        correct_pred = dict.fromkeys(classes, 0)
        total_pred = dict.fromkeys(classes, 0)

        with torch.no_grad():
            for batch in test_loader:
                images, labels = batch
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                _, predictions = torch.max(outputs, 1)
                for label, prediction in zip(labels, predictions):
                    name = classes[label]
                    total_pred[name] += 1
                    if label == prediction:
                        correct_pred[name] += 1

        print(f"\n-------{model_name}-------")
        class_accuracies = []
        for classname in classes:
            seen = total_pred[classname]
            accuracy = 100 * float(correct_pred[classname]) / seen if seen > 0 else 0
            class_accuracies.append(accuracy)
            print(f'Accuracy for class: {classname:15s} is {accuracy:.1f}%')

        avg_class_acc = np.mean(class_accuracies)
        print(f'Average per-class accuracy: {avg_class_acc:.2f}%')
        print("-" * (len(model_name) + 16))
        return class_accuracies

    # Reload the best checkpoints saved during training before evaluating.
    resnet.load_state_dict(torch.load(r_PATH))
    conv_next.load_state_dict(torch.load(cn_PATH))
    
    resnet_class_acc = evaluate_per_class(resnet, 'ResNet-50')
    convnext_class_acc = evaluate_per_class(conv_next, 'ConvNeXt')

    # ---- 2x2 side-by-side comparison figure ----
    plt.figure(figsize=(15, 10))
    
    # (1) training loss curves
    plt.subplot(2, 2, 1)
    plt.plot(resnet_stats['train_loss_per_epoch'], 'b-', label='ResNet-50', linewidth=2)
    plt.plot(convnext_stats['train_loss_per_epoch'], 'r-', label='ConvNeXt', linewidth=2)
    plt.title('Training Loss Comparison', fontsize=14, fontweight='bold')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # (2) test accuracy curves
    plt.subplot(2, 2, 2)
    plt.plot(resnet_stats['test_accuracy_per_epoch'], 'b-', label='ResNet-50', linewidth=2)
    plt.plot(convnext_stats['test_accuracy_per_epoch'], 'r-', label='ConvNeXt', linewidth=2)
    plt.title('Test Accuracy Comparison', fontsize=14, fontweight='bold')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # (3) LR schedules on a log scale (step decay vs warmup-cosine)
    plt.subplot(2, 2, 3)
    plt.plot(resnet_stats['learning_rates'], 'b-', label='ResNet-50', linewidth=2)
    plt.plot(convnext_stats['learning_rates'], 'r-', label='ConvNeXt', linewidth=2)
    plt.title('Learning Rate Schedules', fontsize=14, fontweight='bold')
    plt.xlabel('Epoch')
    plt.ylabel('Learning Rate')
    plt.yscale('log')
    plt.legend()
    plt.grid(True, alpha=0.3)
    
    # (4) bar chart of the final best accuracies, labelled above each bar
    plt.subplot(2, 2, 4)
    models = ['ResNet-50', 'ConvNeXt']
    accuracies = [resnet_best, convnext_best]
    colors = ['skyblue', 'lightcoral']
    bars = plt.bar(models, accuracies, color=colors, alpha=0.8, edgecolor='black')
    plt.title('Final Best Accuracy Comparison', fontsize=14, fontweight='bold')
    plt.ylabel('Accuracy (%)')
    plt.ylim(0, max(accuracies) * 1.1)
    
    for bar, acc in zip(bars, accuracies):
        plt.text(bar.get_x() + bar.get_width()/2, bar.get_height() + 0.5, 
                f'{acc:.2f}%', ha='center', va='bottom', fontweight='bold')

    plt.tight_layout()
    plt.savefig('comprehensive_model_comparison.png', dpi=300, bbox_inches='tight')
    plt.show()

    print(f"\nTraining completed! Check the generated plots for detailed analysis.")
    print(f"Model weights saved: {r_PATH}, {cn_PATH}")