import os
import torch
import torch.nn as nn

from tqdm import tqdm
import torch.backends.cudnn as cudnn
from utils.prepdat import prepare_data
import matplotlib.pyplot as plt
import time
import pandas as pd

from models.resnet import ResNet18
from models.densenet import densenet_cifar

from utils.bypass_bn import disable_running_stats, enable_running_stats
from utils.getOptimizer import get_optimizer

def train_and_validate(
    model = 'ResNet18', # ResNet18, DenseNet121
    optimizer = 'SGD', # SGD, Adam, SAM
    lr = 1e-3, # 1e-2, 1e-3, 1e-4
    weight_decay = 5e-4, # 0, 5e-4
    data_set = 'cifar10',  # cifar10, cifar100
    batch_size = 128,
    epoch = 220,
    key = 'default',  # tag used to name the saved checkpoint and the training-curve outputs
    alpha = 1.0
    ):
    """Train `model` on `data_set`, evaluate after every epoch, and persist results.

    Side effects:
      - saves the best-accuracy checkpoint to ./checkpoint/{key}.pth
      - saves a loss/accuracy curve plot to results/{key}.png
      - saves the per-epoch metrics as a pandas pickle to results/{key}.pkl

    Parameters:
      model: architecture name, 'ResNet18' or 'DenseNet121' (anything else raises ValueError).
      optimizer: optimizer name forwarded to get_optimizer; when it is a SAM-family
          optimizer, get_optimizer returns is_sam=True and the two-step update is used.
      lr / weight_decay: forwarded to get_optimizer.
      data_set: 'cifar10' (10 classes), 'cifar100' (100), otherwise 200 classes are
          assumed — presumably tiny-imagenet; TODO confirm with prepare_data.
      batch_size: training batch size (test loader is fixed at 100 below).
      epoch: total number of training epochs (also the cosine schedule's T_max).
      key: filename stem for checkpoint/plot/pickle outputs.
      alpha: extra hyperparameter forwarded to get_optimizer (used by PSAM sweeps).

    NOTE(review): this function reads the module-level global `device`, which is only
    defined inside the `if __name__ == "__main__"` block — importing this module and
    calling train_and_validate directly would raise NameError; verify before reuse.
    """
    
    trainloader, testloader = prepare_data(
    data_set=data_set,
    data_root="./data",
    train_batch_size=batch_size,
    test_batch_size=100,
    num_workers=8,
    pin_memory=True
    )
    
    # Build the model
    print('==> Building model..')
    num_classes = 10 if data_set == 'cifar10' else 100 if data_set == 'cifar100' else 200
    if model == 'ResNet18':
        net = ResNet18(num_classes).to(device)
    elif model == 'DenseNet121':
        net = densenet_cifar(num_classes).to(device)
    else:
        raise ValueError('Invalid model name')
    cudnn.benchmark = True
    
    # Define the loss function, optimizer, and learning-rate scheduler.
    # NOTE: `optimizer` (the name string) is rebound here to the optimizer object.
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer, is_sam = get_optimizer(optimizer, net, lr, weight_decay, alpha)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=epoch)
    
    best_acc = 0  # best test accuracy
    
    # Per-epoch metric histories (appended to by train()/test(), plotted below)
    train_losses = []
    train_accuracies = []
    test_losses = []
    test_accuracies = []

    def train(epoch):
        # Run one training epoch; appends epoch loss/accuracy to the history lists.
        # NOTE(review): `nonlocal best_acc` is declared but best_acc is never
        # assigned here — only test() updates it; the declaration is harmless.
        nonlocal best_acc
        net.train()
        train_loss = 0
        correct = 0
        total = 0

        # Progress bar over the training batches
        pbar = tqdm(trainloader, desc=f'{key}: Epoch {epoch}')
        for batch_idx, (inputs, targets) in enumerate(pbar):
            inputs, targets = inputs.to(device, non_blocking=True), targets.to(device, non_blocking=True)
            
            if not is_sam:
                optimizer.zero_grad()
                outputs = net(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.step()
                
            else:
                # ===== SAM optimizer: double forward-backward pass =====
                # First forward-backward pass (computes the ascent step)
                optimizer.zero_grad()
                enable_running_stats(net)  # allow BatchNorm running stats to update
                outputs = net(inputs)
                loss = criterion(outputs, targets)
                loss.backward()
                optimizer.first_step(zero_grad=True)
                
                # Second forward-backward pass (at the perturbed weights)
                disable_running_stats(net)  # freeze BatchNorm running stats
                outputs2 = net(inputs)
                loss2 = criterion(outputs2, targets)
                loss2.backward()
                optimizer.second_step(zero_grad=True)

            # Metrics use the first pass's loss/outputs in the SAM branch
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            pbar.set_postfix({
                'Loss': '%.3f' % (train_loss/(batch_idx+1)),
                'Acc': '%.3f%%' % (100.*correct/total)
            })
                
        # Record this epoch's training metrics
        epoch_loss = train_loss / len(trainloader)
        epoch_acc = 100. * correct / total
        train_losses.append(epoch_loss)
        train_accuracies.append(epoch_acc)
        

    def test(epoch):
        # Evaluate on the test set; appends metrics and checkpoints on a new best.
        nonlocal best_acc
        net.eval()
        test_loss = 0
        correct = 0
        total = 0

        # Progress bar over the test batches
        pbar = tqdm(testloader, desc=f'{key}: Test')
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(pbar):
                inputs, targets = inputs.to(device, non_blocking=True), targets.to(device, non_blocking=True)
                outputs = net(inputs)
                loss = criterion(outputs, targets)

                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

                pbar.set_postfix({
                    'Loss': '%.3f' % (test_loss/(batch_idx+1)),
                    'Acc': '%.3f%%' % (100.*correct/total)
                })
                    
        # Record this epoch's test metrics
        epoch_loss = test_loss / len(testloader)
        epoch_acc = 100. * correct / total
        test_losses.append(epoch_loss)
        test_accuracies.append(epoch_acc)

        # Checkpoint the model whenever test accuracy improves
        acc = 100.*correct/total
        if acc > best_acc:
            best_acc = acc
            print(f'Updated best_acc: {best_acc}, saving model...')
            state = {
                'net': net.state_dict(),
                'acc': acc,
                'epoch': epoch,
            }
            if not os.path.isdir('checkpoint'):
                os.mkdir('checkpoint')
            torch.save(state, f'./checkpoint/{key}.pth')

    # Train/test loop.
    # NOTE(review): the loop variable below shadows the `epoch` parameter
    # (total epoch count), which is preserved in `epochs` first — confusing
    # but functionally correct.
    epochs = epoch
    training_time = 0
    for epoch in range(epochs):
        start_time = time.time()
        train(epoch)
        end_time = time.time()
        # Only training time is accumulated; evaluation time is excluded
        training_time += end_time - start_time
        test(epoch)
        scheduler.step()
    
    
    print(f'{key}\nbest test accuracy: {best_acc:.3f}%\ntraining time: {training_time/60:.1f} minutes')

    # Make sure the results directory exists
    results_dir = 'results'
    os.makedirs(results_dir, exist_ok=True)
    
    # One figure with twin y-axes: accuracy on the left, loss on the right
    fig, ax1 = plt.subplots(figsize=(10, 6))
    ax2 = ax1.twinx()
    
    epochs_range = range(1, epochs+1)
    
    # Plot train/test accuracy on the left axis
    ax1.plot(epochs_range, train_accuracies, label='Train Accuracy', color='tab:blue')
    ax1.plot(epochs_range, test_accuracies, label='Test Accuracy', color='tab:orange')
    ax1.set_xlabel('Epoch')
    ax1.set_ylabel('Accuracy (%)', color='tab:blue')
    ax1.tick_params(axis='y', labelcolor='tab:blue')
    
    # Plot train/test loss on the right axis
    ax2.plot(epochs_range, train_losses, label='Train Loss', color='tab:green', linestyle='--')
    ax2.plot(epochs_range, test_losses, label='Test Loss', color='tab:red', linestyle='--')
    ax2.set_ylabel('Loss', color='tab:red')
    ax2.tick_params(axis='y', labelcolor='tab:red')
    
    # Title and a merged legend for both axes
    plt.title(f'{key} - best test accuracy: {best_acc:.3f}%')
    lines_1, labels_1 = ax1.get_legend_handles_labels()
    lines_2, labels_2 = ax2.get_legend_handles_labels()
    plt.legend(lines_1 + lines_2, labels_1 + labels_2, loc='upper left')
    
    # Fix y-axis ranges (loss ceiling depends on the dataset's class count)
    ax1.set_ylim(0, 100)
    ax2.set_ylim(0, 2.5 if data_set == 'cifar10' else 4.5 if data_set == 'cifar100' else 5.5)
    
    # Fix the x-axis range — hard-coded to 250 so plots from different runs line up
    ax1.set_xlim(0, 250)
    
    # Tidy up the layout
    plt.tight_layout()
    
    # Save the figure
    save_path = os.path.join(results_dir, f"{key}.png")
    plt.savefig(save_path, dpi=600)
    plt.close()
    print(f"Saved plot to {save_path}")
    
    # Save the per-epoch metrics to a pickle file
    df = pd.DataFrame({
        'train_losses': train_losses,
        'train_accuracies': train_accuracies,
        'test_losses': test_losses,
        'test_accuracies': test_accuracies
    })
    df.to_pickle(os.path.join(results_dir, f"{key}.pkl"))
    print(f"Saved metrics to {os.path.join(results_dir, f'{key}.pkl')}")
    

if __name__ == "__main__":
    # NOTE: `device` is read as a module-level global by train_and_validate,
    # so it must be defined before any training run starts.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print(f"Using {device}")

    # Hyperparameter grid for the full experiment sweep
    models = ['ResNet18', 'DenseNet121']
    optimizers = ['PSAM', 'SGD', 'Adam', 'SGDM', 'SAM', 'ASAM']
    lrs = [1e-2, 1e-3, 1e-4]
    data_sets = ['cifar10', 'cifar100']
    weight_decay = [5e-4, 0]

    # Build the run table: key -> kwargs for train_and_validate.
    # BUGFIX: the PSAM inner `for` was missing its trailing colon (SyntaxError);
    # the add-then-pop dance for PSAM is also replaced by an explicit branch.
    params = {}
    for model in models:
        for optimizer in optimizers:
            for lr in lrs:
                for data_set in data_sets:
                    for wd in weight_decay:
                        key = f"{model}_{optimizer}_{lr}_{wd}_{data_set}"
                        # Skip runs whose results (metrics + plot) already exist
                        if os.path.exists(f'results/{key}.pkl') and os.path.exists(f'results/{key}.png'):
                            print(f"Skipping {key}, already exists.")
                            continue
                        # Prune combinations excluded from the sweep
                        if optimizer == 'SGDM' and lr == 1e-4:
                            continue
                        if optimizer == 'Adam' and lr == 1e-2:
                            continue
                        if optimizer == 'PSAM':
                            # PSAM additionally sweeps alpha: expand the base key
                            # into one run per alpha value, e.g.
                            # ResNet18_PSAM_0.001_... -> ResNet18_PSAM_0.5_0.001_...
                            for alpha in [0.2, 0.5, 0.8, 1.2, 1.5, 1.8]:
                                new_key = key.split('PSAM')[0] + f'PSAM_{alpha}' + key.split('PSAM')[1]
                                if not os.path.exists(f'results/{new_key}.pkl') or not os.path.exists(f'results/{new_key}.png'):
                                    params[new_key] = {'model': model, 'optimizer': optimizer, 'lr': lr, 'data_set': data_set, 'weight_decay': wd, 'alpha': alpha}
                        else:
                            params[key] = {
                                'model': model,
                                'optimizer': optimizer,
                                'lr': lr,
                                'data_set': data_set,
                                'weight_decay': wd,
                            }

    # Execute every pending run
    for key, value in params.items():
        train_and_validate(**value, key=key)