import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import CIFAR100
import torchvision.transforms as transforms
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
from BoTNet import BoTNet

# Per-epoch metric histories, appended to by _train/_eval and consumed by
# the plotting and result-saving helpers below.
train_losses = []  # average training loss per epoch
train_acc1 = []    # training top-1 accuracy (%) per epoch
train_acc5 = []    # training top-5 accuracy (%) per epoch
test_losses = []   # average test loss per epoch
test_acc1 = []     # test top-1 accuracy (%) per epoch
test_acc5 = []     # test top-5 accuracy (%) per epoch

def load_data(args):
    """Build CIFAR-100 train/test DataLoaders from the local Kaggle dataset.

    Args:
        args: parsed CLI arguments; uses ``batch_size`` and ``num_workers``.

    Returns:
        Tuple of ``(train_loader, test_loader)``.
    """
    data_path = '/kaggle/input/cifar100'
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))

    # Training pipeline: standard CIFAR augmentation (pad-and-crop + flip).
    train_dataset = CIFAR100(
        root=data_path,
        train=True,
        transform=transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        download=False,
    )
    # Test pipeline: no augmentation, just tensor conversion + normalization.
    test_dataset = CIFAR100(
        root=data_path,
        train=False,
        transform=transforms.Compose([transforms.ToTensor(), normalize]),
        download=False,
    )

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, num_workers=args.num_workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, num_workers=args.num_workers)
    return train_loader, test_loader

def _parse_metric_line(line):
    """Parse one comma-separated metric line into a list of floats.

    Values may have been written as ``tensor(...)`` reprs (top-5 accuracies
    were accumulated as tensors in older result files); the wrapper is
    stripped from every value, not just the top-5 lines as before. Empty
    tokens (e.g. a blank line) are skipped instead of raising ValueError.
    """
    values = []
    for token in line.strip().split(', '):
        token = token.replace('tensor(', '').replace(')', '')
        if token:
            values.append(float(token))
    return values


def load_results(file_path):
    """Load previously saved metrics from *file_path* into the module-level
    metric lists.

    The expected format is the one produced by ``write_results_to_txt``:
    a header line ("Train Losses:" etc.) followed by one comma-separated
    line of values.
    """
    global train_losses, train_acc1, train_acc5, test_losses, test_acc1, test_acc5

    # Map each section header to the list that receives the following line.
    targets = {
        'Train Losses:': train_losses,
        'Train Top-1 Accuracy:': train_acc1,
        'Train Top-5 Accuracy:': train_acc5,
        'Test Losses:': test_losses,
        'Test Top-1 Accuracy:': test_acc1,
        'Test Top-5 Accuracy:': test_acc5,
    }

    with open(file_path, 'r') as file:
        lines = file.readlines()

    for i, line in enumerate(lines):
        for header, bucket in targets.items():
            if line.startswith(header) and i + 1 < len(lines):
                bucket.extend(_parse_metric_line(lines[i + 1]))
                break

def save_checkpoint(best_acc, model, optimizer, args, epoch):
    """Persist the current best model/optimizer state to
    ``checkpoints/checkpoint_model_best.pth``.

    Args:
        best_acc: best test top-1 accuracy seen so far.
        model: the trained network (possibly wrapped for multi-device use).
        optimizer: optimizer whose state should be checkpointed.
        args: parsed CLI arguments; uses ``device_num``.
        epoch: epoch at which this best accuracy was reached.
    """
    print('Best Model Saving...')
    # With more than one device the raw model lives on ``model.module``
    # (parallel wrapper); unwrap so the saved weights load into a plain model.
    target = model.module if args.device_num > 1 else model

    state = {
        'model_state_dict': target.state_dict(),
        'global_epoch': epoch,
        'optimizer_state_dict': optimizer.state_dict(),
        'best_acc': best_acc,
    }
    torch.save(state, os.path.join('checkpoints', 'checkpoint_model_best.pth'))

def plot_all_results(args, save_dir='results'):
    """Draw all six train/test metric curves as subplots of a single figure.

    Each panel is also saved to its own PNG; note that ``savefig`` snapshots
    the whole figure as built so far, matching the original behaviour.
    """
    os.makedirs(save_dir, exist_ok=True)

    # (subplot position, series, legend label, y-axis label, title, file name)
    panels = [
        (1, train_losses, 'Train Loss', 'Loss', 'Training Loss', 'train_loss_plot.png'),
        (2, test_losses, 'Test Loss', 'Loss', 'Testing Loss', 'test_loss_plot.png'),
        (3, train_acc1, 'Train Acc1', 'Top-1 Accuracy (%)', 'Training Top-1 Accuracy', 'train_acc1_plot.png'),
        (4, test_acc1, 'Test Acc1', 'Top-1 Accuracy (%)', 'Testing Top-1 Accuracy', 'test_acc1_plot.png'),
        (5, train_acc5, 'Train Acc5', 'Top-5 Accuracy (%)', 'Training Top-5 Accuracy', 'train_acc5_plot.png'),
        (6, test_acc5, 'Test Acc5', 'Top-5 Accuracy (%)', 'Testing Top-5 Accuracy', 'test_acc5_plot.png'),
    ]

    epochs = range(1, args.epochs + 1)
    plt.figure(figsize=(12, 12))
    for position, series, label, ylabel, title, filename in panels:
        plt.subplot(3, 2, position)
        # Metric lists may contain CUDA tensors; move them to host for plotting.
        values = [v.cpu() if isinstance(v, torch.Tensor) else v for v in series]
        plt.plot(epochs, values, label=label)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.title(title)
        plt.legend()
        plt.savefig(os.path.join(save_dir, filename))

    plt.tight_layout()
    plt.subplots_adjust(wspace=0.2, hspace=0.5)
    plt.show()

def plot_test_results(args, save_dir='results'):
    """Plot test loss / top-1 / top-5 curves side by side and save the figure
    as ``BoTNet_result.png`` under *save_dir*."""
    os.makedirs(save_dir, exist_ok=True)

    # (series, legend label, y-axis label, title)
    panels = [
        (test_losses, 'Test Loss', 'Loss', 'Testing Loss'),
        (test_acc1, 'Test Acc1', 'Top-1 Accuracy (%)', 'Testing Top-1 Accuracy'),
        (test_acc5, 'Test Acc5', 'Top-5 Accuracy (%)', 'Testing Top-5 Accuracy'),
    ]

    epochs = range(1, args.epochs + 1)
    plt.figure(figsize=(15, 5))
    for position, (series, label, ylabel, title) in enumerate(panels, start=1):
        plt.subplot(1, 3, position)
        # Metric lists may contain CUDA tensors; move them to host first.
        values = [v.cpu() if isinstance(v, torch.Tensor) else v for v in series]
        plt.plot(epochs, values, label=label)
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.title(title)
        plt.grid()

    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'BoTNet_result.png'))
    plt.show()

def write_results_to_txt(train_losses, train_acc1, train_acc5, test_losses, test_acc1, test_acc5):
    """Write all metric histories to ``results/results.txt``.

    Each metric becomes one section: a header line followed by a
    comma-separated list of values (the format read back by ``load_results``).
    """
    os.makedirs('results', exist_ok=True)

    sections = [
        ('Train Losses', train_losses),
        ('Train Top-1 Accuracy', train_acc1),
        ('Train Top-5 Accuracy', train_acc5),
        ('Test Losses', test_losses),
        ('Test Top-1 Accuracy', test_acc1),
        ('Test Top-5 Accuracy', test_acc5),
    ]
    body = '\n\n'.join(
        '{}:\n{}'.format(title, ', '.join(str(v) for v in values))
        for title, values in sections
    )

    # Write the results to a txt file.
    with open('results/results.txt', 'w') as file:
        file.write(body)


def _train(epoch, train_loader, model, optimizer, criterion, args):
    """Run one training epoch and append epoch-level metrics to the
    module-level ``train_losses`` / ``train_acc1`` / ``train_acc5`` lists.

    Args:
        epoch: current epoch number (used for logging only).
        train_loader: iterable of ``(data, target)`` batches.
        model: the network being trained.
        optimizer: optimizer stepping the model parameters.
        criterion: classification loss, e.g. ``nn.CrossEntropyLoss``.
        args: parsed CLI arguments; uses ``cuda``, ``gradient_clip`` and
            ``print_intervals``.
    """
    model.train()

    losses = 0.
    acc1 = 0.
    acc5 = 0.
    total = 0.
    for idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()

        output = model(data)
        loss = criterion(output, target)

        # Top-1: argmax of the raw logits. Softmax is monotonic, so applying
        # it before max() (as before) was redundant work.
        pred = output.max(1)[1]
        acc1 += pred.eq(target).sum().item()

        # Top-5: count targets that appear among the 5 highest-scoring classes.
        _, pred_top5 = output.topk(5, 1, True, True)
        pred_top5 = pred_top5.t()
        correct = pred_top5.eq(target.view(1, -1).expand_as(pred_top5))
        # .item() keeps the accumulator a plain float; the previous .sum(0)
        # made acc5 a 0-dim tensor, which leaked 'tensor(...)' reprs into the
        # saved results file.
        acc5 += correct.reshape(-1).float().sum().item()

        losses += loss.item()
        total += target.size(0)

        optimizer.zero_grad()
        loss.backward()
        if args.gradient_clip > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.gradient_clip)
        optimizer.step()

        if idx % args.print_intervals == 0 and idx != 0:
            print('[Epoch: {0:4d}], Iter: {1:4d}, Loss: {2:.3f}, Acc1: {3:.3f}, Acc5: {4:.3f}'.format(
                epoch, idx, losses / (idx + 1), acc1 / total * 100., acc5 / total * 100.))

    train_losses.append(losses / len(train_loader))
    train_acc1.append(acc1 / total * 100.)
    train_acc5.append(acc5 / total * 100.)

def _eval(epoch, test_loader, model, args):
    """Evaluate *model* on the test set for one epoch.

    Appends the epoch's metrics to the module-level ``test_losses`` /
    ``test_acc1`` / ``test_acc5`` lists and returns the top-1 accuracy (%).

    Args:
        epoch: current epoch number (used for logging only).
        test_loader: iterable of ``(data, target)`` batches.
        model: the network to evaluate.
        args: parsed CLI arguments; uses ``cuda``.

    Returns:
        Top-1 accuracy over the whole test set, as a percentage.
    """
    model.eval()

    losses = 0.
    acc1 = 0.
    acc5 = 0.
    total = 0.
    with torch.no_grad():
        for data, target in test_loader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            loss = F.cross_entropy(output, target)
            losses += loss.item()

            # Top-1: argmax of the raw logits (softmax before max() was
            # redundant since softmax is monotonic).
            pred = output.max(1)[1]
            acc1 += pred.eq(target).sum().item()

            # Top-5 hits; .item() keeps the accumulator a plain float rather
            # than a 0-dim tensor (which previously leaked into the results).
            _, pred_top5 = output.topk(5, 1, True, True)
            pred_top5 = pred_top5.t()
            correct = pred_top5.eq(target.view(1, -1).expand_as(pred_top5))
            acc5 += correct.reshape(-1).float().sum().item()

            total += target.size(0)

    test_losses.append(losses / len(test_loader))
    test_acc1.append(acc1 / total * 100.)
    test_acc5.append(acc5 / total * 100.)

    print('[Epoch: {0:4d}], Test Loss: {1:.3f}, Test Acc1: {2:.3f}, Test Acc5: {3:.3f}'.format(
        epoch, test_losses[-1], test_acc1[-1], test_acc5[-1]))

    return acc1 / total * 100.

def main(args):
    """Train (or, with ``--evaluation``, only evaluate) BoTNet on CIFAR-100.

    Saves the best-accuracy checkpoint to ``checkpoints/`` and records
    per-epoch metrics in the module-level metric lists.
    """
    train_loader, test_loader = load_data(args)
    model = BoTNet()

    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          weight_decay=args.weight_decay, momentum=args.momentum)

    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')

    if args.checkpoints is not None:
        # Resume: restore weights and optimizer state, and continue from the
        # epoch after the one saved in the checkpoint (previously training
        # always restarted at epoch 1 even when resuming).
        checkpoints = torch.load(os.path.join('checkpoints', args.checkpoints))
        model.load_state_dict(checkpoints['model_state_dict'])
        optimizer.load_state_dict(checkpoints['optimizer_state_dict'])
        start_epoch = checkpoints.get('global_epoch', 0) + 1
    else:
        start_epoch = 1

    if args.cuda:
        model = model.cuda()

    if not args.evaluation:
        criterion = nn.CrossEntropyLoss()
        # Cosine annealing with warm restarts: first restart after 10 epochs,
        # period doubling each cycle.
        # NOTE(review): the scheduler state is not checkpointed, so a resumed
        # run restarts the cosine schedule from its beginning.
        lr_scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=10, T_mult=2, eta_min=0.0001)

        global_acc = 0.
        for epoch in range(start_epoch, args.epochs + 1):
            _train(epoch, train_loader, model, optimizer, criterion, args)
            best_acc = _eval(epoch, test_loader, model, args)
            # Keep only the checkpoint with the best test top-1 accuracy.
            if global_acc < best_acc:
                global_acc = best_acc
                save_checkpoint(best_acc, model, optimizer, args, epoch)

            lr_scheduler.step()
            print('Current Learning Rate: {}'.format(lr_scheduler.get_last_lr()))
    else:
        _eval(start_epoch, test_loader, model, args)

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--lr', type=float, default=0.06)
    parser.add_argument('--weight_decay', type=float, default=1e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--cuda', type=bool, default=True)
    parser.add_argument('--epochs', type=int, default=70)
    parser.add_argument('--print_intervals', type=int, default=100)
    parser.add_argument('--evaluation', type=bool, default=False)
    #加载预训练的可以用这个
    #parser.add_argument('--checkpoints', type=str, default='checkpoint_model_best.pth', help='model checkpoints path')
    parser.add_argument('--checkpoints', type=str, default=None, help='model checkpoints path')
    parser.add_argument('--device_num', type=int, default=1)
    parser.add_argument('--gradient_clip', type=float, default=2.)

    args = sys.argv
    args = [arg for arg in args if not arg.startswith("-f") and "json" not in arg]
    parsed_args = parser.parse_args(args[1:])
    main(parsed_args)
    #plot_all_results(parsed_args,save_dir='results')
    plot_test_results(parsed_args,save_dir='results')
    write_results_to_txt(train_losses, train_acc1, train_acc5, test_losses, test_acc1, test_acc5)
