import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
from matplotlib import pyplot as plt
import torchvision.transforms as transforms

batch_size = 128  # mini-batch size shared by the train and test loaders
input_size = 32   # CIFAR images are 32x32

# Augmentation pipeline for training; plain resize/center-crop for evaluation.
# NOTE(review): the mean/std below are the standard ImageNet statistics,
# not CIFAR-100's own (~[0.507, 0.487, 0.441] / [0.267, 0.256, 0.276]) —
# confirm this is intentional; any trained weights depend on these values.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomHorizontalFlip(0.5),
        transforms.RandomCrop(32, 4),  # pad by 4, then take a random 32x32 crop
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

def getDataLoader(transforms=data_transforms):
    """Build the CIFAR-100 train and test DataLoaders.

    Args:
        transforms: dict with 'train' and 'test' torchvision transform
            pipelines; defaults to the module-level ``data_transforms``.
            (Name shadows the ``torchvision.transforms`` import — kept
            for backward compatibility with keyword callers.)

    Returns:
        ``(trainloader, testloader)`` tuple of ``torch.utils.data.DataLoader``.
    """
    # Bug fix: download=True is a no-op when ./data already holds the
    # dataset, but the original download=False raised a RuntimeError on
    # a fresh checkout with no data present.
    trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
            download=True, transform=transforms['train'])
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                shuffle=True, num_workers=2)

    testset = torchvision.datasets.CIFAR100(root='./data', train=False,
                download=True, transform=transforms['test'])
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                shuffle=False, num_workers=2)

    return (trainloader, testloader)

def train_one_epoch(epoch, model, trainloader, criterion, optimizer, losses, times, device, method='cnn'):
    """Run one training epoch over ``trainloader``.

    Appends the average loss over each window of ``times`` mini-batches to
    ``losses``. When ``method`` is 'cnn' the model output is used directly
    as logits; otherwise ``outputs[0]`` is used.
    """
    model.train()
    current_lr = optimizer.param_groups[0]['lr']
    print(f'Epoch {epoch + 1}, Current Learning Rate: {current_lr}')

    accumulated = 0.0
    for batch_idx, (batch_inputs, batch_labels) in enumerate(trainloader):
        batch_inputs = batch_inputs.to(device)
        batch_labels = batch_labels.to(device)

        # reset gradients from the previous step
        optimizer.zero_grad()

        # forward pass, loss, backward pass, parameter update
        outputs = model(batch_inputs)
        logits = outputs if method == 'cnn' else outputs[0]
        loss = criterion(logits, batch_labels)
        loss.backward()
        optimizer.step()

        # record the running average once per window of `times` batches
        accumulated += loss.item()
        if (batch_idx + 1) % times == 0:
            print(f'[{epoch + 1}, {batch_idx + 1:5d}] loss: {accumulated / times:.3f}')
            losses.append(accumulated / times)
            accumulated = 0.0

def eval_model(model, testloader, accuracys1, accuracys5, topk=(1,5), device='cpu', method='cnn'):
    """Evaluate top-1/top-5 accuracy and append the percentages to the lists.

    Args:
        model: network under evaluation, called as ``model(images)``.
        testloader: iterable of ``(images, labels)`` batches.
        accuracys1: output list — top-1 accuracy (percent) is appended.
        accuracys5: output list — top-5 accuracy (percent) is appended.
        topk: ranks to count; only k==1 and k==5 are accumulated.
        device: device the batches are moved to.
        method: 'cnn' uses the raw model output as logits; otherwise
            ``outputs[0]`` is used.
    """
    model.eval()
    correct_1 = 0
    correct_5 = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = model(images)
            # top-k predicted class indices, shape (batch, max(topk))
            if method == 'cnn':
                _, pred = outputs.topk(max(topk), 1, True, True)
            else:
                _, pred = outputs[0].topk(max(topk), 1, True, True)
            pred = pred.t()
            # correct[r][b] is True when rank-r prediction matches label b
            correct = pred.eq(labels.view(1, -1).expand_as(pred))

            for k in topk:
                correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True).item()
                if k == 1:
                    correct_1 += correct_k
                elif k == 5:
                    correct_5 += correct_k
            total += labels.size(0)

    # Bug fix: the original messages hard-coded "10000 test images"
    # regardless of the actual dataset size; report the real count.
    print(f'Top-1 accuracy of the network on the {total} test images: {100 * correct_1 / total:.2f} %')
    print(f'Top-5 accuracy of the network on the {total} test images: {100 * correct_5 / total:.2f} %')
    accuracys1.append(100 * correct_1 / total)
    accuracys5.append(100 * correct_5 / total)

def plot_loss(losses, accuracys1, accuracys5):
    """Render the loss, top-1 and top-5 accuracy curves side by side."""
    panels = (
        (1, losses, "every 128*125 item", "loss"),
        (2, accuracys1, "every epoch", "Top-1 accuracy"),
        (3, accuracys5, "every epoch", "Top-5 accuracy"),
    )
    plt.figure(figsize=(15, 5))
    for position, series, x_label, y_label in panels:
        plt.subplot(1, 3, position)
        plt.plot(series)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        plt.grid()
    
def save_records(losses, accuracys1, accuracys5, filename="botnet_record.txt"):
    """Write the loss and accuracy histories to a plain-text file,
    one value per line under three section headers."""
    with open(filename, "w") as out:
        out.write("Losses:\n")
        out.writelines(f"{value}\n" for value in losses)

        out.write("\nTop-1 Accuracies:\n")
        out.writelines(f"{value}\n" for value in accuracys1)

        out.write("\nTop-5 Accuracies:\n")
        out.writelines(f"{value}\n" for value in accuracys5)

def train(num_epoch, model, trainloader, testloader, criterion, optimizer, times, device, method='cnn'):
    """Full training driver.

    Trains for ``num_epoch`` epochs, evaluating after each, then plots the
    collected metrics and saves them to disk via ``save_records``.
    """
    loss_history = []
    top1_history = []
    top5_history = []
    # Cosine annealing with warm restarts, stepped once per epoch.
    scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(
        optimizer, T_0=30, T_mult=2, eta_min=0.0001)
    for epoch in range(num_epoch):
        train_one_epoch(epoch, model, trainloader, criterion, optimizer,
                        loss_history, times, device, method)
        eval_model(model, testloader, top1_history, top5_history,
                   device=device, method=method)
        scheduler.step()
    plot_loss(loss_history, top1_history, top5_history)
    save_records(loss_history, top1_history, top5_history)

if __name__ == "__main__":

    #learning_rate = 1e-3
    #learning_rate = 2e-4
    # Prefer GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Modify the line below so it instantiates your own model.
    # NOTE(review): BoTNet is neither defined nor imported in this file —
    # this raises NameError unless the definition is provided elsewhere.
    model = BoTNet()
    model = model.to(device)
    method = "cnn"

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.005, weight_decay=1e-4, momentum=0.9)
    #optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    trainLoader, testLoader = getDataLoader()

    num_epoch = 30
    # record/print the averaged loss every 125 mini-batches (16000/128)
    times = 16000 // 128
    train(num_epoch, model, trainLoader, testLoader, criterion, optimizer, times, device, method)
    #summary(model, input_size=(3, 32, 32), device=device)