# Train CIFAR-10 with LeNet5, ViT
# reference: PyTorch Training a Classifier

import time
from argparse import ArgumentParser
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms, datasets
from model import LeNet5, ViT

# Fix PyTorch's RNG seed so weight initialization and data shuffling
# are reproducible across runs.
torch.manual_seed(1)


# The CIFAR-10 dataset: https://www.cs.toronto.edu/~kriz/cifar.html
# 32x32 colour images in 10 classes
# There are 50000 training images and 10000 test images.


def load_cifar10_data(batch_size: int):
    """Build the CIFAR-10 train/test DataLoaders.

    Images are converted to tensors and normalized per channel from
    [0, 1] to [-1, 1] via (x - 0.5) / 0.5. The dataset is downloaded
    into ./data when it is not already present.

    Args:
        batch_size: mini-batch size for both loaders.

    Returns:
        A (trainloader, testloader) pair; only the training split is shuffled.
    """
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    loaders = []
    for is_train in (True, False):
        split = datasets.CIFAR10(root='./data', train=is_train,
                                 download=True, transform=preprocess)
        loaders.append(
            torch.utils.data.DataLoader(split, batch_size=batch_size,
                                        shuffle=is_train, num_workers=0))
    trainloader, testloader = loaders
    return trainloader, testloader


# CIFAR-10 label set: index i of `classes` is the human-readable name for
# integer label i as produced by torchvision's CIFAR10 dataset.
num_classes = 10
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')


def train(model, trainloader, epochs, criterion=None, optimizer=None):
    """Run the training loop, updating `model` in place.

    Args:
        model: network to train.
        trainloader: iterable of (inputs, labels) mini-batches.
        epochs: number of full passes over `trainloader`.
        criterion: loss function; defaults to the module-level `criterion`
            created by the __main__ block (kept for backward compatibility).
        optimizer: optimizer over `model`'s parameters; defaults to the
            module-level `optimizer` for the same reason.
    """
    # Backward-compatible fallback: the original implementation read these
    # from module globals defined in the __main__ block.
    if criterion is None:
        criterion = globals()["criterion"]
    if optimizer is None:
        optimizer = globals()["optimizer"]

    for epoch in range(epochs):
        model.train()  # enable training-mode behavior (dropout, batch-norm)
        start = time.time()
        running_loss = 0.0
        for batch_idx, (inputs, labels) in enumerate(trainloader):
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            # Report a running average every 2000 mini-batches.
            if batch_idx % 2000 == 1999:
                # BUG FIX: progress used `batch_idx` (0-based) instead of the
                # number of batches completed, understating the percentage.
                print(f'[epoch-{epoch + 1}, batch-{batch_idx + 1:5d} '
                      f'{100. * (batch_idx + 1) / len(trainloader):.2f}%] '
                      f'loss={running_loss / 2000:.4f}')
                running_loss = 0.0

        print(f'Training epoch-{epoch+1}: time ={time.time()-start: .2f}s')


def test(model, testloader):
    """Evaluate `model` on the test split and print overall and per-class accuracy.

    Reads the module-level `criterion` and `classes`. Runs under
    `torch.no_grad()` with the model in eval mode.
    """
    model.eval()  # disable training-mode behavior (dropout, batch-norm)
    test_loss = 0.0
    num_correct = 0
    correct_pred = {classname: 0 for classname in classes}
    total_pred = {classname: 0 for classname in classes}
    with torch.no_grad():
        for (inputs, labels) in testloader:
            outputs = model(inputs)
            test_loss += criterion(outputs, labels).item()
            preds = outputs.argmax(dim=1)
            # BUG FIX: .item() so `num_correct` accumulates as a plain int
            # rather than a 0-dim tensor (which prints as "tensor(n)").
            num_correct += (preds == labels).sum().item()
            for label, prediction in zip(labels, preds):
                if label == prediction:
                    correct_pred[classes[label]] += 1
                total_pred[classes[label]] += 1

    # BUG FIX: `criterion` returns the per-batch *mean* loss, so the sum must
    # be averaged over the number of batches, not the number of samples.
    test_loss /= len(testloader)
    print(f'\nTest: Average loss: {test_loss:.4f}, '
          f'Accuracy: {num_correct}/{len(testloader.dataset)} '
          f'({100. * num_correct / len(testloader.dataset):.2f}%)\n')

    # print accuracy for each class
    for classname, correct_count in correct_pred.items():
        # Guard against a class with no samples (avoids ZeroDivisionError).
        if total_pred[classname] == 0:
            continue
        accuracy = 100 * float(correct_count) / total_pred[classname]
        print(f'Accuracy for class: {classname:5s} is {accuracy:.1f}%')


def infer(model_file="./lenet5_cifar10.pt"):
    """Rebuild LeNet5, load trained weights, and return the eval-mode model.

    BUG FIX: the original created and loaded the model but discarded it
    (no return), making the function a no-op for callers. It also relied on
    a global `model_file` defined only in the __main__ block; the path is
    now a parameter whose default matches the path used when saving.

    Args:
        model_file: path to a state-dict checkpoint saved by this script.

    Returns:
        The loaded LeNet5 model, switched to eval mode for inference.
    """
    model = LeNet5(num_classes)
    model.load_state_dict(torch.load(model_file))
    model.eval()
    return model


if __name__ == '__main__':
    # --- command line ---
    parser = ArgumentParser(description="Train CIFAR-10 with LeNet5, ViT")
    parser.add_argument("-e", "--epochs", type=int, default=1,
                        help="training epochs (default: 1)")
    parser.add_argument("-bs", "--batch-size", type=int, default=4,
                        help="training batch size (default: 4)")
    cli_args = parser.parse_args()

    # `epochs`/`batch_size` names are embedded in the f-string via `{name=}`.
    epochs, batch_size = cli_args.epochs, cli_args.batch_size
    print(f"Training CIFAR-10: {epochs=} {batch_size=}")

    # --- data ---
    train_loader, test_loader = load_cifar10_data(batch_size)

    # --- model ---
    net = LeNet5(num_classes)
    total_params = sum(param.numel() for param in net.parameters())
    print(f"Number of parameters: {total_params}")

    # Kept at module level on purpose: train()/test() read these as globals.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    # --- train & eval ---
    train(net, train_loader, epochs)
    test(net, test_loader)

    # --- persist trained weights (module-level: infer() reads `model_file`) ---
    model_file = "./lenet5_cifar10.pt"
    torch.save(net.state_dict(), model_file)
