# %%
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torch.nn.functional as F
# from model import GoogLeNet
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from model import GoogLeNet


def load_MNIST(batch_size=100):
    """Build train/test DataLoaders for MNIST resized to 56x56.

    Args:
        batch_size: samples per batch for both loaders (default 100,
            matching the original hard-coded value).

    Returns:
        (trainloader, testloader): torch DataLoader pair; the train loader
        shuffles, the test loader does not.

    Side effects:
        Downloads MNIST into './data' on first use.
    """
    # NOTE: two augmentations were removed as defects:
    #  - RandomHorizontalFlip mirrors digits, which changes their identity
    #    (a flipped '2' is no longer a valid '2'), hurting label consistency.
    #  - RandomGrayscale is a no-op on MNIST, which is already 1-channel.
    transform = transforms.Compose([
        transforms.Resize(56),  # upscale 28x28 so GoogLeNet's pooling stack fits
        transforms.ToTensor(),
    ])

    trainset = torchvision.datasets.MNIST(
        root='./data', train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(
        trainset, batch_size=batch_size, shuffle=True, num_workers=2)

    testset = torchvision.datasets.MNIST(
        root='./data', train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(
        testset, batch_size=batch_size, shuffle=False, num_workers=2)

    return trainloader, testloader


def train(trainloader, epochs=10, lr=0.001, momentum=0.9):
    """Train GoogLeNet on MNIST batches, checkpointing after every epoch.

    Args:
        trainloader: DataLoader yielding (inputs, labels) batches.
        epochs: number of full passes over the dataset (default 10).
        lr: SGD learning rate (default 0.001).
        momentum: SGD momentum (default 0.9).

    Side effects:
        Overwrites 'model/googlenet.pth' with the state dict each epoch.
    """
    net = GoogLeNet(num_classes=10, aux_logits=True)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
    net.train()  # ensure dropout/batchnorm are in training mode
    print("start Training")
    num_batches = len(trainloader)
    for epoch in range(epochs):
        running_loss = 0.0
        for inputs, labels in trainloader:
            # zero the parameter gradients
            optimizer.zero_grad()

            # BUG FIX: the model was built with aux_logits=True but the two
            # auxiliary outputs were discarded, so the aux classifiers got no
            # gradient at all.  Weight them 0.3 as in the GoogLeNet paper.
            outputs, aux2, aux1 = net(inputs)
            loss = (criterion(outputs, labels)
                    + 0.3 * criterion(aux2, labels)
                    + 0.3 * criterion(aux1, labels))
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        # BUG FIX: loss.item() is already a per-batch mean, so the running
        # total must be divided by the batch count, not by 60000 samples
        # (the old statistic understated the loss by the batch size).
        print('[%d, %5d] loss: %.3f' % (epoch + 1, num_batches,
                                        running_loss / num_batches))
        PATH = 'model/googlenet.pth'
        torch.save(net.state_dict(), PATH)

    print('Finished Training')


def test(PATH, testloader):
    """Evaluate a saved GoogLeNet checkpoint on the MNIST test set.

    Args:
        PATH: filesystem path of a state dict saved by train().
        testloader: DataLoader yielding (images, labels) batches.

    Prints the overall accuracy across the whole test set.
    """
    net = GoogLeNet(num_classes=10, aux_logits=True)
    net.load_state_dict(torch.load(PATH))
    # BUG FIX: the original never switched to eval mode, so dropout stayed
    # active and batchnorm used batch statistics during evaluation.
    net.eval()
    correct = 0
    total = 0
    # since we're not training, we don't need to calculate the gradients
    print("start test")
    with torch.no_grad():
        for images, labels in testloader:
            result = net(images)
            # NOTE(review): some GoogLeNet implementations return only the
            # main logits in eval mode and (main, aux2, aux1) in train mode —
            # handle both shapes rather than unconditionally unpacking three.
            outputs = result[0] if isinstance(result, tuple) else result
            # the class with the highest score is the prediction
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))

def imshow(img):
    """Show a CHW image tensor after undoing the [-1, 1] normalization."""
    unnormalized = img / 2 + 0.5     # unnormalize
    pixels = unnormalized.numpy().transpose((1, 2, 0))  # CHW -> HWC for pyplot
    plt.imshow(pixels)
    plt.show()


# def keeptrain(PATH, trainloader):
#     net = GoogLeNet(num_classes=10, aux_logits=True)
#     net.load_state_dict(torch.load(PATH))
#     criterion = nn.CrossEntropyLoss()
#     optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#     print("start Training")
#     for epoch in range(10):  # loop over the dataset multiple times
#
#         running_loss = 0.0
#         for i, data in enumerate(trainloader, 0):
#             # get the inputs; data is a list of [inputs, labels]
#             inputs, labels = data
#
#             # zero the parameter gradients
#             optimizer.zero_grad()
#
#             # forward + backward + optimize
#             outputs, aux2, aux1 = net(inputs)
#             loss = criterion(outputs, labels)
#             loss.backward()
#             optimizer.step()
#
#             # print statistics
#             running_loss += loss.item()
#
#
#         print('[%d, %5d] loss: %.3f' % (epoch + 1, 60000, running_loss / 60000))
#         running_loss = 0.0
#         PATH = 'model/model20v2.pth'
#         torch.save(net.state_dict(), PATH)
#         # test(PATH, testloader)
#
#     print('Finished Training')

if __name__ == '__main__':
    # Load the data, train from scratch, then evaluate the saved checkpoint.
    train_data, test_data = load_MNIST()
    train(train_data)
    test('model/googlenet.pth', test_data)
