import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, sampler
import torchvision.datasets as dset
import torchvision.transforms as T
import matplotlib.pyplot as plt

# The first NUM_TRAIN samples of the 50000-image CIFAR-10 training set are
# used for training; the remaining 1000 serve as the validation split.
NUM_TRAIN = 49000


def get_dataset():
    """Build CIFAR-10 train/validation/test DataLoaders.

    Training and validation share the same underlying train split: the
    first NUM_TRAIN indices are sampled for training, the remainder
    (NUM_TRAIN..50000) for validation. Downloads the data on first use.

    Returns:
        (loader_train, loader_val, loader_test) tuple of DataLoaders.
    """
    batch_size = 64
    # NOTE(review): the commonly quoted CIFAR-10 std for the green channel
    # is 0.1994, not 0.1884 — confirm this value is intentional.
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1884, 0.2010)),
    ])

    train_set = dset.CIFAR10('./datasets', train=True, download=True,
                             transform=transform)
    val_set = dset.CIFAR10('./datasets', train=True, download=True,
                           transform=transform)
    test_set = dset.CIFAR10('./datasets', train=False, download=True,
                            transform=transform)

    train_sampler = sampler.SubsetRandomSampler(range(NUM_TRAIN))
    val_sampler = sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000))

    loader_train = DataLoader(train_set, batch_size=batch_size, sampler=train_sampler)
    loader_val = DataLoader(val_set, batch_size=batch_size, sampler=val_sampler)
    loader_test = DataLoader(test_set, batch_size=batch_size)

    return loader_train, loader_val, loader_test


# Inputs are cast to float32; labels to int64 (required by F.cross_entropy).
dtype = torch.float
ltype = torch.long
# Log loss / run a validation check every `print_every` training iterations.
print_every = 100


def avaliable_device():
    """Return the preferred torch.device: CUDA when available, else CPU.

    NOTE(review): the function name has a typo ("avaliable"); it is kept
    because every caller in this file uses it.
    """
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def check_accuracy_part(loader, model):
    """Compute classification accuracy of `model` over every batch in `loader`.

    Puts the model in eval mode and leaves it there; the training loop
    re-enables train mode itself. Prints a summary line and returns the
    accuracy as a float in [0, 1].

    Args:
        loader: DataLoader whose `.dataset.train` flag distinguishes the
            validation split (train=True) from the test split.
        model: a callable network producing class scores of shape (N, C).
    """
    split = 'validation' if loader.dataset.train else 'test'
    # Fixed typo in the original message ("accuarcy").
    print('Checking accuracy on the %s set' % split)

    num_correct, num_samples = 0, 0
    device = avaliable_device()

    # set model to evaluation mode (disable dropout, freeze batchnorm stats)
    model.eval()
    with torch.no_grad():
        for X, y in loader:
            X = X.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=ltype)
            scores = model(X)
            _, preds = scores.max(1)
            # .item() converts the 0-dim result tensor to a plain Python int,
            # so num_correct does not silently accumulate as a tensor.
            num_correct += (y == preds).sum().item()
            num_samples += preds.size(0)

    # Guard against an empty loader instead of raising ZeroDivisionError.
    acc = num_correct / num_samples if num_samples > 0 else 0.0
    print('Got %d / %d correct (%.3f%%) \n' % (num_correct, num_samples, acc * 100.0))

    return acc


def adjust_learning_rate(optimizer, lrd, epoch, schedule):
    """Multiply every param group's learning rate by `lrd` when `epoch` is in `schedule`."""
    if epoch not in schedule:
        return
    for group in optimizer.param_groups:
        old_lr = group['lr']
        new_lr = old_lr * lrd
        print('lr decay from {} to {}'.format(old_lr, new_lr))
        group['lr'] = new_lr


def train_part(model, loader, optimizer, epochs=1, learning_rate_decay=0.1, schedule=(), verbose=True):
    """Train `model` and periodically record validation accuracy.

    Args:
        model: network to train; moved onto the available device.
        loader: (loader_train, loader_val, loader_test) tuple as returned
            by get_dataset(); loader_test is unpacked but not used here.
        optimizer: torch optimizer over model.parameters().
        epochs: number of full passes over loader_train.
        learning_rate_decay: multiplicative lr decay factor applied at
            epochs listed in `schedule`.
        schedule: container of epoch indices at which to decay the lr.
            Default changed from the mutable `[]` (a shared-default hazard)
            to an immutable empty tuple; `in` behaves identically.
        verbose: if True, log every `print_every` iterations; otherwise
            only at the last iteration of each epoch.

    Returns:
        (acc_history, iter_history): tensors of validation accuracies and
        the global iteration indices at which they were recorded.
    """
    loader_train, loader_val, _loader_test = loader
    device = avaliable_device()
    model = model.to(device=device)

    # Hoist the loop-invariant epoch length.
    iters_per_epoch = len(loader_train)
    num_iters = epochs * iters_per_epoch

    num_prints = num_iters // print_every + 1 if verbose else epochs
    acc_history = torch.zeros(num_prints, dtype=dtype)
    iter_history = torch.zeros(num_prints, dtype=ltype)

    for e in range(epochs):
        adjust_learning_rate(optimizer, learning_rate_decay, e, schedule)

        for t, (X, y) in enumerate(loader_train):
            # Re-enable train mode every step: check_accuracy_part leaves
            # the model in eval mode.
            model.train()
            X = X.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=ltype)

            scores = model(X)
            loss = F.cross_entropy(scores, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            tt = t + e * iters_per_epoch  # global iteration index
            last_iter = (e == epochs - 1 and t == iters_per_epoch - 1)

            if verbose and (tt % print_every == 0 or last_iter):
                print('Epoch %d, Iteration %d, loss = %.4f \n' % (e, tt, loss.item()))
                acc = check_accuracy_part(loader_val, model)
                acc_history[tt // print_every] = acc
                iter_history[tt // print_every] = tt
            elif not verbose and t == iters_per_epoch - 1:
                print('Epoch %d, Iteration %d, loss = %.4f \n' % (e, tt, loss.item()))
                acc = check_accuracy_part(loader_val, model)
                acc_history[e] = acc
                iter_history[e] = tt

    return acc_history, iter_history


def main():
    """Train a VGG13-BN model on CIFAR-10 and plot validation accuracy over iterations."""
    from VGG.vgg import vgg13_bn

    model = vgg13_bn()
    optimizer = optim.SGD(model.parameters(), lr=1e-3,
                          momentum=0.9, weight_decay=1e-4)

    loaders = get_dataset()
    acc_history, iter_history = train_part(
        model, loaders, optimizer, epochs=10, schedule=[6, 8])

    plt.plot(iter_history, acc_history)
    plt.show()


if __name__ == '__main__':
    # Removed two dead `pass` statements that followed the call.
    main()
