import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, sampler
import torchvision.datasets as dset
import torchvision.transforms as T

# data preparation
# The CIFAR-10 training split has 50000 images: the first NUM_TRAIN feed the
# training loader, the remaining 50000 - NUM_TRAIN feed the validation loader.
NUM_TRAIN = 49000


def get_dataset():
    """Build the CIFAR-10 train/val/test DataLoaders.

    Returns:
        (loader_train, loader_val, loader_test): the first NUM_TRAIN training
        images feed the train loader, the remaining images of the training
        split feed the val loader, and the official test split feeds the test
        loader. All loaders use batch size 64.
    """
    # Per-channel mean/std commonly used for CIFAR-10.
    # Fixed: the green-channel std was 0.1884, a typo of the standard 0.1994.
    transform = T.Compose([
        T.ToTensor(),
        T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])

    cifar10_train = dset.CIFAR10('./datasets', train=True, download=True,
                                 transform=transform)
    # Validation reuses the training split; the SubsetRandomSamplers below
    # keep the train and val loaders disjoint.
    cifar10_val = dset.CIFAR10('./datasets', train=True, download=True,
                               transform=transform)
    # Fixed: the keyword is `download`, not `downloader` — the original line
    # raised TypeError: unexpected keyword argument.
    cifar10_test = dset.CIFAR10('./datasets', train=False, download=True,
                                transform=transform)

    loader_train = DataLoader(cifar10_train, batch_size=64,
                              sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
    loader_val = DataLoader(cifar10_val, batch_size=64,
                            sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))
    loader_test = DataLoader(cifar10_test, batch_size=64)

    return loader_train, loader_val, loader_test


# Tensor dtypes used throughout: float for inputs/scores, long for labels.
dtype = torch.float
ltype = torch.long
# Report loss / check accuracy every `print_every` training iterations.
print_every = 100


def avaliable_device():
    """Return the device to compute on: the first CUDA GPU when one is
    available, otherwise the CPU.

    (The misspelled name is kept as-is so existing callers keep working.)
    """
    return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def train_part2(model_fn, params, learning_rate, dataloader):
    """Train a functional model (bare function + parameter list) with plain SGD.

    Args:
        model_fn: callable (X, params) -> class scores of shape (N, C).
        params: list of tensors; those with requires_grad=True get updated.
        learning_rate: SGD step size.
        dataloader: (loader_train, loader_val, loader_test) tuple, e.g. the
            result of get_dataset().
    """
    loader_train, loader_val, loader_test = dataloader
    device = avaliable_device()
    for t, (X, y) in enumerate(loader_train):
        X = X.to(device=device, dtype=dtype)
        y = y.to(device=device, dtype=ltype)

        # Forward pass: compute scores and loss
        scores = model_fn(X, params)
        loss = F.cross_entropy(scores, y)

        # Backward pass
        loss.backward()

        # Update parameters. We don't want to backpropagate through the
        # parameter updates, so we scope the updates under a torch.no_grad()
        # context manager to prevent a computational graph from being built
        with torch.no_grad():
            for W in params:
                # Guard on W.grad: a parameter that never participated in the
                # forward pass has grad=None and would break the update.
                if W.requires_grad and W.grad is not None:
                    W -= learning_rate * W.grad

                    # Manually zero the gradients after running the backward
                    # pass. Fixed: the method is grad.zero_(), not grad_zero_()
                    # — the original raised AttributeError on the first update.
                    W.grad.zero_()

        if t % print_every == 0 or t == len(loader_train) - 1:
            print('Iteration %d, loss = %.4f' % (t, loss.item()))
            check_accuracy_part2(loader_val, model_fn, params)


def check_accuracy_part2(loader, model_fn, params):
    """Evaluate a functional model on `loader`; print and return its accuracy.

    Args:
        loader: DataLoader whose underlying dataset exposes a `train` flag
            (used only to label the printout as validation vs test).
        model_fn: callable (X, params) -> class scores of shape (N, C).
        params: parameter list forwarded to model_fn.

    Returns:
        Accuracy as a float in [0, 1]. Fixed: the original returned None,
        unlike the parallel check_accuracy_part34, so callers could not
        record the value.
    """
    split = 'validation' if loader.dataset.train else 'test'
    print('Checking accuracy on the %s set' % split)

    num_correct, num_samples = 0, 0
    device = avaliable_device()

    # No gradients are needed for evaluation.
    with torch.no_grad():
        for X, y in loader:
            X = X.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=ltype)
            scores = model_fn(X, params)
            _, preds = scores.max(1)
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)
        acc = float(num_correct) / num_samples
        print('Got %d / %d correct (%.2f%%) \n' % (num_correct, num_samples, 100 * acc))

    return acc


def check_accuracy_part34(loader, model):
    """Run `model` in eval mode over every batch of `loader`, print the
    accuracy, and return it as a float in [0, 1].

    The printed label (validation vs test) comes from the underlying
    dataset's `train` flag.
    """
    split = 'validation' if loader.dataset.train else 'test'
    print('Checking accuracy on the %s set' % split)

    device = avaliable_device()
    correct = 0
    total = 0

    # Evaluation mode: disables dropout, uses running batch-norm statistics.
    model.eval()
    with torch.no_grad():
        for images, labels in loader:
            images = images.to(device=device, dtype=dtype)
            labels = labels.to(device=device, dtype=ltype)
            preds = model(images).argmax(dim=1)
            correct += int((preds == labels).sum())
            total += preds.size(0)
        acc = float(correct) / total
        print('Got %d / %d correct (%.2f%%) \n' % (correct, total, 100 * acc))

    return acc


def adjust_learning_rate(optimizer, lrd, epoch, schedule):
    """Multiply every param group's learning rate by `lrd` when `epoch` is
    listed in `schedule`; otherwise do nothing.

    Args:
        optimizer: torch optimizer (anything exposing `param_groups`).
        lrd: multiplicative decay factor, e.g. 0.1.
        epoch: current epoch index.
        schedule: container of epoch indices at which to decay.
    """
    if epoch in schedule:
        # Fixed: the attribute is `param_groups`, not `para_groups` — the
        # original raised AttributeError whenever a decay epoch was reached.
        for param_group in optimizer.param_groups:
            print('lr decay from {} to {}'.format(param_group['lr'], param_group['lr'] * lrd))
            param_group['lr'] *= lrd


def train_part345(model, loader, optimizer, epochs=1, learning_rate_decay=0.1, schedule=(), verbose=True):
    """Train an nn.Module with `optimizer`, periodically recording validation
    accuracy.

    Args:
        model: nn.Module mapping an image batch to class scores.
        loader: (loader_train, loader_val, loader_test) tuple.
        optimizer: torch optimizer over model.parameters().
        epochs: number of full passes over loader_train.
        learning_rate_decay: factor applied at epochs listed in `schedule`.
        schedule: epoch indices at which to decay the learning rate. Default
            changed from a mutable `[]` to `()` — identical semantics, avoids
            the shared-mutable-default pitfall.
        verbose: if True, record accuracy every `print_every` iterations
            (plus the final one); otherwise once per epoch.

    Returns:
        (acc_history, iter_history): tensors of recorded accuracies and the
        iteration indices at which they were taken.
    """
    loader_train, loader_val, loader_test = loader
    device = avaliable_device()
    model = model.to(device=device)
    num_iters = epochs * len(loader_train)

    num_prints = num_iters // print_every + 1 if verbose else epochs

    acc_history = torch.zeros(num_prints, dtype=dtype)
    iter_history = torch.zeros(num_prints, dtype=ltype)

    for e in range(epochs):
        adjust_learning_rate(optimizer, learning_rate_decay, e, schedule)

        for t, (X, y) in enumerate(loader_train):
            # Put model into training mode every iteration, because
            # check_accuracy_part34 flips it to eval mode.
            model.train()
            X = X.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=ltype)

            scores = model(X)
            loss = F.cross_entropy(scores, y)
            optimizer.zero_grad()
            # Backward pass: gradient of the loss w.r.t. each model parameter.
            loss.backward()

            # Actually update the parameters of the model using the gradients
            optimizer.step()

            tt = t + e * len(loader_train)
            print('Epoch %d, Iteration %d, loss = %.4f \n' % (e, tt, loss.item()))

            # Fixed: run the (expensive) validation pass only when its result
            # is recorded; the original evaluated on every single iteration.
            if verbose and (tt % print_every == 0 or (e == epochs - 1 and t == len(loader_train) - 1)):
                acc_history[tt // print_every] = check_accuracy_part34(loader_val, model)
                iter_history[tt // print_every] = tt
            elif not verbose and (t == len(loader_train) - 1):
                acc_history[e] = check_accuracy_part34(loader_val, model)
                iter_history[e] = tt

    # Fixed: the original `return` sat inside the epoch loop, so training
    # always stopped after the first epoch regardless of `epochs`.
    return acc_history, iter_history

# Example usage: loaders = get_dataset(); use train_part345(model, loaders,
# optimizer, ...) for nn.Module models, or train_part2 for functional models.