from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import sampler

import torchvision.datasets as dset
import torchvision.transforms as T

import matplotlib.pyplot as plt
# BUG FIX: "%matplotlib inline" is IPython magic, not Python — in a plain .py
# file it is a SyntaxError. Keep it as a comment for notebook use.
# %matplotlib inline
plt.rcParams['image.cmap'] = 'gray'

# CIFAR-10 preprocessing: PIL image -> float tensor, then per-channel
# standardization with the dataset's channel means and standard deviations.
transform = T.Compose([
    T.ToTensor(),
    T.Normalize(mean=(0.4914, 0.4822, 0.4465),
                std=(0.2023, 0.1994, 0.2010)),
])


def flatten(x, start_dim=1, end_dim=-1):
    """Collapse dims [start_dim, end_dim] of `x` into one (batch dim kept by default)."""
    return torch.flatten(x, start_dim, end_dim)


def two_layer_fc(x, params):
    """
    Forward pass of a two-layer fully-connected network:
    flatten -> linear -> ReLU -> linear.

    The input is a minibatch of shape (N, d1, ..., dM) with d1*...*dM = D;
    the hidden layer has H units and the output gives scores for C classes.

    Inputs:
    - x: input minibatch, shape (N, d1, ..., dM)
    - params: [w1, b1, w2, b2] weight/bias tensors for the two linear layers

    Returns:
    - scores: Tensor of shape (N, C)
    """
    w1, b1, w2, b2 = params
    h = x.flatten(start_dim=1)          # (N, D)
    h = F.relu(F.linear(h, w1, b1))     # hidden activations, (N, H)
    return F.linear(h, w2, b2)          # class scores, (N, C)


def three_layer_convnet(x, params):
    """
    Forward pass of a three-layer convolutional network:
    conv(5x5, pad 2) -> ReLU -> conv(3x3, pad 1) -> ReLU -> linear.

    Inputs:
    - x: A PyTorch Tensor of shape (N, C, H, W) giving a minibatch of images
    - params: A list of PyTorch Tensors giving the weights and biases for the
      network; should contain the following:
        - conv_w1: Tensor of shape (channel_1, C, KH1, KW1), first conv weights
        - conv_b1: Tensor of shape (channel_1,), first conv biases
        - conv_w2, conv_b2: second conv weights and biases
        - fc_w, fc_b: final linear layer weights and biases

    Returns:
    - scores: PyTorch Tensor of shape (N, num_classes) of classification scores
    """
    conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params
    out = F.relu(F.conv2d(x, conv_w1, conv_b1, padding=2))
    out = F.relu(F.conv2d(out, conv_w2, conv_b2, padding=1))
    return F.linear(out.flatten(start_dim=1), fc_w, fc_b)


def check_accuracy_part2(loader, model_fn, params):
    """
    Check the accuracy of a classification model.

    Inputs:
    - loader: A DataLoader for the data split we want to check
    - model_fn: A function that performs the forward pass of the model
        with the signature scores = model_fn(x, params)
    - params: List of PyTorch Tensors giving parameters of the model

    Returns: the accuracy as a float in [0, 1] (also printed), for
    consistency with check_accuracy_part34.
    """
    split = 'val' if loader.dataset.train else 'test'
    print('Checking accuracy on the %s set' % split)
    num_correct, num_samples = 0, 0
    with torch.no_grad():
        for x, y in loader:
            scores = model_fn(x, params)
            _, preds = scores.max(1)
            # BUG FIX: the original accumulated into `num_corrects` (typo),
            # raising a NameError on the first batch.
            num_correct += int((preds == y).sum())
            num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc))
    return acc


def train_part2(model_fn, params, learning_rate):
    """
    Train a model with manual SGD for one pass over the training set.

    Relies on module-level globals `loader_train`, `loader_val` and
    `print_every` (defined elsewhere in the file — confirm before running).

    Inputs:
    - model_fn: forward-pass function, scores = model_fn(x, params)
    - params: list of Tensors (with requires_grad=True) to update
    - learning_rate: SGD step size

    Returns: Nothing.
    """
    # BUG FIX: the original iterated over `loder_train` (typo).
    for t, (x, y) in enumerate(loader_train):
        # Forward pass: compute scores and loss.
        scores = model_fn(x, params)
        loss = F.cross_entropy(scores, y)

        loss.backward()
        # Manual SGD update; done under no_grad so the update itself
        # is not tracked by autograd.
        with torch.no_grad():
            for w in params:
                if w.requires_grad:
                    w -= learning_rate * w.grad
                    # BUG FIX: was `w.grad_zero_()` (no such method); the
                    # gradient tensor is zeroed with w.grad.zero_().
                    w.grad.zero_()
        if t % print_every == 0 or t == len(loader_train) - 1:
            print('Iteration %d, loss = %.4f' % (t, loss.item()))
            # BUG FIX: was `load_val` (undefined name).
            check_accuracy_part2(loader_val, model_fn, params)
    # BUG FIX: the original had `return` inside the loop body, which stopped
    # training after the first batch.


def train_two_layer_net():
    """
    Initialize a two-layer fully-connected net (Kaiming-normal weights,
    zero biases) for CIFAR-10-sized inputs and train it with train_part2.
    """
    # BUG FIX: the original `def` line was missing its trailing colon
    # (SyntaxError).
    # NOTE(review): fix_random_seed is not defined in this chunk — confirm it
    # exists elsewhere in the project.
    fix_random_seed(0)
    C, H, W = 3, 32, 32
    num_classes = 10

    hidden_layer_size = 4000
    learning_rate = 1e-2

    # BUG FIX: was nn.init.kaming_normal_ (typo — no such function).
    w1 = nn.init.kaiming_normal_(torch.empty(hidden_layer_size, C * H * W))
    w1.requires_grad = True

    b1 = nn.init.zeros_(torch.empty(hidden_layer_size))
    b1.requires_grad = True

    w2 = nn.init.kaiming_normal_(torch.empty(num_classes, hidden_layer_size))
    w2.requires_grad = True

    b2 = nn.init.zeros_(torch.empty(num_classes))
    b2.requires_grad = True

    train_part2(two_layer_fc, [w1, b1, w2, b2], learning_rate)


def train_ConvNet():
    """
    Initialize a three-layer convnet's parameters (Kaiming-normal weights,
    zero biases) for CIFAR-10-sized inputs and train it with train_part2.
    """
    C, H, W = 3, 32, 32
    num_classes = 10
    channel_1 = 32
    channel_2 = 16
    kernel_size1 = 5
    kernel_size2 = 3

    learning_rate = 3e-3

    conv_w1 = nn.init.kaiming_normal_(torch.empty(channel_1, C, kernel_size1, kernel_size1))
    conv_w1.requires_grad = True

    # BUG FIX: kaiming_normal_ raises ValueError on tensors with fewer than
    # two dimensions (fan-in/fan-out undefined); biases are zero-initialized.
    conv_b1 = nn.init.zeros_(torch.empty(channel_1))
    conv_b1.requires_grad = True

    conv_w2 = nn.init.kaiming_normal_(torch.empty(channel_2, channel_1, kernel_size2, kernel_size2))
    conv_w2.requires_grad = True

    conv_b2 = nn.init.zeros_(torch.empty(channel_2))
    conv_b2.requires_grad = True

    # Both convs in three_layer_convnet preserve the spatial size, so the
    # final linear layer sees channel_2 * H * W features.
    fc_w = nn.init.kaiming_normal_(torch.empty(num_classes, channel_2 * H * W))
    fc_w.requires_grad = True

    fc_b = nn.init.zeros_(torch.empty(num_classes))
    fc_b.requires_grad = True

    params = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]
    train_part2(three_layer_convnet, params, learning_rate)


class TwoLayerFC(nn.Module):
    """Two-layer fully-connected net: flatten -> linear -> ReLU -> linear."""

    def __init__(self, input_size, hidden_size, num_classes):
        # BUG FIX: the original called `super(TwoLayerFC, self).init()`;
        # nn.Module requires __init__ to be called, otherwise registering
        # submodules fails.
        super().__init__()

        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        # Kaiming-normal weights, zero biases.
        nn.init.kaiming_normal_(self.fc1.weight)
        nn.init.kaiming_normal_(self.fc2.weight)
        nn.init.zeros_(self.fc1.bias)
        nn.init.zeros_(self.fc2.bias)

    def forward(self, x):
        x = x.flatten(start_dim=1)
        # BUG FIX: the original read `scores.fc2(...)`, referencing `scores`
        # before it existed; the second layer lives on `self`.
        return self.fc2(F.relu(self.fc1(x)))


class ThreeLayerConvNet(nn.Module):
    """
    Three-layer convnet: conv(5x5, pad 2) -> ReLU -> conv(3x3, pad 1) -> ReLU -> fc.

    Both convolutions preserve the spatial size, so for 32x32 inputs the
    final linear layer sees channel_2 * 32 * 32 features.
    """

    def __init__(self, in_channel, channel_1, channel_2, num_classes):
        # BUG FIX: super().__init__() was missing entirely.
        super().__init__()
        # BUG FIXES: nn.Conv2d's keyword arguments are in_channels /
        # out_channels (plural); conv2 must output channel_2 channels (not
        # num_classes) to match self.fc's input size; and the 5x5 conv needs
        # padding=2 (padding=3 would grow H and W, breaking the fc shape).
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=channel_1,
                               kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(in_channels=channel_1, out_channels=channel_2,
                               kernel_size=3, padding=1)
        self.fc = nn.Linear(channel_2 * 32 * 32, num_classes)

        # Initialization: Kaiming-normal weights, zero biases.
        nn.init.kaiming_normal_(self.conv1.weight)
        nn.init.kaiming_normal_(self.conv2.weight)
        # BUG FIX: nn.init.kernel_normal_ does not exist.
        nn.init.kaiming_normal_(self.fc.weight)
        nn.init.zeros_(self.conv1.bias)
        nn.init.zeros_(self.conv2.bias)
        nn.init.zeros_(self.fc.bias)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        return self.fc(x.flatten(start_dim=1))


def check_accuracy_part34(loader, model):
    """
    Check the accuracy of an nn.Module classifier on a data split.

    Inputs:
    - loader: A DataLoader; loader.dataset.train selects the printed label
    - model: an nn.Module mapping a batch x to class scores

    Returns: the accuracy as a float in [0, 1] (also printed).
    """
    if loader.dataset.train:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    num_correct, num_samples = 0, 0
    model.eval()  # set model to evaluation mode (affects BN/dropout)

    with torch.no_grad():
        for x, y in loader:
            scores = model(x)
            _, preds = scores.max(1)
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)
        acc = float(num_correct) / num_samples
        # BUG FIX: the value is a percentage but the original format string
        # omitted the '%' sign (cf. check_accuracy_part2's '%.2f%%').
        print('Got %d / %d correct (%.2f%%)' % (num_correct, num_samples, 100 * acc))
    return acc


def adjust_learning_rate(optimizer, lrd, epoch, schedule):
    """
    Multiply the learning rate by `lrd` if `epoch` is in `schedule`.

    Inputs:
    - optimizer: An Optimizer object we will use to train the model
    - lrd: learning rate decay, a factor multiplied at scheduled epochs
    - epoch: the current epoch number
    - schedule: the list of epochs that require a learning rate update

    Returns: Nothing, but the learning rate might be updated.
    """
    if epoch not in schedule:
        return
    for group in optimizer.param_groups:
        decayed = group['lr'] * lrd
        print('lr decay from {} to {}'.format(group['lr'], decayed))
        group['lr'] = decayed


def train_part345(model, optimizer, epochs=1, learning_rate_decay=0.1,
                  schedule=[], verbose=True):
    """
    Train `model` with `optimizer`, periodically logging validation accuracy.

    Relies on module-level globals `loader_train`, `loader_val` and
    `print_every` (defined elsewhere in the file — confirm before running).

    Inputs:
    - model: an nn.Module to train
    - optimizer: a torch.optim Optimizer updating model's parameters
    - epochs: number of passes over loader_train
    - learning_rate_decay: factor applied to the lr at scheduled epochs
    - schedule: list of epochs at which to decay the learning rate
    - verbose: if True, log every `print_every` iterations; otherwise once
      per epoch

    Returns: (acc_history, iter_history) — recorded accuracies and the
    iteration numbers at which they were recorded.
    """
    num_iters = epochs * len(loader_train)
    if verbose:
        num_prints = num_iters // print_every + 1
    else:
        # Record once per epoch.
        num_prints = epochs

    acc_history = torch.zeros(num_prints, dtype=torch.float)
    iter_history = torch.zeros(num_prints, dtype=torch.long)

    for e in range(epochs):
        adjust_learning_rate(optimizer, learning_rate_decay, e, schedule)
        for t, (x, y) in enumerate(loader_train):
            model.train()  # training mode (affects BN/dropout)

            scores = model(x)
            loss = F.cross_entropy(scores, y)

            # Zero out gradients for the variables the optimizer will update.
            optimizer.zero_grad()
            loss.backward()
            # BUG FIX: was `optimizer.setp()` (AttributeError).
            optimizer.step()

            tt = t + e * len(loader_train)
            # BUG FIX(review): the original printed the loss and ran a full
            # validation pass on EVERY iteration, defeating print_every;
            # log only at the scheduled points.
            if verbose and (tt % print_every == 0 or (e == epochs - 1 and t == len(loader_train) - 1)):
                print('Epoch %d, Iteration %d, loss = %.4f' % (e, tt, loss.item()))
                acc = check_accuracy_part34(loader_val, model)
                acc_history[tt // print_every] = acc
                iter_history[tt // print_every] = tt
                print()  # print a new line
            elif not verbose and t == len(loader_train) - 1:
                print('Epoch %d, Iteration %d, loss = %.4f' % (e, tt, loss.item()))
                acc = check_accuracy_part34(loader_val, model)
                acc_history[e] = acc
                iter_history[e] = tt
                print()

    return acc_history, iter_history


class Flatten(nn.Module):
    """Module that flattens every dimension after the batch dimension."""

    def forward(self, x):
        return x.flatten(start_dim=1)


# BUG FIX: the original referenced C, H, W, hidden_layer_size and num_classes
# at module level, where they were undefined (they were locals of
# train_two_layer_net), raising NameError on import. Define them here.
C, H, W = 3, 32, 32       # CIFAR-10 image dimensions
num_classes = 10
hidden_layer_size = 4000

# Two-layer fully-connected net as an nn.Sequential with named stages.
model = nn.Sequential(OrderedDict([
    ('flatten', nn.Flatten()),
    ('fc1', nn.Linear(C * H * W, hidden_layer_size)),
    ('relu1', nn.ReLU()),
    ('fc2', nn.Linear(hidden_layer_size, num_classes)),
]))


class PlainBlock(nn.Module):
    """
    Pre-activation plain block: (BN -> ReLU -> 3x3 conv) twice.

    When `downsample` is True the first conv uses stride 2, halving the
    spatial resolution.
    """

    def __init__(self, Cin, Cout, downsample=False):
        super().__init__()

        stride = 2 if downsample else 1

        layers = OrderedDict()
        layers['bn1'] = nn.BatchNorm2d(Cin)
        layers['relu1'] = nn.ReLU()
        layers['conv1'] = nn.Conv2d(Cin, Cout, kernel_size=3, padding=1, stride=stride)
        layers['bn2'] = nn.BatchNorm2d(Cout)
        layers['relu2'] = nn.ReLU()
        layers['conv2'] = nn.Conv2d(Cout, Cout, kernel_size=3, padding=1)
        self.net = nn.Sequential(layers)

    def forward(self, x):
        return self.net(x)


class ResidualBlock(nn.Module):
    """
    A residual block: out = PlainBlock(x) + shortcut(x).

    The shortcut is the identity when the input and output shapes match,
    otherwise a 1x1 convolution (with stride 2 when downsampling).
    """

    def __init__(self, Cin, Cout, downsample=False):
        super().__init__()

        self.block = PlainBlock(Cin, Cout, downsample)

        stride = 2 if downsample else 1
        self.shortcut = nn.Sequential()
        if Cin != Cout or downsample:
            # BUG FIX: the original used `==` (a no-op comparison) instead of
            # `=`, so the shortcut stayed the identity and the residual
            # addition failed whenever shapes changed.
            self.shortcut = nn.Sequential(nn.Conv2d(Cin, Cout, 1, stride=stride, padding=0))

    # BUG FIX: `forward` was indented inside __init__ in the original, so the
    # module had no forward method at all.
    def forward(self, x):
        return self.block(x) + self.shortcut(x)


class ResNetStage(nn.Module):
    """
    A stage of `num_blocks` residual blocks. Only the first block may change
    the channel count or downsample; the rest keep Cout -> Cout.
    """

    def __init__(self, Cin, Cout, num_blocks, downsample=True,
                 block=ResidualBlock):
        super().__init__()
        stage = [block(Cin, Cout, downsample)]
        stage.extend(block(Cout, Cout) for _ in range(num_blocks - 1))
        self.net = nn.Sequential(*stage)

    def forward(self, x):
        return self.net(x)


class ResNetStem(nn.Module):
    """Network stem: one size-preserving 3x3 convolution followed by ReLU."""

    def __init__(self, Cin=3, Cout=8):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(Cin, Cout, kernel_size=3, padding=1, stride=1),
            nn.ReLU(),
        )

    def forward(self, x):
        return self.net(x)


class ResNet(nn.Module):
    """
    Stem -> sequence of ResNetStages -> global average pool -> linear classifier.

    Inputs to __init__:
    - stage_args: list of per-stage argument tuples passed to ResNetStage,
      e.g. (Cin, Cout, num_blocks, downsample)
    - Cin: number of input image channels
    - block: block class used inside each stage
    - num_classes: number of output classes
    """

    def __init__(self, stage_args, Cin=3, block=ResidualBlock, num_classes=10):
        super().__init__()

        # BUG FIX(review): the original used a bare residual block as the
        # stem even though ResNetStem exists (and was otherwise unused); the
        # stem maps image channels to the first stage's input channels. Also
        # dropped the redundant `self.cnn = nn.Sequential()` placeholder.
        layers = [ResNetStem(Cin=Cin, Cout=stage_args[0][0])]
        for args in stage_args:
            layers.append(ResNetStage(*args, block=block))
        self.cnn = nn.Sequential(*layers)

        # stage_args[-1][1] is the last stage's output channel count.
        self.fc = nn.Linear(stage_args[-1][1], num_classes)

    def forward(self, x):
        out = self.cnn(x)
        # Global average pool over the remaining spatial extent, then flatten.
        out = F.avg_pool2d(out, out.shape[2])
        out = out.reshape(out.shape[0], -1)
        return self.fc(out)


class ResidualBottleneckBlock(nn.Module):
    """
    Pre-activation bottleneck residual block:
    1x1 conv reducing to Cout//4 channels -> 3x3 conv -> 1x1 conv expanding
    to Cout, plus an identity or 1x1-conv shortcut.
    """

    def __init__(self, Cin, Cout, downsample=False):
        super().__init__()

        stride = 2 if downsample else 1
        # BUG FIX: nn.Sequential does not accept a list; modules must be
        # passed as positional arguments (or an OrderedDict).
        self.block = nn.Sequential(
            nn.BatchNorm2d(Cin),
            nn.ReLU(),
            # BUG FIX: the main path's stride was hard-coded to 1, so with
            # downsample=True its output could not be added to the strided
            # shortcut's output; the first 1x1 conv carries the stride.
            nn.Conv2d(Cin, Cout // 4, 1, padding=0, stride=stride),
            nn.BatchNorm2d(Cout // 4),
            nn.ReLU(),
            nn.Conv2d(Cout // 4, Cout // 4, 3, padding=1),
            nn.BatchNorm2d(Cout // 4),
            nn.ReLU(),
            nn.Conv2d(Cout // 4, Cout, 1, padding=0),
        )
        self.shortcut = nn.Sequential()
        if Cin != Cout or downsample:
            self.shortcut = nn.Sequential(nn.Conv2d(Cin, Cout, 1, stride=stride))

    def forward(self, x):
        return self.block(x) + self.shortcut(x)