import math
import torch
import coutils
from coutils import fix_random_seed, rel_error, compute_numeric_gradient, Solver
import matplotlib.pyplot as plt

"""
def forward(x, w):
    Receive inputs x and weights
    # Do some computations
    cache = (x, w, z, out)
    return out, cache


def backward(dout, cache):
    Receive dout(derivative of loss with respect to outputs) and cache
    and compute derivative with respect to inputs
    # Unpack cache values
    x, w, z, out = cache
    dx =  # Derivative of loss with respect to x
    dw =  # Derivative of loss with respect to w
    return dx, dw
"""

# data type and device for torch.tensor
# Keyword-argument bundles meant to be unpacked into tensor factory calls,
# e.g. torch.zeros(3, **to_float), so dtype and device are chosen in one place.
to_float = {'dtype': torch.float, 'device': 'cpu'}
to_float_cuda = {'dtype': torch.float, 'device': 'cuda'}
to_double = {'dtype': torch.double, 'device': 'cpu'}
to_double_cuda = {'dtype': torch.double, 'device': 'cuda'}
to_long = {'dtype': torch.long, 'device': 'cpu'}
to_long_cuda = {'dtype': torch.long, 'device': 'cuda'}


def get_CIFAR10_data(validation_ratio=0.05, cuda=False, reshape_to_2d=False,
                     visualize=False):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the linear classifier.
    These are the same steps as we used for the SVM, but condensed to a
    single function.

    NOTE(review): the body is truncated here ("# ... omit"); as shown, the
    function returns None, and validation_ratio / reshape_to_2d / visualize
    are unused. Confirm behavior against the full original source.
    """
    X_train, y_train, X_test, y_test = coutils.data.cifar10()

    # Move the entire dataset to the GPU when requested.
    if cuda:
        X_train = X_train.cuda()
        y_train = y_train.cuda()
        X_test = X_test.cuda()
        y_test = y_test.cuda()

    # Human-readable CIFAR-10 class names, used for visualization.
    class_names = [
        'plane', 'car', 'bird', 'cat', 'deer',
        'dog', 'frog', 'horse', 'ship', 'truck'
    ]
    # ... omit


class Linear(object):
    """Modular fully-connected (affine) layer: out = flatten(x) @ w + b."""

    @staticmethod
    def forward(x, w, b):
        """
        Compute the forward pass of an affine layer.

        Inputs:
        - x: A tensor containing input data, of shape (N, d1, ..., dn)
        - w: A tensor of weights, of shape (D, M)
        - b: A tensor of biases, of shape (M,)
        Returns a tuple of:
        - out: output, of shape (N, M)
        - cache: (x, w, b)
        """
        # Flatten each example to a row of length D = w.shape[0].
        flat = x.view(-1, w.shape[0])
        out = flat.mm(w) + b
        return out, (x, w, b)

    @staticmethod
    def backward(dout, cache):
        """
        Compute the backward pass of an affine layer.

        Inputs:
        - dout: Upstream derivative, of shape (N, M)
        - cache: Tuple of:
            - x: Input data, of shape (N, d_1, ..., d_k)
            - w: Weights, of shape (D, M)
            - b: Biases, of shape (M,)
        Returns a tuple of:
        - dx: Gradient with respect to x, of shape (N, d_1, ..., d_k)
        - dw: Gradient with respect to w, of shape (D, M)
        - db: Gradient with respect to b, of shape (M,)
        """
        x, w, _ = cache
        num = x.shape[0]
        # Propagate upstream gradient back through the matmul, restoring the
        # original (possibly multi-dimensional) input shape.
        dx = dout.mm(w.t()).reshape(x.shape)
        dw = x.view(num, -1).t().mm(dout)
        db = dout.sum(dim=0)
        return dx, dw, db


class ReLU(object):
    """Modular rectified-linear activation layer."""

    @staticmethod
    def forward(x):
        """
        Compute out = max(0, x) elementwise.

        Input:
        - x: A tensor of any shape
        Returns a tuple of:
        - out: Output, a tensor of the same shape as x
        - cache: x
        """
        # Bug fix: the previous implementation built the zero tensor with an
        # unconditional .cuda() call, which crashed for CPU inputs and broke
        # the device-agnostic design used everywhere else in this file.
        # clamp keeps dtype and device of x automatically.
        out = x.clamp(min=0)
        cache = x
        return out, cache

    @staticmethod
    def backward(dout, cache):
        """
        Pass the upstream gradient through where the input was positive.

        Input:
        - dout: Upstream derivatives, of any shape
        - cache: Input x, of same shape as dout
        Returns:
        - dx: Gradient with respect to x
        """
        x = cache
        # Subgradient of ReLU: 1 where x > 0, 0 elsewhere (including x == 0).
        dx = dout * (x > 0).to(dout.dtype)
        return dx


class Linear_ReLU(object):
    """Convenience layer composing a linear transform with a ReLU."""

    @staticmethod
    def forward(x, w, b):
        """
        Run the linear layer, then ReLU.

        Inputs:
        - x: Input to the linear layer
        - w, b: Weights for the linear layer
        Returns a tuple of:
        - out: Output from the ReLU
        - cache: Object to give to the backward pass
        """
        linear_out, linear_cache = Linear.forward(x, w, b)
        out, relu_cache = ReLU.forward(linear_out)
        return out, (linear_cache, relu_cache)

    @staticmethod
    def backward(dout, cache):
        """
        Backward pass for the linear-relu convenience layer.
        Returns (dx, dw, db), mirroring Linear.backward.
        """
        linear_cache, relu_cache = cache
        d_linear = ReLU.backward(dout, relu_cache)
        return Linear.backward(d_linear, linear_cache)


def svm_loss(x, y):
    """
    Compute the multiclass SVM (hinge) loss and its gradient.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num_train = x.shape[0]
    rows = torch.arange(num_train)
    # Margins relative to each example's correct-class score (delta = 1),
    # with the correct-class entry zeroed out so it doesn't contribute.
    correct = x[rows, y].unsqueeze(1)
    margins = torch.clamp(x - correct + 1.0, min=0.)
    margins[rows, y] = 0.
    loss = margins.sum() / num_train

    # Each positive margin adds +1 to its own column of the gradient and
    # -1 to the correct-class column.
    positive = margins > 0
    dx = positive.to(x.dtype)
    dx[rows, y] -= positive.sum(dim=1).to(x.dtype)
    dx = dx / num_train
    return loss, dx


def softmax_loss(x, y):
    """
    Compute the softmax (cross-entropy) loss and its gradient.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num_train = x.shape[0]
    rows = torch.arange(num_train)
    # Shift by the row max for numerical stability before exponentiating.
    shifted = x - x.max(dim=1, keepdim=True).values
    log_sum_exp = shifted.exp().sum(dim=1, keepdim=True).log()
    log_probs = shifted - log_sum_exp
    loss = -log_probs[rows, y].sum() / num_train
    # d(loss)/dx = probs, with 1 subtracted at the correct class.
    dx = log_probs.exp()
    dx[rows, y] -= 1
    dx /= num_train
    return loss, dx


class TwoLayerNet(object):
    """
    A two-layer fully-connected neural network with ReLU nonlinearity and
    softmax loss that uses a modular layer design.
    The architecture is linear - relu - linear - softmax.
    Note that this class does not implement gradient descent; instead, it
    will interact with a separate Solver object that is responsible for
    running optimization.
    """

    def __init__(self, input_dim=3 * 32 * 32, hidden_size=100, num_classes=10,
                 weight_scale=1e-3, reg=0.0, dtype=torch.float32, device='cpu'):
        """
        Initialize the network.

        Inputs:
        - input_dim: Size of the flattened input.
        - hidden_size: Number of hidden-layer units.
        - num_classes: Number of output classes.
        - weight_scale: Scale (std-dev) of the random weight initialization.
        - reg: L2 regularization strength.
        - dtype, device: torch dtype and device for the parameters.
        """
        # Bug fix: self.params was used below without ever being created,
        # which raised AttributeError on construction.
        self.params = {}
        self.reg = reg
        self.params['W1'] = torch.randn(
            (input_dim, hidden_size), dtype=dtype, device=device) * weight_scale
        self.params['b1'] = torch.zeros(
            hidden_size, dtype=dtype, device=device)
        self.params['W2'] = torch.randn(
            (hidden_size, num_classes), dtype=dtype, device=device) * weight_scale
        self.params['b2'] = torch.zeros(
            num_classes, dtype=dtype, device=device)

    def loss(self, X, y=None):
        """
        Compute loss and gradients for a minibatch of data.

        Returns:
        If y is None, then run a test-time forward pass of the model
        and return:
        - scores: Tensor of shape (N, C) giving classification scores, where
          scores[i, c] is the classification score for X[i] and class c.
        If y is not None, then run a training-time forward and backward pass
        and return a tuple of:
        - loss: Scalar value giving the loss (data loss + L2 regularization).
        - grads: Dictionary with the same keys as self.params, mapping
          parameter names to gradients of the loss with respect to those
          parameters.
        """
        hidden, hidden_cache = Linear_ReLU.forward(
            X, self.params['W1'], self.params['b1'])
        scores, cache = Linear.forward(
            hidden, self.params['W2'], self.params['b2'])

        if y is None:
            return scores

        grads = {}
        loss, dout = softmax_loss(scores, y)
        loss += self.reg * \
                (torch.sum(self.params['W1'] ** 2) +
                 torch.sum(self.params['W2'] ** 2))
        d_h, grads['W2'], grads['b2'] = Linear.backward(dout, cache)
        _, grads['W1'], grads['b1'] = Linear_ReLU.backward(d_h, hidden_cache)
        # Bug fix: the regularization term is included in the loss above, so
        # its gradient (d/dW of reg * sum(W**2) = 2 * reg * W) must be added
        # to the parameter gradients as well; it was previously omitted,
        # which makes any numeric gradient check fail whenever reg != 0.
        grads['W1'] += 2.0 * self.reg * self.params['W1']
        grads['W2'] += 2.0 * self.reg * self.params['W2']

        return loss, grads


class FullyConnectedNet(object):
    """
    A fully-connected neural network with an arbitrary number of hidden
    layers, ReLU nonlinearities, and a softmax loss function.
    For a network with L layers, the architecture will be:

    {linear - relu - [dropout]} x (L - 1) - linear - softmax

    where dropout is optional, and the {...} block is repeated L - 1 times.

    Similar to the TwoLayerNet above, learnable parameters are stored in the
    self.params dictionary and will be learned using the Solver class.
    """

    def __init__(self, hidden_dims, input_dim=3 * 32 * 32,
                 num_classes=10, dropout=0.0, reg=0.0,
                 weight_scale=1e-2, seed=None, dtype=torch.float,
                 device='cpu'):
        """
        Initialize a new FullyConnectedNet.

        Inputs:
        - hidden_dims: A list of integers giving the size of each hidden layer.
        - input_dim: An integer giving the size of the input.
        - num_classes: An integer giving the number of classes to classify.
        - dropout: Scalar between 0 and 1 giving the drop probability for
          networks with dropout. If dropout=0 then the network should not use
          dropout.
        - reg: Scalar giving L2 regularization strength.
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - seed: If not None, then pass this random seed to the dropout layers.
          This makes the dropout layers deterministic so we can gradient-check
          the model.
        - dtype: A torch data type object; all computations will be performed
          using this datatype. float is faster but less accurate, so you
          should use double for numeric gradient checking.
        - device: device to use for computation, 'cpu' or 'cuda'.
        """
        self.use_dropout = dropout != 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}
        kw = {'dtype': dtype, 'device': device}

        # Layer i maps dims[i] -> dims[i + 1]; the last pair produces the
        # class scores.
        dims = [input_dim] + hidden_dims + [num_classes]
        for i in range(self.num_layers):
            self.params['W%d' % (i + 1)] = weight_scale * torch.randn(dims[i], dims[i + 1], **kw)
            self.params['b%d' % (i + 1)] = torch.zeros(dims[i + 1], **kw)

        self.dropout_param = {}
        if self.use_dropout:
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

    def loss(self, X, y=None):
        """
        Compute loss and gradients for the fully-connected net.

        Same input/output contract as TwoLayerNet.loss: returns scores at
        test time (y is None), otherwise a (loss, grads) tuple where the
        loss includes the L2 regularization term.
        """
        X = X.to(self.dtype)
        mode = 'test' if y is None else 'train'

        # Dropout behaves differently at train and test time.
        if self.use_dropout:
            self.dropout_param['mode'] = mode

        num_hidden = self.num_layers - 1
        out = X
        cache_history = []
        L2reg = 0

        # Forward pass: {linear - relu - [dropout]} x (L - 1)
        for layer in range(num_hidden):
            W = self.params['W%d' % (layer + 1)]
            b = self.params['b%d' % (layer + 1)]
            out, cache = Linear_ReLU.forward(out, W, b)
            cache_history.append(cache)
            if self.use_dropout:
                out, cache = dropout_forward(out, self.dropout_param)
                cache_history.append(cache)
            L2reg += torch.sum(W ** 2)

        # Final scoring layer: linear only.
        # Bug fix: the original indexed the final layer with the loop
        # variable leaked from the for-loop, which raises NameError when
        # hidden_dims is empty.
        last = self.num_layers
        W_last = self.params['W%d' % last]
        scores, cache = Linear.forward(out, W_last, self.params['b%d' % last])
        cache_history.append(cache)
        L2reg += torch.sum(W_last ** 2)
        L2reg *= self.reg

        if mode == 'test':
            return scores

        grads = {}
        loss, dout = softmax_loss(scores, y)
        # Bug fix: the L2 regularization term was computed but never added to
        # the loss, even though the 2 * reg * W terms ARE added to the
        # gradients below; that mismatch would fail any gradient check.
        loss = loss + L2reg

        # Backward pass, mirroring the forward pass in reverse order.
        dout, grads['W%d' % last], grads['b%d' % last] = Linear.backward(
            dout, cache_history.pop())
        grads['W%d' % last] += 2.0 * self.reg * W_last

        for layer in range(num_hidden - 1, -1, -1):
            if self.use_dropout:
                dout = dropout_backward(dout, cache_history.pop())
            w_name = 'W%d' % (layer + 1)
            dout, grads[w_name], grads['b%d' % (layer + 1)] = Linear_ReLU.backward(
                dout, cache_history.pop())
            grads[w_name] += 2.0 * self.reg * self.params[w_name]
        return loss, grads


def sgd(w, dw, config=None):
    """
    Perform a single step of vanilla stochastic gradient descent in place.

    Inputs:
    - w: A tensor giving the current weights
    - dw: A tensor of the same shape as w giving the gradient of the loss
      w.r.t. w
    - config: A dictionary containing hyperparameter values such as learning
      rate, momentum, etc. If the update requires caching values over many
      iterations, then config will also hold these cached values.
    Returns:
    - (w, config): the updated weights (same tensor, mutated in place) and
      the config dict with defaults filled in.
    """
    config = {} if config is None else config
    # setdefault both installs the default learning rate and returns the
    # effective value in a single call.
    lr = config.setdefault('learning_rate', 1e-2)

    w -= lr * dw
    return w, config


class Dropout(object):
    """
    Performs the forward pass for (inverted) dropout.

    Inputs:
    - x: Input data: tensor of any shape
    - dropout_param: A dictionary with the following keys:
        - p: Dropout parameter. We drop each neuron output with probability p.
        - mode: 'test' or 'train'.
          If the mode is train, then perform dropout;
          if the mode is test, then just return the input.
        - seed: Seed for the random number generator.
    Outputs:
    - out: Tensor of the same shape as x
    - cache: tuple (dropout_param, mask)
      In training mode, mask is the dropout mask that was used to multiply
      the input; in test mode, mask is None.

    NOTE(review): both methods below are placeholders that are rebound to
    module-level functions later in this file; the placeholder forward
    signature (x, w, b) does not match dropout's actual (x, dropout_param)
    call signature and is never invoked.
    """

    @staticmethod
    def forward(x, w, b):
        # Placeholder; rebound at module level after dropout_forward is defined.
        raise NotImplementedError

    @staticmethod
    def backward(dout, cache):
        # Placeholder; rebound at module level after the backward is defined.
        raise NotImplementedError


def dropout_forward(x, dropout_param):
    """
    Forward pass for (inverted) dropout.

    Inputs:
    - x: Input data, a tensor of any shape.
    - dropout_param: Dict with keys:
        - 'p': probability of DROPPING each unit (per the Dropout class doc).
        - 'mode': 'train' or 'test'.
        - 'seed': optional RNG seed for deterministic masks.
    Returns a tuple of:
    - out: Tensor of the same shape as x.
    - cache: (dropout_param, mask); mask is None in test mode.
    """
    p, mode = dropout_param['p'], dropout_param['mode']

    if 'seed' in dropout_param:
        # Bug fix: was torch.munual_seed (typo), which raised AttributeError.
        torch.manual_seed(dropout_param['seed'])

    mask = None
    out = None

    if mode == 'train':
        # Bug fixes vs. the original:
        # - use uniform torch.rand (the original drew from a NORMAL
        #   distribution via randn, giving the wrong keep probability);
        # - keep each unit with probability 1 - p, since p is documented as
        #   the DROP probability, and scale kept units by 1 / (1 - p)
        #   (inverted dropout) so expected activations match test time;
        # - build a new output tensor instead of mutating x in place.
        keep_prob = 1.0 - p
        mask = (torch.rand_like(x) < keep_prob).to(x.dtype) / keep_prob
        out = x * mask
    elif mode == 'test':
        out = x
    cache = (dropout_param, mask)
    return out, cache


# Replace the NotImplementedError placeholder with the real implementation.
Dropout.forward = dropout_forward


def dropout_backward(dout, cache):
    """
    Perform the backward pass for (inverted) dropout.

    Inputs:
    - dout: Upstream derivative, of any shape
    - cache: (dropout_param, mask) from Dropout.forward
    Returns:
    - dx: Gradient with respect to the dropout layer's input
    """
    params, mask = cache
    mode = params['mode']

    # At test time dropout is the identity, so the gradient passes through
    # untouched; at train time only the kept (mask-scaled) units get gradient.
    if mode == 'test':
        dx = dout
    elif mode == 'train':
        dx = dout * mask

    return dx


# Bug fix: Dropout.backward was mistakenly bound to dropout_forward, so
# calling Dropout.backward would rerun the forward pass (and crash, since
# the argument shapes don't match). Bind the actual backward function.
Dropout.backward = dropout_backward