import torch
from torch._C import dtype


class Linear(object):
    """Fully-connected (affine) layer with explicit forward/backward passes."""

    @staticmethod
    def forward(x, w, b):
        """
        Computes the forward pass for a linear (fully-connected) layer.
        The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
        examples, where each example x[i] has shape (d_1, ..., d_k). We will
        reshape each input into a vector of dimension D = d_1 * ... * d_k, and
        then transform it to an output vector of dimension M.
        Inputs:
        - x: A tensor containing input data, of shape (N, d_1, ..., d_k)
        - w: A tensor of weights, of shape (D, M)
        - b: A tensor of biases, of shape (M,)
        Returns a tuple of:
        - out: output, of shape (N, M)
        - cache: (x, w, b)
        """
        # Flatten each example to a row of length D = w.shape[0], then affine.
        out = x.reshape(-1, w.shape[0]).mm(w) + b
        cache = (x, w, b)
        return out, cache

    @staticmethod
    def backward(dout, cache):
        """
        Computes the backward pass for a linear layer.
        Inputs:
        - dout: Upstream derivative, of shape (N, M)
        - cache: Tuple of:
            - x: Input data, of shape (N, d_1, ... d_k)
            - w: Weights, of shape (D, M)
            - b: Biases, of shape (M,)
        Returns a tuple of:
        - dx: Gradient w.r.t. x, of shape (N, d_1, ..., d_k)
        - dw: Gradient w.r.t. w, of shape (D, M)
        - db: Gradient w.r.t. b, of shape (M,)
        """
        # Unpack the cache
        x, w, b = cache

        # Gradient w.r.t. the input, restored to x's original shape.
        dx = dout.mm(w.t()).reshape(x.shape)
        # Fixed typo: `x.sahpe` -> `x.shape`. reshape() also avoids view()'s
        # contiguity requirement.
        dw = x.reshape(x.shape[0], -1).t().mm(dout)
        # Fixed: the bias gradient sums over the batch dimension (dim=0),
        # yielding shape (M,); the original summed dim=1, giving shape (N,).
        db = dout.sum(dim=0)

        return dx, dw, db


class ReLU(object):
    """Elementwise rectified linear unit with explicit forward/backward passes."""

    @staticmethod
    def forward(x):
        """
        Computes the forward pass for a layer of rectified linear units (ReLUs).
        Inputs:
        - x: a tensor of any shape
        Returns a tuple of:
        - out: a tensor of the same shape as x
        - cache: x
        """
        # Fixed: the original called torch.max with only the zero tensor,
        # dropping x entirely (and returning a scalar). clamp(min=0) computes
        # the intended elementwise max(x, 0) without mutating x.
        out = x.clamp(min=0.)
        cache = x
        return out, cache

    @staticmethod
    def backward(dout, cache):
        """
        Computes the backward pass for a layer of rectified linear units (ReLUs).
        Inputs:
        - dout: Upstream derivatives, of any shape
        - cache: Input x, of same shape as dout
        Returns:
        - dx: Gradient w.r.t. x
        """
        x = cache
        # Gradients pass through only where the input was strictly positive.
        dx = dout * (x > 0).to(dout.dtype)

        return dx


class Linear_ReLU(object):
    """Convenience layer chaining a linear transform and a ReLU."""

    @staticmethod
    def forward(x, w, b):
        """
        Performs a linear transform followed by a ReLU.
        Inputs:
        - x: Input to the linear layer
        - w, b: Weights for the linear layer
        Returns a tuple of:
        - out: Output from the ReLU
        - cache: Objects to give to the backward pass
        """
        pre_act, linear_cache = Linear.forward(x, w, b)
        out, relu_cache = ReLU.forward(pre_act)
        return out, (linear_cache, relu_cache)

    @staticmethod
    def backward(dout, cache):
        """
        Backward pass for the linear-ReLU convenience layer.
        """
        linear_cache, relu_cache = cache
        dpre_act = ReLU.backward(dout, relu_cache)
        dx, dw, db = Linear.backward(dpre_act, linear_cache)
        return dx, dw, db


#################################
# Loss layers                   #
#################################
def svm_loss(x, y):
    """
    Compute the loss and gradient for multiclass SVM classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i,j] is the score for the
      jth class for the ith input
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i]
      and 0 <= y[i] < C
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss w.r.t. x
    """
    N = x.shape[0]
    correct_class_scores = x[torch.arange(N), y]
    margins = (x - correct_class_scores[:, None] + 1.).clamp(min=0.)
    # Fixed: zero out the margin at the correct class of each row. The
    # original indexed margins with the float score tensor itself instead of
    # the (row, label) pair.
    margins[torch.arange(N), y] = 0.
    loss = margins.sum() / N
    # Each positive margin contributes +1 to that class and -1 to the correct
    # class of the same sample.
    num_pos = (margins > 0).sum(dim=1)
    dx = torch.zeros_like(x)
    dx[margins > 0] = 1.
    dx[torch.arange(N), y] -= num_pos.to(dx.dtype)
    dx /= N
    return loss, dx


def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.
    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C
    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    N = x.shape[0]
    rows = torch.arange(N)
    # Shift by the row-wise max for numerical stability before exponentiating.
    shifted = x - x.max(dim=1, keepdim=True).values
    log_norm = shifted.exp().sum(dim=1, keepdim=True).log()
    log_probs = shifted - log_norm
    loss = -log_probs[rows, y].sum() / N
    # Gradient: softmax probabilities minus the one-hot labels, averaged.
    dx = log_probs.exp()
    dx[rows, y] -= 1
    dx /= N
    return loss, dx


class TwoLayerNet(object):
    """
    A two-layer fully-connected network: Linear - ReLU - Linear - softmax.
    Learnable parameters live in self.params with keys W1/b1 (first layer)
    and W2/b2 (second layer). L2 regularization strength is self.reg.
    """

    def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100,
                 num_class=10, weight_scale=1e-3, reg=0., dtype=torch.float32,
                 device='cpu'):
        """
        Initialize weights from N(0, weight_scale^2) and biases to zero.
        Inputs:
        - input_dim: Dimension D of the (flattened) input
        - hidden_dim: Number of hidden units
        - num_class: Number of output classes
        - weight_scale: Std-dev of the Gaussian weight initialization
        - reg: L2 regularization strength
        - dtype / device: Placement of the parameter tensors
        """
        self.params = {}
        self.reg = reg

        # Fixed: this was written with list-literal brackets around dict
        # items, which is a SyntaxError.
        kw = {'device': device, 'dtype': dtype}
        self.params['W1'] = torch.randn(
            (input_dim, hidden_dim), **kw) * weight_scale
        self.params['b1'] = torch.zeros(hidden_dim, **kw)
        self.params['W2'] = torch.randn(
            (hidden_dim, num_class), **kw) * weight_scale
        self.params['b2'] = torch.zeros(num_class, **kw)

    def loss(self, X, y=None):
        """
        Compute scores (if y is None) or (loss, grads) for a minibatch.
        Inputs:
        - X: Input data of shape (N, d_1, ..., d_k)
        - y: Labels of shape (N,), or None for test-time forward pass
        Returns:
        - scores of shape (N, num_class) when y is None, otherwise a tuple
          (loss, grads) with grads keyed like self.params.
        """
        hidden, hidden_cache = Linear_ReLU.forward(
            X, self.params['W1'], self.params['b1'])
        scores, cache = Linear.forward(
            hidden, self.params['W2'], self.params['b2'])

        if y is None:
            return scores

        loss, dout = softmax_loss(scores, y)
        loss += self.reg * \
                ((self.params['W1'] ** 2).sum() + (self.params['W2'] ** 2).sum())

        # Fixed: grads was never initialized, and the backward calls were
        # swapped (the Linear cache was fed to Linear_ReLU.backward and
        # vice versa).
        grads = {}
        dhidden, grads['W2'], grads['b2'] = Linear.backward(dout, cache)
        _, grads['W1'], grads['b1'] = Linear_ReLU.backward(dhidden, hidden_cache)

        # Fixed: include the gradient of the L2 term (d/dW of reg*||W||^2).
        grads['W2'] += 2. * self.reg * self.params['W2']
        grads['W1'] += 2. * self.reg * self.params['W1']

        return loss, grads


class FullyConnectedNet(object):
    """
    A fully-connected network with an arbitrary number of hidden layers:

        {Linear - ReLU - [Dropout]} x (num_layers - 1) - Linear - softmax

    Learnable parameters are stored in self.params as W1/b1 ... WL/bL.
    """

    def __init__(self, hidden_dims, input_dim=3 * 32 * 32, num_class=10,
                 dropout=0., reg=0., weight_scale=1e-2, seed=None,
                 dtype=torch.float, device='cpu'):
        """
        Inputs:
        - hidden_dims: List of sizes of the hidden layers
        - input_dim: Dimension D of the (flattened) input
        - num_class: Number of output classes
        - dropout: Drop probability; 0 disables dropout
        - reg: L2 regularization strength
        - weight_scale: Std-dev of the Gaussian weight initialization
        - seed: Optional RNG seed for deterministic dropout
        - dtype / device: Placement of the parameter tensors
        """
        self.use_dropout = dropout != 0
        self.reg = reg
        self.num_layers = 1 + len(hidden_dims)
        self.dtype = dtype
        self.params = {}

        kw = {'dtype': dtype, 'device': device}
        dims = [input_dim] + hidden_dims + \
               [num_class]  # len(dims) == num_layers + 1

        for i in range(len(dims) - 1):
            # Pass both sizes as one shape tuple (the original's parenthesis
            # placement happened to work but was misleading).
            self.params[f'W{i + 1}'] = torch.randn(
                (dims[i], dims[i + 1]), **kw) * weight_scale
            self.params[f'b{i + 1}'] = torch.zeros(dims[i + 1], **kw)

        self.dropout_param = {}
        if self.use_dropout:  # fixed: was `> 0` comparison on a bool
            self.dropout_param = {'mode': 'train', 'p': dropout}
            if seed is not None:
                self.dropout_param['seed'] = seed

    def loss(self, X, y=None):
        """
        Compute scores (if y is None) or (loss, grads) for a minibatch.
        Inputs:
        - X: Input data of shape (N, d_1, ..., d_k)
        - y: Labels of shape (N,), or None for test-time forward pass
        Returns:
        - scores of shape (N, num_class) when y is None, otherwise a tuple
          (loss, grads) with grads keyed like self.params.
        """
        X = X.to(self.dtype)
        mode = 'test' if y is None else 'train'

        if self.use_dropout:
            self.dropout_param['mode'] = mode

        hidden_num = self.num_layers - 1
        scores = X
        cache_history = []
        L2reg = 0.

        i = -1  # so a net with zero hidden layers still defines i below
        for i in range(hidden_num):
            scores, cache = Linear_ReLU.forward(scores,
                                                self.params[f'W{i + 1}'],
                                                self.params[f'b{i + 1}'])
            cache_history.append(cache)
            if self.use_dropout:
                scores, cache = Dropout.forward(scores, self.dropout_param)
                cache_history.append(cache)

            L2reg += torch.sum(self.params[f'W{i + 1}'] ** 2)

        i += 1
        # Fixed typo: the result was assigned to `socres`, silently
        # discarding the final linear layer's output.
        scores, cache = Linear.forward(scores, self.params[f'W{i + 1}'],
                                       self.params[f'b{i + 1}'])
        cache_history.append(cache)
        L2reg += torch.sum(self.params[f'W{i + 1}'] ** 2)
        L2reg *= self.reg

        if mode == 'test':
            return scores

        loss, grads = 0.0, {}
        loss, dout = softmax_loss(scores, y)
        # Fixed: the regularization term was computed but never added.
        loss += L2reg
        dout, grads[f'W{i + 1}'], grads[f'b{i + 1}'] = Linear.backward(
            dout, cache_history.pop())
        grads[f'W{i + 1}'] += 2. * self.reg * self.params[f'W{i + 1}']
        i -= 1

        while i >= 0:
            if self.use_dropout:
                dout = Dropout.backward(dout, cache_history.pop())
            dout, grads[f'W{i + 1}'], grads[f'b{i + 1}'] = Linear_ReLU.backward(
                dout, cache_history.pop())
            grads[f'W{i + 1}'] += self.reg * 2. * self.params[f'W{i + 1}']
            i -= 1
        return loss, grads


def sgd(w, dw, config=None):
    """
    Performs vanilla stochastic gradient descent.
    config format:
    - learning_rate: Scalar learning rate.
    """
    config = {} if config is None else config
    lr = config.setdefault('learning_rate', 1e-2)

    # Update the weights in place, as the original did.
    w -= lr * dw
    return w, config


def sgd_momentum(w, dw, config=None):
    """
    Performs stochastic gradient descent with momentum.
    config format:
    - learning_rate: Scalar learning rate
    - momentum: Scalar between 0 and 1 giving the momentum value.
        Setting momentum = 0 reduces to sgd
    - velocity: A tensor of the same shape as w and dw used to
        store a moving average of the gradients
    """
    if config is None:
        config = {}

    lr = config.setdefault('learning_rate', 1e-2)
    mu = config.setdefault('momentum', 0.9)

    # Decay the previous velocity and step against the gradient.
    velocity = config.get('velocity', torch.zeros_like(w))
    velocity = mu * velocity - lr * dw
    config['velocity'] = velocity

    return w + velocity, config


def rmsprop(w, dw, config=None):
    """
    Uses the RMSProp update rule, which uses a moving average of squared
    gradient values to set adaptive per-parameter learning rates.
    config format:
    - learning_rate: Scalar learning rate.
    - decay_rate: Scalar between 0 and 1 giving the decay rate for the squared
        gradient cache.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - cache: Moving average of second moments of gradients.
    """
    if config is None:
        config = {}

    lr = config.setdefault('learning_rate', 1e-2)
    rho = config.setdefault('decay_rate', 0.99)
    eps = config.setdefault('epsilon', 1e-8)
    config.setdefault('cache', torch.zeros_like(w))

    # Exponential moving average of the squared gradient.
    cache = rho * config['cache'] + (1 - rho) * dw * dw
    config['cache'] = cache

    # Scale the step by the root of the smoothed second moment.
    next_w = w - lr * dw / torch.sqrt(cache + eps)

    return next_w, config


def adam(w, dw, config=None):
    """
    Uses the Adam update rule, which incorporates moving averages of both the
    gradient and its square and a bias correction term.
    config format:
    - learning_rate: Scalar learning rate.
    - beta1: Decay rate for moving average of first moment of gradient.
    - beta2: Decay rate for moving average of second moment of gradient.
    - epsilon: Small scalar used for smoothing to avoid dividing by zero.
    - m: Moving average of gradient.
    - v: Moving average of squared gradient.
    - t: Iteration number.
    """
    if config is None:
        config = {}
    lr = config.setdefault('learning_rate', 1e-3)
    beta1 = config.setdefault('beta1', 0.9)
    beta2 = config.setdefault('beta2', 0.999)
    eps = config.setdefault('epsilon', 1e-8)
    config.setdefault('m', torch.zeros_like(w))
    config.setdefault('v', torch.zeros_like(w))
    config.setdefault('t', 0)

    # Advance the step counter, then update both moment estimates.
    config['t'] += 1
    t = config['t']
    config['m'] = beta1 * config['m'] + (1.0 - beta1) * dw
    config['v'] = beta2 * config['v'] + (1.0 - beta2) * dw * dw

    # Bias-corrected moments compensate for zero initialization.
    m_hat = config['m'] / (1 - beta1 ** t)
    v_hat = config['v'] / (1 - beta2 ** t)

    next_w = w - lr * m_hat / (torch.sqrt(v_hat) + eps)

    return next_w, config


class Dropout(object):
    """
    Performs the forward pass for (inverted) dropout.
    Inputs:
    - x: Input data: tensor of any shape
    - dropout_param: A dictionary with the following keys:
        - p: Dropout parameter. We *drop* each neuron output with probability p.
        - mode: 'test' or 'train'. If the mode is train, then perform dropout;
        if the mode is test, then just return the input.
        - seed: Seed for the random number generator. Passing seed makes this
        function deterministic, which is needed for gradient checking but not
        in real networks.
    Outputs:
    - out: Tensor of the same shape as x.
    - cache: tuple (dropout_param, mask). In training mode, mask is the dropout
        mask that was used to multiply the input; in test mode, mask is None.
    """

    @staticmethod
    def forward(x, dropout_param):
        p, mode = dropout_param['p'], dropout_param['mode']
        if 'seed' in dropout_param:
            torch.manual_seed(dropout_param['seed'])

        mask = None  # fixed: was undefined in test mode (NameError at cache)
        if mode == 'train':
            # Fixed: inverted dropout keeps a unit with probability (1 - p)
            # and rescales by 1/(1 - p). The original drew from a *normal*
            # distribution (randn), kept with `< p`, and divided by p.
            mask = (torch.rand(x.shape, dtype=x.dtype,
                               device=x.device) > p) / (1 - p)
            # Fixed: build a new tensor instead of mutating the caller's x.
            out = x * mask
        elif mode == 'test':
            out = x

        cache = (dropout_param, mask)
        return out, cache

    @staticmethod
    def backward(dout, cache):
        # Fixed name: callers (e.g. FullyConnectedNet.loss) invoke
        # Dropout.backward; the method was defined as `dropout_backward`.
        dropout_param, mask = cache
        mode = dropout_param['mode']
        dx = None

        if mode == 'train':
            dx = dout * mask
        elif mode == 'test':
            dx = dout

        return dx

    # Backward-compatible alias for any external callers of the old name.
    dropout_backward = backward


if __name__ == '__main__':
    # This module is a library of layers/optimizers; no demo script is run.
    pass
