from __future__ import division
import math
import torch
import numpy as np
import matplotlib.pyplot as plt


# Generate random weights for a small toy model
def get_toy_data(num_inputs=5, input_size=4, hidden_size=10, num_classes=3,
                 dtype=torch.float32, device='cuda'):
    """Generate random weights and inputs for a small toy model.

    Inputs:
    - num_inputs: number N of toy data points (the hard-coded labels
      below assume N == 5).
    - input_size: dimension D of each input.
    - hidden_size: number H of hidden units.
    - num_classes: number C of output classes.
    - dtype: dtype of the generated tensors.
    - device: device to create tensors on (default 'cuda', matching the
      original behavior; pass 'cpu' to run without a GPU).

    Returns a tuple (toy_X, toy_y, params) where params maps
    'W1'/'b1'/'W2'/'b2' to randomly-initialized tensors.
    """
    N = num_inputs
    D = input_size
    H = hidden_size
    C = num_classes

    # Set the random seed for repeatable experiments.
    # NOTE(review): `coutils` is used here but never imported in this file —
    # confirm it is provided by the execution environment.
    coutils.utils.fix_random_seed()

    # Generate some random parameters, storing them in a dict.
    params = {}
    # FIX: the original called .to(type) — the `type` builtin — instead of
    # .to(dtype), so W1 silently kept the default dtype.
    params['W1'] = 1e-4 * torch.randn(D, H, device=device).to(dtype)
    params['b1'] = torch.zeros(H, device=device).to(dtype)
    params['W2'] = 1e-4 * torch.randn(H, C, device=device).to(dtype)
    params['b2'] = torch.zeros(C, device=device).to(dtype)

    # Generate some random inputs and fixed labels (assumes N == 5).
    toy_X = 10.0 * torch.randn(N, D, device=device).to(dtype)
    toy_y = torch.tensor([0, 1, 2, 2, 1], dtype=torch.int64, device=device)

    return toy_X, toy_y, params


def nn_loss_part1(params, X, y=None, reg=0.0):
    """Forward pass of a two-layer net: ReLU hidden layer, linear output.

    Inputs:
    - params: dict with keys 'W1' (D, H), 'b1' (H,), 'W2' (H, C), 'b2' (C,).
    - X: input tensor of shape (N, D).
    - y, reg: unused here; kept for a signature parallel to nn_loss_part2.

    Returns:
    - scores: class scores of shape (N, C).
    - hidden: post-ReLU hidden activations of shape (N, H).
    """
    W1, b1 = params['W1'], params['b1']
    W2, b2 = params['W2'], params['b2']
    # FIX: the original built the ReLU with torch.max against a hard-coded
    # CUDA tensor, which fails for CPU inputs; clamp(min=0) is the identical
    # elementwise ReLU and runs on whatever device X lives on.
    hidden = (X.mm(W1) + b1.view(1, -1).to(X.dtype)).clamp(min=0)
    scores = hidden.mm(W2) + b2.view(1, -1)  # shape (N, C)

    return scores, hidden


def nn_loss_part2(params, X, y=None, reg=0.0):
    """Compute the softmax loss and gradients of a two-layer net.

    Inputs:
    - params: dict with keys 'W1', 'b1', 'W2', 'b2'.
    - X: input tensor of shape (N, D).
    - y: optional integer labels of shape (N,). When None, only the class
      scores are returned.
    - reg: L2 regularization strength.

    Returns:
    - scores of shape (N, C) when y is None; otherwise a tuple (loss, grads)
      where grads maps each parameter name to its gradient.
    """
    W1, b1 = params['W1'], params['b1']
    W2, b2 = params['W2'], params['b2']
    N, D = X.shape

    scores, h1 = nn_loss_part1(params, X, y, reg)
    # If the targets are not given, return only the scores.
    # FIX: the original used `if not y:`, which raises a RuntimeError for a
    # multi-element tensor (truth value of a tensor is ambiguous).
    if y is None:
        return scores

    ##########################################################################
    # Softmax loss: shift by the row-wise max for numerical stability.
    max_per_row = torch.max(scores, 1).values.reshape(-1, 1)
    scores = scores - max_per_row

    exp_scores = torch.exp(scores)                       # (N, C)
    row_sums = torch.sum(exp_scores, 1).reshape(N, 1)    # (N, 1) per-row sum
    probs = exp_scores / row_sums                        # broadcast divide

    # Loss is the mean negative log-probability of the correct class,
    # plus L2 regularization on the weights.
    correct_idx = (range(N), y)
    correct_probs = probs[correct_idx]
    loss = -1 * torch.sum(torch.log(correct_probs))  # sum of all L_i
    loss /= N
    loss += reg * (torch.sum(W1 ** 2) + torch.sum(W2 ** 2))

    # Backward pass: gradient of softmax loss w.r.t. scores is
    # probs with 1 subtracted at each correct-class entry.
    grads = {}
    dscores = probs.clone()
    dscores[correct_idx] -= 1

    dW2 = h1.t().mm(dscores)
    db2 = torch.sum(dscores, 0)

    dh1 = dscores.mm(W2.t())
    dh1[h1 == 0] = 0  # ReLU gate: no gradient through inactive units

    dW1 = X.t().mm(dh1)
    db1 = torch.sum(dh1, 0)

    grads['W2'] = dW2 / N + 2 * reg * W2
    grads['b2'] = db2 / N
    grads['W1'] = dW1 / N + 2 * reg * W1
    grads['b1'] = db1 / N

    return loss, grads


def compute_number_gradient(f, x, h=1e-7):
    """Compute a numeric (centered-difference) gradient of f at x.

    Inputs:
    - f: a function taking a tensor and returning a scalar tensor.
    - x: a PyTorch tensor; elements are perturbed in place and restored.
    - h: step size for the centered difference.

    Returns a tensor with the same shape as x holding the gradient df/dx.
    """
    # NOTE(review): .contiguous() returns a copy when x is non-contiguous,
    # which would break the in-place perturbation below — callers are
    # expected to pass contiguous tensors.
    flat_x = x.contiguous().view(-1)  # shares storage with x when contiguous
    grad = torch.zeros_like(x)
    flat_grad = grad.view(-1)
    # Iterate over every element of x.
    # FIX: removed the original's unused `fx = f(x)` call, which wasted a
    # full function evaluation.
    for i in range(flat_x.shape[0]):
        oldval = flat_x[i].item()
        flat_x[i] = oldval + h
        fxph = f(x).item()   # f(x + h)
        flat_x[i] = oldval - h
        fxmh = f(x).item()   # f(x - h)
        flat_x[i] = oldval   # restore the original value

        # Centered difference: (f(x+h) - f(x-h)) / (2h).
        flat_grad[i] = (fxph - fxmh) / (2 * h)

    return grad


def rel_error(x, y, eps=1e-10):
    """Return the relative error between tensors x and y.

    Computed as max|x - y| divided by max(|x| + |y|), with the denominator
    clamped to at least eps to avoid division by zero.
    """
    numerator = torch.abs(x - y).max().item()
    denominator = torch.clamp(x.abs() + y.abs(), min=eps).max().item()
    return numerator / denominator


def nn_train(params, loss_func, pred_func, X, y, X_val, y_val,
             learning_rate=1e-3, learning_rate_decay=0.95,
             reg=5e-6, num_iters=100,
             batch_size=200, verbose=False):
    """
    Train this neural network using stochastic gradient descent.

    Inputs:
    - params: a dictionary of PyTorch Tensors that store the weights of a
        model. It should have the following keys with shape
            W1: First layer weights; has shape (D, H)
            b1: First layer biases; has shape (H,)
            W2: Second layer weights; has shape (H, C)
            b2: Second layer biases; has shape (C,)

    - loss_func: a loss function that computes the loss and the gradients.
        It takes as input:
        - params: Same as input to nn_train
        - X_batch: A minibatch of inputs of shape (B, D)
        - y_batch: Ground-truth labels for X_batch
        - reg: Same as input to nn_train
        And it returns a tuple of:
        - loss: Scalar giving the loss on the minibatch
        - grads: Dictionary mapping parameter names to gradients of the loss
            with respect to the corresponding parameter.

    - pred_func: prediction function; takes (params, loss_func, X) and
        returns predicted labels for X.

    - X: A PyTorch tensor of shape (N, D) giving training data.

    - y: A PyTorch tensor of shape (N,) giving training labels; y[i] = c
        means that X[i] has label c, where 0 <= c < C.

    - X_val: A PyTorch tensor of shape (N_val, D) giving validation data.

    - y_val: A PyTorch tensor of shape (N_val,) giving validation labels.

    - learning_rate: Scalar giving learning rate for optimization.

    - learning_rate_decay: Scalar giving factor used to decay the learning
        rate after each epoch.

    - reg: Scalar giving regularization strength.

    - num_iters: Number of steps to take when optimizing.

    - batch_size: Number of training examples to use per step.

    - verbose: boolean; if true print progress during optimization.

    Returns: A dictionary giving statistics about the training process.
    """
    num_train = X.shape[0]
    iterations_per_epoch = max(num_train // batch_size, 1)

    # Use SGD to optimize the parameters.
    loss_history = []
    train_acc_history = []
    val_acc_history = []

    for it in range(num_iters):
        #########################################################
        # Create a random minibatch of training data and labels,#
        # storing them in X_batch and y_batch respectively.     #
        # (Sampling is with replacement.)                       #
        #########################################################
        random_ids = torch.randint(num_train, (batch_size,))
        X_batch = X[random_ids]  # (batch_size, D)
        y_batch = y[random_ids]  # (batch_size,)

        # Compute loss and gradients using the current minibatch.
        loss, grads = loss_func(params, X_batch, y_batch, reg=reg)
        loss_history.append(loss.item())

        # SGD update of the parameters.
        params['W2'] -= learning_rate * grads['W2']
        params['b2'] -= learning_rate * grads['b2']
        params['W1'] -= learning_rate * grads['W1']
        params['b1'] -= learning_rate * grads['b1']

        if verbose and it % 100 == 0:
            print('iteration %d / %d: loss %f' % (it, num_iters, loss.item()))

        if it % iterations_per_epoch == 0:
            # Check accuracy on the current batch and the validation set.
            y_train_pred = pred_func(params, loss_func, X_batch)
            train_acc = (y_train_pred == y_batch).float().mean().item()
            y_val_pred = pred_func(params, loss_func, X_val)
            val_acc = (y_val_pred == y_val).float().mean().item()
            train_acc_history.append(train_acc)
            val_acc_history.append(val_acc)

            # Decay the learning rate once per epoch.
            # FIX: the original ADDED the decay factor (+=), which grows the
            # learning rate each epoch; it must be multiplicative.
            learning_rate *= learning_rate_decay

    # FIX: the original `return` was indented inside the loop, so training
    # stopped after the first iteration regardless of num_iters.
    return {
        'loss_history': loss_history,
        'train_acc_history': train_acc_history,
        'val_acc_history': val_acc_history
    }


def nn_predict(params, loss_func, X):
    """
    Use the trained weights of this two-layer network to predict labels for
    data points. For each data point we predict scores for each of the C
    classes, and assign each data point to the class with the highest score.

    Inputs:
    - params: a dictionary of PyTorch Tensors that store the weights of a
        model. It should have the following keys with shape
            W1: First layer weights; has shape (D, H)
            b1: First layer biases; has shape (H,)
            W2: Second layer weights; has shape (H, C)
            b2: Second layer biases; has shape (C,)

    - loss_func: unused; kept so the signature matches the callers in
        nn_train and TwoLayerNet.

    - X: A PyTorch tensor of shape (N, D) giving N D-dimensional data points
        to classify.

    Returns:
    - y_pred: A PyTorch tensor of shape (N,) giving predicted labels for each
        of the elements of X. For all i, y_pred[i] = c means that X[i] is
        predicted to have class c, where 0 <= c < C.
    """
    # FIX: build the ReLU with clamp(min=0) instead of torch.max against a
    # hard-coded CUDA tensor, so prediction works on whatever device X is on.
    hidden = (X.mm(params['W1']) + params['b1']).clamp(min=0)
    output = hidden.mm(params['W2']) + params['b2']

    y_pred = torch.argmax(output, 1)
    return y_pred


# Wrap all functions in a class
class TwoLayerNet(object):
    """A two-layer fully-connected network bundled with its parameters.

    Weights are initialized to small random values and biases to zero.
    Both are stored in self.params, a dictionary with the keys:

        W1: First layer weights; has shape (D, H)
        b1: First layer biases; has shape (H,)
        W2: Second layer weights; has shape (H, C)
        b2: Second layer biases; has shape (C,)

    The public loss/train/predict methods delegate to the module-level
    nn_loss_part2 / nn_train / nn_predict functions via private hooks that
    take an explicit params dict.
    """

    def __init__(self, input_size, hidden_size, output_size, device='cuda', std=1e-4):
        """Create parameters for input dim D, hidden size H, C classes.

        Inputs:
        - input_size: The dimension D of the input data.
        - hidden_size: The number of neurons H in the hidden layer.
        - output_size: The number of classes C.
        - device: device to create the parameter tensors on.
        - std: scale of the random weight initialization.
        """
        # Fix the random seed before drawing parameters, for reproducibility.
        # NOTE(review): `coutils` is used but never imported in this file.
        coutils.utils.fix_random_seed()

        # Dict literals evaluate values in order, so the random draws happen
        # in the same sequence as sequential assignment would produce.
        self.params = {
            'W1': std * torch.randn(input_size, hidden_size, device=device),
            'b1': torch.zeros(hidden_size, device=device),
            'W2': std * torch.randn(hidden_size, output_size, device=device),
            'b2': torch.zeros(output_size, device=device),
        }

    def _loss(self, params, X, y=None, reg=0.0):
        """Hook: compute loss/grads with an explicit params dict."""
        return nn_loss_part2(params, X, y, reg)

    def loss(self, X, y=None, reg=0.0):
        """Compute loss (and grads when y is given) using self.params."""
        return self._loss(self.params, X, y, reg)

    def _train(self, params, loss_func, pred_func, X, y,
               X_val, y_val, learning_rate=1e-3, learning_rate_decay=0.95,
               reg=5e-6, num_iters=100,
               batch_size=200, verbose=False):
        """Hook: run SGD training on an explicit params dict."""
        return nn_train(params, loss_func, pred_func, X, y, X_val, y_val,
                        learning_rate, learning_rate_decay,
                        reg, num_iters, batch_size, verbose)

    def train(self, X, y, X_val, y_val,
              learning_rate=1e-3, learning_rate_decay=0.95,
              reg=5e-6, num_iters=100,
              batch_size=200, verbose=False):
        """Train this network with SGD; returns nn_train's stats dict."""
        return self._train(self.params, self._loss, self._predict,
                           X, y, X_val, y_val, learning_rate, learning_rate_decay,
                           reg, num_iters, batch_size, verbose)

    def _predict(self, params, loss_func, X):
        """Hook: predict labels with an explicit params dict."""
        return nn_predict(params, loss_func, X)

    def predict(self, X):
        """Predict class labels for X using self.params."""
        return self._predict(self.params, self._loss, X)


def get_CIFAR10_data(validation_ratio=0.05):
    """Load CIFAR-10, visualize a sample, preprocess, and split train/val.

    Inputs:
    - validation_ratio: fraction of the training set held out for validation.

    Returns a dict with keys 'X_train', 'y_train', 'X_val', 'y_val',
    'X_test', 'y_test'. Features are zero-centered and flattened to rows.

    NOTE(review): `coutils` is used but never imported in this file.
    """
    X_train, y_train, X_test, y_test = coutils.data.cifar10()

    # Move all data to the GPU.
    X_train, y_train = X_train.cuda(), y_train.cuda()
    X_test, y_test = X_test.cuda(), y_test.cuda()

    # 0. Show a few examples from the dataset.
    class_names = [
        'plane', 'car', 'bird', 'cat', 'deer',
        'dog', 'frog', 'horse', 'ship', 'truck'
    ]
    sample_img = coutils.utils.visualize_dataset(X_train, y_train, 12, class_names)
    plt.imshow(sample_img)
    plt.axis('off')  # hide the axes
    plt.show()

    # 1. Zero-center: subtract the mean image (averaged over N, H, W,
    # keeping dims so it broadcasts over the batch).
    mean_image = X_train.mean(dim=0, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
    X_train -= mean_image
    X_test -= mean_image

    # 2. Flatten each image into a single row.
    X_train = X_train.reshape(X_train.shape[0], -1)
    X_test = X_test.reshape(X_test.shape[0], -1)

    # 3. Carve the validation set off the end of the training set.
    n_train = int(X_train.shape[0] * (1.0 - validation_ratio))

    return {
        'X_val': X_train[n_train:],
        'y_val': y_train[n_train:],
        'X_train': X_train[0:n_train],
        'y_train': y_train[0:n_train],
        'X_test': X_test,
        'y_test': y_test
    }


# Debug the training
# 1. Plot the loss function and the accuracies on the training
# and validation sets during optimization
# 2. Visualize the weights that were learned in the first layer
# of the network.

# Plot the loss function and train  / validation accuracies
def plot_stats(stat_dict):
    """Plot the loss history and the train/val accuracy histories.

    Inputs:
    - stat_dict: the dict returned by nn_train, with keys 'loss_history',
        'train_acc_history', and 'val_acc_history'.
    """
    plt.subplot(1, 2, 1)
    # FIX: the original mistakenly called plt.subplot with the loss data,
    # which raises instead of drawing; plt.plot is intended.
    plt.plot(stat_dict['loss_history'], 'o')
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')

    plt.subplot(1, 2, 2)
    plt.plot(stat_dict['train_acc_history'], 'o-', label='train')
    plt.plot(stat_dict['val_acc_history'], 'o-', label='val')
    plt.title('Classification accuracy history')
    plt.xlabel('Epoch')
    # FIX: corrected the misspelled axis label ('Classifiction').
    plt.ylabel('Classification accuracy')
    plt.legend()

    plt.gcf().set_size_inches(14, 4)
    plt.show()


def visualize_grid(Xs, ubound=255.0, padding=1):
    """
      Reshape a 4D tensor of image data to a grid for easy visualization.

      Inputs:
      - Xs: Data of shape (N, H, W, C)
      - ubound: Output grid will have values scaled to the range [0, ubound]
      - padding: The number of blank pixels between elements of the grid

      Returns a tensor of shape (grid_height, grid_width, C) where each cell
      holds one image rescaled to [0, ubound]; unused cells stay zero.
    """
    (N, H, W, C) = Xs.shape
    # Lay the N images out on the smallest square grid that fits them.
    grid_size = int(math.ceil(math.sqrt(N)))
    grid_height = H * grid_size + padding * (grid_size - 1)
    grid_width = W * grid_size + padding * (grid_size - 1)
    grid = torch.zeros((grid_height, grid_width, C), device=Xs.device)
    next_idx = 0
    y0, y1 = 0, H
    for y in range(grid_size):
        x0, x1 = 0, W
        for x in range(grid_size):
            if next_idx < N:
                img = Xs[next_idx]
                low, high = torch.min(img), torch.max(img)
                # FIX: guard against a constant image (high == low), which
                # previously produced a 0/0 division and filled the cell
                # with NaN; leave such cells at zero instead.
                if high > low:
                    grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
                next_idx += 1
            x0 += W + padding
            x1 += W + padding
        y0 += H + padding
        y1 += H + padding
    return grid

def show_net_weights(net):
    """Visualize the learned first-layer weights as a grid of images.

    Reshapes W1 (presumably (3*32*32, H) for CIFAR-10 — confirm against the
    caller) into per-unit 32x32x3 images and displays them with matplotlib.
    """
    w1 = net.params['W1'].reshape(3, 32, 32, -1).transpose(0, 3)
    grid = visualize_grid(w1, padding=3)
    plt.imshow(grid.type(torch.uint8).cpu())
    plt.gca().axis('off')
    plt.show()

if __name__ == '__main__':
    # Load and preprocess CIFAR-10 (also shows a sample of the dataset).
    data_dict = get_CIFAR10_data()

    # Build a small two-layer network: 3x32x32 inputs, 36 hidden units,
    # 10 output classes.
    net = TwoLayerNet(input_size=3 * 32 * 32, hidden_size=36, output_size=10)

    # Train the network with SGD.
    stats = net.train(data_dict['X_train'], data_dict['y_train'],
                      data_dict['X_val'], data_dict['y_val'],
                      num_iters=500, batch_size=1000,
                      learning_rate=1e-2, learning_rate_decay=0.95,
                      reg=0.15, verbose=True)

    # Report accuracy on the validation set.
    val_preds = net.predict(data_dict['X_val'])
    val_acc = 100.0 * (val_preds == data_dict['y_val']).float().mean().item()
    print('Validation accuracy: %.2f%%' % val_acc)
