from toolz.itertoolz import sliding_window, partition_all
import numpy as np
from random import shuffle


def sigmoid(z):
    """Logistic function: squashes real values into (0, 1)."""
    return np.reciprocal(1.0 + np.exp(-z))


def sigmoid_derivative(z):
    """Derivative of the logistic function: s'(z) = s(z) * (1 - s(z))."""
    s = 1.0 / (1.0 + np.exp(-z))
    return s * (1.0 - s)


def relu(z):
    """Rectified linear unit: clamp negative values to zero."""
    return np.clip(z, 0, None)


def step(z):
    """Step function: 0.0 where z is negative, 1.0 elsewhere (ReLU derivative)."""
    return np.logical_not(z < 0.0).astype(np.float64)


def tanh(z):
    """Hyperbolic tangent activation, delegating to NumPy's vectorized ufunc."""
    return np.tanh(z)


def sech(z):
    """Hyperbolic secant: the reciprocal of cosh."""
    return np.reciprocal(np.cosh(z))


def log(z):
    """Element-wise natural log that is safe for non-positive inputs.

    BUGFIX: the original called ``np.log(z, where=z > 0)`` without ``out=``;
    NumPy leaves the masked (z <= 0) entries *uninitialized* in that case,
    so callers could receive arbitrary garbage. Supplying a zero-filled
    output array makes z <= 0 map deterministically to 0.0.

    :param z: scalar or array-like
    :return: float64 array, log(z) where z > 0 and 0.0 elsewhere
    """
    z = np.asarray(z, dtype=float)
    return np.log(z, out=np.zeros_like(z), where=z > 0)


def cost(activations, labels):
    """Binary cross-entropy, averaged along axis 0 (keepdims)."""
    log_likelihood = labels * log(activations) + (1 - labels) * log(1 - activations)
    return -np.mean(log_likelihood, axis=0, keepdims=True)


class NeuralNetwork:
    """Fully-connected feed-forward network: ReLU hidden layers, sigmoid output.

    Trained by mini-batch gradient descent on binary cross-entropy, with
    optional L2 regularization and an RMSProp-style per-parameter scaling
    of the updates (controlled by ``decay`` and the ``sw`` / ``sb`` state).
    """

    def __init__(self, *layers):
        """
        :param layers: unit count of each layer, input layer first,
                       e.g. ``NeuralNetwork(784, 30, 10)``
        """
        self.layers_count = len(layers) - 1  # excluding input layer
        self.biases = [np.random.randn(l, 1) for l in layers[1:]]
        # 2/sqrt(fan_in) initialization scale for the ReLU hidden layers.
        self.weights = [np.random.randn(n, p) * 2 / np.sqrt(p)
                        for p, n in sliding_window(2, layers)]
        # The last layer feeds a sigmoid, not a ReLU: halve its scale.
        self.weights[-1] /= 2

    def train(self, training_set, dev_set, learning_rate, epochs, mini_batch_size, regularization=0, decay=0.0):
        """Run ``epochs`` passes of mini-batch gradient descent.

        :param training_set: list of (x, y) pairs; NOTE: shuffled in place
        :param dev_set: list of (x, y) pairs, used only for validation
        :param learning_rate: gradient step size
        :param epochs: number of full passes over ``training_set``
        :param mini_batch_size: examples per gradient step
        :param regularization: L2 penalty coefficient
        :param decay: decay rate of the squared-gradient running averages
                      (0.0 means no averaging across batches)
        :return: per-epoch list of {'train': cost, 'dev': cost} dicts
        """
        costs = []
        # Running averages of squared gradients (RMSProp state), per layer.
        sw = [np.zeros_like(w) for w in self.weights]
        sb = [np.zeros_like(b) for b in self.biases]
        for epoch in range(epochs):
            shuffle(training_set)  # in place: the caller's list is reordered
            _cost = self._train(training_set, dev_set, learning_rate, mini_batch_size, regularization, decay, sw, sb)
            costs.append(_cost)
            print("Epoch {epoch}:\ttrain={train}\tdev={dev}".format(
                epoch=epoch,
                train=_cost['train'],
                dev=_cost['dev']
            ))
        return costs

    def predict(self, x):
        """Forward-propagate inputs through the network.

        :param x: layer0 * m matrix (one column per example; m may be 1)
        :return: layer_last * m matrix of output activations
        """
        activation = x
        for l in range(self.layers_count):
            activation_function = self._activation_function_for(l)
            activation = activation_function(self.weights[l].dot(activation) + self.biases[l])
        return activation

    def _train(self, training_set, dev_set, learning_rate, mini_batch_size, regularization, decay, sw, sb):
        """One epoch: a gradient step per mini-batch, then cost reporting."""
        for mini_batch in partition_all(mini_batch_size, training_set):
            self._train_with_mini_batch(mini_batch, learning_rate, regularization, decay, sw, sb)
        return self._validate(training_set, dev_set)

    def _train_with_mini_batch(self, mini_batch, learning_rate, regularization, decay, sw, sb):
        """Single gradient step on one mini-batch of (x, y) pairs."""
        inputs = np.array([pair[0] for pair in mini_batch]).T  # layer0 * m (m is mini_batch_size)
        labels = np.array([pair[1] for pair in mini_batch]).T  # layer_last * m

        # weighted_inputs: [layer1 * m, ..., layer_last * m] (`self.layers_count` items)
        # activations: [layer0 * m, ..., layer_last * m] (`self.layers_count + 1` items)
        weighted_inputs, activations = self._forward_propagate(inputs)

        # deltas: [layer1 * m, ..., layer_last * m] (`self.layers_count` items)
        deltas = self._back_propagate(labels, weighted_inputs, activations)

        self._update_weights(activations, deltas, learning_rate, regularization, decay, sw)
        self._update_biases(deltas, learning_rate, decay, sb)

    def _forward_propagate(self, inputs):
        """
        :param inputs: a features * batch-size matrix
        :return: (weighted_inputs, activations) lists, one entry per layer
                 (activations additionally keeps the input as its first entry)
        """
        weighted_inputs = []
        activations = [inputs]
        for l in range(self.layers_count):
            activation_function = self._activation_function_for(l)
            weighted_input = self.weights[l].dot(activations[-1]) + self.biases[l]
            activation = activation_function(weighted_input)
            weighted_inputs.append(weighted_input)
            activations.append(activation)
        return weighted_inputs, activations

    def _back_propagate(self, labels, weighted_inputs, activations):
        """
        :param labels: layer_last * m
        :param weighted_inputs: [layer1 * m, ..., layer_last * m]
        :param activations: [layer0 * m, ..., layer_last * m]
        :return: deltas, [layer1 * m, ..., layer_last * m]
        """
        # Sigmoid output + cross-entropy cost cancel to this simple form.
        delta_last = activations[-1] - labels
        deltas = [delta_last]
        for l in range(self.layers_count - 2, -1, -1):
            # step() is the derivative of the ReLU hidden-layer activation.
            delta = self.weights[l + 1].T.dot(deltas[-1]) * step(weighted_inputs[l])
            deltas.append(delta)
        deltas.reverse()
        return deltas

    def _activation_function_for(self, layer):
        """ReLU for hidden layers, sigmoid for the output layer."""
        if layer == self.layers_count - 1:
            return sigmoid
        else:
            return relu

    def _update_weights(self, activations, deltas, learning_rate, regularization, decay, sw):
        """
        :param activations: [layer0 * m, ..., layer_last * m]
        :param deltas: [layer1 * m, ..., layer_last * m]
        :param learning_rate: number
        """
        for l in range(self.layers_count):
            m = np.size(deltas[l], axis=1)
            delta_weight = (deltas[l].dot(activations[l].T) + regularization * self.weights[l]) / m
            sw[l] = decay * sw[l] + (1 - decay) * np.square(delta_weight)
            # BUGFIX: np.divide with `where=` but no `out=` leaves masked
            # entries uninitialized; zero-fill so sw == 0 means no update.
            scaled = np.divide(delta_weight, np.sqrt(sw[l]),
                               out=np.zeros_like(delta_weight), where=sw[l] > 0)
            self.weights[l] -= learning_rate * scaled

    def _update_biases(self, deltas, learning_rate, decay, sb):
        """
        :param deltas: [layer1 * m, ..., layer_last * m]
        :param learning_rate: number
        """
        for l in range(self.layers_count):
            delta_bias = np.mean(deltas[l], axis=1, keepdims=True)
            sb[l] = decay * sb[l] + (1 - decay) * np.square(delta_bias)
            # BUGFIX: same uninitialized-output issue as in _update_weights.
            scaled = np.divide(delta_bias, np.sqrt(sb[l]),
                               out=np.zeros_like(delta_bias), where=sb[l] > 0)
            self.biases[l] -= learning_rate * scaled

    def _validate(self, training_set, dev_set):
        """Cost of the current model on both the training and dev sets."""
        cost_train = self._validate_with(training_set)
        cost_dev = self._validate_with(dev_set)
        return {'train': cost_train, 'dev': cost_dev}

    def _validate_with(self, data_set):
        """Mean cross-entropy cost over a list of (x, y) pairs."""
        x = np.array([t[0] for t in data_set]).T
        y = np.array([t[1] for t in data_set]).T
        a = self.predict(x)
        return np.mean(cost(a, y))
