from numpy import *


class Network(object):
    """A fully connected feed-forward neural network trained by
    mini-batch stochastic gradient descent with backpropagation.

    Attributes:
        num_layers: number of layers, including input and output.
        layers_sizes: list of layer sizes, e.g. [784, 30, 10].
        Thetas: weight matrices; Thetas[l] has shape
            (layers_sizes[l+1], layers_sizes[l] + 1) — the extra column
            multiplies the bias unit prepended to each activation.
    """

    def __init__(self, layers_sizes):
        self.num_layers = len(layers_sizes)
        self.layers_sizes = layers_sizes
        self.Thetas = self.rand_init_theta()

    def rand_init_theta(self):
        """Return weight matrices drawn from a standard normal, one per
        pair of adjacent layers, each with an extra bias column."""
        return [random.randn(n_out, n_in + 1)
                for n_in, n_out in zip(self.layers_sizes[:-1],
                                       self.layers_sizes[1:])]

    def feed_forward(self, x):
        """Propagate an input column vector through the network.

        :param x: column vector of shape (layers_sizes[0], 1)
        :return: output activation, shape (layers_sizes[-1], 1)
        """
        a = x
        for Theta in self.Thetas:
            # Prepend the bias unit before applying this layer's weights.
            a = concatenate((ones((1, 1)), a))
            a = sigmoid(dot(Theta, a))
        return a

    def SGD(self, training_set, iter, mini_batch_size, alpha, test_set=None):
        """Train with mini-batch stochastic gradient descent.

        :param training_set: list of (x, y) column-vector pairs;
            shuffled in place each epoch.
        :param iter: number of epochs (name kept for backward
            compatibility even though it shadows the builtin).
        :param mini_batch_size: examples per parameter update.
        :param alpha: learning rate.
        :param test_set: optional list of (x, label) pairs; if given,
            classification accuracy is reported after each epoch.
        """
        if test_set:
            num_test = len(test_set)
        num_train = len(training_set)
        for j in range(iter):
            random.shuffle(training_set)
            mini_batch_set = [training_set[k:k + mini_batch_size]
                              for k in range(0, num_train, mini_batch_size)]
            for mini_batch in mini_batch_set:
                self.mini_batch_update(mini_batch, alpha)
            # Single-argument print(...) is valid in both Python 2
            # (parenthesized expression) and Python 3.
            if test_set:
                print("Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_set), num_test))
            else:
                print("Epoch {0} complete".format(j))
        return

    def mini_batch_update(self, mini_batch, alpha):
        """Take one gradient-descent step using the examples in mini_batch."""
        nabla_thetas = [zeros(Theta.shape) for Theta in self.Thetas]
        for x, y in mini_batch:
            Thetas_gradient = self.back_prop(x, y)
            nabla_thetas = [nabla_theta + Theta_gradient
                            for nabla_theta, Theta_gradient in
                            zip(nabla_thetas, Thetas_gradient)]
        # Average the summed gradients over the batch and step downhill.
        self.Thetas = [Theta - alpha / len(mini_batch) * Theta_grad
                       for Theta, Theta_grad in
                       zip(self.Thetas, nabla_thetas)]

    def back_prop(self, x, y):
        """Backpropagation for a single example.

        :param x: input column vector.
        :param y: target output column vector.
        :return: gradient of the quadratic cost w.r.t. each matrix in
            self.Thetas (list of arrays, matching shapes).
        """
        # Forward pass, remembering each bias-extended activation and
        # each pre-activation Z.
        Activation = x
        Activations = []
        Zs = []
        for Theta in self.Thetas:
            Activation = concatenate((ones((1, 1)), Activation))
            Activations.append(Activation)
            Z = dot(Theta, Activation)
            Zs.append(Z)
            Activation = sigmoid(Z)
        Activations.append(Activation)
        # Backward pass. Deltas entries are assigned before use, so
        # plain placeholders suffice (the old zeros(Theta.shape) init
        # had the wrong shapes anyway and was always overwritten).
        Deltas = [None] * len(self.Thetas)
        Thetas_gradient = [zeros(Theta.shape) for Theta in self.Thetas]
        Deltas[-1] = (Activations[-1] - y) * sigmoid_gradient(Zs[-1])
        Thetas_gradient[-1] = dot(Deltas[-1], Activations[-2].T)
        for L in range(2, self.num_layers):
            # Drop the bias row ([1:, ]) before propagating the error back.
            Deltas[-L] = dot(self.Thetas[-L + 1].T,
                             Deltas[-L + 1])[1:, ] * sigmoid_gradient(Zs[-L])
            # dot(...) here for consistency with the output layer; for
            # column vectors it equals the broadcast product used before.
            Thetas_gradient[-L] = dot(Deltas[-L], Activations[-L - 1].T)
        return Thetas_gradient

    def evaluate(self, test_data):
        """Return how many test examples are classified correctly, where
        the prediction is the index of the maximal output unit."""
        test_results = [(argmax(self.feed_forward(x)), y)
                        for (x, y) in test_data]
        return sum(int(P == y) for (P, y) in test_results)

    def toMatrix(self, data_set):
        """Stack the column-vector examples of data_set into matrices.

        Fixed: the original referenced X and y before assignment and
        passed the data array as the *axis* argument of concatenate.

        :param data_set: iterable of (x, y) pairs; assumes each x and y
            is 2-D with a single column — TODO confirm against callers.
        :return: (X, y) with one example per column.
        """
        X = concatenate([data[0] for data in data_set], axis=1)
        y = concatenate([data[1] for data in data_set], axis=1)
        return X, y

def sigmoid(z):
    """Logistic sigmoid 1 / (1 + e^-z), applied elementwise on arrays."""
    denom = exp(-z) + 1.0
    return 1.0 / denom

def sigmoid_gradient(z):
    """Derivative of the logistic sigmoid, elementwise at z.

    Evaluates sigmoid(z) once instead of twice (the original called it
    twice per invocation).
    """
    s = sigmoid(z)
    return s * (1.0 - s)
