import json
import random

import numpy as np

import mnist_loader
from network import sigmoid, sigmoid_prime, Network


class CrossEntropyCost:
    """Cross-entropy cost for a sigmoid output layer."""

    @staticmethod
    def fn(a, y):
        """Return the cost of output ``a`` against desired output ``y``.

        ``nan_to_num`` keeps the sum finite when an output saturates
        (``a`` exactly 0 or 1 produces ``0 * log(0)`` NaNs).
        """
        per_neuron = -y * np.log(a) - (1 - y) * np.log(1 - a)
        return np.sum(np.nan_to_num(per_neuron))

    @staticmethod
    def delta(z, a, y):
        """Output-layer error; ``z`` is unused but kept so both cost
        classes expose the same interface."""
        return a - y


class QuadraticCost:
    """Quadratic (mean-squared-error style) cost."""

    @staticmethod
    def fn(a, y):
        """Return half the squared Euclidean distance between ``a`` and ``y``."""
        diff = a - y
        return (np.linalg.norm(diff) ** 2) / 2

    @staticmethod
    def delta(z, a, y):
        """Output-layer error; includes the sigmoid derivative factor,
        which is what slows learning on saturated neurons."""
        return np.multiply(a - y, sigmoid_prime(z))


class Network2(Network):
    """Feed-forward network with a pluggable cost and L2 regularization.

    Extends ``Network`` with the 1/sqrt(fan-in) weight initialization and
    the regularized, monitored SGD training loop.
    """

    def __init__(self, sizes, cost=CrossEntropyCost):
        # ``sizes`` lists the neuron count per layer, e.g. [784, 30, 10].
        self.num_layers = len(sizes)
        self.sizes = sizes
        self.default_weight_initializer()
        self.cost = cost

    def default_weight_initializer(self):
        """Gaussian init; weights scaled by 1/sqrt(fan-in) so early-layer
        activations are less likely to saturate the sigmoid."""
        self.bias = [np.random.randn(y, 1) for y in self.sizes[1:]]
        self.weights = [np.random.randn(y, x) / np.sqrt(x)
                        for x, y in zip(self.sizes[:-1], self.sizes[1:])]

    def SGD(self, tr, epochs, batch_size, learn_rate, lmbda=0,
            evaluation_data=None,
            monitor_evaluation_cost=False,
            monitor_evaluation_accuracy=False,
            monitor_training_cost=False,
            monitor_training_accuracy=False):
        """Train with mini-batch stochastic gradient descent.

        ``tr`` (and ``evaluation_data``, if given) arrive as a pair of
        parallel sequences (inputs, labels) and are re-packed into lists
        of ``(x, y)`` tuples.  ``lmbda`` is the L2 regularization
        strength.  Returns four lists with one entry per epoch for each
        enabled monitor (empty when the corresponding flag is off).
        """
        n = len(tr[0])
        tr = list(zip(tr[0], tr[1]))
        if evaluation_data:
            n_data = len(evaluation_data[0])
            evaluation_data = list(zip(evaluation_data[0], evaluation_data[1]))

        evaluation_cost, evaluation_accuracy = [], []
        training_cost, training_accuracy = [], []
        for j in range(epochs):
            # Fix: shuffle each epoch so mini-batches differ between
            # epochs; without this the "stochastic" descent replays the
            # exact same fixed batches every time.
            random.shuffle(tr)
            mini_batches = [tr[k: k + batch_size] for k in range(0, n, batch_size)]
            for mini_batch in mini_batches:
                self.update_batch(mini_batch, learn_rate, lmbda, n)
            print('epoch %s training complete' % j)
            if monitor_training_cost:
                cost = self.total_cost(tr, lmbda)
                training_cost.append(cost)
                print('cost on training data: {}'.format(cost))
            if monitor_training_accuracy:
                # convert=True: training labels are one-hot vectors.
                accuracy = self.accuracy(tr, convert=True)
                training_accuracy.append(accuracy)
                print('accuracy on training data: {}/ {}'.format(accuracy, n))
            if monitor_evaluation_cost:
                cost = self.total_cost(evaluation_data, lmbda, convert=True)
                evaluation_cost.append(cost)
                print('cost on evaluation data: {}'.format(cost))
            if monitor_evaluation_accuracy:
                accuracy = self.accuracy(evaluation_data)
                evaluation_accuracy.append(accuracy)
                # Fix: reuse the value just computed instead of running a
                # second full forward pass over the evaluation set.
                print('accuracy on evaluation data: {} / {}'.format(accuracy, n_data))
            print()
        return evaluation_cost, evaluation_accuracy, training_cost, training_accuracy

    def update_batch(self, one_batch, learn_rate, lmbda, n):
        """Apply one gradient-descent step from the samples in ``one_batch``.

        ``n`` is the size of the whole training set; the L2 decay factor
        is scaled by it so regularization strength does not depend on the
        batch size.  Biases are deliberately not regularized.
        """
        grad_b = [np.zeros(b.shape) for b in self.bias]
        grad_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in one_batch:
            # backprop returns the per-sample cost gradients w.r.t. biases
            # and weights (defined on the base Network class).
            delta_b_sample, delta_w_sample = self.backprop(x, y)
            grad_b = [gb + db for gb, db in zip(grad_b, delta_b_sample)]
            grad_w = [gw + dw for gw, dw in zip(grad_w, delta_w_sample)]
        step = learn_rate / len(one_batch)
        decay = 1 - learn_rate * (lmbda / n)  # L2 weight-decay multiplier
        self.weights = [decay * w - step * gw for w, gw in zip(self.weights, grad_w)]
        self.bias = [b - step * gb for b, gb in zip(self.bias, grad_b)]

    def accuracy(self, data, convert=False):
        """Count samples whose arg-max network output matches the label.

        Use ``convert=True`` when labels are one-hot vectors (training
        data); leave it False when labels are plain digit indices
        (validation/test data).
        """
        if convert:
            results = [(np.argmax(self.feedforward(x)), np.argmax(y)) for x, y in data]
        else:
            results = [(np.argmax(self.feedforward(x)), y) for x, y in data]
        return sum(int(predicted == actual) for predicted, actual in results)

    def total_cost(self, data, lmbda, convert=False):
        """Return the average per-sample cost plus the L2 penalty.

        Use ``convert=True`` when labels are digit indices that must be
        one-hot encoded before comparison with the network output.
        """
        data_len = len(data)
        cost = 0.0
        for x, y in data:
            a = self.feedforward(x)
            if convert:
                y = vectorized_result(y)
            cost += self.cost.fn(a, y) / data_len
        # Regularization term: (lmbda / 2n) * sum ||w||^2 over all layers.
        cost += 0.5 * (lmbda / data_len) * sum(np.linalg.norm(w) ** 2 for w in self.weights)
        return cost

    def save(self, filename):
        """Serialize sizes, weights, biases and the cost class name to
        ``filename`` as JSON."""
        data = {'sizes': self.sizes,
                'weights': [w.tolist() for w in self.weights],
                'biases': [b.tolist() for b in self.bias],
                'cost': str(self.cost.__name__)}
        with open(filename, 'w') as f:
            json.dump(data, f)


def vectorized_result(j):
    """One-hot encode digit label ``j`` as a (10, 1) column vector."""
    onehot = np.zeros((10, 1))
    onehot[j, 0] = 1.0
    return onehot


if __name__ == '__main__':
    # Train a 784-30-10 cross-entropy network on MNIST with full monitoring.
    training_data, validation_data, _test_data = mnist_loader.load_data_wrapper()
    model = Network2([784, 30, 10], cost=CrossEntropyCost)
    model.SGD(training_data, 30, 10, .5,
              lmbda=1.8,
              evaluation_data=validation_data,
              monitor_evaluation_accuracy=True,
              monitor_evaluation_cost=True,
              monitor_training_accuracy=True,
              monitor_training_cost=True)
