# Neural network model: NeuralNetwork

import numpy as np


class NeuralNetwork:
    """A simple fully-connected feed-forward neural network with sigmoid
    activations, trained by mini-batch stochastic gradient descent."""

    def __init__(self, layer_sizes):
        """
        :param layer_sizes: neurons per layer, input layer first,
            output layer last (e.g. [784, 30, 9])
        """
        self.num_layers = len(layer_sizes)  # total number of layers
        self.layers = layer_sizes  # neuron count of each layer
        # Biases for the hidden and output layers (the input layer has none).
        # NOTE: attribute name "biasis" kept for backward compatibility.
        self.biasis = [np.random.randn(y, 1) for y in layer_sizes[1:]]
        # Weight matrix between layer i (x units) and layer i+1 (y units),
        # stored as shape (y, x) so that activations propagate as w @ a + b.
        self.weights = [np.random.randn(y, x)
                        for x, y in zip(layer_sizes[:-1], layer_sizes[1:])]

    # Logistic sigmoid activation
    def Sigmoid(self, z):
        """Elementwise sigmoid 1 / (1 + e^-z)."""
        return 1.0 / (1.0 + np.exp(-z))

    # Derivative of the sigmoid
    def Sigmoid_prime(self, z):
        """Elementwise sigmoid derivative s(z) * (1 - s(z)).

        Computes the sigmoid once instead of twice (the original evaluated
        self.Sigmoid(z) two times).
        """
        s = self.Sigmoid(z)
        return s * (1 - s)

    # Derivative of the quadratic cost
    def cost_func(self, output_layer_values, y):
        """
        :param output_layer_values: network output activations
        :param y: ground-truth targets (same shape as the outputs)
        :return: elementwise gradient of the quadratic cost, (output - y)
        """
        return (output_layer_values - y)

    # Forward propagation
    def feedforward(self, x):
        """Propagate input columns through every layer.

        :param x: input of shape (n_inputs, n_samples)
        :return: output activations of shape (n_outputs, n_samples)
        """
        for w, b in zip(self.weights, self.biasis):
            x = self.Sigmoid(np.dot(w, x) + b)
        return x

    # Backpropagation: gradients of the cost w.r.t. weights and biases
    def backpropagation(self, X, Y):
        """Compute gradients for one mini-batch.

        :param X: mini-batch inputs, shape (n_samples, n_inputs)
        :param Y: mini-batch targets, shape (n_samples, n_outputs)
            # assumes Y is already one-hot / vector-valued — TODO confirm,
            # fit() appears to pass raw integer labels
        :return: (delta_b, delta_w) — gradients summed over the batch,
            matching the shapes of self.biasis and self.weights
        """
        delta_b = [np.zeros(b.shape) for b in self.biasis]
        delta_w = [np.zeros(w.shape) for w in self.weights]
        # Forward pass, keeping every pre-activation z and activation.
        activation = np.transpose(X)
        activations = [activation]
        zs = []
        for b, w in zip(self.biasis, self.weights):
            z = np.dot(w, activation) + b
            zs.append(z)
            activation = self.Sigmoid(z)
            activations.append(activation)
        # Output-layer error: dC/da * s'(z).
        Y = np.transpose(Y)
        delta = np.multiply(self.cost_func(activations[-1], Y),
                            self.Sigmoid_prime(zs[-1]))
        delta_b[-1] = np.sum(delta, axis=1, keepdims=True)
        delta_w[-1] = np.dot(delta, np.transpose(activations[-2]))
        # Propagate the error backwards through the hidden layers.
        for i in range(2, self.num_layers):
            sp = self.Sigmoid_prime(zs[-i])
            delta = np.multiply(
                np.dot(np.transpose(self.weights[-i + 1]), delta), sp)
            delta_b[-i] = np.sum(delta, axis=1, keepdims=True)
            delta_w[-i] = np.dot(delta, np.transpose(activations[-i - 1]))
        return delta_b, delta_w

    # Mini-batch stochastic gradient descent
    def fit(self, X, Y, learnrate, mini_batch_size, ephocs=1000):
        """Train the network with mini-batch SGD.

        Bug fix vs. the original: batch sampling and the weight updates were
        dedented OUT of the epoch loop, so the loop only re-sampled random
        offsets and the network was actually trained once, after the last
        epoch. They now run inside every epoch.

        :param X: training inputs, shape (N, n_inputs)
        :param Y: integer class labels (see one-hot conversion below)
        :param learnrate: gradient-descent step size
        :param mini_batch_size: samples per mini-batch
        :param ephocs: number of epochs (misspelled name kept for
            backward compatibility)
        """
        N = len(X)
        for i in range(ephocs):
            # Random mini-batch start offsets. "+ 1" makes the last valid
            # start index (N - mini_batch_size) reachable and avoids a
            # ValueError when N == mini_batch_size (randint's high bound
            # is exclusive).
            randomlist = np.random.randint(0, N - mini_batch_size + 1,
                                           int(N / mini_batch_size))
            batch_X = [X[k:k + mini_batch_size] for k in randomlist]
            batch_Y = [Y[k:k + mini_batch_size] for k in randomlist]
            for j in range(len(batch_X)):
                delta_b, delta_w = self.backpropagation(batch_X[j], batch_Y[j])
                self.weights = [w - (learnrate / mini_batch_size) * dw
                                for w, dw in zip(self.weights, delta_w)]
                self.biasis = [b - (learnrate / mini_batch_size) * db
                               for b, db in zip(self.biasis, delta_b)]
            # Report training-set accuracy every 100 epochs.
            if (i + 1) % 100 == 0:
                labels = self.predict(X)
                acc = 0.0
                # Convert integer labels to one-hot rows over 9 classes:
                # label k > 0 sets index k - 1; label 0 stays all-zero.
                # NOTE(review): presumably Y holds labels in 0..9 — confirm
                # against the caller.
                Y_Onehot = []
                for m in range(len(Y)):
                    temp = [0] * 9
                    if Y[m] != 0:
                        temp[Y[m] - 1] = 1
                    Y_Onehot.append(temp)
                Y_Onehot = np.array(Y_Onehot)
                for k in range(len(labels)):
                    # predict() returns 0-based argmax indices, which already
                    # address the one-hot columns directly. The original used
                    # labels[k] - 1, which is off by one and wraps index 0
                    # to the last column.
                    if Y_Onehot[k, labels[k]] == 1.0:
                        acc += 1.0
                print("iterations %d accuracy %.3f" % (i + 1, acc / len(labels)))

    # Prediction
    def predict(self, x):
        """Return the 0-based argmax output index for each sample.

        :param x: inputs of shape (n_samples, n_inputs)
        :return: list of predicted class indices, one per sample
        """
        results = self.feedforward(x.T)
        labels = [np.argmax(results[:, y]) for y in range(results.shape[1])]
        return labels
