import random
import numpy as np

# https://zhuanlan.zhihu.com/p/148102828

class Network(object):
    """Fully-connected feed-forward neural network trained with mini-batch SGD.

    Sigmoid activations on every layer, a cross-entropy cost, and L2 weight
    decay.  ``sizes`` lists the neuron count per layer, e.g. ``[784, 30, 10]``.
    Training data samples are ``(x, y)`` pairs of column vectors/one-hot labels;
    evaluation data pairs the input with an integer class index.
    """

    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One bias column vector per non-input layer, standard-normal init.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights[i] has shape (sizes[i+1], sizes[i]) so w @ a maps layer i to i+1.
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
        self.lmbda = 0.1  # L2 regularization strength

    def feedforward(self, a):
        """Return the network output for input column vector (or matrix) ``a``."""
        for b, w in zip(self.biases, self.weights):
            a = sigmoid(np.dot(w, a) + b)
        return a

    def SGD(self, training_data, epochs, mini_batch_size, eta, val_data, test_data=None):
        """Train with mini-batch stochastic gradient descent.

        Repeats ``epochs`` passes over ``training_data`` (shuffled each pass,
        split into batches of ``mini_batch_size``) with learning rate ``eta``.
        After every epoch the validation accuracy is measured; the test set,
        when provided, is only evaluated on a new validation best.
        Note: shuffles ``training_data`` in place.
        """
        n = len(training_data)
        n_val = len(val_data)
        best_accuracy = 0.0
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in range(0, n, mini_batch_size)]

            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)

            # Epoch finished: validate, and only touch the test set on improvement.
            val_accuracy = self.evaluate(val_data) / n_val

            if val_accuracy > best_accuracy:
                best_accuracy = val_accuracy
                if test_data is not None:
                    # Original code called len(test_data) unconditionally and
                    # crashed when test_data was None (the default) — guard it.
                    n_test = len(test_data)
                    test_num = self.evaluate(test_data)
                    print(f"迭代次数： {j + 1}，验证集精度{best_accuracy * 100}% ，测试集预测准确率: {(test_num / n_test) * 100}%")
                else:
                    print(f"迭代次数： {j + 1}，验证集精度{best_accuracy * 100}%")
            else:
                print(f'迭代次数：{j + 1},验证精度比前面那个小了,不进行测试')

    def update_mini_batch(self, mini_batch, eta):
        """Pack one mini-batch column-wise and take a single gradient step."""
        m = len(mini_batch)
        # Use the declared layer sizes instead of hard-coded 784/10 so the
        # class works for any topology its constructor accepts.
        x_matrix = np.zeros((self.sizes[0], m))
        y_matrix = np.zeros((self.sizes[-1], m))
        for i, (x, y) in enumerate(mini_batch):
            x_matrix[:, i] = x.flatten()  # flatten to a 1-D column
            y_matrix[:, i] = y.flatten()
        self.backprop_matrix(x_matrix, y_matrix, m, eta)

    def backprop_matrix(self, x, y, m, eta):
        """Backpropagate a whole batch (``m`` columns) and update parameters.

        ``x``/``y`` are (input_size, m) and (output_size, m) matrices.  Bias
        gradients average over the batch; weight gradients divide by ``m`` and
        include the L2 weight-decay term.
        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]

        # Forward pass, remembering every weighted input z and activation.
        activation = x
        activations = [x]  # per-layer activation matrices
        zs = []  # per-layer weighted-input matrices
        for w, b in zip(self.weights, self.biases):
            z = np.dot(w, activation) + b
            activation = sigmoid(z)
            zs.append(z)
            activations.append(activation)

        # Output-layer error.  NOTE: the previous version added the scalar L2
        # *cost penalty* to this derivative, which is dimensionally wrong; L2
        # regularization contributes (lmbda/m)*w to the weight gradient instead.
        delta = self.cross_entropy_cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = np.mean(delta, axis=1, keepdims=True)
        nabla_w[-1] = (np.dot(delta, activations[-2].transpose()) / m
                       + (self.lmbda / m) * self.weights[-1])

        # Propagate the error backwards through the hidden layers.
        for l in range(2, self.num_layers):
            sp = sigmoid_prime(zs[-l])
            delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
            nabla_b[-l] = np.mean(delta, axis=1, keepdims=True)
            nabla_w[-l] = (np.dot(delta, activations[-l - 1].transpose()) / m
                           + (self.lmbda / m) * self.weights[-l])

        # Gradient-descent parameter update.
        for l in range(1, self.num_layers):
            self.biases[-l] = self.biases[-l] - eta * nabla_b[-l]
            self.weights[-l] = self.weights[-l] - eta * nabla_w[-l]

    def evaluate(self, test_data):
        """Return how many ``(x, y)`` pairs the network classifies correctly.

        ``y`` is expected to be an integer class index; the prediction is the
        argmax of the output activations.
        """
        test_results = [(np.argmax(self.feedforward(x)), y)
                        for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)

    def L2_regularization(self, weights, m):
        """Scalar L2 cost penalty (lmbda / 2m) * sum(w^2) for reporting."""
        return (self.lmbda / (m * 2)) * np.sum(np.square(weights))

    def cross_entropy_cost_derivative(self, a, y):
        """Derivative of the cross-entropy cost w.r.t. the prediction matrix ``a``.

        ``a`` holds predictions, ``y`` the one-hot targets.
        """
        epsilon = 1e-7
        a = a + epsilon  # avoid division by zero
        dc = -y / a
        return dc


def sigmoid(z):
    """Numerically stable element-wise sigmoid 1 / (1 + exp(-z)).

    The previous version chose one formula for the WHOLE array with
    ``np.all(z >= 0)``: a mixed-sign array fell into the exp(z)/(1+exp(z))
    branch, where large positive entries overflow to inf/inf = nan.  Pick the
    stable formula per element instead, so exp() is only ever called on
    non-positive values.
    """
    z = np.asarray(z, dtype=float)
    result = np.empty_like(z)
    positive = z >= 0
    # z >= 0: exp(-z) <= 1, cannot overflow.
    result[positive] = 1.0 / (1.0 + np.exp(-z[positive]))
    # z < 0: exp(z) < 1, cannot overflow.
    exp_z = np.exp(z[~positive])
    result[~positive] = exp_z / (1.0 + exp_z)
    return result

# Derivative of the sigmoid function.
def sigmoid_prime(z):
    """Return sigmoid'(z) = sigmoid(z) * (1 - sigmoid(z)), element-wise.

    Computes sigmoid(z) once instead of twice as the original did.
    """
    s = sigmoid(z)
    return s * (1 - s)


# def cross_entropy_cost(a, y):
#     # a是预测值矩阵，y是真实值矩阵
#     n = a.shape[1]  # 样本数量
#     return -np.sum(y * np.log(a)) / n  # 交叉熵代价函数
#
#
# def relu(z):
#     return np.maximum(0, z)
#
#
# def relu_prime(z):
#     # return np.array(x > 0, dtype=x.dtype)
#     return (z > 0).astype(int)  # relu函数的导数

# # 交叉熵代价函数和L2正则化项  -(self.lmbda / m) * self.weights[-1]  # 加入L2正则化项
#
# def cost_function(output_activations, y):
#     return np.sum(np.nan_to_num(-y * np.log(output_activations) - (1 - y) * np.log(1 - output_activations)))
# def L2_regularization(self, lmbda, weights):
#     return lmbda * np.sum(np.square(weights)) / 2.0
#
# def cost_function_with_regularization(output_activations, y, weights, lmbda):
#     return cost_function(output_activations, y) + L2_regularization(lmbda, weights)
