import numpy as np
from utils.features import prepare_for_training
from utils.hypothesis import sigmoid, sigmoid_gradient


class MultilayerPerceptron:
    """Fully-connected feed-forward network trained with batch gradient descent.

    Weights for each layer transition are stored as a dict
    ``{layer_index: ndarray of shape (out_count, in_count + 1)}`` — the extra
    input column is the bias term.
    """

    def __init__(self, data, labels, layers, normalize_data=False):
        """
        :param data: feature matrix, shape (num_examples, num_features)
        :param labels: class labels, shape (num_examples, 1)
        :param layers: neuron count per layer, e.g. (784, 25, 10)
        :param normalize_data: whether to normalize the features
        """
        # NOTE(review): prepare_for_training is assumed to prepend a bias
        # column of ones (forward/back propagation rely on the input already
        # carrying the bias feature) — confirm in utils.features.
        data_processed = prepare_for_training(data, normalize_data=normalize_data)[0]
        self.data = data_processed
        self.labels = labels
        self.layers = layers    # e.g. (784, 25, 10)
        self.normalize_data = normalize_data
        self.thetas = MultilayerPerceptron.thetas_init(layers)

    def train(self, max_iterations=1000, alpha=0.1):
        """Run gradient descent on the stored training data.

        :param max_iterations: number of gradient-descent iterations
        :param alpha: learning rate
        :return: (optimized thetas dict, list of cost values per iteration)
        """
        # Flatten all parameter matrices into one vector so a single
        # vectorized update step can be applied.
        unrolled_thetas = MultilayerPerceptron.thetas_unroll(self.thetas)
        optimized_theta, cost_history = MultilayerPerceptron.gradient_descent(
            self.data, self.labels, self.layers, unrolled_thetas, max_iterations, alpha
        )
        self.thetas = MultilayerPerceptron.thetas_roll(optimized_theta, self.layers)
        return self.thetas, cost_history

    def predict(self, data):
        """Predict class indices for a test set.

        :param data: test feature matrix, shape (num_examples, num_features)
        :return: predicted label indices, shape (num_examples, 1)
        """
        data_processed = prepare_for_training(data, normalize_data=self.normalize_data)[0]
        num_examples = data_processed.shape[0]
        predictions = MultilayerPerceptron.forward_propagation(data_processed, self.thetas, self.layers)
        # The argmax index equals the predicted label because training drives
        # the output unit at the true label's one-hot index toward 1.
        return np.argmax(predictions, axis=1).reshape(num_examples, 1)

    @staticmethod
    def thetas_init(layers):
        """Randomly initialize weights (bias column included).

        :param layers: neuron count per layer
        :return: dict {layer_index: ndarray (out_count, in_count + 1)}
        """
        num_layers = len(layers)
        thetas = {}
        for layer_index in range(num_layers - 1):
            # +1 accounts for the bias term folded into the weight matrix.
            in_count = layers[layer_index] + 1
            out_count = layers[layer_index + 1]
            # Keep initial values small so sigmoid does not saturate.
            thetas[layer_index] = np.random.rand(out_count, in_count) * 0.05
        return thetas

    @staticmethod
    def thetas_unroll(thetas):
        """Flatten all weight matrices into a single 1-D vector.

        :param thetas: dict {layer_index: weight matrix}
        :return: 1-D ndarray with all parameters concatenated in layer order
        """
        num_theta_layers = len(thetas)
        # Single concatenate instead of repeated hstack: same result,
        # avoids O(n^2) reallocation as the vector grows.
        flattened = [thetas[layer_index].flatten() for layer_index in range(num_theta_layers)]
        if not flattened:
            return np.array([])
        return np.concatenate(flattened)

    @staticmethod
    def thetas_roll(unrolled_thetas, layers):
        """Restore the flat parameter vector back into per-layer matrices.

        :param unrolled_thetas: 1-D parameter vector (see thetas_unroll)
        :param layers: neuron count per layer
        :return: dict {layer_index: ndarray (out_count, in_count + 1)}
        """
        num_layers = len(layers)
        thetas = {}
        unrolled_shift = 0
        for layer_index in range(num_layers - 1):
            in_count = layers[layer_index] + 1
            out_count = layers[layer_index + 1]
            start_index = unrolled_shift
            end_index = unrolled_shift + in_count * out_count
            thetas[layer_index] = unrolled_thetas[start_index:end_index].reshape(out_count, in_count)
            unrolled_shift = end_index
        return thetas

    @staticmethod
    def gradient_descent(data, labels, layers, unrolled_thetas, max_iterations, alpha):
        """Batch gradient descent over the flattened parameter vector.

        :param unrolled_thetas: initial parameters (not modified)
        :return: (optimized flat parameter vector, cost history list)
        """
        # Work on a copy: the in-place `-=` below would otherwise silently
        # mutate the caller's `unrolled_thetas` array.
        optimized_theta = unrolled_thetas.copy()
        cost_history = []
        for _ in range(max_iterations):
            thetas = MultilayerPerceptron.thetas_roll(optimized_theta, layers)
            # Cost is recorded BEFORE the update, so cost_history[i] is the
            # cost of the parameters entering iteration i.
            cost = MultilayerPerceptron.cost_function(data, labels, thetas, layers)
            cost_history.append(cost)
            theta_gradient = MultilayerPerceptron.gradient_step(data, labels, layers, optimized_theta)
            optimized_theta -= alpha * theta_gradient
        return optimized_theta, cost_history

    @staticmethod
    def cost_function(data, labels, thetas, layers):
        """Cross-entropy cost over all examples.

        :param labels: class labels, shape (num_examples, 1)
        :return: scalar cost (lower is better)
        """
        num_examples = data.shape[0]
        num_labels = layers[-1]
        # Forward pass: per-class probabilities, shape (num_examples, num_labels).
        predictions = MultilayerPerceptron.forward_propagation(data, thetas, layers)
        # One-hot encode the labels.
        bitwise_labels = np.zeros((num_examples, num_labels))
        for example_index in range(num_examples):
            bitwise_labels[example_index, labels[example_index][0]] = 1
        # NOTE(review): log(0) yields -inf if a prediction fully saturates;
        # acceptable here since sigmoid output is in the open interval (0, 1).
        bit_set_cost = np.sum(np.log(predictions[bitwise_labels == 1]))
        bit_not_set_cost = np.sum(np.log(1 - predictions[bitwise_labels == 0]))
        # Negate and average: standard cross-entropy form.
        cost = (-1 / num_examples) * (bit_set_cost + bit_not_set_cost)
        return cost

    @staticmethod
    def gradient_step(data, labels, layers, optimized_theta):
        """Compute one iteration's gradient as a flat vector.

        :param optimized_theta: current flat parameter vector
        :return: flat gradient vector (same shape as optimized_theta)
        """
        thetas = MultilayerPerceptron.thetas_roll(optimized_theta, layers)
        # Back-propagation yields per-layer gradient matrices; flatten them
        # to match the flat parameter vector used by gradient descent.
        thetas_rolled_gradients = MultilayerPerceptron.back_propagation(data, labels, layers, thetas)
        thetas_unrolled_gradients = MultilayerPerceptron.thetas_unroll(thetas_rolled_gradients)
        return thetas_unrolled_gradients

    @staticmethod
    def forward_propagation(data, thetas, layers):
        """Forward pass through all layers with sigmoid activations.

        :param data: input matrix (bias column expected as first column)
        :return: output activations, shape (num_examples, layers[-1])
        """
        num_layers = len(layers)
        num_examples = data.shape[0]
        in_layer_activation = data
        # Propagate layer by layer.
        for layer_index in range(num_layers - 1):
            theta = thetas[layer_index]
            out_layer_activation = sigmoid(in_layer_activation @ theta.T)
            # The bias column goes in FRONT, matching the column that the
            # preprocessing step prepends to the raw features.
            out_layer_activation = np.hstack((np.ones((num_examples, 1)), out_layer_activation))
            in_layer_activation = out_layer_activation
        # Sigmoid keeps outputs in (0, 1), interpretable as probabilities.
        # Strip the bias column appended in the final loop iteration.
        return in_layer_activation[:, 1:]

    @staticmethod
    def back_propagation(data, labels, layers, thetas):
        """Per-example back-propagation, accumulated and averaged.

        :return: dict {layer_index: gradient matrix matching thetas[layer_index]}
        """
        num_layers = len(layers)
        num_examples, num_features = data.shape
        num_labels = layers[-1]

        # Accumulators for the gradient of each layer's weights.
        deltas = {}
        for layer_index in range(num_layers - 1):
            in_count = layers[layer_index] + 1
            out_count = layers[layer_index + 1]
            deltas[layer_index] = np.zeros((out_count, in_count))

        # Process one example at a time.
        for example_index in range(num_examples):

            # Forward pass for this single example (column vectors).
            layer_inputs = {}
            layer_activations = {0: data[example_index, :].reshape((num_features, 1))}
            for layer_index in range(num_layers - 1):
                layer_theta = thetas[layer_index]                                       # current layer weights
                layer_input = layer_theta @ layer_activations[layer_index]              # pre-activation (z)
                layer_activation = np.vstack((np.array([[1]]), sigmoid(layer_input)))   # activation with bias row prepended
                layer_inputs[layer_index + 1] = layer_input
                layer_activations[layer_index + 1] = layer_activation
            # Output-layer activations without the bias row.
            output_layer_activation = layer_activations[num_layers - 1][1:, :]

            # Per-example error terms.
            delta = {}
            # One-hot encode this example's label.
            bitwise_labels = np.zeros((num_labels, 1))
            bitwise_labels[labels[example_index][0]] = 1

            # Output-layer error: prediction minus one-hot target.
            delta[num_layers - 1] = output_layer_activation - bitwise_labels
            # Hidden-layer errors, propagated backwards.
            for layer_index in range(num_layers - 2, 0, -1):
                layer_theta = thetas[layer_index]           # weights of this layer, e.g. (10, 26)
                next_delta = delta[layer_index + 1]         # error of the next layer, e.g. (10, 1)
                layer_input = layer_inputs[layer_index]     # pre-activation of this layer, e.g. (25, 1)
                # Map the error back through the weights, drop the bias row,
                # and scale by the sigmoid derivative at the pre-activation.
                delta[layer_index] = (layer_theta.T @ next_delta)[1:, :] * sigmoid_gradient(layer_input)

            # Accumulate this example's gradient contribution.
            for layer_index in range(num_layers - 1):
                # e.g. (25, 785) = (25, 1) @ (1, 785)
                layer_delta = delta[layer_index + 1] @ layer_activations[layer_index].T
                deltas[layer_index] += layer_delta

        # Average the accumulated gradients over all examples.
        for layer_index in range(num_layers - 1):
            deltas[layer_index] *= (1 / num_examples)

        return deltas
