# Fully connected (dense) layer
import numpy as np
from Layer import Layer


class FullyConnected(Layer):
    """Fully connected (dense) layer: y = x @ W (+ bias).

    The bias is folded into the weight matrix as one extra row; ``forward``
    augments the input with a column of ones so that the last row of
    ``self.weights`` acts as the bias vector.
    """

    def __init__(self, input_size, output_size):
        """Initialize weights uniformly in [-0.2, 0.2).

        :param input_size: number of input features per sample
        :param output_size: number of output features per sample
        """
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.input_tensor = None   # augmented input cached by the last forward pass
        self.output_tensor = None
        self.error = None          # error tensor cached by the last backward pass
        # Extra row (+1) holds the bias; see class docstring.
        self.weights = np.random.uniform(-0.2, 0.2, (self.input_size + 1, self.output_size))
        # Gradients start at zero (same shape as the parameters): calling
        # update_weights() before any backward() must be a no-op, not an
        # update by leftover random values.
        self.gradient_w = np.zeros_like(self.weights)
        # Kept for interface compatibility; the effective bias actually lives
        # in the last weight row and these two are unused by forward/backward.
        self.biases = np.random.uniform(-0.2, 0.2, self.output_size)
        self.gradient_b = np.zeros_like(self.biases)

    def forward(self, input_tensor):
        """Compute the affine transform for a batch.

        :param input_tensor: array of shape (batch_size, input_size)
        :return: array of shape (batch_size, output_size)
        """
        # Append a ones column so the bias row of self.weights participates
        # in the matrix product; cache the augmented input for backward().
        ones = np.ones((input_tensor.shape[0], 1))
        self.input_tensor = np.concatenate((input_tensor, ones), axis=1)
        return np.dot(self.input_tensor, self.weights)

    def backward(self, error_tensor):
        """Propagate the error and store the weight gradient.

        :param error_tensor: gradient w.r.t. this layer's output,
            shape (batch_size, output_size)
        :return: gradient w.r.t. this layer's input,
            shape (batch_size, input_size)
        """
        self.error = error_tensor
        self.gradient_w = np.dot(self.input_tensor.T, error_tensor)
        # The back-propagated error has a component for the appended ones
        # column; drop it before handing the error to the previous layer.
        propagated = np.dot(error_tensor, self.weights.T)
        return np.delete(propagated, -1, axis=1)

    def update_weights(self):
        """Apply one gradient-descent step, averaging over the batch."""
        batch_size = self.input_tensor.shape[0]
        self.weights -= self.gradient_w / batch_size * self.learning_rate
