# Convolution layer
import numpy as np
from scipy import signal
from Layer import Layer


# Convolution layer class with configurable kernel count, convolution stride and
# receptive-field size. (Pooling is not handled here, despite what the original
# comment claimed — there are no pooling parameters in the constructor.)
class Conv(Layer):
    """2-D convolutional layer.

    Tensors use the layout [B, C, H, W] (batch, channels, height, width).
    Kernels are square with side length ``receptive_field`` (expected odd so
    the padding rules stay simple).  ``forward`` caches its input/output for
    ``backward``, which produces the error for the previous layer and the
    parameter gradients consumed by ``update_weights``.
    """

    def __init__(self, input_shape, kernel_num=5, conv_stride=2, receptive_field=3, learning_rate=0.0001):
        """Create the layer and randomly initialise its kernels.

        input_shape     -- shape of the incoming tensor [B, C, H, W],
                           e.g. [1, 1, 28, 28] right after the input layer
        kernel_num      -- number of convolution kernels in this layer
        conv_stride     -- convolution stride
        receptive_field -- side length of the square kernel (expected odd)
        learning_rate   -- step size used by update_weights()
        """
        super().__init__()
        self.input_shape = input_shape
        self.output_shape = None  # filled by cal_output_shape(); lets the next layer derive its sizes
        self.learning_rate = learning_rate
        self.kernel_num = kernel_num
        self.conv_stride = conv_stride
        self.receptive_field = receptive_field
        # One (C, F, F) weight tensor and one scalar bias per kernel,
        # uniformly initialised in [-0.5, 0.5).
        weights = []
        biases = []
        for _ in range(self.kernel_num):
            weights.append(np.random.rand(self.input_shape[1], self.receptive_field, self.receptive_field) - 0.5)
            biases.append(np.random.rand() - 0.5)
        self.weights = np.array(weights)
        self.biases = np.array(biases)
        # Gradient accumulators.  BUGFIX: gradient_b was previously initialised
        # with a copy of the biases instead of zeros.
        self.gradient_w = np.zeros(self.weights.shape)
        self.gradient_b = np.zeros(self.biases.shape)
        self.conv_input = []   # cached forward input (needed by backward)
        self.conv_output = []  # cached forward output
        self.error_to_former_layer = []
        self.cal_output_shape()

    # Compute the output shape implied by the input shape and the kernel setup.
    def cal_output_shape(self):
        """Determine self.output_shape by running a dummy forward pass on zeros."""
        dummy = np.zeros(self.input_shape)
        self.output_shape = self.forward(dummy).shape

    # Channel-wise wrapper around padding2d.
    def padding3d(self, input_image_list):
        """Apply padding2d to every channel of a (C, H, W) tensor."""
        return np.array([self.padding2d(channel) for channel in input_image_list])

    # Pad an image so the strided kernel covers it exactly.
    def padding2d(self, input_image):
        """Zero-pad a 2-D image on the bottom/right edges so that a kernel of
        side receptive_field moving with conv_stride tiles it exactly."""
        ori_height, ori_width = input_image.shape
        new_height = int(np.ceil((ori_height - self.receptive_field) / self.conv_stride)
                         * self.conv_stride + self.receptive_field)
        new_width = int(np.ceil((ori_width - self.receptive_field) / self.conv_stride)
                        * self.conv_stride + self.receptive_field)
        padded = np.zeros((new_height, new_width))
        padded[0:ori_height, 0:ori_width] = input_image
        return padded

    # Pad the error tensor for the backward-pass convolutions.
    def padding_backward(self, error_matrix, pad_mode=0):
        """Prepare a (B, C, H, W) error tensor for the backward convolutions.

        If conv_stride > 1, the error values are spread out with
        conv_stride - 1 zeros between neighbouring elements
        (transposed-convolution style).  With pad_mode == 0 an extra border of
        receptive_field - 1 zeros is added on every side; any other pad_mode
        skips the border (used for the weight-gradient correlation).
        """
        em_batch_size, em_channel_num, em_height, em_width = error_matrix.shape
        pad_height = (em_height - 1) * self.conv_stride + 2 * self.receptive_field - 1
        pad_width = (em_width - 1) * self.conv_stride + 2 * self.receptive_field - 1
        tmp_height = (em_height - 1) * self.conv_stride + 1
        tmp_width = (em_width - 1) * self.conv_stride + 1
        # Symmetric border of (receptive_field - 1) zeros per side.
        up_down_pad = (int((pad_height - tmp_height) / 2), int((pad_height - tmp_height) / 2))
        left_right_pad = (int((pad_width - tmp_width) / 2), int((pad_width - tmp_width) / 2))
        em_bchw = []
        for b in range(em_batch_size):
            em_chw = []
            for c in range(em_channel_num):
                em_hw = np.zeros([tmp_height, tmp_width])
                # Scatter the error values conv_stride apart (vectorised form
                # of the original per-element double loop).
                em_hw[::self.conv_stride, ::self.conv_stride] = error_matrix[b, c]
                if pad_mode == 0:
                    em_hw = np.pad(em_hw, (up_down_pad, left_right_pad), mode="constant")
                em_chw.append(em_hw)
            em_bchw.append(em_chw)
        return np.array(em_bchw)

    # Multi-channel correlation: per-channel 2-D correlation, summed over channels.
    def conv3d(self, new_image, conv_kernel, stride, is_backward=False):
        """Correlate a (C, H, W) image with a (C, kH, kW) kernel and sum the
        per-channel results into one 2-D map.

        When is_backward is True each kernel channel is rotated 180 degrees
        first, turning the correlation into a true convolution.
        Slower than signal.correlate; the Python path is kept for stride > 1.
        """
        _, image_height, image_width = new_image.shape
        _, kernel_height, kernel_width = conv_kernel.shape
        output_image = np.zeros([int((image_height - kernel_height) / stride + 1),
                                 int((image_width - kernel_width) / stride + 1)])
        for c in range(new_image.shape[0]):
            kernel2d = np.rot90(conv_kernel[c], 2) if is_backward else conv_kernel[c]
            if stride == 1:
                output_image += signal.correlate2d(new_image[c], kernel2d, mode='valid')
            else:
                output_image += self.conv2d(new_image[c], kernel2d, stride)
        return output_image

    # Plain strided 2-D cross-correlation (no kernel flip).
    def conv2d(self, new_image, conv_kernel, stride):
        """Strided 2-D cross-correlation of new_image with conv_kernel.
        Slower than signal.correlate2d; kept for stride > 1."""
        image_height, image_width = new_image.shape
        kernel_height, kernel_width = conv_kernel.shape
        # Number of valid top-left anchor positions; force one position when
        # the kernel is exactly one row/column larger than the image
        # (preserves the original edge-case behaviour).
        height = image_height - kernel_height + 1
        if height == 0:
            height = 1
        width = image_width - kernel_width + 1
        if width == 0:
            width = 1
        rows = []
        for h in range(0, height, stride):
            row = []
            for w in range(0, width, stride):
                window = new_image[h:h + kernel_height, w:w + kernel_width]
                row.append(np.sum(window * conv_kernel))
            rows.append(row)
        return np.array(rows)

    # Forward pass.
    def forward(self, input_image_list):
        """Pad each sample, correlate it with every kernel, add the bias.

        input_image_list -- (B, C, H, W) tensor
        Returns the (B, kernel_num, H', W') output tensor (also cached in
        self.conv_output; the input is cached in self.conv_input).
        """
        self.conv_input = input_image_list
        input_batch_size = input_image_list.shape[0]
        # Derive the output spatial size from one padded sample.
        pad_image = self.padding2d(input_image_list[0, 0])
        new_image_height = int((pad_image.shape[0] - self.receptive_field) / self.conv_stride + 1)
        new_image_width = int((pad_image.shape[1] - self.receptive_field) / self.conv_stride + 1)
        self.conv_output = np.zeros((input_batch_size, self.kernel_num, new_image_height, new_image_width))

        for b in range(input_batch_size):
            # Padding is kernel-independent, so do it once per sample
            # (hoisted out of the kernel loop; the original re-padded per kernel).
            an_image = self.padding3d(input_image_list[b])
            for k in range(self.kernel_num):
                if self.conv_stride == 1:
                    self.conv_output[b, k] = signal.correlate(an_image, self.weights[k], mode='valid')[0]
                else:
                    self.conv_output[b, k] = self.conv3d(an_image, self.weights[k], self.conv_stride)
                self.conv_output[b, k] += self.biases[k]
        return self.conv_output

    # Backward pass: parameter gradients plus the error propagated to the previous layer.
    def backward(self, error_from_later_layer):
        """Given the (B, kernel_num, H', W') error from the next layer, compute
        self.gradient_w / self.gradient_b (summed over the batch) and return
        the error tensor for the previous layer (shape of self.conv_input)."""
        input_batch_size, input_channel_num, input_height, input_width = self.conv_input.shape
        # Full (dilated + bordered) padding for the input-error convolution.
        error_matrix = self.padding_backward(error_from_later_layer)
        self.error_to_former_layer = np.zeros(self.conv_input.shape)
        for b in range(input_batch_size):
            for c in range(input_channel_num):
                # True convolution (180-degree-rotated kernels) of the padded
                # error with channel c of every kernel.
                conv_out = self.conv3d(error_matrix[b], self.weights[:, c], 1, True)
                # Crop away the contribution of the forward padding.
                self.error_to_former_layer[b, c] = conv_out[0:input_height, 0:input_width]

        # Dilated-only padding for the weight-gradient correlation.
        error_matrix = self.padding_backward(error_from_later_layer, pad_mode=1)
        # BUGFIX: accumulate gradients over the batch.  The original used plain
        # assignment inside the batch loop, so only the last sample's gradient
        # survived even though update_weights divides by the batch size.
        self.gradient_w = np.zeros(self.weights.shape)
        self.gradient_b = np.zeros(self.biases.shape)
        for b in range(input_batch_size):
            for k in range(self.weights.shape[0]):
                for c in range(self.weights.shape[1]):
                    tmp_input = self.padding2d(self.conv_input[b, c])
                    self.gradient_w[k, c] += signal.correlate2d(tmp_input, error_matrix[b, k], mode='valid')
                self.gradient_b[k] += np.sum(error_from_later_layer[b, k])

        return self.error_to_former_layer

    # Gradient-descent update.
    def update_weights(self):
        """Apply one gradient-descent step; gradients are summed over the
        batch in backward(), so divide by the batch size to average them."""
        batch_size = self.input_shape[0]
        self.weights -= self.gradient_w / batch_size * self.learning_rate
        self.biases -= self.gradient_b / batch_size * self.learning_rate
