import numpy as np
from base_nn import Layer


class Conv2D(Layer):
    """2D convolution layer (NCHW layout), vectorized with strided windows.

    The previous implementation claimed to be vectorized but ran four nested
    Python loops; forward and the parameter gradients are now computed with
    `sliding_window_view` + `einsum`, and the input-gradient scatter loops
    only over spatial output positions (vectorized over batch and filters).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize scalar arguments to (height, width) tuples.
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.padding = padding  # int: symmetric zero padding on H and W

        # He initialization: keeps output variance stable for ReLU networks.
        self.kernel = np.random.randn(out_channels, in_channels, *self.kernel_size) * np.sqrt(
            2 / (in_channels * np.prod(self.kernel_size)))
        self.bias = np.zeros((out_channels, 1))

    def _pad(self, x, padding):
        """Zero-pad the input symmetrically along H and W only."""
        if padding == 0:
            return x
        # (batch, channels, height, width) -> pad only height and width
        return np.pad(x, ((0, 0), (0, 0), (padding, padding), (padding, padding)), mode='constant')

    def _windows(self, padded):
        """Return a zero-copy strided view (B, C, out_h, out_w, kh, kw) of all receptive fields."""
        win = np.lib.stride_tricks.sliding_window_view(padded, self.kernel_size, axis=(2, 3))
        # Subsample the dense window grid by the stride.
        return win[:, :, ::self.stride[0], ::self.stride[1]]

    def forward(self, input_data):
        """Forward pass: cross-correlate every filter with the padded input.

        input_data: (batch, in_channels, H, W)
        returns:    (batch, out_channels, out_H, out_W)
        """
        self.input = input_data
        padded_input = self._pad(input_data, self.padding)

        # (B, C, out_h, out_w, kh, kw) view over all receptive fields.
        windows = self._windows(padded_input)

        # Contract the channel and kernel axes against each output filter.
        output = np.einsum('bchwij,ocij->bohw', windows, self.kernel, optimize=True)
        output += self.bias.reshape(1, -1, 1, 1)  # broadcast one bias per filter

        self.output = output
        return output

    def backward(self, output_gradient, learning_rate):
        """Backward pass: compute gradients, update parameters, return dL/dinput.

        Gradients are computed with the pre-update kernel (same as before),
        and parameter gradients are averaged over the batch.
        """
        batch_size, _, out_height, out_width = output_gradient.shape

        padded_input = self._pad(self.input, self.padding)
        windows = self._windows(padded_input)

        # Parameter gradients, averaged over the batch.
        kernel_gradient = np.einsum('bohw,bchwij->ocij', output_gradient, windows,
                                    optimize=True) / batch_size
        bias_gradient = output_gradient.sum(axis=(0, 2, 3)).reshape(-1, 1) / batch_size

        # Input gradient: scatter each output position's contribution back onto
        # its receptive field. Overlapping fields must accumulate, so we loop
        # over spatial output positions only (batch/filters are vectorized).
        padded_input_gradient = np.zeros_like(padded_input)
        kh, kw = self.kernel_size
        for h in range(out_height):
            h_start = h * self.stride[0]
            for w in range(out_width):
                w_start = w * self.stride[1]
                padded_input_gradient[:, :, h_start:h_start + kh, w_start:w_start + kw] += np.einsum(
                    'bo,ocij->bcij', output_gradient[:, :, h, w], self.kernel)

        # Update parameters (after all gradients used the old kernel).
        self.kernel -= learning_rate * kernel_gradient
        self.bias -= learning_rate * bias_gradient

        # Strip the padding from the input gradient if any was added.
        if self.padding > 0:
            input_gradient = padded_input_gradient[:, :, self.padding:-self.padding,
                                                   self.padding:-self.padding]
        else:
            input_gradient = padded_input_gradient

        return input_gradient

    def get_params(self):
        """Return copies of the learnable parameters."""
        return {
            'kernel': self.kernel.copy(),
            'bias': self.bias.copy()
        }

    def set_params(self, params):
        """Load learnable parameters (no copy is made)."""
        self.kernel = params['kernel']
        self.bias = params['bias']


class MaxPool2D(Layer):
    """2D max pooling layer (NCHW layout).

    Bug fix: the old backward pass *assigned* (`=`) gradients through a
    boolean mask, so with overlapping windows (stride < pool_size), or when
    one cell was the max of several windows, contributions overwrote each
    other. Forward now records each window's argmax coordinates and backward
    accumulates (`+=`) at them. Results are unchanged for the default
    non-overlapping configuration.
    """

    def __init__(self, pool_size, stride=None):
        super().__init__()
        self.pool_size = pool_size if isinstance(pool_size, tuple) else (pool_size, pool_size)
        self.stride = stride if stride is not None else self.pool_size
        self.stride = self.stride if isinstance(self.stride, tuple) else (self.stride, self.stride)
        self.mask = None      # boolean map of selected maxima (kept for inspection)
        self._argmax = None   # (B, C, out_h, out_w, 2) input coords of each window's max

    def forward(self, input_data):
        """Forward pass: take the maximum over each pooling window."""
        self.input = input_data
        batch_size, channels, in_height, in_width = input_data.shape
        ph, pw = self.pool_size
        sh, sw = self.stride

        # Output size (no padding).
        out_height = (in_height - ph) // sh + 1
        out_width = (in_width - pw) // sw + 1

        self.output = np.zeros((batch_size, channels, out_height, out_width))
        self.mask = np.zeros_like(input_data, dtype=bool)
        # Remember exactly which input cell won each window so backward can
        # route gradients correctly even when windows overlap.
        self._argmax = np.zeros((batch_size, channels, out_height, out_width, 2), dtype=int)

        for b in range(batch_size):
            for c in range(channels):
                for h_out in range(out_height):
                    h_start = h_out * sh
                    for w_out in range(out_width):
                        w_start = w_out * sw

                        # Extract the window and locate its maximum.
                        region = input_data[b, c, h_start:h_start + ph, w_start:w_start + pw]
                        r, q = np.unravel_index(np.argmax(region), region.shape)

                        self.output[b, c, h_out, w_out] = region[r, q]
                        self.mask[b, c, h_start + r, w_start + q] = True
                        self._argmax[b, c, h_out, w_out] = (h_start + r, w_start + q)

        return self.output

    def backward(self, output_gradient, learning_rate):
        """Backward pass: each output gradient flows to its window's argmax.

        Uses `+=` so that overlapping windows and repeated winners accumulate
        instead of overwriting each other.
        """
        input_gradient = np.zeros_like(self.input)
        n_batch, n_chan, out_h, out_w = output_gradient.shape

        for b in range(n_batch):
            for c in range(n_chan):
                for h_out in range(out_h):
                    for w_out in range(out_w):
                        hi, wi = self._argmax[b, c, h_out, w_out]
                        input_gradient[b, c, hi, wi] += output_gradient[b, c, h_out, w_out]

        return input_gradient


class AvgPool2D(Layer):
    """2D average pooling layer (NCHW layout)."""

    def __init__(self, pool_size, stride=None):
        super().__init__()
        # Normalize both arguments to (height, width) tuples; stride
        # defaults to the pool size (non-overlapping windows).
        if not isinstance(pool_size, tuple):
            pool_size = (pool_size, pool_size)
        self.pool_size = pool_size
        if stride is None:
            stride = pool_size
        if not isinstance(stride, tuple):
            stride = (stride, stride)
        self.stride = stride

    def forward(self, input_data):
        """Forward pass: replace each pooling window with its mean value."""
        self.input = input_data
        n_batch, n_chan, height, width = input_data.shape
        ph, pw = self.pool_size
        sh, sw = self.stride

        # Output grid size (no padding).
        n_rows = (height - ph) // sh + 1
        n_cols = (width - pw) // sw + 1

        self.output = np.zeros((n_batch, n_chan, n_rows, n_cols))

        for b in range(n_batch):
            for c in range(n_chan):
                for row in range(n_rows):
                    top = row * sh
                    for col in range(n_cols):
                        left = col * sw
                        window = input_data[b, c, top:top + ph, left:left + pw]
                        self.output[b, c, row, col] = np.mean(window)

        return self.output

    def backward(self, output_gradient, learning_rate):
        """Backward pass: spread each gradient uniformly over its window."""
        grad_in = np.zeros_like(self.input)
        ph, pw = self.pool_size
        sh, sw = self.stride
        area = ph * pw
        n_batch, n_chan, n_rows, n_cols = output_gradient.shape

        for b in range(n_batch):
            for c in range(n_chan):
                for row in range(n_rows):
                    top = row * sh
                    for col in range(n_cols):
                        left = col * sw
                        # Each input cell in the window receives an equal share.
                        share = output_gradient[b, c, row, col] / area
                        grad_in[b, c, top:top + ph, left:left + pw] += share

        return grad_in
