import numpy as np


class ConvLayer:
    def __init__(self, kernel_size, in_channels, out_channels, stride=1, padding=0):
        """
        2-D convolution layer implemented with NumPy.

        Parameters:
        kernel_size -- side length of the (square) convolution kernel
        in_channels -- number of input channels
        out_channels -- number of output channels
        stride -- step between successive windows
        padding -- zero-padding applied to both height and width
        """
        self.kernel_size = kernel_size
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride = stride
        self.padding = padding

        # Small random weights, zero biases.
        self.weights = np.random.randn(out_channels, in_channels, kernel_size, kernel_size) * 0.01
        self.bias = np.zeros(out_channels)

        # Forward-pass values cached for the backward pass.
        self.input = None
        self.padded_input = None
        self.output = None

    def forward(self, input_data):
        """
        Forward pass.

        Parameters:
        input_data -- array of shape (batch_size, in_channels, height, width)

        Returns:
        output -- array of shape (batch_size, out_channels, out_h, out_w)
        """
        batch_size, _, height, width = input_data.shape
        self.input = input_data

        # Zero-pad height and width only (batch/channel dims untouched).
        if self.padding > 0:
            self.padded_input = np.pad(input_data,
                                       ((0, 0), (0, 0),
                                        (self.padding, self.padding),
                                        (self.padding, self.padding)),
                                       mode='constant')
        else:
            self.padded_input = input_data

        # Standard output-size formula.
        output_height = (height + 2 * self.padding - self.kernel_size) // self.stride + 1
        output_width = (width + 2 * self.padding - self.kernel_size) // self.stride + 1

        output = np.zeros((batch_size, self.out_channels, output_height, output_width))

        # Loop only over spatial positions; batch and channel dimensions are
        # handled by one tensordot per window. This replaces the former
        # four-deep Python loop with C-level contractions — same math,
        # identical up to floating-point summation order.
        for h in range(output_height):
            h_start = h * self.stride
            h_end = h_start + self.kernel_size
            for w in range(output_width):
                w_start = w * self.stride
                w_end = w_start + self.kernel_size

                # (batch, in_channels, k, k) window for every sample at once.
                window = self.padded_input[:, :, h_start:h_end, w_start:w_end]

                # Contract over (in_channels, k, k) -> (batch, out_channels).
                output[:, :, h, w] = np.tensordot(
                    window, self.weights, axes=([1, 2, 3], [1, 2, 3])) + self.bias

        self.output = output
        return output

    def backward(self, grad_output, learning_rate):
        """
        Backward pass; updates weights and bias in place via SGD.

        Parameters:
        grad_output -- gradient w.r.t. the output, same shape as output
        learning_rate -- SGD step size

        Returns:
        grad_input -- gradient w.r.t. the (unpadded) layer input
        """
        batch_size, _, output_height, output_width = grad_output.shape

        grad_weights = np.zeros_like(self.weights)
        grad_padded_input = np.zeros_like(self.padded_input)

        # dL/db: sum over batch and all spatial positions.
        grad_bias = grad_output.sum(axis=(0, 2, 3))

        # One pass over spatial positions computes both dL/dW and dL/dX,
        # contracting batch/channel dims with tensordot as in forward().
        for h in range(output_height):
            h_start = h * self.stride
            h_end = h_start + self.kernel_size
            for w in range(output_width):
                w_start = w * self.stride
                w_end = w_start + self.kernel_size

                window = self.padded_input[:, :, h_start:h_end, w_start:w_end]
                grad_here = grad_output[:, :, h, w]  # (batch, out_channels)

                # dL/dW: contract over the batch dimension.
                grad_weights += np.tensordot(grad_here, window, axes=([0], [0]))

                # dL/dX (padded): scatter each weighted kernel back onto its window.
                grad_padded_input[:, :, h_start:h_end, w_start:w_end] += \
                    np.tensordot(grad_here, self.weights, axes=([1], [0]))

        # Strip the padding to recover the gradient w.r.t. the original input.
        if self.padding > 0:
            grad_input = grad_padded_input[:, :, self.padding:-self.padding,
                                           self.padding:-self.padding]
        else:
            grad_input = grad_padded_input

        # SGD update.
        self.weights -= learning_rate * grad_weights
        self.bias -= learning_rate * grad_bias

        return grad_input


class MaxPoolingLayer:
    def __init__(self, pool_size, stride=None):
        """
        Max-pooling layer.

        Parameters:
        pool_size -- side length of the pooling window
        stride -- window step; defaults to pool_size (non-overlapping windows)
        """
        self.pool_size = pool_size
        self.stride = pool_size if stride is None else stride

        # Forward-pass state cached for the backward pass.
        self.input = None
        self.output = None
        self.max_indices = None

    def forward(self, input_data):
        """
        Forward pass.

        Parameters:
        input_data -- array of shape (batch_size, channels, height, width)

        Returns:
        output -- pooled array of shape (batch_size, channels, out_h, out_w)
        """
        self.input = input_data
        batch_size, channels, height, width = input_data.shape

        # Output spatial dimensions (no padding).
        out_h = (height - self.pool_size) // self.stride + 1
        out_w = (width - self.pool_size) // self.stride + 1

        output = np.zeros((batch_size, channels, out_h, out_w))
        # For each output cell, remember the (row, col) of the max within its window.
        self.max_indices = np.zeros((batch_size, channels, out_h, out_w, 2), dtype=int)

        for b, c, h, w in np.ndindex(batch_size, channels, out_h, out_w):
            top = h * self.stride
            left = w * self.stride
            window = input_data[b, c, top:top + self.pool_size, left:left + self.pool_size]

            output[b, c, h, w] = np.max(window)
            self.max_indices[b, c, h, w] = np.unravel_index(np.argmax(window), window.shape)

        self.output = output
        return output

    def backward(self, grad_output):
        """
        Backward pass: route each gradient to the position that won the max.

        Parameters:
        grad_output -- gradient with the same shape as the pooled output

        Returns:
        grad_input -- gradient w.r.t. the layer input
        """
        grad_input = np.zeros_like(self.input)
        batch_size, channels, out_h, out_w = grad_output.shape

        for b, c, h, w in np.ndindex(batch_size, channels, out_h, out_w):
            # Only the max position inside each window receives gradient.
            dy, dx = self.max_indices[b, c, h, w]
            grad_input[b, c, h * self.stride + dy, w * self.stride + dx] += grad_output[b, c, h, w]

        return grad_input


class ReLU:
    def __init__(self):
        """Rectified linear unit activation: element-wise max(0, x)."""
        self.input = None  # cached for the backward pass

    def forward(self, input_data):
        """
        Forward pass.

        Parameters:
        input_data -- input array of any shape

        Returns:
        output -- same shape as the input with negatives clamped to zero
        """
        self.input = input_data
        return np.maximum(input_data, 0)

    def backward(self, grad_output):
        """
        Backward pass: gradient flows only where the input was positive.

        Parameters:
        grad_output -- gradient with the same shape as the output

        Returns:
        grad_input -- gradient w.r.t. the layer input
        """
        return np.where(self.input > 0, grad_output, 0)


class FullyConnectedLayer:
    def __init__(self, input_size, output_size):
        """
        Dense (fully connected) layer computing y = x @ W + b.

        Parameters:
        input_size -- number of input features
        output_size -- number of output features
        """
        self.input_size = input_size
        self.output_size = output_size

        # Small random weights, zero biases.
        self.weights = np.random.randn(input_size, output_size) * 0.01
        self.bias = np.zeros(output_size)

        # Forward-pass state cached for the backward pass.
        self.input = None
        self.output = None

    def forward(self, input_data):
        """
        Forward pass. Inputs with more than two dimensions are flattened
        per sample before the linear transform.

        Parameters:
        input_data -- array of shape (batch_size, input_size), or any
                      (batch_size, ...) shape that flattens to input_size

        Returns:
        output -- array of shape (batch_size, output_size)
        """
        flat = input_data if input_data.ndim <= 2 else input_data.reshape(input_data.shape[0], -1)

        self.input = flat
        self.output = flat @ self.weights + self.bias
        return self.output

    def backward(self, grad_output, learning_rate):
        """
        Backward pass; updates weights and bias in place via SGD.

        Parameters:
        grad_output -- gradient with the same shape as the output
        learning_rate -- SGD step size

        Returns:
        grad_input -- gradient w.r.t. the (flattened) layer input
        """
        # Input gradient must use the pre-update weights.
        grad_input = grad_output @ self.weights.T

        # Parameter gradients folded directly into the SGD step.
        self.weights -= learning_rate * (self.input.T @ grad_output)
        self.bias -= learning_rate * grad_output.sum(axis=0)

        return grad_input


class SoftmaxWithCrossEntropy:
    def __init__(self):
        """
        Softmax activation fused with cross-entropy loss.

        Fusing the two keeps the backward pass the simple
        (probs - one_hot) / batch_size expression and lets the loss be
        computed with the numerically stable log-sum-exp trick.
        """
        self.probs = None   # softmax probabilities cached for backward()
        self.labels = None  # integer class labels cached for backward()

    def forward(self, input_data, labels):
        """
        Forward pass.

        Parameters:
        input_data -- logits of shape (batch_size, num_classes)
        labels -- integer class labels of shape (batch_size,)

        Returns:
        loss -- mean cross-entropy loss over the batch
        """
        batch_size = input_data.shape[0]
        self.labels = labels

        # Shift by the row max so exp() never overflows.
        shifted = input_data - np.max(input_data, axis=1, keepdims=True)
        exp_shifted = np.exp(shifted)
        sum_exp = np.sum(exp_shifted, axis=1, keepdims=True)
        self.probs = exp_shifted / sum_exp

        # Bug fix: compute log-probabilities directly via log-sum-exp instead
        # of log(probs). When a true-class logit sits far below the row max,
        # its probability underflows to 0 and log(0) yields an infinite loss;
        # the log-sum-exp form stays finite.
        log_probs = shifted - np.log(sum_exp)
        loss = -np.sum(log_probs[np.arange(batch_size), labels]) / batch_size

        return loss

    def backward(self):
        """
        Backward pass.

        Returns:
        grad_input -- gradient w.r.t. the logits: (probs - one_hot) / batch_size
        """
        batch_size = self.probs.shape[0]

        grad_input = self.probs.copy()

        # Subtract 1 at each sample's true class — the fused softmax+CE gradient.
        grad_input[np.arange(batch_size), self.labels] -= 1

        # Normalize so the gradient matches the mean (not summed) loss.
        grad_input /= batch_size

        return grad_input


class CNN:
    def __init__(self, input_shape, num_classes):
        """
        Small convolutional network: two conv/ReLU/pool stages followed by
        two fully connected layers and a softmax cross-entropy head.

        Parameters:
        input_shape -- input shape as (channels, height, width)
        num_classes -- number of output classes
        """
        channels, height, width = input_shape

        # First conv + pooling stage.
        self.conv1 = ConvLayer(kernel_size=3, in_channels=channels, out_channels=16, padding=1)
        self.relu1 = ReLU()
        self.pool1 = MaxPoolingLayer(pool_size=2)

        # Second conv + pooling stage.
        self.conv2 = ConvLayer(kernel_size=3, in_channels=16, out_channels=32, padding=1)
        self.relu2 = ReLU()
        self.pool2 = MaxPoolingLayer(pool_size=2)

        # Flattened size after two 2x2 poolings (each halves H and W).
        fc_input_height = height // 4
        fc_input_width = width // 4
        fc_input_size = 32 * fc_input_height * fc_input_width

        # Fully connected head.
        self.fc1 = FullyConnectedLayer(input_size=fc_input_size, output_size=128)
        self.relu3 = ReLU()
        self.fc2 = FullyConnectedLayer(input_size=128, output_size=num_classes)

        # Classifier / loss.
        self.softmax_ce = SoftmaxWithCrossEntropy()

    def forward(self, input_data, labels=None):
        """
        Forward pass.

        Parameters:
        input_data -- array of shape (batch_size, channels, height, width)
        labels -- optional integer labels of shape (batch_size,)

        Returns:
        The loss (scalar) when labels are given, otherwise the raw logits.
        """
        x = self.conv1.forward(input_data)
        x = self.relu1.forward(x)
        x = self.pool1.forward(x)

        x = self.conv2.forward(x)
        x = self.relu2.forward(x)
        x = self.pool2.forward(x)

        x = self.fc1.forward(x)
        x = self.relu3.forward(x)
        output = self.fc2.forward(x)

        # With labels, return the training loss instead of the logits.
        if labels is not None:
            loss = self.softmax_ce.forward(output, labels)
            return loss

        return output

    def backward(self, learning_rate):
        """
        Backward pass through every layer, updating parameters in place.

        Parameters:
        learning_rate -- SGD step size passed to each trainable layer
        """
        grad = self.softmax_ce.backward()
        grad = self.fc2.backward(grad, learning_rate)
        grad = self.relu3.backward(grad)
        grad = self.fc1.backward(grad, learning_rate)

        # Reshape the dense gradient back to the conv feature-map layout.
        # Use the actual pooled output shape so non-square feature maps
        # (H != W) are handled correctly, not just square ones.
        batch_size = grad.shape[0]
        _, fm_channels, fm_height, fm_width = self.pool2.output.shape
        grad = grad.reshape(batch_size, fm_channels, fm_height, fm_width)

        grad = self.pool2.backward(grad)
        grad = self.relu2.backward(grad)
        grad = self.conv2.backward(grad, learning_rate)

        grad = self.pool1.backward(grad)
        grad = self.relu1.backward(grad)
        grad = self.conv1.backward(grad, learning_rate)

    def train(self, X_train, y_train, batch_size, epochs, learning_rate):
        """
        Train the model with mini-batch SGD.

        Parameters:
        X_train -- training inputs, shape (num_samples, channels, H, W)
        y_train -- integer training labels, shape (num_samples,)
        batch_size -- mini-batch size
        epochs -- number of passes over the data
        learning_rate -- SGD step size

        Returns:
        losses -- list with the average loss of each epoch
        """
        num_samples = X_train.shape[0]
        losses = []

        for epoch in range(epochs):
            total_loss = 0.0
            num_batches = 0

            # Shuffle once per epoch.
            indices = np.random.permutation(num_samples)
            X_shuffled = X_train[indices]
            y_shuffled = y_train[indices]

            # Mini-batch loop; the last batch may be smaller.
            for start in range(0, num_samples, batch_size):
                end = min(start + batch_size, num_samples)
                X_batch = X_shuffled[start:end]
                y_batch = y_shuffled[start:end]

                # Forward pass computes and caches everything backward needs.
                loss = self.forward(X_batch, y_batch)
                total_loss += loss
                num_batches += 1

                # Backward pass updates all parameters.
                self.backward(learning_rate)

            # Bug fix: average over the real number of batches. The previous
            # num_samples // batch_size undercounts when batch_size does not
            # divide num_samples (e.g. 100 samples, batch 32 -> 4 batches,
            # not 3), which inflated the reported loss.
            avg_loss = total_loss / num_batches
            losses.append(avg_loss)

            print(f"Epoch {epoch + 1}/{epochs}, Loss: {avg_loss:.4f}")

        return losses

    def predict(self, X):
        """
        Predict class labels.

        Parameters:
        X -- inputs of shape (batch_size, channels, H, W)

        Returns:
        predictions -- array of predicted class indices, shape (batch_size,)
        """
        # Logits from a label-free forward pass.
        output = self.forward(X)

        # Highest-scoring class per sample.
        predictions = np.argmax(output, axis=1)

        return predictions


# 示例用法
if __name__ == "__main__":
    # 假设我们有一个MNIST数据集的一小部分
    # 这里只是为了说明如何使用CNN类，实际使用需要导入真实数据

    # 创建随机数据
    X_sample = np.random.randn(100, 1, 28, 28)  # 100张28x28的灰度图
    y_sample = np.random.randint(0, 10, 100)  # 10个类别的标签

    # 初始化CNN模型
    model = CNN(input_shape=(1, 28, 28), num_classes=10)

    # 训练模型
    losses = model.train(X_sample, y_sample, batch_size=32, epochs=5, learning_rate=0.01)

    # 预测
    X_test = np.random.randn(10, 1, 28, 28)
    predictions = model.predict(X_test)
    print("Predictions:", predictions)