package com.cu.machinelearning.neural;
// AlexNetDemo.java

import java.util.Random;

/**
 * AlexNet卷积神经网络的简化实现
 * 包含主要的网络层结构和基本的前向传播功能
 */
public class AlexNetDemo {
    // 网络参数
    private double[][][][] conv1Weights;  // 第一个卷积层权重 (96, 11, 11, 3)
    private double[] conv1Bias;           // 第一个卷积层偏置 (96)
    private double[][][][] conv2Weights;  // 第二个卷积层权重 (256, 5, 5, 96)
    private double[] conv2Bias;           // 第二个卷积层偏置 (256)
    private double[][][][] conv3Weights;  // 第三个卷积层权重 (384, 3, 3, 256)
    private double[] conv3Bias;           // 第三个卷积层偏置 (384)
    private double[][][][] conv4Weights;  // 第四个卷积层权重 (384, 3, 3, 384)
    private double[] conv4Bias;           // 第四个卷积层偏置 (384)
    private double[][][][] conv5Weights;  // 第五个卷积层权重 (256, 3, 3, 384)
    private double[] conv5Bias;           // 第五个卷积层偏置 (256)

    private double[][] fc1Weights;        // 第一个全连接层权重 (4096, 9216)
    private double[] fc1Bias;             // 第一个全连接层偏置 (4096)
    private double[][] fc2Weights;        // 第二个全连接层权重 (4096, 4096)
    private double[] fc2Bias;             // 第二个全连接层偏置 (4096)
    private double[][] outputWeights;     // 输出层权重 (1000, 4096)
    private double[] outputBias;          // 输出层偏置 (1000)

    private Random random;


    private double[][][] conv1Output, relu1Output, pool1Output, norm1Output;
    private double[][][] conv2Output, relu2Output, pool2Output, norm2Output;
    private double[][][] conv3Output, relu3Output;
    private double[][][] conv4Output, relu4Output;
    private double[][][] conv5Output, relu5Output, pool5Output;
    private double[] fc1Output, reluFc1Output, drop1Output;
    private double[] fc2Output, reluFc2Output, drop2Output;
    private double[] logits;

    /**
     * Builds the network and allocates/initialises every layer's weights and
     * biases with He initialisation (see {@link #initializeConvWeights}).
     */
    public AlexNetDemo() {
        random = new Random(12345); // fixed seed so initial weights are reproducible

        // Initialise layer parameters. The call order matters: each call
        // consumes values from the single shared Random stream, so reordering
        // would change the initial weights despite the fixed seed.
        initializeLayer1();
        initializeLayer2();
        initializeLayer3();
        initializeLayer4();
        initializeLayer5();
        initializeFullyConnectedLayers();
    }

    /** Conv1: 96 kernels of 11x11 over 3 input channels (RGB). */
    private void initializeLayer1() {
        final int kernels = 96;
        final int size = 11;
        final int channels = 3;
        conv1Weights = new double[kernels][size][size][channels];
        conv1Bias = new double[kernels];
        initializeConvWeights(conv1Weights, conv1Bias, size * size * channels);
    }

    /** Conv2: 256 kernels of 5x5 over the 96 channels produced by conv1. */
    private void initializeLayer2() {
        final int kernels = 256;
        final int size = 5;
        final int channels = 96;
        conv2Weights = new double[kernels][size][size][channels];
        conv2Bias = new double[kernels];
        initializeConvWeights(conv2Weights, conv2Bias, size * size * channels);
    }

    /** Conv3: 384 kernels of 3x3 over the 256 channels produced by conv2. */
    private void initializeLayer3() {
        final int kernels = 384;
        final int size = 3;
        final int channels = 256;
        conv3Weights = new double[kernels][size][size][channels];
        conv3Bias = new double[kernels];
        initializeConvWeights(conv3Weights, conv3Bias, size * size * channels);
    }

    /** Conv4: 384 kernels of 3x3 over the 384 channels produced by conv3. */
    private void initializeLayer4() {
        final int kernels = 384;
        final int size = 3;
        final int channels = 384;
        conv4Weights = new double[kernels][size][size][channels];
        conv4Bias = new double[kernels];
        initializeConvWeights(conv4Weights, conv4Bias, size * size * channels);
    }

    /** Conv5: 256 kernels of 3x3 over the 384 channels produced by conv4. */
    private void initializeLayer5() {
        final int kernels = 256;
        final int size = 3;
        final int channels = 384;
        conv5Weights = new double[kernels][size][size][channels];
        conv5Bias = new double[kernels];
        initializeConvWeights(conv5Weights, conv5Bias, size * size * channels);
    }


    /**
     * Allocates and He-initialises both hidden fully connected layers and the
     * 1000-way classifier head. Initialisation order (fc1, fc2, output) is
     * significant: each call draws from the shared seeded Random stream.
     */
    private void initializeFullyConnectedLayers() {
        final int hiddenUnits = 4096;
        final int flattenedSize = 9216; // 6 * 6 * 256 from pool5
        final int numClasses = 1000;

        // FC1: flattened conv features -> first hidden layer.
        fc1Weights = new double[hiddenUnits][flattenedSize];
        fc1Bias = new double[hiddenUnits];
        initializeFcWeights(fc1Weights, fc1Bias, flattenedSize);

        // FC2: hidden -> hidden.
        fc2Weights = new double[hiddenUnits][hiddenUnits];
        fc2Bias = new double[hiddenUnits];
        initializeFcWeights(fc2Weights, fc2Bias, hiddenUnits);

        // Classifier head: hidden -> class logits.
        outputWeights = new double[numClasses][hiddenUnits];
        outputBias = new double[numClasses];
        initializeFcWeights(outputWeights, outputBias, hiddenUnits);
    }

    /**
     * Fills a convolutional layer's weights with He-initialised Gaussian
     * noise (std = sqrt(2 / fanIn), suited to ReLU) and zeroes its biases.
     *
     * @param weights 4-D weight tensor [outChannels][kH][kW][inChannels]
     * @param bias    per-output-channel bias vector, length outChannels
     * @param fanIn   inputs feeding one output unit (kH * kW * inChannels)
     */
    private void initializeConvWeights(double[][][][] weights, double[] bias, int fanIn) {
        final double stdDev = Math.sqrt(2.0 / fanIn);

        for (int out = 0; out < weights.length; out++) {
            // Iteration order matches allocation order so the Random stream
            // is consumed identically for a given seed.
            for (double[][] plane : weights[out]) {
                for (double[] row : plane) {
                    for (int c = 0; c < row.length; c++) {
                        row[c] = random.nextGaussian() * stdDev;
                    }
                }
            }
            bias[out] = 0.0;
        }
    }

    /**
     * Fills a fully connected layer's weights with He-initialised Gaussian
     * noise and zeroes its biases.
     *
     * @param weights weight matrix [outUnits][inUnits]
     * @param bias    bias vector, length outUnits
     * @param fanIn   number of inputs per output unit (inUnits)
     */
    private void initializeFcWeights(double[][] weights, double[] bias, int fanIn) {
        final double stdDev = Math.sqrt(2.0 / fanIn);

        for (int row = 0; row < weights.length; row++) {
            double[] w = weights[row];
            for (int col = 0; col < w.length; col++) {
                w[col] = random.nextGaussian() * stdDev;
            }
            bias[row] = 0.0;
        }
    }

    /**
     * Direct (naive) 2-D convolution of a multi-channel input with a bank of
     * kernels.
     *
     * @param input   input tensor [inChannels][height][width]
     * @param kernels kernel bank [numKernels][k][k][inChannels]
     * @param biases  one bias per kernel
     * @param stride  step between adjacent output positions
     * @param padding zero padding added on every spatial edge
     * @return output tensor [numKernels][outH][outW] where
     *         outH = (height - k + 2*padding) / stride + 1
     */
    private double[][][] convolution(double[][][] input, double[][][][] kernels,
                                     double[] biases, int stride, int padding) {
        int channels = input.length;
        int numKernels = kernels.length;
        int kSize = kernels[0].length;

        int outRows = (input[0].length - kSize + 2 * padding) / stride + 1;
        int outCols = (input[0][0].length - kSize + 2 * padding) / stride + 1;

        // Pad once up front; positions below index the padded tensor.
        double[][][] padded = addPadding(input, padding);
        double[][][] result = new double[numKernels][outRows][outCols];

        for (int k = 0; k < numKernels; k++) {
            for (int row = 0; row < outRows; row++) {
                for (int col = 0; col < outCols; col++) {
                    double acc = 0.0;
                    // Dot product of the kernel with the receptive field.
                    for (int c = 0; c < channels; c++) {
                        for (int kr = 0; kr < kSize; kr++) {
                            for (int kc = 0; kc < kSize; kc++) {
                                acc += padded[c][row * stride + kr][col * stride + kc]
                                        * kernels[k][kr][kc][c];
                            }
                        }
                    }
                    result[k][row][col] = acc + biases[k];
                }
            }
        }
        return result;
    }

    /**
     * Surrounds each channel of the input with a border of zeros.
     *
     * @param input   tensor [channels][height][width]
     * @param padding border width; 0 returns the input unchanged (no copy)
     * @return padded tensor [channels][height + 2p][width + 2p]
     */
    private double[][][] addPadding(double[][][] input, int padding) {
        if (padding == 0) {
            return input; // avoid an unnecessary copy
        }

        int channels = input.length;
        int rows = input[0].length;
        int cols = input[0][0].length;
        double[][][] padded = new double[channels][rows + 2 * padding][cols + 2 * padding];

        // New arrays are zero-filled, so only the interior needs copying.
        for (int c = 0; c < channels; c++) {
            for (int r = 0; r < rows; r++) {
                System.arraycopy(input[c][r], 0, padded[c][r + padding], padding, cols);
            }
        }
        return padded;
    }

    /**
     * Element-wise ReLU over a 3-D tensor: max(0, x).
     *
     * @param input tensor [channels][height][width]; not modified
     * @return new tensor of the same shape with negatives clamped to zero
     */
    private double[][][] relu(double[][][] input) {
        double[][][] activated = new double[input.length][input[0].length][input[0][0].length];

        for (int c = 0; c < input.length; c++) {
            for (int r = 0; r < input[c].length; r++) {
                double[] src = input[c][r];
                double[] dst = activated[c][r];
                for (int col = 0; col < src.length; col++) {
                    dst[col] = Math.max(0, src[col]);
                }
            }
        }
        return activated;
    }

    /**
     * Element-wise ReLU over a vector: max(0, x).
     *
     * @param input input vector; not modified
     * @return new vector with negatives clamped to zero
     */
    private double[] relu(double[] input) {
        double[] activated = input.clone();
        for (int i = 0; i < activated.length; i++) {
            activated[i] = Math.max(0, activated[i]);
        }
        return activated;
    }

    /**
     * Max pooling applied independently per channel.
     *
     * @param input    tensor [channels][height][width]
     * @param poolSize edge length of the (square) pooling window
     * @param stride   step between adjacent windows
     * @return pooled tensor [channels][outH][outW] where
     *         outH = (height - poolSize) / stride + 1
     */
    private double[][][] maxPooling(double[][][] input, int poolSize, int stride) {
        int channels = input.length;
        int outRows = (input[0].length - poolSize) / stride + 1;
        int outCols = (input[0][0].length - poolSize) / stride + 1;

        double[][][] pooled = new double[channels][outRows][outCols];

        for (int c = 0; c < channels; c++) {
            for (int r = 0; r < outRows; r++) {
                for (int col = 0; col < outCols; col++) {
                    // Take the maximum over the poolSize x poolSize window.
                    double best = Double.NEGATIVE_INFINITY;
                    for (int dr = 0; dr < poolSize; dr++) {
                        for (int dc = 0; dc < poolSize; dc++) {
                            best = Math.max(best, input[c][r * stride + dr][col * stride + dc]);
                        }
                    }
                    pooled[c][r][col] = best;
                }
            }
        }
        return pooled;
    }

    /**
     * Local response normalization (LRN) across adjacent channels.
     *
     * <p>Each activation is divided by {@code (k + alpha * sum)^beta}, where
     * {@code sum} is the sum of squares of the activations at the same
     * spatial position over a window of up to {@code n} neighbouring
     * channels centred on the current one.
     *
     * @param input tensor [channels][height][width]
     * @return normalized tensor, same shape as the input
     */
    private double[][][] localResponseNormalization(double[][][] input) {
        int channels = input.length;
        int rows = input[0].length;
        int cols = input[0][0].length;
        double[][][] normalized = new double[channels][rows][cols];

        final int n = 5;             // cross-channel window size
        final double alpha = 0.0001; // scaling factor on the squared sum
        final double beta = 0.75;    // decay exponent
        final double k = 2.0;        // additive offset (guards against division by zero)

        for (int c = 0; c < channels; c++) {
            // Window bounds depend only on the channel index; hoist them.
            int lo = Math.max(0, c - n / 2);
            int hi = Math.min(channels, c + n / 2 + 1);
            for (int r = 0; r < rows; r++) {
                for (int col = 0; col < cols; col++) {
                    double squares = 0.0;
                    for (int nc = lo; nc < hi; nc++) {
                        squares += input[nc][r][col] * input[nc][r][col];
                    }
                    normalized[c][r][col] = input[c][r][col] / Math.pow(k + alpha * squares, beta);
                }
            }
        }
        return normalized;
    }

    /**
     * Flattens a 3-D tensor to a vector in [channel][row][col] order (channel
     * varies slowest), matching the layout {@link #reshapeTo3D} inverts.
     *
     * @param input tensor [channels][height][width]
     * @return vector of length channels * height * width
     */
    private double[] flatten(double[][][] input) {
        int size = input.length * input[0].length * input[0][0].length;
        double[] flat = new double[size];

        int pos = 0;
        for (double[][] plane : input) {
            for (double[] row : plane) {
                for (double value : row) {
                    flat[pos++] = value;
                }
            }
        }
        return flat;
    }

    /**
     * Dense layer: output = weights * input + bias.
     *
     * @param input   activation vector, length inUnits
     * @param weights weight matrix [outUnits][inUnits]
     * @param bias    bias vector, length outUnits
     * @return pre-activation vector, length outUnits
     */
    private double[] fullyConnected(double[] input, double[][] weights, double[] bias) {
        double[] result = new double[weights.length];

        for (int neuron = 0; neuron < result.length; neuron++) {
            double[] w = weights[neuron];
            double acc = 0.0;
            for (int j = 0; j < input.length; j++) {
                acc += input[j] * w[j];
            }
            result[neuron] = acc + bias[neuron];
        }
        return result;
    }

    /**
     * Inverted dropout: during training each element is kept with probability
     * {@code keepProb} and scaled by {@code 1 / keepProb}, so the expected
     * activation is unchanged and inference needs no extra scaling.
     *
     * @param input      activation vector to apply dropout to; not modified
     * @param keepProb   probability in (0, 1] that each element is kept
     * @param isTraining true during training (random masking applied);
     *                   false during inference (input returned unchanged)
     * @return a new vector; during training some elements are zeroed and the
     *         survivors are divided by {@code keepProb}
     * @throws IllegalArgumentException if {@code keepProb} is outside (0, 1]
     */
    private double[] dropout(double[] input, double keepProb, boolean isTraining) {
        // Validate up front: keepProb <= 0 would otherwise silently produce
        // Infinity/NaN activations via the division below.
        if (keepProb <= 0.0 || keepProb > 1.0) {
            throw new IllegalArgumentException("keepProb must be in (0, 1], got " + keepProb);
        }

        // Inference: inverted dropout already preserved the expectation at
        // training time, so simply return a copy of the input.
        if (!isTraining) {
            return input.clone();
        }

        // Training: randomly zero elements, scaling survivors by 1/keepProb.
        double[] output = new double[input.length];
        for (int i = 0; i < input.length; i++) {
            if (random.nextDouble() < keepProb) {
                output[i] = input[i] / keepProb;
            }
            // else: element stays 0.0 (dropped)
        }
        return output;
    }

    /**
     * Numerically stable softmax: subtracts the maximum logit before
     * exponentiating to avoid overflow.
     *
     * @param input logit vector
     * @return probability vector of the same length, summing to 1
     */
    private double[] softmax(double[] input) {
        // Find the largest logit for the stability shift.
        double maxVal = input[0];
        for (double v : input) {
            maxVal = Math.max(maxVal, v);
        }

        // Exponentiate shifted logits and accumulate the partition sum.
        double[] probs = new double[input.length];
        double total = 0.0;
        for (int i = 0; i < input.length; i++) {
            probs[i] = Math.exp(input[i] - maxVal);
            total += probs[i];
        }

        // Normalize to probabilities.
        for (int i = 0; i < probs.length; i++) {
            probs[i] /= total;
        }
        return probs;
    }

    /**
     * Inference forward pass: maps a 3x227x227 image to 1000 class
     * probabilities.
     *
     * <p>Dropout is called with {@code isTraining = false}, so it is a no-op
     * here (inverted dropout needs no inference-time scaling). Use
     * {@link #forwardWithStorage} when the per-layer activations are needed
     * for backpropagation.
     *
     * @param input image tensor [channel][height][width], expected 3x227x227
     * @return softmax probabilities over the 1000 classes
     */
    public double[] forward(double[][][] input) {
        // Conv block 1: 227x227x3 -> 55x55x96 (11x11 kernels, stride 4, no padding)
        double[][][] conv1 = convolution(input, conv1Weights, conv1Bias, 4, 0);
        double[][][] relu1 = relu(conv1);
        double[][][] pool1 = maxPooling(relu1, 3, 2); // -> 27x27x96
        double[][][] norm1 = localResponseNormalization(pool1);

        // Conv block 2: 27x27x96 -> 27x27x256 (5x5 kernels, padding 2)
        double[][][] conv2 = convolution(norm1, conv2Weights, conv2Bias, 1, 2);
        double[][][] relu2 = relu(conv2);
        double[][][] pool2 = maxPooling(relu2, 3, 2); // -> 13x13x256
        double[][][] norm2 = localResponseNormalization(pool2);

        // Conv 3: 13x13x256 -> 13x13x384 (3x3 kernels, padding 1)
        double[][][] conv3 = convolution(norm2, conv3Weights, conv3Bias, 1, 1);
        double[][][] relu3 = relu(conv3);

        // Conv 4: 13x13x384 -> 13x13x384
        double[][][] conv4 = convolution(relu3, conv4Weights, conv4Bias, 1, 1);
        double[][][] relu4 = relu(conv4);

        // Conv 5: 13x13x384 -> 13x13x256
        double[][][] conv5 = convolution(relu4, conv5Weights, conv5Bias, 1, 1);
        double[][][] relu5 = relu(conv5);
        double[][][] pool5 = maxPooling(relu5, 3, 2); // -> 6x6x256

        // Flatten: 6x6x256 -> 9216
        double[] flattened = flatten(pool5);

        // FC1: 9216 -> 4096 (dropout disabled: inference)
        double[] fc1 = fullyConnected(flattened, fc1Weights, fc1Bias);
        double[] reluFc1 = relu(fc1);
        double[] drop1 = dropout(reluFc1, 0.5, false);

        // FC2: 4096 -> 4096
        double[] fc2 = fullyConnected(drop1, fc2Weights, fc2Bias);
        double[] reluFc2 = relu(fc2);
        double[] drop2 = dropout(reluFc2, 0.5, false);

        // Classifier head: 4096 -> 1000 logits -> softmax probabilities
        double[] outputLogits = fullyConnected(drop2, outputWeights, outputBias);
        double[] output = softmax(outputLogits);

        return output;
    }

    /**
     * Cross-entropy loss for a single sample: -log(p[label]).
     *
     * @param predictions softmax probability vector
     * @param label       ground-truth class index
     * @return the negative log-likelihood of the true class
     */
    public double calculateLoss(double[] predictions, int label) {
        final double epsilon = 1e-10; // floor the probability to avoid log(0)
        double clamped = Math.max(predictions[label], epsilon);
        return -Math.log(clamped);
    }

    /**
     * Runs one SGD training step on a single labelled sample: forward pass
     * with cached activations, loss computation, then backpropagation.
     *
     * @param input        image tensor [3][227][227]
     * @param label        ground-truth class index
     * @param learningRate SGD step size
     */
    public void train(double[][][] input, int label, double learningRate) {
        // Forward pass that caches every layer's output for backprop.
        double[] probs = forwardWithStorage(input);

        double loss = calculateLoss(probs, label);
        System.out.println("训练中... 损失: " + loss);

        // Backward pass and in-place parameter update.
        backward(input, label, learningRate, probs);
    }

    /**
     * Training-mode forward pass: identical layer sequence to
     * {@link #forward}, but every intermediate activation is stored in the
     * corresponding instance field so {@link #backward} can read it, and
     * dropout runs with {@code isTraining = true}.
     *
     * @param input image tensor [channel][height][width], expected 3x227x227
     * @return softmax probabilities over the 1000 classes
     */
    public double[] forwardWithStorage(double[][][] input) {
        // Conv block 1: 227x227x3 -> 55x55x96
        conv1Output = convolution(input, conv1Weights, conv1Bias, 4, 0);
        relu1Output = relu(conv1Output);
        pool1Output = maxPooling(relu1Output, 3, 2); // -> 27x27x96
        norm1Output = localResponseNormalization(pool1Output);

        // Conv block 2: 27x27x96 -> 27x27x256
        conv2Output = convolution(norm1Output, conv2Weights, conv2Bias, 1, 2);
        relu2Output = relu(conv2Output);
        pool2Output = maxPooling(relu2Output, 3, 2); // -> 13x13x256
        norm2Output = localResponseNormalization(pool2Output);

        // Conv 3: 13x13x256 -> 13x13x384
        conv3Output = convolution(norm2Output, conv3Weights, conv3Bias, 1, 1);
        relu3Output = relu(conv3Output);

        // Conv 4: 13x13x384 -> 13x13x384
        conv4Output = convolution(relu3Output, conv4Weights, conv4Bias, 1, 1);
        relu4Output = relu(conv4Output);

        // Conv 5: 13x13x384 -> 13x13x256
        conv5Output = convolution(relu4Output, conv5Weights, conv5Bias, 1, 1);
        relu5Output = relu(conv5Output);
        pool5Output = maxPooling(relu5Output, 3, 2); // -> 6x6x256

        // Flatten: 6x6x256 -> 9216
        double[] flattened = flatten(pool5Output);

        // FC1: 9216 -> 4096, dropout active (training mode)
        fc1Output = fullyConnected(flattened, fc1Weights, fc1Bias);
        reluFc1Output = relu(fc1Output);
        drop1Output = dropout(reluFc1Output, 0.5, true);

        // FC2: 4096 -> 4096
        fc2Output = fullyConnected(drop1Output, fc2Weights, fc2Bias);
        reluFc2Output = relu(fc2Output);
        drop2Output = dropout(reluFc2Output, 0.5, true);

        // Classifier head: 4096 -> 1000 logits -> softmax probabilities
        logits = fullyConnected(drop2Output, outputWeights, outputBias);
        double[] output = softmax(logits);

        return output;
    }

    /**
     * Backpropagation for a single sample: computes gradients for every layer
     * and applies an SGD update via {@link #updateParameters}.
     *
     * <p>Must be called after {@link #forwardWithStorage}, which caches the
     * per-layer activations this method reads.
     *
     * <p>NOTE(review): the dropout masks from the forward pass are not
     * stored, so gradients flow through the dropout layers unmasked, and the
     * LRN backward pass is approximated as identity — both are simplifications
     * that make the gradients approximate.
     *
     * @param input        training image [3][227][227]
     * @param label        ground-truth class index in [0, 1000)
     * @param learningRate SGD step size
     * @param predictions  softmax output of the preceding forward pass
     */
    private void backward(double[][][] input, int label, double learningRate, double[] predictions) {
        int batchSize = 1; // current implementation trains on one sample at a time

        // Output-layer gradient: softmax + cross-entropy gives (p - y).
        double[] outputGrad = new double[1000];
        for (int i = 0; i < 1000; i++) {
            outputGrad[i] = predictions[i] - (i == label ? 1 : 0);
        }

        // Classifier head: weight/bias gradients and gradient w.r.t. drop2.
        double[][] outputWeightGrad = new double[1000][4096];
        double[] outputBiasGrad = new double[1000];
        double[] drop2Grad = new double[4096];

        for (int i = 0; i < 1000; i++) {
            outputBiasGrad[i] = outputGrad[i];
            for (int j = 0; j < 4096; j++) {
                outputWeightGrad[i][j] = outputGrad[i] * drop2Output[j];
                drop2Grad[j] += outputGrad[i] * outputWeights[i][j];
            }
        }

        // FC2 backward pass.
        double[] reluFc2Grad = new double[4096];
        for (int i = 0; i < 4096; i++) {
            reluFc2Grad[i] = drop2Grad[i]; // dropout treated as identity (mask not stored)
        }

        double[] fc2Grad = new double[4096];
        for (int i = 0; i < 4096; i++) {
            fc2Grad[i] = reluFc2Grad[i] * (reluFc2Output[i] > 0 ? 1 : 0); // ReLU derivative
        }

        double[][] fc2WeightGrad = new double[4096][4096];
        double[] fc2BiasGrad = new double[4096];
        double[] drop1Grad = new double[4096];

        for (int i = 0; i < 4096; i++) {
            fc2BiasGrad[i] = fc2Grad[i];
            for (int j = 0; j < 4096; j++) {
                fc2WeightGrad[i][j] = fc2Grad[i] * drop1Output[j];
                drop1Grad[j] += fc2Grad[i] * fc2Weights[i][j];
            }
        }

        // FC1 backward pass.
        double[] reluFc1Grad = new double[4096];
        for (int i = 0; i < 4096; i++) {
            reluFc1Grad[i] = drop1Grad[i]; // dropout treated as identity (mask not stored)
        }

        double[] fc1Grad = new double[4096];
        for (int i = 0; i < 4096; i++) {
            fc1Grad[i] = reluFc1Grad[i] * (reluFc1Output[i] > 0 ? 1 : 0); // ReLU derivative
        }

        double[] flattened = flatten(pool5Output);
        double[][] fc1WeightGrad = new double[4096][9216];
        double[] fc1BiasGrad = new double[4096];
        double[] flattenedGrad = new double[9216];

        for (int i = 0; i < 4096; i++) {
            fc1BiasGrad[i] = fc1Grad[i];
            for (int j = 0; j < 9216; j++) {
                fc1WeightGrad[i][j] = fc1Grad[i] * flattened[j];
                flattenedGrad[j] += fc1Grad[i] * fc1Weights[i][j];
            }
        }

        // Reshape the flat gradient back to 256x6x6 (inverse of flatten).
        double[][][] pool5Grad = reshapeTo3D(flattenedGrad, 256, 6, 6);

        // Pool5 backward: route gradients to the forward-pass argmax positions.
        double[][][] relu5Grad = maxPoolBackward(pool5Grad, relu5Output, 3, 2);

        // Conv5 backward.
        double[][][] conv5Grad = reluBackward(relu5Grad, conv5Output);
        double[][][][] conv5WeightGrad = convBackward(relu4Output, conv5Grad, 1, 1);
        double[] conv5BiasGrad = new double[256];
        // Bias gradient: sum of the output gradient over all spatial positions.
        for (int i = 0; i < 256; i++) {
            for (int j = 0; j < conv5Grad[0].length; j++) {
                for (int k = 0; k < conv5Grad[0][0].length; k++) {
                    conv5BiasGrad[i] += conv5Grad[i][j][k];
                }
            }
        }

        // Conv4 backward.
        double[][][] relu4Grad = convBackwardToInput(relu4Output, conv5Grad, conv5Weights, 1, 1);
        double[][][] conv4Grad = reluBackward(relu4Grad, conv4Output);
        double[][][][] conv4WeightGrad = convBackward(relu3Output, conv4Grad, 1, 1);
        double[] conv4BiasGrad = new double[384];
        for (int i = 0; i < 384; i++) {
            for (int j = 0; j < conv4Grad[0].length; j++) {
                for (int k = 0; k < conv4Grad[0][0].length; k++) {
                    conv4BiasGrad[i] += conv4Grad[i][j][k];
                }
            }
        }

        // Conv3 backward.
        double[][][] relu3Grad = convBackwardToInput(relu3Output, conv4Grad, conv4Weights, 1, 1);
        double[][][] conv3Grad = reluBackward(relu3Grad, conv3Output);
        double[][][][] conv3WeightGrad = convBackward(norm2Output, conv3Grad, 1, 1);
        double[] conv3BiasGrad = new double[384];
        for (int i = 0; i < 384; i++) {
            for (int j = 0; j < conv3Grad[0].length; j++) {
                for (int k = 0; k < conv3Grad[0][0].length; k++) {
                    conv3BiasGrad[i] += conv3Grad[i][j][k];
                }
            }
        }

        // Norm2 backward (simplified: LRN treated as identity).
        double[][][] pool2Grad = convBackwardToInput(norm2Output, conv3Grad, conv3Weights, 1, 1);
        double[][][] norm2Grad = pool2Grad; // simplified LRN backward pass

        // Pool2 backward.
        double[][][] relu2Grad = maxPoolBackward(norm2Grad, relu2Output, 3, 2);

        // Conv2 backward.
        double[][][] conv2Grad = reluBackward(relu2Grad, conv2Output);
        double[][][][] conv2WeightGrad = convBackward(norm1Output, conv2Grad, 1, 2);
        double[] conv2BiasGrad = new double[256];
        for (int i = 0; i < 256; i++) {
            for (int j = 0; j < conv2Grad[0].length; j++) {
                for (int k = 0; k < conv2Grad[0][0].length; k++) {
                    conv2BiasGrad[i] += conv2Grad[i][j][k];
                }
            }
        }

        // Norm1 backward (simplified: LRN treated as identity).
        double[][][] pool1Grad = convBackwardToInput(norm1Output, conv2Grad, conv2Weights, 1, 2);
        double[][][] norm1Grad = pool1Grad; // simplified LRN backward pass

        // Pool1 backward.
        double[][][] relu1Grad = maxPoolBackward(norm1Grad, relu1Output, 3, 2);

        // Conv1 backward.
        double[][][] conv1Grad = reluBackward(relu1Grad, conv1Output);
        double[][][][] conv1WeightGrad = convBackward(input, conv1Grad, 4, 0);
        double[] conv1BiasGrad = new double[96];
        for (int i = 0; i < 96; i++) {
            for (int j = 0; j < conv1Grad[0].length; j++) {
                for (int k = 0; k < conv1Grad[0][0].length; k++) {
                    conv1BiasGrad[i] += conv1Grad[i][j][k];
                }
            }
        }

        // Apply the SGD update with all accumulated gradients.
        updateParameters(
                conv1WeightGrad, conv1BiasGrad,
                conv2WeightGrad, conv2BiasGrad,
                conv3WeightGrad, conv3BiasGrad,
                conv4WeightGrad, conv4BiasGrad,
                conv5WeightGrad, conv5BiasGrad,
                fc1WeightGrad, fc1BiasGrad,
                fc2WeightGrad, fc2BiasGrad,
                outputWeightGrad, outputBiasGrad,
                learningRate, batchSize
        );
    }

    /**
     * Reshapes a flat vector into a 3-D tensor in [depth][height][width]
     * order — the inverse of {@link #flatten}.
     *
     * @param input  flat vector of length depth * height * width
     * @param depth  number of channels in the result
     * @param height rows per channel
     * @param width  columns per row
     * @return tensor [depth][height][width]
     */
    private double[][][] reshapeTo3D(double[] input, int depth, int height, int width) {
        double[][][] result = new double[depth][height][width];
        int pos = 0;
        for (double[][] plane : result) {
            for (double[] row : plane) {
                for (int w = 0; w < width; w++) {
                    row[w] = input[pos++];
                }
            }
        }
        return result;
    }

    /**
     * ReLU backward pass: passes the upstream gradient through where the
     * forward activation was positive, zero elsewhere.
     *
     * @param grad   upstream gradient [depth][height][width]
     * @param output forward-pass pre/post-ReLU values used as the mask
     * @return masked gradient, same shape as {@code grad}
     */
    private double[][][] reluBackward(double[][][] grad, double[][][] output) {
        double[][][] masked = new double[grad.length][grad[0].length][grad[0][0].length];

        for (int c = 0; c < grad.length; c++) {
            for (int r = 0; r < grad[c].length; r++) {
                for (int col = 0; col < grad[c][r].length; col++) {
                    // Derivative of ReLU: 1 where the unit fired, else 0.
                    masked[c][r][col] = (output[c][r][col] > 0) ? grad[c][r][col] : 0;
                }
            }
        }
        return masked;
    }

    /**
     * Max-pooling backward pass: routes each upstream gradient to the input
     * position that produced the window maximum in the forward pass (ties:
     * the first position scanned wins).
     *
     * @param grad          gradient w.r.t. the pooled output [depth][outH][outW]
     * @param forwardOutput the INPUT the pooling layer saw in the forward pass
     *                      (pre-pool activations) [depth][inH][inW]
     * @param poolSize      pooling window edge length
     * @param stride        pooling stride
     * @return gradient w.r.t. the pooling input, same shape as {@code forwardOutput}
     */
    private double[][][] maxPoolBackward(double[][][] grad, double[][][] forwardOutput,
                                         int poolSize, int stride) {
        int depth = grad.length;
        // BUG FIX: the input gradient must match the pre-pool activation
        // shape. The old sizing (outH * stride x outW * stride) undersizes it
        // whenever poolSize > stride — e.g. a 3x3 pool with stride 2 over
        // 13x13 gives 6*2 = 12x12 instead of 13x13 — silently dropping
        // gradients for the last rows/columns and mis-shaping every tensor
        // further down the backward pass.
        int height = forwardOutput[0].length;
        int width = forwardOutput[0][0].length;
        double[][][] result = new double[depth][height][width];

        for (int d = 0; d < depth; d++) {
            for (int i = 0; i < grad[0].length; i++) {
                for (int j = 0; j < grad[0][0].length; j++) {
                    // Locate the argmax inside this pooling window.
                    double maxValue = Double.NEGATIVE_INFINITY;
                    int maxI = 0, maxJ = 0;

                    for (int x = 0; x < poolSize; x++) {
                        for (int y = 0; y < poolSize; y++) {
                            int posX = i * stride + x;
                            int posY = j * stride + y;
                            if (posX < height && posY < width &&
                                    forwardOutput[d][posX][posY] > maxValue) {
                                maxValue = forwardOutput[d][posX][posY];
                                maxI = posX;
                                maxJ = posY;
                            }
                        }
                    }

                    // Only the max position receives this window's gradient.
                    result[d][maxI][maxJ] += grad[d][i][j];
                }
            }
        }
        return result;
    }

    /**
     * Convolution backward pass: gradient of the loss w.r.t. the layer's
     * kernel weights.
     *
     * @param input   the layer input from the forward pass [inC][inH][inW]
     * @param grad    gradient w.r.t. the layer output [outC][outH][outW]
     * @param stride  stride used in the forward pass
     * @param padding zero padding used in the forward pass
     * @return weight gradient [outC][k][k][inC]
     */
    private double[][][][] convBackward(double[][][] input, double[][][] grad, int stride, int padding) {
        int inputDepth = input.length;
        int outputChannels = grad.length;

        // BUG FIX: recover the kernel size from the forward-pass geometry:
        //   outH = (inH + 2*padding - k) / stride + 1
        //   =>  k = inH + 2*padding - (outH - 1) * stride
        // The previous lookup keyed on the output channel count was ambiguous
        // — conv2 and conv5 both emit 256 channels but use 5x5 and 3x3
        // kernels respectively — so conv5 was given a 5x5 gradient footprint,
        // reading past the valid receptive field.
        int kernelSize = input[0].length + 2 * padding - (grad[0].length - 1) * stride;

        double[][][][] weightGrad = new double[outputChannels][kernelSize][kernelSize][inputDepth];

        // Zero-pad the input exactly as the forward pass did.
        double[][][] paddedInput = addPadding(input, padding);

        // dL/dW[k][x][y][d] = sum over output positions of input * outputGrad.
        for (int k = 0; k < outputChannels; k++) {
            for (int i = 0; i < grad[0].length; i++) {
                for (int j = 0; j < grad[0][0].length; j++) {
                    for (int d = 0; d < inputDepth; d++) {
                        for (int x = 0; x < kernelSize; x++) {
                            for (int y = 0; y < kernelSize; y++) {
                                weightGrad[k][x][y][d] +=
                                        paddedInput[d][i * stride + x][j * stride + y] * grad[k][i][j];
                            }
                        }
                    }
                }
            }
        }
        return weightGrad;
    }

    /**
     * Convolution backward pass: gradient of the loss w.r.t. the layer's
     * input (transposed-convolution scatter of the output gradient through
     * the kernels).
     *
     * @param input   the layer input from the forward pass (used for shape only)
     * @param grad    gradient w.r.t. the layer output [outC][outH][outW]
     * @param weights kernel bank [outC][k][k][inC]
     * @param stride  stride used in the forward pass
     * @param padding zero padding used in the forward pass
     * @return gradient w.r.t. the input, same shape as {@code input}
     */
    private double[][][] convBackwardToInput(double[][][] input, double[][][] grad,
                                             double[][][][] weights, int stride, int padding) {
        int channels = input.length;
        int rows = input[0].length;
        int cols = input[0][0].length;
        double[][][] inputGrad = new double[channels][rows][cols];

        int outChannels = grad.length;
        int kSize = weights[0].length;

        for (int k = 0; k < outChannels; k++) {
            for (int i = 0; i < grad[0].length; i++) {
                for (int j = 0; j < grad[0][0].length; j++) {
                    for (int c = 0; c < channels; c++) {
                        for (int kr = 0; kr < kSize; kr++) {
                            for (int kc = 0; kc < kSize; kc++) {
                                // Map back to unpadded input coordinates;
                                // positions that fall in the padding are skipped.
                                int row = i * stride + kr - padding;
                                int col = j * stride + kc - padding;
                                if (row >= 0 && row < rows && col >= 0 && col < cols) {
                                    inputGrad[c][row][col] +=
                                            weights[k][kr][kc][c] * grad[k][i][j];
                                }
                            }
                        }
                    }
                }
            }
        }
        return inputGrad;
    }

    /**
     * Applies one vanilla SGD step to every layer:
     * {@code w -= (learningRate / batchSize) * grad}.
     *
     * <p>Refactored from eight copy-pasted loop nests with hard-coded layer
     * dimensions into two helpers that iterate each weight tensor's own
     * dimensions — same updates, no magic numbers to keep in sync with the
     * field declarations.
     *
     * @param learningRate SGD step size
     * @param batchSize    number of samples the gradients were accumulated over
     */
    private void updateParameters(
            double[][][][] conv1WeightGrad, double[] conv1BiasGrad,
            double[][][][] conv2WeightGrad, double[] conv2BiasGrad,
            double[][][][] conv3WeightGrad, double[] conv3BiasGrad,
            double[][][][] conv4WeightGrad, double[] conv4BiasGrad,
            double[][][][] conv5WeightGrad, double[] conv5BiasGrad,
            double[][] fc1WeightGrad, double[] fc1BiasGrad,
            double[][] fc2WeightGrad, double[] fc2BiasGrad,
            double[][] outputWeightGrad, double[] outputBiasGrad,
            double learningRate, int batchSize) {

        double scale = learningRate / batchSize;

        // Convolutional layers.
        applySgd4D(conv1Weights, conv1Bias, conv1WeightGrad, conv1BiasGrad, scale);
        applySgd4D(conv2Weights, conv2Bias, conv2WeightGrad, conv2BiasGrad, scale);
        applySgd4D(conv3Weights, conv3Bias, conv3WeightGrad, conv3BiasGrad, scale);
        applySgd4D(conv4Weights, conv4Bias, conv4WeightGrad, conv4BiasGrad, scale);
        applySgd4D(conv5Weights, conv5Bias, conv5WeightGrad, conv5BiasGrad, scale);

        // Fully connected layers and the classifier head.
        applySgd2D(fc1Weights, fc1Bias, fc1WeightGrad, fc1BiasGrad, scale);
        applySgd2D(fc2Weights, fc2Bias, fc2WeightGrad, fc2BiasGrad, scale);
        applySgd2D(outputWeights, outputBias, outputWeightGrad, outputBiasGrad, scale);
    }

    /**
     * In-place SGD update for a conv layer; bounds come from the weight
     * tensor itself, so over-sized gradient arrays are read safely.
     */
    private void applySgd4D(double[][][][] weights, double[] bias,
                            double[][][][] weightGrad, double[] biasGrad, double scale) {
        for (int k = 0; k < weights.length; k++) {
            for (int i = 0; i < weights[k].length; i++) {
                for (int j = 0; j < weights[k][i].length; j++) {
                    for (int d = 0; d < weights[k][i][j].length; d++) {
                        weights[k][i][j][d] -= scale * weightGrad[k][i][j][d];
                    }
                }
            }
            bias[k] -= scale * biasGrad[k];
        }
    }

    /** In-place SGD update for a fully connected layer: w -= scale * grad. */
    private void applySgd2D(double[][] weights, double[] bias,
                            double[][] weightGrad, double[] biasGrad, double scale) {
        for (int i = 0; i < weights.length; i++) {
            for (int j = 0; j < weights[i].length; j++) {
                weights[i][j] -= scale * weightGrad[i][j];
            }
            bias[i] -= scale * biasGrad[i];
        }
    }

    /**
     * Classifies an image: runs the inference forward pass and returns the
     * index of the most probable class (first index wins on ties).
     *
     * @param input image tensor [3][227][227]
     * @return predicted class index in [0, 1000)
     */
    public int predict(double[][][] input) {
        double[] probs = forward(input);

        // Argmax over the class probabilities.
        int best = 0;
        for (int i = 1; i < probs.length; i++) {
            if (probs[i] > probs[best]) {
                best = i;
            }
        }
        return best;
    }

    /**
     * Smoke test: builds the network, runs a timed forward pass on a random
     * image, performs one training step, then runs a prediction.
     */
    public static void main(String[] args) {
        AlexNetDemo net = new AlexNetDemo();

        // Random 3x227x227 test image with values in [0, 1).
        double[][][] image = new double[3][227][227];
        Random rng = new Random();
        for (int c = 0; c < 3; c++) {
            for (int r = 0; r < 227; r++) {
                for (int col = 0; col < 227; col++) {
                    image[c][r][col] = rng.nextDouble();
                }
            }
        }

        // Timed inference forward pass.
        System.out.println("开始前向传播测试...");
        long begin = System.currentTimeMillis();
        double[] probs = net.forward(image);
        long finish = System.currentTimeMillis();

        System.out.println("前向传播完成，耗时: " + (finish - begin) + "ms");
        System.out.println("输出层维度: " + probs.length);
        System.out.println("前5个预测概率:");
        for (int i = 0; i < 5; i++) {
            System.out.printf("类别 %d: %.6f\n", i, probs[i]);
        }

        // One timed training step on an arbitrary label.
        begin = System.currentTimeMillis();
        net.train(image, 3, 0.01);
        finish = System.currentTimeMillis();
        System.out.println("训练，耗时: " + (finish - begin) + "ms");

        // Prediction round-trip.
        int predicted = net.predict(image);
        System.out.println("预测的类别索引: " + predicted);
    }
}
