package com.cu.machinelearning.neural;

import java.util.Random;

public class ResNetWithBackprop {
    /** Rectified linear unit: returns {@code x} when positive, otherwise 0. */
    public static double relu(double x) {
        return Math.max(x, 0);
    }

    /** Element-wise ReLU over a vector; the input array is not modified. */
    public static double[] relu(double[] x) {
        double[] out = new double[x.length];
        for (int i = 0; i < out.length; i++) {
            out[i] = Math.max(x[i], 0);  // scalar ReLU, inlined
        }
        return out;
    }

    /**
     * Element-wise ReLU over a [depth][height][width] volume.
     * Returns a new array; the input is not modified. Assumes a rectangular volume.
     */
    public static double[][][] relu3D(double[][][] x) {
        double[][][] out = new double[x.length][x[0].length][x[0][0].length];
        for (int d = 0; d < out.length; d++) {
            for (int r = 0; r < out[d].length; r++) {
                for (int c = 0; c < out[d][r].length; c++) {
                    out[d][r][c] = Math.max(x[d][r][c], 0);
                }
            }
        }
        return out;
    }

    /** Derivative of ReLU (used in backprop): 1 for positive input, else 0. */
    public static double reluDeriv(double x) {
        if (x > 0) {
            return 1;
        }
        return 0;
    }

    /**
     * Element-wise ReLU derivative over a [depth][height][width] volume:
     * 1 where the input is positive, 0 elsewhere. Returns a new array.
     */
    public static double[][][] reluDeriv3D(double[][][] x) {
        double[][][] mask = new double[x.length][x[0].length][x[0][0].length];
        for (int d = 0; d < mask.length; d++) {
            for (int r = 0; r < mask[d].length; r++) {
                for (int c = 0; c < mask[d][r].length; c++) {
                    mask[d][r][c] = x[d][r][c] > 0 ? 1 : 0;  // scalar derivative, inlined
                }
            }
        }
        return mask;
    }

    // Forward valid (no-padding) cross-correlation.
    //   input:  [channels][height][width]
    //   kernel: [numKernels][kSize][kSize][channels]
    //   bias:   one scalar per kernel
    //   stride: step between receptive-field windows
    // Returns [numKernels][outRows][outCols] with outRows = (height - kSize) / stride + 1.
    public static double[][][] convolution(double[][][] input, double[][][][] kernel,
                                           double[] bias, int stride) {
        final int channels = input.length;
        final int rows = input[0].length;
        final int cols = input[0][0].length;
        final int filters = kernel.length;
        final int kSize = kernel[0].length;

        final int outRows = (rows - kSize) / stride + 1;
        final int outCols = (cols - kSize) / stride + 1;

        double[][][] out = new double[filters][outRows][outCols];

        for (int f = 0; f < filters; f++) {
            for (int r = 0; r < outRows; r++) {
                final int top = r * stride;      // top edge of this receptive field
                for (int c = 0; c < outCols; c++) {
                    final int left = c * stride; // left edge of this receptive field
                    double acc = 0.0;
                    for (int ch = 0; ch < channels; ch++) {
                        for (int kr = 0; kr < kSize; kr++) {
                            for (int kc = 0; kc < kSize; kc++) {
                                acc += input[ch][top + kr][left + kc] * kernel[f][kr][kc][ch];
                            }
                        }
                    }
                    out[f][r][c] = acc + bias[f];
                }
            }
        }
        return out;
    }

    // Container for the three gradients produced by convolutionBackprop.
    public static class ConvBackpropResult {
        public double[][][] inputGrad;    // dL/dInput  (feature-map gradient)
        public double[][][][] kernelGrad; // dL/dKernel (weight gradient)
        public double[] biasGrad;         // dL/dBias

        public ConvBackpropResult(double[][][] dInput, double[][][][] dKernel, double[] dBias) {
            inputGrad = dInput;
            kernelGrad = dKernel;
            biasGrad = dBias;
        }
    }

    // Backward pass for the valid (no-padding) `convolution` above.
    //   input      — forward-pass input, [inputDepth][inputHeight][inputWidth]
    //   kernel     — forward-pass weights, [numKernels][kernelSize][kernelSize][inputDepth]
    //   outputGrad — upstream gradient dL/dOutput, [numKernels][outputHeight][outputWidth]
    //   stride     — stride used in the forward pass
    // Returns dL/dInput, dL/dKernel and dL/dBias bundled in a ConvBackpropResult.
    public static ConvBackpropResult convolutionBackprop(double[][][] input, double[][][][] kernel,
                                                         double[][][] outputGrad, int stride) {
        int inputDepth = input.length;
        int inputHeight = input[0].length;
        int inputWidth = input[0][0].length;
        int numKernels = kernel.length;
        int kernelSize = kernel[0].length;
        int outputHeight = outputGrad[0].length;
        int outputWidth = outputGrad[0][0].length;

        // 1. Allocate gradient storage (zero-initialized by Java).
        double[][][] inputGrad = new double[inputDepth][inputHeight][inputWidth];
        double[][][][] kernelGrad = new double[numKernels][kernelSize][kernelSize][inputDepth];
        double[] biasGrad = new double[numKernels];

        // 2. Bias gradient: each kernel's bias gradient is the sum of its output gradients.
        for (int k = 0; k < numKernels; k++) {
            double sum = 0.0;
            for (int i = 0; i < outputHeight; i++) {
                for (int j = 0; j < outputWidth; j++) {
                    sum += outputGrad[k][i][j];
                }
            }
            biasGrad[k] = sum;
        }

        // 3. Kernel gradient: correlation of the forward input with the output gradient.
        for (int k = 0; k < numKernels; k++) {
            for (int x = 0; x < kernelSize; x++) {
                for (int y = 0; y < kernelSize; y++) {
                    for (int d = 0; d < inputDepth; d++) {
                        double sum = 0.0;
                        for (int i = 0; i < outputHeight; i++) {
                            for (int j = 0; j < outputWidth; j++) {
                                // Input cell that kernel tap (x, y) touched for output (i, j).
                                int inputRow = i * stride + x;
                                int inputCol = j * stride + y;
                                sum += input[d][inputRow][inputCol] * outputGrad[k][i][j];
                            }
                        }
                        kernelGrad[k][x][y][d] = sum;
                    }
                }
            }
        }

        // 4. Input gradient: each input cell accumulates contributions from every
        //    output cell whose receptive field covered it (i.e. a "full" correlation
        //    of the output gradient with the kernel).
        for (int d = 0; d < inputDepth; d++) {
            for (int i = 0; i < inputHeight; i++) {
                for (int j = 0; j < inputWidth; j++) {
                    double sum = 0.0;
                    for (int k = 0; k < numKernels; k++) {
                        for (int x = 0; x < kernelSize; x++) {
                            for (int y = 0; y < kernelSize; y++) {
                                // Candidate output cell that read input (i, j) through
                                // kernel tap (x, y); valid only if (i-x, j-y) is
                                // stride-aligned and inside the output. The % check
                                // also rejects negative (i-x)/(j-y), since Java's
                                // remainder is negative there.
                                int outputRow = (i - x) / stride;
                                int outputCol = (j - y) / stride;
                                if (outputRow >= 0 && outputRow < outputHeight &&
                                        outputCol >= 0 && outputCol < outputWidth &&
                                        (i - x) % stride == 0 && (j - y) % stride == 0) {
                                    sum += kernel[k][x][y][d] * outputGrad[k][outputRow][outputCol];
                                }
                            }
                        }
                    }
                    inputGrad[d][i][j] = sum;
                }
            }
        }

        return new ConvBackpropResult(inputGrad, kernelGrad, biasGrad);
    }

    // 1x1 convolution (pointwise, unit stride): a per-pixel linear map across channels.
    public static double[][][] conv1x1(double[][][] input, double[][][][] kernel, double[] bias) {
        final int unitStride = 1;  // 1x1 convolutions here always use stride 1
        return convolution(input, kernel, bias, unitStride);
    }

    // Backward pass of the 1x1 convolution above (unit stride).
    public static ConvBackpropResult conv1x1Backprop(double[][][] input, double[][][][] kernel, double[][][] outputGrad) {
        final int unitStride = 1;
        return convolutionBackprop(input, kernel, outputGrad, unitStride);
    }

    // Max-pooling forward result: the pooled map plus, for every output cell,
    // the in-window offset of the winning element (needed by the backward pass).
    public static class MaxPoolResult {
        public double[][][] output;
        public int[][][][] maxIndices;  // [depth][outH][outW][2] -> (x, y) offset inside the window

        public MaxPoolResult(double[][][] pooled, int[][][][] argmax) {
            output = pooled;
            maxIndices = argmax;
        }
    }

    /**
     * Max pooling (forward). Slides a poolSize x poolSize window with the given
     * stride over each channel, keeping the maximum value and recording the
     * in-window (x, y) offset of that maximum for use in the backward pass.
     */
    public static MaxPoolResult maxPooling(double[][][] input, int poolSize, int stride) {
        final int channels = input.length;
        final int outRows = (input[0].length - poolSize) / stride + 1;
        final int outCols = (input[0][0].length - poolSize) / stride + 1;

        double[][][] pooled = new double[channels][outRows][outCols];
        int[][][][] argmax = new int[channels][outRows][outCols][2];  // (x, y) per window

        for (int c = 0; c < channels; c++) {
            for (int r = 0; r < outRows; r++) {
                for (int q = 0; q < outCols; q++) {
                    double best = Double.NEGATIVE_INFINITY;
                    int bestX = 0;
                    int bestY = 0;
                    for (int dx = 0; dx < poolSize; dx++) {
                        for (int dy = 0; dy < poolSize; dy++) {
                            double v = input[c][r * stride + dx][q * stride + dy];
                            if (v > best) {  // strict '>' keeps the first maximum on ties
                                best = v;
                                bestX = dx;
                                bestY = dy;
                            }
                        }
                    }
                    pooled[c][r][q] = best;
                    argmax[c][r][q][0] = bestX;
                    argmax[c][r][q][1] = bestY;
                }
            }
        }
        return new MaxPoolResult(pooled, argmax);
    }

    // Backward pass for max pooling: routes each upstream gradient value to the
    // input position that produced the forward maximum; every other position gets 0.
    //
    //   upstreamGrad   — dL/dPoolOutput, [depth][outH][outW]; must match maxIndices
    //   maxIndices     — per-window (x, y) offsets of the winning element, as
    //                    recorded by maxPooling
    //   poolSize/stride — the pooling geometry used in the forward pass
    //
    // Returns dL/dPoolInput with the pre-pooling spatial size.
    //
    // Bug fix: the original sized the result from the gradient array (which has
    // the pooled OUTPUT dimensions), so it returned a wrongly-shaped gradient
    // and indexed out of bounds whenever the input was larger than the output.
    // The input size is now reconstructed from the pooling geometry.
    public static double[][][] maxPoolingBackprop(double[][][] upstreamGrad, int[][][][] maxIndices,
                                                  int poolSize, int stride) {
        int depth = upstreamGrad.length;
        int outputHeight = maxIndices[0].length;
        int outputWidth = maxIndices[0][0].length;
        // Smallest input size consistent with the window/stride geometry.
        int inputHeight = (outputHeight - 1) * stride + poolSize;
        int inputWidth = (outputWidth - 1) * stride + poolSize;

        double[][][] inputGrad = new double[depth][inputHeight][inputWidth];

        for (int d = 0; d < depth; d++) {
            for (int i = 0; i < outputHeight; i++) {
                for (int j = 0; j < outputWidth; j++) {
                    // Only the argmax position of each window receives gradient.
                    int inputX = i * stride + maxIndices[d][i][j][0];
                    int inputY = j * stride + maxIndices[d][i][j][1];
                    inputGrad[d][inputX][inputY] += upstreamGrad[d][i][j];
                }
            }
        }
        return inputGrad;
    }

    // Residual block with hand-rolled backprop: main path conv3x3 -> ReLU ->
    // conv3x3, added to a skip connection (identity, or a 1x1 projection when
    // channels/stride change), followed by a final ReLU.
    static class ResidualBlock {
        // Learnable parameters
        private double[][][][] conv1Weights;
        private double[] conv1Bias;
        private double[][][][] conv2Weights;
        private double[] conv2Bias;
        private double[][][][] shortcutWeights;  // only allocated when useShortcutConv
        private double[] shortcutBias;
        private boolean useShortcutConv;

        // Forward-pass intermediates, cached for backward()
        private double[][][] conv1Input;
        private double[][][] conv1Output;
        private double[][][] relu1Output;
        private double[][][] conv2Input;
        private double[][][] conv2Output;
        private double[][][] shortcutOutput;
        private double[][][] residualOutput;

        // NOTE(review): `stride` only decides whether a projection shortcut is
        // used; it is never passed to the convolutions below, so a block built
        // with stride 2 does not actually downsample — confirm intent.
        public ResidualBlock(int inputChannels, int outputChannels, int stride, Random random) {
            useShortcutConv = (inputChannels != outputChannels) || (stride != 1);

            // He-style initialization for the two 3x3 convolutions.
            conv1Weights = initializeConvWeights(outputChannels, 3, inputChannels, random);
            conv1Bias = new double[outputChannels];
            initializeBias(conv1Bias);

            conv2Weights = initializeConvWeights(outputChannels, 3, outputChannels, random);
            conv2Bias = new double[outputChannels];
            initializeBias(conv2Bias);

            if (useShortcutConv) {
                // 1x1 projection so the shortcut matches the main path's channels.
                shortcutWeights = initializeConvWeights(outputChannels, 1, inputChannels, random);
                shortcutBias = new double[outputChannels];
                initializeBias(shortcutBias);
            }
        }

        // Forward pass (caches intermediates for backward()).
        // NOTE(review): the 3x3 convolutions use no padding, so the main path
        // shrinks spatially while the identity shortcut keeps the input size;
        // the residual add below iterates over the (smaller) conv2 dims and so
        // reads only the top-left corner of the shortcut. A standard ResNet
        // pads to keep the sizes equal — verify this is intended.
        public double[][][] forward(double[][][] input) {
            // Main path: conv -> ReLU -> conv.
            conv1Input = input;
            conv1Output = convolution(conv1Input, conv1Weights, conv1Bias, 1);
            relu1Output = relu3D(conv1Output);
            conv2Input = relu1Output;
            conv2Output = convolution(conv2Input, conv2Weights, conv2Bias, 1);

            // Skip connection: projection when shapes differ, identity otherwise.
            if (useShortcutConv) {
                shortcutOutput = conv1x1(input, shortcutWeights, shortcutBias);
            } else {
                shortcutOutput = input;
            }

            // Residual add followed by the block's final ReLU.
            residualOutput = new double[conv2Output.length][conv2Output[0].length][conv2Output[0][0].length];
            for (int d = 0; d < conv2Output.length; d++) {
                for (int i = 0; i < conv2Output[0].length; i++) {
                    for (int j = 0; j < conv2Output[0][0].length; j++) {
                        residualOutput[d][i][j] = relu(conv2Output[d][i][j] + shortcutOutput[d][i][j]);
                    }
                }
            }
            return residualOutput;
        }

        // Backward pass: computes gradients, updates this block's weights with
        // plain SGD, and returns dL/d(input) for the preceding layer.
        public double[][][] backward(double[][][] outputGrad, double learningRate) {
            // 1. Gradient through the final ReLU of the residual sum. Evaluating
            //    reluDeriv on the post-activation output is equivalent to using
            //    the pre-activation sum, since relu(x) > 0 exactly when x > 0.
            double[][][] reluDeriv = reluDeriv3D(residualOutput);
            double[][][] residualGrad = new double[outputGrad.length][outputGrad[0].length][outputGrad[0][0].length];
            for (int d = 0; d < outputGrad.length; d++) {
                for (int i = 0; i < outputGrad[0].length; i++) {
                    for (int j = 0; j < outputGrad[0][0].length; j++) {
                        residualGrad[d][i][j] = outputGrad[d][i][j] * reluDeriv[d][i][j];
                    }
                }
            }

            // 2. Main path backprop (conv2 -> ReLU -> conv1), updating weights as we go.
            ConvBackpropResult conv2Back = convolutionBackprop(conv2Input, conv2Weights, residualGrad, 1);
            updateWeights(conv2Weights, conv2Back.kernelGrad, conv2Bias, conv2Back.biasGrad, learningRate);

            double[][][] relu1Grad = new double[conv2Back.inputGrad.length][conv2Back.inputGrad[0].length][conv2Back.inputGrad[0][0].length];
            double[][][] relu1Deriv = reluDeriv3D(relu1Output);
            for (int d = 0; d < relu1Grad.length; d++) {
                for (int i = 0; i < relu1Grad[0].length; i++) {
                    for (int j = 0; j < relu1Grad[0][0].length; j++) {
                        relu1Grad[d][i][j] = conv2Back.inputGrad[d][i][j] * relu1Deriv[d][i][j];
                    }
                }
            }

            ConvBackpropResult conv1Back = convolutionBackprop(conv1Input, conv1Weights, relu1Grad, 1);
            updateWeights(conv1Weights, conv1Back.kernelGrad, conv1Bias, conv1Back.biasGrad, learningRate);

            // 3. Skip-connection backprop. The add passes the residual gradient
            //    through unchanged. NOTE(review): when useShortcutConv is false,
            //    inputGrad aliases residualGrad and is mutated in place by step 4.
            double[][][] shortcutGrad = residualGrad;  // gradient flowing into the shortcut branch
            double[][][] inputGrad = shortcutGrad;

            if (useShortcutConv) {
                // conv1Input is the same array as the block's forward input.
                ConvBackpropResult shortcutBack = conv1x1Backprop(conv1Input, shortcutWeights, shortcutGrad);
                updateWeights(shortcutWeights, shortcutBack.kernelGrad, shortcutBias, shortcutBack.biasGrad, learningRate);
                inputGrad = shortcutBack.inputGrad;  // input gradient from the projection path
            }

            // 4. Total input gradient = shortcut gradient + main-path (conv1)
            //    input gradient, accumulated over inputGrad's own dimensions.
            for (int d = 0; d < inputGrad.length; d++) {
                for (int i = 0; i < inputGrad[0].length; i++) {
                    for (int j = 0; j < inputGrad[0][0].length; j++) {
                        inputGrad[d][i][j] += conv1Back.inputGrad[d][i][j];
                    }
                }
            }

            return inputGrad;
        }

        // Plain SGD: w -= lr * grad, applied in place to weights and bias.
        private void updateWeights(double[][][][] weights, double[][][][] grad,
                                   double[] bias, double[] biasGrad, double lr) {
            for (int k = 0; k < weights.length; k++) {
                for (int x = 0; x < weights[0].length; x++) {
                    for (int y = 0; y < weights[0][0].length; y++) {
                        for (int d = 0; d < weights[0][0][0].length; d++) {
                            weights[k][x][y][d] -= lr * grad[k][x][y][d];
                        }
                    }
                }
                bias[k] -= lr * biasGrad[k];
            }
        }

        // He initialization: N(0, 2 / fanIn) with fanIn = kernelSize^2 * inputDepth.
        private double[][][][] initializeConvWeights(int numKernels, int kernelSize,
                                                     int inputDepth, Random random) {
            double[][][][] weights = new double[numKernels][kernelSize][kernelSize][inputDepth];
            double scale = Math.sqrt(2.0 / (kernelSize * kernelSize * inputDepth));
            for (int k = 0; k < numKernels; k++) {
                for (int i = 0; i < kernelSize; i++) {
                    for (int j = 0; j < kernelSize; j++) {
                        for (int d = 0; d < inputDepth; d++) {
                            weights[k][i][j][d] = random.nextGaussian() * scale;
                        }
                    }
                }
            }
            return weights;
        }

        // All biases start at a small positive constant.
        private void initializeBias(double[] bias) {
            for (int i = 0; i < bias.length; i++) {
                bias[i] = 0.1;
            }
        }
    }

    // Trainable ResNet-style network: 7x7/2 stem conv -> ReLU -> 3x3/2 max pool,
    // four residual blocks (64 -> 64 -> 128 -> 256 -> 512 channels), global
    // average pooling, and a fully connected softmax classifier. All weights
    // are updated in place with plain SGD.
    static class ResNetWithTrain {
        private double[][][][] conv1Weights;
        private double[] conv1Bias;
        private ResidualBlock[] blocks;
        private double[][] fcWeights;  // [numClasses][512]; 512 = channels out of the last block
        private double[] fcBias;
        private int numClasses;

        // Forward-pass intermediates, cached for backward()
        private double[][][] conv1Input;
        private double[][][] conv1Output;
        private double[][][] relu1Output;
        private MaxPoolResult pool1Result;
        private double[][][] globalPoolOutput;
        private double[] fcInput;
        private double[] fcOutput;

        public ResNetWithTrain(int numClasses, Random random) {
            this.numClasses = numClasses;

            // Stem: 64 filters of 7x7 over 3 input channels (RGB).
            conv1Weights = initializeConvWeights(64, 7, 3, random);
            conv1Bias = new double[64];
            initializeBias(conv1Bias);

            // Residual stages.
            blocks = new ResidualBlock[4];
            blocks[0] = new ResidualBlock(64, 64, 1, random);
            blocks[1] = new ResidualBlock(64, 128, 2, random);
            blocks[2] = new ResidualBlock(128, 256, 2, random);
            blocks[3] = new ResidualBlock(256, 512, 2, random);

            // Classifier head over the 512-dim pooled feature vector.
            fcWeights = new double[numClasses][512];
            fcBias = new double[numClasses];
            initializeFcWeights(fcWeights, fcBias, random);
        }

        // Forward pass; caches intermediates for backward().
        // Returns softmax class probabilities.
        public double[] forward(double[][][] input) {
            // Stem: conv (stride 2) -> ReLU -> 3x3 max pool (stride 2).
            conv1Input = input;
            conv1Output = convolution(conv1Input, conv1Weights, conv1Bias, 2);
            relu1Output = relu3D(conv1Output);
            pool1Result = maxPooling(relu1Output, 3, 2);

            // Residual stages.
            double[][][] x = pool1Result.output;
            for (ResidualBlock block : blocks) {
                x = block.forward(x);
            }

            // Global average pooling collapses each channel to one scalar.
            globalPoolOutput = x;
            double[] features = globalAvgPool(globalPoolOutput);

            // Fully connected classifier.
            fcInput = features;
            fcOutput = fullyConnected(fcInput, fcWeights, fcBias);

            // Class probabilities.
            return softmax(fcOutput);
        }

        // Backward pass for one sample: computes gradients for every layer and
        // applies SGD updates. Must be called after forward() (uses its caches).
        public void backward(int label, double learningRate) {
            // 1. Output-layer gradient: softmax + cross-entropy simplifies to (p - y).
            double[] outputGrad = new double[numClasses];
            double[] softmaxOutput = softmax(fcOutput);
            for (int i = 0; i < numClasses; i++) {
                outputGrad[i] = softmaxOutput[i] - (i == label ? 1 : 0);
            }

            // 2. Fully connected layer backprop: weight, bias and input gradients.
            double[][] fcWeightsGrad = new double[numClasses][512];
            double[] fcBiasGrad = new double[numClasses];
            double[] fcInputGrad = new double[512];

            for (int i = 0; i < numClasses; i++) {
                fcBiasGrad[i] = outputGrad[i];
                for (int j = 0; j < 512; j++) {
                    fcWeightsGrad[i][j] = outputGrad[i] * fcInput[j];
                    fcInputGrad[j] += outputGrad[i] * fcWeights[i][j];
                }
            }

            // Apply the FC updates.
            updateFcWeights(fcWeights, fcWeightsGrad, fcBias, fcBiasGrad, learningRate);

            // 3. Global average pooling backprop: each channel's scalar gradient
            //    is spread uniformly over its height x width cells.
            int depth = globalPoolOutput.length;
            int height = globalPoolOutput[0].length;
            int width = globalPoolOutput[0][0].length;
            double[][][] poolGrad = new double[depth][height][width];
            double scale = 1.0 / (height * width);  // d(mean)/d(cell)
            for (int d = 0; d < depth; d++) {
                for (int i = 0; i < height; i++) {
                    for (int j = 0; j < width; j++) {
                        poolGrad[d][i][j] = fcInputGrad[d] * scale;
                    }
                }
            }

            // 4. Residual blocks in reverse order; each returns dL/d(its input).
            double[][][] currentGrad = poolGrad;
            for (int i = blocks.length - 1; i >= 0; i--) {
                currentGrad = blocks[i].backward(currentGrad, learningRate);
            }

            // 5. Stem max-pool backprop.
            //    NOTE(review): the first argument is the upstream gradient; its
            //    spatial dims must match pool1Result.maxIndices, but the residual
            //    blocks shrink feature maps (no padding), so the shapes may
            //    disagree here — verify.
            double[][][] pool1Grad = maxPoolingBackprop(currentGrad, pool1Result.maxIndices, 3, 2);

            // 6. Stem ReLU backprop (mask by the forward activation).
            double[][][] relu1Grad = new double[pool1Grad.length][pool1Grad[0].length][pool1Grad[0][0].length];
            double[][][] relu1Deriv = reluDeriv3D(relu1Output);
            for (int d = 0; d < relu1Grad.length; d++) {
                for (int i = 0; i < relu1Grad[0].length; i++) {
                    for (int j = 0; j < relu1Grad[0][0].length; j++) {
                        relu1Grad[d][i][j] = pool1Grad[d][i][j] * relu1Deriv[d][i][j];
                    }
                }
            }

            // 7. Stem convolution backprop and weight update.
            ConvBackpropResult conv1Back = convolutionBackprop(conv1Input, conv1Weights, relu1Grad, 2);
            updateConvWeights(conv1Weights, conv1Back.kernelGrad, conv1Bias, conv1Back.biasGrad, learningRate);
        }

        // One training step (forward + loss + backward); returns the sample loss.
        public double trainStep(double[][][] input, int label, double learningRate) {
            double[] output = forward(input);
            double loss = calculateLoss(output, label);
            backward(label, learningRate);
            return loss;
        }

        // Mean over each channel's spatial cells -> one scalar per channel.
        private double[] globalAvgPool(double[][][] input) {
            int depth = input.length;
            int height = input[0].length;
            int width = input[0][0].length;

            double[] result = new double[depth];
            for (int d = 0; d < depth; d++) {
                double sum = 0.0;
                for (int i = 0; i < height; i++) {
                    for (int j = 0; j < width; j++) {
                        sum += input[d][i][j];
                    }
                }
                result[d] = sum / (height * width);
            }
            return result;
        }

        // Dense layer: output[i] = input . weights[i] + bias[i].
        private double[] fullyConnected(double[] input, double[][] weights, double[] bias) {
            int outputSize = weights.length;
            double[] output = new double[outputSize];
            for (int i = 0; i < outputSize; i++) {
                double sum = 0.0;
                for (int j = 0; j < input.length; j++) {
                    sum += input[j] * weights[i][j];
                }
                output[i] = sum + bias[i];
            }
            return output;
        }

        // Numerically stable softmax: shifts by the max logit before exponentiating.
        private double[] softmax(double[] input) {
            double[] output = new double[input.length];
            double expSum = 0.0;
            double maxVal = input[0];
            for (double val : input) maxVal = Math.max(maxVal, val);
            for (int i = 0; i < input.length; i++) {
                output[i] = Math.exp(input[i] - maxVal);
                expSum += output[i];
            }
            for (int i = 0; i < output.length; i++) {
                output[i] /= expSum;
            }
            return output;
        }

        // Cross-entropy loss for the true class, clamped away from log(0).
        private double calculateLoss(double[] predictions, int label) {
            double epsilon = 1e-10;
            return -Math.log(Math.max(predictions[label], epsilon));
        }

        // Plain SGD update for a convolution's weights and bias (in place).
        private void updateConvWeights(double[][][][] weights, double[][][][] grad,
                                       double[] bias, double[] biasGrad, double lr) {
            for (int k = 0; k < weights.length; k++) {
                for (int x = 0; x < weights[0].length; x++) {
                    for (int y = 0; y < weights[0][0].length; y++) {
                        for (int d = 0; d < weights[0][0][0].length; d++) {
                            weights[k][x][y][d] -= lr * grad[k][x][y][d];
                        }
                    }
                }
                bias[k] -= lr * biasGrad[k];
            }
        }

        // Plain SGD update for the fully connected layer (in place).
        private void updateFcWeights(double[][] weights, double[][] grad,
                                     double[] bias, double[] biasGrad, double lr) {
            for (int i = 0; i < weights.length; i++) {
                for (int j = 0; j < weights[0].length; j++) {
                    weights[i][j] -= lr * grad[i][j];
                }
                bias[i] -= lr * biasGrad[i];
            }
        }

        // He initialization: N(0, 2 / fanIn) with fanIn = kernelSize^2 * inputDepth.
        private double[][][][] initializeConvWeights(int numKernels, int kernelSize,
                                                     int inputDepth, Random random) {
            double[][][][] weights = new double[numKernels][kernelSize][kernelSize][inputDepth];
            double scale = Math.sqrt(2.0 / (kernelSize * kernelSize * inputDepth));
            for (int k = 0; k < numKernels; k++) {
                for (int i = 0; i < kernelSize; i++) {
                    for (int j = 0; j < kernelSize; j++) {
                        for (int d = 0; d < inputDepth; d++) {
                            weights[k][i][j][d] = random.nextGaussian() * scale;
                        }
                    }
                }
            }
            return weights;
        }

        // All biases start at a small positive constant.
        private void initializeBias(double[] bias) {
            for (int i = 0; i < bias.length; i++) {
                bias[i] = 0.1;
            }
        }

        // He-style initialization for the FC layer, scaled by the input width.
        private void initializeFcWeights(double[][] weights, double[] bias, Random random) {
            int rows = weights.length;
            int cols = weights[0].length;
            double scale = Math.sqrt(2.0 / cols);
            for (int i = 0; i < rows; i++) {
                for (int j = 0; j < cols; j++) {
                    weights[i][j] = random.nextGaussian() * scale;
                }
            }
            initializeBias(bias);
        }

        // Runs a forward pass and returns the index of the most probable class.
        public int predict(double[][][] input) {
            double[] outputs = forward(input);
            int maxIndex = 0;
            double maxVal = outputs[0];
            for (int i = 1; i < outputs.length; i++) {
                if (outputs[i] > maxVal) {
                    maxVal = outputs[i];
                    maxIndex = i;
                }
            }
            return maxIndex;
        }
    }

    // Smoke-test driver: trains on one synthetic image for ten steps and
    // prints the loss and prediction after each step.
    public static void main(String[] args) {
        final Random random = new Random(42);  // fixed seed for reproducibility
        final ResNetWithTrain resNet = new ResNetWithTrain(10, random);

        // Build a synthetic 3x224x224 image of small Gaussian noise.
        final double[][][] testImage = new double[3][224][224];
        for (double[][] channel : testImage) {
            for (double[] row : channel) {
                for (int j = 0; j < row.length; j++) {
                    row[j] = random.nextGaussian() * 0.1;
                }
            }
        }
        final int trueLabel = 3;  // arbitrary ground-truth class for the sample

        // Ten SGD steps on the single sample.
        final double learningRate = 0.001;
        for (int epoch = 0; epoch < 10; epoch++) {
            double loss = resNet.trainStep(testImage, trueLabel, learningRate);
            int prediction = resNet.predict(testImage);
            System.out.printf("Epoch %d: 损失=%.4f, 预测=%d, 真实=%d\n",
                    epoch, loss, prediction, trueLabel);
        }
    }
}
