package com.cu.machinelearning.neural;

import java.util.Random;

public class LeNet5Demo {
    // Network layer parameters.
    private double[][][][] conv1Weights;  // Conv layer 1 weights: 6 kernels of 5x5x1
    private double[] conv1Bias;           // Conv layer 1 biases: 6 values
    private double[][][][] conv2Weights;  // Conv layer 2 weights: 16 kernels of 5x5x6
    private double[] conv2Bias;           // Conv layer 2 biases: 16 values
    private double[][] fc1Weights;        // Fully connected layer 1 weights: 120 x (5x5x16)
    private double[] fc1Bias;             // Fully connected layer 1 biases: 120 values
    private double[][] fc2Weights;        // Fully connected layer 2 weights: 84 x 120
    private double[] fc2Bias;             // Fully connected layer 2 biases: 84 values
    private double[][] outputWeights;     // Output layer weights: 10 x 84
    private double[] outputBias;          // Output layer biases: 10 values


    /**
     * Output feature maps of convolution layer 1.
     * Dimensions: [channels][height][width]
     */
    private double[][][] conv1Output;

    /**
     * Output feature maps of ReLU activation layer 1.
     * Dimensions: [channels][height][width]
     */
    private double[][][] relu1Output;

    /**
     * Output feature maps of pooling layer 1.
     * Dimensions: [channels][height][width]
     */
    private double[][][] pool1Output;

    /**
     * Output feature maps of convolution layer 2.
     * Dimensions: [channels][height][width]
     */
    private double[][][] conv2Output;

    /**
     * Output feature maps of ReLU activation layer 2.
     * Dimensions: [channels][height][width]
     */
    private double[][][] relu2Output;

    /**
     * Output feature maps of pooling layer 2.
     * Dimensions: [channels][height][width]
     */
    private double[][][] pool2Output;

    /**
     * Output vector of fully connected layer 1.
     * One entry per neuron.
     */
    private double[] fc1Output;

    /**
     * Output vector of ReLU activation layer 3.
     * One entry per neuron.
     */
    private double[] relu3Output;

    /**
     * Output vector of fully connected layer 2.
     * One entry per neuron.
     */
    private double[] fc2Output;

    /**
     * Output vector of ReLU activation layer 4.
     * One entry per neuron.
     */
    private double[] relu4Output;

    /**
     * Final raw logits of the network.
     * One unnormalized score per class.
     */
    private double[] logits;

    // Single RNG shared by all weight initializers (seeded in the constructor
    // for reproducibility).
    private Random random;

    /**
     * Builds the network and initializes all parameters.
     * A fixed random seed makes runs reproducible; because every initializer
     * consumes values from the same {@code random}, the initialization order
     * below must not be changed.
     */
    public LeNet5Demo() {
        random = new Random(42);  // fixed seed so results are reproducible

        // Conv layer 1 parameters: 6 kernels of 5x5 over 1 input channel.
        conv1Weights = new double[6][5][5][1];
        conv1Bias = new double[6];
        initializeConvWeights(conv1Weights, conv1Bias);

        // Conv layer 2 parameters: 16 kernels of 5x5 over 6 input channels.
        conv2Weights = new double[16][5][5][6];
        conv2Bias = new double[16];
        initializeConvWeights(conv2Weights, conv2Bias);

        // Fully connected layer parameters (120 -> 84 -> 10 pipeline).
        fc1Weights = new double[120][5 * 5 * 16];
        fc1Bias = new double[120];
        initializeFcWeights(fc1Weights, fc1Bias);

        fc2Weights = new double[84][120];
        fc2Bias = new double[84];
        initializeFcWeights(fc2Weights, fc2Bias);

        outputWeights = new double[10][84];
        outputBias = new double[10];
        initializeFcWeights(outputWeights, outputBias);
    }

    /**
     * Fills a convolution layer's kernels with scaled Gaussian noise and sets
     * every bias to a small positive constant.
     * The iteration order is significant: it fixes the sequence in which
     * values are drawn from the shared seeded RNG.
     *
     * @param weights kernel tensor [kernel][height][width][channels], written in place
     * @param bias    bias array, one entry per kernel, written in place
     */
    private void initializeConvWeights(double[][][][] weights, double[] bias) {
        final int kernelCount = weights.length;
        final int side = weights[0].length;
        final int channels = weights[0][0][0].length;

        // sqrt(2 / fan_in) scaling keeps activation variance roughly stable.
        final double stdDev = Math.sqrt(2.0 / (side * side * channels));

        for (int kernel = 0; kernel < kernelCount; kernel++) {
            for (int row = 0; row < side; row++) {
                for (int col = 0; col < side; col++) {
                    for (int ch = 0; ch < channels; ch++) {
                        weights[kernel][row][col][ch] = random.nextGaussian() * stdDev;
                    }
                }
            }
            bias[kernel] = 0.1;  // small positive bias
        }
    }

    /**
     * Fills a fully connected layer's weight matrix with scaled Gaussian noise
     * and sets every bias to a small positive constant.
     * Iteration order fixes the draw sequence from the shared seeded RNG.
     *
     * @param weights weight matrix [outputSize][inputSize], written in place
     * @param bias    bias array [outputSize], written in place
     */
    private void initializeFcWeights(double[][] weights, double[] bias) {
        final int neurons = weights.length;
        final int fanIn = weights[0].length;

        // sqrt(2 / fan_in) scaling keeps activation variance roughly stable.
        final double stdDev = Math.sqrt(2.0 / fanIn);

        for (int neuron = 0; neuron < neurons; neuron++) {
            for (int in = 0; in < fanIn; in++) {
                weights[neuron][in] = random.nextGaussian() * stdDev;
            }
            bias[neuron] = 0.1;  // small positive bias
        }
    }

    /**
     * Valid (no padding) multi-channel 2-D convolution.
     *
     * @param input   input tensor, [channels][height][width]
     * @param kernels kernel tensor, [numKernels][kernelHeight][kernelWidth][inputChannels]
     * @param biases  one bias per kernel
     * @param stride  step between neighboring output positions
     * @return feature maps, [numKernels][outHeight][outWidth]
     */
    private double[][][] convolution(double[][][] input, double[][][][] kernels,
                                     double[] biases, int stride) {
        final int channels = input.length;
        final int kernelCount = kernels.length;
        final int kSide = kernels[0].length;

        // Valid-convolution output geometry.
        final int outHeight = (input[0].length - kSide) / stride + 1;
        final int outWidth = (input[0][0].length - kSide) / stride + 1;

        double[][][] featureMaps = new double[kernelCount][outHeight][outWidth];

        for (int k = 0; k < kernelCount; k++) {
            for (int row = 0; row < outHeight; row++) {
                final int top = row * stride;
                for (int col = 0; col < outWidth; col++) {
                    final int left = col * stride;
                    // Accumulate the dot product of the kernel and the
                    // input patch anchored at (top, left).
                    double acc = 0.0;
                    for (int c = 0; c < channels; c++) {
                        for (int kx = 0; kx < kSide; kx++) {
                            for (int ky = 0; ky < kSide; ky++) {
                                acc += input[c][top + kx][left + ky] * kernels[k][kx][ky][c];
                            }
                        }
                    }
                    featureMaps[k][row][col] = acc + biases[k];
                }
            }
        }
        return featureMaps;
    }

    /**
     * Element-wise ReLU over a 3-D tensor: f(x) = max(0, x).
     *
     * @param input tensor of shape [channels][height][width]
     * @return a new tensor of the same shape with all negatives replaced by 0
     */
    private double[][][] relu(double[][][] input) {
        final int channels = input.length;
        final int rows = input[0].length;
        final int cols = input[0][0].length;

        double[][][] activated = new double[channels][rows][cols];

        for (int c = 0; c < channels; c++) {
            for (int r = 0; r < rows; r++) {
                for (int col = 0; col < cols; col++) {
                    activated[c][r][col] = Math.max(0, input[c][r][col]);
                }
            }
        }
        return activated;
    }

    /**
     * Max pooling over square windows.
     *
     * @param input    input tensor, [channels][height][width]
     * @param poolSize edge length of the (square) pooling window
     * @param stride   step between neighboring windows
     * @return pooled tensor, [channels][outHeight][outWidth]
     */
    private double[][][] maxPooling(double[][][] input, int poolSize, int stride) {
        int depth = input.length;
        int inputHeight = input[0].length;
        int inputWidth = input[0][0].length;

        // Output geometry of a valid pooling pass.
        int outputHeight = (inputHeight - poolSize) / stride + 1;
        int outputWidth = (inputWidth - poolSize) / stride + 1;

        double[][][] output = new double[depth][outputHeight][outputWidth];

        for (int d = 0; d < depth; d++) {
            for (int i = 0; i < outputHeight; i++) {
                for (int j = 0; j < outputWidth; j++) {
                    // FIX: seed with -Infinity, not 0.0. Starting at 0 silently
                    // clamps an all-negative window to 0. That is harmless when
                    // the input is post-ReLU (as in this network), but wrong
                    // for general inputs.
                    double max = Double.NEGATIVE_INFINITY;
                    for (int x = 0; x < poolSize; x++) {
                        for (int y = 0; y < poolSize; y++) {
                            max = Math.max(max, input[d][i * stride + x][j * stride + y]);
                        }
                    }
                    output[d][i][j] = max;
                }
            }
        }
        return output;
    }

    /**
     * Flattens a [depth][height][width] tensor into a single vector, in
     * depth-major then row-major order.
     *
     * @param input the 3-D tensor to flatten
     * @return a vector of length depth * height * width
     */
    private double[] flatten(double[][][] input) {
        double[] vector = new double[input.length * input[0].length * input[0][0].length];

        int pos = 0;
        for (double[][] channel : input) {
            for (double[] row : channel) {
                for (double value : row) {
                    vector[pos++] = value;
                }
            }
        }
        return vector;
    }

    /**
     * Fully connected layer forward pass: output = W * input + b.
     *
     * @param input   input activation vector
     * @param weights weight matrix, [outputSize][inputSize] — row i holds the
     *                incoming weights of output neuron i
     * @param bias    bias vector, [outputSize]
     * @return output activations, [outputSize]
     */
    private double[] fullyConnected(double[] input, double[][] weights, double[] bias) {
        double[] result = new double[weights.length];

        for (int neuron = 0; neuron < weights.length; neuron++) {
            double activation = 0.0;
            for (int in = 0; in < input.length; in++) {
                activation += input[in] * weights[neuron][in];
            }
            result[neuron] = activation + bias[neuron];
        }
        return result;
    }

    /** Element-wise ReLU for a vector: f(x) = max(0, x). */
    private double[] relu(double[] input) {
        double[] activated = new double[input.length];
        int idx = 0;
        while (idx < input.length) {
            activated[idx] = Math.max(0, input[idx]);
            idx++;
        }
        return activated;
    }

    /**
     * Numerically stable softmax: shifts logits by their maximum before
     * exponentiating so large values cannot overflow.
     *
     * @param input logits, one per class
     * @return probabilities summing to 1, same length as input
     */
    private double[] softmax(double[] input) {
        // Largest logit, used as the stability shift.
        double largest = input[0];
        for (double v : input) {
            largest = Math.max(largest, v);
        }

        double[] probs = new double[input.length];
        double total = 0.0;
        for (int i = 0; i < input.length; i++) {
            probs[i] = Math.exp(input[i] - largest);
            total += probs[i];
        }

        // Normalize to a probability distribution.
        for (int i = 0; i < probs.length; i++) {
            probs[i] /= total;
        }

        return probs;
    }

    /**
     * Forward pass through the full LeNet-5 pipeline.
     * Intermediate activations are cached in fields so that the backward pass
     * can reuse them.
     *
     * @param input input image, shape [1][32][32]
     * @return softmax probabilities over the 10 classes
     */
    public double[] forward(double[][][] input) {
        // Conv 1: 1x32x32 -> 6x28x28, then ReLU.
        conv1Output = convolution(input, conv1Weights, conv1Bias, 1);
        relu1Output = relu(conv1Output);

        // Pool 1: 6x28x28 -> 6x14x14.
        pool1Output = maxPooling(relu1Output, 2, 2);

        // Conv 2: 6x14x14 -> 16x10x10, then ReLU.
        conv2Output = convolution(pool1Output, conv2Weights, conv2Bias, 1);
        relu2Output = relu(conv2Output);

        // Pool 2: 16x10x10 -> 16x5x5.
        pool2Output = maxPooling(relu2Output, 2, 2);

        // Flatten: 16x5x5 -> 400.
        double[] flattened = flatten(pool2Output);

        // FC 1: 400 -> 120, then ReLU.
        fc1Output = fullyConnected(flattened, fc1Weights, fc1Bias);
        relu3Output = relu(fc1Output);

        // FC 2: 120 -> 84, then ReLU.
        fc2Output = fullyConnected(relu3Output, fc2Weights, fc2Bias);
        relu4Output = relu(fc2Output);

        // Output layer: 84 -> 10 logits, normalized by softmax.
        logits = fullyConnected(relu4Output, outputWeights, outputBias);
        return softmax(logits);
    }

    /**
     * Cross-entropy loss for a single sample: -log(p[label]).
     *
     * @param predictions softmax probabilities over the classes
     * @param label       index of the true class
     * @return the cross-entropy loss value
     */
    public double calculateLoss(double[] predictions, int label) {
        final double epsilon = 1e-10;  // floor to avoid log(0)
        return -Math.log(Math.max(predictions[label], epsilon));
    }

    /**
     * Runs a single training step on one sample: forward pass, loss
     * computation (logged to stdout), and backpropagation with a parameter
     * update.
     *
     * @param input        input sample, shape [channels][height][width] (1x32x32 here)
     * @param label        ground-truth class index of the sample
     * @param learningRate step size controlling the parameter update
     * @param batchSize    batch size the gradient update is scaled by (divides the step)
     */
    public void train(double[][][] input, int label, double learningRate,int batchSize) {
        // Forward pass; intermediate activations are cached in fields for backprop.
        double[] predictions = forward(input);

        // Cross-entropy loss, for logging only.
        double loss = calculateLoss(predictions, label);

        System.out.println("训练中... 损失: " + loss);
        // Backpropagate and update the parameters.
        backward(input, label, learningRate, predictions,batchSize);
    }


    /**
     * Backward pass: computes gradients of the cross-entropy loss with respect
     * to every parameter and applies a gradient-descent update.
     * Relies on the intermediate activations cached by {@link #forward}, so it
     * must be called right after a forward pass on the same input.
     *
     * @param input        network input, shape [1][32][32]
     * @param label        ground-truth class index (0-9)
     * @param learningRate step size for the parameter update
     * @param predictions  softmax probabilities produced by the forward pass
     * @param batchSize    number of samples the gradient update is scaled by
     */
    private void backward(double[][][] input, int label, double learningRate, double[] predictions,int batchSize) {

        // Output layer gradient: d(loss)/d(logit) for softmax + cross-entropy
        // is simply (p - y).
        double[] outputGrad = new double[10];
        for (int i = 0; i < 10; i++) {
            outputGrad[i] = predictions[i] - (i == label ? 1 : 0);
        }

        // Output layer: weight/bias gradients and the gradient flowing into relu4.
        double[][] outputWeightGrad = new double[10][84];
        double[] outputBiasGrad = new double[10];
        double[] relu4Grad = new double[84];

        for (int i = 0; i < 10; i++) {
            outputBiasGrad[i] = outputGrad[i];
            for (int j = 0; j < 84; j++) {
                outputWeightGrad[i][j] = outputGrad[i] * relu4Output[j];
                relu4Grad[j] += outputGrad[i] * outputWeights[i][j];
            }
        }

        // FC2 backward.
        double[] fc2Grad = new double[84];
        for (int i = 0; i < 84; i++) {
            fc2Grad[i] = relu4Grad[i] * (relu4Output[i] > 0 ? 1 : 0); // ReLU derivative
        }

        double[][] fc2WeightGrad = new double[84][120];
        double[] fc2BiasGrad = new double[84];
        double[] relu3Grad = new double[120];

        for (int i = 0; i < 84; i++) {
            fc2BiasGrad[i] = fc2Grad[i];
            for (int j = 0; j < 120; j++) {
                fc2WeightGrad[i][j] = fc2Grad[i] * relu3Output[j];
                relu3Grad[j] += fc2Grad[i] * fc2Weights[i][j];
            }
        }

        // FC1 backward.
        double[] fc1Grad = new double[120];
        for (int i = 0; i < 120; i++) {
            fc1Grad[i] = relu3Grad[i] * (relu3Output[i] > 0 ? 1 : 0); // ReLU derivative
        }

        double[] flattened = flatten(pool2Output);
        double[][] fc1WeightGrad = new double[120][400];
        double[] fc1BiasGrad = new double[120];

        for (int i = 0; i < 120; i++) {
            fc1BiasGrad[i] = fc1Grad[i];
            for (int j = 0; j < 400; j++) {
                fc1WeightGrad[i][j] = fc1Grad[i] * flattened[j];
            }
        }

        // BUG FIX: the gradient flowing back into pool2 is W1^T * fc1Grad
        // (length 400), NOT fc1Grad itself (length 120). The previous code
        // passed the 120-element fc1Grad straight into a 16x5x5 reshape,
        // which both computed the wrong quantity and overran the array
        // (ArrayIndexOutOfBoundsException at index 120).
        double[] flattenedGrad = new double[400];
        for (int i = 0; i < 120; i++) {
            for (int j = 0; j < 400; j++) {
                flattenedGrad[j] += fc1Grad[i] * fc1Weights[i][j];
            }
        }

        // Reshape back to 16x5x5 for the convolutional backward passes.
        double[][][] pool2Grad = reshapeTo3D(flattenedGrad, 16, 5, 5);

        // Pool2 backward.
        double[][][] relu2Grad = maxPoolBackward(pool2Grad, relu2Output, 2, 2);

        // Conv2 backward.
        double[][][] conv2Grad = reluBackward(relu2Grad, conv2Output);
        double[][][][] conv2WeightGrad = convBackward(pool1Output, conv2Grad, 1);
        double[] conv2BiasGrad = new double[16];
        for (int i = 0; i < 16; i++) {
            for (int j = 0; j < conv2Grad[0].length; j++) {
                for (int k = 0; k < conv2Grad[0][0].length; k++) {
                    conv2BiasGrad[i] += conv2Grad[i][j][k];
                }
            }
        }

        // Pool1 backward.
        double[][][] pool1Grad = convBackwardToInput(pool1Output, conv2Grad, conv2Weights, 1);
        double[][][] relu1Grad = maxPoolBackward(pool1Grad, relu1Output, 2, 2);

        // Conv1 backward.
        double[][][] conv1Grad = reluBackward(relu1Grad, conv1Output);
        double[][][][] conv1WeightGrad = convBackward(input, conv1Grad, 1);
        double[] conv1BiasGrad = new double[6];
        for (int i = 0; i < 6; i++) {
            for (int j = 0; j < conv1Grad[0].length; j++) {
                for (int k = 0; k < conv1Grad[0][0].length; k++) {
                    conv1BiasGrad[i] += conv1Grad[i][j][k];
                }
            }
        }

        // Apply the gradient-descent update.
        updateParameters(
                conv1WeightGrad, conv1BiasGrad,
                conv2WeightGrad, conv2BiasGrad,
                fc1WeightGrad, fc1BiasGrad,
                fc2WeightGrad, fc2BiasGrad,
                outputWeightGrad, outputBiasGrad,
                learningRate, batchSize
        );
    }

    /**
     * Reshapes a flat vector into a [depth][height][width] tensor, consuming
     * the vector in depth-major, then row-major order (the inverse of
     * {@code flatten}).
     *
     * @param input  the flat vector; must hold at least depth*height*width values
     * @param depth  depth of the result
     * @param height height of the result
     * @param width  width of the result
     * @return the reshaped 3-D tensor
     */
    private double[][][] reshapeTo3D(double[] input, int depth, int height, int width) {
        double[][][] shaped = new double[depth][height][width];
        int cursor = 0;
        for (double[][] plane : shaped) {
            for (double[] row : plane) {
                for (int w = 0; w < width; w++) {
                    row[w] = input[cursor++];
                }
            }
        }
        return shaped;
    }

    /**
     * ReLU backward pass: the gradient passes through wherever the forward
     * output was positive and is zeroed elsewhere.
     *
     * @param grad   upstream gradient, [channels][height][width]
     * @param output the forward-pass ReLU output (the activation mask source)
     * @return masked gradient, same shape as grad
     */
    private double[][][] reluBackward(double[][][] grad, double[][][] output) {
        double[][][] masked = new double[grad.length][grad[0].length][grad[0][0].length];

        for (int d = 0; d < grad.length; d++) {
            for (int h = 0; h < grad[d].length; h++) {
                for (int w = 0; w < grad[d][h].length; w++) {
                    if (output[d][h][w] > 0) {
                        masked[d][h][w] = grad[d][h][w];
                    }
                    // else: stays 0.0 (array default)
                }
            }
        }
        return masked;
    }

    /**
     * Max-pooling backward pass: routes each output-cell gradient to the
     * position that held the window maximum during the forward pass.
     *
     * @param grad          gradient w.r.t. the pooled output
     * @param forwardOutput the tensor that was fed INTO the pooling layer in
     *                      the forward pass (used to re-locate each window max)
     * @param poolSize      pooling window edge length
     * @param stride        pooling stride
     * @return gradient w.r.t. the pooling layer's input, same shape as forwardOutput
     */
    private double[][][] maxPoolBackward(double[][][] grad, double[][][] forwardOutput,
                                         int poolSize, int stride) {
        int depth = grad.length;
        // FIX: size the result from the actual pre-pooling tensor rather than
        // gradSize * stride; the two differ whenever the pooling windows do
        // not tile the input exactly (e.g. odd input sizes). Identical for
        // this network's even 28/10 inputs.
        int height = forwardOutput[0].length;
        int width = forwardOutput[0][0].length;
        double[][][] result = new double[depth][height][width];

        for (int d = 0; d < depth; d++) {
            for (int i = 0; i < grad[0].length; i++) {
                for (int j = 0; j < grad[0][0].length; j++) {
                    // Re-locate the argmax of this pooling window.
                    double maxValue = Double.NEGATIVE_INFINITY;
                    int maxI = 0, maxJ = 0;

                    for (int x = 0; x < poolSize; x++) {
                        for (int y = 0; y < poolSize; y++) {
                            int posX = i * stride + x;
                            int posY = j * stride + y;
                            if (forwardOutput[d][posX][posY] > maxValue) {
                                maxValue = forwardOutput[d][posX][posY];
                                maxI = posX;
                                maxJ = posY;
                            }
                        }
                    }

                    // Only the max position receives the gradient.
                    result[d][maxI][maxJ] += grad[d][i][j];
                }
            }
        }
        return result;
    }

    /**
     * Convolution backward pass with respect to the kernel weights.
     *
     * @param input  the layer's forward-pass input, [channels][height][width]
     * @param grad   gradient w.r.t. the layer's output, [kernels][outH][outW]
     * @param stride convolution stride used in the forward pass
     * @return weight gradients, [kernels][kernelH][kernelW][channels]
     */
    private double[][][][] convBackward(double[][][] input, double[][][] grad, int stride) {
        int inputDepth = input.length;
        int outputChannels = grad.length;
        // FIX: recover the kernel size from the valid-convolution geometry
        // (inH = (outH - 1) * stride + kernelSize) instead of hard-coding 5,
        // so the method works for any kernel size. Evaluates to 5 for both
        // conv layers of this network.
        int kernelSize = input[0].length - (grad[0].length - 1) * stride;

        double[][][][] weightGrad = new double[outputChannels][kernelSize][kernelSize][inputDepth];

        for (int k = 0; k < outputChannels; k++) {
            for (int i = 0; i < grad[0].length; i++) {
                for (int j = 0; j < grad[0][0].length; j++) {
                    for (int d = 0; d < inputDepth; d++) {
                        for (int x = 0; x < kernelSize; x++) {
                            for (int y = 0; y < kernelSize; y++) {
                                weightGrad[k][x][y][d] +=
                                        input[d][i * stride + x][j * stride + y] * grad[k][i][j];
                            }
                        }
                    }
                }
            }
        }
        return weightGrad;
    }

    /**
     * Convolution backward pass with respect to the layer's input: scatters
     * each output gradient back through the kernel weights.
     *
     * @param input   forward-pass input (used only for its dimensions)
     * @param grad    gradient w.r.t. the layer output, [outChannels][outH][outW]
     * @param weights kernels, [outChannels][kernelH][kernelW][inChannels]
     * @param stride  forward-pass stride
     * @return gradient w.r.t. the input, same shape as input
     */
    private double[][][] convBackwardToInput(double[][][] input, double[][][] grad,
                                             double[][][][] weights, int stride) {
        final int channels = input.length;
        final int height = input[0].length;
        final int width = input[0][0].length;
        final int kSide = weights[0].length;

        double[][][] inputGrad = new double[channels][height][width];

        for (int out = 0; out < grad.length; out++) {
            for (int gi = 0; gi < grad[0].length; gi++) {
                for (int gj = 0; gj < grad[0][0].length; gj++) {
                    for (int c = 0; c < channels; c++) {
                        for (int kx = 0; kx < kSide; kx++) {
                            for (int ky = 0; ky < kSide; ky++) {
                                int row = gi * stride + kx;
                                int col = gj * stride + ky;
                                // Guard against stepping past the input edge.
                                if (row < height && col < width) {
                                    inputGrad[c][row][col] +=
                                            weights[out][kx][ky][c] * grad[out][gi][gj];
                                }
                            }
                        }
                    }
                }
            }
        }
        return inputGrad;
    }

    /**
     * Applies one SGD step to every layer: param -= (learningRate / batchSize) * grad.
     * Dimensions are taken from the parameter arrays themselves instead of
     * hard-coded constants, so the update keeps working if the architecture
     * is resized.
     */
    private void updateParameters(
            double[][][][] conv1WeightGrad, double[] conv1BiasGrad,
            double[][][][] conv2WeightGrad, double[] conv2BiasGrad,
            double[][] fc1WeightGrad, double[] fc1BiasGrad,
            double[][] fc2WeightGrad, double[] fc2BiasGrad,
            double[][] outputWeightGrad, double[] outputBiasGrad,
            double learningRate, int batchSize) {

        double scale = learningRate / batchSize;

        applyConvUpdate(conv1Weights, conv1Bias, conv1WeightGrad, conv1BiasGrad, scale);
        applyConvUpdate(conv2Weights, conv2Bias, conv2WeightGrad, conv2BiasGrad, scale);
        applyFcUpdate(fc1Weights, fc1Bias, fc1WeightGrad, fc1BiasGrad, scale);
        applyFcUpdate(fc2Weights, fc2Bias, fc2WeightGrad, fc2BiasGrad, scale);
        applyFcUpdate(outputWeights, outputBias, outputWeightGrad, outputBiasGrad, scale);
    }

    /** In-place SGD step for one convolutional layer's weights and biases. */
    private void applyConvUpdate(double[][][][] weights, double[] bias,
                                 double[][][][] weightGrad, double[] biasGrad, double scale) {
        for (int k = 0; k < weights.length; k++) {
            for (int i = 0; i < weights[k].length; i++) {
                for (int j = 0; j < weights[k][i].length; j++) {
                    for (int d = 0; d < weights[k][i][j].length; d++) {
                        weights[k][i][j][d] -= scale * weightGrad[k][i][j][d];
                    }
                }
            }
            bias[k] -= scale * biasGrad[k];
        }
    }

    /** In-place SGD step for one fully connected layer's weights and biases. */
    private void applyFcUpdate(double[][] weights, double[] bias,
                               double[][] weightGrad, double[] biasGrad, double scale) {
        for (int i = 0; i < weights.length; i++) {
            for (int j = 0; j < weights[i].length; j++) {
                weights[i][j] -= scale * weightGrad[i][j];
            }
            bias[i] -= scale * biasGrad[i];
        }
    }


    /**
     * Classifies one input image: runs a forward pass and returns the index
     * of the class with the highest softmax probability.
     *
     * @param input input image, shape [1][32][32]
     * @return predicted class index (0-9)
     */
    public int predict(double[][][] input) {
        double[] probabilities = forward(input);

        int best = 0;
        for (int cls = 1; cls < probabilities.length; cls++) {
            if (probabilities[cls] > probabilities[best]) {
                best = cls;
            }
        }
        return best;
    }

    /**
     * Runs one SGD pass over a mini-batch, one sample at a time, scaling each
     * update by the batch size.
     *
     * @param batchImages  batch of inputs, [batch][channels][height][width]
     * @param batchLabels  ground-truth label for each sample
     * @param learningRate learning rate forwarded to every update
     */
    public void trainBatch(double[][][][] batchImages, int[] batchLabels, double learningRate) {
        // BUG FIX: the batch size is the number of samples, batchImages.length.
        // The previous code passed batchImages[0].length — the CHANNEL count
        // of the first image (1 here) — so updates were never averaged over
        // the batch.
        int batchSize = batchImages.length;
        for (int i = 0; i < batchSize; i++) {
            train(batchImages[i], batchLabels[i], learningRate, batchSize);
        }
    }

    /**
     * Smoke test: builds the network, runs a forward pass on a random image,
     * takes a single training step on it, and predicts again.
     */
    public static void main(String[] args) {
        // Build the LeNet-5 network.
        LeNet5Demo lenet = new LeNet5Demo();

        // Random 32x32 single-channel test image (1x32x32).
        double[][][] testImage = new double[1][32][32];
        Random random = new Random();
        for (int row = 0; row < 32; row++) {
            for (int col = 0; col < 32; col++) {
                testImage[0][row][col] = random.nextDouble();  // uniform in [0, 1)
            }
        }

        // Forward pass before any training.
        double[] predictions = lenet.forward(testImage);
        System.out.println("初始预测概率分布:");
        for (int cls = 0; cls < predictions.length; cls++) {
            System.out.printf("类别 %d: %.4f\n", cls, predictions[cls]);
        }

        // Treat label 3 as ground truth and take one training step.
        int trueLabel = 3;
        lenet.train(testImage, trueLabel, 0.01, 1);

        // Predict again to observe the effect of the update.
        int predictedLabel = lenet.predict(testImage);
        System.out.println("预测结果: " + predictedLabel);
        System.out.println("真实标签: " + trueLabel);
    }
}
