
package com.cu.machinelearning.neural;

import java.util.*;

/**
 * Simplified, self-contained ResNet-style classifier operating on flattened
 * feature vectors (no real spatial convolutions).
 *
 * <p>Architecture: a stack of residual blocks (conv -&gt; BN -&gt; ReLU -&gt; conv -&gt;
 * BN, plus an identity / 1x1-projection shortcut) followed by a softmax dense
 * layer, trained with cross-entropy via plain SGD.
 *
 * <p>The original revision crashed ("运行不了"): {@code ConvLayer} indexed its
 * input as if each sample had {@code inputChannels * kernelSize^2} values,
 * while the network only ever supplies {@code inputChannels} values per
 * sample. The layer now consumes exactly one value per input channel, making
 * every layer boundary dimensionally consistent.
 */
public class RestNetDemo2 {
    private final int inputSize;       // number of input features per sample
    private final int numClasses;      // number of target classes
    private final int batchSize;       // fixed mini-batch size used by every layer
    private final double learningRate; // SGD step size
    private final List<ResidualBlock> blocks;
    private final DenseLayer finalLayer;
    private final Random random;

    /**
     * Builds the network.
     *
     * @param inputSize    number of input features per sample
     * @param numClasses   number of target classes
     * @param batchSize    mini-batch size (all forward/backward calls must use it)
     * @param learningRate SGD step size
     * @param numFilters   output width of each successive residual block
     */
    public RestNetDemo2(int inputSize, int numClasses, int batchSize, double learningRate,
                      int... numFilters) {
        this.inputSize = inputSize;
        this.numClasses = numClasses;
        this.batchSize = batchSize;
        this.learningRate = learningRate;
        this.random = new Random(42);  // fixed seed for reproducible initialization
        this.blocks = new ArrayList<>();

        // Chain residual blocks; each block may change the feature width.
        int currentSize = inputSize;
        for (int filters : numFilters) {
            blocks.add(new ResidualBlock(currentSize, filters, batchSize, random));
            currentSize = filters;
        }

        // Final softmax classification layer.
        this.finalLayer = new DenseLayer(currentSize, numClasses, batchSize, random);
    }

    /**
     * Forward pass in training mode (batch norm uses batch statistics).
     *
     * @param input [batchSize][inputSize] feature matrix
     * @return [batchSize][numClasses] softmax class probabilities
     */
    public double[][] forward(double[][] input) {
        double[][] current = input;

        // Run all residual blocks in order.
        for (ResidualBlock block : blocks) {
            current = block.forward(current);
        }

        // "Global average pooling" is a no-op in this flattened model: each
        // sample is already a flat feature vector, so it feeds the classifier
        // directly.
        return finalLayer.forward(current);
    }

    /**
     * Backward pass: propagates the gradient through the output layer and all
     * residual blocks (in reverse order), updating parameters via SGD.
     *
     * @param outputGradient dL/dLogits, shape [batchSize][numClasses]; for
     *                       softmax + cross-entropy this is
     *                       (predictions - labels) / batchSize
     */
    public void backward(double[][] outputGradient) {
        double[][] gradient = finalLayer.backward(outputGradient, learningRate);

        for (int i = blocks.size() - 1; i >= 0; i--) {
            gradient = blocks.get(i).backward(gradient, learningRate);
        }
    }

    /**
     * Mean cross-entropy loss over the batch.
     *
     * @param predictions softmax probabilities, [batchSize][numClasses]
     * @param labels      one-hot targets, [batchSize][numClasses]
     * @return average negative log-likelihood
     */
    public double computeLoss(double[][] predictions, double[][] labels) {
        double totalLoss = 0.0;
        for (int i = 0; i < batchSize; i++) {
            for (int j = 0; j < numClasses; j++) {
                // Clamp to avoid log(0).
                totalLoss -= labels[i][j] * Math.log(Math.max(predictions[i][j], 1e-10));
            }
        }
        return totalLoss / batchSize;
    }

    /**
     * Fraction of samples whose arg-max prediction matches the arg-max label.
     *
     * @param predictions softmax probabilities, [batchSize][numClasses]
     * @param labels      one-hot targets, [batchSize][numClasses]
     * @return accuracy in [0, 1]
     */
    public double computeAccuracy(double[][] predictions, double[][] labels) {
        int correct = 0;
        for (int i = 0; i < batchSize; i++) {
            if (argMax(predictions[i]) == argMax(labels[i])) {
                correct++;
            }
        }
        return (double) correct / batchSize;
    }

    /** Index of the largest element (first one on ties). */
    private int argMax(double[] array) {
        int maxIndex = 0;
        for (int i = 1; i < array.length; i++) {
            if (array[i] > array[maxIndex]) {
                maxIndex = i;
            }
        }
        return maxIndex;
    }

    /**
     * Residual block: conv1 -&gt; BN -&gt; ReLU -&gt; conv2 -&gt; BN, added to a shortcut.
     * The shortcut is the identity when the width is unchanged, otherwise a
     * 1x1 projection convolution.
     */
    private static class ResidualBlock {
        private final ConvLayer conv1;     // first projection (may change width)
        private final BatchNorm bn1;       // normalizes conv1 output
        private final ConvLayer conv2;     // second projection (width-preserving)
        private final BatchNorm bn2;       // normalizes conv2 output
        private final ConvLayer shortcut;  // null => identity shortcut
        private double[][] reluInputCache; // bn1 output (ReLU pre-activation), for backward

        public ResidualBlock(int inputChannels, int outputChannels, int batchSize, Random random) {
            this.conv1 = new ConvLayer(inputChannels, outputChannels, 3, 1, 1, batchSize, random);
            this.bn1 = new BatchNorm(outputChannels, batchSize);
            this.conv2 = new ConvLayer(outputChannels, outputChannels, 3, 1, 1, batchSize, random);
            this.bn2 = new BatchNorm(outputChannels, batchSize);

            // Project the shortcut only when the feature width changes.
            this.shortcut = (inputChannels != outputChannels)
                    ? new ConvLayer(inputChannels, outputChannels, 1, 1, 0, batchSize, random)
                    : null;
        }

        /** Forward pass (training mode). Input/output: [batchSize][channels]. */
        public double[][] forward(double[][] input) {
            // Main path.
            double[][] x = conv1.forward(input);
            x = bn1.forward(x, true);
            this.reluInputCache = x;  // cache the ReLU pre-activation for backward
            x = relu(x);
            x = conv2.forward(x);
            x = bn2.forward(x, true);

            // Shortcut path.
            double[][] identity = (shortcut != null) ? shortcut.forward(input) : input;

            // Residual addition.
            return add(x, identity);
        }

        /** Backward pass; updates all sub-layer parameters and returns dL/dInput. */
        public double[][] backward(double[][] gradient, double learningRate) {
            // Main path, in exact reverse order of forward. The ReLU mask is
            // taken at the ReLU *input* (bn1 output) — the original version
            // wrongly masked with conv2's output, which is computed after the
            // ReLU in the forward pass.
            double[][] gradMain = bn2.backward(gradient, learningRate);
            gradMain = conv2.backward(gradMain, learningRate);
            gradMain = reluDerivative(gradMain, reluInputCache);
            gradMain = bn1.backward(gradMain, learningRate);
            gradMain = conv1.backward(gradMain, learningRate);

            // Shortcut path: the addition routes the output gradient unchanged,
            // so it can be fed to the projection (or used as-is) directly.
            double[][] gradShortcut = (shortcut != null)
                    ? shortcut.backward(gradient, learningRate)
                    : gradient;

            // Gradients of the two paths sum at the fork point.
            return add(gradMain, gradShortcut);
        }

        /** Element-wise max(0, x). */
        private double[][] relu(double[][] x) {
            double[][] result = new double[x.length][x[0].length];
            for (int i = 0; i < x.length; i++) {
                for (int j = 0; j < x[i].length; j++) {
                    result[i][j] = Math.max(0, x[i][j]);
                }
            }
            return result;
        }

        /** Gradient masked by ReLU'(x): passes where the pre-activation x > 0. */
        private double[][] reluDerivative(double[][] gradient, double[][] x) {
            double[][] result = new double[gradient.length][gradient[0].length];
            for (int i = 0; i < gradient.length; i++) {
                for (int j = 0; j < gradient[i].length; j++) {
                    result[i][j] = x[i][j] > 0 ? gradient[i][j] : 0.0;
                }
            }
            return result;
        }

        /** Element-wise sum of two equally shaped matrices. */
        private double[][] add(double[][] a, double[][] b) {
            double[][] result = new double[a.length][a[0].length];
            for (int i = 0; i < a.length; i++) {
                for (int j = 0; j < a[i].length; j++) {
                    result[i][j] = a[i][j] + b[i][j];
                }
            }
            return result;
        }
    }

    /**
     * "Convolution" layer collapsed to a dense projection: each sample is a
     * flat vector with one value per input channel, and all kernelSize^2 taps
     * of a kernel act on that single value, so the effective dense weight per
     * (out, in) pair is the sum of the taps. {@code stride} and {@code padding}
     * are kept for interface compatibility but have no effect in this
     * flattened simplification.
     */
    private static class ConvLayer {
        private final int inputChannels;    // input features per sample
        private final int outputChannels;   // output features per sample
        private final int kernelSize;       // taps per kernel = kernelSize^2
        private final int stride;           // unused in the flattened model
        private final int padding;          // unused in the flattened model
        private final int batchSize;
        private final double[][][] kernels; // [out][in][kernelSize^2]
        private final double[] biases;      // [out]
        private double[][] inputCache;      // last forward input, for backward
        private double[][] outputCache;     // last forward output (pre-activation)

        public ConvLayer(int inputChannels, int outputChannels, int kernelSize,
                         int stride, int padding, int batchSize, Random random) {
            this.inputChannels = inputChannels;
            this.outputChannels = outputChannels;
            this.kernelSize = kernelSize;
            this.stride = stride;
            this.padding = padding;
            this.batchSize = batchSize;

            // He initialization: per-tap std sqrt(2 / (in * k^2)) makes the
            // summed effective weight have variance 2 / inputChannels.
            double std = Math.sqrt(2.0 / (inputChannels * kernelSize * kernelSize));
            this.kernels = new double[outputChannels][inputChannels][kernelSize * kernelSize];
            for (int o = 0; o < outputChannels; o++) {
                for (int i = 0; i < inputChannels; i++) {
                    for (int k = 0; k < kernelSize * kernelSize; k++) {
                        kernels[o][i][k] = random.nextGaussian() * std;
                    }
                }
            }

            // Small positive bias, as in the original.
            this.biases = new double[outputChannels];
            Arrays.fill(biases, 0.01);
        }

        /**
         * @param input [batchSize][inputChannels]
         * @return [batchSize][outputChannels]
         */
        public double[][] forward(double[][] input) {
            this.inputCache = input;
            double[][] output = new double[batchSize][outputChannels];

            for (int b = 0; b < batchSize; b++) {
                for (int o = 0; o < outputChannels; o++) {
                    double sum = biases[o];
                    for (int i = 0; i < inputChannels; i++) {
                        // Effective dense weight = sum of the kernel taps.
                        double w = 0.0;
                        for (int k = 0; k < kernelSize * kernelSize; k++) {
                            w += kernels[o][i][k];
                        }
                        sum += input[b][i] * w;
                    }
                    output[b][o] = sum;
                }
            }

            this.outputCache = output;
            return output;
        }

        /** SGD parameter update; returns dL/dInput, [batchSize][inputChannels]. */
        public double[][] backward(double[][] gradient, double learningRate) {
            double[][] inputGradient = new double[batchSize][inputChannels];
            double[][][] kernelGradient = new double[outputChannels][inputChannels][kernelSize * kernelSize];
            double[] biasGradient = new double[outputChannels];

            // Accumulate gradients (parameter gradients averaged over the batch).
            for (int b = 0; b < batchSize; b++) {
                for (int o = 0; o < outputChannels; o++) {
                    biasGradient[o] += gradient[b][o] / batchSize;

                    for (int i = 0; i < inputChannels; i++) {
                        for (int k = 0; k < kernelSize * kernelSize; k++) {
                            // Every tap saw the same input value input[b][i].
                            kernelGradient[o][i][k] += inputCache[b][i] * gradient[b][o] / batchSize;
                            inputGradient[b][i] += kernels[o][i][k] * gradient[b][o];
                        }
                    }
                }
            }

            // SGD step.
            for (int o = 0; o < outputChannels; o++) {
                biases[o] -= learningRate * biasGradient[o];
                for (int i = 0; i < inputChannels; i++) {
                    for (int k = 0; k < kernelSize * kernelSize; k++) {
                        kernels[o][i][k] -= learningRate * kernelGradient[o][i][k];
                    }
                }
            }

            return inputGradient;
        }

        public double[][] getOutputCache() {
            return outputCache;
        }
    }

    /**
     * Batch normalization over the channel dimension. Training mode uses batch
     * statistics and maintains running estimates; inference mode uses the
     * running estimates. {@link #backward} requires a prior training-mode
     * forward call (it reads the cached batch statistics).
     */
    private static class BatchNorm {
        private final int channels;
        private final int batchSize;
        private final double[] gamma;          // learnable scale
        private final double[] beta;           // learnable shift
        private final double eps = 1e-5;       // numerical-stability constant
        private final double momentum = 0.9;   // running-statistics decay

        private double[] runningMean;
        private double[] runningVar;
        private double[][] xNormCache;         // normalized activations x-hat
        private double[] meanCache;            // batch means
        private double[] varCache;             // batch variances + eps

        public BatchNorm(int channels, int batchSize) {
            this.channels = channels;
            this.batchSize = batchSize;

            this.gamma = new double[channels];
            Arrays.fill(gamma, 1.0);
            this.beta = new double[channels];
            Arrays.fill(beta, 0.0);

            this.runningMean = new double[channels];
            this.runningVar = new double[channels];
            Arrays.fill(runningVar, 1.0);
        }

        /**
         * @param input      [batchSize][channels]
         * @param isTraining true: normalize with batch statistics and update
         *                   running estimates; false: use running estimates
         * @return normalized, scaled and shifted activations
         */
        public double[][] forward(double[][] input, boolean isTraining) {
            double[][] output = new double[batchSize][channels];

            if (isTraining) {
                meanCache = new double[channels];
                varCache = new double[channels];

                // Per-channel batch mean.
                for (int c = 0; c < channels; c++) {
                    double sum = 0.0;
                    for (int b = 0; b < batchSize; b++) {
                        sum += input[b][c];
                    }
                    meanCache[c] = sum / batchSize;
                }

                // Per-channel batch variance (eps folded in, as in the original).
                for (int c = 0; c < channels; c++) {
                    double sum = 0.0;
                    for (int b = 0; b < batchSize; b++) {
                        sum += Math.pow(input[b][c] - meanCache[c], 2);
                    }
                    varCache[c] = sum / batchSize + eps;
                }

                // Normalize, then scale and shift.
                xNormCache = new double[batchSize][channels];
                for (int b = 0; b < batchSize; b++) {
                    for (int c = 0; c < channels; c++) {
                        xNormCache[b][c] = (input[b][c] - meanCache[c]) / Math.sqrt(varCache[c]);
                        output[b][c] = gamma[c] * xNormCache[b][c] + beta[c];
                    }
                }

                // Exponential-moving-average running statistics for inference.
                for (int c = 0; c < channels; c++) {
                    runningMean[c] = momentum * runningMean[c] + (1 - momentum) * meanCache[c];
                    runningVar[c] = momentum * runningVar[c] + (1 - momentum) * varCache[c];
                }
            } else {
                // Inference mode: use running estimates.
                for (int b = 0; b < batchSize; b++) {
                    for (int c = 0; c < channels; c++) {
                        double xNorm = (input[b][c] - runningMean[c]) / Math.sqrt(runningVar[c] + eps);
                        output[b][c] = gamma[c] * xNorm + beta[c];
                    }
                }
            }

            return output;
        }

        /**
         * Standard batch-norm backward pass (compact form):
         *   dx = gamma / (N * sqrt(var)) * (N*dy - sum(dy) - xHat * sum(dy * xHat))
         * The original implementation read the not-yet-computed input gradient
         * inside its own formula and was mathematically wrong.
         *
         * @return dL/dInput, [batchSize][channels]
         */
        public double[][] backward(double[][] gradient, double learningRate) {
            double[][] inputGradient = new double[batchSize][channels];
            double[] sumDout = new double[channels];       // sum_b dy
            double[] sumDoutXnorm = new double[channels];  // sum_b dy * xHat

            for (int c = 0; c < channels; c++) {
                for (int b = 0; b < batchSize; b++) {
                    sumDout[c] += gradient[b][c];
                    sumDoutXnorm[c] += gradient[b][c] * xNormCache[b][c];
                }
            }

            // Input gradient, computed with the pre-update gamma (the same
            // gamma the forward pass used).
            for (int b = 0; b < batchSize; b++) {
                for (int c = 0; c < channels; c++) {
                    inputGradient[b][c] = gamma[c] / (batchSize * Math.sqrt(varCache[c]))
                            * (batchSize * gradient[b][c]
                               - sumDout[c]
                               - xNormCache[b][c] * sumDoutXnorm[c]);
                }
            }

            // SGD step on gamma/beta (gradients averaged over the batch).
            for (int c = 0; c < channels; c++) {
                gamma[c] -= learningRate * sumDoutXnorm[c] / batchSize;
                beta[c] -= learningRate * sumDout[c] / batchSize;
            }

            return inputGradient;
        }
    }

    /**
     * Fully connected output layer with a softmax activation. Its
     * {@link #backward} assumes the incoming gradient is the combined
     * softmax + cross-entropy gradient w.r.t. the logits (i.e.
     * (predictions - labels) / batchSize), so the softmax Jacobian is not
     * applied again.
     */
    private static class DenseLayer {
        private final int inputSize;
        private final int outputSize;
        private final int batchSize;
        private final double[][] weights;  // [out][in]
        private final double[] biases;     // [out]
        private double[][] inputCache;     // last forward input, for backward

        public DenseLayer(int inputSize, int outputSize, int batchSize, Random random) {
            this.inputSize = inputSize;
            this.outputSize = outputSize;
            this.batchSize = batchSize;

            // Xavier/Glorot-style initialization.
            double std = Math.sqrt(2.0 / (inputSize + outputSize));
            this.weights = new double[outputSize][inputSize];
            for (int o = 0; o < outputSize; o++) {
                for (int i = 0; i < inputSize; i++) {
                    weights[o][i] = random.nextGaussian() * std;
                }
            }

            this.biases = new double[outputSize];
            Arrays.fill(biases, 0.01);
        }

        /**
         * @param input [batchSize][inputSize]
         * @return softmax probabilities, [batchSize][outputSize]
         */
        public double[][] forward(double[][] input) {
            this.inputCache = input;
            double[][] output = new double[batchSize][outputSize];

            // Affine transform: W * x + b.
            for (int b = 0; b < batchSize; b++) {
                for (int o = 0; o < outputSize; o++) {
                    double sum = biases[o];
                    for (int i = 0; i < inputSize; i++) {
                        sum += input[b][i] * weights[o][i];
                    }
                    output[b][o] = sum;
                }
            }

            return softmax(output);
        }

        /** Numerically stable row-wise softmax (max-subtraction trick). */
        private double[][] softmax(double[][] logits) {
            double[][] output = new double[batchSize][outputSize];

            for (int b = 0; b < batchSize; b++) {
                // Subtract the row max to avoid exp overflow.
                double maxVal = Double.NEGATIVE_INFINITY;
                for (int o = 0; o < outputSize; o++) {
                    if (logits[b][o] > maxVal) {
                        maxVal = logits[b][o];
                    }
                }

                double sumExp = 0.0;
                for (int o = 0; o < outputSize; o++) {
                    sumExp += Math.exp(logits[b][o] - maxVal);
                }

                for (int o = 0; o < outputSize; o++) {
                    output[b][o] = Math.exp(logits[b][o] - maxVal) / sumExp;
                }
            }

            return output;
        }

        /**
         * SGD update; {@code gradient} is dL/dLogits (softmax + cross-entropy
         * combined). Returns dL/dInput, [batchSize][inputSize].
         */
        public double[][] backward(double[][] gradient, double learningRate) {
            double[][] inputGradient = new double[batchSize][inputSize];
            double[][] weightGradient = new double[outputSize][inputSize];
            double[] biasGradient = new double[outputSize];

            for (int b = 0; b < batchSize; b++) {
                for (int o = 0; o < outputSize; o++) {
                    biasGradient[o] += gradient[b][o] / batchSize;

                    for (int i = 0; i < inputSize; i++) {
                        weightGradient[o][i] += inputCache[b][i] * gradient[b][o] / batchSize;
                        inputGradient[b][i] += weights[o][i] * gradient[b][o];
                    }
                }
            }

            for (int o = 0; o < outputSize; o++) {
                biases[o] -= learningRate * biasGradient[o];
                for (int i = 0; i < inputSize; i++) {
                    weights[o][i] -= learningRate * weightGradient[o][i];
                }
            }

            return inputGradient;
        }
    }

    /** Demo: trains on one random batch and reports loss/accuracy per epoch. */
    public static void main(String[] args) {
        // Network configuration (CIFAR-10-like dimensions, flattened).
        int inputSize = 32 * 32 * 3;
        int numClasses = 10;
        int batchSize = 32;
        double learningRate = 0.001;

        RestNetDemo2 resnet = new RestNetDemo2(inputSize, numClasses, batchSize, learningRate, 64, 128, 256);

        // One batch of random inputs with random one-hot labels.
        Random random = new Random();
        double[][] input = new double[batchSize][inputSize];
        double[][] labels = new double[batchSize][numClasses];

        for (int i = 0; i < batchSize; i++) {
            for (int j = 0; j < inputSize; j++) {
                input[i][j] = random.nextGaussian() * 0.1;
            }
            labels[i][random.nextInt(numClasses)] = 1.0;
        }

        // Train for a few epochs on the single batch.
        for (int epoch = 0; epoch < 10; epoch++) {
            double[][] predictions = resnet.forward(input);

            double loss = resnet.computeLoss(predictions, labels);
            double accuracy = resnet.computeAccuracy(predictions, labels);
            System.out.printf("Epoch %d: Loss = %.4f, Accuracy = %.4f%n", epoch, loss, accuracy);

            // Combined softmax + cross-entropy gradient w.r.t. the logits.
            double[][] outputGradient = new double[batchSize][numClasses];
            for (int i = 0; i < batchSize; i++) {
                for (int j = 0; j < numClasses; j++) {
                    outputGradient[i][j] = (predictions[i][j] - labels[i][j]) / batchSize;
                }
            }

            resnet.backward(outputGradient);
        }

        // Single final evaluation (the original re-printed identical numbers
        // ten times without training in between).
        double[][] predictions = resnet.forward(input);
        System.out.printf("Final: Loss = %.4f, Accuracy = %.4f%n",
                resnet.computeLoss(predictions, labels),
                resnet.computeAccuracy(predictions, labels));
    }
}
