package enhanced.neural.network.controller;

import enhanced.neural.network.model.ModelParameters;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;

import java.io.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

@Slf4j
@Data
public class EnhancedNeuralNetwork implements Serializable {
    private static final long serialVersionUID = 1L;

    // Network architecture hyperparameters.
    private int inputSize = 784;
    private int hiddenSize1 = 512;  // wider layer for stronger feature extraction
    private int hiddenSize2 = 256;  // wider layer for stronger feature extraction
    private double dropoutRate = 0.3;  // fraction of activations dropped during training
    private int outputSize = 10;

    // Weights and biases for the three fully-connected layers.
    private double[][] W1, W2, W3;
    private double[] b1, b2, b3;

    // Batch-normalization parameters: scale (gamma), shift (beta), running statistics.
    private double[] gamma1, gamma2, beta1, beta2;
    private double[] runningMean1, runningVar1, runningMean2, runningVar2;
    private final double bnMomentum = 0.9;
    private double bnEpsilon = 1e-5;

    // Input standardization statistics.
    // NOTE(review): not referenced anywhere in the visible portion of this file — confirm use.
    private double[] dataMean;
    private double[] dataStd;

    // Nadam optimizer state (Adam + Nesterov momentum): first (m*) and second (v*) moments.
    private double[][] mW1, vW1, mW2, vW2, mW3, vW3;
    private double[] mb1, vb1, mb2, vb2, mb3, vb3;
    private int t = 0; // Adam time step, incremented once per batch update
    private double learningRate;
    private final double initialLearningRate = 0.001;
    private final double decayRate = 0.98; // gentle exponential learning-rate decay per epoch
    private final double adamBeta1 = 0.9;  // Adam first-moment decay rate
    private final double adamBeta2 = 0.999; // Adam second-moment decay rate
    private final double epsilon = 1e-8; // numerical-stability term in the Adam denominator
    private final double lambda = 0.001; // L2 regularization coefficient

    // Learning-rate plateau scheduling.
    private final double minLearningRate = 1e-6; // lower bound for the learning rate
    private final double lrPatience = 3; // validation checks without improvement before halving LR
    private int lrPatienceCounter = 0;
    private double lastBestLoss = Double.MAX_VALUE;

    // Early-stopping state.
    private double bestValidationLoss = Double.MAX_VALUE;
    private int patience = 10; // generous patience to give the model more chances
    private int patienceCounter = 0;
    private boolean stopTraining = false;
    private final double minImprovement = 1e-6; // minimum loss improvement that counts

    // Adaptive training targets.
    private double bestAccuracy = 0.0;
    private double targetAccuracy = 0.96; // stop early once validation accuracy reaches this
    private int accuracyPatience = 5; // stalled validation checks tolerated before stopping
    private int accuracyCounter = 0;

    private final Random random = new Random();

    /**
     * Builds the network with the given layer sizes, applies He weight
     * initialization, zeroes the batch-norm and Nadam optimizer state, and
     * prints a parameter-count summary to stdout.
     *
     * @param inputSize   number of input features (e.g. 784 for 28x28 images)
     * @param hiddenSize1 width of the first hidden layer
     * @param hiddenSize2 width of the second hidden layer
     * @param outputSize  number of output classes
     */
    public EnhancedNeuralNetwork(int inputSize, int hiddenSize1, int hiddenSize2, int outputSize) {
        this.inputSize = inputSize;
        this.hiddenSize1 = hiddenSize1;
        this.hiddenSize2 = hiddenSize2;
        this.outputSize = outputSize;

        initializeWeights();
        // Initialize the remaining training components.
        initializeBatchNorm();
        initializeNadam();
        learningRate = initialLearningRate;

        // Report parameter statistics.
        int totalParams = calculateTotalParameters();
        System.out.println("神经网络参数统计:");
        System.out.println("网络结构: " + inputSize + " -> " + hiddenSize1 + " -> " + hiddenSize2 + " -> " + outputSize);
        System.out.println("总参数数量: " + totalParams);
        // NOTE(review): 60000 looks like the MNIST training-set size — confirm.
        System.out.println("参数密度: " + String.format("%.2f", (double) totalParams / 60000) + " 参数/样本");
    }

    /**
     * Total number of trainable parameters: all weight matrices, all bias
     * vectors, and one (gamma, beta) pair per hidden-layer neuron.
     */
    private int calculateTotalParameters() {
        int weightCount = hiddenSize1 * inputSize      // W1
                + hiddenSize2 * hiddenSize1            // W2
                + outputSize * hiddenSize2;            // W3
        int biasCount = hiddenSize1 + hiddenSize2 + outputSize;
        int bnCount = 2 * (hiddenSize1 + hiddenSize2); // gamma + beta per hidden layer
        return weightCount + biasCount + bnCount;
    }

    /**
     * He (Kaiming) weight initialization for all three layers: weights drawn
     * from N(0, sqrt(2 / fanIn)) — appropriate for ReLU activations — and a
     * small positive bias of 0.01.
     *
     * Refactor: the three copy-pasted init loops (whose comments also carried
     * stale layer sizes) are collapsed into one helper; the RNG call order is
     * identical to the original, so results are unchanged for a given seed.
     */
    private void initializeWeights() {
        // Layer 1: hiddenSize1 x inputSize
        W1 = new double[hiddenSize1][inputSize];
        b1 = new double[hiddenSize1];
        initLayer(W1, b1, inputSize);

        // Layer 2: hiddenSize2 x hiddenSize1
        W2 = new double[hiddenSize2][hiddenSize1];
        b2 = new double[hiddenSize2];
        initLayer(W2, b2, hiddenSize1);

        // Output layer: outputSize x hiddenSize2
        W3 = new double[outputSize][hiddenSize2];
        b3 = new double[outputSize];
        initLayer(W3, b3, hiddenSize2);
    }

    // Fills one layer in place: Gaussian weights scaled by the He std for the
    // given fan-in, bias set to a small positive constant.
    private void initLayer(double[][] W, double[] b, int fanIn) {
        double std = Math.sqrt(2.0 / fanIn);
        for (int i = 0; i < W.length; i++) {
            for (int j = 0; j < fanIn; j++) {
                W[i][j] = std * random.nextGaussian();
            }
            b[i] = 0.01;
        }
    }

    /**
     * Allocates batch-norm state for both hidden layers: gamma = 1, beta = 0,
     * running mean/variance = 0 (Java zero-initializes new arrays).
     */
    private void initializeBatchNorm() {
        gamma1 = new double[hiddenSize1];
        beta1 = new double[hiddenSize1];
        runningMean1 = new double[hiddenSize1];
        runningVar1 = new double[hiddenSize1];
        Arrays.fill(gamma1, 1.0); // identity scale; beta stays at 0

        gamma2 = new double[hiddenSize2];
        beta2 = new double[hiddenSize2];
        runningMean2 = new double[hiddenSize2];
        runningVar2 = new double[hiddenSize2];
        Arrays.fill(gamma2, 1.0); // identity scale; beta stays at 0
    }

    /**
     * Allocates zeroed Nadam optimizer buffers: first-moment (m*) and
     * second-moment (v*) estimates for every weight matrix and bias vector.
     */
    private void initializeNadam() {
        // First-moment estimates.
        mW1 = new double[hiddenSize1][inputSize];
        mb1 = new double[hiddenSize1];
        mW2 = new double[hiddenSize2][hiddenSize1];
        mb2 = new double[hiddenSize2];
        mW3 = new double[outputSize][hiddenSize2];
        mb3 = new double[outputSize];

        // Second-moment estimates.
        vW1 = new double[hiddenSize1][inputSize];
        vb1 = new double[hiddenSize1];
        vW2 = new double[hiddenSize2][hiddenSize1];
        vb2 = new double[hiddenSize2];
        vW3 = new double[outputSize][hiddenSize2];
        vb3 = new double[outputSize];
    }


    /**
     * Single-sample forward pass through all three layers.
     *
     * NOTE(review): in training mode the "batch norm" step is only an affine
     * scale/shift (gamma * z + beta) and the running statistics are not updated
     * here, while inference mode normalizes with runningMean/runningVar — the
     * two paths are inconsistent unless the running stats are maintained
     * elsewhere; confirm.
     *
     * @param x        input feature vector (length inputSize)
     * @param training true to enable dropout and the training-mode BN path
     * @return softmax class probabilities (length outputSize)
     */
    public double[] forward(double[] x, boolean training) {
        // Layer 1: z1 = W1*x + b1, with the weight row cached for locality.
        double[] z1 = new double[hiddenSize1];
        for (int i = 0; i < hiddenSize1; i++) {
            double sum = b1[i]; // start from the bias
            double[] w1i = W1[i]; // cache the weight row
            for (int j = 0; j < inputSize; j++) {
                sum += w1i[j] * x[j];
            }
            z1[i] = sum;
        }

        // Batch norm 1: training applies scale/shift only; inference normalizes
        // with the running statistics.
        double[] bn1 = new double[hiddenSize1];
        if (training) {
            for (int i = 0; i < hiddenSize1; i++) {
                bn1[i] = gamma1[i] * z1[i] + beta1[i];
            }
        } else {
            for (int i = 0; i < hiddenSize1; i++) {
                double std = Math.sqrt(runningVar1[i] + bnEpsilon);
                bn1[i] = gamma1[i] * ((z1[i] - runningMean1[i]) / std) + beta1[i];
            }
        }

        // ReLU activation, inlined.
        double[] a1 = new double[hiddenSize1];
        for (int i = 0; i < hiddenSize1; i++) {
            a1[i] = Math.max(0, bn1[i]);
        }

        // Inverted dropout (training only).
        if (training) {
            a1 = applyDropout(a1, dropoutRate);
        }

        // Layer 2: z2 = W2*a1 + b2
        double[] z2 = new double[hiddenSize2];
        for (int i = 0; i < hiddenSize2; i++) {
            double sum = b2[i];
            double[] w2i = W2[i];
            for (int j = 0; j < hiddenSize1; j++) {
                sum += w2i[j] * a1[j];
            }
            z2[i] = sum;
        }

        // Batch norm 2: same training/inference split as layer 1.
        double[] bn2 = new double[hiddenSize2];
        if (training) {
            for (int i = 0; i < hiddenSize2; i++) {
                bn2[i] = gamma2[i] * z2[i] + beta2[i];
            }
        } else {
            for (int i = 0; i < hiddenSize2; i++) {
                double std = Math.sqrt(runningVar2[i] + bnEpsilon);
                bn2[i] = gamma2[i] * ((z2[i] - runningMean2[i]) / std) + beta2[i];
            }
        }

        // ReLU activation.
        double[] a2 = new double[hiddenSize2];
        for (int i = 0; i < hiddenSize2; i++) {
            a2[i] = Math.max(0, bn2[i]);
        }

        // Inverted dropout (training only).
        if (training) {
            a2 = applyDropout(a2, dropoutRate);
        }

        // Output layer: z3 = W3*a2 + b3
        double[] z3 = new double[outputSize];
        for (int i = 0; i < outputSize; i++) {
            double sum = b3[i];
            double[] w3i = W3[i];
            for (int j = 0; j < hiddenSize2; j++) {
                sum += w3i[j] * a2[j];
            }
            z3[i] = sum;
        }

        return softmaxFast(z3);
    }

    /**
     * Numerically stable softmax: subtracts the maximum logit before
     * exponentiation to avoid overflow, then normalizes to sum to 1.
     */
    private double[] softmaxFast(double[] logits) {
        int n = logits.length;
        double peak = logits[0];
        for (int i = 1; i < n; i++) {
            if (logits[i] > peak) {
                peak = logits[i];
            }
        }

        double[] probs = new double[n];
        double norm = 0.0;
        for (int i = 0; i < n; i++) {
            probs[i] = Math.exp(logits[i] - peak);
            norm += probs[i];
        }

        for (int i = 0; i < n; i++) {
            probs[i] /= norm;
        }
        return probs;
    }


    /**
     * Inverted dropout: each unit is zeroed with probability {@code rate};
     * surviving units are scaled by 1/(1-rate) so the expected activation is
     * unchanged. Returns the input unchanged when rate <= 0.
     */
    private double[] applyDropout(double[] x, double rate) {
        if (rate <= 0) return x;
        double[] masked = new double[x.length];
        for (int i = 0; i < masked.length; i++) {
            boolean keep = random.nextDouble() > rate;
            masked[i] = keep ? x[i] / (1 - rate) : 0;
        }
        return masked;
    }


    // Label-smoothing factor: the true class gets 1 - labelSmoothing, the
    // remaining probability mass is spread evenly over the other classes.
    private final double labelSmoothing = 0.1;

    /**
     * Label-smoothed cross-entropy for one prediction, plus the L2 penalty over
     * all three weight matrices scaled by lambda / (2 * batchSize).
     *
     * @param pred      softmax output probabilities
     * @param target    true class index
     * @param batchSize batch size used to scale the regularization term
     * @return smoothed cross-entropy loss plus the L2 regularization term
     */
    public double crossEntropyLoss(double[] pred, int target, int batchSize) {
        int numClasses = pred.length;
        double loss = 0.0;
        for (int c = 0; c < numClasses; c++) {
            // Smoothed target distribution: 1 - s on the true class, s/(K-1) elsewhere.
            double targetProb = (c == target) ? (1 - labelSmoothing) : (labelSmoothing / (numClasses - 1));
            loss -= targetProb * Math.log(pred[c] + 1e-10);
        }

        // L2 penalty, accumulated layer by layer in the original order.
        double regLoss = addSquares(addSquares(addSquares(0.0, W1), W2), W3);
        regLoss *= lambda / (2 * batchSize);
        return loss + regLoss;
    }

    // Adds the sum of squared entries of W onto acc (row-major order preserved).
    private static double addSquares(double acc, double[][] W) {
        for (double[] row : W) {
            for (double w : row) {
                acc += w * w;
            }
        }
        return acc;
    }


    /**
     * Nadam (Adam + Nesterov momentum) update for all weights, biases and the
     * batch-norm scale/shift parameters (gamma/beta use plain SGD).
     * Expects the time step {@code t} to have been incremented for the current
     * batch before this method is called (so t >= 1 and the bias-correction
     * factors are finite).
     *
     * Fix: removed an unused {@code beta2_t} local that was computed on every
     * call but never read.
     */
    private void updateWeightsWithNadam(double[][] gradW1, double[] gradB1, double[] gradGamma1, double[] gradBeta1,
                                        double[][] gradW2, double[] gradB2, double[] gradGamma2, double[] gradBeta2,
                                        double[][] gradW3, double[] gradB3) {
        // Momentum schedule used by the Nesterov correction term.
        double beta1_t = adamBeta1 * (1.0 - 0.5 * Math.pow(0.96, t / 250.00));

        // Bias-correction factors, precomputed once per call.
        double beta1_pow_t = Math.pow(adamBeta1, t);
        double beta2_pow_t = Math.pow(adamBeta2, t);
        double mHat_factor = 1.0 / (1.0 - beta1_pow_t);
        double vHat_factor = 1.0 / (1.0 - beta2_pow_t);

        // Layer 1 parameters.
        for (int i = 0; i < hiddenSize1; i++) {
            for (int j = 0; j < inputSize; j++) {
                // First-moment estimate.
                mW1[i][j] = adamBeta1 * mW1[i][j] + (1 - adamBeta1) * gradW1[i][j];
                // Second-moment estimate.
                vW1[i][j] = adamBeta2 * vW1[i][j] + (1 - adamBeta2) * gradW1[i][j] * gradW1[i][j];

                // Bias-corrected estimates.
                double mHat = mW1[i][j] * mHat_factor;
                double vHat = vW1[i][j] * vHat_factor;

                // Nadam update: blend the corrected momentum with the raw gradient.
                double nadamUpdate = (beta1_t * mHat + (1 - beta1_t) * gradW1[i][j])
                        / (Math.sqrt(vHat) + epsilon);

                W1[i][j] -= learningRate * nadamUpdate;
            }

            // Bias update.
            mb1[i] = adamBeta1 * mb1[i] + (1 - adamBeta1) * gradB1[i];
            vb1[i] = adamBeta2 * vb1[i] + (1 - adamBeta2) * gradB1[i] * gradB1[i];

            double mHat = mb1[i] * mHat_factor;
            double vHat = vb1[i] * vHat_factor;

            double nadamUpdate = (beta1_t * mHat + (1 - beta1_t) * gradB1[i])
                    / (Math.sqrt(vHat) + epsilon);

            b1[i] -= learningRate * nadamUpdate;

            // Batch-norm parameters: plain gradient step.
            gamma1[i] -= learningRate * gradGamma1[i];
            beta1[i] -= learningRate * gradBeta1[i];
        }

        // Layer 2 parameters.
        for (int i = 0; i < hiddenSize2; i++) {
            for (int j = 0; j < hiddenSize1; j++) {
                mW2[i][j] = adamBeta1 * mW2[i][j] + (1 - adamBeta1) * gradW2[i][j];
                vW2[i][j] = adamBeta2 * vW2[i][j] + (1 - adamBeta2) * gradW2[i][j] * gradW2[i][j];

                double mHat = mW2[i][j] * mHat_factor;
                double vHat = vW2[i][j] * vHat_factor;

                double nadamUpdate = (beta1_t * mHat + (1 - beta1_t) * gradW2[i][j])
                        / (Math.sqrt(vHat) + epsilon);

                W2[i][j] -= learningRate * nadamUpdate;
            }

            mb2[i] = adamBeta1 * mb2[i] + (1 - adamBeta1) * gradB2[i];
            vb2[i] = adamBeta2 * vb2[i] + (1 - adamBeta2) * gradB2[i] * gradB2[i];

            double mHat = mb2[i] * mHat_factor;
            double vHat = vb2[i] * vHat_factor;

            double nadamUpdate = (beta1_t * mHat + (1 - beta1_t) * gradB2[i])
                    / (Math.sqrt(vHat) + epsilon);

            b2[i] -= learningRate * nadamUpdate;

            // Batch-norm parameters: plain gradient step.
            gamma2[i] -= learningRate * gradGamma2[i];
            beta2[i] -= learningRate * gradBeta2[i];
        }

        // Output-layer parameters.
        for (int i = 0; i < outputSize; i++) {
            for (int j = 0; j < hiddenSize2; j++) {
                mW3[i][j] = adamBeta1 * mW3[i][j] + (1 - adamBeta1) * gradW3[i][j];
                vW3[i][j] = adamBeta2 * vW3[i][j] + (1 - adamBeta2) * gradW3[i][j] * gradW3[i][j];

                double mHat = mW3[i][j] * mHat_factor;
                double vHat = vW3[i][j] * vHat_factor;

                double nadamUpdate = (beta1_t * mHat + (1 - beta1_t) * gradW3[i][j])
                        / (Math.sqrt(vHat) + epsilon);

                W3[i][j] -= learningRate * nadamUpdate;
            }

            mb3[i] = adamBeta1 * mb3[i] + (1 - adamBeta1) * gradB3[i];
            vb3[i] = adamBeta2 * vb3[i] + (1 - adamBeta2) * gradB3[i] * gradB3[i];

            double mHat = mb3[i] * mHat_factor;
            double vHat = vb3[i] * vHat_factor;

            double nadamUpdate = (beta1_t * mHat + (1 - beta1_t) * gradB3[i])
                    / (Math.sqrt(vHat) + epsilon);

            b3[i] -= learningRate * nadamUpdate;
        }
    }

    /**
     * Returns the index of the highest-probability class for input {@code x},
     * using the inference-mode forward pass (no dropout, running BN stats).
     */
    public int predict(double[] x) {
        double[] probs = forward(x, false);
        int best = 0;
        for (int i = 1; i < outputSize; i++) {
            if (probs[i] > probs[best]) {
                best = i;
            }
        }
        return best;
    }


    /**
     * Trains the network with shuffled mini-batches and Nadam updates.
     * Learning rate follows an exponential decay combined with plateau-based
     * halving; validation loss/accuracy are computed every third epoch on a
     * fixed random sample; training stops early when the target accuracy is
     * reached or accuracy stalls.
     *
     * Fixes vs. the previous version:
     * - plateau-based LR halving used to be overwritten by the decay schedule
     *   at the start of the next epoch; a persistent plateau factor now keeps it;
     * - the scheduled LR is clamped to {@code minLearningRate};
     * - early stopping no longer skews the final statistics (per-epoch average
     *   and time-trend analysis now use only the epochs actually run).
     *
     * @param X_train   training inputs
     * @param y_train   training labels
     * @param X_val     validation inputs
     * @param y_val     validation labels
     * @param epochs    maximum number of epochs
     * @param batchSize mini-batch size
     */
    public void train(double[][] X_train, int[] y_train, double[][] X_val, int[] y_val,
                      int epochs, int batchSize) {
        System.out.println("开始训练神经网络...");
        System.out.println("输入大小: " + inputSize);
        System.out.println("网络结构: " + inputSize + " -> " + hiddenSize1 + " -> " + hiddenSize2 + " -> " + outputSize);
        System.out.println("训练样本数: " + X_train.length + ", 验证样本数: " + X_val.length);
        System.out.println("批次大小: " + batchSize + ", 总epoch数: " + epochs);

        // Baseline memory snapshot.
        printMemoryUsage("训练开始前");

        int numSamples = X_train.length;
        int numBatches = (int) Math.ceil((double) numSamples / batchSize);

        // Fixed random sample of validation indices (seeded for reproducibility).
        int valSampleSize = Math.min(200, X_val.length);
        int[] valIndices = new int[valSampleSize];
        Random valRandom = new Random(42);
        for (int i = 0; i < valSampleSize; i++) {
            valIndices[i] = valRandom.nextInt(X_val.length);
        }

        long totalTrainingTime = 0;
        double[] epochTimes = new double[epochs];
        int epochsRun = 0; // epochs actually completed (early stop may end sooner)

        // Reset plateau-scheduling state.
        lastBestLoss = Double.MAX_VALUE;
        lrPatienceCounter = 0;
        // Persistent factor so plateau halving survives the per-epoch decay schedule.
        double plateauFactor = 1.0;

        // Pre-allocated index array, shuffled in place each epoch.
        int[] trainIndices = new int[numSamples];
        for (int i = 0; i < numSamples; i++) {
            trainIndices[i] = i;
        }

        for (int epoch = 0; epoch < epochs && !stopTraining; epoch++) {
            long epochStartTime = System.currentTimeMillis();
            double totalLoss = 0;

            // Exponential decay combined with the plateau factor, clamped below.
            learningRate = Math.max(minLearningRate,
                    initialLearningRate * Math.pow(decayRate, epoch / 2.0) * plateauFactor);

            // Shuffle the training order.
            shuffleArray(trainIndices, random);

            for (int batch = 0; batch < numBatches; batch++) {
                int start = batch * batchSize;
                int end = Math.min(start + batchSize, numSamples);
                int currentBatchSize = end - start;

                // Reference rows directly via the shuffled indices (no copying).
                double[][] batchX = new double[currentBatchSize][];
                int[] batchY = new int[currentBatchSize];

                for (int i = 0; i < currentBatchSize; i++) {
                    int idx = trainIndices[start + i];
                    batchX[i] = X_train[idx];
                    batchY[i] = y_train[idx];
                }

                double batchLoss = trainBatchUltraOptimized(batchX, batchY, currentBatchSize);
                totalLoss += batchLoss * currentBatchSize;

                // Throttled progress output.
                if (batch % 500 == 0 || batch == numBatches - 1) {
                    long currentTime = System.currentTimeMillis();
                    double progress = (double) batch / numBatches * 100;
                    System.out.printf("Epoch %d: %.1f%% 完成, 损失: %.4f, 耗时: %dms%n",
                            epoch + 1, progress, batchLoss, currentTime - epochStartTime);
                }
            }

            double avgTrainLoss = totalLoss / numSamples;

            // Validate every third epoch (and on the last epoch).
            double valLoss = 0;
            double valAccuracy = 0;
            if (epoch % 3 == 0 || epoch == epochs - 1) {
                valLoss = calculateValidationLossSampled(X_val, y_val, valIndices);
                valAccuracy = calculateValidationAccuracySampled(X_val, y_val, valIndices);

                // Plateau-based learning-rate scheduling.
                if (epoch > 0) {
                    if (valLoss < lastBestLoss - minImprovement) {
                        lastBestLoss = valLoss;
                        lrPatienceCounter = 0;
                    } else {
                        lrPatienceCounter++;
                        if (lrPatienceCounter >= lrPatience) {
                            double oldLR = learningRate;
                            plateauFactor *= 0.5; // persists into later epochs
                            learningRate = Math.max(minLearningRate, learningRate * 0.5);
                            lrPatienceCounter = 0;
                            System.out.println("学习率从 " + oldLR + " 降低到: " + learningRate);
                        }
                    }
                } else {
                    lastBestLoss = valLoss;
                }

                log.info("Epoch: {}, LR: {}, Train Loss: {}, Val Loss: {}, Val Acc: {}%, 耗时: {}ms",
                        epoch + 1, learningRate, avgTrainLoss, valLoss, valAccuracy * 100, System.currentTimeMillis() - epochStartTime);
            } else {
                log.info("Epoch: {}, LR: {}, Train Loss: {}, 耗时: {}ms",
                        epoch + 1, learningRate, avgTrainLoss, System.currentTimeMillis() - epochStartTime);
            }

            long epochEndTime = System.currentTimeMillis();
            long epochDuration = epochEndTime - epochStartTime;
            totalTrainingTime += epochDuration;
            epochTimes[epoch] = epochDuration;
            epochsRun = epoch + 1;

            // Occasional memory monitoring.
            if ((epoch + 1) % 10 == 0) {
                printMemoryUsage("Epoch " + (epoch + 1) + " 后");
                System.gc();
            }

            // Early stopping on validation accuracy (only on validation epochs).
            if (epoch % 3 == 0 && valAccuracy > bestAccuracy) {
                bestAccuracy = valAccuracy;
                accuracyCounter = 0;

                if (valAccuracy >= targetAccuracy) {
                    System.out.println("目标准确率达到: " + (valAccuracy * 100) + "%");
                    stopTraining = true;
                }
            } else if (epoch % 3 == 0) {
                accuracyCounter++;
                if (accuracyCounter >= accuracyPatience) {
                    System.out.println("准确率停滞，提前停止训练");
                    stopTraining = true;
                }
            }
        }

        // Final training statistics, based on the epochs actually run.
        System.out.println("\n=== 训练完成统计 ===");
        System.out.println("总训练时间: " + totalTrainingTime + "ms (" + (totalTrainingTime / 1000.0) + "s)");
        System.out.println("平均每epoch时间: " + (totalTrainingTime / Math.max(1, epochsRun)) + "ms");
        System.out.println("最佳验证准确率: " + (bestAccuracy * 100) + "%");
        System.out.println("最终学习率: " + learningRate);

        analyzeTrainingTimeTrend(Arrays.copyOf(epochTimes, epochsRun));
        printMemoryUsage("训练完成后");
    }

    /** In-place Fisher–Yates shuffle using the supplied RNG. */
    private void shuffleArray(int[] array, Random random) {
        for (int i = array.length - 1; i > 0; i--) {
            int j = random.nextInt(i + 1);
            int swapped = array[i];
            array[i] = array[j];
            array[j] = swapped;
        }
    }

    /**
     * Forward + backward pass over one mini-batch followed by a Nadam update.
     * Returns the average (unregularized) cross-entropy loss over the batch.
     *
     * NOTE(review): the "batch norm" here is an affine scale/shift only
     * (gamma * z + beta); no per-batch normalization is performed and the
     * running statistics are never updated in this method, while inference-mode
     * forward() normalizes with runningMean/runningVar — confirm those stats
     * are maintained elsewhere. Also note dropout is NOT applied here, unlike
     * the training path of forward(); and gradGamma is accumulated against the
     * post-affine value bn rather than a normalized input — presumably
     * intentional given the simplified BN, but verify.
     */
    private double trainBatchUltraOptimized(double[][] batchX, int[] batchY, int batchSize) {
        // Pre-allocate all per-sample activation buffers for the batch.
        double[][] allZ1 = new double[batchSize][hiddenSize1];
        double[][] allBN1 = new double[batchSize][hiddenSize1];
        double[][] allA1 = new double[batchSize][hiddenSize1];
        double[][] allZ2 = new double[batchSize][hiddenSize2];
        double[][] allBN2 = new double[batchSize][hiddenSize2];
        double[][] allA2 = new double[batchSize][hiddenSize2];
        double[][] allZ3 = new double[batchSize][outputSize];
        double[][] allOutput = new double[batchSize][outputSize];

        double batchLoss = 0.0;

        // 1. Forward pass for every sample in the batch.
        for (int n = 0; n < batchSize; n++) {
            double[] x = batchX[n];
            double[] z1 = allZ1[n];
            double[] bn1 = allBN1[n];
            double[] a1 = allA1[n];
            double[] z2 = allZ2[n];
            double[] bn2 = allBN2[n];
            double[] a2 = allA2[n];
            double[] z3 = allZ3[n];
            double[] output = allOutput[n];

            // Layer 1 — weight row cached for locality.
            for (int i = 0; i < hiddenSize1; i++) {
                double sum = b1[i];
                double[] w1i = W1[i];
                for (int j = 0; j < inputSize; j++) {
                    sum += w1i[j] * x[j];
                }
                z1[i] = sum;
            }

            // "Batch norm" 1 — affine scale/shift only.
            for (int i = 0; i < hiddenSize1; i++) {
                bn1[i] = gamma1[i] * z1[i] + beta1[i];
            }

            // ReLU activation.
            for (int i = 0; i < hiddenSize1; i++) {
                a1[i] = Math.max(0, bn1[i]);
            }

            // Layer 2.
            for (int i = 0; i < hiddenSize2; i++) {
                double sum = b2[i];
                double[] w2i = W2[i];
                for (int j = 0; j < hiddenSize1; j++) {
                    sum += w2i[j] * a1[j];
                }
                z2[i] = sum;
            }

            // "Batch norm" 2 — affine scale/shift only.
            for (int i = 0; i < hiddenSize2; i++) {
                bn2[i] = gamma2[i] * z2[i] + beta2[i];
            }

            // ReLU activation.
            for (int i = 0; i < hiddenSize2; i++) {
                a2[i] = Math.max(0, bn2[i]);
            }

            // Output layer.
            for (int i = 0; i < outputSize; i++) {
                double sum = b3[i];
                double[] w3i = W3[i];
                for (int j = 0; j < hiddenSize2; j++) {
                    sum += w3i[j] * a2[j];
                }
                z3[i] = sum;
            }

            // Softmax written directly into the output buffer.
            softmaxFastInPlace(z3, output);

            // Accumulate the per-sample loss.
            batchLoss += crossEntropyLossFast(output, batchY[n]);
        }

        // 2. Backward pass — gradient buffers accumulated over the batch.
        double[][] gradW1 = new double[hiddenSize1][inputSize];
        double[] gradB1 = new double[hiddenSize1];
        double[] gradGamma1 = new double[hiddenSize1];
        double[] gradBeta1 = new double[hiddenSize1];

        double[][] gradW2 = new double[hiddenSize2][hiddenSize1];
        double[] gradB2 = new double[hiddenSize2];
        double[] gradGamma2 = new double[hiddenSize2];
        double[] gradBeta2 = new double[hiddenSize2];

        double[][] gradW3 = new double[outputSize][hiddenSize2];
        double[] gradB3 = new double[outputSize];

        for (int n = 0; n < batchSize; n++) {
            double[] output = allOutput[n];
            int y = batchY[n];
            double[] a1 = allA1[n];
            double[] a2 = allA2[n];
            double[] bn1 = allBN1[n];
            double[] bn2 = allBN2[n];

            // Output-layer error: softmax + cross-entropy gives (p - onehot(y)).
            double[] delta3 = new double[outputSize];
            System.arraycopy(output, 0, delta3, 0, outputSize);
            delta3[y] -= 1.0;

            // Second hidden-layer error, backpropagated through W3.
            double[] delta2 = new double[hiddenSize2];
            for (int i = 0; i < hiddenSize2; i++) {
                double error = 0.0;
                for (int j = 0; j < outputSize; j++) {
                    error += W3[j][i] * delta3[j];
                }
                delta2[i] = error * (bn2[i] > 0 ? 1 : 0); // ReLU derivative
            }

            // First hidden-layer error, backpropagated through W2.
            double[] delta1 = new double[hiddenSize1];
            for (int i = 0; i < hiddenSize1; i++) {
                double error = 0.0;
                for (int j = 0; j < hiddenSize2; j++) {
                    error += W2[j][i] * delta2[j];
                }
                delta1[i] = error * (bn1[i] > 0 ? 1 : 0); // ReLU derivative
            }

            // Accumulate gradients for this sample.
            double[] x = batchX[n];
            for (int i = 0; i < hiddenSize1; i++) {
                for (int j = 0; j < inputSize; j++) {
                    gradW1[i][j] += delta1[i] * x[j];
                }
                gradB1[i] += delta1[i];
                gradGamma1[i] += delta1[i] * bn1[i];
                gradBeta1[i] += delta1[i];
            }

            for (int i = 0; i < hiddenSize2; i++) {
                for (int j = 0; j < hiddenSize1; j++) {
                    gradW2[i][j] += delta2[i] * a1[j];
                }
                gradB2[i] += delta2[i];
                gradGamma2[i] += delta2[i] * bn2[i];
                gradBeta2[i] += delta2[i];
            }

            for (int i = 0; i < outputSize; i++) {
                for (int j = 0; j < hiddenSize2; j++) {
                    gradW3[i][j] += delta3[i] * a2[j];
                }
                gradB3[i] += delta3[i];
            }
        }

        // Average gradients over the batch and add the L2 term for the weights.
        double batchSizeInv = 1.0 / batchSize;
        for (int i = 0; i < hiddenSize1; i++) {
            for (int j = 0; j < inputSize; j++) {
                gradW1[i][j] = (gradW1[i][j] + lambda * W1[i][j]) * batchSizeInv;
            }
            gradB1[i] *= batchSizeInv;
            gradGamma1[i] *= batchSizeInv;
            gradBeta1[i] *= batchSizeInv;
        }

        for (int i = 0; i < hiddenSize2; i++) {
            for (int j = 0; j < hiddenSize1; j++) {
                gradW2[i][j] = (gradW2[i][j] + lambda * W2[i][j]) * batchSizeInv;
            }
            gradB2[i] *= batchSizeInv;
            gradGamma2[i] *= batchSizeInv;
            gradBeta2[i] *= batchSizeInv;
        }

        for (int i = 0; i < outputSize; i++) {
            for (int j = 0; j < hiddenSize2; j++) {
                gradW3[i][j] = (gradW3[i][j] + lambda * W3[i][j]) * batchSizeInv;
            }
            gradB3[i] *= batchSizeInv;
        }

        // 3. Advance the Adam time step and apply the Nadam update.
        t++;
        updateWeightsWithNadam(gradW1, gradB1, gradGamma1, gradBeta1,
                gradW2, gradB2, gradGamma2, gradBeta2,
                gradW3, gradB3);

        return batchLoss / batchSize;
    }

    /**
     * Per-sample batch-norm forward pass into a caller-supplied buffer.
     *
     * NOTE(review): this method is not called anywhere in the visible portion
     * of this file — possibly dead code. In training mode it treats each call
     * as a "batch" of one, so mean == x and variance is pinned to bnEpsilon;
     * the normalized output is therefore ~0 before the gamma/beta affine step,
     * and the running statistics it maintains track raw activations rather
     * than true batch statistics — confirm before relying on it.
     *
     * @param x           activations for one sample
     * @param gamma       per-neuron scale
     * @param beta        per-neuron shift
     * @param runningMean running mean, updated in place when training
     * @param runningVar  running variance, updated in place when training
     * @param training    true to use (degenerate) per-call statistics
     * @param batchSize   unused in the current implementation
     * @param result      output buffer, same length as x
     */
    private void batchNormForwardOptimized(double[] x, double[] gamma, double[] beta,
                                           double[] runningMean, double[] runningVar,
                                           boolean training, int batchSize, double[] result) {
        int size = x.length;

        if (training) {
            // Per-call statistics for a "batch" of one sample.
            double[] mean = new double[size];
            double[] var = new double[size];

            // Mean of a single sample is the sample itself.
            for (int i = 0; i < size; i++) {
                mean[i] = x[i];
            }

            // Single-sample variance is 0; use epsilon to avoid division by zero.
            for (int i = 0; i < size; i++) {
                var[i] = bnEpsilon;
            }

            // Exponential-moving-average update of the running statistics.
            for (int i = 0; i < size; i++) {
                runningMean[i] = bnMomentum * runningMean[i] + (1 - bnMomentum) * mean[i];
                runningVar[i] = bnMomentum * runningVar[i] + (1 - bnMomentum) * var[i];

                // Keep the variance from collapsing below epsilon.
                if (runningVar[i] < bnEpsilon) {
                    runningVar[i] = bnEpsilon;
                }
            }

            // Normalize with the per-call statistics.
            for (int i = 0; i < size; i++) {
                double std = Math.sqrt(var[i] + bnEpsilon);
                result[i] = (x[i] - mean[i]) / std;
            }
        } else {
            // Inference: normalize with the running statistics.
            for (int i = 0; i < size; i++) {
                double std = Math.sqrt(runningVar[i] + bnEpsilon);
                result[i] = (x[i] - runningMean[i]) / std;
            }
        }

        // Scale and shift.
        for (int i = 0; i < size; i++) {
            result[i] = gamma[i] * result[i] + beta[i];
        }
    }

    /** Element-wise ReLU, written into the caller-supplied buffer. */
    private void applyReluOptimized(double[] x, double[] result) {
        int n = x.length;
        for (int idx = 0; idx < n; idx++) {
            result[idx] = Math.max(0, x[idx]);
        }
    }

    /**
     * Numerically stable softmax into a caller-supplied buffer: subtracts the
     * maximum before exponentiation, then normalizes to sum to 1.
     */
    private void softmaxOptimized(double[] x, double[] result) {
        int n = x.length;
        double peak = x[0];
        for (int i = 1; i < n; i++) {
            if (x[i] > peak) {
                peak = x[i];
            }
        }

        double norm = 0.0;
        for (int i = 0; i < n; i++) {
            double e = Math.exp(x[i] - peak); // stable: exponent is <= 0
            result[i] = e;
            norm += e;
        }

        for (int i = 0; i < n; i++) {
            result[i] /= norm;
        }
    }

    /**
     * Average validation loss over a fixed sample of indices, using the
     * inference-mode forward pass. The sample size is passed to
     * crossEntropyLoss as the batch size for the regularization term.
     */
    private double calculateValidationLossSampled(double[][] X_val, int[] y_val, int[] valIndices) {
        int numSamples = valIndices.length;
        double total = 0.0;

        for (int idx : valIndices) {
            double[] pred = forward(X_val[idx], false); // inference mode
            total += crossEntropyLoss(pred, y_val[idx], numSamples);
        }

        return total / numSamples;
    }

    /** Accuracy over a fixed sample of validation indices. */
    private double calculateValidationAccuracySampled(double[][] X_val, int[] y_val, int[] valIndices) {
        int hits = 0;

        for (int idx : valIndices) {
            if (predict(X_val[idx]) == y_val[idx]) {
                hits++;
            }
        }

        return (double) hits / valIndices.length;
    }

    /**
     * Classification accuracy of the network over a full test set.
     *
     * @param X_test test inputs, one sample per row
     * @param y_test expected class labels, parallel to X_test
     * @return fraction in [0, 1] of samples whose prediction matches the
     *         label; 0.0 for an empty test set
     */
    public double evaluateAccuracy(double[][] X_test, int[] y_test) {
        // Guard: the original divided 0/0 for an empty set, returning NaN.
        if (X_test.length == 0) {
            return 0.0;
        }
        int correct = 0;
        for (int i = 0; i < X_test.length; i++) {
            if (predict(X_test[i]) == y_test[i]) {
                correct++;
            }
        }
        return (double) correct / X_test.length;
    }

    /**
     * Logs JVM heap usage (used / committed / max, in MB) at a named
     * stage of training.
     */
    private void printMemoryUsage(String stage) {
        final double MB = 1024.0 * 1024.0;
        Runtime rt = Runtime.getRuntime();
        long committed = rt.totalMemory();
        long used = committed - rt.freeMemory();

        System.out.printf("[%s] 内存使用: %.2f MB / %.2f MB (%.1f%%), 最大可用: %.2f MB%n",
                stage,
                used / MB,
                committed / MB,
                (double) used / committed * 100,
                rt.maxMemory() / MB);
    }

    /**
     * Compares the average duration of the earliest and latest epochs to
     * detect a slowdown trend (e.g. from memory pressure). No-op when
     * fewer than 5 epoch times are recorded.
     */
    private void analyzeTrainingTimeTrend(double[] epochTimes) {
        int n = epochTimes.length;
        if (n < 5) {
            return;
        }

        // Window: up to 5 epochs from each end, capped so the two
        // windows never overlap.
        int window = Math.min(5, n / 2);

        double headSum = 0.0;
        double tailSum = 0.0;
        for (int k = 0; k < window; k++) {
            headSum += epochTimes[k];
            tailSum += epochTimes[n - 1 - k];
        }

        double earlyAvg = headSum / window;
        double lateAvg = tailSum / window;
        double ratio = lateAvg / earlyAvg;

        System.out.printf("训练时间趋势分析: 前%d个epoch平均%.0fms, 后%d个epoch平均%.0fms, 比率: %.2f%n",
                window, earlyAvg, window, lateAvg, ratio);

        if (ratio > 1.3) {
            System.out.println("警告: 训练时间有明显增加趋势，建议检查内存使用或减小batch size");
        } else if (ratio < 0.8) {
            System.out.println("良好: 训练时间保持稳定或有所改善");
        } else {
            System.out.println("正常: 训练时间基本稳定");
        }
    }

    /**
     * Cross-entropy loss for a single sample: -log(p[target]), with a
     * tiny epsilon so log(0) can never occur.
     */
    private double crossEntropyLossFast(double[] pred, int target) {
        double clamped = pred[target] + 1e-10;
        return -Math.log(clamped);
    }

    /**
     * Numerically stable softmax into a caller-supplied buffer. Despite
     * the name, nothing requires result == x; the caller may pass the
     * same array for a true in-place update. Computationally identical
     * to softmaxOptimized — the two are candidates for consolidation.
     */
    private void softmaxFastInPlace(double[] x, double[] result) {
        // Max logit, used as a shift for numerical stability.
        double shift = x[0];
        for (int j = 1; j < x.length; j++) {
            if (x[j] > shift) {
                shift = x[j];
            }
        }

        // Exponentiate shifted logits and accumulate the normaliser.
        double norm = 0.0;
        for (int j = 0; j < x.length; j++) {
            result[j] = Math.exp(x[j] - shift);
            norm += result[j];
        }

        // Scale to a probability distribution.
        for (int j = 0; j < x.length; j++) {
            result[j] /= norm;
        }
    }

    /**
     * Prints a human-readable summary of the network architecture and
     * current training state to stdout.
     */
    public void printModelInfo() {
        String architecture = inputSize + " -> " + hiddenSize1 + " -> " + hiddenSize2 + " -> " + outputSize;
        System.out.println("\n=== 模型信息 ===");
        System.out.println("网络结构: " + architecture);
        System.out.println("总参数数量: " + calculateTotalParameters());
        System.out.println("最佳验证准确率: " + (bestAccuracy * 100) + "%");
        System.out.println("最佳验证损失: " + bestValidationLoss);
        System.out.println("当前学习率: " + learningRate);
        System.out.println("训练步数: " + t);
    }

    /**
     * Exports every learned parameter (weights, biases, batch-norm scale/
     * shift and running statistics, input standardisation constants) plus
     * the architecture dimensions into a {@link ModelParameters} object.
     * NOTE(review): the arrays are shared by reference, not copied —
     * mutating this network after the call also mutates the exported
     * snapshot; confirm whether ModelParameters copies defensively.
     */
    public ModelParameters toModelParameters() {
        return new ModelParameters(
            W1, W2, W3,
            b1, b2, b3,
            gamma1, beta1, runningMean1, runningVar1,
            gamma2, beta2, runningMean2, runningVar2,
            dataMean, dataStd,
            inputSize, hiddenSize1, hiddenSize2, outputSize,
            bnEpsilon
        );
    }

    /**
     * Static factory: reconstructs a network from a previously exported
     * {@link ModelParameters} snapshot. Parameter arrays are adopted by
     * reference, not copied.
     */
    public static EnhancedNeuralNetwork fromModelParameters(ModelParameters params) {
        EnhancedNeuralNetwork net = new EnhancedNeuralNetwork(
            params.getInputSize(),
            params.getHiddenSize1(),
            params.getHiddenSize2(),
            params.getOutputSize()
        );

        // Learned weights and biases.
        net.W1 = params.getW1();
        net.W2 = params.getW2();
        net.W3 = params.getW3();
        net.b1 = params.getB1();
        net.b2 = params.getB2();
        net.b3 = params.getB3();

        // Batch-normalisation scale/shift and running statistics.
        net.gamma1 = params.getGamma1();
        net.beta1 = params.getBeta1();
        net.runningMean1 = params.getRunningMean1();
        net.runningVar1 = params.getRunningVar1();
        net.gamma2 = params.getGamma2();
        net.beta2 = params.getBeta2();
        net.runningMean2 = params.getRunningMean2();
        net.runningVar2 = params.getRunningVar2();
        net.bnEpsilon = params.getBnEpsilon();

        // Input standardisation constants.
        net.dataMean = params.getDataMean();
        net.dataStd = params.getDataStd();

        return net;
    }
}
