/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package team11;

import java.util.*;

/**
 * Gradient-descent optimizer for a feed-forward neural network.
 * Supports classical momentum and inverse-time learning-rate decay, and trains
 * with per-sample (stochastic) updates over a shuffled data set each epoch.
 *
 * <p>Not thread-safe: momentum history is held in mutable instance state.
 *
 * @author 朱婧雯，raise
 */
public class Gradient_Descent_NeuralNetwork {

    private double learningRate;
    private double momentum;
    private double decayRate;
    // Previous per-neuron updates, keyed "layerIndex_neuronIndex"; this is the
    // momentum history carried across samples and epochs.
    private final Map<String, double[]> previousWeightUpdates = new HashMap<>();
    private final Map<String, Double> previousBiasUpdates = new HashMap<>();

    /**
     * Creates a plain gradient-descent optimizer (no momentum, no decay).
     *
     * @param learningRate step size, must be in (0, 1]
     * @throws IllegalArgumentException if {@code learningRate} is out of range
     */
    public Gradient_Descent_NeuralNetwork(double learningRate) {
        this(learningRate, 0.0, 0.0);
    }

    /**
     * Creates an optimizer with momentum and learning-rate decay.
     *
     * @param learningRate step size, must be in (0, 1]
     * @param momentum     momentum coefficient, must be in [0, 1)
     * @param decayRate    inverse-time decay rate; values &lt;= 0 disable decay
     * @throws IllegalArgumentException if {@code learningRate} or {@code momentum}
     *                                  is out of range
     */
    public Gradient_Descent_NeuralNetwork(double learningRate, double momentum, double decayRate) {
        if (learningRate <= 0 || learningRate > 1) {
            throw new IllegalArgumentException("学习率应该在(0,1]范围内");
        }
        if (momentum < 0 || momentum >= 1) {
            throw new IllegalArgumentException("动量系数应该在[0,1)范围内");
        }

        this.learningRate = learningRate;
        this.momentum = momentum;
        this.decayRate = decayRate;
    }

    /**
     * Trains the network with stochastic gradient descent for the given number
     * of epochs, printing the MSE every {@code printInterval} epochs and on the
     * final epoch.
     *
     * @param network       network to train; must not be null
     * @param inputs        training inputs, one row per sample
     * @param targets       expected outputs, row-aligned with {@code inputs}
     * @param epochs        number of passes over the data, must be &gt; 0
     * @param printInterval epoch interval between progress reports, must be &gt; 0
     * @throws IllegalArgumentException on null/mismatched data or non-positive
     *                                  {@code epochs}/{@code printInterval}
     */
    public void train(NeuralNetwork network, double[][] inputs, double[][] targets,
            int epochs, int printInterval) {
        if (network == null) {
            throw new IllegalArgumentException("神经网络不能为null");
        }
        if (inputs == null || targets == null) {
            throw new IllegalArgumentException("输入数据和目标数据不能为null");
        }
        if (inputs.length != targets.length) {
            throw new IllegalArgumentException("输入数据和目标数据数量不匹配");
        }
        if (epochs <= 0) {
            throw new IllegalArgumentException("训练轮数必须大于0");
        }
        if (printInterval <= 0) {
            throw new IllegalArgumentException("输出间隔必须大于0");
        }

        System.out.println("开始梯度下降训练...");
        System.out.printf("学习率: %.3f, 动量: %.2f, 衰减率: %.4f, 训练轮数: %d%n",
                learningRate, momentum, decayRate, epochs);

        double currentLearningRate = learningRate;

        for (int epoch = 0; epoch < epochs; epoch++) {
            double totalError = 0;

            // Inverse-time decay: lr_t = lr / (1 + decayRate * t).
            if (decayRate > 0) {
                currentLearningRate = learningRate / (1.0 + decayRate * epoch);
            }

            // Visit the samples in a fresh random order each epoch (SGD shuffling).
            List<Integer> indices = new ArrayList<>(inputs.length);
            for (int i = 0; i < inputs.length; i++) {
                indices.add(i);
            }
            Collections.shuffle(indices);

            for (int idx : indices) {
                double[] output = network.forward(inputs[idx]);

                // FIX: accumulate squared error over every output dimension rather
                // than only output[0], so multi-output networks report a correct MSE.
                // Identical to the old behavior for single-output networks.
                for (int k = 0; k < output.length; k++) {
                    double error = output[k] - targets[idx][k];
                    totalError += error * error;
                }

                backpropagate(network, inputs[idx], targets[idx], currentLearningRate);
            }

            double mse = totalError / inputs.length;

            // FIX: also report the final epoch, so the end-of-training MSE is
            // always visible regardless of printInterval.
            if (epoch % printInterval == 0 || epoch == epochs - 1) {
                System.out.printf("Epoch %d, MSE: %.6f, 学习率: %.6f%n", epoch, mse, currentLearningRate);
            }
        }
        System.out.println("梯度下降训练完成!");
    }

    /**
     * Runs one backpropagation pass for a single sample: computes the delta of
     * every neuron (output layer first, then backwards through the hidden
     * layers) and applies the momentum weight update.
     *
     * @param input               sample input (already forwarded through the network)
     * @param target              expected output for this sample
     * @param currentLearningRate learning rate for this epoch (after decay)
     */
    private void backpropagate(NeuralNetwork network, double[] input, double[] target, double currentLearningRate) {
        Layer[] layers = network.getLayers();
        int numLayers = layers.length;

        double[][] deltas = new double[numLayers][];

        // Output layer: delta = (prediction - target) * sigmoid'(value).
        Layer outputLayer = layers[numLayers - 1];
        deltas[numLayers - 1] = new double[outputLayer.neurons.length];

        for (int i = 0; i < outputLayer.neurons.length; i++) {
            Neuron neuron = outputLayer.neurons[i];
            double error = neuron.value - target[i];
            deltas[numLayers - 1][i] = error * neuron.sigmoidDerivative();
        }

        // Hidden layers: delta_i = (sum_j w_ji * delta_j) * sigmoid'(value_i),
        // where j ranges over the neurons of the next (already-processed) layer.
        for (int l = numLayers - 2; l >= 0; l--) {
            Layer currentLayer = layers[l];
            Layer nextLayer = layers[l + 1];
            deltas[l] = new double[currentLayer.neurons.length];

            for (int i = 0; i < currentLayer.neurons.length; i++) {
                Neuron neuron = currentLayer.neurons[i];
                double sum = 0.0;

                for (int j = 0; j < nextLayer.neurons.length; j++) {
                    sum += nextLayer.neurons[j].weights[i] * deltas[l + 1][j];
                }

                deltas[l][i] = sum * neuron.sigmoidDerivative();
            }
        }

        updateWeightsWithMomentum(network, input, deltas, currentLearningRate);
    }

    /**
     * Applies the classical-momentum update to every weight and bias:
     * {@code update = momentum * previousUpdate - lr * gradient}.
     *
     * <p>Momentum history is (re)initialized lazily per neuron, so the optimizer
     * can safely be reused on a network of a different shape.
     */
    private void updateWeightsWithMomentum(NeuralNetwork network, double[] input,
            double[][] deltas, double currentLearningRate) {
        Layer[] layers = network.getLayers();

        for (int l = 0; l < layers.length; l++) {
            Layer layer = layers[l];
            // Layer 0 is fed by the raw input vector; deeper layers are fed by
            // the previous layer's activations. This unifies the two update
            // loops the original code duplicated.
            double[] prevOutputs = (l == 0) ? input : layers[l - 1].outputs;

            for (int i = 0; i < layer.neurons.length; i++) {
                Neuron neuron = layer.neurons[i];
                String key = l + "_" + i;

                // FIX: lazily (re)initialize the momentum history per neuron
                // instead of only once when the maps are empty. The old code
                // indexed a stale, possibly wrongly-sized array if train() was
                // later called with a differently-shaped network.
                double[] weightHistory = previousWeightUpdates.get(key);
                if (weightHistory == null || weightHistory.length != neuron.weights.length) {
                    weightHistory = new double[neuron.weights.length];
                    previousWeightUpdates.put(key, weightHistory);
                }
                double previousBias = previousBiasUpdates.getOrDefault(key, 0.0);

                for (int j = 0; j < neuron.weights.length; j++) {
                    double gradient = deltas[l][i] * prevOutputs[j];
                    double update = momentum * weightHistory[j] - currentLearningRate * gradient;
                    neuron.weights[j] += update;
                    weightHistory[j] = update;
                }

                double biasUpdate = momentum * previousBias - currentLearningRate * deltas[l][i];
                neuron.bias += biasUpdate;
                previousBiasUpdates.put(key, biasUpdate);
            }
        }
    }

    /** @return the base (undecayed) learning rate */
    public double getLearningRate() {
        return learningRate;
    }

    /**
     * @param learningRate new base learning rate, must be in (0, 1]
     * @throws IllegalArgumentException if out of range
     */
    public void setLearningRate(double learningRate) {
        if (learningRate <= 0 || learningRate > 1) {
            throw new IllegalArgumentException("学习率应该在(0,1]范围内");
        }
        this.learningRate = learningRate;
    }

    /** @return the momentum coefficient */
    public double getMomentum() {
        return momentum;
    }

    /**
     * @param momentum new momentum coefficient, must be in [0, 1)
     * @throws IllegalArgumentException if out of range
     */
    public void setMomentum(double momentum) {
        if (momentum < 0 || momentum >= 1) {
            throw new IllegalArgumentException("动量系数应该在[0,1)范围内");
        }
        this.momentum = momentum;
    }

    /** @return the learning-rate decay rate */
    public double getDecayRate() {
        return decayRate;
    }

    /**
     * @param decayRate new decay rate; values &lt;= 0 disable decay
     *                  (train() only applies decay when decayRate &gt; 0)
     */
    public void setDecayRate(double decayRate) {
        this.decayRate = decayRate;
    }
}
