/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package main;

/**
 * Gradient-descent trainer for a feed-forward neural network.
 * Performs per-sample (stochastic) updates with adaptive learning-rate
 * decay, gradient clipping, and several early-stopping conditions.
 *
 * @author ZSQ
 */
public class GradientDescentTrainer implements Trainer {

    /** Current step size; adapted (decayed/restored) during training. */
    private double learningRate;
    /** Maximum number of training epochs. */
    private final int epochs;
    /** Mean loss per epoch; entries after an early stop remain 0.0. */
    private final double[] lossHistory;

    /**
     * Creates a gradient-descent trainer.
     *
     * @param learningRate initial step size; must be positive
     * @param epochs       maximum number of epochs; must be positive
     * @throws IllegalArgumentException if {@code learningRate} or {@code epochs}
     *                                  is not positive
     */
    public GradientDescentTrainer(double learningRate, int epochs) {
        // Validate up front: epochs <= 0 would otherwise throw
        // NegativeArraySizeException or silently produce an unusable trainer.
        if (learningRate <= 0.0) {
            throw new IllegalArgumentException("learningRate must be positive: " + learningRate);
        }
        if (epochs <= 0) {
            throw new IllegalArgumentException("epochs must be positive: " + epochs);
        }
        this.learningRate = learningRate;
        this.epochs = epochs;
        this.lossHistory = new double[epochs];
    }

    /**
     * Trains the network with per-sample gradient descent.
     * For each epoch: forward pass per sample, loss accumulation, and an
     * immediate backpropagation/weight update. The learning rate decays when
     * loss rises and is periodically nudged back toward its initial value.
     * Training stops early on target loss, loss plateau, or a vanishing
     * learning rate.
     *
     * @param network      network to train (weights are updated in place)
     * @param trainingData one input vector per sample; must be non-empty
     * @param targets      one target vector per sample, parallel to trainingData
     * @throws IllegalArgumentException if trainingData is null/empty or targets
     *                                  does not match its length
     */
    @Override
    public void train(NeuralNetwork network, double[][] trainingData, double[][] targets) {
        // Guard against the original silent failure mode: an empty data set
        // divided into totalLoss yields NaN and a no-op training run.
        if (trainingData == null || trainingData.length == 0) {
            throw new IllegalArgumentException("trainingData must not be empty");
        }
        if (targets == null || targets.length != trainingData.length) {
            throw new IllegalArgumentException(
                    "targets length must match trainingData length");
        }

        System.out.println("开始梯度下降训练...");
        System.out.printf("初始学习率: %.6f, 最大轮次: %d\n", learningRate, epochs);

        double initialLearningRate = learningRate;
        double bestLoss = Double.MAX_VALUE;

        for (int epoch = 0; epoch < epochs; epoch++) {
            double totalLoss = 0.0;
            int successfulUpdates = 0;

            for (int sample = 0; sample < trainingData.length; sample++) {
                // Forward pass: collect every layer's activations for backprop.
                double[][] layerOutputs = forwardPass(network, trainingData[sample]);
                double[] prediction = layerOutputs[layerOutputs.length - 1];

                // Accumulate the per-sample loss.
                double sampleLoss = network.getLossFunction().calculate(prediction, targets[sample]);
                totalLoss += sampleLoss;

                // Backpropagate and update weights immediately (per-sample SGD).
                boolean updateSuccess = backpropagate(network, trainingData[sample], targets[sample], layerOutputs);

                if (updateSuccess) {
                    successfulUpdates++;
                }
            }

            double currentLoss = totalLoss / trainingData.length;
            lossHistory[epoch] = currentLoss;

            // Adaptive learning rate: decay on loss increase, periodically
            // nudge back up (never above the initial rate).
            if (epoch > 0) {
                if (currentLoss > lossHistory[epoch - 1]) {
                    // Loss went up — shrink the step size.
                    learningRate *= 0.95;
                } else if (epoch % 100 == 0) {
                    // Periodic mild increase, capped at the initial rate.
                    learningRate = Math.min(initialLearningRate, learningRate * 1.05);
                }
            }

            // Track the best loss seen for the final report.
            if (currentLoss < bestLoss) {
                bestLoss = currentLoss;
            }

            // Progress report: every 100 epochs plus the first 10.
            if (epoch % 100 == 0 || epoch < 10) {
                double successRate = (double) successfulUpdates / trainingData.length;
                System.out.printf("Epoch %d, Loss: %.6f, LR: %.6f, Success: %.1f%%\n",
                                epoch, currentLoss, learningRate, successRate * 100);
            }

            // Early stop: target accuracy reached.
            if (currentLoss < 0.01) {
                System.out.printf("训练在第%d轮达到目标精度\n", epoch);
                break;
            }

            // Early stop: loss plateau over a 50-epoch window
            // (epoch > 100 guarantees epoch-50 is a valid index).
            if (epoch > 100 && Math.abs(currentLoss - lossHistory[epoch - 50]) < 1e-6) {
                System.out.printf("训练在第%d轮收敛\n", epoch);
                break;
            }

            // Early stop: learning rate decayed to effectively zero.
            if (learningRate < 1e-8) {
                System.out.printf("学习率过小，在第%d轮停止\n", epoch);
                break;
            }
        }

        System.out.printf("训练完成，最佳损失: %.6f\n", bestLoss);
    }

    /**
     * Runs a forward pass and returns all intermediate activations.
     *
     * @return array of length layers+1: index 0 is the raw input, index i+1
     *         is the output of layer i
     */
    private double[][] forwardPass(NeuralNetwork network, double[] inputs) {
        Layer[] layers = network.getLayers();
        double[][] layerOutputs = new double[layers.length + 1][];
        layerOutputs[0] = inputs;

        for (int i = 0; i < layers.length; i++) {
            layerOutputs[i + 1] = layers[i].calculateOutputs(layerOutputs[i]);
        }

        return layerOutputs;
    }

    /**
     * Backpropagates the error for one sample and updates weights/biases
     * in place, with delta and gradient clipping for stability.
     *
     * @param layerOutputs activations from {@link #forwardPass} for this sample
     * @return false if any weight became non-finite (NaN/Infinity) after the
     *         update, true otherwise
     */
    private boolean backpropagate(NeuralNetwork network, double[] inputs, double[] target, double[][] layerOutputs) {
        Layer[] layers = network.getLayers();
        int numLayers = layers.length;

        // Per-layer delta (error signal) storage.
        double[][] deltas = new double[numLayers][];

        // Output-layer deltas: dLoss/dOutput * activation derivative.
        // NOTE(review): assumes ActivationFunction.derivative expects the
        // neuron's post-activation output — confirm against its contract.
        Layer outputLayer = layers[numLayers - 1];
        Neuron[] outputNeurons = outputLayer.getNeurons();
        double[] output = layerOutputs[numLayers];

        deltas[numLayers - 1] = new double[outputNeurons.length];
        double[] lossDerivative = network.getLossFunction().derivative(output, target);

        for (int i = 0; i < outputNeurons.length; i++) {
            double derivative = outputLayer.getActivationFunction().derivative(outputNeurons[i].getOutput());
            deltas[numLayers - 1][i] = lossDerivative[i] * derivative;
        }

        // Hidden-layer deltas: weighted sum of downstream deltas times the
        // local activation derivative.
        for (int layerIdx = numLayers - 2; layerIdx >= 0; layerIdx--) {
            Layer currentLayer = layers[layerIdx];
            Neuron[] currentNeurons = currentLayer.getNeurons();
            Layer nextLayer = layers[layerIdx + 1];
            Neuron[] nextNeurons = nextLayer.getNeurons();

            deltas[layerIdx] = new double[currentNeurons.length];

            for (int i = 0; i < currentNeurons.length; i++) {
                double error = 0.0;
                for (int j = 0; j < nextNeurons.length; j++) {
                    error += deltas[layerIdx + 1][j] * nextNeurons[j].getWeights()[i];
                }

                double derivative = currentLayer.getActivationFunction().derivative(currentNeurons[i].getOutput());
                deltas[layerIdx][i] = error * derivative;
            }
        }

        // Apply the updates to every layer's weights and biases.
        boolean success = true;
        for (int layerIdx = 0; layerIdx < numLayers; layerIdx++) {
            Layer layer = layers[layerIdx];
            Neuron[] neurons = layer.getNeurons();
            double[] layerInput = layerOutputs[layerIdx];

            for (int neuronIdx = 0; neuronIdx < neurons.length; neuronIdx++) {
                Neuron neuron = neurons[neuronIdx];
                double delta = deltas[layerIdx][neuronIdx];

                // Clip the delta to [-1, 1] for stability.
                delta = Math.max(-1.0, Math.min(1.0, delta));

                // Update the incoming weights.
                double[] weights = neuron.getWeights();
                for (int weightIdx = 0; weightIdx < weights.length; weightIdx++) {
                    double gradient = delta * layerInput[weightIdx];
                    // Clip the per-weight gradient to [-0.5, 0.5].
                    gradient = Math.max(-0.5, Math.min(0.5, gradient));
                    weights[weightIdx] -= learningRate * gradient;

                    // Flag divergence: Double.isNaN alone misses weights that
                    // blow up to +/-Infinity, so check for any non-finite value.
                    if (!Double.isFinite(weights[weightIdx])) {
                        success = false;
                    }
                }

                // Update the bias with the same clipping bound.
                double biasGradient = Math.max(-0.5, Math.min(0.5, delta));
                neuron.setBias(neuron.getBias() - learningRate * biasGradient);
            }
        }

        return success;
    }

    /**
     * Returns the per-epoch loss history.
     * A defensive copy is returned so callers cannot mutate the trainer's
     * internal state; entries past an early stop are 0.0.
     */
    @Override
    public double[] getLossHistory() {
        return lossHistory.clone();
    }

    /** Returns the human-readable name of this training algorithm. */
    @Override
    public String getName() { return "梯度下降法"; }
}