package cn.myeasyai.neuralnet;

/**
 * A minimal three-layer (input / hidden / output) feed-forward neural network
 * with sigmoid activation, trained by stochastic gradient descent with
 * backpropagation. Weights and biases are initialized uniformly in (-0.5, 0.5)
 * via {@code Math.random()} to avoid the all-zero symmetry that stalls training.
 *
 * <p>Not thread-safe: {@link #train} mutates the weight matrices in place.
 */
public class NeuralNetwork {
    // Number of input-layer nodes
    private final int inputNodes;
    // Number of hidden-layer nodes
    private final int hiddenNodes;
    // Number of output-layer nodes
    private final int outputNodes;
    // Weights from input to hidden layer; [hidden][input] orientation
    private final double[][] weightsInputHidden;
    // Weights from hidden to output layer; [output][hidden] orientation
    private final double[][] weightsHiddenOutput;
    // Per-node bias for the hidden layer
    private final double[] hiddenBias;
    // Per-node bias for the output layer
    private final double[] outputBias;

    /**
     * Builds the network and randomly initializes all weights and biases
     * in the range (-0.5, 0.5).
     *
     * @param inputNodes  number of input-layer nodes
     * @param hiddenNodes number of hidden-layer nodes
     * @param outputNodes number of output-layer nodes
     */
    public NeuralNetwork(int inputNodes, int hiddenNodes, int outputNodes) {
        this.inputNodes = inputNodes;
        this.hiddenNodes = hiddenNodes;
        this.outputNodes = outputNodes;

        this.weightsInputHidden = new double[hiddenNodes][inputNodes];
        this.weightsHiddenOutput = new double[outputNodes][hiddenNodes];
        this.hiddenBias = new double[hiddenNodes];
        this.outputBias = new double[outputNodes];

        randomizeWeights(weightsInputHidden);
        randomizeWeights(weightsHiddenOutput);
        randomizeBiases(hiddenBias);
        randomizeBiases(outputBias);
    }

    /** Fills a weight matrix with uniform random values in (-0.5, 0.5). */
    private void randomizeWeights(double[][] weights) {
        for (int i = 0; i < weights.length; i++) {
            for (int j = 0; j < weights[i].length; j++) {
                weights[i][j] = Math.random() - 0.5;
            }
        }
    }

    /** Fills a bias vector with uniform random values in (-0.5, 0.5). */
    private void randomizeBiases(double[] biases) {
        for (int i = 0; i < biases.length; i++) {
            biases[i] = Math.random() - 0.5;
        }
    }

    /**
     * Computes one layer's activations: {@code sigmoid(weights . input + bias)}.
     * Shared by {@link #feedForward} and {@link #train} so the forward pass
     * is defined in exactly one place.
     *
     * @param input   activations of the previous layer
     * @param weights weight matrix, one row per node of this layer
     * @param bias    per-node bias of this layer
     * @return activations of this layer
     */
    private double[] computeLayer(double[] input, double[][] weights, double[] bias) {
        double[] outputs = new double[weights.length];
        for (int i = 0; i < weights.length; i++) {
            double sum = bias[i];
            for (int j = 0; j < input.length; j++) {
                sum += input[j] * weights[i][j];
            }
            outputs[i] = sigmoid(sum);
        }
        return outputs;
    }

    /** Throws if the input vector does not match the input-layer size. */
    private void checkInput(double[] input) {
        if (input.length != inputNodes) {
            throw new IllegalArgumentException(
                    "Expected " + inputNodes + " inputs but got " + input.length);
        }
    }

    /**
     * Runs a forward pass: input -> hidden -> output, with a sigmoid
     * non-linearity at each layer.
     *
     * @param input input vector of length {@code inputNodes}
     * @return output-layer activations, length {@code outputNodes}
     * @throws IllegalArgumentException if {@code input} has the wrong length
     */
    public double[] feedForward(double[] input) {
        checkInput(input);
        double[] hiddenOutputs = computeLayer(input, weightsInputHidden, hiddenBias);
        return computeLayer(hiddenOutputs, weightsHiddenOutput, outputBias);
    }

    /** Logistic sigmoid activation: 1 / (1 + e^-x). */
    private double sigmoid(double x) {
        return 1 / (1 + Math.exp(-x));
    }

    /**
     * Derivative of the sigmoid expressed in terms of its OUTPUT:
     * given y = sigmoid(x), returns dy/dx = y * (1 - y).
     * Callers must pass an already-activated value, not the raw sum.
     */
    private double sigmoidDerivative(double x) {
        return x * (1 - x);
    }

    /**
     * Performs one step of stochastic gradient descent on a single sample:
     * forward pass, backpropagation of the error, and in-place update of
     * all weights and biases.
     *
     * @param input        input vector of length {@code inputNodes}
     * @param target       desired output vector of length {@code outputNodes}
     * @param learningRate step size for the gradient update
     * @throws IllegalArgumentException if {@code input} has the wrong length
     */
    public void train(double[] input, double[] target, double learningRate) {
        checkInput(input);

        // Forward pass (same computation as feedForward, but we need the
        // intermediate hidden activations for backpropagation).
        double[] hiddenOutputs = computeLayer(input, weightsInputHidden, hiddenBias);
        double[] finalOutputs = computeLayer(hiddenOutputs, weightsHiddenOutput, outputBias);

        // Output-layer error: target minus prediction.
        double[] outputErrors = new double[outputNodes];
        for (int i = 0; i < outputNodes; i++) {
            outputErrors[i] = target[i] - finalOutputs[i];
        }

        // Hidden-layer error: output errors propagated back through the
        // hidden-to-output weights.
        double[] hiddenErrors = new double[hiddenNodes];
        for (int i = 0; i < hiddenNodes; i++) {
            double error = 0;
            for (int j = 0; j < outputNodes; j++) {
                error += outputErrors[j] * weightsHiddenOutput[j][i];
            }
            hiddenErrors[i] = error;
        }

        // Update hidden->output weights and output biases.
        // Gradient = error * sigmoid'(activation) * upstream activation.
        for (int i = 0; i < outputNodes; i++) {
            double delta = learningRate * outputErrors[i] * sigmoidDerivative(finalOutputs[i]);
            for (int j = 0; j < hiddenNodes; j++) {
                weightsHiddenOutput[i][j] += delta * hiddenOutputs[j];
            }
            outputBias[i] += delta;
        }

        // Update input->hidden weights and hidden biases.
        for (int i = 0; i < hiddenNodes; i++) {
            double delta = learningRate * hiddenErrors[i] * sigmoidDerivative(hiddenOutputs[i]);
            for (int j = 0; j < inputNodes; j++) {
                weightsInputHidden[i][j] += delta * input[j];
            }
            hiddenBias[i] += delta;
        }
    }

    /** Demo: trains a 2-3-1 network on XOR and prints the learned outputs. */
    public static void main(String[] args) {
        NeuralNetwork nn = new NeuralNetwork(2, 3, 1);

        double[][] inputs = {
                {0, 0}, {0, 1}, {1, 0}, {1, 1}
        };
        double[][] targets = {
                {0}, {1}, {1}, {0}
        };

        int epochs = 100000;
        double learningRate = 0.1;

        for (int i = 0; i < epochs; i++) {
            for (int j = 0; j < inputs.length; j++) {
                nn.train(inputs[j], targets[j], learningRate);
            }
        }

        for (double[] input : inputs) {
            double[] output = nn.feedForward(input);
            System.out.println("Input: " + input[0] + ", " + input[1] + " Output: " + output[0]);
        }

    }

}
