package com.dfzt.myutils.personal;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Locale;
import java.util.Random;

/**
 * A minimal fully-connected neural network with one hidden layer and sigmoid
 * activations, trained by per-sample stochastic gradient descent
 * (back-propagation). Supports saving/loading the model as UTF-8 CSV text.
 *
 * <p>Not thread-safe: training mutates the weight/bias arrays in place.
 */
public class SimpleNeuralNetwork {
    private final int inputSize;       // number of input features
    private final int hiddenSize;      // number of hidden-layer neurons
    private final int outputSize;      // number of output neurons
    private final double learningRate; // step size for gradient updates

    // Weight matrices and bias vectors.
    private final double[][] weightsInputHidden;  // [inputSize][hiddenSize]
    private final double[][] weightsHiddenOutput; // [hiddenSize][outputSize]
    private final double[] biasHidden;            // [hiddenSize]
    private final double[] biasOutput;            // [outputSize]

    private final Random random = new Random(); // for random weight initialization

    /**
     * Builds a network with the given layer sizes.
     *
     * @param inputSize    number of input features (&gt; 0)
     * @param hiddenSize   number of hidden neurons (&gt; 0)
     * @param outputSize   number of output neurons (&gt; 0)
     * @param learningRate step size used during back-propagation
     * @throws IllegalArgumentException if any layer size is not positive
     */
    public SimpleNeuralNetwork(int inputSize, int hiddenSize, int outputSize, double learningRate) {
        if (inputSize <= 0 || hiddenSize <= 0 || outputSize <= 0) {
            throw new IllegalArgumentException("layer sizes must be positive");
        }
        this.inputSize = inputSize;
        this.hiddenSize = hiddenSize;
        this.outputSize = outputSize;
        this.learningRate = learningRate;

        weightsInputHidden = new double[inputSize][hiddenSize];
        weightsHiddenOutput = new double[hiddenSize][outputSize];
        // Biases start at 0 — Java arrays are already zero-initialized,
        // so no explicit fill is needed.
        biasHidden = new double[hiddenSize];
        biasOutput = new double[outputSize];

        // Random weight initialization in [-1, 1).
        initializeWeights(weightsInputHidden);
        initializeWeights(weightsHiddenOutput);
    }

    // Fills a weight matrix with uniform random values in [-1, 1).
    private void initializeWeights(double[][] matrix) {
        for (double[] row : matrix) {
            for (int j = 0; j < row.length; j++) {
                row[j] = random.nextDouble() * 2 - 1;
            }
        }
    }

    // Sigmoid activation: maps any real input into (0, 1).
    // Without a non-linearity the two layers would collapse into a single
    // linear map and could not represent non-linear functions such as XOR.
    private double sigmoid(double x) {
        return 1 / (1 + Math.exp(-x));
    }

    // One layer's activation: out[j] = sigmoid(bias[j] + sum_i in[i] * weights[i][j]).
    // Shared by predict() and training so the forward pass exists only once.
    private double[] layerForward(double[] in, double[][] weights, double[] bias) {
        double[] out = new double[bias.length];
        for (int j = 0; j < bias.length; j++) {
            double sum = bias[j];
            for (int i = 0; i < in.length; i++) {
                sum += in[i] * weights[i][j];
            }
            out[j] = sigmoid(sum);
        }
        return out;
    }

    /**
     * Forward pass: computes the network output for one input vector.
     *
     * @param input vector of length {@code inputSize}
     * @return output vector of length {@code outputSize}, each value in (0, 1)
     */
    public double[] predict(double[] input) {
        double[] hiddenOutput = layerForward(input, weightsInputHidden, biasHidden);
        return layerForward(hiddenOutput, weightsHiddenOutput, biasOutput);
    }

    /**
     * Trains the network with plain per-sample SGD, printing periodic
     * progress (roughly 10 lines total) instead of a per-sample trace.
     * Use {@link #train(double[][], double[][], int, boolean)} with
     * {@code verbose = true} for a per-sample debug trace.
     *
     * @param inputs  training inputs, one row per sample
     * @param targets expected outputs, aligned with {@code inputs}
     * @param epochs  number of passes over the whole data set
     */
    public void train(double[][] inputs, double[][] targets, int epochs) {
        train(inputs, targets, epochs, false);
    }

    /**
     * Trains the network with plain per-sample SGD.
     *
     * @param inputs  training inputs, one row per sample
     * @param targets expected outputs, aligned with {@code inputs}
     * @param epochs  number of passes over the whole data set
     * @param verbose if true, print every epoch and each sample's output
     * @throws IllegalArgumentException if inputs and targets differ in length
     */
    public void train(double[][] inputs, double[][] targets, int epochs, boolean verbose) {
        if (inputs.length != targets.length) {
            throw new IllegalArgumentException(
                    "inputs (" + inputs.length + ") and targets (" + targets.length + ") differ in length");
        }
        // Throttle progress output to ~10 lines regardless of epoch count;
        // the original printed a multi-line trace per sample per epoch, which
        // made long training runs I/O-bound.
        int reportEvery = Math.max(1, epochs / 10);
        for (int epoch = 0; epoch < epochs; epoch++) {
            if (verbose || (epoch + 1) % reportEvery == 0) {
                System.out.println("Epoch: " + (epoch + 1) + "/" + epochs);
            }
            for (int s = 0; s < inputs.length; s++) {
                trainSample(inputs[s], targets[s], verbose);
            }
        }
    }

    // One SGD step: forward pass, back-propagate deltas, update weights/biases.
    private void trainSample(double[] input, double[] target, boolean verbose) {
        // Forward pass — hidden activations are needed again for the gradients.
        double[] hiddenOutput = layerForward(input, weightsInputHidden, biasHidden);
        double[] output = layerForward(hiddenOutput, weightsHiddenOutput, biasOutput);

        // Output-layer delta: (target - out) * sigmoid'(net), with
        // sigmoid'(net) = out * (1 - out).
        double[] outputDelta = new double[outputSize];
        for (int k = 0; k < outputSize; k++) {
            outputDelta[k] = (target[k] - output[k]) * output[k] * (1 - output[k]);
        }

        // Hidden-layer delta: back-propagate output deltas through the
        // (pre-update) hidden->output weights.
        double[] hiddenDelta = new double[hiddenSize];
        for (int j = 0; j < hiddenSize; j++) {
            double error = 0;
            for (int k = 0; k < outputSize; k++) {
                error += outputDelta[k] * weightsHiddenOutput[j][k];
            }
            hiddenDelta[j] = error * hiddenOutput[j] * (1 - hiddenOutput[j]);
        }

        // Weight/bias updates; the deltas already carry the error sign, so
        // adding moves the output toward the target.
        for (int j = 0; j < hiddenSize; j++) {
            for (int k = 0; k < outputSize; k++) {
                weightsHiddenOutput[j][k] += learningRate * outputDelta[k] * hiddenOutput[j];
            }
        }
        for (int i = 0; i < inputSize; i++) {
            for (int j = 0; j < hiddenSize; j++) {
                weightsInputHidden[i][j] += learningRate * hiddenDelta[j] * input[i];
            }
        }
        for (int k = 0; k < outputSize; k++) {
            biasOutput[k] += learningRate * outputDelta[k];
        }
        for (int j = 0; j < hiddenSize; j++) {
            biasHidden[j] += learningRate * hiddenDelta[j];
        }

        if (verbose) {
            System.out.println("Sample: " + Arrays.toString(input)
                    + ", target: " + Arrays.toString(target)
                    + ", output: " + Arrays.toString(output));
        }
    }

    /**
     * Saves the model (structure, weights, biases) as UTF-8 CSV text.
     *
     * @param filePath destination file path (overwritten if it exists)
     * @throws IOException if the file cannot be written
     */
    public void saveModel(String filePath) throws IOException {
        // Explicit UTF-8: a bare FileWriter would use the platform default
        // charset on pre-18 JVMs.
        try (PrintWriter writer = new PrintWriter(
                new OutputStreamWriter(new FileOutputStream(filePath), StandardCharsets.UTF_8))) {
            // Header line: structure parameters.
            writer.println(inputSize + "," + hiddenSize + "," + outputSize + "," + learningRate);

            // Weights, then biases — loadModel reads them in the same order.
            saveMatrix(writer, weightsInputHidden);
            saveMatrix(writer, weightsHiddenOutput);
            saveArray(writer, biasHidden);
            saveArray(writer, biasOutput);
        }
    }

    /**
     * Loads a model previously written by {@link #saveModel(String)}.
     *
     * @param filePath path of the saved model file
     * @return a network with the saved structure, weights and biases
     * @throws IOException if the file cannot be read or is truncated
     */
    public static SimpleNeuralNetwork loadModel(String filePath) throws IOException {
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(filePath), StandardCharsets.UTF_8))) {
            // Header line: structure parameters.
            String line = reader.readLine();
            if (line == null) {
                throw new EOFException("empty model file: " + filePath);
            }
            String[] header = line.split(",");
            int input = Integer.parseInt(header[0]);
            int hidden = Integer.parseInt(header[1]);
            int output = Integer.parseInt(header[2]);
            double lr = Double.parseDouble(header[3]);

            // The constructor's random weights are immediately overwritten.
            SimpleNeuralNetwork nn = new SimpleNeuralNetwork(input, hidden, output, lr);
            loadMatrix(reader, nn.weightsInputHidden);
            loadMatrix(reader, nn.weightsHiddenOutput);
            loadRow(reader, nn.biasHidden);
            loadRow(reader, nn.biasOutput);
            return nn;
        }
    }

    // Writes each matrix row as one CSV line.
    private static void saveMatrix(PrintWriter writer, double[][] matrix) {
        for (double[] row : matrix) {
            saveArray(writer, row);
        }
    }

    // Writes the array as one comma-separated line. Locale.ROOT guarantees
    // '.' as the decimal separator — the default locale could emit ',' and
    // corrupt the comma-delimited format. Also safe for empty arrays (the
    // original threw StringIndexOutOfBoundsException on length 0).
    private static void saveArray(PrintWriter writer, double[] array) {
        StringBuilder sb = new StringBuilder();
        for (double v : array) {
            if (sb.length() > 0) {
                sb.append(',');
            }
            sb.append(String.format(Locale.ROOT, "%.8f", v));
        }
        writer.println(sb);
    }

    // Reads one CSV line per matrix row.
    private static void loadMatrix(BufferedReader reader, double[][] matrix) throws IOException {
        for (double[] row : matrix) {
            loadRow(reader, row);
        }
    }

    // Reads one CSV line into dest; fails loudly on a truncated file instead
    // of throwing a NullPointerException from split(null).
    private static void loadRow(BufferedReader reader, double[] dest) throws IOException {
        String line = reader.readLine();
        if (line == null) {
            throw new EOFException("model file truncated");
        }
        String[] parts = line.split(",");
        for (int i = 0; i < dest.length; i++) {
            dest[i] = Double.parseDouble(parts[i]);
        }
    }

    /** Demo: loads (or trains and saves) an XOR model, then prints its predictions. */
    public static void main(String[] args) {
        final String MODEL_FILE = "xor_model.nn";
        try {
            SimpleNeuralNetwork nn;
            // Reuse an existing model if present; otherwise train and save one.
            File modelFile = new File(MODEL_FILE);
            if (modelFile.exists()) {
                System.out.println("Loading existing model...");
                nn = SimpleNeuralNetwork.loadModel(MODEL_FILE);
            } else {
                System.out.println("Training new model...");
                double[][] inputs = {{0, 0}, {0, 1}, {1, 0}, {1, 1}};
                double[][] targets = {{0}, {1}, {1}, {0}};

                nn = new SimpleNeuralNetwork(2, 2, 1, 0.1);
                nn.train(inputs, targets, 10000);
                nn.saveModel(MODEL_FILE);
            }

            // Test data (XOR problem).
            double[][] inputs = {
                    {0, 0},
                    {0, 1},
                    {1, 0},
                    {1, 1}
            };
            System.out.println("Predictions after training:");
            for (double[] input : inputs) {
                double[] output = nn.predict(input);
                System.out.printf("%.0f XOR %.0f = %d (%.4f)%n",
                        input[0],
                        input[1],
                        Math.round(output[0]), // rounded to the nearest class (long)
                        output[0]);            // raw sigmoid output
            }
        } catch (IOException e) {
            System.err.println("Error handling model file: " + e.getMessage());
        }
    }
}