/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package neuralnetwork;

import java.io.*;
import java.util.*;

/**
 * 神经网络实现类 继承自BpNet，提供梯度下降和遗传算法两种训练方式
 *
 * @author 朱婧雯
 */
public class MyNeuralNetwork extends BpNet {

    private NeuralNetwork network;
    private Gradient_Descent_NeuralNetwork gdTrainer;
    private GeneticAlgorithm gaTrainer;

    // Dataset sizes and the sampling range for the generated function data.
    private int trainSize = 800;  // smaller training split to leave room for validation
    private int validationSize = 200;
    private int testSize = 200;
    private double minRange = -5.0;
    private double maxRange = 5.0;

    // Data splits; targets are normalized in generateData().
    private double[][] trainInputs;
    private double[][] trainTargets;
    private double[][] validationInputs;
    private double[][] validationTargets;
    private double[][] testInputs;
    private double[][] testTargets;

    // Normalization parameters captured from Data after generation, reused for
    // external/unseen targets so they map into the same [0.1, 0.9] range.
    private double trainMinTarget;
    private double trainMaxTarget;

    // Per-epoch loss history (populated by trainByGdWithEarlyStopping only).
    private List<Double> trainingHistory = new ArrayList<>();
    private List<Double> validationHistory = new ArrayList<>();

    /**
     * Builds the network (2 inputs, 2 hidden layers of 4 neurons, 1 output),
     * constructs both trainers, and generates the train/validation/test data.
     */
    @Override
    void init() {
        // Network topology: 2 inputs, 4 neurons per hidden layer, 1 output, 2 hidden layers.
        network = new NeuralNetwork(2, 4, 1, 2);

        // Gradient-descent trainer: learning rate 0.1, momentum 0.9, decay 0.001.
        gdTrainer = new Gradient_Descent_NeuralNetwork(0.1, 0.9, 0.001);

        // Genetic algorithm: population 50, mutation 0.1, crossover 0.8, elitism 2.
        gaTrainer = new GeneticAlgorithm(50, 0.1, 0.8, 2);
        gaTrainer.setAdaptiveMutation(true);

        // Generate and split the data sets.
        generateData();
    }

    /**
     * Generates (trainSize + validationSize + testSize) samples of the target
     * function {@code f}, splits them into the three sets, records the
     * normalization range, and normalizes all targets in place.
     */
    private void generateData() {
        int totalSize = trainSize + validationSize + testSize;
        double[][] allInputs = Data.generateData(totalSize, minRange, maxRange, f);
        double[][] allTargets = Data.generateTargets(allInputs, f);

        // Split into train / validation / test partitions.
        trainInputs = Arrays.copyOfRange(allInputs, 0, trainSize);
        trainTargets = Arrays.copyOfRange(allTargets, 0, trainSize);

        validationInputs = Arrays.copyOfRange(allInputs, trainSize, trainSize + validationSize);
        validationTargets = Arrays.copyOfRange(allTargets, trainSize, trainSize + validationSize);

        testInputs = Arrays.copyOfRange(allInputs, trainSize + validationSize, totalSize);
        testTargets = Arrays.copyOfRange(allTargets, trainSize + validationSize, totalSize);

        // Capture the normalization range so external targets can be mapped identically.
        trainMinTarget = Data.minTarget;
        trainMaxTarget = Data.maxTarget;

        Data.normalizeTargets(trainTargets);
        Data.normalizeTargets(validationTargets);
        Data.normalizeTargets(testTargets);

        System.out.printf("数据分割完成: %d训练样本, %d验证样本, %d测试样本%n",
                trainSize, validationSize, testSize);
    }

    /**
     * Trains with plain gradient descent (momentum enabled) for 100 epochs.
     * NOTE(review): gdTrainer.train does not populate trainingHistory /
     * validationHistory, so plotTrainingHistory() reports no data after this
     * method — confirm whether the trainer exposes a history callback.
     */
    @Override
    void trainByGd() {
        System.out.println("开始梯度下降训练（带动量）...");
        long startTime = System.currentTimeMillis();

        // Histories are cleared but never filled here (see NOTE above).
        trainingHistory.clear();
        validationHistory.clear();

        gdTrainer.train(network, trainInputs, trainTargets, 100, 20);

        long endTime = System.currentTimeMillis();
        System.out.printf("梯度下降训练完成! 耗时: %d 毫秒%n", (endTime - startTime));

        double validationLoss = calculateValidationLoss();
        System.out.printf("最终验证集损失: %.6f%n", validationLoss);
    }

    /**
     * Gradient-descent training with early stopping on the validation loss.
     * Keeps the parameter snapshot with the lowest validation loss and restores
     * it after the loop ends (or after early stop).
     *
     * @param maxEpochs maximum number of epochs to run
     * @param patience  epochs without validation improvement before stopping
     */
    public void trainByGdWithEarlyStopping(int maxEpochs, int patience) {
        System.out.println("开始梯度下降训练（带早停）...");
        long startTime = System.currentTimeMillis();

        trainingHistory.clear();
        validationHistory.clear();

        double bestValidationLoss = Double.MAX_VALUE;
        int wait = 0;
        int bestEpoch = 0;

        // Best-so-far parameter snapshot; restored after the loop.
        double[] bestParams = null;

        for (int epoch = 0; epoch < maxEpochs; epoch++) {
            double epochLoss = trainOneEpoch(network, trainInputs, trainTargets);
            trainingHistory.add(epochLoss);

            double validationLoss = calculateValidationLoss();
            validationHistory.add(validationLoss);

            if (validationLoss < bestValidationLoss) {
                bestValidationLoss = validationLoss;
                wait = 0;
                bestEpoch = epoch;
                // Defensive copy: if getAllParameters() returns the network's
                // live internal array, later epochs would mutate the snapshot.
                bestParams = network.getAllParameters().clone();
            } else {
                wait++;
            }

            if (epoch % 20 == 0) {
                System.out.printf("Epoch %d, 训练损失: %.6f, 验证损失: %.6f%n",
                        epoch, epochLoss, validationLoss);
            }

            if (wait >= patience) {
                System.out.printf("早停在第 %d 代，最佳验证损失: %.6f (第 %d 代)%n",
                        epoch, bestValidationLoss, bestEpoch);
                break;
            }
        }

        // Restore the best snapshot seen during training.
        if (bestParams != null) {
            network.setAllParameters(bestParams);
        }

        long endTime = System.currentTimeMillis();
        System.out.printf("早停训练完成! 耗时: %d 毫秒%n", (endTime - startTime));
    }

    /**
     * Returns the mean squared error of the network over the given samples.
     *
     * NOTE(review): despite the name, this only runs forward passes — no weight
     * update happens here, so the early-stopping loop above never actually
     * trains the network. Confirm whether Gradient_Descent_NeuralNetwork
     * exposes a single-epoch update (e.g. train(..., 1 epoch)) that should be
     * called here instead.
     */
    private double trainOneEpoch(NeuralNetwork network, double[][] inputs, double[][] targets) {
        double totalError = 0;

        for (int i = 0; i < inputs.length; i++) {
            double[] output = network.forward(inputs[i]);
            double error = output[0] - targets[i][0];
            totalError += error * error;
        }

        return totalError / inputs.length;
    }

    /**
     * Trains with the genetic algorithm (adaptive mutation) for 100 generations.
     */
    @Override
    void trainByGa() {
        System.out.println("开始遗传算法训练（自适应变异）...");
        long startTime = System.currentTimeMillis();

        gaTrainer.train(network, trainInputs, trainTargets, 100);

        long endTime = System.currentTimeMillis();
        System.out.printf("遗传算法训练完成! 耗时: %d 毫秒%n", (endTime - startTime));

        double validationLoss = calculateValidationLoss();
        System.out.printf("最终验证集损失: %.6f%n", validationLoss);
    }

    /**
     * Evaluates the network. With {@code null} or empty input, returns the MSE
     * on the built-in test split; otherwise evaluates the provided external
     * inputs (targets derived from {@code f}) and writes a prediction CSV.
     *
     * @param v external input rows [x, y], or null/empty for the internal test set
     * @return mean squared error on the chosen data set
     */
    @Override
    double test(double[][] v) {
        if (v == null || v.length == 0) {
            return Model.calculateMSE(network, testInputs, testTargets);
        } else {
            return testWithExternalData(v);
        }
    }

    /**
     * Computes targets for external inputs via {@code f}, normalizes them with
     * the training range, evaluates the MSE, and saves a prediction report.
     */
    private double testWithExternalData(double[][] externalInputs) {
        double[][] externalTargets = new double[externalInputs.length][1];
        for (int i = 0; i < externalInputs.length; i++) {
            double x = externalInputs[i][0];
            double y = externalInputs[i][1];
            externalTargets[i][0] = f.getValue(x, y);
        }

        normalizeExternalTargets(externalTargets);
        double loss = Model.calculateMSE(network, externalInputs, externalTargets);
        savePredictionResults(externalInputs, externalTargets);

        return loss;
    }

    /**
     * Maps raw targets into [0.1, 0.9] using the training-set range, matching
     * the scaling applied by Data.normalizeTargets during data generation.
     *
     * @throws IllegalStateException if the training range is degenerate
     */
    private void normalizeExternalTargets(double[][] targets) {
        if (trainMinTarget == trainMaxTarget) {
            throw new IllegalStateException("归一化参数无效");
        }

        for (double[] target : targets) {
            target[0] = 0.1 + 0.8 * ((target[0] - trainMinTarget) / (trainMaxTarget - trainMinTarget));
        }
    }

    /**
     * Runs the network on each input, denormalizes predictions and targets back
     * to the original scale, and writes them to the prediction CSV.
     */
    private void savePredictionResults(double[][] inputs, double[][] normalizedTargets) {
        double[][] predictions = new double[inputs.length][1];
        double[][] denormalizedPredictions = new double[inputs.length][1];
        double[][] denormalizedTargets = new double[inputs.length][1];

        for (int i = 0; i < inputs.length; i++) {
            double[] output = network.forward(inputs[i]);
            predictions[i][0] = output[0];
            denormalizedPredictions[i][0] = Data.denormalize(output[0]);
            denormalizedTargets[i][0] = Data.denormalize(normalizedTargets[i][0]);
        }

        saveResultsToFile(inputs, denormalizedPredictions, denormalizedTargets);
    }

    /**
     * Writes per-sample predictions plus absolute/relative errors to
     * improved_test_predictions.csv, followed by summary statistics.
     * I/O failures are reported to stderr rather than propagated.
     */
    private void saveResultsToFile(double[][] inputs, double[][] predictions, double[][] targets) {
        try (PrintWriter writer = new PrintWriter(new FileWriter("improved_test_predictions.csv"))) {
            writer.println("输入X,输入Y,预测值,真实值,绝对误差,相对误差(%)");
            double totalAbsoluteError = 0;
            double totalRelativeError = 0;
            int count = 0;

            for (int i = 0; i < inputs.length; i++) {
                double absoluteError = Math.abs(predictions[i][0] - targets[i][0]);
                // Relative error is undefined for a zero target; report 0 instead.
                double relativeError = targets[i][0] != 0
                        ? (absoluteError / Math.abs(targets[i][0])) * 100 : 0;

                totalAbsoluteError += absoluteError;
                totalRelativeError += relativeError;
                count++;

                writer.printf("%.6f,%.6f,%.6f,%.6f,%.6f,%.2f%%%n",
                        inputs[i][0], inputs[i][1],
                        predictions[i][0], targets[i][0], absoluteError, relativeError);
            }

            // Guard against an empty input array (avoid 0/0 = NaN statistics).
            double mae = count > 0 ? totalAbsoluteError / count : 0.0;
            double mre = count > 0 ? totalRelativeError / count : 0.0;

            writer.println();
            writer.printf("# 统计信息: 样本数=%d, 平均绝对误差=%.6f, 平均相对误差=%.2f%%",
                    count, mae, mre);

            System.out.println("改进版预测结果已保存到: improved_test_predictions.csv");
            System.out.printf("统计信息: 样本数=%d, 平均绝对误差=%.6f, 平均相对误差=%.2f%%%n",
                    count, mae, mre);

        } catch (IOException e) {
            System.err.println("保存预测结果失败: " + e.getMessage());
        }
    }

    /**
     * Prints test/validation losses and delegates a fuller evaluation to Model.
     *
     * @param methodName label used in the printed report
     */
    public void evaluatePerformance(String methodName) {
        double testLoss = test(null);
        double validationLoss = calculateValidationLoss();

        System.out.printf("%s - 性能评估:%n", methodName);
        System.out.printf("  测试集损失: %.6f%n", testLoss);
        System.out.printf("  验证集损失: %.6f%n", validationLoss);

        Model.evaluate(network, testInputs, testTargets, methodName);
    }

    /** MSE of the current network over the validation split. */
    private double calculateValidationLoss() {
        return Model.calculateMSE(network, validationInputs, validationTargets);
    }

    /**
     * Loads external test data via FileUtil ("test" source); if available,
     * evaluates it and prints a report, otherwise falls back to the built-in
     * test split.
     */
    public void testWithExternalData() {
        double[][] externalTestData = FileUtil.loadTestData("test");
        if (externalTestData != null) {
            double externalLoss = test(externalTestData);
            System.out.printf("外部测试集损失: %.6f%n", externalLoss);

            double[][] externalTargets = new double[externalTestData.length][1];
            for (int i = 0; i < externalTestData.length; i++) {
                externalTargets[i][0] = f.getValue(externalTestData[i][0], externalTestData[i][1]);
            }
            normalizeExternalTargets(externalTargets);

            String report = Model.generateReport(network, externalTestData, externalTargets, "外部测试集");
            System.out.println(report);
        } else {
            System.out.println("使用内置测试集进行评估");
            evaluatePerformance("当前模型");
        }
    }

    /**
     * Times a gradient-descent run and a genetic-algorithm run (network is
     * re-initialized between them for a fair comparison) and prints the ratio.
     */
    public void compareTrainingTime() {
        // Time the gradient-descent run.
        long gdStart = System.currentTimeMillis();
        trainByGd();
        long gdEnd = System.currentTimeMillis();
        long gdTime = gdEnd - gdStart;

        // Re-initialize so the GA starts from a fresh network.
        init();

        // Time the genetic-algorithm run.
        long gaStart = System.currentTimeMillis();
        trainByGa();
        long gaEnd = System.currentTimeMillis();
        long gaTime = gaEnd - gaStart;

        System.out.println("\n=== 训练时间比较 ===");
        System.out.printf("梯度下降法: %d 毫秒%n", gdTime);
        System.out.printf("遗传算法: %d 毫秒%n", gaTime);
        System.out.printf("速度比: %.2f%n", (double) gaTime / gdTime);
    }

    /**
     * Prints up to 10 evenly-spaced samples of the recorded training and
     * validation loss history (console "plot").
     */
    public void plotTrainingHistory() {
        if (trainingHistory.isEmpty() || validationHistory.isEmpty()) {
            System.out.println("没有训练历史数据");
            return;
        }

        System.out.println("\n训练历史:");
        System.out.println("Epoch\tTraining Loss\tValidation Loss");
        int displayCount = Math.min(10, trainingHistory.size());
        for (int i = 0; i < displayCount; i++) {
            // Even spacing across the recorded epochs.
            int epoch = i * (trainingHistory.size() / displayCount);
            if (epoch < trainingHistory.size()) {
                System.out.printf("%d\t%.6f\t%.6f%n",
                        epoch, trainingHistory.get(epoch), validationHistory.get(epoch));
            }
        }
    }

    /**
     * Serializes the network parameters, structure, normalization range, and
     * target-function name to the given file. I/O failures are reported to
     * stderr rather than propagated.
     *
     * @param filename destination file path
     */
    public void saveModel(String filename) {
        // Snapshot everything needed to restore and use the model later.
        ModelInfo modelInfo = new ModelInfo();
        modelInfo.networkParams = network.getAllParameters();
        modelInfo.networkStructure = network.getNetworkStructure();
        modelInfo.minTarget = trainMinTarget;
        modelInfo.maxTarget = trainMaxTarget;
        modelInfo.functionName = f.getClass().getSimpleName();

        try (ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(filename))) {
            oos.writeObject(modelInfo);
            System.out.println("模型已保存到: " + filename);
        } catch (IOException e) {
            System.err.println("保存模型失败: " + e.getMessage());
        }
    }

    /**
     * Serializable snapshot of the trained model (parameters, structure,
     * normalization range, target-function name).
     */
    private static class ModelInfo implements Serializable {

        private static final long serialVersionUID = 1L;
        public double[] networkParams;
        public String networkStructure;
        public double minTarget;
        public double maxTarget;
        public String functionName;
    }
}
