/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */
package main;

/**
 * Main program class — implemented by the architect.
 *
 * @author ZSQ
 */

import java.util.Arrays;
import java.util.Scanner;

/**
 * Entry point: compares gradient-descent and genetic-algorithm training of a
 * fixed 2-4-4-1 neural network on a user-supplied target function z = f(x, y).
 */
public class Main {

    /**
     * Reads a target function z = f(x, y) from stdin, generates sample data,
     * and compares gradient-descent vs. genetic-algorithm training on a
     * fixed 2-4-4-1 network.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        System.out.println("=== 神经网络函数逼近系统 ===");
        System.out.println(FunctionParser.getSupportedFunctions());
        System.out.println();

        // try-with-resources: the scanner is closed even if an exception escapes
        try (Scanner scanner = new Scanner(System.in)) {
            // Read the target function expression from the user
            System.out.print("请输入目标函数（使用x和y作为变量，例如: sin(x) + cos(y) 或 x*x + y）: ");
            String functionExpression = scanner.nextLine().trim();

            // Reject syntactically or mathematically invalid expressions early
            if (!FunctionParser.isValidFunction(functionExpression)) {
                System.out.println("错误: 函数表达式无效，请检查以下问题：");
                System.out.println("1. 确保使用 x 和 y 作为变量");
                System.out.println("2. 确保函数语法正确");
                System.out.println("3. 避免除零等数学错误");
                System.out.println("示例: sin(x) + cos(y), x*x + y, exp(x) * log(y+1)");
                return;
            }

            System.out.println("目标函数: z = " + functionExpression);
            System.out.println("网络结构: 2-4-4-1");

            // Topology fixed by the requirements: 2 inputs, two hidden layers
            // of 4 neurons each, 1 output.
            int[] layerSizes = {2, 4, 4, 1};

            // Tanh is zero-centered, which tends to suit regression problems
            ActivationFunction activationFunction = new Tanh();
            LossFunction lossFunction = new MeanSquaredError();

            // Generate samples, then split 80/20 into train/test sets
            System.out.println("生成训练数据...");
            double[][] allData = DataGenerator.generateTrainingData(2000, functionExpression);
            double[][][] splitData = DataGenerator.splitTrainTest(allData, 0.8);

            double[][] trainData = getInputs(splitData[0]);
            double[][] trainTargets = getTargets(splitData[0]);
            double[][] testData = getInputs(splitData[1]);
            double[][] testTargets = getTargets(splitData[1]);

            System.out.printf("训练集: %d 样本, 测试集: %d 样本\n", trainData.length, testData.length);

            // Run both training algorithms on identical data and report results
            compareAlgorithms(layerSizes, activationFunction, lossFunction,
                           trainData, trainTargets, testData, testTargets);

        } catch (Exception e) {
            System.err.println("程序运行出错: " + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Trains two fresh networks of identical topology — one with gradient
     * descent, one with a genetic algorithm — on the same data and prints an
     * evaluation report for each.
     */
    private static void compareAlgorithms(int[] layerSizes, ActivationFunction activationFunction,
                                        LossFunction lossFunction, double[][] trainData, double[][] trainTargets,
                                        double[][] testData, double[][] testTargets) {
        System.out.println("\n=== 算法对比开始 ===");

        // Gradient descent: small learning rate with more epochs for stability
        System.out.println("\n--- 梯度下降法 ---");
        NeuralNetwork gdNetwork = new NeuralNetwork(layerSizes, activationFunction, lossFunction);
        GradientDescentTrainer gdTrainer = new GradientDescentTrainer(0.001, 2000);
        trainAndEvaluate("梯度下降法", gdNetwork, gdTrainer, trainData, trainTargets, testData, testTargets);

        // Genetic algorithm: larger population and more generations
        System.out.println("\n--- 遗传算法 ---");
        NeuralNetwork gaNetwork = new NeuralNetwork(layerSizes, activationFunction, lossFunction);
        GeneticAlgorithmTrainer gaTrainer = new GeneticAlgorithmTrainer(200, 0.1, 500);
        trainAndEvaluate("遗传算法", gaNetwork, gaTrainer, trainData, trainTargets, testData, testTargets);

        System.out.println("\n=== 算法对比完成 ===");
    }

    /**
     * Trains {@code network} with {@code trainer}, then reports wall-clock
     * training time, train/test MSE and MAE, up to ten sample predictions,
     * and the final network weights.
     */
    private static void trainAndEvaluate(String algorithmName, NeuralNetwork network, Trainer trainer,
                               double[][] trainData, double[][] trainTargets,
                               double[][] testData, double[][] testTargets) {
        long startTime = System.currentTimeMillis();

        trainer.train(network, trainData, trainTargets);

        long endTime = System.currentTimeMillis();
        long trainingTime = endTime - startTime;

        // Loss on both splits to expose over/under-fitting
        double trainLoss = network.calculateLoss(trainData, trainTargets);
        double testLoss = network.calculateLoss(testData, testTargets);

        System.out.printf("=== %s 训练结果 ===\n", algorithmName);
        System.out.printf("训练时间: %d ms\n", trainingTime);
        System.out.printf("训练集均方误差 (MSE): %.6f\n", trainLoss);
        System.out.printf("测试集均方误差 (MSE): %.6f\n", testLoss);

        double trainMAE = calculateMeanAbsoluteError(network, trainData, trainTargets);
        double testMAE = calculateMeanAbsoluteError(network, testData, testTargets);
        System.out.printf("训练集平均绝对误差 (MAE): %.6f\n", trainMAE);
        System.out.printf("测试集平均绝对误差 (MAE): %.6f\n", testMAE);

        // Show up to ten individual test-set predictions with per-sample error
        System.out.println("预测示例 (前10个测试样本):");
        System.out.println("输入(x,y)\t真实值\t预测值\t误差");
        int displayCount = Math.min(10, testData.length);

        double totalError = 0;
        for (int i = 0; i < displayCount; i++) {
            double[] input = testData[i];
            double[] prediction = network.predict(input);
            double actual = testTargets[i][0];
            double error = Math.abs(prediction[0] - actual);
            totalError += error;

            System.out.printf("(%.3f,%.3f)\t%.3f\t%.3f\t%.3f\n",
                            input[0], input[1], actual, prediction[0], error);
        }

        // Guard against a division by zero (NaN) when the test set is empty
        if (displayCount > 0) {
            double avgError = totalError / displayCount;
            System.out.printf("前%d个样本平均绝对误差: %.3f\n", displayCount, avgError);
        }
        System.out.println(network.getWeightsString());
        System.out.println("----------------------------------------");
    }

    /**
     * Computes the mean absolute error of {@code network} over {@code data}.
     * Returns 0.0 for an empty data set instead of NaN from a 0/0 division.
     */
    private static double calculateMeanAbsoluteError(NeuralNetwork network, double[][] data, double[][] targets) {
        if (data.length == 0) {
            return 0.0;
        }
        double totalAbsoluteError = 0.0;
        for (int i = 0; i < data.length; i++) {
            double[] prediction = network.predict(data[i]);
            totalAbsoluteError += Math.abs(prediction[0] - targets[i][0]);
        }
        return totalAbsoluteError / data.length;
    }

    /**
     * Extracts the (x, y) input columns from rows shaped [x, y, z].
     */
    private static double[][] getInputs(double[][] data) {
        double[][] inputs = new double[data.length][];
        for (int i = 0; i < data.length; i++) {
            // Copy the first two columns; stdlib copy over a manual loop
            inputs[i] = Arrays.copyOf(data[i], 2);
        }
        return inputs;
    }

    /**
     * Extracts the target z column from rows shaped [x, y, z].
     */
    private static double[][] getTargets(double[][] data) {
        double[][] targets = new double[data.length][1];
        for (int i = 0; i < data.length; i++) {
            targets[i][0] = data[i][2];
        }
        return targets;
    }
}