/*
 * To change this license header, choose License Headers in Project Properties.
 * To change this template file, choose Tools | Templates
 * and open the template in the editor.
 */

/**
 *
 * @author chenguo
 */


import java.util.*;

/**
 * 神经网络函数逼近系统主程序
 */
/**
 * Entry point for the neural-network function-approximation experiment.
 * Generates sample data for a target function, trains two networks (one via
 * gradient descent, one via a genetic algorithm), then compares their test
 * loss and training time and prints a recommendation.
 */
public class NeuralNetworkApp {

    /**
     * Bounds used both to generate inputs and to normalize them to [-1, 1].
     * These MUST match: the original code generated data on [-20, 20] but
     * normalized with [-2, 2], which scaled inputs to [-10, 10] instead of
     * the [-1, 1] range the networks expect.
     */
    private static final double INPUT_MIN = -20;
    private static final double INPUT_MAX = 20;

    public static void main(String[] args) {
        System.out.println("=== 神经网络函数逼近系统启动 ===");

        try {
            // 1. Generate training data.
            System.out.println("生成训练数据...");
            List<double[]> trainInputsList = DataGenerator.generateInputs(1000, INPUT_MIN, INPUT_MAX);
            List<Double> trainTargetsList = DataGenerator.generateTargets(
                trainInputsList, DataGenerator.TARGET_FUNCTION_1);

            // Compute the target-value range. Seed with +/- infinity:
            // Double.MIN_VALUE is the smallest POSITIVE double, so using it as
            // the initial maximum silently breaks for all-negative targets.
            double inputMin = INPUT_MIN, inputMax = INPUT_MAX;
            double targetMin = Double.POSITIVE_INFINITY, targetMax = Double.NEGATIVE_INFINITY;
            for (Double target : trainTargetsList) {
                if (target < targetMin) targetMin = target;
                if (target > targetMax) targetMax = target;
            }
            System.out.printf("目标值范围: [%.2f, %.2f]%n", targetMin, targetMax);

            // Convert to array form and normalize to [-1, 1].
            double[][] trainInputs = normalizeInputs(listTo2DArray(trainInputsList), inputMin, inputMax);
            double[] trainTargets = normalizeTargets(listTo1DArray(trainTargetsList), targetMin, targetMax);

            // Sanity check: verify the normalized data actually lies in [-1, 1].
            System.out.println("\n=== 数据标准化验证 ===");
            checkNormalization(trainInputs, trainTargets, trainInputsList, trainTargetsList, inputMin, inputMax, targetMin, targetMax);

            // 2. Generate test data over the SAME range, normalized with the
            //    SAME statistics as the training set (never refit on test data).
            List<double[]> testInputsList = DataGenerator.generateInputs(300, INPUT_MIN, INPUT_MAX);
            List<Double> testTargetsList = DataGenerator.generateTargets(
                testInputsList, DataGenerator.TARGET_FUNCTION_1);

            double[][] testInputs = normalizeInputs(listTo2DArray(testInputsList), inputMin, inputMax);
            double[] testTargets = normalizeTargets(listTo1DArray(testTargetsList), targetMin, targetMax);

            // 3. Train with gradient descent.
            System.out.println("\n=== 梯度下降法训练 ===");
            long gdStartTime = System.currentTimeMillis();

            GradientDescentNN gdNetwork = new GradientDescentNN(0.001); // learning rate
            gdNetwork.train(trainInputs, trainTargets, 2000);           // epochs

            long gdTrainingTime = System.currentTimeMillis() - gdStartTime;
            System.out.printf("梯度下降法训练完成，耗时: %dms%n", gdTrainingTime);

            // 4. Train with the genetic algorithm.
            System.out.println("\n=== 遗传算法训练 ===");
            long gaStartTime = System.currentTimeMillis();

            GeneticAlgorithmTrainer gaTrainer = new GeneticAlgorithmTrainer(60, 0.25); // population, mutation rate
            NeuralNetwork gaNetwork = gaTrainer.train(trainInputs, trainTargets, 200); // generations

            long gaTrainingTime = System.currentTimeMillis() - gaStartTime;
            System.out.printf("遗传算法训练完成，耗时: %dms%n", gaTrainingTime);

            // 5. Compare performance on the held-out test set (normalized space).
            System.out.println("\n=== 算法性能比较 ===");

            double gdLoss = gdNetwork.calculateLoss(testInputs, testTargets);
            double gaLoss = gaNetwork.calculateLoss(testInputs, testTargets);

            System.out.printf("梯度下降法 - 训练时间: %dms, 测试损失: %.6f%n", gdTrainingTime, gdLoss);
            System.out.printf("遗传算法   - 训练时间: %dms, 测试损失: %.6f%n", gaTrainingTime, gaLoss);

            // Spot-check a few concrete samples.
            System.out.println("\n=== 样本测试 ===");
            testSamples(gdNetwork, gaNetwork, inputMin, inputMax, targetMin, targetMax);

            // Print which algorithm to prefer.
            System.out.println("\n=== 算法推荐 ===");
            printAlgorithmRecommendation(gdLoss, gaLoss, gdTrainingTime, gaTrainingTime);

            System.out.println("=== 实验完成 ===");

        } catch (Exception e) {
            System.err.println("程序执行失败: " + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Prints the first few samples before/after normalization and the overall
     * normalized ranges, so a mismatch between data range and normalization
     * bounds is visible immediately.
     *
     * @param normalizedInputs  inputs after normalization, parallel to {@code originalInputs}
     * @param normalizedTargets targets after normalization, parallel to {@code originalTargets}
     */
    private static void checkNormalization(double[][] normalizedInputs, double[] normalizedTargets,
                                         List<double[]> originalInputs, List<Double> originalTargets,
                                         double inputMin, double inputMax, double targetMin, double targetMax) {
        System.out.println("检查前5个样本的标准化:");
        for (int i = 0; i < Math.min(5, originalInputs.size()); i++) {
            double[] origInput = originalInputs.get(i);
            double origTarget = originalTargets.get(i);
            double[] normInput = normalizedInputs[i];
            double normTarget = normalizedTargets[i];

            System.out.printf("样本%d: 输入(%.1f,%.1f)->(%.3f,%.3f), 目标%.3f->%.3f%n",
                i, origInput[0], origInput[1], normInput[0], normInput[1], origTarget, normTarget);
        }

        // Seed extrema with +/- infinity (not Double.MIN_VALUE, which is the
        // smallest positive double and would corrupt the max for negative data).
        double minNormInput = Double.POSITIVE_INFINITY, maxNormInput = Double.NEGATIVE_INFINITY;
        double minNormTarget = Double.POSITIVE_INFINITY, maxNormTarget = Double.NEGATIVE_INFINITY;

        for (double[] input : normalizedInputs) {
            for (double val : input) {
                if (val < minNormInput) minNormInput = val;
                if (val > maxNormInput) maxNormInput = val;
            }
        }
        for (double target : normalizedTargets) {
            if (target < minNormTarget) minNormTarget = target;
            if (target > maxNormTarget) maxNormTarget = target;
        }

        System.out.printf("标准化后输入范围: [%.3f, %.3f]%n", minNormInput, maxNormInput);
        System.out.printf("标准化后目标范围: [%.3f, %.3f]%n", minNormTarget, maxNormTarget);
    }

    /**
     * Evaluates both trained networks on a handful of fixed 2-D sample points,
     * printing the normalized inputs, the raw (normalized-space) predictions,
     * and the denormalized predictions next to the true function value.
     */
    private static void testSamples(NeuralNetwork gdNetwork, NeuralNetwork gaNetwork,
                                   double inputMin, double inputMax, double targetMin, double targetMax) {
        double[][] testSamples = {{0, 0}, {10, 10}, {-10, 10}, {10, -10}, {3, -3}};

        for (double[] sample : testSamples) {
            double actual = DataGenerator.TARGET_FUNCTION_1.apply(sample);

            // Normalize the input, predict in normalized space, then map the
            // prediction back to the original target scale.
            double[] normalizedInput = normalizeInput(sample, inputMin, inputMax);
            double gdNormalizedPred = gdNetwork.predict(normalizedInput);
            double gaNormalizedPred = gaNetwork.predict(normalizedInput);
            double gdPred = denormalizeTarget(gdNormalizedPred, targetMin, targetMax);
            double gaPred = denormalizeTarget(gaNormalizedPred, targetMin, targetMax);

            System.out.printf("样本(%.1f,%.1f):%n", sample[0], sample[1]);
            System.out.printf("  标准化输入: (%.3f, %.3f)%n", normalizedInput[0], normalizedInput[1]);
            System.out.printf("  标准化预测: GD=%.3f, GA=%.3f%n", gdNormalizedPred, gaNormalizedPred);
            System.out.printf("  反标准化: 真实=%7.3f, GD预测=%7.3f, GA预测=%7.3f%n",
                actual, gdPred, gaPred);
        }
    }

    /**
     * Prints a recommendation based on test loss (primary criterion) and
     * training time (tiebreaker when one algorithm wins on both).
     */
    private static void printAlgorithmRecommendation(double gdLoss, double gaLoss,
                                                   long gdTime, long gaTime) {
        if (gdLoss < gaLoss && gdTime < gaTime) {
            System.out.println("✅ 推荐梯度下降法 - 精度更高且训练更快");
        } else if (gaLoss < gdLoss && gaTime < gdTime) {
            System.out.println("✅ 推荐遗传算法 - 精度更高且训练更快");
        } else if (gdLoss < gaLoss) {
            System.out.println("✅ 推荐梯度下降法 - 精度更高");
        } else if (gaLoss < gdLoss) {
            System.out.println("✅ 推荐遗传算法 - 精度更高");
        } else {
            System.out.println("⚠️ 两种算法性能相近");
        }
    }

    /**
     * Normalizes every input vector to [-1, 1] using the given bounds.
     * Handles vectors of any dimension (previously hard-coded to 2).
     */
    private static double[][] normalizeInputs(double[][] inputs, double min, double max) {
        double[][] normalized = new double[inputs.length][];
        for (int i = 0; i < inputs.length; i++) {
            normalized[i] = normalizeInput(inputs[i], min, max);
        }
        return normalized;
    }

    /**
     * Linearly maps each component of {@code input} from [min, max] to [-1, 1].
     * A degenerate range (min == max) is treated as span 1 to avoid NaN.
     */
    private static double[] normalizeInput(double[] input, double min, double max) {
        double range = max - min;
        if (range == 0) range = 1; // constant data: avoid division by zero
        double[] normalized = new double[input.length];
        for (int i = 0; i < input.length; i++) {
            normalized[i] = (input[i] - min) / range * 2 - 1;
        }
        return normalized;
    }

    /**
     * Linearly maps each target from [min, max] to [-1, 1].
     * A degenerate range (min == max) is treated as span 1 to avoid NaN.
     */
    private static double[] normalizeTargets(double[] targets, double min, double max) {
        double range = max - min;
        if (range == 0) range = 1; // constant targets: avoid division by zero
        double[] normalized = new double[targets.length];
        for (int i = 0; i < targets.length; i++) {
            normalized[i] = (targets[i] - min) / range * 2 - 1;
        }
        return normalized;
    }

    /**
     * Inverse of {@link #normalizeTargets}: maps a value from [-1, 1] back to
     * the original [min, max] scale.
     */
    private static double denormalizeTarget(double normalized, double min, double max) {
        double range = max - min;
        return (normalized + 1) / 2 * range + min;
    }

    /** Converts a list of rows into a 2-D array (rows are shared, not copied). */
    private static double[][] listTo2DArray(List<double[]> list) {
        return list.toArray(new double[0][]);
    }

    /** Unboxes a {@code List<Double>} into a primitive double array. */
    private static double[] listTo1DArray(List<Double> list) {
        double[] array = new double[list.size()];
        for (int i = 0; i < list.size(); i++) {
            array[i] = list.get(i);
        }
        return array;
    }
}