package dateFitting;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Fits the two-parameter learning-curve model
 * {@code time_j = baseTime * (1 + theta * cumulativeTime)^alpha}
 * to observed cumulative production times using mini-batch gradient
 * descent with an Adam optimizer, L2 regularization, early stopping,
 * and multiple random restarts.
 */
public class function2 {

    /** Repetition counts for which each PartData record carries an observed total time. */
    private static final int[] REPETITIONS = {1, 3, 5, 10};

    /** Numerical-stability term for Adam's denominator (distinct from the convergence tolerance). */
    private static final double ADAM_EPSILON = 1e-8;

    /**
     * Immutable pair of fitted model parameters.
     */
    public static class LearningParameters {
        public final double alpha; // learning exponent (constrained to <= 0 during fitting)
        public final double theta; // conversion factor (constrained to >= 0 during fitting)

        public LearningParameters(double alpha, double theta) {
            this.alpha = alpha;
            this.theta = theta;
        }

        @Override
        public String toString() {
            return String.format("α: %.4f, θ: %.4f", alpha, theta);
        }
    }

    /**
     * Minimal Adam optimizer with bias correction and a fixed multiplicative
     * learning-rate decay (x0.99) applied after every update step.
     */
    private static class AdamOptimizer {
        private final double beta1;   // first-moment decay rate
        private final double beta2;   // second-moment decay rate
        private final double epsilon; // numerical-stability term in the denominator
        private final double[] m;     // first-moment (mean) estimates, one per parameter
        private final double[] v;     // second-moment (uncentered variance) estimates
        private double learningRate;
        private int iteration;        // update count, used for bias correction

        public AdamOptimizer(double initialLearningRate, double beta1, double beta2, double epsilon, int paramCount) {
            this.learningRate = initialLearningRate;
            this.beta1 = beta1;
            this.beta2 = beta2;
            this.epsilon = epsilon;
            this.m = new double[paramCount];
            this.v = new double[paramCount];
            this.iteration = 0;
        }

        /**
         * Performs one Adam step, updating {@code parameters} in place.
         *
         * @param parameters current parameter values (mutated and returned)
         * @param gradients  gradient of the loss w.r.t. each parameter
         * @return the updated {@code parameters} array
         */
        public double[] update(double[] parameters, double[] gradients) {
            iteration++;

            for (int i = 0; i < parameters.length; i++) {
                // Update biased moment estimates.
                m[i] = beta1 * m[i] + (1 - beta1) * gradients[i];
                v[i] = beta2 * v[i] + (1 - beta2) * gradients[i] * gradients[i];

                // Bias correction (moments start at zero).
                double mHat = m[i] / (1 - Math.pow(beta1, iteration));
                double vHat = v[i] / (1 - Math.pow(beta2, iteration));

                // Parameter step.
                parameters[i] -= learningRate * mHat / (Math.sqrt(vHat) + epsilon);
            }

            // Exponential learning-rate decay.
            learningRate *= 0.99;

            return parameters;
        }

        public double getLearningRate() {
            return learningRate;
        }
    }

    /**
     * Estimates the optimal parameters by running gradient descent from a grid
     * of initial points and keeping the candidate with the lowest validation error.
     *
     * @param trainData training data (must be non-empty)
     * @param testData  validation data used to select the best restart
     * @return the best learning parameters found (never {@code null})
     * @throws IllegalArgumentException if {@code trainData} is null or empty
     */
    public static LearningParameters estimateParametersMultipleStarts(
            List<DataRead.PartData> trainData,
            List<DataRead.PartData> testData) {

        if (trainData == null || trainData.isEmpty()) {
            throw new IllegalArgumentException("训练数据不能为空");
        }

        double[] initialAlphas = {-0.1, -0.2, -0.3, -0.4, -0.5};
        double[] initialThetas = {0.01, 0.05, 0.1, 0.2, 0.5};

        LearningParameters bestParams = null;
        double bestValidationError = Double.POSITIVE_INFINITY;

        for (double initialAlpha : initialAlphas) {
            for (double initialTheta : initialThetas) {
                LearningParameters params = estimateParameters(trainData, testData, initialAlpha, initialTheta);
                double validationError = evaluateDataSet(testData, params);

                // BUGFIX: always accept the first candidate. Previously, when every
                // validation error equaled the Double.MAX_VALUE sentinel (e.g. empty
                // test set), the strict '<' never fired and this method returned null.
                if (bestParams == null || validationError < bestValidationError) {
                    bestValidationError = validationError;
                    bestParams = params;
                }
            }
        }

        return bestParams;
    }

    /**
     * Estimates parameters α and θ via mini-batch gradient descent with Adam,
     * L2 regularization, early stopping on validation error, and a parameter
     * convergence check.
     *
     * @param trainData    training data (must be non-empty)
     * @param testData     validation data used for early stopping
     * @param initialAlpha starting α value
     * @param initialTheta starting θ value
     * @return the parameters with the best validation error seen
     * @throws IllegalArgumentException if {@code trainData} is null or empty
     */
    public static LearningParameters estimateParameters(
            List<DataRead.PartData> trainData,
            List<DataRead.PartData> testData,
            double initialAlpha,
            double initialTheta) {

        if (trainData == null || trainData.isEmpty()) {
            throw new IllegalArgumentException("训练数据不能为空");
        }

        double alpha = initialAlpha;
        double theta = initialTheta;
        double learningRate = 0.01;
        int maxIterations = 1000;
        double tolerance = 1e-6; // parameter-convergence tolerance
        double lambda = 0.01;    // L2 regularization coefficient
        int patience = 10;       // early-stopping patience

        // FIX: give Adam its own standard stability epsilon instead of reusing
        // the (much larger) convergence tolerance as the denominator term.
        AdamOptimizer optimizer = new AdamOptimizer(learningRate, 0.9, 0.999, ADAM_EPSILON, 2);

        double bestValidationError = Double.POSITIVE_INFINITY;
        double bestAlpha = alpha;
        double bestTheta = theta;
        int noImprovementCount = 0;

        for (int iter = 0; iter < maxIterations; iter++) {
            double prevAlpha = alpha;
            double prevTheta = theta;

            // Mini-batch gradient descent.
            int batchSize = Math.min(32, trainData.size());
            List<DataRead.PartData> batch = getRandomBatch(trainData, batchSize);

            double[] parameters = {alpha, theta};
            double[] gradients = computeGradients(batch, alpha, theta, lambda);

            // Adam update.
            parameters = optimizer.update(parameters, gradients);
            alpha = parameters[0];
            theta = parameters[1];

            // Project onto the feasible region (α <= 0, θ >= 0).
            alpha = Math.min(0, alpha);
            theta = Math.max(0, theta);

            // Validation error for early stopping.
            LearningParameters currentParams = new LearningParameters(alpha, theta);
            double validationError = evaluateDataSet(testData, currentParams);

            if (validationError < bestValidationError) {
                bestValidationError = validationError;
                bestAlpha = alpha;
                bestTheta = theta;
                noImprovementCount = 0;
            } else {
                noImprovementCount++;
            }

            // Early stopping.
            if (noImprovementCount >= patience) {
                break;
            }

            // Parameter convergence check.
            if (Math.abs(alpha - prevAlpha) < tolerance && Math.abs(theta - prevTheta) < tolerance) {
                break;
            }
        }

        // Return the parameters with the best validation error seen.
        return new LearningParameters(bestAlpha, bestTheta);
    }

    /**
     * Computes the mean gradient of the squared-error loss over a batch,
     * plus L2 regularization.
     *
     * <p>NOTE(review): the gradient treats the cumulative time inside each
     * term as a constant (it actually depends on α and θ), so this is an
     * approximation of the true gradient — preserved as-is.
     *
     * @param batch  data batch (must be non-empty)
     * @param alpha  current α value
     * @param theta  current θ value
     * @param lambda L2 regularization coefficient
     * @return gradient array {dL/dα, dL/dθ}
     */
    private static double[] computeGradients(List<DataRead.PartData> batch, double alpha, double theta, double lambda) {
        double gradientAlphaSum = 0;
        double gradientThetaSum = 0;

        for (DataRead.PartData data : batch) {
            try {
                double baseTime = data.getTime1();
                double[] actualTotalTimes = actualTotalTimes(data);

                for (int i = 0; i < REPETITIONS.length; i++) {
                    int rep = REPETITIONS[i];
                    double error = predictTotalTime(baseTime, alpha, theta, rep) - actualTotalTimes[i];

                    // Accumulate ∂/∂α and ∂/∂θ of the squared error, term by term.
                    double cumulativeTime = 0;
                    for (int j = 1; j <= rep; j++) {
                        double term = 1 + theta * cumulativeTime;

                        // d/dα of baseTime * term^α = baseTime * term^α * ln(term)
                        gradientAlphaSum += 2 * error * baseTime * Math.pow(term, alpha) * Math.log(term);

                        // d/dθ of baseTime * term^α = baseTime * α * term^(α-1) * cumulativeTime
                        gradientThetaSum += 2 * error * baseTime * alpha * Math.pow(term, alpha - 1) * cumulativeTime;

                        cumulativeTime += predictTime(baseTime, alpha, theta, cumulativeTime, j);
                    }
                }
            } catch (Exception e) {
                // Best-effort: skip records whose accessors fail, keep the batch going.
                System.err.println("计算梯度时出错: " + e.getMessage());
            }
        }

        // L2 regularization.
        gradientAlphaSum += lambda * alpha;
        gradientThetaSum += lambda * theta;

        return new double[] {
                gradientAlphaSum / batch.size(),
                gradientThetaSum / batch.size()
        };
    }

    /**
     * Reads the observed total times matching {@link #REPETITIONS} from a record.
     */
    private static double[] actualTotalTimes(DataRead.PartData data) {
        return new double[] {
                data.getTime1(),
                data.getTime3(),
                data.getTime5(),
                data.getTime10()
        };
    }

    /**
     * Predicted total time to produce {@code repetitions} units, summing the
     * per-unit prediction while accumulating elapsed time.
     */
    private static double predictTotalTime(double baseTime, double alpha, double theta, int repetitions) {
        double total = 0;
        double cumulativeTime = 0;
        for (int j = 1; j <= repetitions; j++) {
            double time = predictTime(baseTime, alpha, theta, cumulativeTime, j);
            total += time;
            cumulativeTime += time;
        }
        return total;
    }

    /**
     * Returns a random batch of the requested size.
     *
     * @param data      full data set (assumed to have at least {@code batchSize} records)
     * @param batchSize batch size
     * @return a randomly selected batch
     */
    private static List<DataRead.PartData> getRandomBatch(List<DataRead.PartData> data, int batchSize) {
        List<DataRead.PartData> shuffled = new ArrayList<>(data);
        Collections.shuffle(shuffled);
        return shuffled.subList(0, batchSize);
    }

    /**
     * Predicts the per-unit time from the base time, learning parameters and
     * accumulated production time: {@code baseTime * (1 + θ·cumulativeTime)^α}.
     *
     * @param baseTime       base (first-unit) time
     * @param alpha          α parameter
     * @param theta          θ parameter
     * @param cumulativeTime time accumulated so far
     * @param repetition     current repetition index (unused; kept for API compatibility)
     * @return predicted time for the next unit
     */
    public static double predictTime(double baseTime, double alpha, double theta, double cumulativeTime, int repetition) {
        return Math.pow(1 + theta * cumulativeTime, alpha) * baseTime;
    }

    /**
     * Root-mean-square error between predicted and observed total times for
     * one record, across the repetition counts in {@link #REPETITIONS}.
     *
     * @param data   part record
     * @param params learning parameters
     * @return RMSE, or {@code Double.MAX_VALUE} if the record cannot be read
     */
    public static double calculateError(DataRead.PartData data, LearningParameters params) {
        try {
            double baseTime = data.getTime1();
            double[] actualTotalTimes = actualTotalTimes(data);
            double sumSquaredError = 0;

            for (int i = 0; i < REPETITIONS.length; i++) {
                double predicted = predictTotalTime(baseTime, params.alpha, params.theta, REPETITIONS[i]);
                double residual = predicted - actualTotalTimes[i];
                sumSquaredError += residual * residual;
            }

            return Math.sqrt(sumSquaredError / REPETITIONS.length);
        } catch (Exception e) {
            // Best-effort: an unreadable record counts as maximally bad.
            System.err.println("计算误差时出错: " + e.getMessage());
            return Double.MAX_VALUE;
        }
    }

    /**
     * Mean per-record error over a data set.
     *
     * @param dataSet data set
     * @param params  learning parameters
     * @return mean error, or {@code Double.MAX_VALUE} for a null/empty set
     */
    public static double evaluateDataSet(List<DataRead.PartData> dataSet, LearningParameters params) {
        if (dataSet == null || dataSet.isEmpty()) {
            return Double.MAX_VALUE;
        }

        double totalError = 0;
        for (DataRead.PartData data : dataSet) {
            totalError += calculateError(data, params);
        }
        return totalError / dataSet.size();
    }

    /**
     * Prints an actual-vs-predicted comparison for one record.
     *
     * @param data   part record
     * @param params learning parameters
     */
    public static void printPredictions(DataRead.PartData data, LearningParameters params) {
        try {
            double baseTime = data.getTime1();
            double[] actualTotalTimes = actualTotalTimes(data);

            System.out.println("实际值 vs 预测值：");

            for (int i = 0; i < REPETITIONS.length; i++) {
                int rep = REPETITIONS[i];
                double actualTotal = actualTotalTimes[i];
                double actualUnitTime = actualTotal / rep;
                double predictedTotalTime = predictTotalTime(baseTime, params.alpha, params.theta, rep);

                System.out.printf("%d件: 实际总时间 %.2f (单件 %.2f), 预测总时间 %.2f\n",
                        rep, actualTotal, actualUnitTime, predictedTotalTime);
            }

        } catch (Exception e) {
            System.err.println("打印预测时出错: " + e.getMessage());
        }
    }
}