package dateFitting;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class function3 {
    /**
     * Immutable value holder for the learning-curve model parameters.
     */
    public static class LearningParameters {
        /** Power-law exponent of the learning curve (typically negative). */
        public final double alpha;
        /** Asymptotic minimum time ratio M (floor of the unit-time ratio). */
        public final double m;

        public LearningParameters(double alphaValue, double mValue) {
            this.alpha = alphaValue;
            this.m = mValue;
        }

        @Override
        public String toString() {
            // NOTE: String.format uses the default locale for the decimal separator.
            return String.format("α: %.4f, M: %.4f (固定参数)", alpha, m);
        }
    }

    // NOTE: AdamOptimizer and estimateParameters are no longer invoked by
    // estimateParametersMultipleStarts. They are retained so the optimization
    // path can be restored later or reused for other experiments.

    /**
     * Minimal Adam optimizer over a flat parameter vector, with bias-corrected
     * first/second moment estimates and a fixed multiplicative learning-rate
     * decay (x0.995) applied after every update call.
     */
    private static class AdamOptimizer {
        private final double beta1;
        private final double beta2;
        private final double epsilon_adam;
        private final double[] m_optimizer_momentum;  // first-moment (mean) estimates
        private final double[] v_optimizer_variance;  // second-moment (uncentered variance) estimates
        private double learningRate;
        private int iteration;

        public AdamOptimizer(double initialLearningRate, double beta1, double beta2, double epsilonForAdam, int paramCount) {
            this.learningRate = initialLearningRate;
            this.beta1 = beta1;
            this.beta2 = beta2;
            this.epsilon_adam = epsilonForAdam;
            this.m_optimizer_momentum = new double[paramCount];
            this.v_optimizer_variance = new double[paramCount];
            this.iteration = 0;
        }

        /**
         * Performs one Adam step, mutating and returning {@code parameters}.
         */
        public double[] update(double[] parameters, double[] gradients) {
            iteration++;
            // Bias-correction denominators are identical for all indices; hoist them.
            final double biasCorrection1 = 1 - Math.pow(beta1, iteration);
            final double biasCorrection2 = 1 - Math.pow(beta2, iteration);
            for (int i = 0; i < parameters.length; i++) {
                final double g = gradients[i];
                m_optimizer_momentum[i] = beta1 * m_optimizer_momentum[i] + (1 - beta1) * g;
                v_optimizer_variance[i] = beta2 * v_optimizer_variance[i] + (1 - beta2) * g * g;
                final double mHat = m_optimizer_momentum[i] / biasCorrection1;
                final double vHat = v_optimizer_variance[i] / biasCorrection2;
                parameters[i] -= learningRate * mHat / (Math.sqrt(vHat) + epsilon_adam);
            }
            learningRate *= 0.995; // simple exponential decay per step
            return parameters;
        }
    }

    /**
     * Returns the fixed learning parameters fitted from averaged-ratio data
     * (alpha = -0.70, M = 0.20). The multi-start optimization search is
     * deliberately bypassed in this version.
     *
     * @param trainData training data (accepted for signature compatibility; unused)
     * @param testData  test data (accepted for signature compatibility; unused)
     * @return the fixed {@link LearningParameters}
     */
    public static LearningParameters estimateParametersMultipleStarts(
            List<DataRead.PartData> trainData,
            List<DataRead.PartData> testData) {

        final double fittedAlpha = -0.70;
        final double fittedM = 0.20;
        System.out.println("Function3: 使用基于平均比率数据拟合的固定参数: α=" + fittedAlpha + ", M=" + fittedM);
        return new LearningParameters(fittedAlpha, fittedM);
    }

    // estimateParameters is now only a helper kept for potential real
    // optimization runs; in the current configuration it is never called by
    // estimateParametersMultipleStarts.

    /**
     * Legacy optimization entry point. The Adam-based fitting loop has been
     * removed from the active code path, so this method merely warns and
     * echoes the initial parameter guesses back to the caller.
     *
     * @param trainData    training data (unused)
     * @param testData     test data (unused)
     * @param initialAlpha starting alpha value, returned unchanged
     * @param initialM     starting M value, returned unchanged
     * @return parameters built from the initial guesses
     */
    public static LearningParameters estimateParameters(
            List<DataRead.PartData> trainData,
            List<DataRead.PartData> testData,
            double initialAlpha,
            double initialM) {

        System.out.println("警告: estimateParameters 被调用，但在当前配置下，" +
                "estimateParametersMultipleStarts 应直接返回固定参数。");
        // The original Adam optimization loop was bypassed; simply echo the
        // initial guesses back to the caller.
        return new LearningParameters(initialAlpha, initialM);
    }

    /**
     * Gradient of the training loss with respect to (alpha, m). Retained to
     * support estimateParameters, but the detailed computation was removed
     * along with the optimization loop, so this currently yields a zero
     * gradient vector.
     */
    private static double[] computeGradients(List<DataRead.PartData> batch,
                                             double alpha, double m,
                                             double l2Lambda,
                                             double ratioLambdaCoefficient,
                                             double targetRatio20,
                                             double targetRatio50) {
        // Placeholder: the bypassed optimizer never invokes this method.
        return new double[]{0.0, 0.0};
    }

    /**
     * Returns a random sample of at most {@code batchSize} elements from
     * {@code data}. The input list itself is never modified; a shuffled copy
     * is made first. Null or empty input yields an empty list.
     */
    private static List<DataRead.PartData> getRandomBatch(List<DataRead.PartData> data, int batchSize) {
        if (data == null || data.isEmpty()) {
            return Collections.emptyList();
        }
        List<DataRead.PartData> shuffledCopy = new ArrayList<>(data);
        Collections.shuffle(shuffledCopy, new Random());
        int limit = Math.min(batchSize, shuffledCopy.size());
        return shuffledCopy.subList(0, limit);
    }

    /**
     * Predicts the time for a single repetition using the power-law learning
     * model: T(r) = (m + (1 - m) * r^alpha) * baseTime.
     * Repetitions <= 0 fall back to the base time unchanged.
     *
     * @param baseTime   time for the first unit (T1)
     * @param alpha      learning-curve exponent
     * @param m          asymptotic minimum time ratio
     * @param repetition 1-based repetition index
     * @return predicted time for that repetition
     */
    public static double predictTime(double baseTime, double alpha, double m, int repetition) {
        if (repetition <= 0) {
            return baseTime;
        }
        double powerTerm = Math.pow(repetition, alpha);
        if (!Double.isFinite(powerTerm)) {
            // Defensive fallback for degenerate alpha values: a negative alpha
            // past the first repetition decays toward 0; everything else
            // collapses to the neutral factor 1.
            powerTerm = (alpha < 0 && repetition > 1) ? 0.0 : 1.0;
        }
        return (m + (1 - m) * powerTerm) * baseTime;
    }

    /**
     * Computes the RMSE between actual and predicted cumulative total times
     * for one part at the 1, 3, 5 and 10 repetition checkpoints.
     *
     * Non-finite actual values are skipped; a non-finite prediction is
     * penalized with (10 * actualTotal)^2.
     *
     * @param data   part data supplying T1/T3/T5/T10 totals
     * @param params model parameters; null yields Double.MAX_VALUE
     * @return RMSE over valid checkpoints, or Double.MAX_VALUE if none are
     *         valid or an exception occurs
     */
    public static double calculateError(DataRead.PartData data, LearningParameters params) {
        if (params == null) return Double.MAX_VALUE;
        try {
            double baseTime = data.getTime1();
            double alpha = params.alpha;
            double m = params.m;
            double sumSquaredError = 0;
            int validRepCount = 0;

            int[] repetitions = {1, 3, 5, 10};
            double[] actualTotalTimes = {
                    data.getTime1(),
                    data.getTime3(),
                    data.getTime5(),
                    data.getTime10()
            };

            // The checkpoints are ascending, so accumulate the predicted
            // cumulative time incrementally instead of re-summing from
            // repetition 1 at every checkpoint (same additions, same order).
            double predictedTotalTime = 0;
            int nextUnit = 1;
            for (int i = 0; i < repetitions.length; i++) {
                int rep = repetitions[i];
                while (nextUnit <= rep) {
                    predictedTotalTime += predictTime(baseTime, alpha, m, nextUnit);
                    nextUnit++;
                }

                double actualTotal = actualTotalTimes[i];
                if (Double.isNaN(actualTotal) || Double.isInfinite(actualTotal)) {
                    continue; // missing/corrupt measurement: skip this checkpoint
                }
                if (Double.isNaN(predictedTotalTime) || Double.isInfinite(predictedTotalTime)) {
                    // Heavy penalty when the model prediction blows up.
                    sumSquaredError += Math.pow(actualTotal * 10, 2);
                    validRepCount++;
                    continue;
                }

                sumSquaredError += Math.pow(predictedTotalTime - actualTotal, 2);
                validRepCount++;
            }

            return validRepCount > 0 ? Math.sqrt(sumSquaredError / validRepCount) : Double.MAX_VALUE;
        } catch (Exception e) {
            System.err.println("计算误差时出错: " + e.getMessage() + " for params " + params + " and data " + data);
            return Double.MAX_VALUE;
        }
    }

    /**
     * Averages the per-part error (see {@link #calculateError}) over a data set.
     *
     * @param dataSet parts to evaluate; null/empty yields 0.0
     * @param params  model parameters; null yields Double.MAX_VALUE
     * @return mean error over parts with a usable error value, or
     *         Double.MAX_VALUE when params are null or no part is usable
     */
    public static double evaluateDataSet(List<DataRead.PartData> dataSet, LearningParameters params) {
        if (dataSet == null || dataSet.isEmpty()) {
            return 0.0;
        }
        if (params == null) {
            System.err.println("错误 (function3.evaluateDataSet): 学习参数为null。");
            return Double.MAX_VALUE;
        }

        double totalError = 0;
        int count = 0;
        for (DataRead.PartData data : dataSet) {
            double error = calculateError(data, params);
            // BUGFIX: calculateError signals failure with Double.MAX_VALUE,
            // which IS finite, so isFinite alone never filtered it and a
            // single failed part would dominate the average. Exclude the
            // sentinel explicitly as well.
            if (Double.isFinite(error) && error != Double.MAX_VALUE) {
                totalError += error;
                count++;
            }
        }
        return count > 0 ? totalError / count : Double.MAX_VALUE;
    }

    /**
     * Prints actual vs. predicted total and average unit times for one part
     * at 1, 3, 5 and 10 repetitions, followed by the theoretical minimum unit
     * time (T1 * M) and the model's predicted unit-time ratios at 20 and 50
     * repetitions. Output goes to System.out; failures are logged to
     * System.err and swallowed (display-only method).
     */
    public static void printPredictions(DataRead.PartData data, LearningParameters params) {
        if (params == null) {
            System.err.println("错误 (function3.printPredictions): 学习参数为null。");
            return;
        }
        try {
            double baseTime = data.getTime1();
            double alpha = params.alpha;
            double m = params.m;

            // Header notes that the parameters are fixed in this version.
            System.out.println("  实际值 vs 预测值 (基于固定参数 α=" + String.format("%.4f", alpha) + ", M=" + String.format("%.4f", m) + "):");

            int[] repetitions = {1, 3, 5, 10};
            double[] actualTotalTimes = {
                    data.getTime1(),
                    data.getTime3(),
                    data.getTime5(),
                    data.getTime10()
            };

            for (int i = 0; i < repetitions.length; i++) {
                int rep = repetitions[i];
                double actualTotal = actualTotalTimes[i];
                double actualUnitTimeAvg = (rep > 0) ? actualTotal / rep : actualTotal;

                double predictedTotalTime = 0;
                for (int j = 1; j <= rep; j++) {
                    predictedTotalTime += predictTime(baseTime, alpha, m, j);
                }
                double predictedUnitTimeAvg = (rep > 0) ? predictedTotalTime / rep : predictedTotalTime;

                System.out.printf("    %2d件: 实际总时间 %.3f (平均单件 %.3f), 预测总时间 %.3f (平均单件 %.3f)\n",
                        rep, actualTotal, actualUnitTimeAvg, predictedTotalTime, predictedUnitTimeAvg);
            }

            double minTimePerUnitTheoretic = baseTime * m;
            System.out.printf("    理论最小单件时间 (T1*M): %.3f (为T1的 %.1f%%)\n",
                    minTimePerUnitTheoretic, m * 100);

            // Duplicated pow-with-fallback logic extracted into ratioPow.
            double R20_predicted = m + (1 - m) * ratioPow(20, alpha);
            double R50_predicted = m + (1 - m) * ratioPow(50, alpha);
            System.out.printf("    模型预测比率 (基于固定参数): T_unit(20)/T1 = %.4f (目标 0.34), T_unit(50)/T1 = %.4f (目标 0.25)\n",
                    R20_predicted, R50_predicted);

        } catch (Exception e) {
            System.err.println("打印预测时出错: " + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Math.pow(n, alpha) with the defensive fallback used for the ratio
     * display: a non-finite result maps to 1.0 when alpha == 0, 0.0 when
     * alpha < 0, and Double.MAX_VALUE otherwise.
     */
    private static double ratioPow(double n, double alpha) {
        double p = Math.pow(n, alpha);
        if (Double.isNaN(p) || Double.isInfinite(p)) {
            return (alpha == 0) ? 1.0 : (alpha < 0 ? 0.0 : Double.MAX_VALUE);
        }
        return p;
    }
}