package com.ververica.cdc.guass.source.hybrid;

import com.ververica.cdc.guass.sink.jdbc.JdbcConnectionOptions;
import com.ververica.cdc.guass.source.jdbc.SnapShotSplit;
import com.ververica.cdc.guass.source.kafka.KafkaConnectionOptions;
import com.ververica.cdc.guass.source.kafka.table.PhysicalColumn;
import org.apache.flink.api.connector.source.*;

import org.apache.flink.core.io.SimpleVersionedSerializer;
import org.apache.flink.table.data.RowData;

import java.sql.*;
import java.sql.Date;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.stream.Collectors;


/**
 * HybridParallelSource implements the Flink {@link Source} interface and supports
 * switching between two sub-sources: a bounded JDBC snapshot phase (reading existing
 * table data in parallel key-range splits) followed by an unbounded Kafka streaming
 * phase. Because the streaming phase never ends, the source reports itself as
 * {@link Boundedness#CONTINUOUS_UNBOUNDED}.
 */
public class HybridParallelSource implements Source<RowData, HybridSourceSplit, HybridSourceEnumeratorState> {

    private final JdbcConnectionOptions jdbcOptions;
    private final KafkaConnectionOptions kafkaOptions;
    private final List<PhysicalColumn> columns;
    private final List<String> primaryKeys;
    private final String tableName;

    /**
     * Creates the source with all configuration needed by both sub-sources.
     *
     * @param jdbcOptions  connection options for the JDBC (snapshot) phase
     * @param kafkaOptions connection options for the Kafka (streaming) phase
     * @param columns      physical columns of the captured table
     * @param primaryKeys  primary-key column names used to chunk the snapshot
     * @param tableName    name of the source table
     */
    public HybridParallelSource(
            JdbcConnectionOptions jdbcOptions,
            KafkaConnectionOptions kafkaOptions,
            List<PhysicalColumn> columns,
            List<String> primaryKeys,
            String tableName) {
        this.jdbcOptions = jdbcOptions;
        this.kafkaOptions = kafkaOptions;
        this.columns = columns;
        this.primaryKeys = primaryKeys;
        this.tableName = tableName;
    }

    @Override
    public Boundedness getBoundedness() {
        // The Kafka phase never finishes, so the source as a whole is unbounded.
        return Boundedness.CONTINUOUS_UNBOUNDED;
    }

    /**
     * Creates one {@link SourceReader} per parallel subtask. The reader pulls rows out
     * of the splits assigned to it by the enumerator and emits them into the stream.
     *
     * @param readerContext runtime context of the owning subtask
     * @return a new reader instance for this subtask
     */
    @Override
    public SourceReader<RowData, HybridSourceSplit> createReader(SourceReaderContext readerContext) throws Exception {

        // Start with an empty queue; split assignment is driven entirely by the
        // enumerator, which pushes splits to readers after startup.
        Queue<HybridSourceSplit> splits = new ConcurrentLinkedQueue<>();

        // NOTE(review): the reader is intentionally NOT retained in a field. Flink
        // creates one reader per parallel subtask and may serialize this Source
        // object; holding a reference to a (non-serializable, per-subtask) reader on
        // the Source would be a serialization hazard and was never read elsewhere.
        return new HybridSourceReader(
                columns,
                primaryKeys,
                splits,
                jdbcOptions,
                kafkaOptions,
                tableName
        );
    }

    /**
     * Creates the {@link SplitEnumerator}, which owns split discovery and assignment
     * to the parallel readers. The initial snapshot splits are computed eagerly here
     * (on the JobManager) before the enumerator starts.
     */
    @Override
    public SplitEnumerator<HybridSourceSplit, HybridSourceEnumeratorState> createEnumerator(
            SplitEnumeratorContext<HybridSourceSplit> enumContext) throws Exception {

        // Pre-compute the snapshot key-range splits from the current table contents.
        List<HybridSourceSplit> initialSplits = calculateInitialSplits(enumContext);

        return new HybridSourceEnumerator(
                enumContext,
                initialSplits,
                0,      // start with the first (JDBC snapshot) sub-source
                null,   // no restored checkpoint state on a fresh start
                getSplitSerializer()
        );
    }

    /**
     * Chunks the table into snapshot splits along its primary key(s).
     *
     * <p>The table row count is divided by the current parallelism to obtain a target
     * rows-per-split size, the global MIN/MAX of every key column is queried, and the
     * key space is partitioned into the cartesian product of per-column intervals.
     *
     * @param enumContext used only to read the current parallelism
     * @return the list of snapshot splits; empty when the table has no rows
     * @throws SQLException if the metadata queries fail
     */
    private List<HybridSourceSplit> calculateInitialSplits(SplitEnumeratorContext<HybridSourceSplit> enumContext) throws Exception {
        List<HybridSourceSplit> splits = new ArrayList<>();

        try (Connection conn = DriverManager.getConnection(jdbcOptions.getDbURL(), jdbcOptions.getUsername().orElse(null), jdbcOptions.getPassword().orElse(null));
             Statement stmt = conn.createStatement()) {

            long tableSize = getTableSize(stmt);

            // An empty table yields no snapshot splits. Without this guard,
            // numSplits computes to 0, splitsPerColumn to 0, and a single bogus
            // split with null key bounds would be generated downstream.
            if (tableSize == 0) {
                return splits;
            }

            // Target number of rows each parallel task should handle (at least 1).
            long splitSize = Math.max(1, tableSize / enumContext.currentParallelism());

            // NOTE(review): tableName / primaryKeys come from trusted job
            // configuration; SQL identifiers cannot be bound as PreparedStatement
            // parameters, hence the string formatting here.
            String minMaxSql = primaryKeys.stream()
                    .map(col -> String.format("MIN(%s) as min_%s, MAX(%s) as max_%s", col, col, col, col))
                    .collect(Collectors.joining(", "));
            minMaxSql = String.format("SELECT %s FROM %s", minMaxSql, tableName);

            try (ResultSet minMaxRs = stmt.executeQuery(minMaxSql)) {
                if (minMaxRs.next()) {
                    Object[] minValues = new Object[primaryKeys.size()];
                    Object[] maxValues = new Object[primaryKeys.size()];
                    for (int i = 0; i < primaryKeys.size(); i++) {
                        minValues[i] = minMaxRs.getObject("min_" + primaryKeys.get(i));
                        maxValues[i] = minMaxRs.getObject("max_" + primaryKeys.get(i));
                        // Defensive: a NULL min/max means there is no usable key
                        // range (e.g. the table was truncated between the COUNT and
                        // the MIN/MAX queries) — do not emit null-bounded splits.
                        if (minValues[i] == null || maxValues[i] == null) {
                            return splits;
                        }
                    }

                    // Ceiling division: if tableSize is not an exact multiple of
                    // splitSize, round up so every row is covered by some split.
                    long numSplits = (tableSize + splitSize - 1) / splitSize;
                    generateCompositeKeySplits(splits, minValues, maxValues, numSplits);
                }
            }
        }

        return splits;
    }

    /**
     * Builds a stable, human-readable split id from the table name and the split's
     * start/end composite-key boundaries.
     */
    private String generateSplitId(String tableName, Object[] start, Object[] end) {
        return tableName + "-" + Arrays.toString(start) + "-" + Arrays.toString(end);
    }

    /**
     * Partitions the composite primary-key space into splits.
     *
     * <p>Each key column is split into roughly {@code numSplits^(1/keyCount)}
     * intervals, and the cartesian product of those per-column intervals becomes the
     * split set. Note the actual split count may exceed {@code numSplits} because the
     * per-column count is rounded up.
     *
     * @param splits    output list the generated splits are appended to
     * @param minValues per-column minimum key values (non-null)
     * @param maxValues per-column maximum key values (non-null)
     * @param numSplits desired approximate total number of splits
     */
    private void generateCompositeKeySplits(List<HybridSourceSplit> splits, Object[] minValues, Object[] maxValues, long numSplits) {
        int keyCount = primaryKeys.size();
        if (keyCount == 0) return;

        // keyCount-th root, rounded up: per-column interval count whose product
        // is at least numSplits.
        int splitsPerColumn = (int) Math.ceil(Math.pow(numSplits, 1.0 / keyCount));

        // Generate the boundary points for each key column independently.
        List<List<Object>> columnSplitPoints = new ArrayList<>();
        for (int i = 0; i < keyCount; i++) {
            List<Object> points = generateColumnSplitPoints(minValues[i], maxValues[i], splitsPerColumn);
            columnSplitPoints.add(points);
        }

        // Cartesian product of the per-column intervals -> composite-key regions.
        List<List<ColumnInterval>> intervalCombinations = generateIntervalCombinations(columnSplitPoints);

        // One split per composite-key region.
        for (List<ColumnInterval> intervals : intervalCombinations) {
            Object[] splitStart = new Object[keyCount];
            Object[] splitEnd = new Object[keyCount];
            for (int i = 0; i < keyCount; i++) {
                ColumnInterval ci = intervals.get(i);
                splitStart[i] = ci.start;
                splitEnd[i] = ci.end;
            }

            String splitId = generateSplitId(tableName, splitStart, splitEnd);
            SnapShotSplit snapShotSplit = new SnapShotSplit(
                    tableName,
                    primaryKeys,
                    splitStart,
                    splitEnd,
                    splitId,
                    System.currentTimeMillis()
            );
            // 1-based sequence number for the split within this batch.
            splits.add(new HybridSourceSplit(splitId, true, snapShotSplit, splits.size() + 1));
        }
    }

    /**
     * Produces {@code splits + 1} boundary points for one column: the column minimum,
     * {@code splits - 1} interpolated interior points, then the column maximum.
     */
    private List<Object> generateColumnSplitPoints(Object min, Object max, int splits) {
        List<Object> points = new ArrayList<>();
        points.add(min);

        for (int i = 1; i < splits; i++) {
            points.add(calculateSplitPoint(min, max, i, splits));
        }

        points.add(max);
        return points;
    }

    /**
     * Linearly interpolates the {@code index}-th of {@code totalSplits} boundary
     * points between {@code min} and {@code max}, by runtime type:
     * <ul>
     *   <li>Numbers — interpolated as {@code double} (NOTE(review): this yields
     *       fractional boundaries even for integer keys; downstream range predicates
     *       must tolerate that — confirm against the split reader's WHERE clause).</li>
     *   <li>Strings — keeps the common prefix and interpolates the first differing
     *       character; falls back to {@code min} when one string is a prefix of the
     *       other.</li>
     *   <li>{@code java.sql.Date} / {@code Timestamp} — interpolated on epoch millis
     *       (assumes {@code max} has the same runtime type as {@code min}).</li>
     *   <li>Anything else — returns {@code min} unchanged (degenerate split).</li>
     * </ul>
     */
    private Object calculateSplitPoint(Object min, Object max, int index, int totalSplits) {
        if (min instanceof Number && max instanceof Number) {
            double minVal = ((Number) min).doubleValue();
            double maxVal = ((Number) max).doubleValue();
            return minVal + (maxVal - minVal) * index / totalSplits;
        } else if (min instanceof String && max instanceof String) {
            int commonPrefix = 0;
            String minStr = (String) min;
            String maxStr = (String) max;
            int minLength = Math.min(minStr.length(), maxStr.length());

            // Find the length of the shared prefix of the two strings.
            while (commonPrefix < minLength && minStr.charAt(commonPrefix) == maxStr.charAt(commonPrefix)) {
                commonPrefix++;
            }

            // One string is a prefix of the other: no interior character to split on.
            if (commonPrefix == minLength) {
                return minStr;
            }

            // Interpolate on the first character where the strings diverge.
            char minChar = minStr.charAt(commonPrefix);
            char maxChar = maxStr.charAt(commonPrefix);
            char boundary = (char) (minChar + (maxChar - minChar) * index / totalSplits);
            return minStr.substring(0, commonPrefix) + boundary;
        } else if (min instanceof Date) {
            long minTime = ((Date) min).getTime();
            long maxTime = ((Date) max).getTime();
            return new Date(minTime + (maxTime - minTime) * index / totalSplits);
        } else if (min instanceof Timestamp) {
            long minTime = ((Timestamp) min).getTime();
            long maxTime = ((Timestamp) max).getTime();
            return new Timestamp(minTime + (maxTime - minTime) * index / totalSplits);
        }
        // Unsupported key type: degrade gracefully to a single degenerate boundary.
        return min;
    }

    /**
     * Converts per-column boundary-point lists into per-column interval lists, then
     * expands them into the cartesian product of composite-key regions.
     */
    private List<List<ColumnInterval>> generateIntervalCombinations(List<List<Object>> columnSplitPoints) {
        List<List<ColumnInterval>> intervals = new ArrayList<>();
        for (List<Object> points : columnSplitPoints) {
            List<ColumnInterval> columnIntervals = new ArrayList<>();
            // Adjacent boundary points [p_i, p_{i+1}] form one interval.
            for (int i = 0; i < points.size() - 1; i++) {
                columnIntervals.add(new ColumnInterval(points.get(i), points.get(i + 1)));
            }
            intervals.add(columnIntervals);
        }
        return cartesianProduct(intervals);
    }

    /** Cartesian product of the per-column interval lists (empty input -> empty result). */
    private List<List<ColumnInterval>> cartesianProduct(List<List<ColumnInterval>> lists) {
        List<List<ColumnInterval>> result = new ArrayList<>();
        if (lists.isEmpty()) return result;

        backtrackCartesian(lists, 0, new ArrayList<>(), result);
        return result;
    }

    /** Backtracking expansion of the cartesian product, one column (depth) at a time. */
    private void backtrackCartesian(List<List<ColumnInterval>> lists, int depth, List<ColumnInterval> current, List<List<ColumnInterval>> results) {
        if (depth == lists.size()) {
            // One full combination assembled; snapshot it (current is reused).
            results.add(new ArrayList<>(current));
            return;
        }

        for (ColumnInterval interval : lists.get(depth)) {
            current.add(interval);
            backtrackCartesian(lists, depth + 1, current, results);
            current.remove(current.size() - 1);
        }
    }

    /** Immutable [start, end] boundary pair for a single key column. */
    private static class ColumnInterval {
        final Object start;
        final Object end;

        ColumnInterval(Object start, Object end) {
            this.start = start;
            this.end = end;
        }
    }

    /**
     * Returns the row count of the source table, or 0 when the count query yields
     * no row.
     */
    private long getTableSize(Statement stmt) throws SQLException {
        String countSql = String.format("SELECT COUNT(*) as count FROM %s", tableName);
        try (ResultSet countRs = stmt.executeQuery(countSql)) {
            if (countRs.next()) {
                return countRs.getLong("count");
            }
        }
        return 0;
    }

    /**
     * Restores the SplitEnumerator from checkpointed state. No new splits are
     * computed here — outstanding splits are recovered from the checkpoint.
     */
    @Override
    public SplitEnumerator<HybridSourceSplit, HybridSourceEnumeratorState> restoreEnumerator(
            SplitEnumeratorContext<HybridSourceSplit> enumContext,
            HybridSourceEnumeratorState checkpoint) throws Exception {
        return new HybridSourceEnumerator(
                enumContext,
                new ArrayList<>(),
                checkpoint.getCurrentSourceIndex(),
                checkpoint,
                getSplitSerializer());
    }

    /** Returns the serializer for {@link HybridSourceSplit} instances. */
    @Override
    public SimpleVersionedSerializer<HybridSourceSplit> getSplitSerializer() {
        return new HybridSourceSplitSerializer();
    }

    /** Returns the serializer for the enumerator's checkpoint state. */
    @Override
    public SimpleVersionedSerializer<HybridSourceEnumeratorState> getEnumeratorCheckpointSerializer() {
        return new HybridSourceEnumeratorStateSerializer();
    }

}
