package com.ververica.cdc.guass.source.hybrid;

import com.ververica.cdc.guass.sink.jdbc.JdbcConnectionOptions;
import com.ververica.cdc.guass.source.jdbc.SnapShotSplit;
import com.ververica.cdc.guass.source.jdbc.SnapshotReader;
import com.ververica.cdc.guass.source.kafka.GaussKafkaSourceFunction;
import com.ververica.cdc.guass.source.kafka.KafkaConnectionOptions;
import com.ververica.cdc.guass.source.kafka.data.ReaderOutputWrapper;
import com.ververica.cdc.guass.source.kafka.table.ChangeEventToRowDataSourceFunction;
import com.ververica.cdc.guass.source.kafka.table.PhysicalColumn;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.connector.source.ReaderOutput;
import org.apache.flink.api.connector.source.SourceReader;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.io.InputStatus;
import org.apache.flink.table.data.RowData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.CompletableFuture;

import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.CountDownLatch;

/**
 * A {@link SourceReader} that first emits a bounded snapshot phase (JDBC splits read via
 * {@link SnapshotReader}) and then switches to an unbounded incremental phase fed from Kafka
 * (via {@link ChangeEventToRowDataSourceFunction}).
 *
 * <p>Phase transition: snapshot splits are drained from {@code splits}; once the queue is empty
 * and {@code snapshotLatch} reaches zero, {@code snapshotDone} is set and the Kafka reader is
 * started exactly once per JVM (guarded by a static lock + flag).
 */
public class HybridSourceReader implements SourceReader<RowData, HybridSourceSplit> {

    private static final Logger log = LoggerFactory.getLogger(HybridSourceReader.class);

    private final List<PhysicalColumn> columns;
    private final List<String> primaryKeys;
    private final Queue<HybridSourceSplit> splits;
    private final JdbcConnectionOptions jdbcOptions;
    private final KafkaConnectionOptions kafkaOptions;
    private final String tableName;

    private ChangeEventToRowDataSourceFunction changeEventToRowDataSourceFunction;
    private boolean isChangeEventFunctionOpened = false;

    // Set once the snapshot phase has completed; volatile because isAvailable()
    // may observe it from a thread other than the one running pollNext().
    private volatile boolean snapshotDone;

    private SnapshotReader snapshotReader;
    private GaussKafkaSourceFunction gaussKafkaSourceFunction;

    // Static (JVM-global) lock: even if multiple HybridSourceReader instances are
    // created, all of them contend on the same lock, so the Kafka reader is
    // started at most once per JVM.
    private static final ReentrantLock kafkaLock = new ReentrantLock();

    private static volatile boolean kafkaStarted;

    // Counted down once per processed snapshot split; the snapshot phase is
    // considered finished when the queue is empty AND this latch reaches zero.
    // NOTE(review): addSplits() replaces this latch with a fresh one sized to the
    // NEW batch only — if addSplits() can be called more than once, in-flight
    // counts from earlier batches are discarded. Confirm splits arrive in a
    // single batch.
    private CountDownLatch snapshotLatch = new CountDownLatch(0);

    // NOTE(review): declared but never referenced in this class — candidate for
    // removal once confirmed nothing relies on it.
    private static final MapStateDescriptor<Void, Boolean> KAFKA_STARTED_DESC =
            new MapStateDescriptor<>("kafka-started", Types.VOID, Types.BOOLEAN);

    // Future handed out by isAvailable() while no work exists; completed by
    // addSplits() so the runtime resumes calling pollNext() when splits arrive.
    private volatile CompletableFuture<Void> availabilityFuture =
            CompletableFuture.completedFuture(null);

    public HybridSourceReader(
            List<PhysicalColumn> columns,
            List<String> primaryKeys,
            Queue<HybridSourceSplit> splits,
            JdbcConnectionOptions jdbcOptions,
            KafkaConnectionOptions kafkaOptions,
            String tableName) {
        this.columns = columns;
        this.primaryKeys = primaryKeys;
        this.splits = splits;
        this.jdbcOptions = jdbcOptions;
        this.kafkaOptions = kafkaOptions;
        this.tableName = tableName;
        log.info("HybridSourceReader Thread ID: {}", Thread.currentThread().getId());
    }

    /**
     * Opens the JDBC snapshot reader and the Kafka-backed change-event function.
     * Both are initialized eagerly so pollNext() only has to dispatch.
     *
     * @throws RuntimeException if the JDBC connection or the Kafka function cannot be opened
     */
    @Override
    public void start() {
        // Initialize the snapshot reader.
        this.snapshotReader = new SnapshotReader(jdbcOptions, tableName, primaryKeys, splits.size(), columns);

        try {
            snapshotReader.open();
        } catch (SQLException | ClassNotFoundException e) {
            throw new RuntimeException("Failed to open snapshot reader for table " + tableName, e);
        }

        // Initialize the ChangeEventToRowDataSourceFunction backing the incremental phase.
        gaussKafkaSourceFunction = new GaussKafkaSourceFunction(
                kafkaOptions.getTopic(),
                kafkaOptions.getBootstrapServers(),
                kafkaOptions.getGroupId(),
                tableName,
                kafkaOptions.getScanStartupMode()
        );

        this.changeEventToRowDataSourceFunction =
                new ChangeEventToRowDataSourceFunction(gaussKafkaSourceFunction, columns);

        try {
            log.info("changeEventToRowDataSourceFunction open Thread ID: {}", Thread.currentThread().getId());
            changeEventToRowDataSourceFunction.open(new Configuration());
            isChangeEventFunctionOpened = true;
        } catch (Exception e) {
            throw new RuntimeException("Failed to open GaussKafkaSourceFunction", e);
        }
    }

    /**
     * Called serially on a single thread by the Flink runtime.
     *
     * <p>Snapshot phase: emits one split per call. Once all splits are processed the
     * Kafka reader is started; startKafkaReader() blocks inside the source function's
     * run() loop, so END_OF_INPUT is only returned after that loop exits (i.e. after
     * cancellation).
     *
     * @param output collector for produced rows
     * @return the input status describing remaining availability
     */
    @Override
    public InputStatus pollNext(ReaderOutput<RowData> output) throws Exception {
        log.debug("pollNext Thread ID: {}", Thread.currentThread().getId());

        if (!snapshotDone) {
            HybridSourceSplit split = splits.poll();
            if (split != null) {
                processSnapshotSplit(split, output);
                snapshotLatch.countDown();
                return InputStatus.MORE_AVAILABLE;
            }

            if (snapshotLatch.getCount() == 0) {
                snapshotDone = true;
                // Blocks until the Kafka source function's run() loop terminates.
                startKafkaReader(output);
                // The incremental phase has ended (function cancelled) — input is done.
                return InputStatus.END_OF_INPUT;
            }

            // Queue drained but outstanding splits not yet acknowledged: more input
            // may still arrive, so do NOT signal end of input here (the original
            // fell through to END_OF_INPUT, terminating the source prematurely).
            return InputStatus.NOTHING_AVAILABLE;
        }

        return InputStatus.NOTHING_AVAILABLE;
    }

    /** Reads a single snapshot split and forwards its rows to {@code output}. */
    private void processSnapshotSplit(HybridSourceSplit split, ReaderOutput<RowData> output) {
        try {
            SnapShotSplit snapshotSplit = (SnapShotSplit) split.getSplitDetail();
            snapshotReader.readSplit(snapshotSplit, output);
        } catch (SQLException e) {
            throw new RuntimeException("Error reading snapshot split", e);
        }
    }

    /**
     * Starts the Kafka-based incremental reader at most once per JVM. The
     * double-checked flag under the static lock prevents a second start even
     * when multiple reader instances reach this point.
     */
    private void startKafkaReader(ReaderOutput<RowData> output) throws Exception {
        kafkaLock.lock();
        try {
            // Re-check under the lock in case another thread already started it.
            if (!kafkaStarted) {
                log.info("Starting Kafka reader for incremental data...");
                // Blocking call: runs the Kafka consumption loop until cancelled.
                changeEventToRowDataSourceFunction.run(new ReaderOutputWrapper(output, "kafka-split"));
                kafkaStarted = true; // Mark Kafka as started
            }
        } finally {
            kafkaLock.unlock();
        }
    }

    /**
     * Snapshots the reader state: the splits not yet processed.
     * The checkpoint id is not needed — state is fully described by the queue.
     */
    @Override
    public List<HybridSourceSplit> snapshotState(long checkpointId) {
        // Copy the unfinished splits so the returned list is independent of the queue.
        return new ArrayList<>(splits);
    }

    /**
     * Available when there is a split to process or the incremental phase is
     * active. Otherwise returns a pending future that addSplits() completes —
     * the original returned a future that was never completed, which could
     * starve the reader forever.
     */
    @Override
    public CompletableFuture<Void> isAvailable() {
        if (!splits.isEmpty() || snapshotDone) {
            return CompletableFuture.completedFuture(null);
        }
        CompletableFuture<Void> pending = new CompletableFuture<>();
        availabilityFuture = pending;
        return pending;
    }

    @Override
    public void addSplits(List<HybridSourceSplit> splits) {
        this.splits.addAll(splits);
        this.snapshotLatch = new CountDownLatch(splits.size());
        // Wake up the runtime if it is parked on a pending isAvailable() future.
        availabilityFuture.complete(null);
    }

    @Override
    public void notifyNoMoreSplits() {
        // Mark that no more splits will be added.
        log.info("No more splits will be added");
    }

    /** Releases the JDBC reader and cancels the Kafka source function. */
    @Override
    public void close() throws Exception {
        if (snapshotReader != null) {
            snapshotReader.close();
        }
        if (gaussKafkaSourceFunction != null) {
            gaussKafkaSourceFunction.cancel();
        }
    }
}
