package com.ververica.cdc.guass.source.kafka;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.ververica.cdc.guass.source.kafka.data.ChangeEvent;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.state.CheckpointListener;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.functions.source.RichSourceFunction;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class GaussKafkaSourceFunction extends RichSourceFunction<ChangeEvent> implements CheckpointedFunction, CheckpointListener {

    /** Static so the logger is never dragged through Flink's serialization of this function. */
    private static final Logger LOG = LoggerFactory.getLogger(GaussKafkaSourceFunction.class);

    private final String topic;
    private final String groupId;
    private final String tableName;
    private final Properties properties;

    /** Poll-loop switch; flipped to false by cancel(), which may run on a different thread. */
    private volatile boolean isRunning = true;

    /**
     * Diagnostic map recording every partition each key has been observed on, used to log keys
     * that span multiple partitions.
     * NOTE(review): this grows without bound over the job lifetime — consider capping it or
     * removing the diagnostic once the partitioning question is settled.
     */
    private final Map<String, Set<Integer>> keyPartitionMap = new HashMap<>();

    /** Flink operator state holding the offset map across checkpoints/restores. */
    private transient ListState<Map<String, Long>> offsetState;
    /** "table///topic///partition" -> next offset to consume. Snapshotted on every checkpoint. */
    public transient Map<String, Long> offsets;

    // KafkaConsumer is neither serializable nor thread-safe; both instances are created after
    // deserialization (open()/startConnectionMonitorThread()) and used only from their owning
    // thread — wakeup() being the single documented thread-safe exception.
    private transient KafkaConsumer<String, String> consumer;
    private transient KafkaConsumer<String, String> monitorConsumer;

    /** JSON codec for ChangeEvent payloads; created in open(). */
    private transient ObjectMapper objectMapper;

    /** Checkpoint IDs whose offsets still need to be committed back to Kafka. */
    private transient ConcurrentLinkedQueue<Long> checkpointsToCommit;
    /** Tells the poll loop that a completed checkpoint is waiting for an offset commit. */
    private transient AtomicBoolean needToCommitOffsets;
    /** Set by the monitor thread on lost connectivity; the poll loop then fails the task. */
    private final AtomicBoolean connectionError = new AtomicBoolean(false);

    /** Single-thread scheduler running the periodic connectivity probe. */
    private transient ScheduledExecutorService connectionMonitor;

    /**
     * Creates a source that consumes {@code topic} and emits only events for {@code tableName}.
     *
     * @param topic            Kafka topic carrying change events (possibly for several tables)
     * @param bootstrapServers Kafka bootstrap servers
     * @param groupId          consumer group id
     * @param tableName        only events whose {@code table} field equals this name are emitted
     * @param scanStartupMode  "earliest-offset" maps to "earliest"; any other non-empty value
     *                         maps to "latest"; null/empty defaults to "earliest"
     */
    public GaussKafkaSourceFunction(String topic, String bootstrapServers, String groupId, String tableName, String scanStartupMode) {
        this.topic = topic;
        this.groupId = groupId;
        this.tableName = tableName;
        this.properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Sticky assignor combines RoundRobin balance with "stickiness" — suitable for dynamic
        // groups where consumers frequently join and leave.
        properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StickyAssignor.class.getName());
        // Session/heartbeat settings used to detect consumer failures.
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "10000");   // 10 s
        properties.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "3000"); // 3 s
        // Request timeout.
        properties.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000");   // 30 s
        // Reconnect back-off.
        properties.put(ConsumerConfig.RECONNECT_BACKOFF_MS_CONFIG, "1000");      // 1 s
        properties.put(ConsumerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG, "10000"); // 10 s
        // Metadata refresh frequency.
        properties.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "5000");  // 5 s

        if (StringUtils.isEmpty(scanStartupMode)) {
            properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        } else {
            properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest-offset".equals(scanStartupMode) ? "earliest" : "latest");
        }
        // Offsets are committed manually, only after a Flink checkpoint completes.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    }

    /**
     * Creates the Kafka consumer and the JSON codec on the task thread and starts the
     * connectivity monitor. The consumer must only ever be used from this (the run()) thread.
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        consumer = new KafkaConsumer<>(properties);
        objectMapper = new ObjectMapper();

        // initializeState() normally runs before open() and has already created these; the
        // null checks keep open() self-contained without discarding queued checkpoint IDs.
        if (checkpointsToCommit == null) {
            checkpointsToCommit = new ConcurrentLinkedQueue<>();
        }
        if (needToCommitOffsets == null) {
            needToCommitOffsets = new AtomicBoolean(false);
        }

        // Start connection monitor thread.
        startConnectionMonitorThread();

        LOG.info("Starting Kafka consumer for table: {}, group: {}", tableName, groupId);
    }

    /**
     * Main consumption loop. Seeks to restored offsets when present, otherwise subscribes and
     * lets Kafka assign partitions. Emits only events whose table matches {@link #tableName};
     * offsets are tracked per partition and committed to Kafka after checkpoints complete.
     */
    @Override
    public void run(SourceContext<ChangeEvent> ctx) {

        if (offsets != null && !offsets.isEmpty()) {
            // Resume from the offsets restored in initializeState().
            LOG.info("Resuming from previous offsets: {}", offsets);
            List<TopicPartition> topicPartitions = getPartitions(consumer);
            consumer.assign(topicPartitions);
            for (TopicPartition tp : topicPartitions) {
                Long offset = offsets.get(topicPartitionToString(tp));
                if (offset != null) {
                    consumer.seek(tp, offset);
                    LOG.info("Seeking to offset {} for partition {}", offset, tp);
                } else {
                    consumer.seekToBeginning(Collections.singleton(tp));
                    LOG.info("Seeking to beginning for partition {}", tp);
                }
            }
        } else {
            // No restored state: subscribe and let Kafka handle partition assignment.
            LOG.info("Subscribing to topic: {}", topic);
            consumer.subscribe(Collections.singletonList(topic));
            offsets = new HashMap<>();
        }

        try {
            while (isRunning) {

                // Commit offsets for any checkpoint completed since the previous iteration.
                if (needToCommitOffsets.get()) {
                    commitOffsetsToKafka();
                }
                if (connectionError.get()) {
                    // Fail the task so Flink's restart strategy can recover from checkpoint.
                    throw new IOException("Kafka Connection lost");
                }

                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                if (!records.isEmpty()) {
                    LOG.debug("Fetched {} records for table {}", records.count(), tableName);

                    Map<String, Long> offsetsToCommit = new HashMap<>();
                    // Emitting records and updating the offset map must be atomic with respect
                    // to snapshotState(), hence the checkpoint lock.
                    synchronized (ctx.getCheckpointLock()) {
                        for (ConsumerRecord<String, String> record : records) {
                            try {
                                ChangeEvent event = objectMapper.readValue(record.value(), ChangeEvent.class);
                                if (tableName.equals(event.getTable())) {

                                    LOG.debug("Processing event for table {} from partition {} at offset {}", event.getTable(), record.partition(), record.offset());

                                    // Diagnostic: detect keys that appear on more than one partition.
                                    String key = record.key();
                                    int partition = record.partition();
                                    Set<Integer> seenPartitions = keyPartitionMap.computeIfAbsent(key, k -> new HashSet<>());
                                    seenPartitions.add(partition);
                                    if (seenPartitions.size() > 1) {
                                        LOG.info("Key {} has been sent to multiple partitions: {}", key, seenPartitions);
                                    }

                                    ctx.collectWithTimestamp(event, record.timestamp());
                                    ctx.emitWatermark(new Watermark(record.timestamp()));

                                    String tpKey = topicPartitionToString(new TopicPartition(record.topic(), partition));
                                    // The offset to resume from is the current record's offset + 1.
                                    offsetsToCommit.put(tpKey, record.offset() + 1);
                                }

                            } catch (Exception e) {
                                // One bad record must not kill the source; log and continue.
                                LOG.error("Error processing record: {}", record, e);
                            }
                        }

                        // Publish the new positions to the checkpointed offset map.
                        offsets.putAll(offsetsToCommit);
                    }
                }
            }
        } catch (WakeupException e) {
            handleWakeupException(e);

        } catch (Exception e) {
            handleGenericException(e);
        }
    }

    /**
     * Handles a WakeupException thrown out of poll(). During shutdown this is the expected way
     * to break out of a blocked poll (cancel() calls wakeup()); while still running it is a
     * genuine error and fails the task.
     */
    private void handleWakeupException(WakeupException e) {
        if (isRunning) {
            LOG.error("Unexpected WakeupException while consumer is running", e);
            throw new RuntimeException("Consumer was unexpectedly woken up", e);
        } else {
            // Expected during shutdown — debug-level only (was incorrectly logged as error).
            LOG.debug("WakeupException received during consumer shutdown");
        }
    }

    /**
     * Escalates any other consumer-loop exception so Flink fails and restarts the task.
     */
    private void handleGenericException(Exception e) {
        LOG.error("Unexpected exception in Kafka consumer", e);
        throw new RuntimeException("Kafka consumer failure", e); // triggers task failure
    }

    /**
     * Responds to the job-cancellation signal. KafkaConsumer is not thread-safe and cancel()
     * may run on a different thread than run(), so the only consumer method invoked here is
     * wakeup() — the one call documented as safe from other threads. Previously this method
     * closed the consumers directly (a cross-thread use of KafkaConsumer) and never cleared
     * {@link #isRunning}; resource cleanup now happens in {@link #close()} on the task thread.
     */
    @Override
    public void cancel() {
        isRunning = false;
        KafkaConsumer<String, String> current = consumer;
        if (current != null) {
            // Breaks a blocked poll(); run() handles the resulting WakeupException.
            current.wakeup();
        }
    }

    /**
     * Releases all resources. Runs on the task thread, where closing the (non-thread-safe)
     * KafkaConsumer instances is legal.
     */
    @Override
    public void close() throws Exception {
        isRunning = false;
        try {
            if (connectionMonitor != null) {
                connectionMonitor.shutdownNow();
                LOG.info("Kafka connection monitor shut down successfully");
            }

            if (monitorConsumer != null) {
                // The monitor executor has been stopped above, so no probe is in flight.
                monitorConsumer.close(Duration.ofSeconds(30));
                LOG.info("Kafka monitorConsumer closed successfully");
            }

            if (consumer != null) {
                consumer.close(Duration.ofSeconds(30));
                LOG.info("Kafka consumer closed successfully");
            }
        } catch (Exception e) {
            LOG.warn("Error closing Kafka consumer", e);
        } finally {
            super.close();
        }
    }

    /**
     * Called on every Flink checkpoint: persists a defensive copy of the current per-partition
     * offsets into operator state so a restart can resume from them.
     *
     * @param context the context for drawing a snapshot of the operator
     */
    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {

        LOG.info("Snapshot state called for checkpoint ID: {} for table: {}", context.getCheckpointId(), tableName);

        if (offsetState != null) {
            offsetState.clear();

            if (offsets != null) {
                // Defensive copy: run() keeps mutating the live map under the checkpoint lock.
                Map<String, Long> currentOffsets = new HashMap<>(offsets);
                offsetState.add(currentOffsets);
                LOG.info("Successfully snapshotted offsets for checkpoint {}: {}", context.getCheckpointId(), currentOffsets);
            } else {
                LOG.warn("Offsets map is null during snapshot for checkpoint {}!", context.getCheckpointId());
                offsetState.add(new HashMap<>());
            }
        } else {
            LOG.error("offsetState is null during snapshot!");
        }

    }

    /**
     * Starts a daemon thread that probes broker connectivity every 30 seconds with a dedicated
     * lightweight consumer; on failure it sets {@link #connectionError} so the main loop fails
     * the task. The monitor consumer is used exclusively from the monitor thread.
     */
    private void startConnectionMonitorThread() {
        connectionMonitor = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "kafka-connection-monitor-" + tableName);
            t.setDaemon(true);
            return t;
        });

        // Separate group/client id so the probe never interferes with the real consumer.
        Properties monitorProps = new Properties();
        monitorProps.putAll(properties);
        monitorProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId + "-monitor");
        monitorProps.put(ConsumerConfig.CLIENT_ID_CONFIG, "monitor-client-" + UUID.randomUUID());
        monitorConsumer = new KafkaConsumer<>(monitorProps);

        connectionMonitor.scheduleAtFixedRate(() -> {
            try {
                // listTopics is a lightweight metadata request — enough to prove connectivity.
                monitorConsumer.listTopics(Duration.ofSeconds(5));

                LOG.debug("Kafka connection monitor verified connectivity for table: {}", tableName);
            } catch (Exception e) {
                LOG.error("Kafka connection monitor detected connectivity issue for table: {}", tableName, e);
                connectionError.set(true);
            }
        }, 30, 30, TimeUnit.SECONDS); // check every 30 seconds
    }

    /**
     * Restores previously saved per-partition offsets from the Flink state backend (if this is
     * a restore) and initializes the checkpoint-commit bookkeeping.
     *
     * @param context the context for initializing the operator
     */
    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {

        LOG.info("Initialize state called for table: {}, isRestored: {}", tableName, context.isRestored());

        // Descriptor name is made unique per table so several sources can share a job.
        ListStateDescriptor<Map<String, Long>> descriptor = new ListStateDescriptor<>("kafka-offsets-" + tableName,
                TypeInformation.of(new TypeHint<Map<String, Long>>() {
                }));

        offsetState = context.getOperatorStateStore().getListState(descriptor);
        offsets = new HashMap<>();

        if (context.isRestored()) {
            // Merge all restored entries (there may be several after a rescale).
            for (Map<String, Long> state : offsetState.get()) {
                offsets.putAll(state);
            }

            LOG.info("Recovered offsets from checkpoint: {}", offsets);
        } else {
            LOG.info("No previous state to restore, starting with empty offsets.");
        }

        // Initialize checkpoint-related structures.
        checkpointsToCommit = new ConcurrentLinkedQueue<>();
        needToCommitOffsets = new AtomicBoolean(false);
    }

    /**
     * Checkpoint-complete callback (runs on a Flink-internal thread). KafkaConsumer is not
     * thread-safe, so instead of committing here we queue the checkpoint id and signal the
     * poll loop, which performs the actual commit on the consumer's own thread.
     */
    @Override
    public void notifyCheckpointComplete(long checkpointId) throws Exception {

        LOG.info("Checkpoint {} completed for table: {}, scheduling offset commit", checkpointId, tableName);

        checkpointsToCommit.add(checkpointId);
        needToCommitOffsets.set(true);
    }

    /**
     * Commits the current offset map to Kafka. Only ever called from the run() thread, which
     * owns the consumer. A failed commit is logged but not fatal — the checkpointed offsets in
     * Flink state remain the source of truth for recovery.
     */
    private void commitOffsetsToKafka() {
        if (consumer != null && !offsets.isEmpty()) {
            Map<TopicPartition, OffsetAndMetadata> commitOffsets = offsets.entrySet().stream().collect(Collectors.toMap(e -> stringToTopicPartition(e.getKey()), e -> new OffsetAndMetadata(e.getValue())));
            try {
                Long checkpointId = checkpointsToCommit.poll();
                consumer.commitSync(commitOffsets);
                LOG.info("Successfully committed offsets to Kafka for checkpoint {}: {}", checkpointId, commitOffsets);
            } catch (Exception e) {
                LOG.error("Failed to commit offsets to Kafka: {}", commitOffsets, e);
            }

            // Keep the flag set while more completed checkpoints are pending.
            needToCommitOffsets.set(!checkpointsToCommit.isEmpty());
        } else {
            LOG.info("No offsets to commit for table: {}", tableName);
            needToCommitOffsets.set(false);
        }
    }

    /**
     * Returns all partitions of {@link #topic}, sorted by partition number.
     *
     * @throws IllegalStateException if the broker reports no partitions for the topic
     *                               (partitionsFor may return null for an unknown topic)
     */
    private List<TopicPartition> getPartitions(KafkaConsumer<String, String> consumer) {

        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        LOG.info("partitionInfos is: {}", partitionInfos);

        if (partitionInfos == null || partitionInfos.isEmpty()) {
            throw new IllegalStateException("No partitions found for topic: " + topic);
        }

        return partitionInfos.stream()
                .sorted(Comparator.comparingInt(PartitionInfo::partition))
                .map(p -> new TopicPartition(topic, p.partition()))
                .collect(Collectors.toList());
    }

    /** Encodes a partition as "table///topic///partition" for use as an offsets-map key. */
    private String topicPartitionToString(TopicPartition tp) {
        return tableName + "///" + topic + "///" + tp.partition();
    }

    /**
     * Inverse of {@link #topicPartitionToString(TopicPartition)}.
     *
     * @throws IllegalArgumentException if the key does not have the expected three segments
     */
    private TopicPartition stringToTopicPartition(String tpString) {

        // Demoted from info to debug: this runs once per map entry on every offset commit.
        LOG.debug("tpString is: {}", tpString);
        String[] parts = tpString.split("///");
        if (parts.length < 3) {
            throw new IllegalArgumentException("Malformed partition key: " + tpString);
        }
        String topic = parts[1];
        int partition = Integer.parseInt(parts[2]);
        return new TopicPartition(topic, partition);
    }
}
