package com.cn.daimajiangxin.flink.source;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

/**
 * Demo job: consumes UTF-8 string messages from a Kafka topic using the
 * new-style Flink {@code KafkaSource} connector and prints them to stdout.
 *
 * <p>Flow: build execution environment with checkpointing → configure a
 * KafkaSource with a custom byte[]-to-String deserializer → attach it to the
 * stream graph → print each record → execute the job.
 */
public class KafkaSourceDemo {
    private static final Logger LOG = LoggerFactory.getLogger(KafkaSourceDemo.class);

    public static void main(String[] args) throws Exception {
        // 1. Create the Flink streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Enable checkpointing every 5 seconds (required in production for
        // exactly-once semantics). NOTE: the original code called
        // enableCheckpointing(5000) twice; the duplicate call was removed.
        env.enableCheckpointing(5000);

        // Abort a checkpoint if it does not complete within 60 seconds.
        env.getCheckpointConfig().setCheckpointTimeout(60000);

        // 2. Kafka connection parameters.
        String kafkaBootstrapServers = "192.168.0.199:9092"; // Kafka broker address
        String topic = "test_topic"; // topic to subscribe to
        String consumerGroup = "flink-consumer-group"; // consumer group id

        // Parameterized SLF4J logging: placeholders avoid eager string
        // concatenation when the log level is disabled.
        LOG.info("Connecting to Kafka at {}", kafkaBootstrapServers);
        LOG.info("Consuming topic: {}", topic);
        LOG.info("Consumer group: {}", consumerGroup);

        // 3. Define the Kafka source (new unified-source API).
        // NOTE(review): with checkpointing enabled, Flink's KafkaSource commits
        // offsets on checkpoint completion; Kafka's own "enable.auto.commit"
        // is redundant here and may mask the Flink-managed commit behavior —
        // consider dropping it. Kept as-is to preserve existing behavior.
        KafkaSource<String> kafkaSourceDemo = KafkaSource.<String>builder()
                .setBootstrapServers(kafkaBootstrapServers)
                .setTopics(topic)
                .setGroupId(consumerGroup)
                .setProperty("enable.auto.commit", "true")
                .setProperty("auto.commit.interval.ms", "1000")
                .setProperty("session.timeout.ms", "30000")
                .setProperty("retry.backoff.ms", "1000")
                .setProperty("reconnect.backoff.max.ms", "10000")
                .setDeserializer(new KafkaRecordDeserializationSchema<String>() {
                    /**
                     * Decodes the record value (raw bytes) as a UTF-8 string and
                     * emits it into the Flink stream.
                     */
                    @Override
                    public void deserialize(ConsumerRecord<byte[], byte[]> record, Collector<String> out)
                            throws IOException {
                        String value = new String(record.value(), StandardCharsets.UTF_8);
                        LOG.info("Received message: {}", value);
                        out.collect(value);
                    }

                    /** Tells Flink the element type this schema produces. */
                    @Override
                    public TypeInformation<String> getProducedType() {
                        return TypeInformation.of(String.class);
                    }
                })
                // Start from the earliest offsets so historical data is read
                // even when no new messages arrive.
                .setStartingOffsets(OffsetsInitializer.earliest())
                .build();

        // 4. Attach the source to the stream graph. No watermarks: event time
        //    is not used in this demo.
        DataStream<String> kafkaStream = env.fromSource(
                kafkaSourceDemo,
                WatermarkStrategy.noWatermarks(),
                "Kafka Source" // source name, shown in the Flink UI / metrics
        );

        LOG.info("Kafka source created successfully");

        // 5. Sink: print each record to stdout with the "KafkaData" prefix.
        //    In production this would be replaced by a real sink (DB, MQ, ...).
        kafkaStream.print("KafkaData");
        LOG.info("Flink Kafka Source Demo started.");

        // 6. Trigger job execution (blocks until the job terminates).
        env.execute("Flink Kafka Source Demo");
    }
}
