package com.tang.source;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.time.Duration;

/**
 * Demo: consuming a Kafka topic with Flink's {@code KafkaSource}.
 * =====
 * Kafka consumer property {@code auto.offset.reset}:
 * -       earliest: if a committed offset exists, resume from it; otherwise start from the EARLIEST record
 * -       latest  : if a committed offset exists, resume from it; otherwise start from the LATEST record
 * ====
 * Flink's KafkaSource starting-offset strategy ({@code OffsetsInitializer}, default is earliest):
 * -       earliest: always start from the EARLIEST record
 * -       latest  : always start from the LATEST record
 *
 * @author tang
 * @since 2023/5/31 16:03
 */
public class KafkaSourceDemo {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
         * Create the topic:
         * bin/kafka-topics.sh --create --zookeeper 192.168.70.141:2181 --replication-factor 1 --partitions 1 --topic flink_topic
         * List topics:
         * bin/kafka-topics.sh --list --zookeeper 192.168.70.141:2181
         * Produce messages:
         * bin/kafka-console-producer.sh --broker-list localhost:9092 --topic flink_topic
         * Consume messages:
         * bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic flink_topic --from-beginning
         */
        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
                .setBootstrapServers("192.168.70.141:9092") // Kafka broker address(es); separate multiple brokers with commas
                .setGroupId("flink") // consumer group id
                .setTopics("flink_topic") // topic(s) to consume
                .setValueOnlyDeserializer(new SimpleStringSchema()) // deserializer for the record value only (key is ignored)
                .setStartingOffsets(OffsetsInitializer.latest()) // Flink's starting-offset strategy: always start from the latest record
                .build();

        // Attach the source with a 3-second bounded-out-of-orderness watermark strategy and print every record.
        env.fromSource(kafkaSource, WatermarkStrategy.forBoundedOutOfOrderness(Duration.ofSeconds(3)), "kafkaSource").print();

        env.execute();

    }

}
