package demo2;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.Properties;

/**
 * @author ChinaManor
 * #Description T2
 * #Date: 24/6/2021 21:14
 */
public class T2 {
    /**
     * Demo job: consume string messages from the Kafka topic {@code flink_kafka}
     * and print them to stdout.
     *
     * <p>Configures a {@code FlinkKafkaConsumer} with dynamic partition discovery
     * and ties offset commits to Flink checkpoints.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        // 1. Execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpointing must be enabled, otherwise setCommitOffsetsOnCheckpoints(true)
        // below is a no-op and offsets are only committed by Kafka's own auto-commit.
        env.enableCheckpointing(5000);

        // 2. Source: Kafka consumer configuration.
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.88.161:9092");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "flink_consumer");
        // Where to start when the consumer group has no committed offset yet.
        props.setProperty("auto.offset.reset", "earliest");
        // Plain Kafka auto-commit; used only as a fallback — once checkpointing is
        // enabled, Flink commits offsets on checkpoints instead.
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "2000");
        // Dynamic partition discovery: pick up newly added partitions every 5 s.
        props.setProperty("flink.partition-discovery.interval-millis", "5000");

        /* The consumer needs:
         * 1. the topic to subscribe to
         * 2. a deserialization schema
         * 3. consumer property: cluster address
         * 4. consumer property: group id (a default exists, but an explicit one is easier to manage)
         * 5. consumer property: offset reset policy, e.g. earliest/latest
         * 6. dynamic partition discovery (so Flink notices when Kafka partitions are added)
         * 7. offset committing: with checkpointing enabled, offsets are committed back
         *    to Kafka as part of each completed checkpoint */
        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
                "flink_kafka",
                new SimpleStringSchema(),
                props);

        // Start from the group's committed offsets, falling back to the
        // auto.offset.reset policy ("earliest" above) when none exist.
        // Note: the original setStartFromLatest() ignored committed offsets
        // entirely, contradicting both the property and its own comment.
        consumer.setStartFromGroupOffsets();

        // Commit the current offsets back to Kafka whenever a checkpoint completes.
        consumer.setCommitOffsetsOnCheckpoints(true);

        DataStream<String> source = env.addSource(consumer);
        // 4. Sink: print each record to stdout.
        source.print();
        // 5. Launch the job.
        env.execute();
    }
}
