package com.youxin.dataStream.kafka;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer010;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * Kafka consumers support configurable startup strategies, i.e. choosing the
 * position from which consumption begins.
 *
 * Fault tolerance of the Kafka consumer:
 * When checkpointing is enabled, the Kafka consumer periodically persists the
 * Kafka offsets together with the state of the other operators. When the job
 * fails and restarts, Flink recovers from the most recent checkpoint and
 * re-consumes the Kafka data from that point.
 *
 * To get a fault-tolerant Kafka consumer, checkpointing must be enabled:
 * env.enableCheckpointing(5000) // checkpoint every 5 s
 *
 * Automatic committing of Kafka consumer offsets:
 * With checkpointing disabled, offsets can be auto-committed via two properties:
 *      enable.auto.commit
 *      auto.commit.interval.ms
 * With checkpointing enabled, offsets are saved only when a checkpoint is taken,
 * which keeps the Kafka offsets consistent with the checkpointed state.
 *      This is controlled by setCommitOffsetsOnCheckpoints(boolean);
 *      it defaults to true, meaning offsets are committed on checkpoint,
 *      and Kafka's own auto-commit mechanism is then ignored.
 */
public class KafkaConsumerSource {

    /**
     * Entry point: builds a Flink streaming job that reads strings from the
     * Kafka topic {@code "topic"} and prints every record to stdout.
     *
     * @param args unused command-line arguments
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The class Javadoc describes checkpoint-based fault tolerance, but the
        // original code never enabled it; turn it on so offsets are committed on
        // each checkpoint (every 5 s) instead of relying on Kafka auto-commit.
        env.enableCheckpointing(5000);

        // Kafka client configuration.
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hadoop-1:9092");
        // NOTE(review): zookeeper.connect is only read by the legacy 0.8 consumer;
        // the 0.10 consumer ignores it. Kept for parity with the original config.
        properties.put("zookeeper.connect", "hadoop-1:2181");
        properties.put("group.id", "flink");
        properties.put("auto.offset.reset", "latest");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        FlinkKafkaConsumer010<String> consumer010 =
                new FlinkKafkaConsumer010<>("topic", new SimpleStringSchema(), properties);

        // Startup strategy: resume from the committed group offsets (the default).
        consumer010.setStartFromGroupOffsets();
        /* Alternative startup strategies:
        consumer010.setStartFromEarliest();
        consumer010.setStartFromLatest();

        Map<KafkaTopicPartition, Long> specificStartupOffsets = new HashMap<>();
        specificStartupOffsets.put(new KafkaTopicPartition("topic", 0), 23L);
        consumer010.setStartFromSpecificOffsets(specificStartupOffsets);

        consumer010.setStartFromTimestamp(1575345813000L);*/

        DataStreamSource<String> topicDataSource = env.addSource(consumer010);

        // Further transformations (map, flatMap, ...) could be chained here.
        topicDataSource.addSink(new PrintSinkFunction<>());

        env.execute("flink kafka job");
    }
}
