package com.alison.datastream.exactlyonce;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Properties;

/**
 * Demo job: reads from a Kafka topic with {@code isolation.level=read_committed} so that
 * only records from committed Kafka transactions are consumed — the consumer-side half of
 * an end-to-end exactly-once pipeline (the producer side must write transactionally).
 *
 * <p>NOTE(review): {@link FlinkKafkaConsumer} is the legacy connector and is deprecated in
 * recent Flink versions; the already-imported {@link KafkaSource} is its replacement.
 * Kept here unchanged because this example demonstrates the legacy API.
 */
public class E2_FlinkKafkaConsumerTest {

    /** Job name shown in the Flink dashboard, derived from the connector class. */
    private static final String TASK_NAME = FlinkKafkaConsumer.class.getSimpleName();

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Single parallel task so printed output is ordered and easy to follow.
        env.setParallelism(1);

        Properties sourceProperties = new Properties();
        sourceProperties.setProperty("bootstrap.servers", "192.168.56.101:9092");
        sourceProperties.setProperty("group.id", "kafkaConsumerGroup");
        // NOTE(review): "client.id.prefix" is a KafkaSource-builder option, not a standard
        // Kafka consumer config; for FlinkKafkaConsumer the plain "client.id" property is
        // the one the Kafka client recognizes — verify which one is intended here.
        sourceProperties.setProperty("client.id.prefix", "FlinkKafkaConsumerTest");
        // End-to-end exactly-once: the consumer must read only committed transactional
        // records, so set isolation.level=read_committed (default is read_uncommitted).
        sourceProperties.put("isolation.level", "read_committed");

        // Typed consumer (String records) — avoids raw-type unchecked warnings.
        FlinkKafkaConsumer<String> kafkaSource =
                new FlinkKafkaConsumer<>("flink_output_topic", new SimpleStringSchema(), sourceProperties);
//        FlinkKafkaConsumer<String> kafkaSource =
//                new FlinkKafkaConsumer<>("flink_input_topic", new SimpleStringSchema(), sourceProperties);
//        kafkaSource.setStartFromLatest();
        // Start from the earliest available offset (ignores committed group offsets).
        kafkaSource.setStartFromEarliest();
//        kafkaSource.setStartFromSpecificOffsets(Collections.singletonMap(new KafkaTopicPartition("flink_output_topic", 0), 2L));

        // Attach the source and print every record to stdout with the "out" prefix.
        env.addSource(kafkaSource).name("kafkaSource").print("out");
        env.execute(TASK_NAME);
    }
}