package com.atguigu.flink.streamapi.source;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;

/**
 * Created by Smexy on 2023/1/13
 */
public class Demo5_KafkaSourceNew
{
    /**
     * Demo: read string records from a Kafka topic with the new {@code KafkaSource}
     * connector and print them to stdout.
     *
     * @param args unused
     * @throws Exception if the job fails to submit or execute; propagated so the
     *         JVM exits non-zero instead of masking the failure
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Single parallel task so printed output is not interleaved across subtasks.
        env.setParallelism(1);

        /*
            KafkaSource.<String>builder() is a generic factory method; the generic
            call syntax is:    .<T>methodName(args)

            <OUT>: the type emitted downstream after reading from Kafka. This is
                   usually String — the most common Kafka payload is a JSON string.

            -----------
            Starting-offset options:
            .setStartingOffsets(OffsetsInitializer.earliest()):         from the earliest offset
            .setStartingOffsets(OffsetsInitializer.latest()):           from the latest offset
            .setStartingOffsets(OffsetsInitializer.committedOffsets()): from the committed offsets
            .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST)):
                fallback position when this consumer group has never consumed
                or committed before
            .setStartingOffsets(OffsetsInitializer.offsets()):          custom offsets
         */
        KafkaSource<String> source = KafkaSource.<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setTopics("topicA")
            .setGroupId("test")
            // Starting position: committed offsets, falling back to LATEST
            // when the group has no committed offsets yet.
            .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.LATEST))
            .setValueOnlyDeserializer(new SimpleStringSchema())
            // Pass extra Kafka consumer properties through to the underlying client.
            // NOTE(review): with checkpointing enabled, KafkaSource commits offsets
            // on checkpoint; these auto-commit settings matter only without
            // checkpointing — confirm against the deployment config.
            .setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
            .setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000")
            .build();

        /*
            Attach the KafkaSource to the environment:

            Source<OUT, ?, ?> source
            WatermarkStrategy<OUT> timestampsAndWatermarks: watermark strategy
                (covered later); noWatermarks() since event time is unused here
            String sourceName: a display name shown in the web UI / monitoring
         */
        DataStreamSource<String> ds = env.fromSource(source, WatermarkStrategy.noWatermarks(), "kafka");

        ds.print();

        // Let execution failures propagate. The previous try/catch with
        // printStackTrace() swallowed the exception, so a failed job exited
        // with status 0 and looked like a successful run.
        env.execute();
    }
}
