package com.atguigu.flink.watermark;

import com.alibaba.fastjson.JSON;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Created by Smexy on 2023/2/24
 *
 * Demonstrates the KafkaSource connector (the recommended source API since
 * Flink 1.14; also usable in 1.13) with a source-level watermark strategy.
 */
public class Demo7_KafkaSource
{
    /**
     * Reads JSON strings from Kafka topic {@code topicB}, assigns event-time
     * watermarks at the source from the records' {@code ts} field, and prints
     * each record to stdout.
     *
     * @param args unused
     * @throws Exception if the Flink job fails; propagated so the process
     *                   exits non-zero instead of silently succeeding
     */
    public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        // Pin the local web UI to a fixed port for easy inspection.
        conf.setInteger("rest.port", 3333);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        env.setParallelism(1);
        // Emit watermarks every 3 s (default is 200 ms) so their advance is easy to observe.
        env.getConfig().setAutoWatermarkInterval(3000);
        // Disable operator chaining so each operator appears separately in the web UI.
        env.disableOperatorChaining();

        /*
         * Monotonous-timestamp strategy: assumes event times never decrease
         * within a Kafka partition. The event-time attribute is extracted from
         * the "ts" field of the JSON record value.
         * NOTE(review): getLong("ts") returns a boxed Long — if "ts" is absent
         * or the value is not valid JSON this throws an NPE/parse error at
         * runtime; confirm upstream guarantees the field is always present.
         */
        WatermarkStrategy<String> watermarkStrategy = WatermarkStrategy
            .<String>forMonotonousTimestamps()
            .withTimestampAssigner((e, ts) -> JSON.parseObject(e).getLong("ts"));

        KafkaSource<String> kafkaSource = KafkaSource
            .<String>builder()
            .setBootstrapServers("hadoop102:9092")
            .setGroupId("atguigu2")
            .setTopics("topicB")
            .setValueOnlyDeserializer(new SimpleStringSchema())
            // Start from the earliest offset so the demo replays all existing records.
            .setStartingOffsets(OffsetsInitializer.earliest())
            .build();

        /*
         * When reading from Kafka, watermarks are generated directly at the
         * source based on the supplied strategy (per-partition, then merged).
         */
        env.fromSource(kafkaSource, watermarkStrategy, "kafka")
            .map(x -> x)
            .print();

        // Propagate failures: the previous try/catch + printStackTrace() swallowed
        // the exception, so a failed job still exited with status 0.
        env.execute();
    }
}
