package com.atguigu.chapter07;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.time.Duration;
import java.util.Properties;

/**
 * Demonstrates watermark idleness handling: a Kafka partition that stops
 * producing data is marked idle after a timeout so it no longer holds back
 * the overall (minimum-based) watermark.
 *
 * @author cjp
 * @version 1.0
 * @date 2021/3/3 14:19
 */
public class Flink18_Watermark_Idle {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Kafka connection settings for the source.
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "hadoop1:9092,hadoop2:9092,hadoop3:9092");
        kafkaProps.setProperty("group.id", "fffffffffff");

        // Source: read plain strings from the "flink0923" topic.
        FlinkKafkaConsumer<String> consumer =
                new FlinkKafkaConsumer<>("flink0923", new SimpleStringSchema(), kafkaProps);

        // Per-partition watermarks, tolerating 2s of out-of-order events.
        // withIdleness(30s): a partition that emits nothing for 30 seconds is
        // flagged idle and excluded from the downstream min-watermark calculation,
        // so an empty partition cannot stall windows/timers forever.
        consumer.assignTimestampsAndWatermarks(
                WatermarkStrategy
                        .<String>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                        .withIdleness(Duration.ofSeconds(30)));

        env.addSource(consumer).print();

        env.execute();
    }
}
/*
    Why the idleness timeout matters:
        In practice a Kafka partition may receive no data for a long time. Since the
        downstream watermark is the minimum of all input watermarks, that silent
        partition keeps the watermark from advancing, which in turn prevents
        windows, computations, and timers from firing.

        With an idleness timeout configured, a partition that exceeds the timeout is
        marked idle and is no longer considered when advancing the watermark.
 */