package cn.itcast.flink.join;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.time.Duration;
import java.util.Properties;

/**
 * Author itcast
 * Date 2022/1/14 15:11
 * Desc Reads lines from a Kafka topic and runs a streaming word count over them.
 * A bounded-out-of-orderness watermark strategy (30 s) is attached to the source
 * so that late / out-of-order records are still accounted for in event time.
 */
public class WatermarkSourceDemo {
    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 keeps the demo output deterministic and easy to read.
        env.setParallelism(1);
        // Checkpoint every 1000 ms; by default state snapshots live on the JobManager heap.
        env.enableCheckpointing(1000);

        // Kafka consumer configuration.
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"node1:9092,node2:9092,node3:9092");
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG,"_consumer_data_");
        // NOTE(review): with Flink checkpointing enabled these two auto-commit settings are
        // ignored — offsets are committed on checkpoint completion instead
        // (see setCommitOffsetsOnCheckpoints below). Kept for the non-checkpointed case.
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"true");
        props.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,"1000");

        // Create the Kafka source reading String records from topic "writetopic".
        FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
                "writetopic",
                new SimpleStringSchema(),
                props
        );
        // On a fresh start (no savepoint/checkpoint), begin reading from the latest offsets.
        consumer.setStartFromLatest();
        // Commit the consumed offsets back to the Kafka brokers when a checkpoint completes,
        // so external tooling can observe the group's progress.
        consumer.setCommitOffsetsOnCheckpoints(true);

        // Watermark strategy: tolerate records arriving up to 30 seconds out of order.
        // FIX: removed the deprecated BoundedOutOfOrdernessTimestampExtractor that returned
        // System.currentTimeMillis() — it was dead code (immediately overwritten by this call)
        // and a wall-clock timestamp would have defeated event-time semantics anyway.
        // With no explicit timestamp assigner, the Kafka record timestamps are used.
        consumer.assignTimestampsAndWatermarks(
                WatermarkStrategy.forBoundedOutOfOrderness(Duration.ofSeconds(30)));

        // Build the pipeline: source -> split lines into words -> keyBy word -> running count.
        DataStreamSource<String> source = env.addSource(consumer);
        source.flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                // Emit (word, 1) for every space-separated token of the incoming line.
                for (String word : value.split(" ")) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        }).keyBy(t -> t.f0)
                .sum(1)
                // Print the running counts to stdout.
                .print();

        // Launch the streaming job.
        env.execute();
    }
}
