package com.fwmagic.flink.window;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.datastream.WindowedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.flink.streaming.util.serialization.JSONKeyValueDeserializationSchema;

import java.util.Properties;
import java.util.regex.Pattern;

/**
 * Tumbling-window word count driven by EventTime, reading from a parallel
 * source (a Kafka topic with multiple partitions).
 */
public class EventTimeTumblingWindowOfParalleSource {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        // Window by event time, i.e. the timestamp carried inside each record.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Kafka consumer configuration.
        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "localhost:9092");
        kafkaProps.setProperty("group.id", "gwc10");

        // Topic with 3 partitions, created via:
        // bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 3 --topic gwc10
        String topic = "gwc10";

        // Parallel source: one Kafka consumer instance per topic partition.
        FlinkKafkaConsumer011<String> consumer =
                new FlinkKafkaConsumer011<>(topic, new SimpleStringSchema(), kafkaProps);

        /*
         * Watermarks are Flink's mechanism for deferring window firing:
         *   watermark = max event time seen in the stream - allowed out-of-orderness
         * A window fires once the watermark reaches (or passes) its end boundary.
         *
         * The timestamp extractor below only reads the event time from each
         * record (first CSV field, in milliseconds); it does not modify the data.
         *
         * Sample input:
         *   1000,spark,1
         *   2000,spark,2
         *   4998,spark,3
         *   4999,flink,10
         *   4999,hive,10
         *   4999,spark,10
         *   5000,scala,5
         *   6000,java,3
         *   8000,java,1
         *   9999,scala,1
         *   9999,flink,2
         *   9999,hive,8
         *
         * With a 5s tumbling window, Flink partitions time into [0,5s), [5s,10s);
         * the first window fires at 4999 ms. Expected output:
         *   4> (flink,10)
         *   1> (spark,16)
         *   1> (hive,10)
         *   1> (scala,6)
         *   4> (flink,2)
         *   1> (hive,8)
         *   1> (java,4)
         *
         * Takeaways:
         *   - With a parallelism-1 source (e.g. a socket source) there is a single
         *     partition, so as soon as one key's data satisfies the trigger
         *     condition in that partition, the other keys' data in the same window
         *     is emitted as well.
         *   - With a multi-parallelism source (Kafka) each topic partition must
         *     individually satisfy the time condition before the window fires.
         *     Drawback: if some partition stops producing data for a while, the
         *     last batch of event-time data may never be triggered, so records can
         *     go uncounted.
         */
        env.addSource(consumer)
                .assignTimestampsAndWatermarks(
                        new BoundedOutOfOrdernessTimestampExtractor<String>(Time.seconds(0)) {
                            @Override
                            public long extractTimestamp(String record) {
                                // First CSV field is the event-time timestamp (ms).
                                return Long.parseLong(record.split(",")[0]);
                            }
                        })
                // "timestamp,word,count" -> (word, count)
                .map(new MapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> map(String record) throws Exception {
                        String[] parts = record.split(",");
                        return Tuple2.of(parts[1], Integer.parseInt(parts[2]));
                    }
                })
                // Group by the word (tuple field 0).
                .keyBy(0)
                // 5-second event-time tumbling window; equivalent to keyed.timeWindow(Time.seconds(5)).
                .window(TumblingEventTimeWindows.of(Time.seconds(5)))
                // Sum the counts (tuple field 1) per key per window.
                .sum(1)
                .print();

        env.execute("EventTimeTumblingWindowOfParalleSource");
    }
}
