package tk.xboot.flink.kfk.json;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.formats.json.JsonNodeDeserializationSchema;
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.AssignerWithPeriodicWatermarks;
import org.apache.flink.streaming.api.watermark.Watermark;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;

import javax.annotation.Nullable;
import java.io.InputStream;
import java.text.SimpleDateFormat;
import java.util.Properties;

public class ReadJsonFromKafkaMain {

    /**
     * Flink streaming job: consumes JSON records from a Kafka topic, maps each record
     * to a {@code (coin, cnt, ts)} tuple, assigns event-time timestamps/watermarks from
     * the {@code ts} field, and prints the record with the maximum {@code cnt} per coin
     * per 1-second tumbling event-time window.
     *
     * @param args unused
     * @throws Exception if the consumer properties cannot be loaded or job execution fails
     */
    public static void main(String[] args) throws Exception {

        /*
         * How the Kafka consumer fetches data (translated from the original notes):
         *
         * The consumer reads a partition's data by issuing fetch requests. KafkaConsumer's
         * poll() method only *may* trigger a network fetch. Each fetch is bounded by
         * max.partition.fetch.bytes, while each poll() returns at most max.poll.records
         * records from the locally buffered data.
         *
         * Example: if one fetch buffers 100 records and max.poll.records is 15, then it
         * takes 7 poll() calls to drain that fetch — six polls of 15 records each and a
         * final poll of 10.
         */
        Properties props = new Properties();
        try (InputStream is = ReadJsonFromKafkaMain.class
                .getClassLoader()
                .getResourceAsStream("kfk-consumer.properties")) {
            if (is == null) {
                // Fail fast with a clear message instead of an opaque NPE from Properties#load.
                throw new IllegalStateException(
                        "kfk-consumer.properties not found on classpath");
            }
            props.load(is);
        }

        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Parameterized consumer (no raw type): JsonNodeDeserializationSchema yields ObjectNode.
        FlinkKafkaConsumer011<ObjectNode> consumer = new FlinkKafkaConsumer011<>(
                props.getProperty("topic"),
                new JsonNodeDeserializationSchema(),
                props);
        DataStreamSource<ObjectNode> dataStream = env.addSource(consumer);

        // Map each JSON record to (coin, cnt, ts). Records missing any required field map
        // to null and are filtered out immediately — Flink does not allow null stream
        // elements, so they must never reach downstream operators.
        DataStream<Tuple3<String, Long, Long>> soso = dataStream
                .map(new MapFunction<ObjectNode, Tuple3<String, Long, Long>>() {
                    @Override
                    public Tuple3<String, Long, Long> map(ObjectNode value) throws Exception {
                        if (value != null
                                && value.get("cnt") != null
                                && value.get("coin") != null
                                && value.get("ts") != null) { // ts check was missing: NPE on records without "ts"
                            return new Tuple3<>(
                                    value.get("coin").textValue(),
                                    value.get("cnt").longValue(),
                                    value.get("ts").longValue());
                        }
                        return null;
                    }
                })
                .filter(t -> t != null);

        // Assign event-time timestamps from f2 (ts) with a periodic watermark that lags
        // the maximum observed timestamp by a fixed out-of-orderness bound.
        DataStream<Tuple3<String, Long, Long>> watermarkDS = soso
                .assignTimestampsAndWatermarks(new AssignerWithPeriodicWatermarks<Tuple3<String, Long, Long>>() {
                    // Primitives instead of boxed Longs: avoids pointless boxing/unboxing.
                    private long currentMaxTimestamp = 0L;
                    private final long maxOutOfOrderness = 1000L; // max allowed out-of-orderness: 1 second

                    @Nullable
                    @Override
                    public Watermark getCurrentWatermark() {
                        return new Watermark(currentMaxTimestamp - maxOutOfOrderness);
                    }

                    @Override
                    public long extractTimestamp(Tuple3<String, Long, Long> element, long previousElementTimestamp) {
                        if (element.f2 != null) {
                            currentMaxTimestamp = Math.max(element.f2, currentMaxTimestamp);
                            return element.f2;
                        }
                        return 0;
                    }
                });

        // Key by coin (f0), tumble over 1-second event-time windows, keep the tuple with
        // the maximum cnt (f1) in each window, and print it.
        watermarkDS.keyBy(0)
                .window(TumblingEventTimeWindows.of(Time.seconds(1)))
                .maxBy(1)
                .print();

        env.execute();
    }
}
