package com.atguigu.chapter07;

import java.time.Duration;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

/**
 * Demonstrates Flink's event-time interval join: each element of the left keyed
 * stream is paired with right-stream elements of the same key whose timestamps
 * fall within [leftTs - 2s, leftTs + 2s].
 *
 * @author cjp
 * @version 1.0
 * @date 2021/3/6 9:12
 */
public class Flink22_Join_IntervalJoin {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        //
        SingleOutputStreamOperator<Tuple2<String, Long>> ds1 = env
                .fromElements(
                        Tuple2.of("a", 1L),
                        Tuple2.of("a", 2L),
                        Tuple2.of("b", 3L),
                        Tuple2.of("c", 4L),
                        Tuple2.of("d", 5L)
                )
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy
                                .<Tuple2<String, Long>>forMonotonousTimestamps()
                                .withTimestampAssigner((value, ts) -> value.f1 * 1000L)
                );

        SingleOutputStreamOperator<Tuple3<String, Long, Integer>> ds2 = env
                .fromElements(
                        Tuple3.of("a", 1L, 1),
                        Tuple3.of("a", 3L, 1),
                        Tuple3.of("b", 2L, 1),
                        Tuple3.of("b", 13L, 1),
                        Tuple3.of("c", 14L, 1),
                        Tuple3.of("d", 15L, 1)
                )
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy
                                .<Tuple3<String, Long, Integer>>forMonotonousTimestamps()
                                .withTimestampAssigner((value, ts) -> value.f1 * 1000L)
                );

        // TODO Interval Join
        KeyedStream<Tuple2<String, Long>, String> ks1 = ds1.keyBy(r -> r.f0);
        KeyedStream<Tuple3<String, Long, Integer>, String> ks2 = ds2.keyBy(r -> r.f0);

        ks1.intervalJoin(ks2)
                .between(Time.seconds(-2), Time.seconds(2))
                .process(new ProcessJoinFunction<Tuple2<String, Long>, Tuple3<String, Long, Integer>, String>() {
                    @Override
                    public void processElement(Tuple2<String, Long> left, Tuple3<String, Long, Integer> right, Context ctx, Collector<String> out) throws Exception {
                        // 进入这个方法，是关联上的数据
                        out.collect(left + " <--> " + right);
                    }
                })
                .print();


        env.execute();
    }
}
/*
    intervalJoin internals (from the source code):
        1) Implemented on top of connect(); the join condition is each stream's keyBy key.
        2) Late data is discarded: an element whose event time < the current watermark
           (the smaller of the two input watermarks) is considered late and not processed.
        3) Each side keeps a MapState buffer; arriving elements are added to it,
           with key = timestamp and value = List of elements at that timestamp.
        4) On arrival, the other side's buffer is scanned; elements whose timestamps fall
           inside the interval are matched and passed to the user's processElement method.
               left.intervalJoin(right)
                   leftTs + lowerBound <= rightTs <= leftTs + upperBound
                   rightTs - upperBound <= leftTs <= rightTs - lowerBound
        5) A timer is registered to clean up the MapState:
                    left-side cleanup time  = leftTs + upperBound
                    right-side cleanup time = rightTs - lowerBound
 */