package com.atguigu.flink.chapter07;

import java.time.Duration;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

/**
 * Demonstrates Flink's event-time interval join: each element of the left
 * keyed stream is matched with right-stream elements of the same key whose
 * event timestamps fall within {@code [left.ts - 5s, left.ts + 5s]}.
 *
 * @author cjp
 * @version 1.0
 * @date 2021/1/20 14:06
 */
public class Flink20_Process_IntervalJoin {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Left stream: (key, seconds).
        // NOTE: these timestamps are NOT monotonically increasing ("c" at 1s
        // arrives after "b" at 5s), so forMonotonousTimestamps() would violate
        // its contract here and could classify "c" as late (the interval join
        // silently drops late records). A bounded-out-of-orderness watermark
        // large enough to cover the 4s regression is used instead.
        SingleOutputStreamOperator<Tuple2<String, Long>> ds1 = env
                .fromElements(
                        Tuple2.of("a", 1L),
                        Tuple2.of("b", 5L),
                        Tuple2.of("c", 1L),
                        Tuple2.of("d", 11L)
                )
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy
                                .<Tuple2<String, Long>>forBoundedOutOfOrderness(Duration.ofSeconds(5))
                                .withTimestampAssigner((value, ts) -> value.f1 * 1000L) // seconds -> epoch millis
                );

        // Right stream: (key, seconds, count). These timestamps (3, 8, 13, 23)
        // really are monotonically increasing, so forMonotonousTimestamps() is
        // valid for this stream.
        SingleOutputStreamOperator<Tuple3<String, Long, Integer>> ds2 = env
                .fromElements(
                        Tuple3.of("a", 3L, 1),
                        Tuple3.of("b", 8L, 11),
                        Tuple3.of("c", 13L, 1),
                        Tuple3.of("d", 23L, 1)
                ).assignTimestampsAndWatermarks(
                        WatermarkStrategy
                                .<Tuple3<String, Long, Integer>>forMonotonousTimestamps()
                                .withTimestampAssigner((value, ts) -> value.f1 * 1000L) // seconds -> epoch millis
                );

        // Interval join requires both sides to be keyed by the join key.
        KeyedStream<Tuple2<String, Long>, String> ks1 = ds1.keyBy(tuple2 -> tuple2.f0);
        KeyedStream<Tuple3<String, Long, Integer>, String> ks2 = ds2.keyBy(tuple3 -> tuple3.f0);

        ks1
                .intervalJoin(ks2)
                // Bounds are OFFSETS relative to the left element's timestamp:
                // a right element joins when right.ts is in
                // [left.ts - 5s, left.ts + 5s] (both bounds inclusive).
                .between(Time.seconds(-5), Time.seconds(5))
                .process(new ProcessJoinFunction<Tuple2<String, Long>, Tuple3<String, Long, Integer>, String>() {
                    @Override
                    public void processElement(Tuple2<String, Long> left, Tuple3<String, Long, Integer> right, Context ctx, Collector<String> out) throws Exception {
                        // Invoked once per matching (left, right) pair.
                        out.collect(left + " <============> " + right);
                    }
                })
                .print();

        env.execute();
    }

}
/*
    interval join
        1. Under the hood it uses connect, with each stream keyed by its join condition.
        2. Processing logic:
            1) State is initialized: each stream gets a buffer (a Map-typed state with
               key = event time, value = the record itself) used to store incoming data.
            2) Lateness check: eventtime < watermark (the smaller of the two streams'
               watermarks is the one that counts).
            3) For every incoming record, the other stream's buffer is scanned; any
               record whose timestamp falls within the configured range is joined.
            4) Buffer (Map state) cleanup: a timer is registered based on the record's
               timestamp combined with the upper/lower bounds; when it fires, the
               corresponding entry is deleted.
 */