import com.nepu.gmall.realtime.bean.UnknowBean;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

import java.time.Duration;

/**
 * Demonstrates Flink's interval join.
 *
 * <p>Unlike window joins (tumbling / sliding / session), an interval join does not
 * pair elements by window. Instead, for each element of the left stream with event
 * timestamp {@code t}, it joins every element of the right stream (with the same key)
 * whose timestamp lies within {@code [t + lowerBound, t + upperBound]} — here
 * {@code [t - 5s, t + 2s]}. Interval joins require event time and work on keyed streams.
 *
 * @author chenshuaijun
 * @create 2023-02-24 19:22
 */
public class Flink_IntervalJoinExample {

    /** Hostname of both socket sources. */
    private static final String HOST = "hadoop102";

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The two inputs share identical parsing and watermarking; only the port
        // differs, so the common setup lives in socketBeanStream().
        SingleOutputStreamOperator<UnknowBean> streamOne = socketBeanStream(env, 8888);
        SingleOutputStreamOperator<UnknowBean> streamTwo = socketBeanStream(env, 9999);

        // Interval join: for each left element with timestamp t, emit a pair for
        // every right element with the same id whose timestamp is in [t - 5s, t + 2s].
        streamOne.keyBy(UnknowBean::getId)
                .intervalJoin(streamTwo.keyBy(UnknowBean::getId))
                .between(Time.seconds(-5), Time.seconds(2))
                .process(new ProcessJoinFunction<UnknowBean, UnknowBean, String>() {
                    @Override
                    public void processElement(UnknowBean left, UnknowBean right, Context ctx, Collector<String> out) throws Exception {
                        out.collect(left.toString() + "--->" + right.toString());
                    }
                })
                .print();

        env.execute();
    }

    /**
     * Builds an event-timed stream of {@link UnknowBean} from a text socket source.
     *
     * <p>Each input line is expected to contain three comma-separated fields which map
     * positionally to the bean's constructor. The third field is parsed as a long and
     * used as the event timestamp, with zero allowed out-of-orderness (the watermark
     * tracks the max timestamp exactly).
     *
     * <p>NOTE(review): the event timestamp is taken from {@code getScore()} — presumably
     * that field actually carries epoch milliseconds despite its name; confirm against
     * {@code UnknowBean} and the input data format.
     *
     * @param env  the execution environment to attach the source to
     * @param port TCP port of the socket source on {@link #HOST}
     * @return a timestamped, watermarked stream of parsed beans
     */
    private static SingleOutputStreamOperator<UnknowBean> socketBeanStream(StreamExecutionEnvironment env, int port) {
        return env.socketTextStream(HOST, port)
                .map(new MapFunction<String, UnknowBean>() {
                    @Override
                    public UnknowBean map(String value) throws Exception {
                        String[] fields = value.split(",");
                        return new UnknowBean(fields[0], fields[1], fields[2]);
                    }
                })
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<UnknowBean>forBoundedOutOfOrderness(Duration.ZERO)
                                .withTimestampAssigner(
                                        (SerializableTimestampAssigner<UnknowBean>)
                                                (element, recordTimestamp) -> Long.parseLong(element.getScore())));
    }
}
