package com.atguigu.gmall.realtime.test;

import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

/**
 * @author Felix
 * @date 2023/11/11
 * This example demonstrates Flink's interval join.
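 * Before running, start socket servers on hadoop102 (e.g. "nc -lk 8888" and
 * "nc -lk 9999"); the job reads one event per line from each port.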
 */
public class Flink03_IntervalJoin {
    public static void main(String[] args) throws Exception {
        //TODO 1. Prepare the basic environment
        //1.1 Create the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        //1.2 Set the parallelism
        env.setParallelism(2);
        //TODO 2. Checkpoint-related settings (omitted)
        //TODO 3. Read employee data from the specified socket port
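        //  Expected line format: empno,ename,deptno,ts (ts in milliseconds),
        //  e.g. "1,zhangsan,10,1000"; the field names here are assumed from the
        //  Emp constructor and getters used below.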
        SingleOutputStreamOperator<Emp> empDS = env
            .socketTextStream("hadoop102", 8888)
            .map(
                lineStr -> {
                    String[] fieldArr = lineStr.split(",");
                    return new Emp(Integer.valueOf(fieldArr[0]), fieldArr[1], Integer.valueOf(fieldArr[2]), Long.valueOf(fieldArr[3]));
                }
            ).assignTimestampsAndWatermarks(
                // WatermarkStrategy.<Emp>forBoundedOutOfOrderness(Duration.ofSeconds(5))
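                // forMonotonousTimestamps assumes event timestamps arrive in
                // ascending order; the commented-out alternative above would tolerate
                // up to 5 s of out-of-order events (and needs a java.time.Duration import).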
                WatermarkStrategy
                    .<Emp>forMonotonousTimestamps()
                    .withTimestampAssigner(
                        new SerializableTimestampAssigner<Emp>() {
                            @Override
                            public long extractTimestamp(Emp emp, long recordTimestamp) {
                                return emp.getTs();
                            }
                        }
                    )
            );
        empDS.print("emp:");
        //TODO 4. Read department data from the specified socket port
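        //  Expected line format: deptno,dname,ts (ts in milliseconds),
        //  e.g. "10,sales,1000"; field names assumed from the Dept constructor used below.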
        SingleOutputStreamOperator<Dept> deptDS = env
            .socketTextStream("hadoop102", 9999)
            .map(
                lineStr -> {
                    String[] fieldArr = lineStr.split(",");
                    return new Dept(Integer.valueOf(fieldArr[0]), fieldArr[1], Long.valueOf(fieldArr[2]));
                }
            ).assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<Dept>forMonotonousTimestamps()
                    .withTimestampAssigner(
                        new SerializableTimestampAssigner<Dept>() {
                            @Override
                            public long extractTimestamp(Dept dept, long recordTimestamp) {
                                return dept.getTs();
                            }
                        }
                    )
            );
        deptDS.print("dept:");
        //TODO 5. Join employees with departments using an interval join
        empDS
            .keyBy(Emp::getDeptno)
            .intervalJoin(deptDS.keyBy(Dept::getDeptno))
            .between(Time.milliseconds(-5), Time.milliseconds(5))
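            // With these bounds, an Emp record joins every Dept record sharing the same
            // deptno whose timestamp satisfies emp.ts - 5 ms <= dept.ts <= emp.ts + 5 ms.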
            .process(
                new ProcessJoinFunction<Emp, Dept, Tuple2<Emp, Dept>>() {
                    @Override
                    public void processElement(Emp emp, Dept dept, Context ctx, Collector<Tuple2<Emp, Dept>> out) throws Exception {
                        out.collect(Tuple2.of(emp, dept));
                    }
                }
            ).print();

        env.execute();
    }
}
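
// Emp and Dept are referenced above but not defined in this file; they presumably
// live elsewhere in the project. A minimal sketch follows so the example compiles on
// its own; the field names are assumed from the constructor calls and getters used
// above. Package-private classes do not qualify as Flink POJOs, so Flink falls back
// to Kryo serialization here, which is fine for a demo.
class Emp {
    private Integer empno;
    private String ename;
    private Integer deptno;
    private Long ts;

    public Emp() { }

    public Emp(Integer empno, String ename, Integer deptno, Long ts) {
        this.empno = empno;
        this.ename = ename;
        this.deptno = deptno;
        this.ts = ts;
    }

    public Integer getDeptno() { return deptno; }

    public Long getTs() { return ts; }

    @Override
    public String toString() {
        return "Emp{empno=" + empno + ", ename='" + ename + "', deptno=" + deptno + ", ts=" + ts + "}";
    }
}

class Dept {
    private Integer deptno;
    private String dname;
    private Long ts;

    public Dept() { }

    public Dept(Integer deptno, String dname, Long ts) {
        this.deptno = deptno;
        this.dname = dname;
        this.ts = ts;
    }

    public Integer getDeptno() { return deptno; }

    public Long getTs() { return ts; }

    @Override
    public String toString() {
        return "Dept{deptno=" + deptno + ", dname='" + dname + "', ts=" + ts + "}";
    }
}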
