package cn._51doit.flink.day09;

import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.CoGroupFunction;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.api.java.tuple.Tuple5;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

import java.time.Duration;

/**
 * Left outer join of two data streams.
 *
 * Every element of the left stream is emitted whether or not it finds a match;
 * a plain join is an inner join and only emits fully matched pairs.
 */
public class EventTimeTumblingWindowLeftOutJoin {

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Left stream: CSV lines "timestamp,key,amount", e.g.:
        // 1000,c1,300
        // 4999,c8,300
        // 4999,c1,300
        // 5000,c2,200
        DataStreamSource<String> lines1 = env.socketTextStream("localhost", 8888);

        // Right stream: CSV lines "timestamp,key,category", e.g.:
        // 1200,c1,图书
        // 5001,c2,家具
        DataStreamSource<String> lines2 = env.socketTextStream("localhost", 9999);

        // Both streams need the same watermarking + parsing, so the duplicated
        // pipeline is factored into a single helper (see parseWithWatermark).
        SingleOutputStreamOperator<Tuple3<Long, String, String>> tpStream1WithWaterMark = parseWithWatermark(lines1);
        SingleOutputStreamOperator<Tuple3<Long, String, String>> tpStream2WithWaterMark = parseWithWatermark(lines2);

        // https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/dev/datastream/operators/joining/
        // Left outer join over 5-second event-time tumbling windows.
        // Expected output, e.g.:
        // (1000,c1,300,1200,图书)   -- matched left element
        // (4999,c8,300,null,null)  -- unmatched left element, right side padded with null
        DataStream<Tuple5<Long, String, String, Long, String>> res = tpStream1WithWaterMark.coGroup(tpStream2WithWaterMark)
                .where(t -> t.f1)
                .equalTo(t -> t.f1)
                .window(TumblingEventTimeWindows.of(Time.seconds(5)))
                .apply(new CoGroupFunction<Tuple3<Long, String, String>, Tuple3<Long, String, String>, Tuple5<Long, String, String, Long, String>>() {

                    /**
                     * Invoked once per key when the window fires. Three cases:
                     * 1. Both streams contain elements with this key in the window:
                     *    neither Iterable is empty.
                     * 2. Only the first stream has elements with this key:
                     *    {@code first} is non-empty, {@code second} is empty.
                     * 3. Only the second stream has elements with this key:
                     *    {@code second} is non-empty, {@code first} is empty.
                     * A left outer join emits every element of {@code first}; when
                     * {@code second} is empty the right-side fields are null.
                     *
                     * @param first  left-stream elements for this key and window
                     * @param second right-stream elements for this key and window
                     * @param out    collector for the joined result
                     * @throws Exception if emitting a record fails
                     */
                    @Override
                    public void coGroup(Iterable<Tuple3<Long, String, String>> first, Iterable<Tuple3<Long, String, String>> second, Collector<Tuple5<Long, String, String, Long, String>> out) throws Exception {
                        for (Tuple3<Long, String, String> left : first) {
                            // Reset per left element: a flag shared across iterations
                            // only works by accident (second is the same Iterable for
                            // every left element) and breaks if that ever changes.
                            boolean joined = false;
                            for (Tuple3<Long, String, String> right : second) {
                                joined = true;
                                out.collect(Tuple5.of(left.f0, left.f1, left.f2, right.f0, right.f2));
                            }
                            if (!joined) {
                                // No match in the right stream: emit the left element
                                // padded with nulls (left outer join semantics).
                                out.collect(Tuple5.of(left.f0, left.f1, left.f2, null, null));
                            }
                        }
                    }
                });

        res.print();

        env.execute();
    }

    /**
     * Assigns event-time timestamps and watermarks (zero out-of-orderness
     * tolerance, i.e. strictly punctual events) using the first CSV field as
     * the event time in milliseconds, then parses each line into
     * (timestamp, key, value).
     *
     * @param lines raw CSV lines of the form "timestamp,key,value"
     * @return a stream of parsed tuples carrying event-time watermarks
     */
    private static SingleOutputStreamOperator<Tuple3<Long, String, String>> parseWithWatermark(DataStream<String> lines) {
        return lines
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<String>forBoundedOutOfOrderness(Duration.ofSeconds(0))
                                .withTimestampAssigner(new SerializableTimestampAssigner<String>() {
                                    @Override
                                    public long extractTimestamp(String element, long recordTimestamp) {
                                        // First CSV field is the event time in ms.
                                        return Long.parseLong(element.split(",")[0]);
                                    }
                                }))
                .map(new MapFunction<String, Tuple3<Long, String, String>>() {
                    @Override
                    public Tuple3<Long, String, String> map(String input) throws Exception {
                        String[] fields = input.split(",");
                        return Tuple3.of(Long.parseLong(fields[0]), fields[1], fields[2]);
                    }
                });
    }
}
