package timeandwindow;

/*
 * WindowJoin: records from two streams that share the same key (the join
 * condition) are joined successfully only if they fall into the same
 * time-range window.
 *
 * Caveat: two records with the same key that differ by only a few
 * milliseconds can straddle a window boundary — one lands in the previous
 * window, the other in the next — so the join silently fails for them.
 */

import com.atguigu.pojo.OrderDetailEvent;
import com.atguigu.pojo.OrderEvent;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatJoinFunction;
import org.apache.flink.api.common.functions.JoinFunction;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

import java.time.Duration;

/**
 * Demonstrates a tumbling event-time window join between an order stream and an
 * order-detail stream. Records from the two sockets are joined on their order id
 * when they land in the same 10-second window.
 */
public class Flink14_WindowJoin {

    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Order stream: "id,ts" lines from a socket. Event-time watermarks with
        // zero out-of-orderness tolerance (Duration.ZERO), timestamp taken from
        // the record's own ts field.
        SingleOutputStreamOperator<OrderEvent> orderDs = env.socketTextStream("hadoop102", 9999)
                .map(line -> {
                    String[] fields = line.split(",");
                    return new OrderEvent(fields[0].trim(), Long.valueOf(fields[1].trim()));
                })
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<OrderEvent>forBoundedOutOfOrderness(Duration.ZERO)
                                .withTimestampAssigner((event, ts) -> event.getTs()));

        orderDs.print("order");

        // Order-detail stream: "orderId,item,ts" lines from a second socket,
        // with the same zero-lateness watermark strategy.
        SingleOutputStreamOperator<OrderDetailEvent> orderDetailDs = env.socketTextStream("hadoop102", 8888)
                .map(line -> {
                    String[] fields = line.split(",");
                    return new OrderDetailEvent(fields[0].trim(), fields[1].trim(), Long.valueOf(fields[2].trim()));
                })
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<OrderDetailEvent>forBoundedOutOfOrderness(Duration.ZERO)
                                .withTimestampAssigner((event, ts) -> event.getTs()));

        orderDetailDs.print("detail");

        // Window join: pairs with equal order ids that fall into the same
        // 10-second tumbling event-time window are emitted; unmatched records
        // are dropped (inner-join semantics).
        orderDs.join(orderDetailDs)
                .where(OrderEvent::getId)          // key of the left (order) stream
                .equalTo(OrderDetailEvent::getId)  // key of the right (detail) stream
                .window(TumblingEventTimeWindows.of(Time.seconds(10)))
                .apply(new FlatJoinFunction<OrderEvent, OrderDetailEvent, String>() {
                    @Override
                    public void join(OrderEvent first, OrderDetailEvent second, Collector<String> out) throws Exception {
                        out.collect(first + "==" + second);
                    }
                })
                .print("JOIN");

        try {
            env.execute();
        } catch (Exception e) {
            // Surface any job-submission/runtime failure without forcing callers
            // to declare a checked exception.
            throw new RuntimeException(e);
        }
    }
}
