package com.atguigu.chapter13;

import com.atguigu.chapter5.source.WaterSensor;
import org.apache.flink.api.common.eventtime.SerializableTimestampAssigner;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

/**
 * @ClassName: Flink01_interval_join
 * @Description: Demonstrates an interval join: for each element of one keyed
 *               stream, joins it with elements of the other keyed stream whose
 *               event timestamps fall inside a configured time range.
 * @Author: kele
 * @Date: 2021/4/14 15:33
 **/
public class Flink01_interval_join {

    public static void main(String[] args) throws Exception {

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 20000);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        env.setParallelism(1);

        // Both inputs share identical parsing/watermark logic, so build them
        // through a single helper instead of duplicating the pipeline.
        SingleOutputStreamOperator<WaterSensor> s1 = readWaterSensorStream(env, 8888);
        SingleOutputStreamOperator<WaterSensor> s2 = readWaterSensorStream(env, 9999);

        // An interval join requires both streams to be keyed (here: by sensor id).
        s1
                .keyBy(WaterSensor::getId)
                .intervalJoin(s2.keyBy(WaterSensor::getId))
                // Elements whose event timestamps differ by at most 5s
                // (in either direction) are eligible to join.
                .between(Time.seconds(-5), Time.seconds(5))
                .process(new ProcessJoinFunction<WaterSensor, WaterSensor, String>() {

                    @Override
                    public void processElement(WaterSensor left,
                                               WaterSensor right,
                                               Context ctx, Collector<String> out) throws Exception {
                        // Emit each matched pair as "left,right".
                        out.collect(left + "," + right);
                    }
                })
                .print();

        // Propagate job failures instead of swallowing them with
        // printStackTrace(): a failed job should exit with a non-zero status.
        env.execute();
    }

    /**
     * Reads "id,ts,vc" lines from a socket on the given port of host
     * "hadoop162", parses each line into a {@link WaterSensor}, and assigns
     * event-time timestamps with monotonically-increasing watermarks.
     *
     * @param env  the execution environment to attach the source to
     * @param port the socket port to read text lines from
     * @return a watermarked stream of {@link WaterSensor} elements
     */
    private static SingleOutputStreamOperator<WaterSensor> readWaterSensorStream(
            StreamExecutionEnvironment env, int port) {
        return env
                .socketTextStream("hadoop162", port)
                .map(value -> {
                    String[] datas = value.split(",");
                    return new WaterSensor(datas[0], Long.valueOf(datas[1]), Integer.valueOf(datas[2]));
                })
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy
                                .<WaterSensor>forMonotonousTimestamps()
                                .withTimestampAssigner(new SerializableTimestampAssigner<WaterSensor>() {
                                    @Override
                                    public long extractTimestamp(WaterSensor element, long recordTimestamp) {
                                        // ts is multiplied by 1000, so socket input is expected in
                                        // SECONDS. NOTE(review): the original comment claimed
                                        // millisecond-level input, which contradicts the *1000 —
                                        // confirm the intended unit.
                                        return element.getTs() * 1000;
                                    }
                                })
                );
    }

}
