package com.arch.flink.window;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple3;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SideOutputDataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.PrintSinkFunction;
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor;
import org.apache.flink.streaming.api.windowing.assigners.EventTimeSessionWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.OutputTag;

/**
 * 数据:
 * Sensor1 1000
 * Sensor1 7000
 * <p>
 * Sensor1 10000
 * Sensor1 15000
 * Sensor1 17000
 * Sensor1 24000
 * <p>
 * <p>
 * 触发计算:
 * (Sensor1,1000,1)
 * (Sensor1,7000,4)
 * <p>
 * 计算过程:
 * 输入第1个参数表示传感器id,空格后第2个参数表示时间，进行前一次输入与当前输入时间对比是否超过时间间隔。
 * <p>
 * 第一个会话窗口 7000-1000=6秒 超过活动时间间隔5秒+延迟的watermark1秒，触发计算
 * 第二个会话窗口 24000-17000=7秒 超过活动时间间隔5秒+延迟的watermark1秒，触发计算
 *
 *
 *
 * ============== 另外的实际业务需求
 * 参考资料: https://cloud.tencent.com/developer/article/1539537
 * 每个用户在一个独立的session中平均页面访问时长，session 和 session 的间隔时间是15分钟
 *
 * 我们使用 flink 来解决这个问题
 * （1）读取 kafka 中的数据
 * （2）基于用户的 userId，设置 一个 session window 的 gap，在同一个session window 中的数据表示用户活跃的区间
 * （3）最后使用一个自定义的 window Function
 *
 *
 * @author pizhihui
 * @date 2024-05-17 10:54
 */
public class SessionWindow {

    /**
     * Reads "sensorId eventTimeMillis" pairs from a local socket, assigns
     * event-time watermarks with 1 second of allowed out-of-orderness, and
     * counts events per key inside 5-second event-time session windows.
     * Records arriving after the watermark but within the 60-minute allowed
     * lateness re-fire the window; records later than that are routed to a
     * side output.
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStreamSource<String> source = env.socketTextStream("127.0.0.1", 9999);

        // Parse "sensorId eventTimeMillis" into (id, timestamp, 1); the
        // trailing 1 is the per-event count that .sum(2) aggregates below.
        SingleOutputStreamOperator<Tuple3<String, Long, Integer>> mapStream =
                source.map(new MapFunction<String, Tuple3<String, Long, Integer>>() {
                    @Override
                    public Tuple3<String, Long, Integer> map(String value) throws Exception {
                        String[] arr = value.split(" ");
                        return Tuple3.of(arr[0], Long.parseLong(arr[1]), 1);
                    }

                });

        // Event-time timestamps come from field f1; the watermark trails the
        // maximum seen timestamp by 1 second (bounded out-of-orderness).
        SingleOutputStreamOperator<Tuple3<String, Long, Integer>> mapStreamWithWatermark =
                mapStream.assignTimestampsAndWatermarks(
                        new BoundedOutOfOrdernessTimestampExtractor<Tuple3<String, Long, Integer>>(Time.seconds(1)) {
                            @Override
                            public long extractTimestamp(Tuple3<String, Long, Integer> element) {
                                return element.f1;
                            }
                        }).setParallelism(1);

        KeyedStream<Tuple3<String, Long, Integer>, String> keyedStream =
                mapStreamWithWatermark.keyBy(k -> k.f0);

        // BUGFIX: OutputTag must be instantiated as an anonymous subclass
        // (note the trailing "{}"). With a plain diamond `new OutputTag<>(...)`
        // the generic element type is erased and Flink throws an
        // InvalidTypesException at runtime because it cannot infer the
        // side-output type.
        OutputTag<Tuple3<String, Long, Integer>> lateData =
                new OutputTag<Tuple3<String, Long, Integer>>("late_data") {};

        // Per-key session windows that close after a 5-second event-time gap.
        SingleOutputStreamOperator<Tuple3<String, Long, Integer>> windowStream =
                keyedStream
                        .window(EventTimeSessionWindows.withGap(Time.seconds(5)))
                        .allowedLateness(Time.minutes(60))  // grace period during which late records re-fire the window
                        .sideOutputLateData(lateData)       // records later than that go to the side output
                        .sum(2);

        // Records too late even for allowedLateness end up here; wire a real
        // sink in production instead of the commented-out placeholder.
        SideOutputDataStream<Tuple3<String, Long, Integer>> sideOutput = windowStream.getSideOutput(lateData);
        // sideOutput.addSink()

        windowStream.print().setParallelism(1);


        env.execute("session window demo");


    }

}
