package com.abyss.window;


import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.windowing.assigners.TumblingProcessingTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.windows.TimeWindow;

import java.util.Arrays;
import java.util.List;
import java.util.Random;

/**
 * Demo of a tumbling time window (processing time).
 *
 * <p>A custom source emits one (k, v) pair every second, where k is one of
 * "hadoop", "spark" or "flink" and v is a random integer. Tumbling time
 * windows are then used to compute sums, both over the whole stream and
 * per key.
 */
public class TumblingTimeWindowDemo {
    /**
     * Builds and runs the demo pipeline: one un-keyed windowed sum and one
     * per-key windowed sum, each over 5-second tumbling processing-time windows.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Source: emits one (key, randomInt) pair per second.
        DataStreamSource<Tuple2<String, Integer>> randomIntSource = env.addSource(new GenerateRandomNumEverySecond());

        // Windowing directly on a non-keyed stream uses the windowAll family.
        // Sum over the whole (un-keyed) stream, emitted every 5 seconds.
        // Note: timeWindowAll(Time) is deprecated (removed in Flink 1.14+);
        // use an explicit window assigner instead.
        SingleOutputStreamOperator<Tuple2<String, Integer>> sumOfAll = randomIntSource
                .windowAll(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .sum(1);

        // Partition the stream by key (field f0), then sum each group over a
        // 5-second tumbling window. keyBy(int) is deprecated; a KeySelector
        // keeps the key type explicit (String) instead of an opaque Tuple.
        SingleOutputStreamOperator<Tuple2<String, Integer>> sumEachKey = randomIntSource
                .keyBy(t -> t.f0)
                .window(TumblingProcessingTimeWindows.of(Time.seconds(5)))
                .sum(1);

        sumOfAll.print("Sum of all:");
        sumEachKey.print("Sum each key:");

        env.execute();
    }

    /**
     * Custom source: every second emits one (key, value) pair where the key is
     * one of "hadoop", "spark" or "flink" and the value is a random integer
     * in [0, 99).
     */
    public static class GenerateRandomNumEverySecond implements SourceFunction<Tuple2<String, Integer>> {
        // volatile: cancel() is invoked by a different thread than the one
        // executing run(); without volatile the loop may never observe the
        // flag flipping to false and the source would not stop.
        private volatile boolean isRun = true;
        private final Random random = new Random();
        private final List<String> keyList = Arrays.asList("hadoop", "spark", "flink");

        @Override
        public void run(SourceContext<Tuple2<String, Integer>> ctx) throws Exception {
            while (this.isRun) {
                // Pick one of the keys uniformly at random; sized from the
                // list so the bound stays correct if keys are added.
                String key = keyList.get(random.nextInt(keyList.size()));
                ctx.collect(Tuple2.of(key, random.nextInt(99)));
                Thread.sleep(1000L);
            }
        }

        @Override
        public void cancel() {
            this.isRun = false;
        }
    }
}