package com.wuwangfu.window;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.datastream.WindowedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.windows.GlobalWindow;

/**
 * @Author jcshen
 * @Date 2023-02-23
 * @PackageName:com.wuwangfu.window
 * @ClassName: CountWindow
 * @Description: Count-window demo: keyBy the stream first, then carve out
 *               count windows of 5 elements per key, and reduce (incremental
 *               aggregation) the contents of each window.
 * @Version 1.0.0
 *
 *  http://www.doitedu.cn/archives/3485.html
 */
public class CountWindowKeyBy {
    /**
     * Reads "word,count" lines from a local socket, keys the stream by word,
     * groups each key into count windows of 5 elements, and prints the
     * incrementally reduced (summed) tuple for every fired window.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
         * Expected input, one record per line, e.g.:
         *   spark,1
         *   hadoop,1
         *   hive,1
         *   flink,1
         */
        DataStreamSource<String> line = env.socketTextStream("localhost", 8888);
        // Parse each "word,count" line into a (word, count) tuple.
        SingleOutputStreamOperator<Tuple2<String, Integer>> maped = line.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String value) throws Exception {
                String[] fields = value.split(",");
                // Guard against malformed lines so the job fails with a clear
                // message instead of an opaque ArrayIndexOutOfBoundsException.
                if (fields.length < 2) {
                    throw new IllegalArgumentException("Expected 'word,count' but got: " + value);
                }
                return Tuple2.of(fields[0], Integer.valueOf(fields[1]));
            }
        });
        // Group by the word (tuple field 0).
        KeyedStream<Tuple2<String, Integer>, String> keyed = maped.keyBy(t -> t.f0);
        // Count window after keyBy: each key maintains its own window, and a
        // window fires only when THAT key has accumulated 5 elements.
        // Aggregation inside each per-key window is incremental.
        WindowedStream<Tuple2<String, Integer>, String, GlobalWindow> windowed = keyed.countWindow(5);
        // Aggregation alternative: windowed.sum(1).print();

        /* reduce = incremental aggregation; use apply() for full-window aggregation */
        windowed.reduce(new ReduceFunction<Tuple2<String, Integer>>() {
            /**
             * Combines two records of the same key within one window.
             *
             * @param v1 the accumulated value so far (or the first record of the key)
             * @param v2 the next record with the same key in this window
             * @return a NEW tuple holding the summed count. Flink's contract
             *         forbids mutating input objects (they may be reused by the
             *         runtime), so we do not write into v1 in place.
             * @throws Exception never thrown here
             */
            @Override
            public Tuple2<String, Integer> reduce(Tuple2<String, Integer> v1, Tuple2<String, Integer> v2) throws Exception {
                return Tuple2.of(v1.f0, v1.f1 + v2.f1);
            }
        }).print();


        env.execute();
    }
}
