package com.yc.bigdata.flink.demo;

import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Demo job showing Flink's {@code keyBy} and {@code reduce} operators on a
 * stream of {@code Tuple2<Long, Long>} elements.
 *
 * @author: YuanChilde
 * @date: 2020-02-11 13:30
 * @version: 1.0
 * Modification History:
 * Date    Author      Version     Description
 * -----------------------------------------------------------------
 * 2020-02-11 13:30    YuanChilde     1.0        Initial version
 */
public class KeyByTestJob {

    /**
     * Runs a small Flink streaming job that partitions a stream of
     * {@code Tuple2<Long, Long>} by the first field ({@code f0}) and emits a
     * running sum of the second field ({@code f1}) per key.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if job construction or execution fails
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
         * reduce must be applied to a partitioned stream — either a KeyedStream
         * (via keyBy) or a window/timeWindow. The ReduceFunction merges each
         * incoming element with the previous reduce result and emits the
         * merged value downstream.
         */
        env.fromElements(Tuple2.of(2L, 3L), Tuple2.of(1L, 2L), Tuple2.of(1L, 5L), Tuple2.of(1L, 7L), Tuple2.of(2L, 4L))
                // Key by the first tuple field. The KeySelector lambda replaces
                // the deprecated index-based keyBy(0) and yields a typed Long key.
                .keyBy(value -> value.f0)
                // Rolling sum of f1 per key; f0 is identical within a key, so
                // carrying value.f0 forward preserves the key in the output.
                .reduce((ReduceFunction<Tuple2<Long, Long>>) (acc, value) -> new Tuple2<>(value.f0, acc.f1 + value.f1))
                .print();

        env.execute("execute");

        // Other built-in rolling aggregations available on a KeyedStream,
        // addressed by field position or field name:
        //   keyedStream.sum(0);    keyedStream.sum("key");
        //   keyedStream.min(0);    keyedStream.min("key");
        //   keyedStream.max(0);    keyedStream.max("key");
        //   keyedStream.minBy(0);  keyedStream.minBy("key");
        //   keyedStream.maxBy(0);  keyedStream.maxBy("key");
    }
}
