package com.it.operator;

import com.it.pojo.Event;
import com.it.operator.utils.SourceUtils;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * 使用reduce来实现聚合操作，可以提高代码的灵活性。
 *
 * @author code1997
 */
/**
 * Demonstrates aggregation via {@code reduce}, which offers more flexibility
 * than the built-in aggregation operators.
 *
 * @author code1997
 */
public class Operator_TransformReduce {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        SingleOutputStreamOperator<Event> events = SourceUtils.getEventSource(env);

        // Count visits per user: map each event to (user, 1L), key by the user
        // name, and sum the counts with a reduce.
        SingleOutputStreamOperator<Tuple2<String, Long>> visitsPerUser = events
                .map(event -> Tuple2.of(event.user, 1L))
                .returns(Types.TUPLE(Types.STRING, Types.LONG))
                .keyBy(tuple -> tuple.f0)
                .reduce((left, right) -> Tuple2.of(left.f0, left.f1 + right.f1));

        // Pick the currently most active user. Aggregations like maxBy require a
        // KeyedStream, so we route every record to one constant key, collapsing
        // all data into a single partition. Use this trick with caution — it
        // defeats parallelism.
        SingleOutputStreamOperator<Tuple2<String, Long>> mostActiveUser = visitsPerUser
                .keyBy(tuple -> "key")
                .maxBy(1);
        mostActiveUser.print();

        // reduce can express the same "keep the maximum" logic with custom code.
        SingleOutputStreamOperator<Tuple2<String, Long>> mostActiveUserViaReduce = visitsPerUser
                .keyBy(tuple -> "key")
                .reduce((left, right) -> left.f1 > right.f1 ? left : right);
        mostActiveUserViaReduce.print();

        env.execute();
    }
}
