package com.atguigu.day01;


import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Consumes text lines from a socket and performs a streaming word count.
 *
 * @author lyh
 */
public class Example1 {
    public static void main(String[] args) throws Exception {
        // Obtain the stream execution environment (the job context).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Read raw text lines from a socket.
        // Start the data source first with: nc -lk 9999
        // A socket source is inherently non-parallel, so its parallelism is pinned to 1.
        DataStreamSource<String> source = env.socketTextStream("hadoop102", 9999).setParallelism(1);

        // Map phase:
        // "hello world" => Tuple2.of("hello", 1), Tuple2.of("world", 1)
        // One-to-many transformation, so a FlatMap operator is used.
        SingleOutputStreamOperator<Tuple2<String, Integer>> mappedStream = source.flatMap(new Tokenizer()).setParallelism(1);


        // Shuffle phase:
        // Group tuples that share the same word into the same logical partition.
        // KeyedStream<input element type, key type>
        // 'r -> r.f0': r is the input tuple; its 'f0' field (the word) is used as the key.
        // A key is extracted for every incoming record.
        KeyedStream<Tuple2<String, Integer>, String> keyedStream = mappedStream.keyBy(r -> r.f0);

        // Reduce phase: incrementally sum the counts per key.
        SingleOutputStreamOperator<Tuple2<String, Integer>> result = keyedStream.reduce(new WordCount()).setParallelism(1);

        // Print running results to stdout.
        result.print().setParallelism(1);

        // Submit the job with an explicit name (easier to find in the Flink UI).
        env.execute("Socket Word Count");

    }

    /**
     * Splits each input line into words and emits one (word, 1) tuple per word.
     *
     * FlatMapFunction&lt;IN, OUT&gt;
     */
    public static class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
        /**
         * @param in  one input line of text
         * @param out collector used to emit records downstream
         * @throws Exception never thrown here; required by the interface
         */
        @Override
        public void flatMap(String in, Collector<Tuple2<String, Integer>> out) throws Exception {
            // Split on runs of whitespace; trimming first avoids a leading empty
            // token, and the isEmpty() guard protects against blank lines.
            // (Splitting on a single " " would emit ("", 1) for consecutive
            // spaces or empty input, polluting the counts with a bogus word.)
            String[] words = in.trim().split("\\s+");

            for (String word : words) {
                if (!word.isEmpty()) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        }
    }

    /**
     * Sums counts per key.
     *
     * For a ReduceFunction the input, output, and accumulator share one type.
     * The first record for a key becomes the initial accumulator and is emitted
     * as-is; each subsequent record is merged into the accumulator, which
     * replaces the previous one and is emitted again. Every key maintains its
     * own independent accumulator.
     */
    public static class WordCount implements ReduceFunction<Tuple2<String, Integer>> {
        /**
         * @param accumulator the running (word, count) for this key
         * @param in          the newly arrived (word, 1) record
         * @return a new tuple with the same word and the summed count
         * @throws Exception never thrown here; required by the interface
         */
        @Override
        public Tuple2<String, Integer> reduce(Tuple2<String, Integer> accumulator, Tuple2<String, Integer> in) throws Exception {
            return Tuple2.of(accumulator.f0, accumulator.f1 + in.f1);
        }
    }
}
