package com.atbeijing.D01;


import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Streaming word count over a bounded in-memory source.
 * (Uses the DataStream API with a finite source — not the batch/DataSet API,
 * despite the class name.)
 */
public class WordCountFromBatch {
    public static void main(String[] args) throws Exception {
        // Obtain the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Run with a single parallel task so the printed results arrive in a
        // deterministic order.
        env.setParallelism(1);

        // Bounded in-memory source of sentences.
        DataStreamSource<String> dss = env.fromElements("hello word", "hello word", "hello word", "你好", "你好");

        // flatMap: each input sentence emits zero or more (word, 1) records.
        // An anonymous class (rather than a lambda) is used on purpose: it lets
        // Flink recover the generic output type despite Java type erasure.
        SingleOutputStreamOperator<WordWithCount> mappedStream = dss.flatMap(new FlatMapFunction<String, WordWithCount>() {
            @Override
            public void flatMap(String s, Collector<WordWithCount> collector) throws Exception {
                // Split on runs of whitespace. split(" ") would produce empty
                // tokens for consecutive/leading spaces and count "" as a word.
                for (String token : s.trim().split("\\s+")) {
                    if (!token.isEmpty()) {
                        // Emit one (word, 1) record downstream per token.
                        collector.collect(new WordWithCount(token, 1));
                    }
                }
            }
        });

        // keyBy: hash-partition the stream so records sharing the same word
        // land in the same logical partition. First type parameter is the
        // stream element, second is the key type.
        KeyedStream<WordWithCount, String> keyedStream = mappedStream.keyBy(new KeySelector<WordWithCount, String>() {
            @Override
            public String getKey(WordWithCount wordWithCount) throws Exception {
                return wordWithCount.word;
            }
        });

        // reduce: incrementally fold each key's records into a running total.
        // The accumulator type equals the stream element type.
        SingleOutputStreamOperator<WordWithCount> reduceStream = keyedStream.reduce(new ReduceFunction<WordWithCount>() {
            @Override
            public WordWithCount reduce(WordWithCount t1, WordWithCount t2) throws Exception {
                return new WordWithCount(t1.word, t1.count + t2.count);
            }
        });

        // Print the running aggregate for each key to stdout.
        reduceStream.print();

        // Submit the job; a name makes it easy to find in the Flink UI.
        env.execute("WordCountFromBatch");
    }


    /**
     * Flink POJO holding a word and its running count.
     * Flink POJO rules: the class and its fields must be public and a public
     * no-argument constructor must exist, so Flink can (de)serialize it.
     */
    public static class WordWithCount {
        public String word;
        public long count;

        public WordWithCount(String word, long count) {
            this.word = word;
            this.count = count;
        }

        /** Required by Flink's POJO serializer. */
        public WordWithCount() {
        }

        @Override
        public String toString() {
            return "WordWithCount{" +
                    "word='" + word + '\'' +
                    ", count=" + count +
                    '}';
        }
    }
}


