package com.shujia.flink.core;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Streaming word count demo: reads comma-separated words from a socket,
 * counts occurrences per word, and prints the running totals to stdout.
 *
 * <p>Start the socket source on the host first, e.g.: {@code nc -lk 8888}.
 *
 * <p>Anonymous inner classes are used instead of lambdas on purpose: Flink
 * cannot always recover {@code Tuple2} generic types from lambdas due to
 * type erasure, which would require explicit {@code .returns(...)} hints.
 */
public class Demo01StreamWordCount {
    public static void main(String[] args) throws Exception {
        // 1. Build the Flink streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment
                .getExecutionEnvironment();
        // Manually override the job parallelism if needed.
//        env.setParallelism(1);
        // Flush network buffers at least every 200 ms to lower latency.
        env.setBufferTimeout(200);

        // 2. Use a socket to simulate an unbounded stream source.
        //    On the "master" host run: nc -lk 8888
        DataStream<String> lineDS = env.socketTextStream("master", 8888);
        System.out.println("lineDS并行度:" + lineDS.getParallelism());

        // Step 1: split each line into words and flatten them into the stream.
        DataStream<String> wordsDS = lineDS.flatMap(new FlatMapFunction<String, String>() {
            /**
             * @param line one record from the upstream DataStream
             * @param out  collector that forwards each word downstream
             * @throws Exception per the Flink function contract
             */
            @Override
            public void flatMap(String line, Collector<String> out) throws Exception {
                for (String word : line.split(",")) {
                    // Emit each word as its own record.
                    out.collect(word);
                }
            }
        });
        System.out.println("wordsDS并行度:" + wordsDS.getParallelism());

        // Step 2: map each word to a KV pair with an initial count of 1.
        DataStream<Tuple2<String, Integer>> wordKVDS = wordsDS.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String word) throws Exception {
                return Tuple2.of(word, 1);
            }
        });
        System.out.println("wordKVDS并行度:" + wordKVDS.getParallelism());

        // Step 3: group by word. After keyBy, records with the same key are
        // routed to the same subtask (hash partitioning over the parallelism,
        // which defaults to the number of CPU threads).
        KeyedStream<Tuple2<String, Integer>, String> keyedDS = wordKVDS.keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
            @Override
            public String getKey(Tuple2<String, Integer> tuple2) throws Exception {
                return tuple2.f0;
            }
        });
        System.out.println("keyedDS并行度:" + keyedDS.getParallelism());

        // Step 4: sum the counts (tuple field index 1) per key.
        DataStream<Tuple2<String, Integer>> wordCntDS = keyedDS.sum(1);
        System.out.println("wordCntDS并行度:" + wordCntDS.getParallelism());

        // 3. Sink the aggregated result to the console.
        // BUG FIX: the original printed keyedDS (pre-aggregation pairs),
        // leaving wordCntDS unused; the summed counts are the actual result.
        wordCntDS.print();

        // Submit the job; execution is lazy until this call.
        env.execute();

    }
}
