package cn.itcast.hello;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;
import java.util.Arrays;
/**
 * @author KTL
 * @version V1.0
 * @Package cn.itcast.hello
 * @date 2021/2/21 0021 10:3
 * @Copyright © 2015-04-29  One for each, and two for each
 *      演示：flink-datastream-api-实现wordcount
 *      注意：结果写入hdfs
 */
public class WordCount5 {
    /**
     * Entry point: builds and submits a streaming WordCount job whose result is
     * written as text to HDFS (or to a caller-supplied path).
     *
     * @param args optional {@code --output <path>}; when absent a default HDFS path is used
     * @throws Exception propagated from {@code env.execute()} on job failure
     */
    public static void main(String[] args) throws Exception {
        // Resolve the output path from the command line, falling back to a default.
        final ParameterTool parameterTool = ParameterTool.fromArgs(args);
        String output = "";
        if (parameterTool.has("output")) {
            output = parameterTool.get("output");
            System.out.println("指定了输出路径使用:" + output);
        } else {
            output = "hdfs://node1:9000/wordcount/output47_";
            System.out.println("可以指定输出路径使用 --output ,没有指定使用默认的:" + output);
        }
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStreamSource<String> lines = env.fromElements("itcast hadoop spark", "itcast hadoop spark", "itcast hadoop", "itcast");
        // Split each line into words.
        // void flatMap(T value, Collector<O> out) throws Exception;
        // NOTE: a lambda-based flatMap loses its output type to erasure, so Flink
        // needs the explicit .returns(Types.STRING) hint — without it the job
        // fails at build time with InvalidTypesException (the map() below already
        // carries the equivalent hint for the same reason).
        SingleOutputStreamOperator<String> words = lines
                .flatMap((String value, Collector<String> out) -> Arrays.stream(value.split(" "))
                        //.forEach(out::collect)); // alternative method-reference form
                        .forEach(word -> out.collect(word)))
                .returns(Types.STRING);
        // Pair every word with a count of 1.
        // O map(T value) throws Exception;
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = words
                .map((String value) -> Tuple2.of(value, 1))
                .returns(Types.TUPLE(Types.STRING, Types.INT));
        // Key by the word (field f0) and sum the counts (field index 1).
        final KeyedStream<Tuple2<String, Integer>, String> grouped = wordAndOne.keyBy(t -> t.f0);
        final SingleOutputStreamOperator<Tuple2<String, Integer>> result = grouped.sum(1);
        // Identify as "root" to HDFS before the sink writes.
        System.setProperty("HADOOP_USER_NAME", "root");
        // Single-parallelism text sink so the output lands in one file; the
        // timestamp suffix keeps reruns from colliding on the same path.
        result.writeAsText(output + System.currentTimeMillis()).setParallelism(1);
        env.execute();
    }
}
