package com.learn.unify;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @create: 2023-04-17 16:44
 * @author: Mr.Du
 * --------------
 * @notes:
 **/
public class BatchWordCountToYarn {

    /**
     * Streaming word count over a fixed set of sample sentences.
     *
     * <p>Splits each line on spaces, counts occurrences per word, and writes the
     * running counts as text to {@code --output} (or a default HDFS path) with the
     * current timestamp appended so repeated runs do not collide.
     *
     * @param args command-line arguments; supports {@code --output <path>}
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        String output;
        if (parameterTool.has("output")) {
            output = parameterTool.get("output");
            System.out.println("指定了输出路径：" + output);
        } else {
            output = "hdfs://node1:8020/wordcount/output47_";
            System.out.println("可以指定输出路径使用 --output ,没有指定使用默认的:" + output);
        }

        // Set the HDFS user up front, before any filesystem client is created.
        System.setProperty("HADOOP_USER_NAME", "root");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        DataStreamSource<String> lines =
                env.fromElements("python java scala", "python php js", "java", "scala scala");

        SingleOutputStreamOperator<Tuple2<String, Integer>> result = lines
                .flatMap((String line, Collector<String> out) -> {
                    for (String word : line.split(" ")) {
                        out.collect(word);
                    }
                })
                // Lambdas lose generic type info to erasure; declare it explicitly.
                .returns(Types.STRING)
                .map(word -> Tuple2.of(word, 1))
                .returns(Types.TUPLE(Types.STRING, Types.INT))
                // keyBy(int) is deprecated (removed in newer Flink); use a key selector.
                .keyBy(t -> t.f0)
                .sum(1);

        // Single writer so the output lands in one file rather than per-subtask parts.
        result.writeAsText(output + System.currentTimeMillis()).setParallelism(1);
        env.execute("BatchWordCountToYarn");
    }
}
