package cn.itcast.flink.sql;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Word count over a bounded source, demonstrating Flink's unified batch/stream API.
 *
 * <p>Before Flink 1.12 the batch/stream unification was immature, so batch and streaming
 * jobs required two separate APIs:
 * <ul>
 *   <li>batch jobs used the {@code DataSet} abstraction</li>
 *   <li>streaming jobs used the {@code DataStream} abstraction</li>
 * </ul>
 * From Flink 1.12 onward a single {@code DataStream} API can run both batch and streaming
 * jobs; the execution mode is selected via {@link RuntimeExecutionMode}.
 *
 * <p>Usage: optionally pass {@code --output <path>} to override the default HDFS output path.
 */
public class BatchWordCount {
    public static void main(String[] args) throws Exception {
        // Parse command-line arguments.
        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        // Resolve the output path: use --output if supplied, otherwise fall back to the default.
        String output;
        if (parameterTool.has("output")) {
            output = parameterTool.get("output");
            System.out.println("指定了输出路径使用: " + output);
        } else {
            output = "hdfs://node1:8020/wordcount/output66_";
            System.out.println("可以指定输出路径使用 --output ,没有指定使用默认的:" + output);
        }

        // Execution environment. The source below is bounded, and this class is meant to
        // showcase the 1.12+ unified API, so explicitly run in BATCH mode (the default
        // STREAMING mode would emit incremental updates instead of final counts).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // Bounded in-memory source of sentences.
        DataStreamSource<String> source = env.fromElements("hello flink spark hadoop", "hello flink spark", "hello flink");

        // Transformation 1: split each line on single spaces and emit individual words.
        SingleOutputStreamOperator<String> words = source.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public void flatMap(String line, Collector<String> out) throws Exception {
                String[] arr = line.split(" ");
                for (String word : arr) {
                    out.collect(word);
                }
            }
        });

        // Transformation 2: map each word to a (word, 1) pair for counting.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = words.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String word) throws Exception {
                return Tuple2.of(word, 1);
            }
        });

        // Group by word and sum the counts. A typed KeySelector lambda replaces the
        // deprecated positional keyBy(0), giving a String-keyed stream instead of Tuple.
        KeyedStream<Tuple2<String, Integer>, String> keyedByWord = wordAndOne.keyBy(tuple -> tuple.f0);

        SingleOutputStreamOperator<Tuple2<String, Integer>> result = keyedByWord.sum(1);

        // Sink: write results to HDFS as the "root" user; a timestamp suffix keeps each
        // run's output directory unique. Parallelism 1 produces a single output file.
        System.setProperty("HADOOP_USER_NAME", "root");
        result.writeAsText(output + System.currentTimeMillis()).setParallelism(1);

        // Trigger job execution (lazy until this point).
        env.execute();

    }
}