package com.hu.flink12.demo.wc;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.Arrays;

/**
 * Streaming word-count demo: reads a fixed, bounded set of lines, splits
 * each line into words, computes a running count per word, prints the
 * running counts to stdout and writes the result to HDFS.
 *
 * @Author: hujianjun
 * @Date: 2021/2/3 0:08
 * @Describe:
 */
public class StreamBatchWordCount {

    public static void main(String[] args) throws Exception {
        // Set the HDFS user up front, before any HDFS-related setup, so the
        // sink has write permission when the job actually runs.
        System.setProperty("HADOOP_USER_NAME", "root");

        //  TODO 1. Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

//        env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);
        // Single parallel task keeps the printed output in one deterministic stream.
        env.setParallelism(1);

        //  TODO 2. Read the source: an in-memory collection of lines.
        DataStream<String> lines = env.fromElements("hdfs hadoop hive", "hive hadoop", "hive flink");

        //  TODO 3. Transform: line -> words -> (word, 1) -> summed counts.
        // .returns(...) is required because type erasure hides the lambda's
        // output type from Flink's type extraction.
        DataStream<String> words = lines
                .flatMap((String line, Collector<String> out) ->
                        Arrays.stream(line.split(" ")).forEach(out::collect))
                .returns(Types.STRING);
        DataStream<Tuple2<String, Integer>> wordAndOne = words
                .map(word -> Tuple2.of(word, 1))
                .returns(Types.TUPLE(Types.STRING, Types.INT));

        // Key by the word (f0) and sum the count field (f1).
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordCount =
                wordAndOne.keyBy(w -> w.f0).sum(1);

        //  TODO 4. Sinks: print the running counts and write the text to HDFS.
        wordCount.print();

        // NOTE(review): writeAsText is deprecated and provides no exactly-once
        // guarantees; prefer FileSink/StreamingFileSink in production code.
        wordCount.writeAsText("hdfs://localhost:8020/test/out/wc.txt");

        // Name the job so it is identifiable in the Flink web UI / logs.
        env.execute("StreamBatchWordCount");

    }
}
