package com.zx.learn.flink.sink;

import com.zx.learn.flink.utils.DataUtils;
import com.zx.learn.flink.utils.PathUtil;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.io.TextOutputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Streaming word-count example that writes its result to text files using
 * {@link TextOutputFormat} via {@code DataStream#writeUsingOutputFormat}.
 *
 * <p>NOTE(review): {@code writeUsingOutputFormat} is meant for testing/debugging;
 * production jobs should prefer the {@code FileSink} API, which participates in
 * checkpointing and provides exactly-once guarantees.
 */
public class writeUsingOutputFormatSink {
    public static void main(String[] args) throws Exception {
        // 1: Obtain the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 2: Build a non-parallel source from a fixed list of words.
        DataStreamSource<String> words = env.fromElements(
                "hadoop","spark","flink","hbase","flink","spark"
        );
        // 3: Map each word to a (word, 1) pair.
        // An anonymous MapFunction (rather than a lambda) is used so Flink can
        // extract the Tuple2 type information despite generic-type erasure.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = words.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String word) throws Exception {
                return Tuple2.of(word, 1);
            }
        });
        // 4: Key by the word and sum the counts.
        // KeySelector lambda replaces the deprecated index-based keyBy(0).
        DataStream<Tuple2<String, Integer>> result = wordAndOne
                .keyBy(tuple -> tuple.f0)
                .sum(1);
        // 5: Clear the output directory, then write the running counts as text.
        String fullFilePathName = PathUtil.CLASSPATH_DATA_OUTPUT + "5/";
        DataUtils.clearDir(fullFilePathName);
        result.writeUsingOutputFormat(new TextOutputFormat<>(new Path(fullFilePathName)));
        env.execute();
    }
}
