package com.zx.learn.flink.helloworld;

import com.zx.learn.flink.utils.DataUtils;
import com.zx.learn.flink.utils.NcMockServer;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.Arrays;
import java.util.List;

/**
 * Word count written with Java 8 lambdas (the pre-Flink-1.12 DataStream style).
 * Receives lines of text over a socket, splits each line on spaces, and prints
 * a running count per word.
 */
@Slf4j
public class LambdaStreamWordCount {
    public static void main(String[] args) throws Exception {
        /*
         * Steps:
         * 1) Obtain the Flink streaming execution environment
         * 2) Attach the source: read lines from the mock socket server
         * 3) Process the data:
         *    3.1: flatMap — split each line into individual words
         *    3.2: map    — pair every word with an initial count of 1
         *    3.3: keyBy  — group the tuples by the word itself
         *    3.4: sum    — aggregate the counts per word
         * 4) Build the sink and emit the result
         */
        log.info("生成NC数据");
        List<String> ncData = DataUtils.getData("wordcount.txt");
        NcMockServer.generateData(ncData);

        // 1) Obtain the Flink streaming execution environment
        log.info("处理数据");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2) Attach the source: lines of text from the local mock socket server
        DataStreamSource<String> lines = env.socketTextStream("localhost", NcMockServer.PORT);

        // 3.1) Split each line into words.
        // .returns(...) is required: Java erases the lambda's generic types,
        // so Flink cannot infer the Collector's element type on its own.
        SingleOutputStreamOperator<String> words = lines
                .flatMap((String line, Collector<String> out) ->
                        Arrays.stream(line.split(" ")).forEach(out::collect))
                .returns(Types.STRING);

        // 3.2) Pair every word with a count of 1; same type-hint caveat as above,
        // since Tuple2's type parameters are also erased at runtime.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = words
                .map(w -> Tuple2.of(w, 1))
                .returns(Types.TUPLE(Types.STRING, Types.INT));

        // 3.3) Group by the word (field f0 of the tuple)
        KeyedStream<Tuple2<String, Integer>, String> grouped = wordAndOne.keyBy(t -> t.f0);

        // 3.4) Sum the counts (tuple position 1) within each key group
        SingleOutputStreamOperator<Tuple2<String, Integer>> summed = grouped.sum(1);

        // 4) Sink: print each updated (word, count) tuple to stdout
        summed.print();

        // 5) Launch the job; blocks until the streaming job terminates
        env.execute();
    }
}
