package com.zx.learn.flink.source;

import com.zx.learn.flink.utils.DataUtils;
import com.zx.learn.flink.utils.NcMockServer;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.List;

/**
 * Flink demo: receives word data from a socket (backed by a local mock NC server),
 * splits each line on spaces, and prints a running word count.
 */
@Slf4j
public class FromSocketDemo {
    /**
     * Entry point. Pipeline steps:
     * 1) obtain the Flink streaming execution environment (local, with Web UI)
     * 2) attach the source: read lines from the mock socket server
     * 3) process the data
     *    3.1: split lines into words with flatMap
     *    3.2: map each word to a (word, 1) tuple
     *    3.3: group by the word key
     *    3.4: aggregate (sum) the counts per key
     * 4) sink: print the results
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to execute
     */
    public static void main(String[] args) throws Exception {
        log.info("Generating NC mock data");
        List<String> ncData = DataUtils.getData("wordcount.txt");
        NcMockServer.generateData(ncData);

        log.info("Processing data");
        // 1) Obtain the Flink streaming execution environment.
        Configuration configuration = new Configuration();
        // Expose the local Web UI on this port.
        configuration.setInteger("rest.port", 8081);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration);

        // Parameterized logging avoids eager string concatenation.
        log.info("Default parallelism of the execution environment: {}", env.getParallelism());

        // 2) Attach the source: read text lines from the mock socket server.
        DataStreamSource<String> lines = env.socketTextStream("localhost", NcMockServer.PORT);
        log.info("Parallelism of socketTextStream: {}", lines.getParallelism());

        // 3) Process the data.
        //  3.1: Split each line into words on spaces.
        //       Anonymous class (not a lambda) so Flink can infer the output type
        //       despite generic type erasure.
        SingleOutputStreamOperator<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public void flatMap(String line, Collector<String> out) throws Exception {
                String[] split = line.split(" ");
                // Emit each word downstream.
                for (String word : split) {
                    out.collect(word);
                }
            }
        }).setParallelism(2);
        log.info("Parallelism of words: {}", words.getParallelism());

        //  3.2: Map each word to a (word, 1) count tuple.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne = words.map(new MapFunction<String, Tuple2<String, Integer>>() {
            @Override
            public Tuple2<String, Integer> map(String word) throws Exception {
                return Tuple2.of(word, 1);
            }
        });
        log.info("Parallelism of wordAndOne: {}", wordAndOne.getParallelism());

        //  3.3: Group the tuples by the word key (field f0).
        KeyedStream<Tuple2<String, Integer>, String> grouped = wordAndOne.keyBy(t -> t.f0);

        //  3.4: Aggregate: sum the counts (tuple field at position 1) per key.
        SingleOutputStreamOperator<Tuple2<String, Integer>> summed = grouped.sum(1);
        log.info("Parallelism of summed: {}", summed.getParallelism());

        // 4) Sink: print the running counts to stdout.
        summed.print();

        // 5) Launch the job.
        env.execute();
    }
}
