package com.zx.learn.flink.helloworld;

import com.zx.learn.flink.utils.DataUtils;
import com.zx.learn.flink.utils.NcMockServer;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.List;

/**
 * Word count built on Flink's unified batch/stream API (Flink 1.12+).
 *
 * <p>In Flink, batch is a special case of streaming: whether you want batch or
 * stream processing, you implement it against the streaming API
 * ({@link StreamExecutionEnvironment} / DataStream). With
 * {@code RuntimeExecutionMode.AUTOMATIC}, Flink picks batch vs. streaming
 * execution based on whether the source is bounded.
 *
 * <p>Pipeline: generate mock socket data → read lines from the socket →
 * split into words → (word, 1) → keyBy word → sum → print.
 */
@Slf4j
public class UnifyWordCount {

    public static void main(String[] args) throws Exception {
        log.info("Generating mock NC data");
        // Feed the sample word-count file through a local netcat-style mock server
        // so the socket source below has something to read.
        List<String> ncData = DataUtils.getData("wordcount.txt");
        NcMockServer.generateData(ncData);

        log.info("Processing data");

        // 1) Obtain the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // BATCH mode is rejected for unbounded sources ("Detected an UNBOUNDED source
        // with 'execution.runtime-mode' set to 'BATCH'..."); AUTOMATIC lets Flink
        // choose batch or streaming execution depending on source boundedness.
        env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);

        // 2) Connect to the data source: the local socket served by NcMockServer.
        //    (A bounded alternative would be env.readTextFile(...).)
        DataStreamSource<String> lines = env.socketTextStream("localhost", NcMockServer.PORT);

        // 3.1) Split each line into words. "\\s+" collapses runs of spaces/tabs;
        //      blank tokens (from leading whitespace or empty lines) are skipped
        //      so they never show up as a counted "word".
        SingleOutputStreamOperator<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public void flatMap(String line, Collector<String> out) {
                for (String word : line.split("\\s+")) {
                    if (!word.isEmpty()) {
                        out.collect(word);
                    }
                }
            }
        });

        // 3.2) Pair each word with an initial count of 1.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordAndOne =
                words.map(new MapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> map(String word) {
                        return Tuple2.of(word, 1);
                    }
                });

        // 3.3) Group by the word itself (tuple field f0).
        KeyedStream<Tuple2<String, Integer>, String> grouped = wordAndOne.keyBy(t -> t.f0);

        // 3.4) Aggregate: sum the counts (tuple field at index 1) per key.
        SingleOutputStreamOperator<Tuple2<String, Integer>> summed = grouped.sum(1);

        // 4) Sink: print incremental (word, count) updates to stdout.
        summed.print();

        // 5) Submit and run the job (blocks until the job finishes or fails).
        env.execute("UnifyWordCount");
    }
}
