package com.atguigu.flink.wordcount;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Created by Smexy on 2023/1/12
 *
 *  计算一般套路：
 *      ①先有环境
 *          SparkContext           ExecutionEnvironment(批处理)
 *                                 StreamExecutionEnvironment(流处理)
 *
 *      ②从环境中获取编程模型
 *          RDD                        批处理  DataSource
 *                                     流处理: DataStreamSource
 *
 *      ③调用编程API编程
 *        RDD new =  RDD.map(map逻辑)         DataSource| DataStreamSource new = DataSource| DataStreamSource.map(map逻辑)
 *
 *      ④触发计算
 *          行动算子                    批处理：无需触发
 *                                     流处理: StreamExecutionEnvironment.execute()
 *
 * ------------------------
 *
 *  模拟一个无界流。
 *          在虚拟机上安装软件:  sudo yum -y install nc
 *
 *          开启一个服务端:   nc -lk 绑定主机名 端口
 *
 *          开启一个客户端(任意客户端)连接服务端: nc 服务端绑定主机名 端口
 *                  flink程序客户端
 *                  nc模拟
 *
 *
 *
 */
public class Demo3_UnBoundedStream
{
    public static void main(String[] args) throws Exception {

        // Step 1: obtain the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Run with a single parallel task so the printed output is easy to follow.
        env.setParallelism(1);

        // Step 2: build an unbounded source from a socket
        // (pair it with a netcat server: nc -lk 8888 on hadoop103).
        DataStreamSource<String> source = env.socketTextStream("hadoop103", 8888);

        // Step 3: split each incoming line on spaces and emit one (word, 1) pair per word.
        SingleOutputStreamOperator<Tuple2<String, Integer>> wordOnes = source.flatMap(
            new FlatMapFunction<String, Tuple2<String, Integer>>() {
                // The Collector forwards every produced element to the downstream operator.
                @Override
                public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                    for (String word : line.split(" ")) {
                        out.collect(Tuple2.of(word, 1));
                    }
                }
            });

        /*
            Batch API groups with groupBy; the streaming API uses keyBy instead.
                POJO model:  ds.groupBy(String fieldName)
                Tuple model: ds.groupBy(int fieldIndex)
         */
        wordOnes
            .keyBy(new KeySelector<Tuple2<String, Integer>, String>() {
                @Override
                public String getKey(Tuple2<String, Integer> pair) throws Exception {
                    // Key each pair by the word itself (field 0 of the tuple).
                    return pair.f0;
                }
            })
            .sum(1)   // running count per word (field 1 holds the ones)
            .print();

        // Step 4: submit the job — execute() drives the streaming pipeline.
        env.execute();
    }
}
