package com.atguigu.flink.wordcount;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Created by Smexy on 2023/2/23
 *      Simulates an unbounded (never-ending) data stream:
 *              data is sent continuously from a network port.
 *              Install netcat:  sudo yum -y install nc
 *
 *              Start the server side (run on hadoop103):  nc -lk 8888
 *
 *
 */
public class Demo3_UnBoundedStream
{
    /**
     * Entry point: builds and runs an unbounded word-count pipeline.
     * Reads lines from the socket hadoop103:8888, splits each line on spaces,
     * and prints a running count per word.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {

        // Obtain the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Parallelism 1 keeps the demo output ordered and easy to read.
        env.setParallelism(1);

        // Unbounded source: read text lines from a network socket.
        DataStreamSource<String> source = env.socketTextStream("hadoop103", 8888);

        // Tokenize each line into (word, 1) pairs.
        SingleOutputStreamOperator<Tuple2<String, Integer>> ds1 = source
            .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>()
            {
                @Override
                public void flatMap(String line, Collector<Tuple2<String, Integer>> out) throws Exception {
                    for (String word : line.split(" ")) {
                        out.collect(Tuple2.of(word, 1));
                    }
                }
            });

        ds1
            // Key the stream by the word itself (tuple field f0).
            .keyBy(new KeySelector<Tuple2<String, Integer>, String>()
            {
                // Extract the key part from each stream element.
                @Override
                public String getKey(Tuple2<String, Integer> value) throws Exception {
                    return value.f0;
                }
            })
            // Running sum over the count field (tuple index 1).
            .sum(1)
            .print();

        // Streaming jobs must be started explicitly; batch jobs don't need this.
        env.execute();
    }
}
