package org.example;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.util.Objects;

/**
 * Bounded-stream word count.
 *
 * <p>Reads {@code words.txt} from the classpath as a bounded DataStream,
 * splits each line into words, and prints a running {@code (word, count)}
 * total per word.
 *
 * @author shenguangyang
 */
public class BoundedStreamWordCount {
    public static void main(String[] args) throws Exception {
        // 1. Create the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Read input data from a file on the classpath (bounded source).
        //    Use this class (not BatchWordCount) to resolve the resource, and
        //    fail with a clear message if the file is missing.
        String filePath = Objects.requireNonNull(
                BoundedStreamWordCount.class.getClassLoader().getResource("words.txt"),
                "words.txt not found on classpath").getPath();
        DataStream<String> inputDataStream = env.readTextFile(filePath);

        // 3. Tokenize each line into (word, 1) tuples, then aggregate.
        //    NOTE: the original instantiated BatchWordCount.MyFlatMapper,
        //    leaving this class's own MyFlatMapper as dead code — use ours.
        DataStream<Tuple2<String, Integer>> resultDataSet = inputDataStream.flatMap(new MyFlatMapper())
                // Group by the word (tuple field 0).
                .keyBy(data -> data.f0)
                // Sum the counts (tuple field 1) within each group.
                .sum(1);

        // 4. Print the running result to stdout.
        resultDataSet.print();

        // 5. Launch the job; the pipeline defined above runs as data arrives.
        env.execute();
    }

    /**
     * Splits a line of text into {@code (word, 1)} tuples.
     *
     * <p>{@code FlatMapFunction<T, O>}: {@code T} is the input type,
     * {@code O} the output type. {@link Tuple2} is Flink's 2-tuple.
     */
    public static class MyFlatMapper implements FlatMapFunction<String, Tuple2<String, Integer>> {
        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
            // Split on runs of whitespace; split(" ") would emit empty
            // tokens for consecutive spaces, which were then counted as
            // words ("") — skip blanks to be safe.
            String[] words = value.split("\\s+");
            // Emit each word as a (word, 1) tuple for downstream summing.
            for (String word : words) {
                if (!word.isEmpty()) {
                    out.collect(new Tuple2<>(word, 1));
                }
            }
        }
    }
}
