package com.wuji1626.wc;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.FlatMapOperator;
import org.apache.flink.api.java.operators.UnsortedGrouping;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

/**
 * @author Administrator
 * @version 1.0
 * @description: 以批的形式处理有界数据
 * @date 2025/3/8 15:54
 */
public class Flink01_Bound_Batch {

    /**
     * Batch word count over a bounded text file using Flink's DataSet API.
     *
     * <p>Reads the input file line by line, splits each line into words,
     * and prints {@code (word, count)} tuples to stdout.
     *
     * @param args optional; {@code args[0]} overrides the default input file path
     */
    public static void main(String[] args) {
        // step1: prepare the batch execution environment
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // step2: read the bounded input. Accept the path from the command line so the
        // job is not tied to one developer's machine; fall back to the original default.
        String inputPath = args.length > 0
                ? args[0]
                : "D:\\workspace\\Java\\bigdata-sample\\input\\word.txt";
        DataSource<String> ds = env.readTextFile(inputPath);

        // step3: flatten each line into Tuple2<word, 1L> records
        FlatMapOperator<String, Tuple2<String, Long>> flatMapDS = ds.flatMap(
                new FlatMapFunction<String, Tuple2<String, Long>>() {
                    @Override
                    public void flatMap(String line, Collector<Tuple2<String, Long>> out) throws Exception {
                        // Split on runs of whitespace: split(" ") would emit empty
                        // tokens for consecutive spaces and would not split on tabs.
                        for (String word : line.split("\\s+")) {
                            if (!word.isEmpty()) {
                                // emit one (word, 1) tuple downstream per word
                                out.collect(Tuple2.of(word, 1L));
                            }
                        }
                    }
                }
        );

        // step4: group by the word (tuple field 0)
        UnsortedGrouping<Tuple2<String, Long>> groupByDS = flatMapDS.groupBy(0);

        // step5: sum the counts (tuple field 1) within each group
        AggregateOperator<Tuple2<String, Long>> sumDS = groupByDS.sum(1);

        // step6: print() triggers job execution and writes the result to stdout
        try {
            sumDS.print();
        } catch (Exception e) {
            // print() declares a checked Exception; rethrow with context, preserving the cause
            throw new RuntimeException("Word-count batch job failed for input: " + inputPath, e);
        }
    }
}
