
/*
 * Copyright © 2021 https://www.cestc.cn/ All rights reserved.
 */

package com.zx.learn.flink.helloworld;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.api.java.operators.MapOperator;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.util.Collector;

/**
 * Word count implemented with the Flink batch (DataSet) API.
 * Steps:
 *   1. Obtain the batch execution environment.
 *   2. Read the data source.
 *   3. Transform: split lines into words, map to (word, 1), group and sum.
 *   4. Sink the result — here, print to the console (which also triggers execution;
 *      unlike the streaming API, no explicit env.execute() call is needed).
 * Reference: https://blog.csdn.net/xianyu120/article/details/118087756
 */
public class BatchWordcount {
    public static void main(String[] args) throws Exception {
        // 1. Obtain the batch execution environment.
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // 2. Read the data source. In-memory elements are used here, which is
        //    convenient for local development and testing (a file source such as
        //    env.readTextFile(...) would be used in production).
        //    Declared as the DataSet interface rather than the concrete
        //    DataSource/MapOperator/AggregateOperator types ("program to interfaces").
        DataSet<String> source = env
                .fromElements("it hadoop spark", "it hadoop spark", "it hadoop", "it");

        // 3.1 Split each line on single spaces and emit one element per word.
        DataSet<String> words = source.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public void flatMap(String line, Collector<String> out) {
                for (String word : line.split(" ")) {
                    out.collect(word);
                }
            }
        });

        // 3.2 Map each word to a (word, 1) tuple so occurrences can be summed.
        DataSet<Tuple2<String, Integer>> pairs =
                words.map(new MapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public Tuple2<String, Integer> map(String word) {
                        return Tuple2.of(word, 1);
                    }
                });

        // 3.3 Group by the word (tuple field 0) and sum the counts (tuple field 1).
        DataSet<Tuple2<String, Integer>> result = pairs.groupBy(0).sum(1);

        // 4. Print the counts. In the DataSet API, print() is a terminal action
        //    that submits and executes the job.
        result.print();
    }
}
