package cn.smileyan.demos;

import java.util.Locale;
import java.util.regex.Pattern;

import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.MultipleParameterTool;
import org.apache.flink.util.Collector;
import org.apache.flink.util.Preconditions;


/**
 * Batch WordCount example.
 *
 * <p>Reads text either from the files given via one or more {@code --input} arguments or, when no
 * input is supplied, from the built-in {@link #DEFAULT_WORDS} sample data. Each line is lowercased
 * and split on non-word characters, and the occurrence count of every word is printed to stdout.
 *
 * @author Smileyan
 */
@Slf4j
public class BatchWordCount {
    /**
     * Default sample text used for the WordCount demo when no {@code --input} path is supplied.
     */
    protected static final String[] DEFAULT_WORDS = {"Flink’s Table & SQL API makes it possible to work with queries written ",
            "in the SQL language, but these queries need to be embedded within a table program that is written in either Java or Scala. ",
            "Moreover, these programs need to be packaged with a build tool before being submitted to a cluster. ",
            "This more or less limits the usage of Flink to Java/Scala programmers",
            "The SQL Client aims to provide an easy way of writing, debugging, and submitting table programs ",
            "to a Flink cluster without a single line of Java or Scala code. ",
            "The SQL Client CLI allows for retrieving and visualizing real-time results from the running distributed ",
            "application on the command line."};

    /**
     * Entry point: parses command-line parameters, sets up the Flink execution environment,
     * builds the input data set, and runs the word count.
     *
     * @param args command-line arguments; zero or more repeated {@code --input <path>} entries
     * @throws Exception if reading the input or executing the Flink job fails
     */
    public static void main(String[] args) throws Exception {
        // Parse command-line parameters (supports repeated keys such as --input).
        final MultipleParameterTool params = MultipleParameterTool.fromArgs(args);

        // Obtain the Flink batch execution environment.
        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

        // Make the parameters available to all operators via the job configuration.
        env.getConfig().setGlobalJobParameters(params);

        // Build the text data set: union of all --input files, or the bundled sample data.
        DataSet<String> text = null;
        if (params.has("input")) {
            for (String input : params.getMultiParameterRequired("input")) {
                text = (text == null) ? env.readTextFile(input) : text.union(env.readTextFile(input));
            }
        } else {
            text = env.fromElements(DEFAULT_WORDS);
        }

        // Single unconditional guard. A Java `assert` would be silently skipped unless the JVM
        // runs with -ea (assertions are disabled by default), so Preconditions is used instead.
        Preconditions.checkNotNull(text, "Input DataSet should not be null.");

        // Tokenize, group by word (tuple field 0), and sum the counts (tuple field 1).
        DataSet<Tuple2<String, Integer>> counts =
                text.flatMap(new Tokenizer())
                        .groupBy(0)
                        .sum(1);

        // Print the result to stdout (triggers job execution).
        counts.print();
    }

    /**
     * {@link FlatMapFunction} that splits each input line into lowercase words and emits a
     * {@code (word, 1)} tuple for every non-empty token.
     */
    public static final class Tokenizer
            implements FlatMapFunction<String, Tuple2<String, Integer>> {

        /** Compiled once; avoids re-compiling the regex for every record in the hot path. */
        private static final Pattern NON_WORD = Pattern.compile("\\W+");

        /**
         * Tokenizes one line of input.
         *
         * @param value a line of text
         * @param out   collector receiving one {@code (word, 1)} tuple per non-empty token
         */
        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
            // Locale.ROOT makes lowercasing locale-independent (e.g. avoids the Turkish
            // dotless-i problem, where "I".toLowerCase() would yield "ı").
            for (String token : NON_WORD.split(value.toLowerCase(Locale.ROOT))) {
                if (!token.isEmpty()) {
                    out.collect(new Tuple2<>(token, 1));
                }
            }
        }
    }
}
