/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.beam.examples;

// beam-playground:
//   name: WordCount
//   description: An example that counts words in Shakespeare's works.
//   multifile: false
//   pipeline_options: --output output.txt
//   context_line: 204
//   categories:
//     - Combiners
//     - Options
//     - Quickstart
//   complexity: MEDIUM
//   tags:
//     - count
//     - strings

import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import org.apache.beam.examples.common.ExampleUtils;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.io.TextIO;
import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.Distribution;
import org.apache.beam.sdk.metrics.Metrics;
import org.apache.beam.sdk.options.Default;
import org.apache.beam.sdk.options.Description;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.options.Validation.Required;
import org.apache.beam.sdk.runners.TransformHierarchy;
import org.apache.beam.sdk.transforms.*;
import org.apache.beam.sdk.values.KV;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PValue;

/**
 * An example that counts words in Shakespeare's works, applying Beam best practices.
 *
 * <p>This class, {@link WordCount}, is the second in a series of four successively more detailed
 * 'word count' examples. You may first want to take a look at {@link MinimalWordCount}. After
 * you've looked at this example, then see the {@link DebuggingWordCount} pipeline, for introduction
 * of additional concepts.
 *
 * <p>For a detailed walkthrough of this example, see <a
 * href="https://beam.apache.org/get-started/wordcount-example/">
 * https://beam.apache.org/get-started/wordcount-example/ </a>
 *
 * <p>Basic concepts, also in the MinimalWordCount example: Reading text files; counting a
 * PCollection; writing to text files
 *
 * <p>New Concepts:
 *
 * <pre>
 *   1. Executing a Pipeline both locally and using the selected runner
 *   2. Using ParDo with static DoFns defined out-of-line
 *   3. Building a composite transform
 *   4. Defining your own pipeline options
 * </pre>
 *
 * <p>Concept #1: you can execute this pipeline either locally or using by selecting another runner.
 * These are now command-line options and not hard-coded as they were in the MinimalWordCount
 * example.
 *
 * <p>To change the runner, specify:
 *
 * <pre>{@code
 * --runner=YOUR_SELECTED_RUNNER
 * }</pre>
 *
 * <p>To execute this pipeline, specify a local output file (if using the {@code DirectRunner}) or
 * output prefix on a supported distributed file system.
 *
 * <pre>{@code
 * --output=[YOUR_LOCAL_FILE | YOUR_OUTPUT_PREFIX]
 * }</pre>
 *
 * <p>The input file defaults to a public data set containing the text of King Lear, by William
 * Shakespeare. You can override it and choose your own input with {@code --inputFile}.
 */
public class WordCount {
    // Hutool logger automatically bound to this class; used for pipeline-structure debug output.
    static final Log log = LogFactory.get();

    /**
     * Concept #2: a DoFn defined statically out-of-line, which reduces pipeline-assembly
     * boilerplate. Splits each line of text into individual words.
     */
    // [START extract_words_fn]
    static class ExtractWordsFn extends DoFn<String, String> {
        private final Counter emptyLines = Metrics.counter(ExtractWordsFn.class, "emptyLines");
        private final Distribution lineLenDist =
                Metrics.distribution(ExtractWordsFn.class, "lineLenDistro");

        @ProcessElement
        public void processElement(@Element String element, OutputReceiver<String> receiver) {
            // Track the length of every line, and count lines that are entirely blank.
            lineLenDist.update(element.length());
            if (element.trim().isEmpty()) {
                emptyLines.inc();
            }

            // Tokenize the line and emit each non-empty token into the output PCollection.
            for (String token : element.split(ExampleUtils.TOKENIZER_PATTERN, -1)) {
                if (!token.isEmpty()) {
                    receiver.output(token);
                }
            }
        }
    }
    // [END extract_words_fn]

    /**
     * A SimpleFunction that renders one (word, count) pair as a printable line, e.g. "king: 42".
     */
    public static class FormatAsTextFn extends SimpleFunction<KV<String, Long>, String> {
        @Override
        public String apply(KV<String, Long> input) {
            String word = input.getKey();
            Long occurrences = input.getValue();
            return word + ": " + occurrences;
        }
    }

    /**
     * A PTransform that converts a PCollection of text lines into a PCollection of per-word
     * counts.
     *
     * <p>Concept #3: a custom composite transform that bundles two transforms (ParDo and Count)
     * into a single reusable PTransform subclass.
     */
    // [START count_words]
    public static class CountWords
            extends PTransform<PCollection<String>, PCollection<KV<String, Long>>> {
        @Override
        public PCollection<KV<String, Long>> expand(PCollection<String> lines) {
            // Split each line into words, then count how often each distinct word occurs.
            return lines
                    .apply("分解单词", ParDo.of(new ExtractWordsFn()))
                    .apply("统计单词数", Count.perElement());
        }
    }
    // [END count_words]

    /**
     * Options supported by {@link WordCount}.
     *
     * <p>Concept #4: defining your own pipeline options. Here you can add your own arguments,
     * specify default values, and access the parsed values from the pipeline code.
     */
    // [START wordcount_options]
    public interface WordCountOptions extends PipelineOptions {

        /**
         * By default, this example reads from a public dataset containing the text of King Lear. Set
         * this option to choose a different input file or glob.
         */
        @Description("Path of the file to read from")
        @Default.String("gs://apache-beam-samples/shakespeare/kinglear.txt")
        String getInputFile();

        void setInputFile(String value);

        /**
         * Set this required option to specify where to write the output.
         */
        @Description("Path of the file to write to")
        @Required
        String getOutput();

        void setOutput(String value);
    }

    /**
     * Assembles and runs the word-count pipeline, blocking until it finishes.
     *
     * <p>Reads the input file, applies the {@link CountWords} composite transform, formats each
     * (word, count) pair with {@link FormatAsTextFn}, and writes the results to the output path.
     * Before running, the pipeline graph is traversed once to log its structure at debug level.
     *
     * @param options pipeline options carrying the input file path and the required output path
     */
    static void runWordCount(WordCountOptions options) {
        options.setJobName("WordCount");

        Pipeline p = Pipeline.create(options);

        p.apply("读取文件", TextIO.read().from(options.getInputFile()))
                .apply("分解单词并计数", new CountWords())
                .apply("将单词组织成展示形式", MapElements.via(new FormatAsTextFn()))
                .apply("写入文件", TextIO.write().to(options.getOutput()));

        // Walk the pipeline graph and log every primitive transform and produced value.
        // This is purely diagnostic; it does not modify the pipeline.
        p.traverseTopologically(new Pipeline.PipelineVisitor.Defaults() {
            @Override
            public void visitPrimitiveTransform(TransformHierarchy.Node node) {
                log.debug("节点切换中：{}", node.getFullName());
            }

            @Override
            public void visitValue(PValue value, TransformHierarchy.Node producer) {
                log.debug("生成值调用处理过程：【{}】,【{}】", value.getName(), producer);
            }
        });

        p.run().waitUntilFinish();
    }

    /**
     * Entry point: parses {@link WordCountOptions} from the command-line arguments (validating
     * that the required {@code --output} option is present) and runs the pipeline.
     *
     * <p>The runner defaults to the {@code DirectRunner}; pass {@code --runner=...} to select
     * another one.
     *
     * @param args command-line arguments, e.g. {@code --inputFile=... --output=...}
     */
    public static void main(String[] args) {
        WordCountOptions options =
                PipelineOptionsFactory.fromArgs(args).withValidation().as(WordCountOptions.class);

        runWordCount(options);
    }
}
