package com.github.binarylei.trident.wordcount;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.trident.Stream;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.operation.builtin.Count;
import org.apache.storm.trident.testing.FixedBatchSpout;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

/**
 * @author: leigang
 * @version: 2018-04-10
 */
/**
 * Trident word-count example: a fixed-batch spout emits space-separated subject
 * strings, a split function tokenizes them, and a grouped {@link Count}
 * aggregation tallies occurrences per word, which are printed by a final
 * result function.
 */
public class TridentWordCountTopology {

    /**
     * Entry point. With no CLI arguments the topology runs on an in-process
     * {@link LocalCluster} for 20 seconds and is then shut down; otherwise it
     * is submitted to a remote cluster under the name given in {@code args[0]}.
     *
     * @param args optional; {@code args[0]} is the remote topology name
     * @throws InterruptedException   if the local-mode sleep is interrupted
     * @throws InvalidTopologyException if the topology is structurally invalid
     * @throws AuthorizationException   if remote submission is not authorized
     * @throws AlreadyAliveException    if a topology with that name is running
     */
    public static void main(String[] args) throws InterruptedException, InvalidTopologyException,
            AuthorizationException, AlreadyAliveException {
        Config conf = new Config();
        conf.setNumWorkers(2);
        conf.setMaxSpoutPending(20);

        if (args == null || args.length == 0) {
            LocalCluster cluster = new LocalCluster();
            try {
                cluster.submitTopology("trident-function", conf, buildTopology());
                // Let the topology process the fixed batches before tearing down.
                Thread.sleep(20 * 1000);
            } finally {
                // Always release the in-process cluster, even if the sleep is interrupted.
                cluster.shutdown();
            }
        } else {
            StormSubmitter.submitTopology(args[0], conf, buildTopology());
        }
    }

    /**
     * Builds the Trident topology: spout -> shuffle -> split -> groupBy(word)
     * -> count -> print.
     *
     * @return the compiled {@link StormTopology}
     */
    private static StormTopology buildTopology() {

        TridentTopology topology = new TridentTopology();
        // Fixed test data source: each Values is one tuple with a single
        // "subjects" field containing a space-separated word list.
        FixedBatchSpout spout = new FixedBatchSpout(
                new Fields("subjects"),
                4,  // max batch size
                new Values("java php python c++"),
                new Values("java ruby python c++"),
                new Values("java php ruby c++"),
                new Values("ruby php python c++")
        );
        // Emit the fixed data once only; do not cycle forever.
        spout.setCycle(false);
        Stream inputStream = topology.newStream("spout", spout);
        /*
         * each(...) is Trident's analogue of wiring a bolt after a spout:
         * 1. input fields read from the tuple: "subjects"
         * 2. the function (bolt-like logic) to apply: SplitFunction
         * 3. output fields appended by the function: "sub"
         */
        inputStream.shuffle()
                .each(new Fields("subjects"), new SplitFunction(), new Fields("sub"))
                // Group by the word field, similar to a fields grouping in core Storm.
                .groupBy(new Fields("sub"))
                // Aggregate each group with Count; the result field is named "count".
                .aggregate(new Count(), new Fields("count"))
                /*
                 * Chain a second each(...) to consume the aggregated stream:
                 * reads "sub" and "count", applies ResultFunction, and emits
                 * no new fields (empty Fields()).
                 */
                .each(new Fields("sub", "count"), new ResultFunction(), new Fields())
                .parallelismHint(1);
        return topology.build();
    }

    /**
     * Splits the space-separated "subjects" field into individual words and
     * emits one tuple per word.
     */
    public static class SplitFunction extends BaseFunction {

        @Override
        public void execute(TridentTuple tuple, TridentCollector collector) {
            System.out.println("传入进来的内容为：" + tuple);
            String subjects = tuple.getStringByField("subjects");
            String[] words = subjects.split(" ");
            for (String word : words) {
                collector.emit(new Values(word));
            }
        }
    }

    /**
     * Terminal function: prints each word ("sub") with its aggregated count.
     * Emits nothing downstream.
     */
    public static class ResultFunction extends BaseFunction {

        @Override
        public void execute(TridentTuple tuple, TridentCollector collector) {
            String sub = tuple.getStringByField("sub");
            Long count = tuple.getLongByField("count");
            System.out.println(String.format("---- %s，%s", sub, count));
        }
    }
}
