/**
 * Copyright (C), 2015-2018, XXX有限公司
 * FileName: ClusterDoubleBoltShuffleGroupingApp
 * Author:   An-Il
 * Date:     2018/11/9 9:19
 * Description: Doubles a stream of integers on a Storm cluster (shuffle grouping)
 * History:
 * <author>          <time>          <version>          <desc>
 * 作者姓名           修改时间           版本号              描述
 */
package com.blog.storm.example.cluster.shuffle;

import org.apache.storm.Config;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

import java.util.Map;

/**
 * Doubles a stream of integers using Storm, running on a cluster with
 * shuffle grouping: tuples are distributed randomly across the bolt's
 * tasks so each task receives roughly the same number of tuples.
 *
 * @url http://storm.apache.org/releases/1.2.2/Concepts.html
 */
public class ClusterDoubleBoltShuffleGroupingApp {

    /**
     * Spout that emits an ever-increasing integer, one tuple every 3 seconds.
     */
    public static class DataSourceSpout extends BaseRichSpout {

        /** Emitter handed to this spout by the framework in {@link #open}. */
        private SpoutOutputCollector collector;

        /** Next value to emit. Primitive int avoids Integer autoboxing on each val++. */
        private int val = 1;

        /**
         * Initialization callback invoked once when the spout task starts.
         *
         * @param map                  topology configuration
         * @param topologyContext      topology context
         * @param spoutOutputCollector collector used to emit tuples downstream
         */
        @Override
        public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
            this.collector = spoutOutputCollector;
        }

        /**
         * Called repeatedly (effectively an infinite loop) by the framework.
         * Emits one tuple, then sleeps 3 seconds to throttle the stream.
         */
        @Override
        public void nextTuple() {
            System.out.println("send val:" + val);
            this.collector.emit(new Values(val++));
            Utils.sleep(3000);
        }

        /**
         * Declares the single output field "val" that downstream bolts read.
         *
         * @param outputFieldsDeclarer declarer for this spout's output schema
         */
        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("val"));
        }
    }

    /**
     * Bolt that receives each value and prints its double.
     *
     * NOTE(review): the class name suggests it also triples, but only doubling
     * is implemented; the name is kept unchanged for compatibility with any
     * external references.
     */
    public static class DoubleAndTripleBolt extends BaseRichBolt {

        /**
         * Initialization callback. This bolt emits nothing downstream, so the
         * collector is intentionally not retained.
         *
         * @param stormConf topology configuration
         * @param context   topology context
         * @param collector collector for emitting tuples (unused here)
         */
        @Override
        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            // No state to initialize.
        }

        /**
         * Doubles the incoming value and logs it with the worker thread id,
         * which makes the shuffle-grouping distribution across the 3 bolt
         * tasks observable in the output.
         *
         * @param input tuple carrying the field "val" declared by the spout
         */
        @Override
        public void execute(Tuple input) {
            // Access by declared field name; input.getInteger(0) (by index) works too.
            int val = input.getIntegerByField("val");
            System.out.println("ThreadId:" + Thread.currentThread().getId() + " double is:" + val * 2);

            // TODO: emit results downstream if further processing is needed.
        }

        /**
         * This bolt is terminal in the topology and declares no output fields.
         *
         * @param declarer declarer for this bolt's (empty) output schema
         */
        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // Intentionally empty: nothing is emitted.
        }
    }

    /**
     * Builds the topology and submits it to the production cluster.
     * Defining the topology is identical to local mode; only the submission
     * differs (StormSubmitter instead of LocalCluster).
     *
     * shuffleGrouping("dataSourceSpout"): tuples are randomly distributed
     * across the bolt's tasks, each task getting an equal share.
     *
     * @param args unused
     * @url http://storm.apache.org/releases/1.2.2/Running-topologies-on-a-production-cluster.html
     */
    public static void main(String[] args) {

        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("dataSourceSpout", new DataSourceSpout());
        // Parallelism hint of 3: three bolt executors share the spout's stream.
        builder.setBolt("doubleBolt", new DoubleAndTripleBolt(), 3).shuffleGrouping("dataSourceSpout");

        StormTopology topology = builder.createTopology();

        String name = ClusterDoubleBoltShuffleGroupingApp.class.getSimpleName();
        Config conf = new Config();
        try {
            StormSubmitter.submitTopology(name, conf, topology);
        } catch (Exception e) {
            // Fail loudly instead of swallowing the error with printStackTrace():
            // a failed submission leaves nothing running, so crashing with the
            // original cause preserved is the correct behavior.
            throw new RuntimeException("Failed to submit topology '" + name + "'", e);
        }
    }

}