/**
 * Copyright (C), 2015-2018, XXX有限公司
 * FileName: GuaranteeLocalDoubleAndTripleBoltApp
 * Author:   An-Il
 * Date:     2018/11/9 9:19
 * Description: 演示Storm的ack/fail消息保证机制（数值二倍处理）
 * History:
 * <author>          <time>          <version>          <desc>
 * 作者姓名           修改时间           版本号              描述
 */
package com.blog.storm.example.local;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;

/**
 * 使用storm实现数的二倍,保证消息处理（ack/fail机制）
 *
 * @see <a href="http://storm.apache.org/releases/1.2.2/Guaranteeing-message-processing.html">Guaranteeing Message Processing</a>
 */
public class GuaranteeLocalDoubleAndTripleBoltApp {

    /**
     * 数据源参数的Spout
     */
    /**
     * Spout that emits an incrementing integer once per second, keeping each
     * tuple in an in-memory pending pool until Storm acks it, so failed
     * tuples can be replayed (guaranteed-processing / ack-fail pattern).
     */
    public static class DataSourceSpout extends BaseRichSpout {

        /** Tuples emitted but not yet acked, keyed by their message id. */
        private ConcurrentHashMap<UUID, Values> pendingPool;

        private SpoutOutputCollector collector;

        /** Counter emitted as the tuple payload; incremented before each emit. */
        private int val = 0;

        /**
         * Initializes the spout.
         *
         * @param map                  topology configuration
         * @param topologyContext      topology context
         * @param spoutOutputCollector collector used to emit tuples
         */
        @Override
        public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
            this.collector = spoutOutputCollector;
            this.pendingPool = new ConcurrentHashMap<UUID, Values>();
        }

        /**
         * Called by Storm in a loop; emits one tuple per second.
         */
        @Override
        public void nextTuple() {
            val++;

            UUID key = UUID.randomUUID();
            Values value = new Values(val);
            // Cache the tuple until it is acked so it can be replayed on failure.
            this.pendingPool.put(key, value);
            // Emitting with a message id (the UUID) enables ack/fail tracking.
            this.collector.emit(value, key);

            Utils.sleep(1000);
        }

        /**
         * Declares the single output field "val".
         *
         * @param outputFieldsDeclarer field declarer
         */
        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("val"));
        }

        @Override
        public void ack(Object msgId) {
            // Fully processed: drop the cached copy so the pool stays bounded.
            this.pendingPool.remove(msgId);
        }

        @Override
        public void fail(Object msgId) {
            System.err.println("msg fail_1:" + msgId);
            Values value = this.pendingPool.get(msgId);
            if (value != null) {
                // Replay the cached tuple with the SAME message id; it stays in
                // the pool until acked, so it is not lost if it fails again.
                // TODO: add a retry cap / persist permanently failing tuples.
                this.collector.emit(value, msgId);
            }
        }
    }


    /**
     * 数据源参数的Spout
     */
    /**
     * Second demo spout: identical to {@code DataSourceSpout} except that the
     * counter starts at 5 and failures are logged with the "msg fail_2:" tag.
     * Keeps each emitted tuple in a pending pool until acked so failed tuples
     * can be replayed (guaranteed-processing / ack-fail pattern).
     */
    public static class DataSourceSpout1 extends BaseRichSpout {

        /** Tuples emitted but not yet acked, keyed by their message id. */
        private ConcurrentHashMap<UUID, Values> pendingPool;

        private SpoutOutputCollector collector;

        /** Counter emitted as the tuple payload; incremented before each emit. */
        private int val = 5;

        /**
         * Initializes the spout.
         *
         * @param map                  topology configuration
         * @param topologyContext      topology context
         * @param spoutOutputCollector collector used to emit tuples
         */
        @Override
        public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
            this.collector = spoutOutputCollector;
            this.pendingPool = new ConcurrentHashMap<UUID, Values>();
        }

        /**
         * Called by Storm in a loop; emits one tuple per second.
         */
        @Override
        public void nextTuple() {
            val++;

            UUID key = UUID.randomUUID();
            Values value = new Values(val);
            // Cache the tuple until it is acked so it can be replayed on failure.
            this.pendingPool.put(key, value);
            // Emitting with a message id (the UUID) enables ack/fail tracking.
            this.collector.emit(value, key);

            Utils.sleep(1000);
        }

        /**
         * Declares the single output field "val".
         *
         * @param outputFieldsDeclarer field declarer
         */
        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("val"));
        }

        @Override
        public void ack(Object msgId) {
            // Fully processed: drop the cached copy so the pool stays bounded.
            this.pendingPool.remove(msgId);
        }

        @Override
        public void fail(Object msgId) {
            System.err.println("msg fail_2:" + msgId);
            Values value = this.pendingPool.get(msgId);
            if (value != null) {
                // Replay the cached tuple with the SAME message id; it stays in
                // the pool until acked, so it is not lost if it fails again.
                // TODO: add a retry cap / persist permanently failing tuples.
                this.collector.emit(value, msgId);
            }
        }
    }

    /**
     * 接受数据并处理
     */
    /**
     * Bolt that prints the doubled value of each incoming tuple, acking
     * values below 10 and deliberately failing values &gt;= 10 so the
     * spouts' {@code fail} callbacks are exercised.
     */
    public static class DoubleAndTripleBolt extends BaseRichBolt {

        private OutputCollector collector;

        /**
         * Initializes the bolt.
         *
         * @param stormConf configuration
         * @param context   topology context
         * @param collector collector used to ack/fail (and emit downstream)
         */
        @Override
        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.collector = collector;
        }

        @Override
        public void execute(Tuple input) {
            // Fetch by the field name declared by the spouts; positional
            // access (input.getInteger(0)) would work as well.
            Integer val = input.getIntegerByField("val");

            // Process FIRST, then ack/fail: acking before processing would
            // mark a tuple done even if the processing step failed.
            System.err.println("~~~~~~~~~~~~~~~~~~~~~~~");
            System.err.println(val + "*2=" + val * 2);

            if (val < 10) {
                this.collector.ack(input);
            } else {
                // Deliberately fail large values to trigger spout replay.
                this.collector.fail(input);
            }

            // TODO: optionally emit results downstream (anchored to input).
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // Terminal bolt: no downstream output fields to declare.
        }
    }


    /**
     * 构建本地Topology
     *
     * @param args
     * @url http://storm.apache.org/releases/1.2.2/Local-mode.html
     * http://storm.apache.org/releases/1.2.2/Running-topologies-on-a-production-cluster.html
     */
    /**
     * Builds the demo topology and submits it to an in-process local cluster.
     *
     * @param args unused command-line arguments
     * @see <a href="http://storm.apache.org/releases/1.2.2/Local-mode.html">Local mode</a>
     * @see <a href="http://storm.apache.org/releases/1.2.2/Running-topologies-on-a-production-cluster.html">Running topologies on a production cluster</a>
     */
    public static void main(String[] args) {

        // Wire both spouts into the single bolt. shuffleGrouping distributes
        // tuples randomly so each bolt task receives roughly the same share;
        // running on a production cluster uses the same builder API.
        final TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("dataSourceSpout", new DataSourceSpout());
        builder.setSpout("dataSourceSpout1", new DataSourceSpout1());
        builder.setBolt("doubleAndTripleBolt", new DoubleAndTripleBolt())
                .shuffleGrouping("dataSourceSpout")
                .shuffleGrouping("dataSourceSpout1");

        final StormTopology topology = builder.createTopology();

        // LocalCluster simulates a Storm cluster inside this JVM. Its
        // submitTopology method mirrors StormSubmitter's: it takes a topology
        // name, a configuration, and the topology object; killTopology with
        // the same name would terminate it.
        final LocalCluster cluster = new LocalCluster();
        cluster.submitTopology("LocalDoubleAndTripleBoltApp", new Config(), topology);

        // To shut the local cluster down, simply call:
        //cluster.shutdown();
    }

}