package com.zbj.storm;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.concurrent.TimeUnit;

/**
 * WordCountTopology
 *
 * @author weigang
 * @create 2019-08-29
 **/
/**
 * Driver class that wires up and launches the word-count topology.
 *
 * <p>Pipeline: {@code SentenceSpout -> SplitSentenceBolt -> WordCountBolt -> ReportBolt}.
 *
 * <p>With no CLI arguments the topology runs in an in-process {@link LocalCluster}
 * for ~20 seconds (development/debug mode). With at least one argument it is
 * submitted to a remote cluster via {@link StormSubmitter}, using the first
 * argument as the topology name, e.g.:
 * {@code storm jar storm-demo.jar com.zbj.storm.WordCountTopology myTopologyName}
 */
public class WordCountTopology {

    private static final Logger log = LoggerFactory.getLogger(WordCountTopology.class);

    /** Unique component IDs within the topology. */
    private static final String SENTENCE_SPOUT_ID = "sentence-spout";
    private static final String SPLIT_SENTENCE_BOLT_ID = "split-bolt";
    private static final String WORD_COUNT_BOLT_ID = "count-bolt";
    private static final String REPORT_BOLT_ID = "report-bolt";

    /** Default topology name, used in local mode and when no CLI name is given. */
    private static final String TOPOLOGY_NAME = "wordCountTopology";

    /**
     * Builds the topology and submits it locally or to a cluster.
     *
     * @param args optional; {@code args[0]} is the topology name for cluster mode
     * @throws Exception if topology submission fails or the local-mode sleep is interrupted
     */
    public static void main(String[] args) throws Exception {

        log.info(".........beginning.......");

        TopologyBuilder topologyBuilder = new TopologyBuilder();

        // Source of sentences; parallelism hint of 2 executors.
        topologyBuilder.setSpout(SENTENCE_SPOUT_ID, new SentenceSpout(), 2);

        // shuffleGrouping: sentences are distributed randomly across split-bolt tasks.
        topologyBuilder.setBolt(SPLIT_SENTENCE_BOLT_ID, new SplitSentenceBolt())
                .shuffleGrouping(SENTENCE_SPOUT_ID);

        // fieldsGrouping on "word": the same word is always routed to the same
        // counting task, so each task's in-memory count stays consistent.
        topologyBuilder.setBolt(WORD_COUNT_BOLT_ID, new WordCountBolt())
                .fieldsGrouping(SPLIT_SENTENCE_BOLT_ID, new Fields("word"));

        // globalGrouping: every count tuple funnels into a single reporting task.
        topologyBuilder.setBolt(REPORT_BOLT_ID, new ReportBolt())
                .globalGrouping(WORD_COUNT_BOLT_ID);

        // Config extends HashMap and carries topology-level settings.
        Config config = new Config();

        StormTopology topology = topologyBuilder.createTopology();

        if (args == null || args.length < 1) {
            // Local development mode: Storm spins up an in-process cluster,
            // runs the topology for ~20s, then tears it down.
            LocalCluster localCluster = new LocalCluster();
            try {
                localCluster.submitTopology(TOPOLOGY_NAME, config, topology);
                TimeUnit.SECONDS.sleep(20);
            } finally {
                // Ensure the local cluster is torn down even if the sleep is interrupted.
                localCluster.shutdown();
            }
        } else {
            // Cluster mode: use the caller-supplied topology name. (Previously the
            // argument was documented as the topology name but silently ignored.)
            StormSubmitter.submitTopology(args[0], config, topology);
        }
    }
}