package com.anlu.storm;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.Random;

public class WordCountTepology   {


    /**
     * spout
     * spout 继承一个基类，实现接口，这个里面主要负责从数据源获取数据
     * 我们这里做一个简化，就不从外部数据获取数据了，只是自己内部不断发射的一些句子。
     */
    /**
     * Spout acting as the data source of the topology.
     * As a simplification, it does not read from an external system; it just
     * keeps emitting one of a fixed set of sample sentences.
     */
    public static class RandomSentenceSpout extends BaseRichSpout {
        private static final long serialVersionUID = 3699352201538354417L;

        private static final Logger LOGGER = LoggerFactory.getLogger(RandomSentenceSpout.class);

        // Hoisted out of nextTuple() so the array is not rebuilt on every call.
        private static final String[] SENTENCES = {
                "the cow jumped over the moon", "an apple a day keeps the doctor away",
                "four score and seven years ago", "snow white and the seven dwarfs",
                "i am at two with nature"};

        private SpoutOutputCollector collector;
        private Random random;

        /**
         * One-time initialization of the spout. This is the place to create
         * expensive resources such as a thread pool, a database connection
         * pool or an HTTP client.
         *
         * @param conf      topology configuration
         * @param context   task/topology runtime context
         * @param collector emitter used to send tuples downstream
         */
        @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.collector = collector;
            this.random = new Random();
        }

        /**
         * Invoked in an endless loop by a task inside some executor thread of
         * a worker process; each call may emit fresh tuples, which together
         * form a continuous stream.
         */
        @Override
        public void nextTuple() {
            // Throttle emission so the demo does not flood the topology.
            Utils.sleep(100);
            String sentence = SENTENCES[random.nextInt(SENTENCES.length)];
            // Parameterized logging: no concatenation cost when the level is disabled.
            LOGGER.info("【发射句子】sentense={}", sentence);
            // A Values instance is one tuple — the smallest unit of data;
            // an unbounded sequence of tuples forms a stream.
            collector.emit(new Values(sentence));
        }

        /**
         * Declares the name of each field of the tuples this spout emits.
         *
         * @param declarer receives the field declaration
         */
        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("sentence"));
        }
    }


    /**
     * 写一个bolt,直接继承一个baseRichBolt基类，
     * 实现里面的所有方法即可，每个bolt,同样是发送到worker某个executor的某个task里面去运行
     */
   /**
    * Bolt that splits each incoming sentence into individual words.
    * Like a spout, each bolt instance runs inside a task of some executor
    * within a worker process.
    */
   public static class SplitSentence extends BaseRichBolt {
       private static final long serialVersionUID = 6604009953652729483L;

       private OutputCollector collector;

        /**
         * One-time initialization hook of the bolt; stores the emitter used
         * to send tuples downstream.
         *
         * @param map             topology configuration
         * @param topologyContext task/topology runtime context
         * @param outputCollector emitter used to send tuples downstream
         */
       @Override
       public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
           this.collector = outputCollector;
       }

        /**
         * Invoked once per received tuple: splits the sentence on spaces and
         * emits one tuple per word, anchored to the input for reliability.
         *
         * @param tuple incoming tuple carrying a "sentence" field
         */
       @Override
       public void execute(Tuple tuple) {
           String sentence = tuple.getStringByField("sentence");
           for (String word : sentence.split(" ")) {
               // Anchor to the input tuple so a downstream failure replays it.
               collector.emit(tuple, new Values(word));
           }
           // BaseRichBolt does not ack automatically; without this, tuples
           // would eventually time out and be replayed when acking is enabled.
           collector.ack(tuple);
       }

        /**
         * Declares the single output field of this bolt.
         *
         * @param outputFieldsDeclarer receives the field declaration
         */
       @Override
       public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("word"));
       }
   }


   /**
    * Bolt that keeps a running count per word and emits (word, count) pairs.
    * Accurate counting relies on fieldsGrouping upstream, which routes every
    * occurrence of the same word to the same task.
    */
   public static class WordCount extends BaseRichBolt {
       private static final long serialVersionUID = 7208077706057284643L;

       private static final Logger LOGGER = LoggerFactory.getLogger(WordCount.class);

       private OutputCollector collector;
       // Per-task in-memory counts; safe because each task is single-threaded.
       private Map<String, Long> wordCounts = new HashMap<>();

       @Override
       public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
            this.collector = outputCollector;
       }

       /**
        * Increments the count for the received word and emits the new total.
        *
        * @param tuple incoming tuple carrying a "word" field
        */
       @Override
       public void execute(Tuple tuple) {
            String word = tuple.getStringByField("word");
            // merge() does the get / null-check / put dance in a single call.
            Long count = wordCounts.merge(word, 1L, Long::sum);
            LOGGER.info("【单词计数】{}出现的次数是{}", word, count);

            // Anchored emit + explicit ack for reliable processing.
            collector.emit(tuple, new Values(word, count));
            collector.ack(tuple);
       }

       /**
        * BUGFIX: execute() emits two values, but the original declared no
        * output fields, so the emit targeted an undeclared stream. Declare
        * the fields that are actually emitted.
        *
        * @param outputFieldsDeclarer receives the field declaration
        */
       @Override
       public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("word", "count"));
       }
   }


   /**
    * Wires the spout and bolts into a topology and submits it.
    * With command-line arguments the topology is submitted to a Storm
    * cluster; without, it runs for one minute in an in-process LocalCluster.
    *
    * @param args optional; args[0] is the topology name for cluster submission
    */
   public static void main(String[] args) {
       TopologyBuilder builder = new TopologyBuilder();

       // Arguments: component id, spout instance, executor (parallelism) count.
       builder.setSpout("RandomSentence", new RandomSentenceSpout(), 2);

       // BUGFIX: this bolt was registered as "splitSentence" but referenced
       // below as "SplitSentence" — the id mismatch made submission fail.
       builder.setBolt("SplitSentence", new SplitSentence(), 5)
               .setNumTasks(10)
               .shuffleGrouping("RandomSentence");

       // fieldsGrouping guarantees that every tuple with the same "word"
       // value lands on the same downstream task — e.g. all 5 "hello"
       // tuples reach one task, which is required for an accurate count.
       builder.setBolt("WordCount", new WordCount(), 10)
               .setNumTasks(20)
               .fieldsGrouping("SplitSentence", new Fields("word"));

       Config config = new Config();
       if (args != null && args.length > 0) {
           // Invoked from the command line: submit to the Storm cluster.
           config.setNumWorkers(3);
           try {
               StormSubmitter.submitTopology(args[0], config, builder.createTopology());
           } catch (Exception e) {
               e.printStackTrace();
           }
       } else {
           // Invoked from an IDE: run locally for 60 seconds, then shut down.
           config.setMaxTaskParallelism(20);

           LocalCluster cluster = new LocalCluster();
           cluster.submitTopology("WordCountTopology", config, builder.createTopology());

           Utils.sleep(60000);
           cluster.shutdown();
       }
   }

}
