package com.xhs;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

import java.io.Serializable;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;

/**
 * Word-count example topology for Apache Storm: a spout emits random
 * sentences, one bolt splits each sentence into words, and a second bolt
 * keeps per-word running counts. Runs in an in-process local cluster when
 * launched with no arguments, or submits to a real cluster using args[0]
 * as the topology name.
 */
public class WordCountTestApp
{
    public static void main( String[] args )
    {
        // Wire the spout and bolts together into a topology.
        TopologyBuilder builder = new TopologyBuilder();

        // Args: component id, spout instance, number of executors.
        builder.setSpout("WordProducer", new WordProducer(), 2);

        // 2 executors running 4 tasks; shuffle grouping load-balances the
        // sentences emitted by the spout across the bolt tasks.
        builder.setBolt("SplitSentence", new SplitSentence(), 2)
                .setNumTasks(4)
                .shuffleGrouping("WordProducer");

        // Fields grouping guarantees that every occurrence of the same word
        // coming out of SplitSentence is routed to the same downstream task,
        // which is what makes the per-word counts exact: if task1 saw 3
        // "hello"s and task2 saw 2, neither would know the true total of 5.
        builder.setBolt("WordCount", new WordCount(), 3)
                .setNumTasks(6)
                .fieldsGrouping("SplitSentence", new Fields("word"));

        Config config = new Config();

        if (args != null && args.length > 0) {
            // Launched from the command line: submit to the Storm cluster,
            // using args[0] as the topology name.
            config.setNumWorkers(2);

            try {
                StormSubmitter.submitTopology(args[0], config, builder.createTopology());
            } catch (AlreadyAliveException | InvalidTopologyException | AuthorizationException e) {
                e.printStackTrace();
            }
        } else {
            // No arguments: run inside an in-process local cluster (IDE testing).
            config.setMaxTaskParallelism(20);

            LocalCluster cluster = new LocalCluster();

            cluster.submitTopology("WCT", config, builder.createTopology());

            try {
                // Let the topology run for one minute before shutting down.
                Thread.sleep(60 * 1000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe it.
                Thread.currentThread().interrupt();
            }

            cluster.shutdown();
        }
    }

    /**
     * Data-producing spout.
     *
     * A real spout would pull data from an external source (Kafka, a queue,
     * etc.); as a simplification this one just keeps emitting sentences
     * picked at random from a fixed pool.
     */
    public static class WordProducer extends BaseRichSpout implements Serializable {

        // Fixed pool of sentences. Hoisted to a constant so the array is not
        // re-allocated on every nextTuple() call (nextTuple runs in a tight loop).
        private static final String[] SENTENCES = {
                "The darkest hour is that before the dawn",
                "We soon believe what we desire",
                "The longest day has an end",
                "Living without an aim is like sailing without a compass"};

        private SpoutOutputCollector collector;
        private Random random;

        @Override
        public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
            // The SpoutOutputCollector handed to open() is what this spout
            // uses to emit tuples downstream.
            this.collector = spoutOutputCollector;
            // Random generator used to pick a sentence on each emit.
            this.random = new Random();
        }

        /**
         * Called in an endless loop by the task hosting this spout
         * (some executor thread of some worker process); each invocation
         * emits one tuple, forming a continuous stream.
         */
        @Override
        public void nextTuple() {
            Utils.sleep(200);
            String s = SENTENCES[random.nextInt(SENTENCES.length)];
            System.out.println("【发射句子】sentence=" + s);
            collector.emit(new Values(s));
        }

        /**
         * Declares the field name of each tuple this spout emits:
         * a single field called "sentence".
         */
        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("sentence"));
        }
    }

    /**
     * Bolt that splits each incoming sentence into individual words.
     * Like a spout, each bolt instance runs inside a task of some
     * worker's executor thread.
     */
    public static class SplitSentence extends BaseRichBolt {

        private OutputCollector collector;

        /**
         * prepare() is the bolt's initialization hook; the OutputCollector
         * passed in is this bolt's tuple emitter.
         */
        @Override
        public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
            this.collector = outputCollector;
        }

        /**
         * Invoked once per received tuple: splits the sentence on spaces
         * and emits one tuple per word.
         */
        @Override
        public void execute(Tuple tuple) {
            String sentence = tuple.getStringByField("sentence");
            String[] words = sentence.split(" ");
            System.out.println("【拆分句子】sentence=" + sentence);
            for (String w : words) {
                collector.emit(new Values(w));
            }
            // BaseRichBolt requires explicit acking: without this, the input
            // tuple is eventually treated as failed whenever acking is enabled.
            collector.ack(tuple);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("word"));
        }
    }


    /**
     * Bolt that keeps a running per-word count in memory and emits the
     * updated (word, count) pair on every input word.
     */
    public static class WordCount extends BaseRichBolt {

        private OutputCollector collector;
        // Running counts. A plain HashMap is fine here because each bolt
        // task instance is confined to a single executor thread.
        private Map<String, Long> countMap = new HashMap<>();

        @Override
        public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
            this.collector = outputCollector;
        }

        /**
         * Increments the count for the incoming word and emits the
         * updated (word, count) tuple.
         */
        @Override
        public void execute(Tuple tuple) {
            String word = tuple.getStringByField("word");
            // merge() initializes an absent key to 1 and otherwise adds 1 —
            // replaces the manual null-check/put sequence.
            Long count = countMap.merge(word, 1L, Long::sum);

            System.out.println("【单词计数】" + word + "出现的次数是" + count);
            collector.emit(new Values(word, count));
            // BaseRichBolt requires explicit acking of the input tuple.
            collector.ack(tuple);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
            outputFieldsDeclarer.declare(new Fields("word", "count"));
        }
    }






}
