package com.roncoo.eshop.storm;

import java.util.HashMap;
import java.util.Map;
import java.util.Random;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Copyright: Copyright (c) 2019 Hu-Cheng
 *
 * @ClassName: WordCountTopology.java
 * @Description: storm简单计数程序，storm源源不断的接收到一些句子，然后你需要实时的统计出句子中每个单词的出现次数
 *
 * @version: v1.0.0
 * @author: hucheng
 * @date: Oct 9, 2019 3:58:37 PM
 *
 */
public class WordCountTopology {

	/**
	 * Copyright: Copyright (c) 2019 Hu-Cheng
	 *
	 * @ClassName: WordCountTopology.java
	 * @Description: 通过继承实现Spout spout:数据源的一个代码组件，就是我们可以实现一个spout接口，写一个java类
	 *               在这spout代码中，我们可以自己去尝试取数据源获取数据，比如说从kafka 中消费数据
	 *
	 * @version: v1.0.0
	 * @author: hucheng
	 * @date: Oct 9, 2019 4:02:18 PM
	 *
	 */
	@SuppressWarnings("serial")
	public static class RandomSentenceSpout extends BaseRichSpout {

		SpoutOutputCollector _collector;
		Random _rand;

		/**
		 * open方法 是对spout进行初始化的 比方说，创建一个线程池，或者创建一个数据库连接池，或者构建一个httpclient
		 */
		@SuppressWarnings("rawtypes")
		public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
			// 在open初始化的时候，会传入进来一个东西，叫做SpoutOutputCollector
			// 这个SpoutOuputCollector就是用来发射数据出去的
			_collector = collector;
			// 构建一个随机数生产对象
			_rand = new Random();
		}

		/**
		 * nextTuple方法 这个spout类，最终会运行在task中，某个worker进程的某个executor线程内部的没个task中
		 * 那个task会负责去不断的无线循环调用nextTuple()方法 只要无限循环的调用，可以不断的发射最新的数据出去，形成一个数据流
		 */
		public void nextTuple() {
			Utils.sleep(100);
			String[] sentences = new String[] { "the cow jumped over the moon", "an apple a day keeps the doctor away",
					"four score and seven years ago", "snow white and the snven dwarfs", "i am at two with nature" };
			final String sentence = sentences[_rand.nextInt(sentences.length)];
			_collector.emit(new Values(sentence));
		}

		/**
		 * declareOutputFields方法很重要 这个方法是定义一个你发射出去的没个tuple中的每个field的名称是什么
		 */
		public void declareOutputFields(OutputFieldsDeclarer declarer) {
			declarer.declare(new Fields("sentence"));
		}

		protected String sentence(String input) {
			return input;
		}

	}

	/**
	 * Copyright: Copyright (c) 2019 Hu-Cheng
	 *
	 * @ClassName: WordCountTopology.java
	 * @Description: 写一个bolt，直接继承一个BaseRichBolt，实现里面所有的方法即可，每个bolt方法，同样是发送到worker某个executor的task里面运行
	 *               bolt是一个业务处理的代码组件，spout会将数据传给bolt，各种bolt还可以串联成一个计算链，java类实现一个bolt接口
	 *
	 * @version: v1.0.0
	 * @author: hucheng
	 * @date: Oct 10, 2019 9:45:16 AM
	 *
	 */
	@SuppressWarnings("serial")
	public static class SplitSentence extends BaseRichBolt {

		@SuppressWarnings("unused")
		private static final long seriaVersionUID = 6604009953652729483L;

		private OutputCollector collector;

		/**
		 * 对于bolt来说，第一个方法，就是perpare方法 OutputCollector,这个也是Bolt的这个tuple的发射器
		 */
		@SuppressWarnings("rawtypes")
		public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
			this.collector = collector;
		}

		/**
		 * executor方法就是说，每次接收到一条数据后，就会交给这个executor方法来执行
		 */
		public void execute(Tuple tuple) {
			String sentence = tuple.getStringByField("sentence");
			String[] words = sentence.split(" ");
			for (String string : words) {
				collector.emit(new Values(string));
			}
		}

		/**
		 * 定义发射出去的tuple，每个field的名称
		 */
		public void declareOutputFields(OutputFieldsDeclarer declarer) {
			declarer.declare(new Fields("word"));
		}
	}

	/**
	 * Copyright: Copyright (c) 2019 Hu-Cheng
	 *
	 * @ClassName: WordCountTopology.java
	 * @Description: 统计处理收到的单词出现的次数
	 *
	 * @version: v1.0.0
	 * @author: hucheng
	 * @date: Oct 10, 2019 10:10:58 AM
	 *
	 */
	public static class WordCount extends BaseRichBolt {

		private static final long serialVersionUID = 7208077706057284643L;

		private static final Logger LOGGER = LoggerFactory.getLogger(WordCount.class);

		private OutputCollector collector;

		private Map<String, Long> wordCounts = new HashMap<>();

		@SuppressWarnings("rawtypes")
		public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
			this.collector = collector;
		}

		public void execute(Tuple tuple) {
			String word = tuple.getStringByField("word");
			Long count = wordCounts.get(word);
			if (count == null) {
				count = 0L;
			}
			count++;

			wordCounts.put(word, count);

			LOGGER.info("[单词计数] " + word + "出现的次数是 ：" + count);
			collector.emit(new Values(word, count));
		}

		public void declareOutputFields(OutputFieldsDeclarer declarer) {
			declarer.declare(new Fields("word", "count"));
		}
	}

	/**
	 * Wires the spout and bolts into a topology and submits it: to a real
	 * Storm cluster when a topology name is passed on the command line,
	 * otherwise to an in-process LocalCluster for a 60-second demo run.
	 */
	public static void main(String[] args) {
		TopologyBuilder builder = new TopologyBuilder();

		// setSpout(name, instance, parallelism): the third argument is the
		// number of executors that run this spout.
		builder.setSpout("RandomSentence", new RandomSentenceSpout(), 2);
		builder.setBolt("SplitSentence", new SplitSentence(), 5).setNumTasks(20).shuffleGrouping("RandomSentence");

		// fieldsGrouping on "word" guarantees that every occurrence of the
		// same word is routed to the same downstream task. Only then are the
		// per-task in-memory counts in WordCount accurate — e.g. all "hello"
		// tuples land in one task instead of being split across tasks.
		builder.setBolt("WordCount", new WordCount(), 10).setNumTasks(20).fieldsGrouping("SplitSentence",
				new Fields("word"));

		Config config = new Config();

		if (args != null && args.length > 0) {
			// Launched from the command line: submit to the Storm cluster,
			// using the first argument as the topology name.
			config.setNumWorkers(3);
			try {
				StormSubmitter.submitTopology(args[0], config, builder.createTopology());
			} catch (Exception e) {
				// Don't swallow submission failures (was printStackTrace());
				// surface them so the launcher exits non-zero with the cause.
				throw new RuntimeException("Failed to submit topology " + args[0], e);
			}
		} else {
			// Launched from the IDE: run in an embedded local cluster for
			// 60 seconds, then shut it down — even if submission fails.
			config.setMaxTaskParallelism(20);
			LocalCluster cluster = new LocalCluster();
			try {
				cluster.submitTopology("WordCountTopology", config, builder.createTopology());
				Utils.sleep(60000);
			} finally {
				cluster.shutdown();
			}
		}

	}

}
