package storm;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * 
 * @author 李岩飞
 * @email eliyanfei@126.com
 * @date 2018年6月8日上午11:09:38
 */
public class StormKafakTopology {
	private static final Logger LOG = LoggerFactory.getLogger(StormKafakTopology.class);

	/**
	 * Builds and launches the Kafka log-processing topology.
	 * <p>
	 * Pipeline: KafkaSpout ("log") -> KafkaLogIPBolt ("ip") -> LogURLBolt ("url"),
	 * all connected via shuffle grouping.
	 *
	 * @param args cluster mode: {@code args[0]} = topology name, {@code args[1]} = output path
	 *             (stored in the config under key {@code "path"}); with no args the topology
	 *             runs in a LocalCluster for 50 seconds and is then killed.
	 * @throws Exception if topology submission fails
	 */
	public static void main(String[] args) throws Exception {
		TopologyBuilder builder = new TopologyBuilder();
		// Configure the Kafka spout: consume topic "log2" via ZooKeeper at localhost:2181,
		// tracking consumer offsets under zkRoot "/storm_kafka_log" with consumer id "mr".
		String zkStr = "localhost:2181";
		BrokerHosts hosts = new ZkHosts(zkStr);
		String topic = "log2";
		String zkRoot = "/storm_kafka_log";
		String id = "mr";
		SpoutConfig spoutConf = new SpoutConfig(hosts, topic, zkRoot, id);

		builder.setSpout("log", new KafkaSpout(spoutConf));
		builder.setBolt("ip", new KafkaLogIPBolt()).shuffleGrouping("log");
		builder.setBolt("url", new LogURLBolt()).shuffleGrouping("ip");

		Config conf = new Config();
		conf.setDebug(false);

		if (args != null && args.length > 0) {
			// BUGFIX: the original only checked args.length > 0 but read args[1] below,
			// crashing with ArrayIndexOutOfBoundsException when given a single argument.
			if (args.length < 2) {
				throw new IllegalArgumentException(
						"Usage: StormKafakTopology <topologyName> <path> (got " + args.length + " argument(s))");
			}
			conf.setNumWorkers(3);
			// Parameterized SLF4J logging instead of string concatenation.
			LOG.info("{}===============", args[0]);
			LOG.info("{}=================", args[1]);
			conf.put("path", args[1]);
			StormSubmitter.submitTopologyWithProgressBar(args[0], conf, builder.createTopology());
		} else {
			// Local-mode smoke run: let the topology process for 50s, then tear it down.
			LocalCluster cluster = new LocalCluster();
			cluster.submitTopology("test3", conf, builder.createTopology());
			Utils.sleep(50000);
			cluster.killTopology("test3");
			cluster.shutdown();
		}
	}
}