package com.saic.topology;

import java.io.InputStream;
import java.util.Properties;
import java.util.concurrent.LinkedBlockingDeque;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.topology.TopologyBuilder;

import com.saic.bolt.HBaseBolt;
import com.saic.consumer.KafkaConsumer;
import com.saic.spout.KafkaSpoutConf;
import com.saic.spout.KafkaSpoutImp;
import com.saic.utils.HbaseUtil;	

/**
 * Defines the topology that moves data from Kafka through Storm into HBase:
 * a {@link KafkaSpoutImp} spout feeds an {@link HBaseBolt} via shuffle grouping.
 * Parallelism settings are loaded once from {@code storm.properties} on the classpath.
 *
 * @author miao
 */
public class KafkaTopology {

	/**
	 * Number of topology workers. Each worker is one physical JVM: on a cluster this
	 * maps to cluster worker slots; in local mode it maps to in-process JVM workers.
	 */
	private static Integer workers;

	/**
	 * Total number of spout tasks started in the cluster (each spout instance is one task).
	 */
	private static Integer spoutNumTasks;

	/**
	 * Number of executor threads running the spout tasks.
	 */
	private static Integer spoutExecutors;

	/**
	 * Total number of bolt tasks started in the cluster (each bolt instance is one task).
	 */
	private static Integer boltNumTasks;

	/**
	 * Number of executor threads running the bolt tasks.
	 */
	private static Integer boltExecutors;

	/** In-memory hand-off queue shared with other components; bounded at 4096 entries. */
	public static LinkedBlockingDeque<String> memoryQueue = new LinkedBlockingDeque<String>(1024 * 4);

	static {
		// Load Storm parallelism settings from storm.properties on the classpath.
		// try-with-resources guarantees the stream is closed even on failure.
		try (InputStream in = KafkaTopology.class.getClassLoader().getResourceAsStream("storm.properties")) {
			if (in == null) {
				// Fail with a clear message instead of an NPE inside Properties.load.
				throw new IllegalStateException("storm.properties not found on classpath");
			}
			Properties properties = new Properties();
			properties.load(in);
			// NOTE: "topolopy" is misspelled but must match the key in storm.properties.
			workers = Integer.valueOf(properties.getProperty("storm.topolopy.workers"));
			spoutNumTasks = Integer.valueOf(properties.getProperty("storm.KafkaSpoutDef.numTasks"));
			spoutExecutors = Integer.valueOf(properties.getProperty("storm.KafkaSpoutDef.executor"));
			boltNumTasks = Integer.valueOf(properties.getProperty("storm.JSONToMapBolt.numTasks"));
			boltExecutors = Integer.valueOf(properties.getProperty("storm.JSONToMapBolt.executor"));
		} catch (Exception e) {
			e.printStackTrace();
			System.out.println("---KafkaTopology init fail -----");
		}

		// Initialize HBase; the HbaseUtil constructor performs the setup side effects.
		new HbaseUtil();
	}

	/**
	 * Builds the Kafka-to-HBase topology and submits it. With a program argument the
	 * topology is submitted to a remote cluster under {@code args[0]}; without arguments
	 * it runs on an in-process {@link LocalCluster} and starts a {@link KafkaConsumer}
	 * after a short warm-up delay.
	 *
	 * @param args optional; args[0] is the remote topology name when submitting to a cluster
	 */
	public static void main(String[] args) {
		try {
			// 1 build the topology: kafkaSpout -> HBaseBolt (shuffle grouping)
			TopologyBuilder topologyBuilder = new TopologyBuilder();
			SpoutConfig kafkaSpoutconf = KafkaSpoutConf.initKafkaSpoutConf();
			KafkaSpoutImp kafkaSpout = new KafkaSpoutImp(kafkaSpoutconf);
			topologyBuilder.setSpout("kafkaSpout", kafkaSpout, spoutExecutors).setNumTasks(spoutNumTasks);
			topologyBuilder.setBolt("HBaseBolt", new HBaseBolt(), boltExecutors).setNumTasks(boltNumTasks)
					.shuffleGrouping("kafkaSpout");

			// 2 configure execution mode
			Config config = new Config();
			config.setDebug(true);

			// Submit application
			System.out.println("------------------storm starts-------------------");
			if (args != null && args.length > 0) {
				// 2-0 cluster mode: submit under the name given on the command line
				config.setNumWorkers(workers);
				StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
			} else {
				// 2-1 local mode: run the topology in an in-process cluster
				// config.setMaxTaskParallelism(1);
				LocalCluster cluster = new LocalCluster();
				cluster.submitTopology("KafkaTopology", config, topologyBuilder.createTopology());
				try {
					// Give the local topology time to come up before producing/consuming.
					System.out.println("--------------Waiting to consume from kafka--------------------");
					Thread.sleep(1000 * 10);
					// Start the Kafka consumer thread.
					KafkaConsumer consumer1 = new KafkaConsumer();
					consumer1.start();
				} catch (Exception exception) {
					if (exception instanceof InterruptedException) {
						// Restore the interrupt status so callers can observe the interruption.
						Thread.currentThread().interrupt();
					}
					System.out.println("--------------Thread interrupted exception : -------------------" + exception);
				}
				// kill the KafkaTopology
				// cluster.killTopology("KafkaTopology");
				// shut down the storm test cluster
				// cluster.shutdown();
			}

		} catch (Exception e) {
			e.printStackTrace();
		}

	}

}
