package com.song.storm_demo.hbase_chap8.consumer;

import storm.kafka.Broker;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.StaticHosts;
import storm.kafka.ZkHosts;
import storm.kafka.trident.GlobalPartitionInformation;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;



public class TestStormKafkaMain {

	/**
	 * Builds and runs a Storm topology that consumes messages from the Kafka
	 * topic {@code "song"} (brokers discovered via the ZooKeeper quorum at
	 * {@code dmg:2181}), aggregates them in {@code TotalCountBolt}, and
	 * persists the results through {@code SaveBolt}.
	 *
	 * <p>With no arguments the topology runs for 6 seconds inside an
	 * in-process {@link LocalCluster} and is then shut down. Passing any
	 * argument selects the remote-cluster path, which is currently
	 * unimplemented (see TODO below).
	 *
	 * @param args non-empty signals a remote-cluster run (currently a no-op)
	 */
	public static void main(String[] args)
	{
		// Discover Kafka brokers through ZooKeeper; consumer offsets are kept
		// under the ZK root "/storm-kafka" with the consumer id "total".
		BrokerHosts brokerHosts = new ZkHosts("dmg:2181");
		SpoutConfig spoutConfig = new SpoutConfig(brokerHosts, "song", "/storm-kafka", "total");

		// Wiring: kafka-reader -> rowkey (TotalCountBolt) -> save-hbase
		// (SaveBolt), each component with a parallelism hint of 2.
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("kafka-reader", new KafkaSpout(spoutConfig), 2);
		builder.setBolt("rowkey", new TotalCountBolt(), 2).shuffleGrouping("kafka-reader");
		builder.setBolt("save-hbase", new SaveBolt(), 2).shuffleGrouping("rowkey");

		Config conf = new Config();
		if (args != null && args.length > 0) {
			// TODO(review): remote submission is not implemented — this branch
			// silently does nothing. It should call
			// StormSubmitter.submitTopology(args[0], conf, builder.createTopology())
			// (requires importing backtype.storm.StormSubmitter).
		} else {
			// Create the cluster outside the try so shutdown() in the finally
			// block always runs, even when the sleep below is interrupted.
			LocalCluster cluster = new LocalCluster();
			try {
				cluster.submitTopology("storm-kafka", conf, builder.createTopology());
				// Let the local topology process messages for a short while.
				Thread.sleep(6000);
			} catch (InterruptedException e) {
				// Restore the interrupt status rather than swallowing it.
				Thread.currentThread().interrupt();
			} finally {
				cluster.shutdown();
			}
		}
	}
}
