package com.irt.rootcloud.realtime.gketl;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.spout.SchemeAsMultiScheme;
import backtype.storm.topology.TopologyBuilder;
import storm.kafka.BrokerHosts;
import storm.kafka.KafkaSpout;
import storm.kafka.SpoutConfig;
import storm.kafka.ZkHosts;

import com.irt.rootcloud.realtime.gketl.bolt.EtlBolt;
import com.irt.rootcloud.realtime.gketl.bolt.SubEtlBolt;
import com.irt.rootcloud.realtime.gketl.bolt.SubSendBolt;
import com.irt.rootcloud.realtime.gketl.spout.MessageSpout;

public class SubMainTopology {

    /**
     * Entry point: wires a Kafka spout into the etl -> subetl -> subsend bolt
     * chain and submits the topology.
     *
     * <p>With any program argument present the topology is submitted to a remote
     * cluster via {@link StormSubmitter}; with no arguments it runs for one
     * minute inside a {@link LocalCluster} and then shuts down.
     *
     * @param args a non-empty argument list selects remote (cluster) submission
     */
    public static void main(String[] args) {
        // Fail fast if the configuration cannot be read: every subsequent step
        // depends on these properties, and continuing with an empty Properties
        // object would only surface as an opaque NullPointerException later.
        Properties props = new Properties();
        try (InputStream in = SubMainTopology.class.getResourceAsStream("/conf.properties")) {
            if (in == null) {
                throw new IllegalStateException("conf.properties not found on classpath");
            }
            props.load(in);
        } catch (IOException e) {
            throw new IllegalStateException("Unable to load /conf.properties", e);
        }

        BrokerHosts hosts = new ZkHosts(requireProp(props, "kafka.zkhosts.common"));

        // Kafka spout: consumer offsets are tracked in ZooKeeper under
        // "/<topology.datadir>" for the configured consumer group.
        SpoutConfig spoutConfig = new SpoutConfig(
                hosts,
                requireProp(props, "kafka.topic.common"),
                "/" + requireProp(props, "topology.datadir"),
                requireProp(props, "kafka.groupid.common"));
        spoutConfig.scheme = new SchemeAsMultiScheme(new MessageSpout());
        KafkaSpout kafkaSpout = new KafkaSpout(spoutConfig);

        // Topology wiring: kafkaSpout -> etlBolt -> subetlBolt -> subsendBolt,
        // each stage shuffle-grouped on its predecessor.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("kafkaSpout", kafkaSpout, intProp(props, "topology.spouts.num.common"));
        builder.setBolt("etlBolt", new EtlBolt(props), intProp(props, "topology.etlbolt.num.common"))
                .shuffleGrouping("kafkaSpout");
        builder.setBolt("subetlBolt", new SubEtlBolt(props), intProp(props, "topology.subetlbolt.num.common"))
                .shuffleGrouping("etlBolt");
        builder.setBolt("subsendBolt", new SubSendBolt(props), intProp(props, "topology.sendbolt.num.common"))
                .shuffleGrouping("subetlBolt");

        Config conf = new Config();
        conf.setDebug(false);
        // Tuple processing timeout (seconds) before a tuple is replayed.
        conf.setMessageTimeoutSecs(intProp(props, "message.timeout.secs"));
        // Maximum number of un-acked tuples the spout may have in flight.
        conf.setMaxSpoutPending(intProp(props, "max.spout.pending"));

        if (args != null && args.length > 0) {
            // Remote submission: worker count comes from configuration.
            conf.setNumWorkers(intProp(props, "topology.workers.common"));
            try {
                StormSubmitter.submitTopologyWithProgressBar(
                        requireProp(props, "topology.name.common"), conf, builder.createTopology());
            } catch (Exception e) {
                // Propagate with context instead of silently swallowing: a failed
                // submission must not exit with status 0.
                throw new IllegalStateException("Topology submission failed", e);
            }
        } else {
            // Local test run: execute for one minute, then shut down.
            LocalCluster local = new LocalCluster();
            conf.setNumWorkers(1);
            local.submitTopology("sasscommon-test", conf, builder.createTopology());
            try {
                Thread.sleep(60000);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so the shutdown below still runs
                // and callers can observe the interruption.
                Thread.currentThread().interrupt();
            }
            local.shutdown();
        }
    }

    /** Returns the named property, failing fast with a descriptive error if it is absent. */
    private static String requireProp(Properties props, String key) {
        String value = props.getProperty(key);
        if (value == null) {
            throw new IllegalStateException("Missing required property: " + key);
        }
        return value;
    }

    /** Returns the named property parsed as an int, failing fast if absent or malformed. */
    private static int intProp(Properties props, String key) {
        String value = requireProp(props, key).trim();
        try {
            return Integer.parseInt(value);
        } catch (NumberFormatException e) {
            throw new IllegalStateException("Property " + key + " is not an integer: " + value, e);
        }
    }
}
