//OTT_B2B_Nginx_AccessLog_Topo
package Bestv.OTT_B2B_Replay;
import org.apache.log4j.Logger;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.topology.TopologyBuilder;
import org.omg.CORBA.PUBLIC_MEMBER;
import Bestv.OTT_B2B_Replay.*;
import scala.annotation.StaticAnnotation;

/**
 * Entry point that wires up and submits the OTT B2B Nginx access-log Storm topology.
 *
 * Pipeline: kafka_input (spout) -> NginxResolve (parse) -> CreateEsData (build ES documents).
 * Runtime parameters (parallelism, ackers, timeouts, topology name) are read from a
 * storm.properties file at a fixed deployment path; the submission mode is chosen by
 * args[0]: "local" runs an in-process LocalCluster, "cluster" submits via StormSubmitter.
 */
public class OTT_B2B_Nginx_AccessLog_Topo {
	private final static Logger logger = Logger.getLogger(OTT_B2B_Nginx_AccessLog_Topo.class);

	// Fixed deployment location of this topology's configuration file.
	private static final String PROPERTIES_PATH =
			"/opt/storm/apache-storm-1.0.1/work/OTT-B2B-REPLAYEPG/storm.properties";

	/**
	 * Builds the topology, loads its configuration, and submits it.
	 *
	 * @param args args[0] must be "local" or "cluster"; a missing or unknown mode logs an
	 *             error (message intentionally kept in Chinese for operators) and exits.
	 * @throws Exception if a property cannot be parsed or topology submission fails
	 *                   (AlreadyAlive / InvalidTopology / Authorization).
	 */
	public static void main(String[] args) throws Exception {

		GetPropertiesItems stormProperties = new GetPropertiesItems(PROPERTIES_PATH);
		int spoutTasks = Integer.parseInt(stormProperties.ReadProperty("spout_tasks"));
		// NOTE(review): the key "TopoloyName" is misspelled in the properties file itself;
		// it must stay as-is so existing deployments keep resolving it.
		String topologyName = stormProperties.ReadProperty("TopoloyName");

		TopologyBuilder builder = buildTopology(spoutTasks);
		Config conf = buildConfig(stormProperties);

		// Fix: the original duplicated the null/length guard and the error branch;
		// validate the arguments once, up front.
		if (args == null || args.length == 0) {
			logger.error("请输入需要提交 Topology 的模式：local or cluster");
			System.exit(0);
			return; // unreachable after exit(0), but makes control flow explicit
		}

		if (args[0].equals("local")) {
			// In-process cluster for development and debugging.
			LocalCluster cluster = new LocalCluster();
			logger.info("Start to submit Topology  " + topologyName + " use local way............");
			cluster.submitTopology(topologyName, conf, builder.createTopology());
		} else if (args[0].equals("cluster")) {
			// Typo fix in log message: "cluser" -> "cluster".
			logger.info("Start to submit Topology  " + topologyName + " By cluster............");
			StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
		} else {
			logger.error("请输入需要提交 Topology 的模式：local or cluster");
			System.exit(0);
		}
	}

	/**
	 * Wires the spout/bolt graph: kafka_input -> NginxResolve -> CreateEsData.
	 *
	 * @param spoutTasks parallelism hint for the Kafka input spout (from "spout_tasks").
	 * @return a builder holding the complete topology graph.
	 */
	private static TopologyBuilder buildTopology(int spoutTasks) {
		TopologyBuilder builder = new TopologyBuilder();
		builder.setSpout("kafka_input", new MyKafkaInput(), spoutTasks);
		builder.setBolt("NginxResolve", new NginxResolveBolt(), 2).shuffleGrouping("kafka_input");
		builder.setBolt("CreateEsData", new CreateElasticsearchDataBolt(), 1).shuffleGrouping("NginxResolve");
		return builder;
	}

	/**
	 * Reads worker/acker/timeout/pending settings from the properties file into a Storm Config.
	 *
	 * @param stormProperties loaded storm.properties accessor.
	 * @return the populated topology configuration.
	 */
	private static Config buildConfig(GetPropertiesItems stormProperties) {
		Config conf = new Config();
		conf.setDebug(Boolean.parseBoolean(stormProperties.ReadProperty("is_debug")));
		conf.setNumWorkers(Integer.parseInt(stormProperties.ReadProperty("Workers_Nums")));
		// A non-zero acker count enables tuple acking (at-least-once reliability).
		conf.setNumAckers(Integer.parseInt(stormProperties.ReadProperty("NumAckers")));
		conf.setMessageTimeoutSecs(Integer.parseInt(stormProperties.ReadProperty("MessageTimeoutSecs")));
		// Cap on in-flight (pending, un-acked) tuples per spout task.
		// NOTE(review): the property key is "MaxSpoutPending" (singular), matching the original.
		conf.setMaxSpoutPending(Integer.parseInt(stormProperties.ReadProperty("MaxSpoutPending")));
		return conf;
	}
}


