package Bestv.OTT_B2B_Replay;

import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.log4j.Logger;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.BulkProcessor.Listener;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.json.simple.JSONObject;
import org.omg.CORBA.PRIVATE_MEMBER;

import Bestv.OTT_B2B_Replay.ES_Client;;

/**
 * Storm bolt that writes each incoming tuple (a {@link JSONObject} carrying its
 * own "_index" and "_type" fields) into an Elasticsearch cluster via a
 * {@link BulkProcessor}, then emits the same JSON downstream on field
 * "log_json".
 *
 * Reliability contract: the tuple is acked only after the bulk submission has
 * completed without failures; on any bulk failure or exception the tuple is
 * failed so Storm replays it. (The original code acked unconditionally and
 * could both fail and ack the same tuple; it also nulled {@code log_json}
 * before emitting it, so downstream always received null.)
 */
public class CreateElasticsearchDataBolt extends BaseRichBolt {

	private static final long serialVersionUID = -917956850826439496L;

	/** Properties file holding the bulk-processor tuning values, read in prepare(). */
	private final static String conf_es_file = "/opt/storm/apache-storm-1.0.1/work/OTT-B2B-REPLAYEPG/elasticsearch.properties";

	private OutputCollector collector;
	// Bulk tuning values loaded from conf_es_file in prepare():
	private Integer BulkNums;              // "SetBulkActions" — flush after this many documents
	private Integer ByteSizes;             // "ByteSizeValue"  — flush after this many MB
	private Integer TimeValues;            // "TimeValue"      — flush interval in seconds
	private Integer setConcurrentRequests; // "setConcurrentRequests" — concurrent in-flight bulk requests

	Logger logger;
	Client es;

	@Override
	public void execute(Tuple input) {
		JSONObject log_json = (JSONObject) input.getValue(0);
		// Single outcome flag so we ack or fail exactly once, after the bulk
		// has fully drained — never both.
		boolean failed = false;

		try {
			if (log_json != null && log_json.size() > 1) {
				// Captured by the anonymous Listener below; a one-element array
				// because the enclosing local must be effectively final.
				final boolean[] bulkFailed = { false };

				// Bulk-write the document into the ES cluster.
				BulkProcessor bulkProcessor = BulkProcessor.builder(es,
						new Listener() {
							@Override
							public void beforeBulk(long executionId, BulkRequest request) {
								// Invoked just before a bulk is submitted; nothing to do.
							}

							@Override
							public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
								// Invoked after each bulk completes (success or partial failure).
								if (response.hasFailures()) {
									bulkFailed[0] = true;
									logger.error("Now a bulk  has failed yet!");
								}
							}

							@Override
							public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
								// Invoked when the whole bulk request failed with an exception.
								bulkFailed[0] = true;
								logger.error("happen failly : " + failure.getMessage() + "\ncause :" + failure.getCause());
								logger.error(" 有文档提交失败！after failure=" + failure);
							}
						})
						.setBulkActions(BulkNums)                                   // flush every BulkNums documents
						.setBulkSize(new ByteSizeValue(ByteSizes, ByteSizeUnit.MB)) // flush every ByteSizes MB
						.setFlushInterval(TimeValue.timeValueSeconds(TimeValues))   // flush every TimeValues seconds
						.setConcurrentRequests(setConcurrentRequests)               // allowed concurrent bulk requests
						.build();

				// Target index/type are carried inside the message itself.
				bulkProcessor.add(new IndexRequest(
						log_json.get("_index").toString(),
						log_json.get("_type").toString()).source(log_json));
				bulkProcessor.flush();

				// Block until every pending bulk has completed so the listener
				// callbacks above have run before we decide ack vs. fail.
				bulkProcessor.awaitClose(10, TimeUnit.MINUTES);
				failed = bulkFailed[0];
			}
		} catch (Exception e) {
			failed = true;
			logger.error("CreateElasticsearchDataBolt, Get an Exception: ", e);
		}

		if (failed) {
			// Tell Storm the tuple was not processed so it gets replayed.
			collector.fail(input);
		} else {
			// Emit while log_json is still live (the original nulled it first
			// and therefore always emitted null), then ack exactly once.
			collector.emit(input, new Values(log_json));
			collector.ack(input);
		}
	}

	@Override
	public void cleanup() {
		// Lifecycle hook called by Storm on shutdown; nothing to release here.
		// (The original also invoked this at the end of every execute(), which
		// is not its purpose — that call has been removed.)
	}

	@Override
	public void prepare(Map map, TopologyContext context, OutputCollector collector) {
		this.collector = collector;
		this.logger = Logger.getLogger(CreateElasticsearchDataBolt.class);

		// Shared transport client to the ES cluster.
		ES_Client es_client = new ES_Client();
		this.es = es_client.getTransportClient();

		// Load the bulk-processor tuning values from the properties file.
		GetPropertiesItems getPropertiesItems = new GetPropertiesItems(conf_es_file);
		BulkNums = Integer.parseInt(getPropertiesItems.ReadProperty("SetBulkActions"));
		ByteSizes = Integer.parseInt(getPropertiesItems.ReadProperty("ByteSizeValue"));
		TimeValues = Integer.parseInt(getPropertiesItems.ReadProperty("TimeValue"));
		setConcurrentRequests = Integer.parseInt(getPropertiesItems.ReadProperty("setConcurrentRequests"));
	}

	@Override
	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		// Downstream bolts receive the original JSON document on "log_json".
		declarer.declare(new Fields("log_json"));
	}

}
