/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package chenxu.test.hbase.storm;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.node.NodeBuilder.nodeBuilder;

import java.io.IOException;
import java.text.NumberFormat;
import java.util.Calendar;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.elasticsearch.client.Client;
import org.elasticsearch.node.Node;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;

/**
 * A Storm bolt that tracks, per calendar week, which object ids have already
 * been seen, and persists the first occurrence of each id in a week to an
 * Elasticsearch index (index "statistics", one mapping type per week such as
 * "week07", the object id as the document id).
 * <p/>
 * An in-memory cache ({@code oidsInWeeks}) records the weeks an id has
 * already been written for, so Elasticsearch is only consulted when an id
 * appears in a week it has not been cached for.
 * <p/>
 * Emission/ack behavior mirrors the generic debug bolt it was derived from:
 * tuples are re-emitted only when output fields were declared, optionally
 * anchored, and optionally auto-acked.
 */
@SuppressWarnings("serial")
public class ObjectIDCountBolt extends BaseRichBolt {
	private static final Logger LOG = LoggerFactory.getLogger(ObjectIDCountBolt.class);
	private OutputCollector collector;
	private boolean autoAck = false;
	private boolean autoAnchor = false;
	private Fields declaredFields;
	private String name;
	// Reused mutable calendar; safe because execute() is invoked by a single
	// executor thread per bolt instance.
	private Calendar instance;
	// Cache: object id -> set of WEEK_OF_YEAR values already persisted.
	private Map<String, Set<Integer>> oidsInWeeks;
	private Client client;
	
	/**
	 * Constructs a new <code>ObjectIDCountBolt</code> instance.
	 * 
	 * @param name The name of the bolt (used in DEBUG logging)
	 * @param autoAck Whether or not this bolt should automatically acknowledge received tuples.
	 * @param autoAnchor Whether or not this bolt should automatically anchor to received tuples.
	 * @param declaredFields The fields this bolt declares as output (may be <code>null</code>,
	 *        in which case nothing is emitted downstream).
	 */
	public ObjectIDCountBolt(String name, boolean autoAck, boolean autoAnchor, Fields declaredFields){
		this.name = name;
		this.autoAck = autoAck;
		this.autoAnchor = autoAnchor;
		this.declaredFields = declaredFields;
	}
	
	public ObjectIDCountBolt(String name, boolean autoAck, boolean autoAnchor){
		this(name, autoAck, autoAnchor, null);
	}

	@SuppressWarnings("rawtypes")
	public void prepare(Map stormConf, TopologyContext context,
			OutputCollector collector) {
		this.collector = collector;
		instance = Calendar.getInstance();
		oidsInWeeks = new HashMap<String, Set<Integer>>();
		
		// Join the "elasticsearch" cluster as an embedded client node.
		Node node = nodeBuilder().clusterName("elasticsearch").node();
		client = node.client();
		LOG.info("INIT es client {}", client);
	}

	public void execute(Tuple input) {
		
		String oid = input.getString(0);
		String protocolType = input.getStringByField("protocoltype");
		Long timestamp = input.getLongByField("timestamp");
		instance.setTimeInMillis(timestamp);
		
		// Consult the local cache first; fall back to Elasticsearch only on a
		// cache miss for this (oid, week) pair.
		int week = instance.get(Calendar.WEEK_OF_YEAR);
		Set<Integer> weeks = oidsInWeeks.get(oid);
		if(weeks == null) {
			weeks = new HashSet<Integer>();
			oidsInWeeks.put(oid, weeks);
		}
		if(!weeks.contains(week)) {
			// FIX: always verify against Elasticsearch before inserting. The
			// previous code skipped this check when the oid had no cache entry
			// at all, which — after a bolt restart emptied the cache — caused
			// existing documents to be overwritten with a fresh count of 1.
			if(!getDB(oid, timestamp)) {
				insertDB(oid, protocolType, timestamp);
			}
			weeks.add(week);
		}
		
		// only emit if we have declared fields.
		if(this.declaredFields != null){
			LOG.debug("[{}] emitting: {}", this.name, input);
			if(this.autoAnchor){
				this.collector.emit(input, input.getValues());
			} else{
				this.collector.emit(input.getValues());
			}
		}
		
		if(this.autoAck){
			LOG.debug("[{}] ACKing tuple: {}", this.name, input);
			this.collector.ack(input);
		}
	}
	
	/**
	 * Formats the WEEK_OF_YEAR of the given epoch-millis timestamp as the
	 * Elasticsearch type name, zero-padded to two digits (e.g. "week07").
	 * Note: mutates the shared {@link #instance} calendar.
	 */
	private String getTextWeek(long timestamp) {
		instance.setTimeInMillis(timestamp);
		// WEEK_OF_YEAR is at most 53, so two digits always suffice.
		return String.format("week%02d", instance.get(Calendar.WEEK_OF_YEAR));
	}
	
	/**
	 * @return <code>true</code> if a document for this oid already exists in
	 *         the "statistics" index for the timestamp's week.
	 */
	private boolean getDB(String oid, Long timestamp) {
		
		return client.prepareGet("statistics", getTextWeek(timestamp), oid)
		        .execute().actionGet().isExists();
	}

	/**
	 * Persists the first sighting of this oid in the timestamp's week. The id
	 * is expected as "value~type"; when no type suffix is present the type
	 * defaults to "mac".
	 */
	private void insertDB(String oid, String protocolType, Long timestamp) {
		String type = "mac";
		// String.split never returns null, so only the length needs checking.
		String[] split = oid.split("~");
		if(split.length == 2) {
			type = split[1];
		}
		try {
			client.prepareIndex("statistics", getTextWeek(timestamp), oid)
			        .setSource(jsonBuilder()
			                    .startObject()
			                        .field("timestamp", timestamp)
			                        .field("count", 1)
			                        .field("type",type)
			                        .field("protocoltype",protocolType)
			                    .endObject()
			                  )
			        .execute()
			        .actionGet();
		} catch (IOException e) {
			// Single log call with the cause attached; the previous extra
			// LOG.info(e.getMessage()) duplicated the message without a stack trace.
			LOG.error("insert to es error", e);
		}
	}

	public void cleanup() {
		// Guard against cleanup being invoked before prepare() succeeded.
		if(client != null) {
			client.close();
		}
	}

	public void declareOutputFields(OutputFieldsDeclarer declarer) {
		if(this.declaredFields != null){
			declarer.declare(this.declaredFields);
		}
	}
	
	public boolean isAutoAck(){
		return this.autoAck;
	}
	
	public void setAutoAck(boolean autoAck){
		this.autoAck = autoAck;
	}

}
