package com.whq.thrift.api.common.kafka;

import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.whq.thrift.api.common.constants.CommonConfigConstants;

/**
 * Kafka string-message producer with a two-phase send model: callers stage messages
 * via the static {@link #sendMsg(String, String, String)} into a bounded local queue,
 * and a scheduled job (quartz) drains the queue to Kafka via {@link #doRealSendMsg()}.
 *
 * <p>One {@code Producer} instance is created per topic by {@link #initialize()} and
 * shared through a static map, so several configured instances of this class can feed
 * the same drain loop.
 *
 * <p>NOTE(review): the staging queue and producer map are static while the
 * topic/broker configuration is per-instance — confirm that each topic gets its own
 * initialized instance before messages for it are staged.
 */
public class KafkaStringProducer {
	private static final Logger LOGGER = LoggerFactory.getLogger(KafkaStringProducer.class);

	private static final Logger LOGGER_QUARTZ = LoggerFactory.getLogger(CommonConfigConstants.LOGGER_KEY_QUARTZ);

	/** Topic -> producer registry shared by all instances; populated by initialize(). */
	private static final Map<String, Producer<String, String>> MAP_TOPIC_PRODUCER = new ConcurrentHashMap<String, Producer<String, String>>();

	/** Bounded staging queue drained by doRealSendMsg(). */
	private static final KafkaStringMsgQueue PRESET_SEND_QUEUE = new KafkaStringMsgQueue();

	private String topic = "";
	private String metadataBrokerList = "";
	private String requestRequiredAcks = "";
	/** Upper bound on staged messages; beyond it sendMsg() rejects. Default 1000. */
	private static int localSendQueueMaxCount = 1000;

	/**
	 * Builds a Kafka producer from the configured broker list and ack level and
	 * registers it in the topic -> producer map under this instance's topic.
	 */
	public void initialize() {
		Properties props = new Properties();
		// Kafka broker host:port list.
		props.put("metadata.broker.list", metadataBrokerList);

		// Serializer for message values.
		props.put("serializer.class", "kafka.serializer.StringEncoder");
		// Serializer for message keys.
		props.put("key.serializer.class", "kafka.serializer.StringEncoder");

		// request.required.acks:
		//  0  -> producer never waits for a broker acknowledgement: lowest latency,
		//        weakest durability (data lost if a server fails).
		//  1  -> producer waits for the leader replica to receive the data: better
		//        durability (only messages written to a now-dead, not-yet-replicated
		//        leader are lost).
		// -1  -> producer waits for all in-sync replicas: best durability, no loss
		//        as long as at least one in-sync replica remains.
		props.put("request.required.acks", requestRequiredAcks);

		Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));
		MAP_TOPIC_PRODUCER.put(topic, producer);
	}

	/**
	 * Closes every registered producer. Failures are logged per topic so one broken
	 * producer does not prevent the others from closing.
	 */
	public void shutDown() {
		for (Map.Entry<String, Producer<String, String>> entry : MAP_TOPIC_PRODUCER.entrySet()) {
			Producer<String, String> producer = entry.getValue();
			if (producer != null) {
				try {
					producer.close();
				} catch (Exception e) {
					LOGGER.error("close kafka producer[topic: {}] exception: ", entry.getKey(), e);
				}
			}
		}
	}

	/**
	 * Stages a message for later delivery by {@link #doRealSendMsg()}.
	 *
	 * @param msgTopic target Kafka topic
	 * @param key      partition key
	 * @param data     message payload
	 * @return {@code true} if the message was queued, {@code false} if the local
	 *         queue is already at {@code localSendQueueMaxCount}
	 */
	public static boolean sendMsg(String msgTopic, String key, String data) {
		if (PRESET_SEND_QUEUE.count() >= localSendQueueMaxCount) {
			LOGGER.error("pre send kafka queue [max: {}] is full", localSendQueueMaxCount);
			return false;
		}
		LOGGER.debug("pre send kafka msg: topic[{}], key[{}], data[{}]", msgTopic, key, data);
		PRESET_SEND_QUEUE.put(new KafkaStringMsg(msgTopic, key, data));
		return true;
	}

	/**
	 * Drains the staging queue and sends each message to Kafka. A failure on one
	 * message is logged and the drain continues, so a single bad send no longer
	 * aborts the loop and strands the remaining queued messages. Messages whose
	 * topic has no registered producer are logged and dropped rather than silently
	 * discarded.
	 */
	public void doRealSendMsg() {
		LOGGER_QUARTZ.info("will real send kafka message");
		while (!PRESET_SEND_QUEUE.isEmpty()) {
			KafkaStringMsg oneMsg = PRESET_SEND_QUEUE.get();
			if (oneMsg == null) {
				continue;
			}
			Producer<String, String> producer = MAP_TOPIC_PRODUCER.get(oneMsg.getTopic());
			if (producer == null) {
				// No producer was initialized for this topic; the message is dropped.
				LOGGER_QUARTZ.warn("no kafka producer for topic[{}], message dropped: key[{}]", oneMsg.getTopic(), oneMsg.getMsgKey());
				continue;
			}
			LOGGER_QUARTZ.info("send kafka msg: topic[{}], key[{}], data[{}]", oneMsg.getTopic(), oneMsg.getMsgKey(), oneMsg.getMsgData());
			try {
				producer.send(new KeyedMessage<String, String>(oneMsg.getTopic(), oneMsg.getMsgKey(), oneMsg.getMsgData()));
			} catch (Exception e) {
				LOGGER_QUARTZ.error("send kafka msg failed: topic[{}], key[{}]", oneMsg.getTopic(), oneMsg.getMsgKey(), e);
			}
		}
	}

	public String getTopic() {
		return topic;
	}

	public void setTopic(String topic) {
		this.topic = topic;
	}

	public String getMetadataBrokerList() {
		return metadataBrokerList;
	}

	public void setMetadataBrokerList(String metadataBrokerList) {
		this.metadataBrokerList = metadataBrokerList;
	}

	public String getRequestRequiredAcks() {
		return requestRequiredAcks;
	}

	public void setRequestRequiredAcks(String requestRequiredAcks) {
		this.requestRequiredAcks = requestRequiredAcks;
	}

	public int getLocalSendQueueMaxCount() {
		return localSendQueueMaxCount;
	}

	/**
	 * Sets the staging-queue capacity from a configuration string. An unparsable
	 * value is logged and ignored (previous/default capacity is kept) instead of
	 * being silently swallowed.
	 *
	 * @param localSendQueueMaxCount decimal integer string
	 */
	public void setLocalSendQueueMaxCount(String localSendQueueMaxCount) {
		try {
			KafkaStringProducer.localSendQueueMaxCount = Integer.parseInt(localSendQueueMaxCount);
		} catch (NumberFormatException e) {
			LOGGER.warn("invalid localSendQueueMaxCount [{}], keeping current value {}", localSendQueueMaxCount, KafkaStringProducer.localSendQueueMaxCount);
		}
	}

}
