/*
 * 文件名：KafkaProducerUtil.java
 * 版权：亚信科技(南京)版权所有
 * 描述：Kafka消息批量异步发送工具类：消息先入环形缓冲队列，再按批次通过生产者连接池异步发送
 * 修改人：xuwei3
 * 修改时间：2015-11-18
 * 修改内容：新增Kafka生产者工具类
 */
package com.ailk.bigdata.etl.realstream.server.service.impl;

import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;

import com.ailk.bigdata.etl.realstream.server.model.KafkaInfo;
import com.ailk.bigdata.etl.realstream.server.model.KafkaProduceException;
import com.ailk.bigdata.etl.realstream.server.model.RingBufferQueue;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

/**
 * @description Buffers messages in a ring queue and ships them to Kafka in
 *              batches through a pool of reusable producers; one shared
 *              instance is kept per kafka cluster id.
 * @author [xuwei3]
 * @version [版本号,2015-11-18]
 * @see [相关类/方法]
 * @since [产品/模块版本]
 */
public class KafkaProducerUtil
{
	private static final Logger logger = LoggerFactory.getLogger(KafkaProducerUtil.class);

	// Pool of reusable kafka producers: a send task borrows one, sends a batch,
	// then returns it, so at most PROCEDURE_POOL_SIZE connections exist.
	private final ConcurrentLinkedQueue<Producer<String, String>> producer_queue = new ConcurrentLinkedQueue<Producer<String, String>>();
	// Registry of one shared util instance per kafka cluster id (KafkaInfo.getKafkaId()).
	private static final ConcurrentHashMap<String, KafkaProducerUtil> kafkaProducerUtils = new ConcurrentHashMap<String, KafkaProducerUtil>();
	private final ExecutorService executePool;
	private final ExecutorService sendPools;

	// Staging buffer between pubBatchMessage() callers and the dispatch threads.
	@SuppressWarnings("rawtypes")
	private final RingBufferQueue<KeyedMessage> sendMessCache = new RingBufferQueue<KeyedMessage>(KeyedMessage.class, 20000);
	private final KafkaInfo kafkaInfo;

	private final int batchSize;// messages per send batch, sanitized in the constructor

	private static final int BATCH_NUM_DEFAULT = 2000;// fallback batch size
	private static final int PROCEDURE_POOL_SIZE = 20;// number of pooled kafka producers
	private static final int SEND_THREAD_NUM = 8;// number of batch-dispatch threads

	/**
	 * Binds this util to one kafka cluster configuration. Prefer
	 * {@link #getInstance(KafkaInfo)}, which also starts the dispatch threads
	 * and pre-creates the producer pool.
	 *
	 * @param kafkaInfo kafka connection/tuning configuration
	 */
	public KafkaProducerUtil(KafkaInfo kafkaInfo)
	{
		this.kafkaInfo = kafkaInfo;
		// Guard against a missing or invalid configured batch size.
		batchSize = kafkaInfo.getBatchSize() < 1 ? BATCH_NUM_DEFAULT : kafkaInfo.getBatchSize();
		String pingPackThName = "realstream-kafka-call-runner-%d";
		executePool = Executors.newFixedThreadPool(SEND_THREAD_NUM, new ThreadFactoryBuilder().setNameFormat(pingPackThName).build());
		String kafkaSendName = "realstream-kafka-send-runner-%d";
		sendPools = Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat(kafkaSendName).build());
	}

	/**
	 * Returns the shared instance for the given cluster id, creating and fully
	 * initializing it (dispatch threads + producer pool) on first use.
	 *
	 * @param kafkaInfo kafka configuration; its kafkaId is the registry key
	 * @return the singleton instance for kafkaInfo.getKafkaId()
	 */
	public static KafkaProducerUtil getInstance(KafkaInfo kafkaInfo)
	{
		// Double-checked locking over the registry so the expensive
		// initDataSendThread() runs exactly once per cluster id.
		if (!kafkaProducerUtils.containsKey(kafkaInfo.getKafkaId()))
		{
			synchronized (KafkaProducerUtil.class)
			{
				if (!kafkaProducerUtils.containsKey(kafkaInfo.getKafkaId()))
				{
					KafkaProducerUtil kafkaProducerUtil = new KafkaProducerUtil(kafkaInfo);
					kafkaProducerUtil.initDataSendThread();
					kafkaProducerUtils.put(kafkaInfo.getKafkaId(), kafkaProducerUtil);
				}
			}
		}
		return kafkaProducerUtils.get(kafkaInfo.getKafkaId());
	}

	/** Starts the dispatch threads and pre-creates the pooled producers. */
	private void initDataSendThread()
	{
		for (int i = 0; i < SEND_THREAD_NUM; i++)
		{
			executePool.submit(new MessDispatchTask());
		}

		for (int i = 0; i < PROCEDURE_POOL_SIZE; i++)
		{
			try
			{
				producer_queue.add(initKafkaProductors());
			}
			catch (KafkaProduceException e)
			{
				// Fixed: the original logged an empty message here.
				logger.warn("初始化kafka生产者失败", e);
			}
		}
	}

	/**
	 * Builds one asynchronous string-to-string kafka producer from the
	 * configured broker list.
	 *
	 * @return a ready-to-use producer
	 * @throws KafkaProduceException if the configuration is invalid or the
	 *             producer cannot be created
	 */
	public Producer<String, String> initKafkaProductors() throws KafkaProduceException
	{
		Producer<String, String> producer = null;
		try
		{
			Preconditions.checkNotNull(kafkaInfo.getMetaBrokerList(), "文件转kafka消息失败,kafka配置失败，请检查相关配置[%s]", kafkaInfo.toString());

			Properties kafkaProps = new Properties();
			// Broker endpoints (host:port list).
			kafkaProps.put("metadata.broker.list", kafkaInfo.getMetaBrokerList());
			// Message value serializer.
			kafkaProps.put("serializer.class", "kafka.serializer.StringEncoder");
			// Fixed: the original set "patition.class" (a typo of
			// "partitioner.class") to "" — the misspelled key was a silent
			// no-op; removed rather than corrected, since an empty partitioner
			// class name would fail to load.
			kafkaProps.put("request.required.acks", kafkaInfo.getRequiredAcks());
			kafkaProps.put("producer.type", "async");// asynchronous writes
			// Fixed: use the sanitized batchSize field; the original passed the
			// raw configured value, which may be < 1.
			kafkaProps.put("batch.num.messages", Integer.toString(batchSize));
			kafkaProps.put("queue.buffering.max.ms", "5000");
			kafkaProps.put("queue.buffering.max.messages", "20000");
			kafkaProps.put("socket.send.buffer.bytes", "1048576");
			kafkaProps.put("socket.request.max.bytes", "104857600");
			// -1: only refresh topic metadata on failure, never periodically.
			kafkaProps.put("topic.metadata.refresh.interval.ms", "-1");
			ProducerConfig config = new ProducerConfig(kafkaProps);
			producer = new Producer<String, String>(config);
		}
		catch (Exception e)
		{
			// Fixed: dropped the extra catch(Throwable) the original had —
			// Errors such as OutOfMemoryError now propagate instead of being
			// wrapped in a checked exception.
			logger.error("创建kafka消息生产者失败", e);
			throw new KafkaProduceException(e);
		}

		logger.info("获取生成者成功............");
		return producer;
	}

	/**
	 * Queues one message for asynchronous, batched delivery.
	 *
	 * @param kafkaMess message to enqueue
	 */
	public void pubBatchMessage(KeyedMessage<String, String> kafkaMess)
	{
		sendMessCache.enQueue(kafkaMess);
	}

	/**
	 * Borrows a producer from the pool (waiting until one is free), sends the
	 * batch, and always returns the producer to the pool.
	 *
	 * @param kafkaMess batch to send
	 * @throws KafkaProduceException if a KafkaProduceException surfaces during
	 *             the send
	 */
	private void sendBatchMessages(List<KeyedMessage<String, String>> kafkaMess) throws KafkaProduceException
	{
		Producer<String, String> producer = null;
		try
		{
			// Fixed: remember the interrupt instead of swallowing it, and
			// restore the flag once a producer is acquired (re-interrupting
			// inside the loop would make every subsequent sleep throw at once).
			boolean interrupted = false;
			while (null == (producer = producer_queue.poll()))
			{
				try
				{
					Thread.sleep(10);
				}
				catch (InterruptedException ie)
				{
					interrupted = true;
				}
			}
			if (interrupted)
			{
				Thread.currentThread().interrupt();
			}

			logger.debug("获取到kafka生产者成功.........");

			producer.send(kafkaMess);

			logger.debug("发送一批[{}]数据到kafka成功.........", batchSize);
		}
		catch (Exception e1)
		{
			logger.warn("发送kafka消息异常[" + (null != producer) + "]", e1);
			if (e1 instanceof KafkaProduceException)
			{
				// Producer-creation style failure: surface it to the caller.
				throw new KafkaProduceException(e1);
			}
		}
		finally
		{
			// Fixed: the original leaked the producer on the rethrow path;
			// the pool now always gets it back.
			if (null != producer)
			{
				producer_queue.add(producer);
			}
		}
	}

	/**
	 * Legacy dispatch loop.
	 *
	 * @deprecated superseded by {@link MessDispatchTask}, which skips null
	 *             entries; this version may add nulls returned by
	 *             {@code deQueue()} into the batch. Kept only for reference.
	 */
	@Deprecated
	private Runnable getRunningTasks()
	{
		return new Runnable()
		{

			@SuppressWarnings("unchecked")
			@Override
			public void run()
			{
				while (true)
				{
					List<KeyedMessage<String, String>> kafkaMess = new ArrayList<KeyedMessage<String, String>>(batchSize);
					try
					{
						for (int i = 0; i < batchSize; i++)
						{
							kafkaMess.add(sendMessCache.deQueue());
						}

						sendPools.submit(getSendDateRunn(kafkaMess));
					}
					catch (Exception e)
					{
						logger.error("kafka消息发送失败............", e);
					}
				}
			}
		};
	}

	/**
	 * Wraps one batch in a task that sends it and logs (rather than
	 * propagates) any failure.
	 *
	 * @param kafkaMess batch to send
	 * @return task suitable for {@code sendPools}
	 */
	private Runnable getSendDateRunn(final List<KeyedMessage<String, String>> kafkaMess)
	{
		return new Runnable()
		{

			@Override
			public void run()
			{
				try
				{
					sendBatchMessages(kafkaMess);
				}
				catch (Exception e)
				{
					logger.error("kafka消息发送失败............", e);
				}
			}
		};
	}

	/**
	 * Endless loop that drains up to batchSize messages from the ring buffer
	 * and hands each non-empty batch to the send pool.
	 */
	class MessDispatchTask implements Runnable
	{

		/**
		 * @see java.lang.Runnable#run()
		 */
		@SuppressWarnings("unchecked")
		@Override
		public void run()
		{
			logger.info("发送kafka消息封装线程启动[{}]", Thread.currentThread().getName());
			KeyedMessage<String, String> messTmp = null;
			while (true)
			{
				List<KeyedMessage<String, String>> kafkaMess = new ArrayList<KeyedMessage<String, String>>(batchSize);
				try
				{
					for (int i = 0; i < batchSize; i++)
					{
						// NOTE(review): assumes deQueue() blocks when empty or
						// returns null; if it returns null immediately this
						// loop busy-spins — confirm RingBufferQueue semantics.
						if (null != (messTmp = sendMessCache.deQueue()))
						{
							kafkaMess.add(messTmp);
						}
					}

					if (CollectionUtils.isEmpty(kafkaMess))
					{
						kafkaMess = null;
						continue;
					}
					sendPools.submit(getSendDateRunn(kafkaMess));
				}
				catch (Exception e)
				{
					logger.error("kafka消息发送失败............", e);
				}
			}

		}
	}
}
