/*
 * 文件名：KafkaDataAcceptServiceImpl.java
 * 版权：亚信科技(南京)版权所有
 * 描述：TODO
 * 修改人：xuwei3
 * 修改时间：2015-11-12
 * 修改内容：TODO
 */
package com.ailk.bigdata.etl.realstream.server.protocal.realmq;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.util.Properties;
import java.util.PropertyResourceBundle;
import java.util.ResourceBundle;

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.ailk.bigdata.etl.realstream.server.dao.RealStreamUnitDao;
import com.ailk.bigdata.etl.realstream.server.dao.impl.RealStreamUnitDaoPGImpl;
import com.ailk.bigdata.etl.realstream.server.model.KafkaInfo;
import com.ailk.bigdata.etl.realstream.server.model.KafkaProduceException;
import com.ailk.bigdata.etl.realstream.server.model.RealStreamUnit;
import com.ailk.bigdata.etl.realstream.server.monitor.EventCounter;
import com.ailk.bigdata.etl.realstream.server.service.DataAcceptService;
import com.google.common.base.Preconditions;

/**
 * Kafka message accept service bootstrap: starts a consumer source for a
 * real-stream unit's topic, sizing the consumer partition count from the
 * topic's actual partition count spread across the configured number of
 * consumer clusters.
 *
 * @author [xuwei3]
 * @version [2015-11-12]
 */
public class KafkaDataAcceptServiceImpl implements DataAcceptService
{
	private static final Logger logger = LoggerFactory.getLogger(KafkaDataAcceptServiceImpl.class);

	/** Consumer properties file path, relative to the process working directory. */
	private static final String CONSUMER_CONFIG_LOCATION = "../conf/consumer.properties";

	/**
	 * Initializes Kafka message acceptance for the first unit in {@code params}:
	 * reads consumer.properties, looks up the unit's Kafka cluster info via the
	 * DAO, then configures and starts a {@code PollableSourceRunner} wrapping a
	 * {@code KafKaRealMessageSource}. Any startup failure is logged, not
	 * rethrown — the service degrades to "not consuming" rather than crashing
	 * the caller.
	 *
	 * @param eventCount counter handed to the message source for monitoring
	 * @param params real-stream units; only params[0] is used — must be non-empty
	 * @see com.ailk.bigdata.etl.realstream.server.service.DataAcceptService#initRealStreamAcceptService(com.ailk.bigdata.etl.realstream.server.monitor.EventCounter)
	 */
	@Override
	public void initRealStreamAcceptService(EventCounter eventCount, RealStreamUnit... params)
	{
		// FIX: also require at least one element — params[0] below would
		// otherwise throw ArrayIndexOutOfBoundsException instead of a clear
		// IllegalArgumentException (a zero-length varargs call is non-null).
		Preconditions.checkArgument(params != null && params.length > 0, "初始化");
		RealStreamUnit perUnit = params[0];

		// The Kafka topic name is the unit's unitName.
		Long unitCode = perUnit.getOnlyUnitCode();
		String topicName = perUnit.getUnitName();
		// FIX: Guava Preconditions uses %s placeholders, not SLF4J-style {} —
		// the original template never substituted the unit code into the message.
		Preconditions.checkArgument(StringUtils.isNotEmpty(topicName), "主题不能为空,接口单元配置失败%s........................", perUnit.getOnlyUnitCode());

		logger.info("启动接收kafka消息线程.........");

		// FIX: try-with-resources — the original never closed the stream
		// (file-handle leak on every invocation).
		try (BufferedInputStream inputStream = new BufferedInputStream(new FileInputStream(CONSUMER_CONFIG_LOCATION)))
		{
			ResourceBundle bundle = new PropertyResourceBundle(inputStream);
			// Number of consumer clusters sharing the topic; defaults to 1.
			String clusterNum = bundle.containsKey("kafka_consumer_num") ? bundle.getString("kafka_consumer_num") : "1";

			Preconditions.checkArgument(StringUtils.isNotEmpty(perUnit.getKafkaId()), "kafka信息不能为空，配置错误，接口单元编码[" + unitCode + "]");
			RealStreamUnitDao unitDao = new RealStreamUnitDaoPGImpl();
			KafkaInfo kafkaInfo = unitDao.getKafkaInfo(perUnit.getKafkaId());
			Preconditions.checkArgument(kafkaInfo != null, "kafka信息不能为空，配置错误，接口单元编码[" + unitCode + "]");

			// Assemble the consumer source configuration.
			Properties config = new Properties();
			config.setProperty(JMSSourceConfiguration.KAFKA_CONSUMER_INTERFACE_TYPE, Long.toString(unitCode));
			config.setProperty(JMSSourceConfiguration.KAFKA_TOPIC_NAME, topicName);
			config.setProperty(JMSSourceConfiguration.KAFKA_ZOOKEEPER_CONNECT, kafkaInfo.getZkConnect());
			config.setProperty(JMSSourceConfiguration.KAFKA_NUMS_PATITION, Integer.toString(getPartitionNum(kafkaInfo, topicName, clusterNum)));

			PollableSourceRunner sourceRunner = new PollableSourceRunner();
			AbstractPollableSource pollableSource = new KafKaRealMessageSource(eventCount);
			pollableSource.configure(config);
			sourceRunner.setSource(pollableSource);
			sourceRunner.start();
		}
		catch (Exception e)
		{
			logger.error("初始化kafka消息接入异常", e);
		}
	}

	/**
	 * Computes how many partitions this consumer cluster should handle: the
	 * topic's partition count divided by the cluster count, rounded up.
	 * Falls back to a partition count of 1 when the metadata producer cannot
	 * be created, and to a cluster count of 1 when {@code clusterNum} is not
	 * a positive integer.
	 *
	 * @param kafkaInfo broker/cluster configuration for the unit
	 * @param topicName topic whose partition count is queried
	 * @param clusterNum configured consumer cluster count, as a string
	 * @return partitions per cluster, at least 1 when metadata lookup fails
	 */
	private int getPartitionNum(KafkaInfo kafkaInfo, String topicName, String clusterNum)
	{
		// FIX: harden against malformed or non-positive config — the original
		// let NumberFormatException (bad text) and ArithmeticException
		// (division by zero) escape to the caller's catch-all.
		int clusterN;
		try
		{
			clusterN = Integer.parseInt(clusterNum);
		}
		catch (NumberFormatException e)
		{
			logger.warn("kafka_consumer_num配置非法[" + clusterNum + "],使用默认值1");
			clusterN = 1;
		}
		if (clusterN <= 0)
		{
			clusterN = 1;
		}

		Producer<String, String> producer = null;
		int partitionNum;
		try
		{
			producer = initKafkaProductor(kafkaInfo);
			partitionNum = producer.partitionsFor(topicName).size();
		}
		catch (KafkaProduceException e)
		{
			logger.error("初始化kafka生产者异常", e);
			partitionNum = 1;
		}
		finally
		{
			// Metadata-only producer: always release its network resources.
			if (null != producer)
			{
				producer.close();
			}
		}
		// Ceiling division: spread partitions evenly across clusters.
		return partitionNum % clusterN == 0 ? partitionNum / clusterN : partitionNum / clusterN + 1;
	}

	/**
	 * Creates a short-lived String/String KafkaProducer for the given cluster,
	 * used only to query topic metadata (the partition count).
	 *
	 * @param kafkaInfo broker list and ack configuration; broker list required
	 * @return a configured producer — caller is responsible for closing it
	 * @throws KafkaProduceException wrapping any failure during creation
	 */
	private Producer<String, String> initKafkaProductor(KafkaInfo kafkaInfo) throws KafkaProduceException
	{
		try
		{
			Preconditions.checkNotNull(kafkaInfo.getMetaBrokerList(), "文件转kafka消息失败,kafka配置失败，请检查相关配置[%s]", kafkaInfo.toString());
			Properties kafkaProps = new Properties();
			// Disable periodic metadata refresh; this producer is transient.
			kafkaProps.put("topic.metadata.refresh.interval.ms", "-1");
			kafkaProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaInfo.getMetaBrokerList());
			kafkaProps.put(ProducerConfig.ACKS_CONFIG, kafkaInfo.getRequiredAcks());
			kafkaProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 1024 * 1024 * 128 + "");
			kafkaProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 163840 + "");
			kafkaProps.put(ProducerConfig.SEND_BUFFER_CONFIG, 5 * 1024 * 1024 + "");
			kafkaProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
			kafkaProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
			return new KafkaProducer<String, String>(kafkaProps);
		}
		// FIX: a single Throwable catch replaces the original Exception +
		// Throwable pair — behavior is identical (Errors were wrapped into
		// KafkaProduceException too) with the duplication removed.
		catch (Throwable th)
		{
			logger.error("创建kafka消息生产者失败", th);
			throw new KafkaProduceException(th);
		}
	}

}
