/*
 * 文件名：PassivSendHandler.java
 * 版权：亚信联创版权所有
 * 描述：被动模式数据发送处理器（Disruptor WorkHandler），负责将事件分发到Kafka、落地文件及Netty通道
 * 修改人：yuanze
 * 修改时间：2014年11月30日
 * 修改内容：新建
 */
package com.ailk.bigdata.etl.realstream.server.disruptor;

import java.util.Properties;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;

import io.netty.channel.Channel;

import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.ailk.bigdata.etl.realstream.server.model.BaseNetPackage;
import com.ailk.bigdata.etl.realstream.server.model.KafkaInfo;
import com.ailk.bigdata.etl.realstream.server.model.KafkaProduceException;
import com.ailk.bigdata.etl.realstream.server.model.NettyChannelInfo;
import com.ailk.bigdata.etl.realstream.server.model.RealDataDispatcher;
import com.ailk.bigdata.etl.realstream.server.model.RealStreamUnit;
import com.ailk.bigdata.etl.realstream.server.model.ValueEvent;
import com.ailk.bigdata.etl.realstream.server.monitor.EventCounter;
import com.ailk.bigdata.etl.realstream.server.tools.RealStreamTool;
import com.google.common.base.Preconditions;
import com.lmax.disruptor.WorkHandler;

/**
 * @description 被动模式发送
 * @author [yuanze]
 * @version [版本号,2014年11月30日]
 * @see [相关类/方法]
 * @since [产品/模块版本]
 */
public class PassivSendHandler implements WorkHandler<ValueEvent>
{
	private static final Logger logger = LoggerFactory.getLogger(PassivSendHandler.class);

	// After every batchPartition key-less messages a fresh random partition key is
	// drawn, so un-keyed traffic still spreads across Kafka partitions over time.
	private static final int batchPartition = 10000;

	// Nullable: remains null until setKafkaInfo() is called with a valid config.
	private Producer<String, String> productor;
	// Fixed seed is fine here: the key only needs to vary, not be unpredictable.
	private final Random random = new Random(batchPartition);
	private final AtomicInteger count = new AtomicInteger(0);
	private String partition = "0";
	private final EventCounter eventCounter;

	public PassivSendHandler(EventCounter eventCounter)
	{
		this.eventCounter = eventCounter;
	}

	/**
	 * (Re)configures the Kafka producer; a null kafkaInfo disables Kafka output.
	 *
	 * @param kafkaInfo broker configuration, or null to disable
	 * @throws KafkaProduceException when the producer cannot be created
	 */
	public void setKafkaInfo(KafkaInfo kafkaInfo) throws KafkaProduceException
	{
		productor = (null == kafkaInfo) ? null : initKafkaProductor(kafkaInfo);
	}

	/**
	 * Formats the incoming package for its data type, then forwards it to
	 * Kafka, to the file-backup disruptor and to all active Netty channels,
	 * finally incrementing the sent-event counter.
	 *
	 * @see com.lmax.disruptor.WorkHandler#onEvent(java.lang.Object)
	 */
	@Override
	public void onEvent(ValueEvent event) throws Exception
	{
		BaseNetPackage dataPackage = event.getDataPackage();
		long dataType = Long.valueOf(dataPackage.getDataType());

		// Single lookup instead of containsKey + get; also protects against a
		// null value ever being stored in the cache.
		RealStreamUnit streamUnit = RealStreamTool.REALS_UNIT_CACHE.get(dataType);
		if (null == streamUnit)
		{
			logger.error("单元接口类型未进行初始化，分发失败......");
			return;
		}

		dataPackage.format(dataType, streamUnit.getAcceptFormat(), streamUnit.getSendFormat());

		// kafka消息
		if (RealStreamTool.DATA_SEND_KAFKA_CAHCE.contains(dataType))
		{
			sendToKafka(streamUnit, dataPackage);
		}

		// 落地文件
		if (RealStreamTool.UNITCODE_FILEBACK_BUS_DISRUPTOR.containsKey(dataType))
		{
			RealStreamTool.UNITCODE_FILEBACK_BUS_DISRUPTOR.get(dataType).writeStream(dataPackage.getSendContent());
		}

		dispatchToChannels(dataType, dataPackage);

		eventCounter.incrementEventSendedCount();
	}

	/**
	 * Builds a keyed or key-less record from the package and pushes it to Kafka.
	 * When a partition column is configured (index > 0, 1-based) its value is
	 * used as the message key; otherwise a periodically re-randomized key is used.
	 */
	private void sendToKafka(RealStreamUnit streamUnit, BaseNetPackage dataPackage)
	{
		// Guard: setKafkaInfo(null) (or never calling it) leaves productor null;
		// previously this path threw an NPE.
		if (null == productor)
		{
			logger.error("kafka生产者未初始化，消息未发送到kafka");
			return;
		}

		String sendMess = dataPackage.getSendContent();
		int index = streamUnit.getPartitionColumnIndex();
		ProducerRecord<String, String> record;

		if (0 != index)
		{
			String[] values = StringUtils.split(sendMess, dataPackage.getSeparator());
			// Guard: the configured partition column may be absent from this
			// message; previously values[index - 1] could throw
			// ArrayIndexOutOfBoundsException.
			if (null != values && values.length >= index && StringUtils.isNotEmpty(values[index - 1]))
			{
				record = new ProducerRecord<String, String>(streamUnit.getUnitName(), values[index - 1], sendMess);
			}
			else
			{
				record = new ProducerRecord<String, String>(streamUnit.getUnitName(), sendMess);
			}
		}
		else
		{
			// NOTE(review): count/partition are unsynchronized — assumes each
			// handler instance is driven by a single worker thread (standard
			// Disruptor WorkerPool usage); confirm before sharing instances.
			if (count.incrementAndGet() % batchPartition == 0)
			{
				partition = Integer.toString(random.nextInt());
			}
			record = new ProducerRecord<String, String>(streamUnit.getUnitName(), partition, sendMess);
		}
		productor.send(record);
	}

	/**
	 * Writes the package to every active Netty channel registered for this
	 * data type, logging channels that are down.
	 */
	private void dispatchToChannels(long dataType, BaseNetPackage dataPackage)
	{
		RealDataDispatcher dispatcher = RealStreamTool.DISPATCH_CAHCE.get(dataType);
		boolean sendMsgIsNormal = false;
		for (NettyChannelInfo nettyChannelInfo : dispatcher.getSEND_CHANNEL_CACHE())
		{
			Channel channel = nettyChannelInfo.getChannel();
			if (channel.isActive())
			{
				channel.writeAndFlush(dataPackage);
				sendMsgIsNormal = true;
			}
			else
			{
				logger.warn("{}该连接无法进行数据传输", nettyChannelInfo.getRemoteIp());
			}
		}

		if (!sendMsgIsNormal)
		{
			// dispatcher.addMessage(event);
			// dispatcher.setSendable(false);
			logger.debug("单元接口[{}]监测到数据没有可以发送的通道 停止发送", dispatcher.getRealStreamUnit().getOnlyUnitCode());
		}
	}

	/**
	 * Builds a Kafka producer from the given broker configuration.
	 *
	 * @param kafkaInfo broker list / acks configuration (must have a broker list)
	 * @return a ready-to-use string/string producer
	 * @throws KafkaProduceException when producer construction fails for any reason
	 */
	private Producer<String, String> initKafkaProductor(KafkaInfo kafkaInfo) throws KafkaProduceException
	{
		try
		{
			Preconditions.checkNotNull(kafkaInfo.getMetaBrokerList(), "文件转kafka消息失败,kafka配置失败，请检查相关配置[%s]", kafkaInfo.toString());
			Properties kafkaProps = new Properties();
			// Disable timer-based topic metadata refresh; refresh only on failure.
			kafkaProps.put("topic.metadata.refresh.interval.ms", "-1");
			kafkaProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaInfo.getMetaBrokerList());
			kafkaProps.put(ProducerConfig.ACKS_CONFIG, kafkaInfo.getRequiredAcks());
			kafkaProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 1024 * 1024 * 128 + "");
			kafkaProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 163840 + "");
			kafkaProps.put(ProducerConfig.SEND_BUFFER_CONFIG, 5 * 1024 * 1024 + "");
			kafkaProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
			kafkaProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
			return new KafkaProducer<String, String>(kafkaProps);
		}
		catch (Throwable th)
		{
			// Single catch replaces the duplicate Exception/Throwable pair; Throwable
			// is deliberate: KafkaProducer init can surface Errors (e.g. linkage
			// problems) that must still be reported as KafkaProduceException.
			logger.error("创建kafka消息生产者失败", th);
			throw new KafkaProduceException(th);
		}
	}
}
