/*
 * 文件名：KafKaMessageConsumer.java
 * 版权：亚信联创版权所有
 * 描述：TODO
 * 修改人：napo
 * 修改时间：2014-12-25
 * 修改内容：TODO
 */
package com.ailk.bigdata.etl.realstream.server.protocal.realmq;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.ailk.bigdata.etl.realstream.server.monitor.EventCounter;
import com.ailk.bigdata.etl.realstream.server.protocal.realmq.PollableSource.Status;

/**
 * @description TODO
 * @author [xuwei3]
 * @version [版本号,2014-12-25]
 * @see [相关类/方法]
 * @since [产品/模块版本]
 */
public class KafKaMessageConsumer
{
	private static final Logger logger = LoggerFactory.getLogger(KafKaMessageConsumer.class);

	/**
	 * Partition streams pending dispatch, keyed by worker-thread index.
	 * NOTE(review): public static mutable state — kept as-is for backward
	 * compatibility with external readers, but two consumer instances would
	 * stomp on each other's entries; confirm only one instance is created.
	 */
	public static ConcurrentHashMap<Integer, KafkaStream<byte[], byte[]>> KAFKA_STREAM_CHACHE = new ConcurrentHashMap<Integer, KafkaStream<byte[], byte[]>>();

	private EventCounter eventCounter;
	private final ConsumerConnector consumerConnector;
	private ExecutorService executoThread;
	private final int onlyUnitCode;

	/**
	 * Creates a high-level Kafka consumer connected via ZooKeeper, opens
	 * {@code numsPatitions} streams for {@code topic} and caches them in
	 * {@link #KAFKA_STREAM_CHACHE} for later dispatch by {@link #take()}.
	 *
	 * @param eventCounter    monitoring counter passed through to each converter task
	 * @param zkConnect       ZooKeeper connect string
	 * @param consumerGroupid Kafka consumer group id
	 * @param numsPatitions   number of streams/worker threads to create
	 * @param topic           topic to subscribe to
	 * @param dataType        numeric unit code; must parse as an int
	 */
	KafKaMessageConsumer(EventCounter eventCounter, String zkConnect, String consumerGroupid, int numsPatitions, String topic, String dataType)
	{
		this.eventCounter = eventCounter;
		this.onlyUnitCode = Integer.parseInt(dataType);
		Properties props = new Properties();
		// ZooKeeper connection
		props.put("zookeeper.connect", zkConnect);

		// consumer group this client belongs to
		props.put("group.id", consumerGroupid);

		// ZooKeeper session/sync/connect timeouts
		props.put("zookeeper.session.timeout.ms", "10000");
		props.put("zookeeper.sync.time.ms", "2000");
		props.put("zookeeper.connection.timeout.ms", "10000");
		props.put("auto.commit.interval.ms", "1000");
		props.put("queued.max.message.chunks", "50");
		// start from the earliest available offset when no committed offset exists
		props.put("auto.offset.reset", "smallest");
		props.put("auto.commit.enable", "true");
		props.put("fetch.min.bytes", "10240");
		// each fetch returns multiple messages; this caps the total size —
		// raising it increases consumer-side memory usage
		props.put("fetch.message.max.bytes", "1024000");
		// how long the broker blocks when fetch.min.bytes is not yet available;
		// on timeout the accumulated messages are sent to the consumer anyway
		props.put("fetch.wait.max.ms", "1000");
		props.put("socket.receive.buffer.bytes", "1048576");
		// serializer class
		props.put("serializer.class", "kafka.serializer.StringEncoder");
		props.put("partition.assignment.strategy", "roundrobin");
		ConsumerConfig config = new ConsumerConfig(props);
		consumerConnector = Consumer.createJavaConsumerConnector(config);
		Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
		// valueOf instead of deprecated new Integer(...)
		topicCountMap.put(topic, Integer.valueOf(numsPatitions));
		this.executoThread = Executors.newFixedThreadPool(numsPatitions);
		List<KafkaStream<byte[], byte[]>> comsuStreams = consumerConnector.createMessageStreams(topicCountMap).get(topic);
		int threadNum = 0;
		for (KafkaStream<byte[], byte[]> comsuStream : comsuStreams)
		{
			KAFKA_STREAM_CHACHE.put(threadNum++, comsuStream);
		}
	}

	/**
	 * Dispatches every cached stream to a {@link KafkaMessageConverter} worker
	 * exactly once, removing it from the cache as it is handed off.
	 *
	 * @return {@code Status.BACKOFF} once all streams have been dispatched,
	 *         {@code Status.READY} while streams remain
	 * @throws Exception if interrupted while sleeping
	 */
	Status take() throws Exception
	{
		logger.debug("..................开始获取消息..................");
		for (final Entry<Integer, KafkaStream<byte[], byte[]>> stream : KAFKA_STREAM_CHACHE.entrySet())
		{
			// BUGFIX: the original called remove(stream) with the Entry object
			// itself; since the map keys are Integers that never matched, so
			// streams were never evicted and were resubmitted to the executor on
			// every poll (duplicate consumers per stream), and the BACKOFF branch
			// below was unreachable. Remove by key so each stream is dispatched
			// exactly once.
			KAFKA_STREAM_CHACHE.remove(stream.getKey());
			executoThread.submit(new KafkaMessageConverter(eventCounter, onlyUnitCode, stream.getValue(), stream.getKey()));
		}
		Thread.sleep(5000);
		logger.debug("..................获取消息结束..................");
		if (KAFKA_STREAM_CHACHE.isEmpty())
		{
			return Status.BACKOFF;
		}
		return Status.READY;

	}

	/** Forces a synchronous commit of the consumed offsets to ZooKeeper. */
	void commit()
	{
		if (consumerConnector != null)
		{
			consumerConnector.commitOffsets(true);
		}
	}

	/**
	 * Shuts down the consumer connector and the worker pool.
	 * Sleeps 10s before shutdown so the periodic (default 1000ms) offset
	 * auto-commit has flushed the latest offsets to ZooKeeper
	 * (/consumers/&lt;group&gt;/offsets/&lt;topic&gt;/&lt;partition&gt;);
	 * otherwise a restart — or a partition-leader change — may redeliver
	 * already-processed messages.
	 */
	void close()
	{
		if (consumerConnector != null)
		{
			try
			{
				Thread.sleep(10000);
			}
			catch (InterruptedException e)
			{
				// BUGFIX: the interrupt was silently swallowed; restore the
				// interrupt flag so callers can observe it, and proceed with
				// shutdown anyway.
				Thread.currentThread().interrupt();
			}
			consumerConnector.shutdown();
		}
		if (executoThread != null)
		{
			executoThread.shutdown();
			// BUGFIX: awaitTermination was previously called outside the null
			// check, risking an NPE when executoThread is null.
			try
			{
				executoThread.awaitTermination(1000, TimeUnit.SECONDS);
			}
			catch (InterruptedException e)
			{
				Thread.currentThread().interrupt();
				logger.warn("", e);
			}
		}
	}

}
