package com.common;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;


/**
 * Consumer-side API: starts Kafka consumer threads for a topic and exposes the
 * consumed message payloads through a bounded blocking queue.
 *
 * @author admin
 */
public class KafkaConsumerUtil {
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerUtil.class);

    /** Utility class — static members only, not instantiable. */
    private KafkaConsumerUtil() {
    }

    /**
     * Builds the {@link ConsumerConfig} for the given topic, resolving per-topic
     * overrides via {@code KafkaConfigUtils} with fallback defaults.
     *
     * @param topic the Kafka topic name
     * @return the consumer configuration for {@code topic}
     */
    private static ConsumerConfig createConsumerConfig(String topic) {
        Properties props = new Properties();
        props.put(KafkaConstant.ZKCONNECT, KafkaConfigUtils.getProps(
                KafkaConstant.ZKCONNECT_TOPIC + topic,
                KafkaConfigUtils.getProps(KafkaConstant.ZKCONNECT)));

        // Consumer group id: per-topic override, defaulting to "group-<topic>".
        props.put(
                KafkaConstant.GROUP_ID,
                KafkaConfigUtils.getProps(KafkaConstant.GROUP_ID_TOPIC + topic, "group-"
                        + topic));

        // NOTE(review): "AOTU_OFFSET_RESET" looks like a typo of "AUTO_OFFSET_RESET";
        // the constant is declared in KafkaConstant and cannot be renamed here.
        props.put(KafkaConstant.AOTU_OFFSET_RESET, KafkaConfigUtils.getProps(
                KafkaConstant.AUTO_OFFSET_RESET_TOPIC + topic, "smallest"));

        boolean autoCommit = true;
        // BUGFIX: the flag must be stored as a String. Properties.getProperty()
        // (which Kafka's config parsing relies on) returns null for non-String
        // values, so a boxed Boolean would be silently ignored.
        props.put(KafkaConstant.AUTOCOMMIT_OFFSET, String.valueOf(autoCommit));
        if (autoCommit) {
            // Auto-commit interval in ms; ideally offsets would be committed
            // manually after each message is fully processed.
            props.put(KafkaConstant.COMMIT_OFFSET_INTERVAL_MS, KafkaConfigUtils.getProps(
                    KafkaConstant.COMMIT_OFFSET_INTERVAL_MS_TOPIC + topic, "1000"));
        }

        return new ConsumerConfig(props);
    }

    /**
     * Starts consumer threads for the given topic and returns the queue that
     * receives the consumed message payloads.
     *
     * @param topic the Kafka topic name
     * @return the blocking queue of consumed messages, or {@code null} if the
     *         configured thread count is less than 1
     */
    public static BlockingQueue<String> startConsumer(String topic) {
        int consumerThreadCount = Integer.parseInt(KafkaConfigUtils.getProps(
                KafkaConstant.CONSUMER_THREAD_TOPIC + topic, "1"));

        if (consumerThreadCount < 1) {
            LOGGER.error("消费者线程数量必须大于等于1");
            return null;
        }

        return startConsumerThreadPool(topic, consumerThreadCount);
    }

    /**
     * Worker that drains one Kafka stream into the shared blocking queue.
     * One instance runs per stream; reading from the stream blocks until a
     * message arrives.
     */
    private static class MessageConsumer implements Runnable {
        private final KafkaStream<String, String> stream;
        private final BlockingQueue<String> queue;

        MessageConsumer(KafkaStream<String, String> stream, BlockingQueue<String> queue) {
            this.stream = stream;
            this.queue = queue;
        }

        @Override
        public void run() {
            ConsumerIterator<String, String> it = stream.iterator();
            // hasNext() blocks until a message is available.
            while (it.hasNext()) {
                MessageAndMetadata<String, String> message = it.next();
                try {
                    queue.put(message.message());
                } catch (InterruptedException e) {
                    // Restore the interrupt flag and stop consuming instead of
                    // swallowing the interruption (was printStackTrace()).
                    Thread.currentThread().interrupt();
                    LOGGER.warn("Consumer thread interrupted, stopping stream drain", e);
                    break;
                }
            }
        }
    }

    /**
     * Creates the Kafka consumer connector, a fixed thread pool sized to the
     * stream count, and submits one {@link MessageConsumer} per stream.
     *
     * @param topic               the Kafka topic name
     * @param consumerThreadCount number of streams/consumer threads; the pool
     *                            size must equal the stream count because stream
     *                            reads are blocking
     * @return the bounded queue that receives consumed messages
     */
    private static BlockingQueue<String> startConsumerThreadPool(String topic, int consumerThreadCount) {
        ConsumerConnector consumer = Consumer
                .createJavaConsumerConnector(createConsumerConfig(topic));

        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, Integer.valueOf(consumerThreadCount));

        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

        Map<String, List<KafkaStream<String, String>>> consumerMap = consumer.createMessageStreams(topicCountMap,
                keyDecoder, valueDecoder);

        // One thread per stream: consuming a stream blocks, so the pool size
        // must match streams.size().
        ExecutorService executor = Executors.newFixedThreadPool(consumerThreadCount);

        // Streams for the requested topic.
        List<KafkaStream<String, String>> streams = consumerMap.get(topic);

        // Bounded queue provides back-pressure; capacity is configurable per topic.
        BlockingQueue<String> blockingQueue = new LinkedBlockingQueue<String>(Integer.parseInt(KafkaConfigUtils
                .getProps(KafkaConstant.BLOCKINGQUEUE_SIZE_TOPIC + topic, "10")));

        for (KafkaStream<String, String> stream : streams) {
            executor.submit(new MessageConsumer(stream, blockingQueue));
        }

        // NOTE(review): neither the executor nor the consumer connector is ever
        // shut down, and callers receive no handle to them — consider returning
        // a wrapper exposing shutdown()/consumer.shutdown(). TODO confirm with
        // the owning team before changing the public interface.
        return blockingQueue;
    }
}
