/**
 * <p>文件名:		BaseConsumer.java</p>
 * <p>版权:		CopyrightTag</p>
 * <p>公司:		CompanyTag</p>
 * @author		周华彬(zhouhuabin@ctfo.com, zhou_hua_bin@163.com)
*/

package com.caits.lbs.framework.services.kafka;

import java.io.InputStream;
import java.lang.reflect.Constructor;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;
import kafka.serializer.Decoder;
import kafka.utils.VerifiableProperties;

import org.apache.log4j.Logger;

import com.caits.lbs.framework.log.CommonLogFactory;
import com.caits.lbs.framework.services.sms.IConsumeMessage;
/**
 * <p>BaseConsumer</p>
 * <p>kafka普通消费类</p>
 *
 * @author		周华彬(zhouhuabin@ctfo.com, zhou_hua_bin@163.com)
 * @version		0.0.0
 * <table style="border:1px solid gray;">
 * <tr>
 * <th width="100px">版本号</th><th width="100px">动作</th><th width="100px">修改人</th><th width="100px">修改时间</th>
 * </tr>
 * <!-- 以 Table 方式书写修改历史 -->
 * <tr>
 * <td>0.0.0</td><td>创建类</td><td>zhouhuabin</td><td>2015年4月13日 下午6:11:01</td>
 * </tr>
 * <tr>
 * <td>XXX</td><td>XXX</td><td>XXX</td><td>XXX</td>
 * </tr>
 * </table>
 */

public class BaseConsumer {

	/** Logger for this consumer. */
	protected Logger log = CommonLogFactory.getLog();
    /** Consumer configuration built from classpath resource "consumer.properties". */
    private final ConsumerConfig config;
    /** Decoder implementation class name; Kafka's pass-through decoder by default. */
    private String decoderClass = "kafka.serializer.DefaultDecoder";
    /** Decoder instance that turns stripped message bytes into the handler's input. */
    private final Decoder<?> decoder;
    /** Topic to consume from. */
    private final String topic;
    /** Number of partition streams (one consumer thread per stream). */
    private final int partitionsNum;
    /** Callback invoked once per decoded message. */
    private final IConsumeMessage messageHandler;
    /** Kafka connector; created in start(), shut down in close(). May be null before start(). */
    private ConsumerConnector connector;
    /** Thread pool running one MessageRunner per partition stream. May be null before start(). */
    private ExecutorService threadPool;

    /**
     * Creates a consumer for the given topic.
     * <p>
     * Loads "consumer.properties" from the system classpath, builds the
     * {@link ConsumerConfig}, and reflectively instantiates the decoder named by
     * the optional "deserializer.class" property (the decoder class must expose a
     * constructor taking a {@link VerifiableProperties}).
     *
     * @param topicName      topic to consume
     * @param partitionsNum  number of partition streams / worker threads
     * @param handler        callback receiving each decoded message
     * @throws Exception if the properties resource is missing or unreadable, or
     *                   the decoder class cannot be loaded/instantiated
     */
    public BaseConsumer(String topicName, int partitionsNum, IConsumeMessage handler) throws Exception {
        Properties properties = new Properties();
        InputStream in = ClassLoader.getSystemResourceAsStream("consumer.properties");
        if (in == null) {
            // Fail with a clear message instead of the NPE Properties.load(null) would throw.
            throw new IllegalStateException("consumer.properties not found on the classpath");
        }
        try {
            properties.load(in);
        } finally {
            in.close(); // fix: the original leaked this stream
        }
        config = new ConsumerConfig(properties);
        topic = topicName;
        if (properties.containsKey("deserializer.class")) {
            decoderClass = properties.getProperty("deserializer.class"); // getProperty already returns String
        }
        @SuppressWarnings("unchecked")
        Class<Decoder<?>> clazz = (Class<Decoder<?>>) Class.forName(decoderClass);
        Constructor<Decoder<?>> constructor = clazz.getConstructor(VerifiableProperties.class);
        decoder = constructor.newInstance(new VerifiableProperties(properties));
        this.partitionsNum = partitionsNum;
        this.messageHandler = handler;
    }

    /**
     * Connects to Kafka, opens one stream per partition, and starts a fixed-size
     * thread pool with one {@link MessageRunner} per stream.
     *
     * @throws Exception if the connector cannot be created
     */
    public void start() throws Exception {
        connector = Consumer.createJavaConsumerConnector(config);
        Map<String, Integer> topics = new HashMap<String, Integer>();
        topics.put(topic, partitionsNum);
        Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topics);
        List<KafkaStream<byte[], byte[]>> partitions = streams.get(topic);
        threadPool = Executors.newFixedThreadPool(partitionsNum);
        for (KafkaStream<byte[], byte[]> partition : partitions) {
            threadPool.execute(new MessageRunner(partition));
        }
    }

    /**
     * Stops the worker threads and shuts down the Kafka connector.
     * Safe to call even if {@link #start()} was never invoked.
     */
    public void close() {
        try {
            if (threadPool != null) {
                threadPool.shutdownNow();
            }
        } catch (Exception e) {
            // fix: was silently swallowed; at least record why shutdown misbehaved
            log.warn("error shutting down consumer thread pool", e);
        } finally {
            if (connector != null) {
                connector.shutdown();
            }
        }
    }

    /**
     * Worker that drains one partition stream: strips framing bytes, decodes,
     * and forwards each message to the handler.
     */
    class MessageRunner implements Runnable {
        /** Stream connected to a single partition. */
        private KafkaStream<byte[], byte[]> partionStream;

        MessageRunner(KafkaStream<byte[], byte[]> partition) {
            this.partionStream = partition;
        }

        public void run() {
            // NOTE(review): only the first 10000 messages are handled; later ones are
            // consumed and silently discarded. Looks like leftover debug throttling —
            // confirm intent before removing.
            int index = 0;
            ConsumerIterator<byte[], byte[]> it = partionStream.iterator();
            while (it.hasNext()) {
                MessageAndMetadata<byte[], byte[]> item = it.next();
                log.info("partition:" + item.partition() + ",offset:" + item.offset());
                byte[] src_data = item.message();
                // Payload framing: 2 leading bytes and 1 trailing byte are stripped
                // (presumably protocol framing — TODO confirm against the producer).
                if (src_data == null || src_data.length < 3) {
                    // fix: copyOfRange(.., 2, len-1) would throw on short messages and
                    // kill this consumer thread permanently
                    log.warn("message too short to strip framing bytes, skipped; length="
                            + (src_data == null ? -1 : src_data.length));
                } else if (index < 10000) {
                    index++;
                    byte[] data = Arrays.copyOfRange(src_data, 2, src_data.length - 1);
                    try {
                        // fix: decode moved inside the try — a single undecodable
                        // message must not terminate the worker thread
                        Object result = decoder.fromBytes(data);
                        messageHandler.onMessage(result);
                    } catch (Exception e) {
                        log.error("回调消息处理异常,", e);
                    }
                }
                // Manual commit, used when auto.commit.enable=false. Committed after
                // processing so the current message is only acknowledged once handled.
                connector.commitOffsets();
            }
        }
    }

}