package com.kafka;

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.log4j.PropertyConfigurator;

import com.kafka.hbase.HBaseUtils;

import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

/**
 * Kafka 0.8 high-level ("old" ZooKeeper-based) consumer that reads String
 * messages from {@code KafkaProducer.TOPIC} and writes each one into HBase
 * via {@code HBaseUtils.put(String)}. Blocks forever in {@link #consume()}.
 *
 * Created by fengtang, 2015/10/8 (KafkaDemo_01).
 */
public class KafkaConsumer {

	    private final ConsumerConnector consumer;

	    private KafkaConsumer() {
	        Properties props = new Properties();

	        // ZooKeeper ensemble: the 0.8 high-level consumer uses ZK for group
	        // coordination and offset storage.
	        props.put("zookeeper.connect", "10.0.104.20:2181,10.0.104.21:2181,10.0.104.22:2181");

	        // Consumer group id; consumers sharing this id split the topic's partitions.
	        props.put("group.id", "jd-group3");

	        // ZooKeeper session timeout / sync tuning and offset auto-commit interval.
	        props.put("zookeeper.session.timeout.ms", "400000");
	        props.put("zookeeper.sync.time.ms", "200");
	        props.put("auto.commit.interval.ms", "1000");

	        /*
	         * The high-level consumer commits this group's offsets to ZooKeeper;
	         * on restart, consumption resumes from the stored offset. When no
	         * offset exists for the group (new group id, or ZK data wiped),
	         * "auto.offset.reset" decides where to start: "largest" (default,
	         * only new messages) or "smallest" (replay the topic from the start).
	         *
	         * FIX: the 0.8 high-level consumer's property is "auto.commit.enable";
	         * the original used the new-consumer name "enable.auto.commit", which
	         * this API ignores, silently leaving auto-commit switched ON.
	         */
	        props.put("auto.commit.enable", "false");
	        props.put("auto.offset.reset", "smallest");
	        //props.put("auto.offset.reset", "largest");

	        // NOTE: the original also set "serializer.class" here; that is a
	        // producer-side property with no effect on a consumer, so it was removed.
	        ConsumerConfig config = new ConsumerConfig(props);
	        consumer = kafka.consumer.Consumer.createJavaConsumerConnector(config);
	    }

	    /**
	     * Opens a single stream on {@code KafkaProducer.TOPIC} and loops forever,
	     * writing every received message into HBase. A failed HBase write is
	     * logged and skipped so the consumer keeps running.
	     */
	    void consume() {
	        // Request exactly one stream (consumer thread) for the topic.
	        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	        topicCountMap.put(KafkaProducer.TOPIC, Integer.valueOf(1));

	        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
	        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

	        Map<String, List<KafkaStream<String, String>>> consumerMap =
	                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
	        KafkaStream<String, String> stream = consumerMap.get(KafkaProducer.TOPIC).get(0);
	        ConsumerIterator<String, String> it = stream.iterator();

	        // hasNext() blocks until the next message arrives, so this loop never ends.
	        while (it.hasNext()) {
	            try {
	                // message() is already a String (StringDecoder) — no re-wrapping needed.
	                HBaseUtils.put(it.next().message());
	            } catch (IOException e) {
	                // One failed HBase write should not stop the whole consumer.
	                e.printStackTrace();
	            }
	        }
	    }

	    public static void main(String[] args) {
	        String path = System.getProperty("user.dir");
	        System.out.println(path);
	        // Build the log4j config path portably; the original hard-coded "\\"
	        // separators and therefore only worked on Windows.
	        File log4jConf = new File(path, "src/main/resources/log4j.properties");
	        PropertyConfigurator.configure(log4jConf.getAbsolutePath());
	        new KafkaConsumer().consume();
	    }
}
