package com.tiantang.kafka.consumer;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * 
 * @description:功能描述 自定义消费者
 * @author: liujinkun
 * @date 2018年5月27日
 * @version:1.0
 */
public class MyConsumer {
	
	private static final Logger logger = LoggerFactory.getLogger(MyConsumer.class);
	
	/**
	 * Subscribes to the {@code first} topic and polls it forever, logging every
	 * record received. Blocks the calling thread until the JVM shuts down.
	 *
	 * <p>Shutdown handling: {@link KafkaConsumer} is <b>not</b> thread-safe — the
	 * only method another thread may call is {@code wakeup()}. The shutdown hook
	 * therefore calls {@code wakeup()}, which makes the blocked {@code poll()}
	 * throw {@link WakeupException}; the polling thread then closes the consumer
	 * itself in the {@code finally} block. (The original code called
	 * {@code close()} directly from the hook thread, racing the poll loop.)
	 */
	public void consumer(){
		// 1. Consumer configuration
		Map<String, Object> configs = new HashMap<String, Object>();
		// Kafka broker bootstrap list
		configs.put("bootstrap.servers", "com.tiantang.hadoop01:9092");
		
		// Consumer group id
		configs.put("group.id", "testGroup1");
		
		// Commit offsets automatically in the background
		configs.put("enable.auto.commit", true);
		
		// Deserializer classes for record keys and values
		configs.put("key.deserializer", org.apache.kafka.common.serialization.StringDeserializer.class);
		configs.put("value.deserializer", org.apache.kafka.common.serialization.StringDeserializer.class);
		
		// 2. Create the KafkaConsumer instance and subscribe to the topic
		final KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(configs);
		consumer.subscribe(Arrays.asList("first"));
		
		// Register a JVM shutdown hook. wakeup() is the only KafkaConsumer
		// method that is safe to call from another thread; it aborts the
		// in-flight poll() with a WakeupException.
		Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
			
			@Override
			public void run() {
				consumer.wakeup();
			}
		}));
		
		// 3. Poll loop — runs until wakeup() breaks it out on shutdown
		try {
			while (true) {
				ConsumerRecords<String, String> records = consumer.poll(1000);
				for (ConsumerRecord<String, String> record : records) {
					logger.info("消费的数据为：offset={},partition={},key={},value={}", record.offset(), record.partition(), record.key(), record.value());
				}
			}
		} catch (WakeupException e) {
			// Expected during shutdown — not an error; fall through to close.
		} finally {
			// 4. Close the connection on the polling thread (now reachable,
			// unlike the original dead "close" step after the infinite loop).
			consumer.close();
		}
	}

}
