package com.htdata.iiot.kafka2tsdb.kafka;

import java.util.Properties;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.log4j.Logger;

import com.htdata.iiot.kafka2tsdb.config.Configs;

/**
 * Thin wrapper that builds a Kafka {@code Consumer<String, String>} from the
 * application settings in {@code Configs.KafkaConfig}.
 *
 * @author SunMy
 */

public class HTKafkaConsumer {
    private static final Logger logger = Logger.getLogger(HTKafkaConsumer.class);

    /** The wrapped Kafka consumer; created once in the constructor. */
    private Consumer<String, String> consumer;

    /**
     * Builds a {@code KafkaConsumer} from the application-wide
     * {@link Configs.KafkaConfig} settings. Keys and values are both
     * deserialized as UTF-8 strings via {@code StringDeserializer}.
     */
    public HTKafkaConsumer() {
        Properties props = new Properties();
        props.put("group.id", Configs.KafkaConfig.GROUP_ID);
        props.put("bootstrap.servers", Configs.KafkaConfig.BOOTSTRAP_SERVERS);
        props.put("enable.auto.commit", Configs.KafkaConfig.ENABLE_AUTO_COMMIT);
        props.put("auto.offset.reset", Configs.KafkaConfig.AUTO_OFFSET_RESET);
        // Maximum time the broker may block a fetch request when fetch.min.bytes
        // is not yet satisfied. NOTE(review): the original key "fetch.wait.max.ms"
        // is the OLD consumer's name and is ignored (with a WARN) by the new
        // consumer; the correct key is "fetch.max.wait.ms".
        props.put("fetch.max.wait.ms", "1000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // NOTE(review): removed the former "metadata.broker.list" and "topic"
        // entries — neither is a valid new-consumer configuration (the client
        // only logs "unknown configuration" warnings for them). The topic to
        // consume (Configs.KafkaConfig.TOPIC) must be passed by the caller to
        // subscribe()/assign() on the consumer returned from getConsumer().
        this.consumer = new org.apache.kafka.clients.consumer.KafkaConsumer<String, String>(props);
        logger.info("KafkaConsumer init success");
    }

    /**
     * @return the underlying Kafka {@link Consumer}; callers are responsible
     *         for subscribing to a topic and for closing it when done
     */
    public Consumer<String, String> getConsumer() {
        return this.consumer;
    }
}
