package com.etc.cafka;


import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;


/**
 * Demo for the legacy (0.8-era) Kafka high-level consumer API.
 *
 * <p>Connects to ZooKeeper, opens {@code STREAM_COUNT} streams for the "test" topic
 * and logs every received message. Each stream is drained on its own thread:
 * {@link ConsumerIterator#hasNext()} blocks indefinitely when
 * {@code consumer.timeout.ms} is unset, so consuming the streams sequentially
 * (as a plain {@code forEach} would) only ever reads the first stream.
 */
public class ConsumerDemo {
    private static final Logger log = LoggerFactory.getLogger(ConsumerDemo.class);

    // One stream per partition of the topic (topic was created with --partitions 3).
    private static final int STREAM_COUNT = 3;

    public static void main(String[] args) {
        String topic = "test";
        Properties props = new Properties();
        props.put("zookeeper.connect", "192.168.88.5:2181,192.168.88.6:2181,192.168.88.7:2181");
//        props.put("auto.offset.reset","smallest");
        props.put("group.id", "test-consumer-group");
        props.put("zookeeper.session.timeout.ms", "10000");
        props.put("zookeeper.sync.time.ms", "2000");
        props.put("auto.commit.interval.ms", "10000");
        // Timeout for ConsumerIterator.hasNext(); when unset, hasNext() blocks
        // forever until a new message arrives.
        // props.put("consumer.timeout.ms", "10000");
        // Friendly name of the partition-assignment strategy the server uses to
        // distribute partition ownership among consumer instances under group management.
        props.put(org.apache.kafka.clients.consumer.ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY, "range");
        // Build the consumer configuration and connect via ZooKeeper.
        ConsumerConfig consumerConfig = new kafka.consumer.ConsumerConfig(props);
        ConsumerConnector consumerConnector = kafka.consumer.Consumer.createJavaConsumerConnector(consumerConfig);
        // key: topic name; value: number of streams to open for that topic
        // (topic created with replication-factor 3, partitions 3).
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put(topic, STREAM_COUNT);

        Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector
                .createMessageStreams(topicCountMap);

        // hasNext() blocks indefinitely, so a sequential forEach would never move past
        // the first stream and the remaining streams would starve. Give each stream
        // its own consumer thread instead.
        ExecutorService executor = Executors.newFixedThreadPool(STREAM_COUNT);
        for (KafkaStream<byte[], byte[]> stream : consumerMap.get(topic)) {
            executor.submit(() -> {
                ConsumerIterator<byte[], byte[]> it = stream.iterator();
                while (it.hasNext()) {
                    // Decode explicitly as UTF-8; new String(byte[]) alone would
                    // use the platform-default charset.
                    log.info("【收到消息】{}", new String(it.next().message(), StandardCharsets.UTF_8));
                }
            });
        }
    }
}
