package com.wt.springsamples.kafka.demo;

import com.wt.springsamples.kafka.Constants;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;

/**
 * Requires the broker's {@code listeners} and {@code advertised.listeners} settings to be
 * configured (they are commented out in the default broker config), e.g.:
 *   listeners=PLAINTEXT://127.0.0.1:9092 and advertised.listeners=PLAINTEXT://127.0.0.1:9092
 * or listeners=PLAINTEXT://172.22.240.20:9092 and advertised.listeners=PLAINTEXT://172.22.240.20:9092
 */
public class KafkaConsumerTest {
    /** Consumer group id shared by every instance of this demo consumer. */
    public static final String CONSUMER_GROUP_ID = "TEST_GROUP";

    /**
     * Demo entry point: subscribes to {@link Constants#TOPIC_NAME}, seeks to the beginning of
     * each assigned partition on rebalance, and manually commits the offset after handling
     * every record. Runs until the process is killed.
     */
    public static void main(String[] args) {

        Properties properties = new Properties();
//        properties.put("bootstrap.servers","172.22.240.20:9092");
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "172.30.226.167:9092");
        // Use the ConsumerConfig constant for consistency with the other keys below
        // (was the raw string "group.id").
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, CONSUMER_GROUP_ID);
//        properties.put("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
//        properties.put("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Disable auto-commit: offsets are committed manually after each record is handled.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
//        properties.put("group.min.session.timeout.ms",10);
//        properties.put("group.max.session.timeout.ms",10000);

        /*
         * session.timeout.ms: if no heartbeat arrives within this window the consumer is
         * considered dead and a rebalance is triggered. Heartbeats are driven by poll(),
         * so per-record processing must not take too long.
         */
//        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG,2000);

        /*
         * max.partition.fetch.bytes: maximum bytes returned per partition (default 1 MB).
         * The broker's maximum message size also defaults to 1 MB; if
         * max.partition.fetch.bytes is smaller than the broker's max message size, the
         * consumer cannot read oversized messages.
         */
//        properties.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG,2048);

        // Parameterized type instead of the raw KafkaConsumer — avoids unchecked operations
        // and makes poll() return ConsumerRecords<String,String> without a cast.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Collections.singletonList(Constants.TOPIC_NAME), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Called before a rebalance starts and after the consumer has stopped reading.
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // Called after a rebalance and before the consumer starts reading messages.
                System.out.println("------再均衡-------");
                // Re-read every assigned partition from its first available offset.
                consumer.seekToBeginning(partitions);
//                Map<TopicPartition, Long>  mp = consumer.beginningOffsets(partitions);
//                for(Map.Entry<TopicPartition, Long> entry:mp.entrySet()){
//                    TopicPartition partition = entry.getKey();
//                    Long offset = entry.getValue();
//                    consumer.seek(partition,offset);
//                }
            }
        });
//        Set<TopicPartition> assignment = consumer.assignment();
//        consumer.seekToBeginning(assignment);
        System.out.println(" 接收 kafka message:");
        try {
            while (true) {
                // Block for up to 100 ms waiting for records. The original code passed 0,
                // which returns immediately and busy-spins the loop; 100 ms matches the
                // behavior the original comment described.
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(100));
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                for (ConsumerRecord<String, String> record : consumerRecords) {

                    //todo handle message
                    System.out.println(record.value());
                    // Commit an explicit offset: offset + 1 is the position of the NEXT
                    // record to be consumed after a restart.
                    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
                    offsets.put(topicPartition, new OffsetAndMetadata(record.offset() + 1));
                    consumer.commitSync(offsets);
                }
                // Async alternative:
                //consumer.commitAsync(); // commits the latest offsets returned by poll()
            }
        } finally {
            // Sync alternative:
            //consumer.commitSync(); // commits the latest offsets returned by poll()
            consumer.close();
        }
    }
}
