package chapter5.eg6;

import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import kafka.common.TopicAndPartition;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

/**
 * @author zhangxufeng@meitunmama.com 2019-08-31 09:50:33
 */
/**
 * Demonstrates manual offset management with a {@link ConsumerRebalanceListener}:
 * the latest processed offsets are tracked in a local map and committed
 * synchronously before partitions are revoked, so a rebalance never loses
 * or re-delivers already-processed records.
 */
public class ConsumerRebalanceListenerApp {
  public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // FIX: the deserializer configs must reference a Deserializer implementation.
    // The original passed StringSerializer here, which fails at consumer construction.
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group");

    // Tracks the latest consumed position per partition so it can be committed
    // before a rebalance takes the partition away. Values follow Kafka's commit
    // convention: the offset of the NEXT record to consume (last processed + 1).
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Collections.singleton("test-topic"), new ConsumerRebalanceListener() {
      @Override
      public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Flush the positions we have processed so far before losing ownership.
        consumer.commitSync(offsets);
      }

      @Override
      public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
          // FIX: committed() returns null when this group has never committed an
          // offset for the partition; the original dereferenced it unconditionally.
          OffsetAndMetadata committed = consumer.committed(partition);
          if (committed != null) {
            consumer.seek(partition, committed.offset());
          }
          // Otherwise fall back to auto.offset.reset ("earliest") — no seek needed.
        }

        offsets.clear();
      }
    });

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
      Set<TopicPartition> partitions = records.partitions();
      partitions.forEach(partition -> {
        List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
        partitionRecords.forEach(record -> {
          // Message-processing logic.
          System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());

          // Record the position to commit. FIX: commit record.offset() + 1 — the
          // committed offset is the next record to read; committing record.offset()
          // would re-deliver the last processed record after a restart/rebalance.
          OffsetAndMetadata next = new OffsetAndMetadata(record.offset() + 1);
          offsets.merge(partition, next,
              (oldVal, newVal) -> newVal.offset() > oldVal.offset() ? newVal : oldVal);

          // This achieves at-most-once-loss, per-record semantics: each message's
          // position is committed right after processing. A coarser granularity
          // (one commit per partition batch, outside this loop) is also possible,
          // at the cost of possible re-delivery if consumption stops mid-batch.
          consumer.commitSync(offsets);
        });
      });
    }
  }
}
