package it.learn.kafka.infra.consumer.interceptor;

import it.learn.kafka.infra.common.SomeBean;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

/**
 * A {@link ConsumerInterceptor} for {@code <String, String>} records that logs record counts
 * on consume and hooks into offset commits.
 *
 * <p>An optional collaborator bean can be injected through the consumer configuration under
 * the key {@code "some.bean"} (see {@link #configure(Map)}).
 *
 * <p>Thread-safety: Kafka invokes interceptor callbacks from the consumer's polling thread,
 * so no additional synchronization is applied here.
 */
public class MyConsumerInterceptor0 implements ConsumerInterceptor<String, String> {

    /** Collaborator supplied via consumer config key "some.bean"; may be null if not configured. */
    private SomeBean bean;

    /**
     * Called after the consumer has received records but before they are handed to the
     * application. If this method throws, the Kafka consumer marks the batch as failed and
     * handles it according to the consumer's retry policy.
     *
     * @param records the records just fetched by the consumer
     * @return the records to pass on to the application (unmodified here)
     */
    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        // this.bean.someMethod("consumer interceptor");
        System.out.println("消费者收到信息，但是还未处理：");
        System.out.println("消息数量 = " + records.count());
        return records;
    }

    /** No resources to release; required by the {@link ConsumerInterceptor} contract. */
    @Override
    public void close() {

    }

    /**
     * Called when the consumer commits offsets. This callback cannot affect the commit
     * outcome: if it throws, the exception is caught and logged by the consumer, and the
     * commit still proceeds.
     *
     * @param offsets the per-partition offsets (and metadata) being committed
     */
    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        if (!offsets.isEmpty()) {
            // Intentionally a no-op; per-partition metadata logging was removed.
        }
    }

    /**
     * Reads the optional "some.bean" entry from the consumer configuration.
     * The value is expected to be a {@link SomeBean}; a mismatched type will surface
     * as a ClassCastException at configuration time.
     *
     * @param configs the consumer configuration map
     */
    @Override
    public void configure(Map<String, ?> configs) {
        this.bean = (SomeBean) configs.get("some.bean");
    }

}
