package study.mq.kafka.consumer.support.interceptor;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.Map;

@Slf4j
/**
 * Logging-only {@link ConsumerInterceptor} that traces the consumer lifecycle:
 * records returned by {@code poll()}, offset commits, configuration, and shutdown.
 * It never mutates or filters records — {@link #onConsume} returns its input unchanged.
 *
 * <p>Registered via the {@code interceptor.classes} consumer config property.
 */
@Slf4j
public class CustomInterceptor implements ConsumerInterceptor<String, String> {

    /**
     * Called once when the consumer is configured; receives the consumer's config map.
     *
     * @param configs consumer configuration (read-only here; only logged)
     */
    @Override
    public void configure(Map<String, ?> configs) {
        log.info(">>>>>> configure");
    }

    /**
     * Called just before {@code KafkaConsumer#poll(Duration)} returns records to the
     * application. This implementation only logs each record's position and passes the
     * batch through untouched.
     *
     * @param records the batch about to be delivered to the application
     * @return the same {@code records} instance, unmodified
     */
    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        log.info(">>>>>>>>>> onConsume >>>>>>>>>>");
        records.forEach(record ->
                // Log the position of each record; key/value are intentionally not logged.
                log.info("[{}, {}] current record offset= {}",
                        record.topic(), record.partition(), record.offset()));
        return records;
    }

    /**
     * Called when offsets are committed (manually or via auto-commit). Note that a
     * committed offset is the position of the <em>next</em> record to fetch, i.e.
     * last-consumed offset + 1.
     *
     * @param offsets committed offset (and metadata) per topic-partition
     */
    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        log.info(">>>>>>>>>> onCommit >>>>>>>>>>");
        offsets.forEach((tp, metadata) ->
                log.info("[{}, {}] next record offset= {}",
                        tp.topic(), tp.partition(), metadata.offset()));
    }

    /**
     * Called when the interceptor is closed (consumer shutdown); no resources to release.
     */
    @Override
    public void close() {
        log.info(">>>>>> close");
    }
}
