package com.grf.kafka.java.simple;

import ch.qos.logback.classic.Logger;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;

import java.time.Duration;
import java.util.*;

/**
 * Consumer example that manages partition offsets itself, using an
 * in-memory map to simulate an external offset store (e.g. Redis).
 *
 * @author guorf
 * @since 2024/4/4
 */
@Slf4j
public class ManagerOffsetConsumer {
    private static final String BOOTSTRAP_SERVERS = "127.0.0.1:9092";

    private static final String TOPIC = "disTopic";

    // In-memory stand-in for an external offset store (e.g. Redis).
    // Key: "topic-partition", value: last processed offset for that partition.
    private static final Map<String, Long> redisOffset = new HashMap<>();

    // Sentinel meaning "no offset recorded yet for this partition".
    private static final long NO_OFFSET = -1L;

    public static void main(String[] args) {
        // Part 1: consumer configuration
        Properties properties = new Properties();
        // Kafka broker address
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        // Every consumer must belong to a consumer group
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
        // Key deserializer class
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Value deserializer class
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Offsets are managed manually below, so auto-commit must not interfere
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // FIX: diamond operator instead of the raw KafkaConsumer type
        Consumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Collections.singletonList(TOPIC));
        while (true) {
            // Part 2: poll for messages with a 100 ms timeout.
            // FIX: the original used Duration.ofNanos(100) while the comment
            // claimed "100 ms" — a 100 ns timeout makes poll() return almost
            // immediately and busy-spins the loop.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));

            // Part 3: process messages, tracking offsets ourselves
            records.partitions().forEach(topicPartition -> {
                String topic = topicPartition.topic();
                int partition = topicPartition.partition();
                // FIX: bare "topic + partition" concatenation is ambiguous
                // ("t1" + 2 collides with "t" + 12); use an explicit separator.
                String offsetKey = topic + "-" + partition;
                // Offset last recorded in the store; NO_OFFSET when unseen
                long redisKafkaOffset = redisOffset.getOrDefault(offsetKey, NO_OFFSET);
                log.info("read redis offset: {}", redisOffset);
                // All records of this batch for the given partition
                List<ConsumerRecord<String, String>> partitionRecords = records.records(topicPartition);
                partitionRecords.forEach(record -> {
                    // Already covered by the stored offset => duplicate, skip it
                    if (redisKafkaOffset >= record.offset()) {
                        return;
                    }
                    log.info("topic: {}, value: {}", record.topic(), record.value());
                });
                // records.partitions() only lists partitions that actually have
                // data in this batch, so partitionRecords is never empty here.
                long offset = partitionRecords.get(partitionRecords.size() - 1).offset();
                // Persist the highest offset of this batch for the partition
                redisOffset.put(offsetKey, offset);
                log.info("write redis offset: {}", redisOffset);
            });

            // Commit offsets so the broker does not redeliver this batch.
            // commitSync blocks until the commit is acknowledged before the next poll.
            consumer.commitSync();
            // consumer.commitAsync(); // async variant: sends the commit request and
            // continues consuming without waiting for the broker's acknowledgement
        }
    }

    /** Caps every logback logger at INFO to silence the Kafka client's DEBUG noise. */
    static {
        ch.qos.logback.classic.LoggerContext loggerContext = (ch.qos.logback.classic.LoggerContext) org.slf4j.LoggerFactory.getILoggerFactory();
        List<Logger> loggerList = loggerContext.getLoggerList();
        loggerList.forEach(logger -> logger.setLevel(ch.qos.logback.classic.Level.INFO));
    }
}
