package it.wyx.kafkaclients;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.util.StringUtils;

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

/**
 * Demo Kafka consumer that polls a topic, commits offsets synchronously, and mirrors
 * the last processed offset per partition into a Redis hash so that replayed records
 * can be detected and skipped.
 *
 * <p>Fixes over the previous revision: null-safe Redis read (missing hash entry used
 * to NPE), empty-batch guard before indexing the last record, offset stored as a
 * String to match {@code RedisTemplate<String, String>}, and interrupt status
 * restored before rethrowing.
 */
public class KafkaConsumerExample {

    public static final String TOPIC = "springboot-spring-kafka-demo-topic";

    // NOTE(review): a bare `new RedisTemplate<>()` has no connection factory configured,
    // so opsForHash() calls will fail at runtime — presumably this template is meant to
    // be a Spring-managed bean; confirm before running this example standalone.
    private static final RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "test-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList(TOPIC));

        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
                }

                // Commit the consumer's current position, then read back what was
                // committed for partition 0 of the demo topic.
                consumer.commitSync();
                TopicPartition partition = new TopicPartition(TOPIC, 0);
                OffsetAndMetadata offsetAndMetadata = consumer.committed(partition);
                Long committedOffset = offsetAndMetadata != null ? offsetAndMetadata.offset() : null;
                System.out.println("Committed Offset for partition 0: " + committedOffset);

                // NOTE(review): sleeping inside the poll loop risks exceeding
                // max.poll.interval.ms and triggering a rebalance — demo pacing only.
                Thread.sleep(3000);

                // Read the last processed offset for this partition from Redis.
                // opsForHash().get() returns null when the entry has never been written,
                // so guard before converting — calling toString() on null would NPE.
                Object cached = redisTemplate.opsForHash().get(partition.topic(), String.valueOf(partition.partition()));
                long redisOffset = cached == null ? -1L : Long.parseLong(cached.toString());

                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    // Redis offset >= record offset means this record was already
                    // processed in a previous run: skip it (dedup on replay).
                    if (redisOffset >= record.offset()) {
                        continue;
                    }
                    // TODO: actual per-record processing goes here.
                }

                // Persist the newest offset only when this poll actually returned
                // records for the partition — indexing size()-1 on an empty batch
                // would throw IndexOutOfBoundsException.
                if (!partitionRecords.isEmpty()) {
                    long saveRedisOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    // Store as a String so the value round-trips through
                    // RedisTemplate<String, String> consistently with the read above.
                    redisTemplate.opsForHash().put(partition.topic(), String.valueOf(partition.partition()), String.valueOf(saveRedisOffset));
                }
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/executors can observe it.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } finally {
            consumer.close();
        }
    }
}