package com.whale.springboot.kafka.java;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.util.*;

/**
 * Demo: compute a consumer group's committed offset total, the topic's end-offset
 * total, and the resulting lag for a single topic.
 */
public class Demo2 {

    public static void main(String[] args) {
        String address = "192.168.179.131:9092";
        String group = "JKH1";
        String topic = "test-more";

        Properties consumerProp = new Properties();
        consumerProp.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, address);
        consumerProp.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        consumerProp.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProp.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        // KafkaConsumer holds network connections and buffers; it must always be
        // closed, so manage it with try-with-resources.
        try (Consumer<String, String> consumer = new KafkaConsumer<>(consumerProp)) {
            Long commitOffset = getTopicCommitOffset(consumer, topic);
            Long endOffset = getTopicEndOffset(consumer, topic);

            // Lag = messages written to the topic that this group has not yet committed.
            System.out.println("topic=" + topic
                    + " committed=" + commitOffset
                    + " end=" + endOffset
                    + " lag=" + (endOffset - commitOffset));
        }
    }

    /**
     * Sums the committed offsets of the consumer group across every partition of
     * {@code topic}.
     *
     * <p>A topic's messages are spread over its partitions, so the group's total
     * consumed count is the sum of the committed offset of each partition.
     * Partitions with no committed offset yet (null) contribute 0.
     *
     * @param consumer an open consumer configured with the target group id
     * @param topic    topic name to inspect
     * @return sum of committed offsets over all partitions of the topic
     */
    private static Long getTopicCommitOffset(Consumer<String, String> consumer, String topic) {
        long commitOffset = 0L; // primitive accumulator avoids per-iteration autoboxing

        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
            OffsetAndMetadata committed = consumer.committed(topicPartition);
            // null means the group has never committed for this partition
            if (committed != null) {
                commitOffset += committed.offset();
            }
        }
        return commitOffset;
    }

    /**
     * Sums the end (log-end) offsets across every partition of {@code topic},
     * i.e. the total number of messages the topic has received.
     *
     * @param consumer an open consumer
     * @param topic    topic name to inspect
     * @return sum of end offsets over all partitions of the topic
     */
    public static Long getTopicEndOffset(Consumer<String, String> consumer, String topic) {
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        // Collect all TopicPartitions so endOffsets() can be fetched in one call.
        List<TopicPartition> partitions = new ArrayList<>(partitionInfos.size());
        for (PartitionInfo partitionInfo : partitionInfos) {
            partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }

        long endOffset = 0L; // primitive accumulator avoids per-iteration autoboxing
        for (Long value : consumer.endOffsets(partitions).values()) {
            endOffset += value;
        }
        return endOffset;
    }

}
