package cn.com.demo.kafka;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult;
import org.apache.kafka.clients.admin.ListConsumerGroupsResult;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.admin.TopicListing;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
/**
 * Demo that reports consumer-group backlog (lag) per topic partition using the
 * Kafka {@link AdminClient}: backlog = latest end offset - committed group offset.
 */
public class KafkaCountDemo {

    /**
     * Lists every topic and every consumer group in the cluster, and for each
     * group that has an active member assigned to the topic prints the
     * per-partition backlog.
     *
     * <p>NOTE: only groups with ACTIVE members assigned to the topic are
     * reported; a group that has committed offsets but currently has no live
     * members is skipped (same behavior as the original demo).
     *
     * @throws ExecutionException   if any AdminClient request fails
     * @throws InterruptedException if interrupted while waiting for a response
     */
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put("bootstrap.servers", Config.BOOTSTRAP_SERVERS_CONFIG);
        // try-with-resources: AdminClient is AutoCloseable; previously the client
        // leaked whenever any of the .get() calls below threw.
        try (AdminClient adminClient = AdminClient.create(props)) {
            // 获取所有主题 (fetch all topics)
            ListTopicsResult topicsResult = adminClient.listTopics();
            Collection<TopicListing> topicListings = topicsResult.listings().get();

            // The consumer-group list is cluster-wide; fetch it ONCE instead of
            // re-issuing the request inside the per-topic loop.
            ListConsumerGroupsResult consumerGroupsResult = adminClient.listConsumerGroups();
            Collection<ConsumerGroupListing> consumerGroupListings = consumerGroupsResult.all().get();

            for (TopicListing topicListing : topicListings) {
                String topic = topicListing.name();
                System.out.println("topic-->" + topic);
                for (ConsumerGroupListing consumerGroupListing : consumerGroupListings) {
                    String groupId = consumerGroupListing.groupId();
                    System.out.println("topic-->" + topic + "----->" + groupId);
                    // 获取指定消费者组的描述信息 (describe this consumer group)
                    ConsumerGroupDescription consumerGroupDescription =
                            adminClient.describeConsumerGroups(Collections.singletonList(groupId))
                                    .all().get().get(groupId);
                    // 如果消费者组订阅了该主题，获取其分配的分区
                    // (if any live member is assigned a partition of this topic)
                    boolean subscribedToTopic = consumerGroupDescription.members().stream()
                            .anyMatch(member -> member.assignment().topicPartitions().stream()
                                    .anyMatch(tp -> tp.topic().equals(topic)));
                    if (subscribedToTopic) {
                        printBacklog(adminClient, groupId, topic);
                    }
                }
            }
        }
    }

    /**
     * Prints the backlog for every partition with a committed offset for
     * {@code groupId}.
     *
     * <p>(Method name keeps its original typo — "minitor" — for backward
     * compatibility with existing callers.)
     *
     * @param adminClient an open AdminClient; not closed by this method
     * @param groupId     the consumer group to inspect
     * @throws ExecutionException   if any AdminClient request fails
     * @throws InterruptedException if interrupted while waiting for a response
     */
    public static void minitorByGroup(AdminClient adminClient, String groupId)
            throws ExecutionException, InterruptedException {
        // Previously this method swallowed InterruptedException/ExecutionException
        // inside a lambda (printStackTrace) and then dereferenced a null Long,
        // causing an NPE on failure. Exceptions now propagate via the already
        // declared checked-exception signature.
        printBacklog(adminClient, groupId, null);
    }

    /**
     * Computes and prints per-partition backlog for {@code groupId}.
     *
     * @param adminClient an open AdminClient
     * @param groupId     the consumer group whose committed offsets are read
     * @param topicFilter when non-null, only partitions of this topic are
     *                    reported; when null, all committed partitions are
     * @throws ExecutionException   if any AdminClient request fails
     * @throws InterruptedException if interrupted while waiting for a response
     */
    private static void printBacklog(AdminClient adminClient, String groupId, String topicFilter)
            throws ExecutionException, InterruptedException {
        ListConsumerGroupOffsetsResult groupOffsetsResult = adminClient.listConsumerGroupOffsets(groupId);
        Map<TopicPartition, OffsetAndMetadata> partitionOffsets =
                groupOffsetsResult.partitionsToOffsetAndMetadata().get();

        // Batch all end-offset lookups into ONE listOffsets request instead of
        // one request per partition.
        Map<TopicPartition, OffsetSpec> latestSpecs = new HashMap<>();
        for (TopicPartition tp : partitionOffsets.keySet()) {
            if (topicFilter == null || tp.topic().equals(topicFilter)) {
                latestSpecs.put(tp, OffsetSpec.latest());
            }
        }
        if (latestSpecs.isEmpty()) {
            return;
        }
        ListOffsetsResult listOffsetsResult = adminClient.listOffsets(latestSpecs);
        for (TopicPartition tp : latestSpecs.keySet()) {
            // 获取指定分区的最新可用偏移量 (latest available offset of the partition)
            long latestOffset = listOffsetsResult.partitionResult(tp).get().offset();
            OffsetAndMetadata currentOffset = partitionOffsets.get(tp);
            // 计算积压消息数量 (compute backlog)
            long backlog = latestOffset - currentOffset.offset();
            System.out.println("Topic: " + tp.topic() + " Group: " + groupId
                    + " Partition: " + tp.partition() + " Backlog: " + backlog);
        }
    }
}
