package git.soulbgm.monitor;

import git.soulbgm.pojo.dto.KcgInfo;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

/**
 * Kafka监控
 *
 * @author SoulBGM
 * @date 2024-10-08
 */
public class KafkaMonitor {

    /** Utility class — static methods only, not meant to be instantiated. */
    private KafkaMonitor() {
    }

    /**
     * Lists the IDs of all consumer groups known to the cluster.
     *
     * @param client Kafka admin client
     * @return {@link List}<{@link String}> of consumer group IDs
     * @throws ExecutionException   if the admin request fails
     * @throws InterruptedException if the calling thread is interrupted while waiting
     */
    public static List<String> getGroupIds(AdminClient client) throws ExecutionException, InterruptedException {
        ListConsumerGroupsResult result = client.listConsumerGroups();
        return result.all().get().stream()
                .map(ConsumerGroupListing::groupId)
                .collect(Collectors.toList());
    }

    /**
     * Collects per-partition committed-offset, end-offset, and lag information
     * for one consumer group.
     *
     * <p>Note: this reassigns {@code kafkaConsumer} to the group's partitions and
     * seeks it to the end, so the consumer should be dedicated to monitoring.
     *
     * @param client        Kafka admin client
     * @param kafkaConsumer consumer used to read end offsets and topic metadata
     * @param groupId       consumer group ID to inspect
     * @return {@link List}<{@link KcgInfo}> — one entry per partition that has a
     *         committed offset; empty if the group is unknown
     * @throws ExecutionException   if an admin request fails
     * @throws InterruptedException if the calling thread is interrupted while waiting
     */
    public static List<KcgInfo> getConsumerGroupInfo(AdminClient client, KafkaConsumer<?, ?> kafkaConsumer, String groupId) throws ExecutionException, InterruptedException {
        DescribeConsumerGroupsResult describeResult = client.describeConsumerGroups(Collections.singletonList(groupId));
        ConsumerGroupDescription groupDescription = describeResult.all().get().get(groupId);
        if (groupDescription == null) {
            return Collections.emptyList();
        }

        // Committed offsets per partition. NOTE: the map may contain null values
        // for partitions that never had an offset committed — guard before use.
        ListConsumerGroupOffsetsResult offsetsResult = client.listConsumerGroupOffsets(groupId);
        Map<TopicPartition, OffsetAndMetadata> offsets = offsetsResult.partitionsToOffsetAndMetadata().get();

        // Point the consumer at the group's partitions and seek to the log end
        // so position() below reports the latest offset of each partition.
        List<TopicPartition> partitions = new ArrayList<>(offsets.keySet());
        List<KcgInfo> infoList = new ArrayList<>(partitions.size());
        kafkaConsumer.assign(partitions);
        kafkaConsumer.seekToEnd(partitions);

        // Cache partitionsFor() per topic — it is loop-invariant for all
        // partitions of the same topic, no need to look it up repeatedly.
        Map<String, List<PartitionInfo>> metadataByTopic = new HashMap<>();
        String state = groupDescription.state().toString();

        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            OffsetAndMetadata committed = entry.getValue();
            if (committed == null) {
                // No committed offset for this partition — lag is undefined, skip.
                continue;
            }
            TopicPartition tp = entry.getKey();
            long currentOffset = committed.offset();
            long latestOffset = kafkaConsumer.position(tp);
            long lag = latestOffset - currentOffset;
            String host = leaderHost(kafkaConsumer, metadataByTopic, tp);

            infoList.add(new KcgInfo(groupId, state,
                    tp.topic(), tp.partition(), currentOffset, latestOffset, lag, host));
        }
        return infoList;
    }

    /**
     * Resolves the leader broker host for a partition, matching on the partition
     * id (the list returned by partitionsFor is not guaranteed to be index-ordered).
     * Returns an empty string when metadata or the leader is currently unavailable.
     */
    private static String leaderHost(KafkaConsumer<?, ?> kafkaConsumer,
                                     Map<String, List<PartitionInfo>> metadataByTopic,
                                     TopicPartition tp) {
        List<PartitionInfo> infos = metadataByTopic.computeIfAbsent(tp.topic(), kafkaConsumer::partitionsFor);
        if (infos == null) {
            return "";
        }
        for (PartitionInfo info : infos) {
            if (info.partition() == tp.partition()) {
                // leader() is null while a leader election is in progress.
                return info.leader() == null ? "" : info.leader().host();
            }
        }
        return "";
    }

}
