package bigdata.backend.monitor.utils;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;

/**
 * Generic Kafka consumption-progress query utility (on-demand variant).
 * <p>
 * Each call creates and disposes its own client connections, which suits
 * infrequent on-demand queries rather than continuous monitoring.
 */
public class KafkaProgressUtils {

    /** Non-instantiable utility class. */
    private KafkaProgressUtils() {
    }

    /**
     * Queries how far a consumer group has progressed through a topic.
     *
     * @param bootstrapServers Kafka cluster address, e.g. "localhost:9092,localhost:9093"
     * @param consumerGroupId  consumer group ID to inspect
     * @param topicName        topic to query
     * @return a {@link TopicProgressInfo} with per-partition detail; if the topic is
     *         missing the object is returned empty, and if the query fails its
     *         {@code getError()} is populated instead of throwing
     */
    public static TopicProgressInfo getTopicProgress(String bootstrapServers, String consumerGroupId, String topicName) {
        // AdminClient properties: only the cluster address is required here.
        Properties adminProps = new Properties();
        adminProps.put("bootstrap.servers", bootstrapServers);

        TopicProgressInfo progressInfo = new TopicProgressInfo(topicName);

        // try-with-resources guarantees both clients are closed on every exit path,
        // replacing the manual null-checked finally cleanup.
        try (AdminClient adminClient = AdminClient.create(adminProps);
             Consumer<String, String> kafkaConsumer = new KafkaConsumer<>(buildConsumerProps(bootstrapServers))) {

            // 1. Resolve the topic's partitions.
            System.out.println("Fetching topic partitions for: " + topicName);
            Map<String, List<PartitionInfo>> topicPartitions = kafkaConsumer.listTopics(Duration.ofSeconds(10));
            List<PartitionInfo> partitions = topicPartitions.get(topicName);

            if (partitions == null || partitions.isEmpty()) {
                System.out.println("Topic '" + topicName + "' not found or has no partitions.");
                return progressInfo; // return the empty progress object
            }

            // Build the TopicPartition set used for the end-offset lookup.
            Set<TopicPartition> topicPartitionsSet = new HashSet<>();
            for (PartitionInfo partitionInfo : partitions) {
                topicPartitionsSet.add(new TopicPartition(topicName, partitionInfo.partition()));
            }

            // 2. Committed offsets of the consumer group.
            System.out.println("Fetching committed offsets for group: " + consumerGroupId);
            ListConsumerGroupOffsetsResult groupOffsetsResult = adminClient.listConsumerGroupOffsets(consumerGroupId);
            Map<TopicPartition, OffsetAndMetadata> committedOffsets = groupOffsetsResult.partitionsToOffsetAndMetadata().get();

            // 3. Latest offset (log end offset) of every partition.
            System.out.println("Fetching log end offsets for topic: " + topicName);
            Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(topicPartitionsSet, Duration.ofSeconds(10));

            // 4. Compute per-partition progress and topic-level totals.
            long totalConsumed = 0;
            long totalMessages = 0;
            long totalLag = 0;

            System.out.println("Calculating progress for topic: " + topicName);
            for (PartitionInfo partitionInfo : partitions) {
                TopicPartition tp = new TopicPartition(topicName, partitionInfo.partition());
                long endOffset = endOffsets.getOrDefault(tp, 0L);
                // Look up this partition's committed offset in the group's offsets.
                OffsetAndMetadata offsetAndMetadata = committedOffsets.get(tp);
                long committedOffset = 0;
                if (offsetAndMetadata != null) {
                    committedOffset = offsetAndMetadata.offset();
                } else {
                    // No committed offset for this partition: where the real consumer
                    // would start depends on auto.offset.reset; we conservatively
                    // count consumed = 0.
                    System.out.println("No committed offset found for partition " + tp + " in group " + consumerGroupId + ". Assuming 0.");
                }

                long lag = Math.max(0, endOffset - committedOffset); // clamp so lag is never negative
                long consumedMessages = committedOffset;

                totalConsumed += consumedMessages;
                totalMessages += endOffset;
                totalLag += lag;

                // Record partition-level progress.
                progressInfo.addPartitionProgress(tp.partition(), consumedMessages, endOffset, lag);
                System.out.println("  Partition " + tp.partition() +
                        ": consumed=" + consumedMessages +
                        ", total=" + endOffset +
                        ", lag=" + lag);
            }

            // Topic-level summary.
            progressInfo.setTotalConsumed(totalConsumed);
            progressInfo.setTotalMessages(totalMessages);
            progressInfo.setTotalLag(totalLag);
            System.out.println("Total - consumed=" + totalConsumed +
                    ", total=" + totalMessages +
                    ", lag=" + totalLag);

        } catch (InterruptedException e) {
            System.err.println("Thread interrupted while getting Kafka consumer progress for topic '" + topicName + "': " + e.getMessage());
            e.printStackTrace();
            Thread.currentThread().interrupt(); // important: restore the interrupt flag
            progressInfo.setError("Thread interrupted: " + e.getMessage());
        } catch (ExecutionException e) {
            System.err.println("Error getting Kafka consumer progress for topic '" + topicName + "': " + e.getMessage());
            e.printStackTrace();
            progressInfo.setError("Execution error: " + e.getMessage());
        }

        return progressInfo;
    }

    /**
     * Builds properties for the throwaway consumer used only to read offsets.
     * <p>
     * A random group ID avoids interfering with real consumers; Kafka purges
     * metadata of long-inactive groups per {@code offsets.retention.minutes}
     * (typically 7 days by default). Auto-commit is disabled so this consumer
     * never writes offsets of its own.
     */
    private static Properties buildConsumerProps(String bootstrapServers) {
        Properties consumerProps = new Properties();
        consumerProps.put("bootstrap.servers", bootstrapServers);
        consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // UUID.toString() is implicit in string concatenation.
        consumerProps.put("group.id", "temp-offset-query-group-" + UUID.randomUUID());
        consumerProps.put("enable.auto.commit", "false");
        return consumerProps;
    }

    /**
     * Progress information for a single topic.
     */
    public static class TopicProgressInfo {
        private final String topicName;
        private long totalConsumed = 0;
        private long totalMessages = 0;
        private long totalLag = 0;
        private final List<PartitionProgress> partitionProgressList = new ArrayList<>();
        // Holds any error encountered while querying; null on success.
        private String error = null;

        public TopicProgressInfo(String topicName) {
            this.topicName = topicName;
        }

        public void addPartitionProgress(int partition, long consumedMessages, long totalMessages, long lag) {
            partitionProgressList.add(new PartitionProgress(partition, consumedMessages, totalMessages, lag));
        }

        // Getters
        public String getTopicName() { return topicName; }
        public long getTotalConsumed() { return totalConsumed; }
        public void setTotalConsumed(long totalConsumed) { this.totalConsumed = totalConsumed; }
        public long getTotalMessages() { return totalMessages; }
        public void setTotalMessages(long totalMessages) { this.totalMessages = totalMessages; }
        public long getTotalLag() { return totalLag; }
        public void setTotalLag(long totalLag) { this.totalLag = totalLag; }
        /** @return an unmodifiable view; partitions are added via {@link #addPartitionProgress}. */
        public List<PartitionProgress> getPartitionProgressList() { return Collections.unmodifiableList(partitionProgressList); }
        public String getError() { return error; }
        public void setError(String error) { this.error = error; }

        @Override
        public String toString() {
            if (error != null) {
                return "TopicProgressInfo{topicName='" + topicName + "', error='" + error + "'}";
            }
            return "TopicProgressInfo{" +
                    "topicName='" + topicName + '\'' +
                    ", totalConsumed=" + totalConsumed +
                    ", totalMessages=" + totalMessages +
                    ", totalLag=" + totalLag +
                    ", partitionProgressList=" + partitionProgressList +
                    '}';
        }

        /**
         * Progress information for a single partition of a topic.
         */
        public static class PartitionProgress {
            private final int partition;
            private final long consumedMessages; // messages consumed so far (committed offset)
            private final long totalMessages;    // total messages (log end offset)
            private final long lag;              // messages not yet consumed

            public PartitionProgress(int partition, long consumedMessages, long totalMessages, long lag) {
                this.partition = partition;
                this.consumedMessages = consumedMessages;
                this.totalMessages = totalMessages;
                this.lag = lag;
            }

            // Getters
            public int getPartition() { return partition; }
            public long getConsumedMessages() { return consumedMessages; }
            public long getTotalMessages() { return totalMessages; }
            public long getLag() { return lag; }

            @Override
            public String toString() {
                return "PartitionProgress{" +
                        "partition=" + partition +
                        ", consumedMessages=" + consumedMessages +
                        ", totalMessages=" + totalMessages +
                        ", lag=" + lag +
                        '}';
            }
        }
    }
}