package com.greate.community.util;

import com.greate.community.event.EventConsumer;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.stream.Collectors;

@Component
@ConditionalOnProperty(name = "spring.kafka.consumer.enable-auto-commit", havingValue = "false")
public class OffsetManager {

    @Autowired
    private RedisTemplate<String, String> redisTemplate;

    // Logger named after this class so log lines identify their true origin.
    // (Was LogManager.getLogger(EventConsumer.class), which mislabeled every
    // entry emitted here as coming from EventConsumer.)
    private static final Logger logger = LogManager.getLogger(OffsetManager.class);

    /**
     * Persist consumed offsets to Redis as a hash: partition number -> offset.
     * No-op when {@code offsets} is null or empty.
     *
     * @param topic   Kafka topic name (part of the Redis key)
     * @param groupId consumer group ID (part of the Redis key)
     * @param offsets map of partition -> offset to store
     */
    public void saveOffset(String topic, String groupId, Map<TopicPartition, Long> offsets) {
        if (offsets != null && !offsets.isEmpty()) {
            Map<String, String> redisOffsets = new HashMap<>();

            for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
                int partition = entry.getKey().partition();
                long offset = entry.getValue();
                redisOffsets.put(String.valueOf(partition), String.valueOf(offset));
            }

            // Parameterized logging avoids string concatenation when the level is disabled.
            logger.info("提交 offset: {}", redisOffsets);

            // Store all partition offsets in a single Redis hash.
            String redisKey = "offsets:" + groupId + ":" + topic;
            redisTemplate.opsForHash().putAll(redisKey, redisOffsets);
        }
    }

    /**
     * Read stored offsets for a topic/group from Redis.
     * If Redis has no entry, falls back to the partitions' latest (end) offsets
     * fetched directly from Kafka, so a fresh consumer starts at the log end.
     *
     * @param topic   Kafka topic name
     * @param groupId consumer group ID
     * @return map of partition -> offset to resume from
     * @throws RuntimeException if the fallback fetch from Kafka fails
     */
    public Map<TopicPartition, Long> readOffset(String topic, String groupId) {
        String redisKey = "offsets:" + groupId + ":" + topic;
        Map<Object, Object> offsets = redisTemplate.opsForHash().entries(redisKey);

        logger.info("读取到 offset: {}", offsets);

        // Prefer the offsets previously saved to Redis.
        if (!offsets.isEmpty()) {
            // Hash fields/values were written as strings by saveOffset; parse them back.
            return offsets.entrySet().stream()
                    .collect(Collectors.toMap(
                            entry -> new TopicPartition(topic, Integer.parseInt((String) entry.getKey())),
                            entry -> Long.parseLong((String) entry.getValue())
                    ));
        }

        // Nothing stored yet: start consuming from each partition's latest position.
        try {
            Map<TopicPartition, Long> endOffsets = getEndOffsets(topic);
            logger.info("从 Kafka 获取分区的最新偏移量: {}", endOffsets);
            return endOffsets;
        } catch (Exception e) {
            logger.error("获取分区最新偏移量失败", e);
            throw new RuntimeException("获取分区最新偏移量失败", e);
        }
    }

    /**
     * Fetch the latest (end) offset of every partition of {@code topic} using a
     * short-lived throwaway consumer. Used when no offsets are stored in Redis.
     */
    private Map<TopicPartition, Long> getEndOffsets(String topic) {
        Properties props = new Properties();
        // NOTE(review): broker address is hard-coded; should come from
        // spring.kafka.bootstrap-servers configuration — TODO confirm and inject.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "offset-fetcher"); // temporary group, never commits
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // key/value deserializers are REQUIRED configs: without them the
        // KafkaConsumer constructor throws ConfigException and this fallback
        // path could never run. Raw bytes are fine — we only read offsets.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.ByteArrayDeserializer");

        // try-with-resources guarantees the temporary consumer is closed.
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // Discover all partitions of the topic.
            List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
                    .map(p -> new TopicPartition(topic, p.partition()))
                    .collect(Collectors.toList());

            // Ask the broker for each partition's end offset.
            return consumer.endOffsets(partitions);
        }
    }
}


