package com.kafka.study.consumer;

import com.kafka.study.model.PartitionState;
import com.kafka.study.util.TimeUtils;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ConsumerAwareRebalanceListener;
import org.springframework.stereotype.Service;

import java.time.Duration;
import java.time.Instant;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

/**
 * Time-window filter consumer for delayed-message delivery.
 *
 * <p>An external scheduler calls {@link #consume()} periodically. Each cycle polls
 * {@code timeTopic}, forwards records whose timestamp lies inside the configured
 * window to {@code businessTopic}, skips expired records, and stops at the first
 * "future" record so it (and everything after it) is re-polled on a later cycle.
 *
 * <p>Root cause of the original "cannot implement the functionality" defect: the old
 * {@code triggerRebalance()} helper re-subscribed WITHOUT the rebalance listener,
 * which meant {@code onPartitionsAssigned} never ran, {@code partitionStates} stayed
 * empty, and every poll cycle skipped all partitions. The listener must stay
 * registered for the whole consumer lifetime; the group join is driven by polling.
 */
@Service
@Slf4j
public class FilterConsumer {

    /** Raw Kafka consumer. NOT thread-safe: access is confined to synchronized entry points. */
    private final Consumer<String, String> consumer;

    /** Per-partition bookkeeping, populated/removed by the rebalance listener callbacks. */
    private final Map<TopicPartition, PartitionState> partitionStates = new ConcurrentHashMap<>();

    @Autowired
    public FilterConsumer(@Qualifier("timeConsumerFactory") ConsumerFactory<String, String> consumerFactory) {
        this.consumer = consumerFactory.createConsumer();
    }

    @Resource(name = "businessKafkaTemplate")
    KafkaTemplate<String, String> kafkaTemplate;

    /** Topic holding time-stamped (possibly future-dated) messages to be filtered. */
    @Value("${spring.kafka.template.time-topic}")
    private String timeTopic;

    /** Downstream topic that receives messages whose time has come. */
    @Value("${spring.kafka.template.business-topic}")
    private String businessTopic;

    /** Half-width of the delivery window, in seconds (window = now ± timeWindow). */
    @Value("${spring.kafka.consumer.time.time-window}")
    private int timeWindow;

    /** Poll timeout in milliseconds for each consume cycle. */
    @Value("${spring.kafka.consumer.time.poll-timeout:3000}")
    private int pollTimeout;

    /**
     * One consumption cycle, invoked by an external scheduling framework.
     *
     * <p>{@code synchronized} because {@link KafkaTemplate}-independent
     * {@code KafkaConsumer} instances are not thread-safe and the scheduler may
     * overlap invocations.
     */
    public synchronized void consume() {
        try {
            // Step 1: fetch a batch (partitions are assigned by the consumer group).
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(pollTimeout));
            if (records.isEmpty()) {
                log.warn("Records is Empty!");
                return;
            }

            // Step 2: process each partition independently and decide its commit offset.
            Map<TopicPartition, Long> offsetsToCommit = new HashMap<>();
            Set<TopicPartition> partitions = records.partitions();
            log.info("Fetched partitions: {}", partitions);
            for (TopicPartition partition : partitions) {
                PartitionState state = partitionStates.get(partition);
                if (state == null) {
                    // Should only happen transiently around a rebalance; skip and
                    // leave the offset uncommitted so the records are re-polled.
                    log.warn("Partition state missing for {}", partition);
                    continue;
                }

                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                processPartitionRecords(partition, partitionRecords, state);

                long commitOffset = determineCommitOffset(partition, state);
                if (commitOffset != -1) {
                    offsetsToCommit.put(partition, commitOffset);
                }
            }

            // Step 3: commit explicitly (auto-commit is expected to be disabled).
            if (!offsetsToCommit.isEmpty()) {
                consumer.commitSync(offsetsToCommit.entrySet().stream()
                        .collect(Collectors.toMap(
                                Map.Entry::getKey,
                                e -> new OffsetAndMetadata(e.getValue())
                        )));
                log.debug("Committed offsets: {}", offsetsToCommit);
            }
        } catch (Exception e) {
            // Boundary catch: never let one bad cycle kill the scheduler thread.
            log.error("Error during consumption cycle", e);
        }
    }

    /**
     * Processes one partition's batch: forwards in-window records to the business
     * topic, skips expired ones, and stops at the first future-dated record.
     *
     * @param partition the partition being processed (for logging)
     * @param records   batch records in ascending offset order
     * @param state     mutable per-partition bookkeeping (offsets + watermark)
     */
    private void processPartitionRecords(TopicPartition partition,
                                         List<ConsumerRecord<String, String>> records,
                                         PartitionState state) {
        long now = Instant.now().toEpochMilli();
        long windowStart = now - (timeWindow * 1000L);
        long windowEnd = now + (timeWindow * 1000L);
        // Reset per-batch processing markers.
        state.setLastProcessedOffset(-1);
        state.setFirstFutureOffset(-1);
        // Advance the watermark monotonically to the window's lower edge.
        if (state.getWatermarkTimestamp() < windowStart) {
            state.setWatermarkTimestamp(windowStart);
        }
        for (ConsumerRecord<String, String> record : records) {
            long recordTime = record.timestamp();
            long recordOffset = record.offset();

            // First future-dated record: remember its offset and STOP. Everything
            // from this offset onwards is re-polled after firstFutureOffset is
            // committed; processing later in-window records now would deliver
            // them twice (the original code used `continue` here, causing
            // duplicate sends on the next cycle).
            if (recordTime > windowEnd) {
                state.setFirstFutureOffset(recordOffset);
                log.info("Future message found: partition={}, offset={}, time={}, msgId={}",
                        partition.partition(), recordOffset, TimeUtils.timestampToDate(recordTime), record.key());
                break;
            }

            // Expired record: below the watermark, drop it.
            if (recordTime < state.getWatermarkTimestamp()) {
                log.trace("Skipping old record: partition={}, offset={}, time={}, msgId={}",
                        partition.partition(), recordOffset, TimeUtils.timestampToDate(recordTime), record.key());
                continue;
            }

            // In-window record: forward to the business topic, preserving traceId.
            try {
                Headers headers = new RecordHeaders();
                if (record.headers().lastHeader("traceId") != null) {
                    headers.add("traceId", record.headers().lastHeader("traceId").value());
                }

                ProducerRecord<String, String> businessRecord = new ProducerRecord<>(
                        businessTopic, null, record.timestamp(),
                        record.key(), record.value(), headers);

                // Blocking send so lastProcessedOffset is only advanced on success.
                kafkaTemplate.send(businessRecord).get();
                state.setLastProcessedOffset(recordOffset);

                // Watermark tracks the newest timestamp actually delivered.
                if (recordTime > state.getWatermarkTimestamp()) {
                    state.setWatermarkTimestamp(recordTime);
                }

                log.info("Processed record: partition={}, offset={}, time={}, msgId={}",
                        partition.partition(), recordOffset, TimeUtils.timestampToDate(recordTime), record.key());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status
                log.error("Failed to process record: partition={}, offset={}, msgId={}",
                        partition.partition(), recordOffset, record.key(), e);
                break; // stop the batch; the failed record stays uncommitted and is retried
            } catch (ExecutionException e) {
                log.error("Failed to process record: partition={}, offset={}, msgId={}",
                        partition.partition(), recordOffset, record.key(), e);
                break; // stop the batch; the failed record stays uncommitted and is retried
            }
        }
    }

    /**
     * Decides the offset to commit for a partition after a batch.
     *
     * @return the offset to commit, or -1 when nothing should be committed
     */
    private long determineCommitOffset(TopicPartition partition, PartitionState state) {
        if (state.getFirstFutureOffset() != -1) {
            // A future record was hit: commit its offset so the next cycle
            // re-polls from it (records before it were already forwarded).
            return state.getFirstFutureOffset();
        } else if (state.getLastProcessedOffset() != -1) {
            // Normal case: commit one past the last successfully forwarded record.
            return state.getLastProcessedOffset() + 1;
        } else {
            // Batch contained only expired/failed records: commit nothing.
            return -1;
        }
    }

    /**
     * Subscribes to the time topic with a rebalance listener that creates the
     * per-partition state and seeks each assigned partition to its committed
     * offset (or the beginning when nothing was ever committed).
     *
     * <p>This listener MUST remain registered for the consumer's lifetime —
     * a plain {@code consumer.subscribe(topics)} replaces it with a no-op one.
     */
    private void subscribeWithRebalancedListener() {
        consumer.subscribe(Collections.singletonList(timeTopic), new ConsumerAwareRebalanceListener() {
            @Override
            public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> partitions) {
                log.info("分区已分配 >>> {}", partitions);

                synchronized (partitionStates) {
                    // Fetch offset ranges once for the whole assignment instead of
                    // once per partition (each call is a broker round-trip).
                    Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
                    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);

                    partitions.forEach(tp -> {
                        log.info("分区 {} 偏移量范围: [{}, {}]",
                                tp.partition(), beginningOffsets.get(tp), endOffsets.get(tp));

                        partitionStates.put(tp, new PartitionState());

                        // Resume from the committed position, or the earliest
                        // available offset when the group has no committed offset.
                        OffsetAndMetadata committed = consumer.committed(Collections.singleton(tp)).get(tp);
                        long seekTo = committed != null ? committed.offset() : beginningOffsets.get(tp);
                        consumer.seek(tp, seekTo);
                        log.info("分区 {} 定位到偏移量: {}", tp.partition(), seekTo);
                    });
                }
            }

            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                log.info("分区被回收 <<< {}", partitions);
                partitions.forEach(partitionStates::remove);
            }
        });
    }

    /**
     * Initializes the consumer on startup: subscribe with the stateful rebalance
     * listener, then poll until the group assigns partitions.
     */
    @PostConstruct
    public void initConsumer() {
        // 1. Clear any stale subscription state (useful in dev environments).
        resetConsumerGroupIfNeeded();

        // 2. Subscribe with the rebalance listener (with retries).
        int subscribeRetry = 3;
        while (subscribeRetry-- > 0) {
            try {
                subscribeWithRebalancedListener();
                break;
            } catch (Exception e) {
                log.error("Subscribe failed (retries left: {})", subscribeRetry, e);
            }
        }

        // 3. Drive the group join by polling and wait for the assignment.
        //    NOTE: the previous triggerRebalance() helper unsubscribed and then
        //    re-subscribed WITHOUT the listener, so partition state was never
        //    initialized and consume() skipped every partition. Polling alone is
        //    what triggers the rebalance; no re-subscribe is needed.
        awaitPartitionAssignment(10, Duration.ofSeconds(1));

        log.info("当前消费者元数据: groupId={}, memberId={}",
                consumer.groupMetadata().groupId(),
                consumer.groupMetadata().memberId());
    }

    /** Best-effort reset of any previous subscription (dev-environment helper). */
    private void resetConsumerGroupIfNeeded() {
        try {
            consumer.unsubscribe();
        } catch (Exception e) {
            log.warn("Failed to reset consumer state", e);
        }
    }

    /**
     * Polls until partitions are assigned or {@code maxRetry} attempts elapse.
     * Records returned by these bootstrap polls are NOT processed here; their
     * partitions are rewound so {@link #consume()} sees them again.
     */
    private void awaitPartitionAssignment(int maxRetry, Duration interval) {
        while (maxRetry-- > 0 && partitionStates.isEmpty()) {
            log.info("等待分区分配... (剩余重试: {})", maxRetry);
            ConsumerRecords<String, String> prefetched = consumer.poll(interval);

            // The poll above may already return data. Discarding it would lose
            // messages permanently (the in-memory position has advanced and a
            // later commitSync would seal the skip), so rewind each partition
            // to the first prefetched record.
            for (TopicPartition tp : prefetched.partitions()) {
                consumer.seek(tp, prefetched.records(tp).get(0).offset());
            }

            Set<TopicPartition> assigned = consumer.assignment();
            if (!assigned.isEmpty()) {
                log.info("分区分配成功: {}", assigned);
                return;
            }
        }

        if (partitionStates.isEmpty()) {
            log.error("分区分配失败！诊断步骤：");
            log.error("1. 执行命令确认主题存在: kafka-topics.sh --describe --topic {}", timeTopic);
            log.error("2. 检查消费组是否注册: kafka-consumer-groups.sh --group {} --describe", consumer.groupMetadata().groupId());
            log.error("3. 检查Broker日志是否有错误");
        }
    }

    /** Releases the consumer (commits nothing; leaves the group cleanly). */
    @PreDestroy
    public void destroy() {
        if (consumer != null) {
            consumer.close();
        }
    }
}
