package yunjiao.javatutorials.apache.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Best-practice Kafka consumer: tracks processed offsets per partition, commits
 * asynchronously on the hot path with a final synchronous commit, commits before
 * rebalances, and shuts down gracefully via {@link KafkaConsumer#wakeup()}.
 *
 * <p>Thread-safety: {@code KafkaConsumer} is NOT thread-safe. All consumer calls
 * happen on the thread running {@link #startConsuming()}; other threads may only
 * call {@link #stop()}, which uses {@code wakeup()} — the single method the Kafka
 * client documents as safe to invoke from another thread.
 *
 * @author yangyunjiao
 */
@Slf4j
public class KafkaBestPracticeConsumer {
    private final KafkaConsumer<String, String> consumer;
    private final String topic;
    private volatile boolean running = true;
    // Ensures consumer.close() runs at most once (finally block vs. repeated calls).
    private final AtomicBoolean closed = new AtomicBoolean(false);
    // Per-partition "next offset to consume" for records we have fully processed.
    private final Map<TopicPartition, OffsetAndMetadata> currentOffsets = new ConcurrentHashMap<>();

    /**
     * Creates the consumer and subscribes to {@code topic} with a rebalance
     * listener that commits processed offsets before partitions are revoked.
     *
     * @param topic   topic to subscribe to
     * @param groupId consumer group id passed to {@code Configs.getConsumerConfigs}
     */
    public KafkaBestPracticeConsumer(String topic, String groupId) {
        Properties props = Configs.getConsumerConfigs(groupId);
        this.consumer = new KafkaConsumer<>(props);
        this.topic = topic;

        consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                log.info("分区被撤销: {}", partitions);
                // Commit synchronously before the rebalance completes so the new
                // owner of these partitions does not reprocess our records.
                commitOffsetsSync();
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                log.info("分配到新分区: {}", partitions);
                // Offsets tracked for previously-owned partitions are stale now.
                currentOffsets.clear();
            }
        });

        log.info("Kafka消费者初始化完成，主题: {}, 消费组: {}", topic, groupId);
    }

    /**
     * Poll loop. Blocks until {@link #stop()} is called (from any thread) or an
     * unrecoverable error occurs; always performs a final synchronous commit and
     * closes the consumer on the way out.
     */
    public void startConsuming() {
        log.info("开始消费消息...");

        try {
            while (running) {
                // Bounded poll so the running flag is re-checked periodically.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));

                if (records.isEmpty()) {
                    continue;
                }

                processRecordBatch(records);

                // Async commit on the hot path keeps throughput high; the final
                // sync commit below guarantees the last position is persisted.
                commitOffsetsAsync();
            }
        } catch (WakeupException e) {
            // Expected during shutdown: stop() calls consumer.wakeup() to break
            // out of a blocking poll(). Only rethrow if we were not stopping.
            if (running) {
                throw e;
            }
        } catch (Exception e) {
            log.error("消费过程中发生错误", e);
        } finally {
            commitOffsetsSync();
            close();
        }
    }

    /**
     * Processes one polled batch, recording the next-to-consume offset for each
     * successfully handled record. Failures are logged and do not stop the batch.
     */
    private void processRecordBatch(ConsumerRecords<String, String> records) {
        int processedCount = 0;

        for (ConsumerRecord<String, String> record : records) {
            try {
                processSingleRecord(record);
                processedCount++;

                // Kafka commit semantics: commit offset + 1 (the NEXT offset to read).
                TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
                currentOffsets.put(topicPartition, new OffsetAndMetadata(record.offset() + 1));

            } catch (Exception e) {
                log.error("处理消息失败: topic={}, partition={}, offset={}, key={}",
                        record.topic(), record.partition(), record.offset(), record.key(), e);

                // NOTE(review): failed records are skipped (their offset is not
                // recorded, but later offsets in the same partition may be).
                // Route to a dead-letter topic or stop the loop if at-least-once
                // handling of every record is required.
            }
        }

        log.debug("成功处理 {} 条消息", processedCount);
    }

    /**
     * Business-logic hook for one record; currently logs the record and simulates
     * work with a short sleep.
     */
    private void processSingleRecord(ConsumerRecord<String, String> record) {
        log.info("消费消息: key={}, value={}, partition={}, offset={}",
                record.key(), record.value(), record.partition(), record.offset());

        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            // Preserve the interrupt status for the poll loop / caller.
            Thread.currentThread().interrupt();
        }
    }

    /**
     * Commits the tracked offsets asynchronously. A snapshot copy is passed so
     * the poll loop can keep mutating {@code currentOffsets} while the commit is
     * in flight.
     */
    private void commitOffsetsAsync() {
        if (currentOffsets.isEmpty()) {
            return;
        }

        consumer.commitAsync(new HashMap<>(currentOffsets), (offsets, exception) -> {
            if (exception != null) {
                // Async commit failures are non-fatal: a later async or the final
                // sync commit will retry with newer offsets.
                log.error("异步提交偏移量失败", exception);
            } else {
                log.debug("异步提交偏移量成功: {}", offsets);
            }
        });
    }

    /**
     * Commits the tracked offsets synchronously (with retries inside the client).
     * Used before rebalances and on shutdown, where durability matters most.
     */
    private void commitOffsetsSync() {
        try {
            if (!currentOffsets.isEmpty()) {
                consumer.commitSync(currentOffsets);
                log.debug("同步提交偏移量成功");
            }
        } catch (Exception e) {
            log.error("同步提交偏移量失败", e);
        }
    }

    /**
     * Signals the poll loop to stop. Safe to call from ANY thread: wakeup() is
     * the only KafkaConsumer method documented as thread-safe; it aborts a
     * blocking poll() with a WakeupException.
     */
    public void stop() {
        running = false;
        consumer.wakeup();
        log.info("正在停止消费者...");
    }

    /**
     * Closes the consumer at most once. Must be invoked from the consuming
     * thread (it is, via the finally block in {@link #startConsuming()});
     * other threads should call {@link #stop()} instead.
     */
    public void close() {
        running = false;
        if (closed.compareAndSet(false, true)) {
            try {
                consumer.close(Duration.ofSeconds(10));
                log.info("Kafka消费者已关闭");
            } catch (Exception e) {
                log.error("关闭消费者时发生错误", e);
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        KafkaBestPracticeConsumer consumer = new KafkaBestPracticeConsumer(
                "best-practices-topic",
                "best-practice-consumer-group"
        );

        // The hook only SIGNALS shutdown; the consumer thread performs the
        // actual close because KafkaConsumer is not thread-safe. Joining the
        // main thread keeps the JVM alive until the final commit + close finish.
        Thread mainThread = Thread.currentThread();
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            System.out.println("收到关闭信号，正在优雅关闭...");
            consumer.stop();
            try {
                mainThread.join(TimeUnit.SECONDS.toMillis(15));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }));

        // Blocks until stop() is called; cleanup happens in its finally block.
        consumer.startConsuming();
    }
}
