package com.gjy.kafka.k10;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.header.Header;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * 消费者功能：
 * <p>
 * 自动/手动提交偏移量
 * 指定分区消费
 * 从特定偏移量消费
 * 从头开始消费
 * 处理带headers的消息
 * 批量处理
 *
 * @author gjy
 * @version 1.0
 * @since 2025-10-03 11:09:01
 */
public class KafkaConsumerService {

    private static final Logger logger = LoggerFactory.getLogger(KafkaConsumerService.class);

    /** How long each poll() blocks waiting for records. */
    private static final Duration POLL_TIMEOUT = Duration.ofMillis(1000);

    private final Consumer<String, String> consumer;

    /**
     * Loop flag. Cleared by stop() from another thread; AtomicBoolean gives the
     * required cross-thread visibility.
     */
    private final AtomicBoolean running = new AtomicBoolean(false);

    public KafkaConsumerService(Properties props) {
        this.consumer = new KafkaConsumer<>(props);
    }

    /**
     * Exposes the underlying Consumer instance for custom consumption logic.
     * <p>
     * NOTE: KafkaConsumer is not thread-safe — callers must only use the returned
     * instance from the polling thread ({@code wakeup()} is the sole exception).
     */
    public Consumer<String, String> getConsumer() {
        return consumer;
    }

    /**
     * 1. Consume with auto-committed offsets.
     * <p>
     * Relies on {@code enable.auto.commit=true} in the constructor properties.
     * Blocks until {@link #stop()} is called; closes the consumer on exit.
     *
     * @param topic topic to subscribe to
     */
    public void consumeAutoCommit(String topic) {
        consumer.subscribe(Collections.singletonList(topic));
        running.set(true);

        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    processRecord(record);
                }
            }
        } catch (WakeupException e) {
            // Thrown inside poll() when stop() calls consumer.wakeup().
            // Only swallow it for a deliberate shutdown.
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 2. Consume with manual synchronous offset commits.
     * <p>
     * Commits after every non-empty batch; commitSync() blocks and retries, giving
     * at-least-once delivery. Blocks until {@link #stop()} is called.
     *
     * @param topic topic to subscribe to
     */
    public void consumeManualSyncCommit(String topic) {
        consumer.subscribe(Collections.singletonList(topic));
        running.set(true);

        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    processRecord(record);
                }

                // Commit only after the whole batch has been processed.
                if (!records.isEmpty()) {
                    consumer.commitSync();
                    logger.info("手动同步提交偏移量完成");
                }
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 3. Consume with manual asynchronous offset commits.
     * <p>
     * commitAsync() does not retry on failure, so a final synchronous commit is
     * issued on shutdown to avoid re-processing records whose async commit was
     * still in flight. Blocks until {@link #stop()} is called.
     *
     * @param topic topic to subscribe to
     */
    public void consumeManualAsyncCommit(String topic) {
        consumer.subscribe(Collections.singletonList(topic));
        running.set(true);

        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    processRecord(record);
                }

                // Fire-and-forget commit; the callback only reports the outcome.
                if (!records.isEmpty()) {
                    consumer.commitAsync((offsets, exception) -> {
                        if (exception != null) {
                            // Pass the Throwable as the last argument so SLF4J
                            // logs the full stack trace, not just the message.
                            logger.error("异步提交偏移量失败: {}", exception.getMessage(), exception);
                        } else {
                            logger.info("异步提交偏移量成功");
                        }
                    });
                }
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            try {
                // Standard shutdown pattern: flush offsets synchronously because
                // pending commitAsync() calls give no delivery guarantee.
                consumer.commitSync();
            } catch (Exception e) {
                logger.error("关闭前同步提交偏移量失败", e);
            } finally {
                consumer.close();
            }
        }
    }

    /**
     * 4. Consume explicitly assigned partitions (no consumer-group rebalancing).
     *
     * @param topic      topic to read from
     * @param partitions partition numbers to assign
     */
    public void consumeSpecificPartitions(String topic, List<Integer> partitions) {
        List<TopicPartition> topicPartitions = new ArrayList<>();
        for (Integer partition : partitions) {
            topicPartitions.add(new TopicPartition(topic, partition));
        }

        // assign() bypasses group management entirely.
        consumer.assign(topicPartitions);
        running.set(true);

        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    logger.info("分区消费 - Partition: {}, Offset: {}, Key: {}, Value: {}",
                            record.partition(), record.offset(), record.key(), record.value());
                }

                consumer.commitSync();
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 5. Consume a single partition starting from a specific offset.
     *
     * @param topic     topic to read from
     * @param partition partition number
     * @param offset    offset to seek to before the first poll
     */
    public void consumeFromOffset(String topic, int partition, long offset) {
        TopicPartition topicPartition = new TopicPartition(topic, partition);
        // seek() requires an explicit assignment; with assign() (unlike
        // subscribe()) the assignment is immediate, so seeking here is safe.
        consumer.assign(Collections.singletonList(topicPartition));
        consumer.seek(topicPartition, offset);

        running.set(true);
        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    logger.info("指定偏移量消费 - Offset: {}, Value: {}", record.offset(), record.value());
                }

                consumer.commitSync();
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 6. Consume from the beginning of every assigned partition.
     * <p>
     * Seeking is done inside the rebalance callback: with subscribe(), partition
     * assignment happens lazily inside poll(), so seeking right after subscribe()
     * would race the assignment (and a throwaway "trigger" poll can silently
     * discard the records it returns).
     *
     * @param topic topic to subscribe to
     */
    public void consumeFromBeginning(String topic) {
        consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Nothing to clean up before losing partitions.
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // Runs on the polling thread as soon as partitions are assigned,
                // before any records from them are returned.
                consumer.seekToBeginning(partitions);
            }
        });

        running.set(true);
        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    logger.info("从头消费 - Offset: {}, Value: {}", record.offset(), record.value());
                }

                consumer.commitSync();
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 7. Consume messages and log their headers.
     * <p>
     * Header values are raw bytes; they are decoded with the platform default
     * charset here, matching the original behavior — assumes producers wrote
     * UTF-8-compatible text (TODO confirm against the producer side).
     *
     * @param topic topic to subscribe to
     */
    public void consumeWithHeaders(String topic) {
        consumer.subscribe(Collections.singletonList(topic));
        running.set(true);

        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    logger.info("消息内容 - Key: {}, Value: {}", record.key(), record.value());

                    for (Header header : record.headers()) {
                        logger.info("Header - Key: {}, Value: {}", header.key(), new String(header.value()));
                    }
                }

                consumer.commitSync();
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 8. Process messages in batches of at least {@code batchSize}.
     * <p>
     * Records are accumulated across polls until the batch threshold is reached.
     * (Previously, any poll returning fewer than batchSize records was silently
     * dropped: the fetch position advanced without processing or committing those
     * records, losing them.)
     *
     * @param topic     topic to subscribe to
     * @param batchSize minimum number of messages per processed batch
     */
    public void consumeBatch(String topic, int batchSize) {
        consumer.subscribe(Collections.singletonList(topic));
        running.set(true);

        // Buffer of message values carried over between polls.
        List<String> buffer = new ArrayList<>(Math.max(batchSize, 1));

        try {
            while (running.get()) {
                ConsumerRecords<String, String> records = consumer.poll(POLL_TIMEOUT);

                for (ConsumerRecord<String, String> record : records) {
                    buffer.add(record.value());
                }

                if (buffer.size() >= batchSize) {
                    processBatch(buffer);
                    // Commit only after the batch was processed successfully.
                    consumer.commitSync();
                    logger.info("批量处理了 {} 条消息", buffer.size());
                    buffer.clear();
                }
            }
        } catch (WakeupException e) {
            if (running.get()) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * 9. Request the consume loop to stop.
     * <p>
     * Safe to call from any thread: wakeup() is the only thread-safe
     * KafkaConsumer method. It aborts a blocking poll() on the consuming thread
     * by raising WakeupException there, so shutdown is immediate rather than
     * waiting out the poll timeout.
     */
    public void stop() {
        running.set(false);
        consumer.wakeup();
        logger.info("消费者正在停止...");
    }

    /**
     * 10. Close the consumer.
     * <p>
     * KafkaConsumer.close() is idempotent, so this is safe even after a consume
     * loop has already closed the consumer in its finally block.
     */
    public void close() {
        consumer.close();
        logger.info("Kafka消费者已关闭");
    }

    /** Logs a single record's coordinates and payload. */
    private void processRecord(ConsumerRecord<String, String> record) {
        logger.info("收到消息 - Topic: {}, Partition: {}, Offset: {}, Key: {}, Value: {}",
                record.topic(), record.partition(), record.offset(), record.key(), record.value());
    }

    /** Placeholder for batch processing logic; currently just logs the batch. */
    private void processBatch(List<String> messages) {
        logger.info("批量处理消息: {}", messages);
    }

}
