package com.yang.consumer.kafka.manual;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;

/**
 * Demonstrates manual offset control on a Kafka consumer via {@code seek}.
 *
 * <p>The consumer subscribes to {@link #topic}, waits until partitions are
 * assigned, rewinds every assigned partition to its beginning with
 * {@code seekToBeginning}, then polls in a loop and commits offsets manually.
 * Alternative seek strategies (fixed offset, end offsets, timestamp lookup,
 * offsets persisted in a database) are kept as commented examples.
 *
 * <p>Related consumer controls:
 * pause:  {@code consumer.pause(...)};
 * resume: {@code consumer.resume(...)};
 * stop:   {@code consumer.wakeup()}.
 */
public class KafkaConsumerSeekDemo {
    public static final String bootstrapServers = "192.168.137.31:9092,192.168.137.32:9092,192.168.137.33:9092";
    public static final String topic = "topic-demo";
    public static final String groupId = "consumer-demo";
    public static final String clientId = "consumer-demo-1";
    // Loop guard; flip to false (e.g. from a shutdown hook) to stop consuming.
    public static final AtomicBoolean isRunning = new AtomicBoolean(true);

    /**
     * Builds the consumer configuration.
     *
     * @return a {@link Properties} holding bootstrap servers, deserializers,
     *         group/client ids, manual commit mode and offset-reset policy
     */
    public static Properties initConfig() {
        Properties properties = new Properties();
        // Kafka cluster broker addresses
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // key deserializer
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // value deserializer (original comment incorrectly said "serializer")
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // consumer group id
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        // client id; if unset, defaults to consumer-1, consumer-2, ...
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
        // manual offset commit
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Policy when no committed offset exists or the offset is out of range.
        // Default: latest. Options: earliest, latest, none (throws an exception).
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        return properties;
    }

    /**
     * Entry point: subscribes, waits for partition assignment, seeks all
     * partitions to the beginning, then polls and commits synchronously.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        Properties properties = initConfig();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // Subscribe (repeated calls replace earlier ones); subscribe() enables
        // automatic rebalancing, unlike assign().
        consumer.subscribe(Collections.singletonList(topic));

        // seek() only works on assigned partitions, so poll until the group
        // coordinator has handed us an assignment.
        Set<TopicPartition> assignment = new HashSet<>();
        while (assignment.isEmpty()) { // non-empty => partitions assigned
            consumer.poll(Duration.ofMillis(100));
            assignment = consumer.assignment();
        }

        // Option A: seek to a known fixed offset
//        for (TopicPartition topicPartition : assignment) {
//            consumer.seek(topicPartition, 10);
//        }

        // Option B1: seek to the end offsets
//        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(assignment);
//        for (TopicPartition topicPartition : assignment) {
//            consumer.seek(topicPartition, endOffsets.get(topicPartition));
//        }

        // Option B2: seek to the beginning offsets
//        Map<TopicPartition, Long> beginOffsets = consumer.beginningOffsets(assignment);
//        for (TopicPartition topicPartition : assignment) {
//            consumer.seek(topicPartition, beginOffsets.get(topicPartition));
//        }

        // Option C: convenience methods
//        consumer.seekToEnd(assignment);
        consumer.seekToBeginning(assignment);

        // Option D: seek by timestamp — offsetsForTimes returns, per partition,
        // the first message whose timestamp is >= the queried time
        // (here: one day ago).
//        Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
//        for (TopicPartition topicPartition : assignment) {
//            timestampToSearch.put(topicPartition, System.currentTimeMillis() - 1 * 24 * 3600 * 1000);
//        }
//        Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = consumer.offsetsForTimes(timestampToSearch);
//        for (TopicPartition topicPartition : assignment) {
//            OffsetAndTimestamp offsetAndTimestamp = offsetsForTimes.get(topicPartition);
//            if (offsetAndTimestamp != null) {
//                consumer.seek(topicPartition, offsetAndTimestamp.offset());
//            }
//        }

        // Option E: offsets persisted externally (e.g. a database)
        // saveDB(topicPartition, consumerRecord.offset() + 1);
        // consumer.seek(topicPartition, <offset loaded from DB>);

        try {
            while (isRunning.get()) {
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));

                for (ConsumerRecord<String, String> record : consumerRecords) {
                    System.out.println(record.toString());
                }
                // Only commit when the poll actually returned records; avoids a
                // pointless commit round-trip on every empty poll.
                if (!consumerRecords.isEmpty()) {
                    consumer.commitSync();
                }
            }
        } catch (Exception e) {
            // NOTE(review): broad catch at the application boundary; a real
            // application should handle WakeupException separately on shutdown.
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }


}
