package com.enjoy.kafka.hellokafka;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.*;

public class ConsumerTest {
    private static final Logger logger = LoggerFactory.getLogger(ConsumerTest.class);

    /**
     * Demonstrates the main {@code KafkaConsumer} consumption patterns against a
     * local broker: manual partition assignment, seek-to-beginning, seek-to-offset,
     * seek-by-timestamp ({@code offsetsForTimes}), and manual offset commits.
     *
     * <p>NOTE(review): this "test" polls forever — it is a runnable demo, not an
     * assertion-based unit test, and it requires a reachable broker at
     * 192.168.126.128:9092.
     */
    @Test
    public void test() {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.126.128:9092");
        // Consumer group id.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "HaiMQLogAccess");
        // Disable auto-commit; offsets are committed manually after each batch below.
        // (auto.commit.interval.ms is ignored when auto-commit is off, so it is not set.)
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Deserializers must agree with the consumer's generic types (<String, String>).
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        // NOTE(review): the original also set zookeeper.* / request.required.acks /
        // num.partitions / threadPerTopic. None of these are KafkaConsumer configs
        // (they belong to the legacy consumer, the producer, or the broker), so they
        // only produced "unknown configuration" warnings and were dropped.

        TopicPartition partition0 = new TopicPartition("hello-topic", 0);

        // The consumer's generic types must match the configured StringDeserializers;
        // the original <byte[], byte[]> would throw ClassCastException on first use.
        // try-with-resources: KafkaConsumer is Closeable and must be closed.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // Group-managed subscription (the alternative to manual assignment) would be:
            // consumer.subscribe(Collections.singletonList("hello-topic"));

            // Manually assign a specific partition.
            consumer.assign(Collections.singletonList(partition0));

            // Replay the partition from the beginning.
            consumer.seekToBeginning(Collections.singletonList(partition0));

            // Or start from an explicit offset.
            consumer.seek(partition0, 0);

            // Or start from a point in time: resolve offsets for "one hour ago".
            long fetchDataTime = System.currentTimeMillis() - 1000L * 60 * 60;
            Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
            for (PartitionInfo partitionInfo : consumer.partitionsFor("hello-topic")) {
                timestampsToSearch.put(
                        new TopicPartition("hello-topic", partitionInfo.partition()),
                        fetchDataTime);
            }
            Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes =
                    consumer.offsetsForTimes(timestampsToSearch);

            // assign() REPLACES the previous assignment, so assigning one partition
            // per loop iteration (as the original did) would leave only the last
            // partition assigned. Collect every resolvable partition first, assign
            // them all at once, then seek each to its timestamp-derived offset.
            List<TopicPartition> resolved = new ArrayList<>();
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetsForTimes.entrySet()) {
                if (entry.getKey() != null && entry.getValue() != null) {
                    resolved.add(entry.getKey());
                }
            }
            consumer.assign(resolved);
            for (TopicPartition tp : resolved) {
                long offset = offsetsForTimes.get(tp).offset();
                logger.info("partition-{},offset-{}", tp.partition(), offset);
                consumer.seek(tp, offset);
            }

            while (true) {
                // poll() is a long poll: returns whatever is available within the timeout.
                // poll(long) is deprecated; use the Duration overload.
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                // Flatten the per-partition record lists into one batch.
                List<ConsumerRecord<String, String>> records = new ArrayList<>();
                for (TopicPartition partition : consumerRecords.partitions()) {
                    records.addAll(consumerRecords.records(partition));
                }
                logger.info("消息长度:{}", records.size());
                if (!records.isEmpty()) {
                    logger.info(records.toString());

                    // Synchronous commit: blocks until the offsets are committed.
                    // (The original called commitAsync() here, contradicting its own
                    // "synchronous" comment, and then committed a second time.)
                    consumer.commitSync();

                    // Asynchronous alternative — non-blocking; use INSTEAD of commitSync():
                    // consumer.commitAsync((offsets, exception) -> {
                    //     if (exception != null) {
                    //         logger.error("commit failed for offsets {}", offsets, exception);
                    //     }
                    // });
                }
            }
        }
    }
}
