package com.zero.kafka.customer;

import com.zero.kafka.utils.InitKafkaConfig;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

public class MyKafkaCustomer extends Thread {

    private final KafkaConsumer<String, String> consumer;

    // Cleared by shutdown(); the poll loop checks it once per poll cycle.
    private volatile boolean running = true;

    /**
     * Creates a Kafka consumer configured by
     * {@link InitKafkaConfig#initCustomerConfig()} and subscribes it to
     * {@code topic}. Bootstrap servers, deserializers and the consumer-group
     * id are assumed to come from that config — TODO confirm.
     *
     * @param topic the topic to subscribe to
     */
    public MyKafkaCustomer(String topic) {

        Properties properties = InitKafkaConfig.initCustomerConfig();
        this.consumer = new KafkaConsumer<>(properties);

        // 1. Subscribe to the topic (partitions assigned by the group coordinator).
        consumer.subscribe(Collections.singletonList(topic));

        // 2. Manually assign a specific partition instead of subscribing:
        //consumer.assign(Arrays.asList(new TopicPartition(topic,0)));

        // 3. Replay: re-consume everything in partition 0 from offset 0.
        //consumer.assign(Arrays.asList(new TopicPartition(topic,0)));
        //consumer.seekToBeginning(Arrays.asList(new TopicPartition(topic,0)));

        // 4. Start consuming from a specific offset.
        //consumer.assign(Arrays.asList(new TopicPartition(topic,0)));
        //consumer.seek(new TopicPartition(topic,0),10);

        // 5. Start consuming from a point in time:
        //    look up every partition of the topic, then resolve the offset
        //    matching a timestamp (here: one hour ago) and seek to it.
      /*  List<PartitionInfo> topicPartitions = consumer.partitionsFor(topic);
        // consume from one hour ago
        long fetchDataTime = new Date().getTime() - 1000 * 60 * 60;
        Map<TopicPartition, Long> map = new HashMap<>();
        for (PartitionInfo par : topicPartitions) {
            map.put(new TopicPartition(topic, par.partition()), fetchDataTime);
        }
        Map<TopicPartition, OffsetAndTimestamp> parMap = consumer.offsetsForTimes(map);
        for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : parMap.entrySet()) {
            TopicPartition key = entry.getKey();
            OffsetAndTimestamp value = entry.getValue();
            if (key == null || value == null) continue;
            Long offset = value.offset();
            System.out.println("partition-" + key.partition() + "|offset-" + offset);
            System.out.println();
            // seek to the offset resolved from the timestamp
            consumer.assign(Arrays.asList(key));
            consumer.seek(key, offset);
        }*/

    }

    /**
     * Asks the poll loop to stop. The consumer is closed when {@link #run()}
     * returns; takes effect within one poll timeout (~1s).
     */
    public void shutdown() {
        running = false;
    }

    /**
     * Poll loop: prints every received record, then (when anything was
     * received) commits offsets asynchronously. Loops until {@link #shutdown()}
     * is called; always closes the consumer on exit so it leaves the group
     * cleanly and releases its resources.
     */
    @Override
    public void run() {
        try {
            while (running) {
                // Long poll: blocks for up to 1s waiting for records.
                ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : consumerRecords) {
                    System.out.printf("收到消息：partition = %d,offset = %d, key = %s, value = %s%n", record.partition(),
                            record.offset(), record.key(), record.value());
                }

                // Manual commit is only required when auto-commit is disabled:
                // properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false");
                if (consumerRecords.count() > 0) {
                    // Synchronous commit: blocks the current thread until the
                    // offsets are committed. Often preferred, since there is
                    // usually no further logic after a commit.
                    //consumer.commitSync();

                    // Asynchronous commit: does not block; failures are
                    // reported through the callback.
                    consumer.commitAsync(new OffsetCommitCallback() {
                        @Override
                        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                            if (exception != null) {
                                System.err.println("Commit failed for " + offsets);
                                // BUGFIX: was `exception.getStackTrace()`, which
                                // concatenates the array's identity string
                                // ("[Ljava.lang.StackTraceElement;@...") instead
                                // of any useful trace.
                                System.err.println("Commit failed exception: " + exception);
                                exception.printStackTrace();
                            }
                        }
                    });
                }
            }
        } finally {
            // Always release sockets/buffers and leave the group cleanly,
            // even if poll() or the commit callback throws.
            consumer.close();
        }
    }

    public static void main(String[] args) {
        String TOPIC_NAME = "my-replicated-topic";
        // NOTE(review): the consumer group (previously the unused local
        // "testGroup") is presumably set inside
        // InitKafkaConfig.initCustomerConfig() — TODO confirm.
        new MyKafkaCustomer(TOPIC_NAME).start();
    }
}
