package com.example.kafka;

import cn.hutool.core.date.DateUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.kafka.annotation.KafkaListener;

import java.time.Duration;
import java.util.*;

@Slf4j
public class OffsetReset {

    /** Consumer group whose committed offsets are rewound. */
    String groupId = "bill-gantry";
    KafkaConsumer<byte[], byte[]> consumer;
    /** Target point in time ("yyyy-MM-dd HH:mm:ss"); offsets are reset to the first record at/after it. */
    String resetTime = "2023-05-18 16:00:00";
    /** Comma-separated list of topics to reset. */
    String topics = "GBUPLOAD_ETCTU";
    //,TB_LANESUMMATION_DEL,TRANSACTION_ETCPASSTD_JSON,ETCTS_EXITPTSD_JSON,ETCTS_PASSDD_JSON,JCGS_EXETCPU



    /**
     * Builds the consumer used by every test in this class.
     */
    @Before
    public void init(){
        Properties consumerConfig = new Properties();
        consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.143.7.51:6667,10.143.7.52:6667,10.143.7.53:6667,10.143.7.54:6667,10.143.7.56:6667");
        //consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hb-es001:6667,hb-es002:6667,hb-es003:6667");
        consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        consumerConfig.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // FIX: the deserializers must match KafkaConsumer<byte[], byte[]>. The original
        // configured StringDeserializer, which throws ClassCastException as soon as a
        // record key/value is accessed as byte[].
        consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        consumerConfig.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10);
        consumer = new KafkaConsumer<>(consumerConfig);
    }

    /**
     * Closes the consumer after each test so sockets and group membership are released
     * (the original leaked the consumer).
     */
    @After
    public void tearDown(){
        if (consumer != null) {
            consumer.close();
        }
    }

    /**
     * Resets the committed offset of every topic in {@link #topics} to {@link #resetTime}.
     */
    @Test
    public void offsetReset(){
        // "," needs no regex escaping; the original "\\," was a needless escape.
        for (String topic : topics.split(",")) {
            setOffset(topic);
        }
    }

    /**
     * Continuously consumes GBUPLOAD_VIU and logs the offset and partition of each record.
     * Intended for manual verification after a reset; loops until interrupted.
     */
    @Test
    public void getData(){
        String topic = "GBUPLOAD_VIU";
        consumer.subscribe(Collections.singletonList(topic));
        while (true) {
            // 获取数据 如果获取不到等待10秒 (poll; wait up to 10s when no data is available)
            ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofSeconds(10));
            for (ConsumerRecord<byte[], byte[]> record : records) {
                log.info("当前offset：{}",record.offset());
                log.info("当前分区：{}",record.partition());

                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    // FIX: restore the interrupt flag and stop consuming instead of
                    // swallowing the interruption with printStackTrace().
                    Thread.currentThread().interrupt();
                    return;
                }
            }
            //consumer.commitSync();
        }
    }

    /**
     * 重置offset — rewinds the committed offset of every partition of {@code topic}
     * to the earliest record whose timestamp is &gt;= {@link #resetTime}.
     *
     * @param topic topic whose group offsets are reset
     */
    public void setOffset(String topic){
        // Epoch-millis timestamp to rewind to.
        long startTime = DateUtil.parse(resetTime).getTime();
        //long endTime = DateUtil.parse("2020-06-25 16:22:49").getTime();
        // Map every partition of the topic to the target timestamp.
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        Map<TopicPartition, Long> timestampsToSearch = new HashMap<>(Math.max(16, partitionInfos.size() * 2));
        for (PartitionInfo partitionInfo : partitionInfos) {
            timestampsToSearch.put(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()), startTime);
        }
        // Per partition: offset of the first message with timestamp >= startTime,
        // or a null value when no such message exists.
        Map<TopicPartition, OffsetAndTimestamp> startOffsetMap = consumer.offsetsForTimes(timestampsToSearch);

        // FIX: the original called consumer.assign(...) inside the loop. assign()
        // REPLACES the previous assignment, so only the last partition remained
        // assigned and the final commitSync() committed only that one partition.
        // Collect all resettable partitions first and assign them in a single call.
        List<TopicPartition> resettable = new ArrayList<>();
        for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : startOffsetMap.entrySet()) {
            if (entry.getValue() != null) {
                resettable.add(entry.getKey());
            }
        }
        consumer.assign(resettable);
        for (TopicPartition topicPartition : resettable) {
            long offset = startOffsetMap.get(topicPartition).offset();
            log.info("重置的分区是：{}", topicPartition.partition());
            log.info("重置到offset是：{}", offset);
            // Override the fetch position the consumer will use on the next poll.
            consumer.seek(topicPartition, offset);
        }
        // One synchronous commit for all re-positioned partitions. commitSync()
        // blocks until the broker acknowledges, so the original Thread.sleep(2000)
        // and the duplicate commit were unnecessary.
        consumer.commitSync();
        log.info("主题 {}重置offset成功",topic);
    }

    // NOTE(review): @KafkaListener without topics/topicPattern fails at Spring startup;
    // this method is never registered here (plain JUnit class) and appears to be dead
    // scaffolding — confirm before removing.
    @KafkaListener
    public void ds(){

    }
}
/**
 * Callback for asynchronous offset commits that reports the outcome.
 */
class MyOffsetCommitCallback implements OffsetCommitCallback{

    /**
     * Invoked when an async commit completes.
     *
     * @param offsets   offsets that were (or failed to be) committed, per partition
     * @param exception null on success; the commit failure otherwise
     */
    @Override
    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
        // FIX: the original printed the constant "1" and ignored the exception,
        // silently swallowing failed commits.
        if (exception != null) {
            System.err.println("offset commit failed for " + offsets + ": " + exception);
        } else {
            System.out.println("offset commit succeeded: " + offsets);
        }
    }
}
