package org.example.kafka.rebalance;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.example.kafka.common.CommonConstant;
import org.example.kafka.common.KafkaConstants;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

/**
 * A runnable Kafka consumer that manually tracks and commits offsets,
 * sharing its per-partition offset map with a rebalance listener so that
 * offsets can be committed when partitions are revoked.
 */
public class ConsumerWorker implements Runnable {

    private final KafkaConsumer<String, String> consumer;
    /**
     * Latest offsets tracked by this consumer: for each partition, the
     * NEXT offset to consume (last processed offset + 1), matching Kafka's
     * commit semantics.
     */
    private final Map<TopicPartition, OffsetAndMetadata> currOffsets;
    /** When true, the worker stops after processing at least 5 records. */
    private final boolean isStop;

    /**
     * Creates the consumer with auto-commit disabled (offsets are committed
     * manually) and subscribes with a rebalance listener that shares
     * {@code currOffsets}.
     *
     * @param isStop whether the worker should shut down after 5 records
     */
    public ConsumerWorker(boolean isStop) {
        this.isStop = isStop;
        // Consumer configuration built from project-wide defaults.
        Properties properties = KafkaConstants.consumerConfig(
                RebalanceConsumer.GROUP_ID,
                StringDeserializer.class,
                StringDeserializer.class);
        // Manual offset management: disable auto-commit.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        this.consumer = new KafkaConsumer<>(properties);
        this.currOffsets = new HashMap<>();
        // The listener receives the live offset map so it can commit the
        // tracked positions when a rebalance revokes partitions.
        consumer.subscribe(Collections.singletonList(CommonConstant.REBALANCE_TOPIC),
                new HandlerRebalance(currOffsets, consumer));
    }

    @Override
    public void run() {
        final String id = String.valueOf(Thread.currentThread().getId());
        int count = 0;
        try {
            while (true) {
                // Poll with a 500 ms timeout; the Duration overload replaces
                // the deprecated poll(long).
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
                // Business processing; a transaction would start here.
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(id + "|" + String.format(
                            "处理主题：%s，分区：%d，偏移量：%d，" +
                                    "key：%s，value：%s",
                            record.topic(), record.partition(),
                            record.offset(), record.key(), record.value()));
                    // Track the NEXT offset to consume (offset + 1), as Kafka
                    // commits the position of the next record to read.
                    currOffsets.put(
                            new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1, "no"));
                    count++;
                }
                if (!currOffsets.isEmpty()) {
                    // Mirror tracked offsets into the map shared with the
                    // rebalance listener.
                    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : currOffsets.entrySet()) {
                        HandlerRebalance.partitionOffsetMap
                                .put(entry.getKey(), entry.getValue().offset());
                    }
                    // A real implementation would commit the transaction here,
                    // persisting business results and offsets together.
                }
                if (isStop && count >= 5) {
                    System.out.println(id + "-将关闭，当前偏移量为：" + currOffsets);
                    // Commit the explicitly tracked offsets (not poll's implicit
                    // position), consistent with what the rebalance listener
                    // would commit on revocation.
                    consumer.commitSync(currOffsets);
                    break;
                }
                consumer.commitSync(currOffsets);
            }
        } finally {
            // Always release the consumer's network resources and trigger a
            // final group rebalance.
            consumer.close();
        }
    }
}
