package com.kafka.consumer;


import com.kafka.service.OffsetService;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * Consumer thread that persists consumed offsets in a database (via {@link OffsetService})
 * so they survive consumer-group rebalances: positions are saved in
 * {@code onPartitionsRevoked} and restored with {@code seek} in {@code onPartitionsAssigned}.
 *
 * <p>NOTE(review): the class name contains a typo ("Kafaka"); kept as-is so existing
 * callers are not broken.
 *
 * @Author: Jiangxx
 * @Date: 2022/01/12
 * @Description: Kafka consumer with DB-backed offset management.
 */
public class KafakaConsumerThread extends Thread {

    /* KafkaConsumer is not thread-safe; it is only touched from this thread
     * (the poll loop and the rebalance callbacks both run here). */
    private final KafkaConsumer<String, String> kafkaConsumer;

    /* Pool that processes each polled batch off the consumer thread. */
    private final ExecutorService executorService;

    private final int threadNumber;

    private final OffsetService offsetService = new OffsetService();


    /**
     * Subscribes to {@code topic} and wires a rebalance listener that persists and
     * restores offsets through the database.
     *
     * @param props        Kafka consumer configuration
     * @param topic        topic to subscribe to
     * @param threadNumber core and max size of the record-handling thread pool
     */
    public KafakaConsumerThread(Properties props, String topic, int threadNumber) {
        kafkaConsumer = new KafkaConsumer<>(props);
        kafkaConsumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {
            /**
             * Invoked before the rebalance starts and after this consumer has stopped
             * fetching. Saving the current positions here avoids duplicate consumption
             * once the partitions are reassigned.
             *
             * <p>Fixed: removed an unused {@code kafkaConsumer.committed(...)} call that
             * unsafely cast the {@code Collection} to a {@code Set} (potential
             * {@code ClassCastException}) and whose result was discarded.
             *
             * @param collection partitions owned before the rebalance
             */
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> collection) {
                /* Persist the consumed offsets to the database. */
                storeOffsetIntoDB(collection);
            }

            /**
             * Invoked after partitions are assigned and before fetching resumes.
             * Seeks each assigned partition to the last offset stored in the database,
             * if one exists.
             *
             * @param collection partitions owned after the rebalance
             */
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> collection) {
                for (TopicPartition tp : collection) {
                    Long offsetFromDB = getOffsetFromDB(tp);
                    if (offsetFromDB == null) {
                        /* No stored offset: let Kafka's auto.offset.reset policy apply. */
                        continue;
                    }
                    /* Resume from the position recorded in the database. */
                    kafkaConsumer.seek(tp, offsetFromDB);
                }
            }
        });
        this.threadNumber = threadNumber;
        executorService = new ThreadPoolExecutor(threadNumber, threadNumber,
                0L, TimeUnit.MILLISECONDS, new ArrayBlockingQueue<>(1000),
                /* Back-pressure: when the queue is full, the polling thread runs the task itself. */
                new ThreadPoolExecutor.CallerRunsPolicy());
    }

    /**
     * Persists the current consume position of each partition to the database.
     *
     * <p>NOTE(review): {@code position(tp)} is the offset of the next record to fetch;
     * batches still queued in the executor may not have finished processing, so records
     * can be skipped after a rebalance — confirm this at-most-once behavior is intended.
     *
     * @param collection partitions whose positions should be saved
     */
    private void storeOffsetIntoDB(Collection<TopicPartition> collection) {
        for (TopicPartition tp : collection) {
            /* Topic name */
            String topic = tp.topic();
            /* Partition number */
            int partition = tp.partition();
            /* Partition display name, e.g. "topic-0" */
            String partitionName = tp.toString();
            /* Current consume position */
            long offset = kafkaConsumer.position(tp);

            offsetService.saveOffset(topic, partitionName, partition, offset);
        }
    }

    /**
     * Looks up the most recently stored consume offset for a partition.
     *
     * @param tp the partition
     * @return the stored offset, or {@code null} when none has been saved
     */
    private Long getOffsetFromDB(TopicPartition tp) {
        return offsetService.getOffset(tp.partition(), tp.topic());
    }

    /**
     * Poll loop: hands every non-empty batch to the thread pool for processing.
     * Closes the consumer and shuts down the pool when the loop exits.
     */
    @Override
    public void run() {
        try {
            while (true) {
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
                if (!records.isEmpty()) {
                    executorService.submit(new RecordsHandler(records));
                }
            }
        } catch (Exception e) {
            /* NOTE(review): consider an SLF4J logger; printStackTrace kept to avoid
             * introducing a new dependency. */
            e.printStackTrace();
        } finally {
            kafkaConsumer.close();
            /* Fixed: the executor was previously never shut down, leaking its worker
             * threads after the consumer stopped. */
            executorService.shutdown();
        }
    }
}
