package com.tcm.kafka.thread;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.*;

public class ThirdMultiThreadConsumerDemo {

    /** Kafka bootstrap servers. NOTE(review): "brokeList" looks like a typo for "brokerList"; kept to avoid breaking external references. */
    public static final String brokeList = "localhost:9092";
    /** Topic this demo subscribes to. */
    public static final String topic = "topic-demo";
    /** Consumer group id used by the polling thread. */
    public static final String groupId = "group.demo";
    /**
     * Per-partition "next offset to commit" (last consumed offset + 1), written by
     * {@code RecordsHandler} workers. All access must hold {@code synchronized (offsets)}.
     * Fixed: was a raw {@code new HashMap()} and non-final despite being shared mutable state.
     */
    public static final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();

    /**
     * Builds the consumer configuration for this demo.
     *
     * <p>Consistency fix: all keys now use {@link ConsumerConfig} constants — the
     * original mixed raw string keys with one constant. Key strings are identical,
     * so the resulting {@link Properties} is unchanged.
     *
     * <p>NOTE(review): auto commit is left enabled here while {@code RecordsHandler}
     * tracks offsets manually in the shared {@code offsets} map; mixing the two
     * commit strategies is usually unintended — confirm which one is wanted.
     *
     * @return properties suitable for constructing a {@code KafkaConsumer<String, String>}
     */
    public static Properties initConfig(){
        Properties properties = new Properties();
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokeList);
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        // Consumer group name.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return properties;
    }

    /**
     * Single polling thread: one {@code KafkaConsumer} instance polls in a loop and
     * hands each non-empty batch to a fixed-size worker pool for processing.
     *
     * <p>Thread-safety: the consumer is only ever touched by this thread, which is
     * the supported usage ({@code KafkaConsumer} is not thread-safe).
     */
    public static class KafkaConsumerThread extends Thread{
        private KafkaConsumer<String, String> kafkaConsumer;
        private ExecutorService executorService;
        private int threadNumber;

        /**
         * @param props        consumer configuration (see {@code initConfig()})
         * @param topic        topic to subscribe to
         * @param threadNumber number of worker threads processing record batches
         */
        public KafkaConsumerThread(Properties props, String topic, int threadNumber){
            // Fixed: original used the raw type `new KafkaConsumer(props)`.
            kafkaConsumer = new KafkaConsumer<>(props);
            kafkaConsumer.subscribe(Collections.singleton(topic));
            this.threadNumber = threadNumber;
            // Core == max pool size, so the keep-alive of 0 never applies; the unit
            // is fixed from MICROSECONDS (apparent typo) to MILLISECONDS.
            // CallerRunsPolicy throttles this poll loop once the 1000-task queue fills.
            executorService = new ThreadPoolExecutor(threadNumber, threadNumber, 0L, TimeUnit.MILLISECONDS
            , new ArrayBlockingQueue<>(1000), new ThreadPoolExecutor.CallerRunsPolicy());
        }

        /**
         * Kept for source/binary compatibility.
         * WARNING: leaves {@code kafkaConsumer} and {@code executorService} null, so
         * {@link #run()} fails immediately on an instance built this way.
         */
        public KafkaConsumerThread() {
        }

        @Override
        public void run() {
            try{
                while(true){
                    ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(100));
                    if(!records.isEmpty()){
                        executorService.submit(new RecordsHandler(records));
                    }
                }
            }catch (Exception e){
                // Demo-level handling; a production consumer should treat
                // WakeupException (orderly shutdown) differently from real failures.
                e.printStackTrace();
            }finally {
                // Fixed: the original leaked the thread pool and could NPE here when
                // the no-arg constructor had been used.
                if (executorService != null) {
                    executorService.shutdown();
                }
                if (kafkaConsumer != null) {
                    kafkaConsumer.close();
                }
            }
        }
    }

    /**
     * Worker task: for every partition in its batch, advances the shared
     * {@code offsets} entry to one past the last record consumed.
     *
     * <p>The map is only ever moved forward, so out-of-order completion of
     * worker tasks cannot rewind a partition's recorded position.
     */
    public static class RecordsHandler extends Thread{

        public final ConsumerRecords<String, String> records;

        public RecordsHandler(ConsumerRecords<String, String> records) {
            this.records = records;
        }

        @Override
        public void run() {
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                // Offset to record is one past the last consumed record.
                long nextOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
                synchronized (offsets) {
                    OffsetAndMetadata stored = offsets.get(partition);
                    // Insert when absent; otherwise only move forward, never back.
                    if (stored == null || stored.offset() < nextOffset) {
                        offsets.put(partition, new OffsetAndMetadata(nextOffset));
                    }
                }
            }
        }
    }

}
