package com.wh.springkafka.consumer;


import com.wh.springkafka.util.PropertyManager;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Kafka's {@code Consumer} is NOT thread-safe, so concurrency has to be handled
 * by the caller.
 *
 * Creating one Consumer per worker makes the program heavyweight; this class
 * instead uses a single polling Consumer that dispatches records to a worker pool.
 */
public class ConsumerThreadHander {

    // Topic this demo consumes from.
    public static final String TOPIC_NAME = "wanghao-topic";
    // Broker host read from external configuration (key "KafkaZKServerIp") — TODO confirm this
    // points at the Kafka broker and not ZooKeeper, despite the property name.
    public static final String kafkaServerIp = PropertyManager.getProperty("KafkaZKServerIp");


    // A single Consumer (which only polls and dispatches) pulls messages and hands
    // them to worker threads. Unlike a consumer-per-thread design, only one
    // KafkaConsumer is created, and record processing is parallel / non-blocking.
    public static void main(String[] args) throws InterruptedException {
        String brokerList = kafkaServerIp + ":9092";
        String groupId = "test";
        int workerNum = 5;
        ConsumerExecutor consumers = new ConsumerExecutor(brokerList, groupId, TOPIC_NAME);
        // FIX: wire up the existing shutdown() so Ctrl-C / SIGTERM wakes the poll
        // loop and closes the consumer cleanly instead of killing it mid-poll.
        Runtime.getRuntime().addShutdownHook(new Thread(consumers::shutdown));
        consumers.execute(workerNum);
    }


    /**
     * Owns the single (non-thread-safe) KafkaConsumer: polls on the calling thread
     * and fans records out to a bounded worker pool.
     */
    static class ConsumerExecutor {
        // Set by shutdown(); checked by the poll loop so it exits cleanly.
        private final AtomicBoolean closed = new AtomicBoolean(false);

        private final KafkaConsumer<String, String> consumer;
        private ExecutorService executors;

        /**
         * Creates a consumer subscribed to {@code topicName}.
         *
         * @param brokerList bootstrap servers, e.g. {@code host:9092}
         * @param groupId    consumer group id
         * @param topicName  topic to subscribe to
         */
        public ConsumerExecutor(String brokerList, String groupId, String topicName) {
            Properties properties = new Properties();
            properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
            properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
            // Auto-commit is disabled; offsets are committed explicitly in execute().
            // (The AUTO_COMMIT_INTERVAL_MS setting was removed: it is dead config
            // when auto-commit is off.)
            properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
            properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
            properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
            consumer = new KafkaConsumer<>(properties);

            consumer.subscribe(Collections.singletonList(topicName));
        }

        /**
         * Polls until {@link #shutdown()} is called, dispatching each record to the
         * worker pool.
         *
         * @param workerNum number of worker threads
         */
        public void execute(int workerNum) {
            // Bounded queue + CallerRunsPolicy: when workers are saturated the polling
            // thread runs the task itself, naturally throttling consumption.
            executors = new ThreadPoolExecutor(workerNum, workerNum, 0L, TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<>(1000), new ThreadPoolExecutor.CallerRunsPolicy());

            try {
                while (!closed.get()) {
                    // Block up to 10s waiting for records.
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
                    for (ConsumerRecord<String, String> record : records) {
                        executors.submit(new ConsumerRecordWorker(record));
                    }
                    // BUG FIX: auto-commit is off, so offsets must be committed manually,
                    // otherwise the group restarts from its last committed offset forever.
                    // NOTE: committing after dispatch (not after processing completes)
                    // gives at-most-once semantics for the dispatched records.
                    if (!records.isEmpty()) {
                        consumer.commitAsync();
                    }
                }
            } catch (WakeupException e) {
                // consumer.wakeup() throws this to break out of poll(); it is the
                // expected shutdown signal — rethrow only if we were NOT closing.
                if (!closed.get()) {
                    throw e;
                }
            } finally {
                consumer.close();
                // FIX: shut the pool down, otherwise its non-daemon threads keep the
                // JVM alive after the poll loop exits.
                if (executors != null) {
                    executors.shutdown();
                }
            }
        }

        /** Requests a clean stop: flags the loop and wakes the blocked poll(). */
        public void shutdown() {
            closed.set(true);
            consumer.wakeup();
        }
    }

    /**
     * Runnable wrapping a single Kafka record; instances are submitted to the
     * worker pool by {@code ConsumerExecutor}.
     */
    static class ConsumerRecordWorker implements Runnable {
        // FIX: final — the record is an immutable per-task payload, and final
        // guarantees safe publication to the worker thread that runs this task.
        private final ConsumerRecord<String, String> record;

        public ConsumerRecordWorker(ConsumerRecord<String, String> record) {
            this.record = record;
        }

        /** Demo "processing": prints the record coordinates and the worker thread name. */
        @Override
        public void run() {
            System.out.printf("ConsumerRecordWorker  partition = %d,offset = %d, key = %s,value=%s%n "
                    , record.partition(), record.offset(), record.key(), record.value());
            System.out.println(Thread.currentThread().getName());
        }
    }

}
