package com.ln.kafka.v2_4_0.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
 * <pre>
 *  kafka 客户端 多线程实例的主流的实现方式：
 *  类似于Netty，由一个KafkaConsumer来接受消息，多个EventHandle来处理消息
 *  场景：非业务系统，信息成功不成功是不需要管的,是为了快速处理数据
 * </pre>
 *
 * @Author zhangj
 * @Date 2022/6/8 18:10
 */
public class ConsumerRecordThreadSample {

    /** Topic consumed by this sample. Declared {@code final}: a public mutable static field could be reassigned by any caller. */
    public static final String TOPIC_NAME = "topic-3";

    public static void main(String[] args) throws InterruptedException {
        // Start one KafkaConsumer whose records are fanned out to a pool of 5 workers.
        ConsumerExecutor executor = new ConsumerExecutor("localhost:9092", "test", TOPIC_NAME);
        executor.execute(5);

        // Keep the demo alive for a while, then tear everything down.
        Thread.sleep(1000000);

        executor.shutDown();
    }

    /**
     * Client-side dispatcher: owns the single {@link KafkaConsumer} and fans
     * records out to a worker pool.
     *
     * <p>{@code KafkaConsumer} is NOT thread-safe, so every consumer call
     * (poll/close) happens on one dedicated poll thread started by
     * {@link #execute(int)}; {@link #shutDown()} only signals that thread via a
     * volatile flag and waits for it to finish. (The original implementation
     * ran the poll loop on the caller's thread in an unbreakable
     * {@code while(true)}, making {@code shutDown()} unreachable, and closed
     * the consumer from a foreign thread, which throws
     * {@code ConcurrentModificationException}.)
     */
    static class ConsumerExecutor {
        private final KafkaConsumer<String, String> consumer;
        private ExecutorService executors;
        // Dedicated thread for all consumer interaction (poll + close).
        private Thread pollThread;
        // Written by shutDown(), read by the poll loop.
        private volatile boolean closed = false;

        /**
         * Builds the consumer and subscribes it to {@code topic}.
         *
         * @param brokerList bootstrap servers, e.g. {@code "localhost:9092"}
         * @param groupId    consumer group id
         * @param topic      topic to subscribe to
         */
        public ConsumerExecutor(String brokerList, String groupId, String topic) {
            Properties props = new Properties();
            props.setProperty("bootstrap.servers", brokerList);
            props.setProperty("group.id", groupId);
            // Auto-commit is off and no manual commit is ever issued: this
            // sample is deliberately best-effort (see class comment at the top
            // of the file), so records may be re-delivered after a restart.
            props.setProperty("enable.auto.commit", "false");
            props.setProperty("auto.commit.interval.ms", "1000");
            props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Arrays.asList(topic));
        }

        /**
         * Starts {@code workerNum} worker threads plus one poll thread, then
         * returns immediately so the caller can later invoke {@link #shutDown()}.
         *
         * @param workerNum number of record-processing threads
         */
        public void execute(int workerNum) {
            executors = new ThreadPoolExecutor(workerNum, workerNum, 0L, TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<>(1000), new ThreadPoolExecutor.CallerRunsPolicy());
            pollThread = new Thread(() -> {
                try {
                    // poll() returns within 1s, so the closed flag set by
                    // shutDown() is observed promptly without needing wakeup().
                    while (!closed) {
                        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                        records.forEach(r -> executors.submit(new ConsumerRecordWorker(r)));
                    }
                } finally {
                    // close() must run on the same thread that polls.
                    consumer.close();
                }
            }, "consumer-poll-thread");
            pollThread.start();
        }

        /**
         * Stops the poll loop (which closes the consumer on its own thread) and
         * shuts the worker pool down, waiting up to 10s for in-flight tasks.
         */
        public void shutDown() {
            closed = true;
            if (pollThread != null) {
                try {
                    pollThread.join(TimeUnit.SECONDS.toMillis(10));
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
            if (executors != null) {
                executors.shutdown();
                try {
                    // awaitTermination previously sat outside the null check,
                    // risking an NPE when execute() had never been called.
                    if (!executors.awaitTermination(10, TimeUnit.SECONDS)) {
                        System.out.println("Timeout... Ignore for this case");
                    }
                } catch (InterruptedException e) {
                    System.out.println("Other thread interruptd this shutdown, ignore for this case.");
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

    /**
     * Worker that actually processes one message: fire-and-forget, it only
     * prints the record (no offset tracking, per the class comment above).
     */
    static class ConsumerRecordWorker implements Runnable {

        // final: a worker is bound to exactly one record for its lifetime.
        private final ConsumerRecord<String, String> record;

        public ConsumerRecordWorker(ConsumerRecord<String, String> record) {
            this.record = record;
        }

        @Override
        public void run() {
            // Only process (print) the data. The thread name is passed as a %s
            // argument rather than concatenated into the format string, where a
            // literal '%' in the name would throw UnknownFormatConversionException.
            System.out.println();
            System.out.printf("[Thread - %s]partition = %d, offset = %d, key = %s, value = %s%n",
                    Thread.currentThread().getName(), record.partition(), record.offset(), record.key(), record.value());
        }
    }




}
