package com.wh.springkafka.consumer;


import com.wh.springkafka.util.PropertyManager;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Kafka's Consumer is NOT thread-safe: each thread must own its own KafkaConsumer.
 * This sample uses the classic one-consumer-per-thread pattern with statically
 * assigned partitions and manual, per-partition offset commits.
 */
public class ConsumerThreadSample {

    public static final String TOPIC_NAME = "wanghao-topic";
    public static final String kafkaServerIp = PropertyManager.getProperty("KafkaZKServerIp");

    /**
     * Starts one consumer thread, lets it poll for ~10 seconds, then signals shutdown.
     */
    public static void main(String[] args) throws InterruptedException {
        KafkaConsumerRunner r1 = new KafkaConsumerRunner();
        new Thread(r1).start();
        Thread.sleep(10000);

        r1.shutdown();
    }

    /**
     * One thread / one KafkaConsumer. Partitions 0 and 1 of {@link #TOPIC_NAME}
     * are assigned statically (no consumer-group rebalancing); offsets are
     * committed manually per partition after that partition's batch is processed.
     */
    static class KafkaConsumerRunner implements Runnable {
        private final AtomicBoolean closed = new AtomicBoolean(false);

        // FIX: parameterized type instead of the raw KafkaConsumer.
        private final KafkaConsumer<String, String> consumer;

        public KafkaConsumerRunner() {
            Properties properties = new Properties();
            properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServerIp + ":9092");
            properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test");
            // Auto-commit disabled: offsets are committed manually in run().
            // FIX: dropped auto.commit.interval.ms — it is meaningless with
            // enable.auto.commit=false and only misled readers.
            properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
            properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
            properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
            consumer = new KafkaConsumer<>(properties);

            // Static partition assignment — this thread handles exactly p0 and p1.
            TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
            TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
            consumer.assign(Arrays.asList(p0, p1));
        }

        @Override
        public void run() {
            try {
                while (!closed.get()) {
                    // Block up to 10s waiting for records.
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
                    for (TopicPartition partition : records.partitions()) {
                        List<ConsumerRecord<String, String>> pRecords = records.records(partition);
                        // Guard against an empty batch before indexing the last element.
                        if (pRecords.isEmpty()) {
                            continue;
                        }
                        for (ConsumerRecord<String, String> record : pRecords) {
                            System.out.printf("partition = %d,offset = %d, key = %s,value=%s%n "
                                    , record.partition(), record.offset(), record.key(), record.value());
                        }
                        // FIX: removed the dead `if (true) { … } else { … }` branch.
                        // Manual commit for this partition only: commit the last
                        // processed offset + 1, i.e. the next record to consume.
                        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                        offsets.put(partition, new OffsetAndMetadata(pRecords.get(pRecords.size() - 1).offset() + 1));
                        consumer.commitSync(offsets);
                        System.out.println("消费完成~~~~~~~~~~~partition " + partition.toString() + " end ~~~~~~~~~~~~~~~~");
                    }
                }
            } catch (WakeupException e) {
                // FIX: wakeup() is the expected shutdown signal; previously this
                // fell into catch(Exception) and printed a stack trace on every
                // normal shutdown. Only propagate if we were NOT asked to close.
                if (!closed.get()) {
                    throw e;
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // Always release the consumer's sockets/buffers.
                consumer.close();
            }
        }

        /** Asks the poll loop to exit; wakeup() interrupts a poll() blocked in I/O. */
        public void shutdown() {
            closed.set(true);
            consumer.wakeup();
        }
    }

}
