package com.example.testkafkaproto.consumer.config;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConsumerConfiguration {

    private static final String BROKER_LIST = "192.168.11.202:9092";

    // NOTE(review): "isRunninng" is misspelled, but the field is public API and is
    // toggled by external callers, so the name is kept for backward compatibility.
    /** Run flag for the poll loop: clear it to stop consuming and close the consumer. */
    public static final AtomicBoolean isRunninng = new AtomicBoolean(true);

    /** One-shot request to pause consumption of even-numbered partitions; cleared by the loop. */
    public static final AtomicBoolean isPausing = new AtomicBoolean(false);

    /** One-shot request to resume consumption of even-numbered partitions; cleared by the loop. */
    public static final AtomicBoolean isResuming = new AtomicBoolean(false);

    /** One-shot request to seek to (seekingTopic, seekingPartition, seekingOffset); cleared by the loop. */
    public static final AtomicBoolean isSeeking = new AtomicBoolean(false);
    public static String seekingTopic = null;
    public static int seekingPartition = 0;
    public static int seekingOffset = 0;


    /**
     * Creates a consumer subscribed to "proto-topic" with a rebalance listener and
     * drives it through a blocking manual-commit poll loop.
     *
     * <p>NOTE(review): the poll loop only returns once {@link #isRunninng} is cleared,
     * and its finally block closes the consumer — the instance returned here is
     * therefore already closed and must not be polled again by the caller.
     *
     * @return the consumer instance (already closed when this method returns)
     */
    public KafkaConsumer<String,String> kafkaConsumer(){

        Properties props = new Properties();
        // "bootstrap.servers"
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKER_LIST);
        // "key.deserializer"
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // "value.deserializer"
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // "client.id" — auto-generated when not configured
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "testConsumer");
        // "group.id" — the consumer group this consumer joins
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
        // "partition.assignment.strategy"
        props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "org.apache.kafka.clients.consumer.StickyAssignor");
        // "auto.offset.reset" — where to start when no committed offset exists: latest/earliest/none
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // "enable.auto.commit" — disabled; offsets are committed manually in the loops below
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // "interceptor.classes" — consumer interceptor
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.example.testkafkaproto.consumer.interceptor.TestConsumerInterceptor");

        KafkaConsumer<String,String> kafkaConsumer = new KafkaConsumer<>(props);
        // Subscribe and register a rebalance listener; it commits synchronously before
        // partitions are revoked so already-processed offsets are not re-consumed.
        kafkaConsumer.subscribe(Arrays.asList("proto-topic"), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {

                System.out.println("@@@@@@@@@@@@@@@@@@@@@@进入再均衡监听器的onPartitionsRevoked方法");
                for(TopicPartition tp : partitions){
                    System.out.println("@@@@@@@@@@@再均衡前分区信息TOPIC:" + tp.topic() + ",PARTITION:" + tp.partition());
                }
                // Persist consumed offsets before losing ownership of the partitions.
                kafkaConsumer.commitSync();
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {

                System.out.println("@@@@@@@@@@@@@@@@@@@@@@进入再均衡监听器的onPartitionsAssigned方法");
                for(TopicPartition tp : partitions){
                    System.out.println("@@@@@@@@@@@再均衡后分区信息TOPIC:" + tp.topic() + ",PARTITION:" + tp.partition());
                }
            }
        });

        // auto-commit loop (would require enable.auto.commit=true above)
//        autoCommit(kafkaConsumer);
        // manual asynchronous commit loop
//        manualAsyncCommit(kafkaConsumer);
        // manual synchronous commit loop
        manualSyncCommit(kafkaConsumer);

        return kafkaConsumer;
    }

    /**
     * Poll loop that relies on Kafka's automatic offset commit.
     *
     * <p>NOTE(review): the consumer built above sets enable.auto.commit=false, so
     * running this loop as-is would never commit any offsets — flip that config
     * before using this mode.
     *
     * @param kafkaConsumer the consumer to drive; closed when the loop exits
     */
    private void autoCommit(KafkaConsumer<String,String> kafkaConsumer){
        try{
            while(ConsumerConfiguration.isRunninng.get()){
                checkPauseAndResume(kafkaConsumer);
                checkSeeking(kafkaConsumer);
                ConsumerRecords<String,String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
                for(ConsumerRecord<String,String> record : records){
                    printRecord(record);
                }
            }
        } catch(Exception e){
            e.printStackTrace();
        } finally {
            System.out.println("=======================消费者关闭=========================");
            kafkaConsumer.close();
        }
    }

    /**
     * Poll loop committing all consumed partitions synchronously after each batch.
     *
     * <p>Before and after each commit the committed offset and next fetch position
     * of every touched partition are printed, making the commit's effect visible.
     * (Was misleadingly named manualAsyncCommit; renamed to match its commitSync body.)
     *
     * @param kafkaConsumer the consumer to drive; closed when the loop exits
     */
    private void manualSyncCommit(KafkaConsumer<String,String> kafkaConsumer){

        try{
            while(ConsumerConfiguration.isRunninng.get()){
                checkPauseAndResume(kafkaConsumer);
                checkSeeking(kafkaConsumer);
                ConsumerRecords<String,String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
                if(records.count() > 0){
                    for(ConsumerRecord<String,String> record : records){
                        printRecord(record);
                    }
                    // Committed offset and next fetch position BEFORE the commit.
                    for(TopicPartition tp : records.partitions()){
                        System.out.println("+++TOPIC:" + tp.topic() + ",PARTITION:" + tp.partition() + ",commited-offset:" + kafkaConsumer.committed(tp) + ",position:" + kafkaConsumer.position(tp));
                    }
                    // Blocking commit of every partition in this batch.
                    kafkaConsumer.commitSync();
                    // Committed offset and next fetch position AFTER the commit.
                    for(TopicPartition tp : records.partitions()){
                        System.out.println("---TOPIC:" + tp.topic() + ",PARTITION:" + tp.partition() + ",commited-offset:" + kafkaConsumer.committed(tp) + ",position:" + kafkaConsumer.position(tp));
                    }
                }
            }
        } catch(Exception e){
            e.printStackTrace();
        } finally {
            System.out.println("=======================消费者关闭=========================");
            kafkaConsumer.close();
        }
    }

    /**
     * Poll loop committing all consumed partitions asynchronously after each batch.
     * The commit callback prints the committed offset and position of each partition.
     * (Was misleadingly named manualSyncCommit; renamed to match its commitAsync body.)
     *
     * @param kafkaConsumer the consumer to drive; closed when the loop exits
     */
    private void manualAsyncCommit(KafkaConsumer<String,String> kafkaConsumer){

        try{
            while(ConsumerConfiguration.isRunninng.get()){
                checkPauseAndResume(kafkaConsumer);
                checkSeeking(kafkaConsumer);
                ConsumerRecords<String,String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
                for(ConsumerRecord<String,String> record : records){
                    printRecord(record);
                }
                // Committed offset and next fetch position BEFORE the commit.
                for(TopicPartition tp : records.partitions()){
                    System.out.println("+++TOPIC:" + tp.topic() + ",PARTITION:" + tp.partition() + ",commited-offset:" + kafkaConsumer.committed(tp) + ",position:" + kafkaConsumer.position(tp));
                }
                // Non-blocking commit of every partition; the callback fires on a later poll.
                kafkaConsumer.commitAsync(new OffsetCommitCallback() {
                    @Override
                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception e) {
                        if(e != null){
                            e.printStackTrace();
                        }else{
                            // Committed offset and next fetch position AFTER the commit.
                            for(TopicPartition tp : offsets.keySet()){
                                System.out.println("---TOPIC:" + tp.topic() + ",PARTITION:" + tp.partition() + ",commited-offset:" + kafkaConsumer.committed(tp) + ",position:" + kafkaConsumer.position(tp));
                            }
                        }
                    }
                });
            }
        } catch(Exception e){
            e.printStackTrace();
        } finally {
            System.out.println("=======================消费者关闭=========================");
            kafkaConsumer.close();
        }
    }

    /** Prints one consumed record (topic/partition/offset/value/timestamp). */
    private static void printRecord(ConsumerRecord<String,String> record){
        System.out.println("消费者消费消息:topic=" + record.topic() + ",partition=" + record.partition() + ",offset=" + record.offset() + ",value=" + record.value() + ",timestampType=" + record.timestampType() + ",timestamp=" + record.timestamp());
    }

    /**
     * Applies pending pause/resume requests: the even-numbered partitions of the
     * current assignment are paused when {@link #isPausing} is set and resumed when
     * {@link #isResuming} is set. Each flag is consumed atomically (one-shot).
     *
     * @param kafkaConsumer the consumer whose assigned partitions are paused/resumed
     */
    private void checkPauseAndResume(KafkaConsumer<String,String> kafkaConsumer){
        // Pause consumption of part of the partitions.
        if(ConsumerConfiguration.isPausing.compareAndSet(true, false)){
            kafkaConsumer.pause(evenPartitions(kafkaConsumer.assignment()));
            System.out.println("******************消费者暂停消费部分分区******************");
        }
        // Resume consumption of part of the partitions.
        if(ConsumerConfiguration.isResuming.compareAndSet(true, false)){
            kafkaConsumer.resume(evenPartitions(kafkaConsumer.assignment()));
            System.out.println("******************消费者恢复消费部分分区******************");
        }
    }

    /** Selects the even-numbered partitions from the given assignment. */
    private static List<TopicPartition> evenPartitions(Set<TopicPartition> assignment){
        List<TopicPartition> result = new ArrayList<>();
        for(TopicPartition tp : assignment){
            if(tp.partition() % 2 == 0){
                result.add(tp);
            }
        }
        return result;
    }

    /**
     * Applies a pending seek request: moves the fetch position of
     * (seekingTopic, seekingPartition) to seekingOffset. {@link #isSeeking} is
     * consumed atomically (one-shot).
     *
     * @param kafkaConsumer the consumer to reposition
     */
    private void checkSeeking(KafkaConsumer<String,String> kafkaConsumer){

        if(ConsumerConfiguration.isSeeking.compareAndSet(true, false)){
            TopicPartition tp = new TopicPartition(seekingTopic, seekingPartition);
            kafkaConsumer.seek(tp, seekingOffset);
        }
    }
}