package com.djk.daily.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

/**
 * @Description 消费者
 * @Author dujunkai
 * @Date 2021/12/8 10:02 上午
 **/
/**
 * Kafka consumer examples covering: auto-commit, manual offset commit,
 * manual partition assignment, manual offset seeking, and flow control
 * via {@code pause}/{@code resume}.
 *
 * <p>Each example runs an infinite poll loop and is intended to be invoked
 * one at a time from {@link #main(String[])}.
 *
 * @author dujunkai
 * @since 2021/12/8
 */
public class ConsumerSample {
    private static final String TOPIC_NAME = "djk_topic";

    public static void main(String[] args) {
//        helloword();
        // manual offset commit
//        commitOffset();
        // manual offset commit, iterating partition by partition
//        commitOffsetWithPartition();
        // manual offset commit with manual partition assignment
//        commitOffsetWithPartition2();
//        hellowordWithSSL();
//        controlOffset();
        controlPause();
    }

    /**
     * Builds a consumer with the shared configuration used by every example.
     *
     * @param autoCommit whether offsets are committed automatically. The
     *                   manual-commit examples MUST pass {@code false};
     *                   otherwise the client's background auto-commit races
     *                   with the explicit commitSync/commitAsync calls and
     *                   the examples no longer demonstrate manual control.
     * @return a new String/String consumer (caller owns it; never closed here
     *         because every example loops forever)
     */
    private static KafkaConsumer<String, String> createConsumer(boolean autoCommit) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "101.35.134.133:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", String.valueOf(autoCommit));
        if (autoCommit) {
            // only meaningful when auto-commit is on
            props.setProperty("auto.commit.interval.ms", "1000");
        }
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(props);
    }

    /**
     * Processes one poll result partition by partition and commits the offset
     * of each partition synchronously right after its records are handled.
     *
     * @param consumer the consumer to commit through
     * @param records  the batch returned by the latest {@code poll}
     */
    private static void consumePartitionsAndCommit(KafkaConsumer<String, String> consumer,
                                                   ConsumerRecords<String, String> records) {
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> pRecord = records.records(partition);
            for (ConsumerRecord<String, String> record : pRecord) {
                // persist the record to the database here
                System.out.printf("partition = %d,offset = %d,key = %s,value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
                // on processing failure, roll back instead of committing
            }
            // commit the NEXT offset to read, i.e. last processed + 1
            long lastOffset = pRecord.get(pRecord.size() - 1).offset();
            Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
            offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
            consumer.commitSync(offset);
            System.out.println("============partition-" + partition + "end===============");
        }
    }

    /**
     * Simplest example: auto-commit enabled.
     * Not recommended for production work — offsets may be committed before
     * the records are actually processed.
     */
    public static void helloword() {
        KafkaConsumer<String, String> consumer = createConsumer(true);

        // subscribe to one (or several) topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition = %d,offset = %d,key = %s,value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
            }
        }
    }

    /**
     * Manual offset commit: auto-commit is disabled and the whole batch is
     * acknowledged with one asynchronous commit after processing.
     */
    public static void commitOffset() {
        // auto-commit OFF — offsets are committed explicitly below
        KafkaConsumer<String, String> consumer = createConsumer(false);

        // subscribe to one (or several) topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records) {
                // persist the record to the database here
                System.out.printf("partition = %d,offset = %d,key = %s,value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
                // on processing failure, roll back instead of committing
            }
            // on success, acknowledge the batch asynchronously
            consumer.commitAsync();
        }
    }

    /**
     * Manual offset commit with per-partition granularity: records are
     * processed partition by partition and each partition's offset is
     * committed synchronously as soon as that partition is done.
     */
    public static void commitOffsetWithPartition() {
        // auto-commit OFF — offsets are committed explicitly per partition
        KafkaConsumer<String, String> consumer = createConsumer(false);

        // subscribe to one (or several) topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            consumePartitionsAndCommit(consumer, records);
        }
    }

    /**
     * Like {@link #commitOffsetWithPartition()} but with manual partition
     * assignment instead of group subscription: only partition 0 of the
     * topic is consumed.
     */
    public static void commitOffsetWithPartition2() {
        // auto-commit OFF — offsets are committed explicitly per partition
        KafkaConsumer<String, String> consumer = createConsumer(false);

        // djk_topic has two partitions: 0 and 1
        TopicPartition partition0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition partition1 = new TopicPartition(TOPIC_NAME, 1);

        // manual assignment (no consumer-group rebalancing)
//        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        consumer.assign(Arrays.asList(partition0));

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            consumePartitionsAndCommit(consumer, records);
        }
    }

    /**
     * Hello-world variant with an SSL-secured connection.
     * NOTE(review): the truststore path/password are demo values — replace
     * them before use.
     */
    public static void hellowordWithSSL() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "101.35.134.133:9092");
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", "true");
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        props.put("security.protocol", "SSL");
        // empty algorithm disables hostname verification (demo only)
        props.put("ssl.endpoint.identification.algorithm", "");
        props.put("ssl.truststore.location", "client.truststore.jks");
        props.put("ssl.truststore.password", "jiangzh");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // subscribe to one (or several) topics
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition = %d , offset = %d, key = %s, value = %s%n",
                        record.partition(), record.offset(), record.key(), record.value());
            }
        }
    }

    /**
     * Manually choosing the starting offset (via {@code seek}) combined with
     * manual offset commit on a manually assigned partition.
     */
    public static void controlOffset() {
        // auto-commit OFF — offsets are committed explicitly per partition
        KafkaConsumer<String, String> consumer = createConsumer(false);

        // consume only partition 1 of djk_topic
        TopicPartition partition0 = new TopicPartition(TOPIC_NAME, 1);

        // manual assignment (no consumer-group rebalancing)
//        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        consumer.assign(Arrays.asList(partition0));

        while (true) {
            // Manually pick the offset to start from:
            //   1. the application controls the starting offset itself
            //   2. on failure, at most one batch is re-consumed
            // Typical pattern:
            //   1. first run starts from 0
            //   2. after consuming e.g. 100 records, store offset 101 in Redis
            //   3. before every poll, read the latest offset from Redis
            //   4. seek to that position and consume from there
//            consumer.seek(partition0, 3);
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            consumePartitionsAndCommit(consumer, records);
        }
    }

    /**
     * Flow control: pauses partition 0 once a quota of records has been
     * handled and resumes it from the partition-1 branch — a sketch of a
     * token-bucket style rate limiter.
     */
    public static void controlPause() {
        // auto-commit OFF — offsets are committed explicitly per partition
        KafkaConsumer<String, String> consumer = createConsumer(false);

        // djk_topic has two partitions: 0 and 1
        TopicPartition partition0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition partition1 = new TopicPartition(TOPIC_NAME, 1);

        // manual assignment (no consumer-group rebalancing)
//        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        consumer.assign(Arrays.asList(partition0, partition1));
        long totalNum = 40;
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            /*
                1. after receiving records, try to take a token from the bucket
                2. if a token is obtained, continue with business processing
                3. if not, pause() and wait for tokens
                4. once the bucket has enough tokens, resume() the consumer
             */
            long num = 0;
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecord) {
                    // persist the record to the database here
                    System.out.printf("partition = %d,offset = %d,key = %s,value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                    // on processing failure, roll back instead of committing
                    num++;
                    if (record.partition() == 0) {
                        // quota for partition 0 exhausted — stop fetching it
                        if (num >= totalNum) {
                            consumer.pause(Arrays.asList(partition0));
                        }
                    }

                    if (record.partition() == 1) {
                        // "tokens replenished" — let partition 0 flow again
                        if (num == totalNum) {
                            consumer.resume(Arrays.asList(partition0));
                        }
                    }
                }

                // commit the NEXT offset to read, i.e. last processed + 1
                long lastOffset = pRecord.get(pRecord.size() - 1).offset();
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
                consumer.commitSync(offset);
                System.out.println("============partition-" + partition + "end===============");
            }
        }
    }
}
