package com.wh.springkafka.consumer;

import com.wh.springkafka.util.PropertyManager;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

public class ConsumerSample {

    /** Topic consumed by every demo in this class. */
    public static final String TOPIC_NAME = "wanghao-topic";

    /** Broker host read from configuration; port 9092 is appended where used. */
    public static final String kafkaServerIp = PropertyManager.getProperty("KafkaZKServerIp");


    public static void main(String[] args) {
        autoCommit();
//        manualCommitForPartition();
//        manualCommitForPartition2();
    }

    /**
     * Builds the consumer configuration shared by all demos in this class.
     *
     * @param enableAutoCommit whether the client commits offsets automatically
     * @return a fully populated {@link Properties} for a String/String KafkaConsumer
     */
    private static Properties buildConsumerConfig(boolean enableAutoCommit) {
        Properties properties = new Properties();
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServerIp + ":9092");
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test");
        properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, String.valueOf(enableAutoCommit));
        // Only meaningful when auto commit is enabled; the broker ignores it otherwise.
        properties.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        return properties;
    }

    /**
     * Polls forever and commits offsets manually, partition by partition.
     * Shared by {@link #manualCommitForPartition()} and {@link #manualCommitForPartition2()}.
     * Never returns.
     *
     * @param consumer an already-subscribed/assigned consumer with auto commit disabled
     */
    private static void pollAndCommitPerPartition(KafkaConsumer<String, String> consumer) {
        while (true) {
            // Poll with a 10s timeout per iteration.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecords) {
                    System.out.printf("partition = %d,offset = %d, key = %s,value=%s%n "
                            , record.partition(), record.offset(), record.key(), record.value());
                }
                // Commit (last consumed offset + 1): the position the next poll starts from.
                // On failure, simply not committing acts as the rollback — the batch is redelivered.
                Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
                offsets.put(partition,
                        new OffsetAndMetadata(pRecords.get(pRecords.size() - 1).offset() + 1));
                consumer.commitSync(offsets);
                System.out.println("消费完成~~~~~~~~~~~partition " + partition.toString() + " end ~~~~~~~~~~~~~~~~");
            }
        }
    }

    /**
     * Demo: assign one concrete partition of the topic directly (no group rebalancing)
     * and commit offsets manually per partition. Never returns.
     */
    private static void manualCommitForPartition2() {
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(buildConsumerConfig(false));
        // assign() takes explicit partitions instead of subscribing to the whole topic.
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        consumer.assign(Arrays.asList(p0));
        pollAndCommitPerPartition(consumer);
    }

    /**
     * Demo: subscribe to the topic (group-managed assignment) and commit offsets
     * manually per partition. Never returns.
     */
    private static void manualCommitForPartition() {
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(buildConsumerConfig(false));
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        pollAndCommitPerPartition(consumer);
    }

    /**
     * Demo: manual commit of the whole polled batch at once via commitAsync().
     * Never returns.
     */
    private static void manualCommit() {
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(buildConsumerConfig(false));
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            // Poll with a 10s timeout per iteration.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition = %d,offset = %d, key = %s,value=%s%n "
                        , record.partition(), record.offset(), record.key(), record.value());
            }
            // Commit the batch asynchronously after successful processing.
            // On failure, not committing acts as the rollback — the batch is redelivered.
            consumer.commitAsync();
        }
    }

    /**
     * Demo: simplest form — offsets are committed automatically every
     * auto.commit.interval.ms (1s here). Never returns.
     */
    private static void autoCommit() {
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(buildConsumerConfig(true));
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            // Poll with a 10s timeout per iteration.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition = %d,offset = %d, key = %s,value=%s%n "
                        , record.partition(), record.offset(), record.key(), record.value());
            }
        }
    }
}
