package vip.meeet.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

/**
 * Demo Kafka consumer showing four offset-commit strategies against a local
 * three-broker cluster: auto-commit, manual synchronous commit, manual
 * asynchronous commit (with callback), and manual per-partition offset commit.
 *
 * <p>Switch the demo being run by changing the call in {@link #main(String[])}.
 * Alternative topics used during development: {@code test_topic}, {@code kfk_topic}.
 */
public class KfkConsumer {
    private static final String TOPIC = "multipar_topic";

    private static final Properties properties = new Properties();

    static {
        // ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG
        properties.put("bootstrap.servers", "127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094");
        // Start from the earliest offset; only takes effect for a brand-new consumer group.
        properties.put("auto.offset.reset", "earliest");
        // Maximum number of records returned by a single poll().
        properties.put("max.poll.records", 1000);
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    }

    public static void main(String[] args) {
//        autoCommit();
//        manulSynCommit();
//        manulAsynCommit();
        manulCommitOffset();
    }

    /**
     * Strategy 1: let the client auto-commit offsets on a fixed interval.
     * Runs until the process is killed.
     */
    private static void autoCommit() {
        properties.put("group.id", "groupName");                 // consumer group
        properties.put("enable.auto.commit", "true");            // enable auto-commit
        properties.put("auto.commit.interval.ms", "1000");       // auto-commit interval
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        Duration duration = Duration.of(5, ChronoUnit.SECONDS);
        consumer.subscribe(Collections.singleton(TOPIC));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(duration);
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s%n",
                        record.offset(), record.key(), record.value());
            }
        }
    }

    /**
     * Strategy 2: manual synchronous commit. Commits the offsets of everything
     * polled so far once at least {@code 50} records have been processed.
     * commitSync() blocks and retries until the commit succeeds or a fatal
     * error occurs.
     */
    private static void manulSynCommit() {
        properties.put("group.id", "manulSynCommit");
        properties.put("enable.auto.commit", "false");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        Duration duration = Duration.of(100, ChronoUnit.MILLIS);
        consumer.subscribe(Collections.singleton(TOPIC));
        int count = 0;
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(duration);
                for (ConsumerRecord<String, String> record : records) {
                    count++;
                    printRecord(record);
                }
                if (count >= 50) {
                    System.out.println(count);
                    consumer.commitSync();
                    count = 0;
                }
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * Strategy 3: manual asynchronous commit after every poll, with a callback
     * that logs failures and the committed offsets. A final synchronous commit
     * is issued on shutdown so the last in-flight async commit is not lost
     * (the standard commitAsync-in-loop / commitSync-on-close pattern).
     */
    private static void manulAsynCommit() {
        properties.put("group.id", "manulAsynCommit");
        properties.put("enable.auto.commit", "false");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        Duration duration = Duration.of(5, ChronoUnit.SECONDS);
        consumer.subscribe(Collections.singleton(TOPIC));
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(duration);
                for (ConsumerRecord<String, String> record : records) {
                    printRecord(record);
                }
                // Asynchronous commit with a callback to check for success.
                consumer.commitAsync(new OffsetCommitCallback() {
                    @Override
                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                        if (exception != null) {
                            System.err.println(exception.getMessage());
                            exception.printStackTrace();
                        } else {
                            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
                                TopicPartition topicPartition = entry.getKey();
                                System.out.println(topicPartition.partition() + "--" + topicPartition.topic());
                                OffsetAndMetadata offsetAndMetadata = entry.getValue();
                                System.out.println(offsetAndMetadata.offset() + "--" + offsetAndMetadata.metadata());
                            }
                        }
                    }
                });
            }
        } finally {
            try {
                // Make sure the final position is committed before closing.
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }
    }

    /**
     * Strategy 4: manual commit of explicit per-partition offsets. Tracks the
     * next offset (last consumed offset + 1) for EVERY partition seen, not just
     * the last record of the batch — the topic has multiple partitions, so
     * committing only the last record's partition would silently lose progress
     * on the others. Commits once more than 1000 records have accumulated.
     */
    private static void manulCommitOffset() {
        properties.put("group.id", "manulCommitOffset");
        properties.put("enable.auto.commit", "false");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        Duration duration = Duration.of(5, ChronoUnit.SECONDS);
        consumer.subscribe(Collections.singleton(TOPIC));
        int count = 0;
        // Pending offsets to commit, keyed by partition; cleared after each commit.
        Map<TopicPartition, OffsetAndMetadata> pendingOffsets = new HashMap<>();
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(duration);
                for (ConsumerRecord<String, String> record : records) {
                    count++;
                    // The next fetch for this partition should start at offset + 1.
                    pendingOffsets.put(
                            new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1, "no"));
                    printRecord(record);
                }
                if (count > 1000 && !pendingOffsets.isEmpty()) {
                    System.out.println(count);
                    consumer.commitSync(pendingOffsets);
                    pendingOffsets.clear();
                    count = 0;
                }
            }
        } finally {
            consumer.close();
        }
    }

    /** Prints one record's coordinates and payload on a single line. */
    private static void printRecord(ConsumerRecord<String, String> record) {
        System.out.printf(
                "topic=%s, partition=%d,offset=%d,key=%s,value=%s",
                record.topic(),
                record.partition(),
                record.offset(),
                record.key(),
                record.value()
        );
        System.out.println();
    }
}
