package com.wp.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.*;

/**
 * Demo consumer that rewinds all assigned partitions to offset 0, drains the
 * topic, and combines per-batch async commits with a final sync commit on
 * shutdown (manual offset control: enable.auto.commit=false).
 */
public class KafkaConsumerWithOffsetControl2 {
    public static void main(String[] args) {
        Properties prop = new Properties();
        // Kafka broker to bootstrap from.
        prop.put("bootstrap.servers", "192.168.100.141:9092");
        // Consumer group id.
        prop.put("group.id", "wp");
        // Where to start when no committed offset exists: earliest/latest/none.
//        prop.put("auto.offset.reset", "earliest");
        // Key deserializer.
        prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Value deserializer.
        prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        // Offsets are committed manually below.
        prop.put("enable.auto.commit", "false");
        prop.put("session.timeout.ms", "60000");
        prop.put("max.poll.interval.ms", "60000");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);

        /*
         * A first poll triggers the initial partition assignment; only after
         * that does consumer.assignment() return the partitions owned by this
         * consumer.
         */
        consumer.subscribe(Collections.singletonList("chantree-all"));
        // NOTE(review): poll(long) is deprecated since kafka-clients 2.0; switch to poll(Duration) when the client version allows.
        consumer.poll(10000);
        System.out.println(consumer.subscription());
        Set<TopicPartition> assignmentSet = consumer.assignment();
        // Alternative: consumer.seekToBeginning(assignmentSet);
        // Rewind every assigned partition to the beginning.
        for (TopicPartition partition : assignmentSet) {
            consumer.seek(partition, 0L);
        }
        for (TopicPartition partition : assignmentSet) {
            long offset = consumer.position(partition);
            System.out.println(partition + "==>" + offset);
        }
        /*
         * Async commits while consuming, plus one final sync commit in the
         * finally block to guarantee the last offsets are persisted.
         */
        boolean flag = true;
        try {
            Map<TopicPartition, OffsetAndMetadata> currentOffset = new HashMap<>();
            while (flag) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                // Exit once a poll returns no data (the topic has been drained).
                // BUG FIX: the original inverted this condition and stopped
                // after the first NON-empty poll, contradicting its own comment.
                if (records.isEmpty()) {
                    flag = false;
                }
                System.out.println("count=" + records.count());
                // Track only this batch's progress: reset once per poll, then
                // keep the latest offset per partition. (The original cleared
                // inside the record loop, discarding every partition's offset
                // except the last record's.)
                currentOffset.clear();
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
                    // Commit the offset of the NEXT record to consume
                    // (offset + 1); committing record.offset() itself would
                    // re-deliver the last record after a restart.
                    currentOffset.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1, "metadata"));
                }
                if (!currentOffset.isEmpty()) {
                    // Report async commit failures instead of passing a null callback.
                    consumer.commitAsync(currentOffset, (offsets, exception) -> {
                        if (exception != null) {
                            System.out.println("commitAsync failed for " + offsets + ": " + exception);
                        }
                    });
                }
                for (TopicPartition partition : assignmentSet) {
                    long offset = consumer.position(partition);
                    System.out.println(partition + "==>" + offset);
                }
            }
        } catch (Exception e) {
            // Include the actual failure instead of swallowing it.
            System.out.println("consume/commit failed: " + e);
        } finally {
            try {
                // Synchronous commit of the final positions before closing.
                consumer.commitSync();
            } finally {
                consumer.close();
            }
        }
    }
}
