package cn.doitedu.kafka.consumer;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

/***
 * @author hunter.d
 * @qq 657270652
 * @wx haitao-duan
 * @date 2020/11/12
 **/
/**
 * Demo of the Kafka consumer API: subscription with a rebalance listener,
 * the poll loop, and (in the commented-out sections) manual partition
 * assignment, seeking to a specific offset, and manual offset commits.
 *
 * <p>Only one consumption pattern can be active at a time because they all
 * share the single consumer instance created in {@link #main(String[])}.
 */
public class KafkaConsumerDemo {
    // Field names kept as-is for source compatibility with existing callers.
    // Values fixed: the originals contained stray spaces ("localhost : 9092 ",
    // "topic- demo") that made the bootstrap address and topic name invalid.
    public static final String brokerList = "localhost:9092";
    public static final String topic = "topic-demo";
    public static final String groupid = "group.demo";
    public static final String clientid = "client.demo";
    // Flip to false (e.g. from a shutdown hook) to exit the poll loop cleanly.
    public static final AtomicBoolean isRunning = new AtomicBoolean(true);

    /**
     * Builds the consumer configuration: String deserializers for key and
     * value, bootstrap servers, group id and client id.
     *
     * @return the populated {@link Properties} for a {@code KafkaConsumer}
     */
    public static Properties initConfig() {
        Properties props = new Properties();
        // Set each property exactly once via the ConsumerConfig constants.
        // (The original also re-set the same five keys with raw string
        // literals, which was redundant.)
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupid);
        props.put(ConsumerConfig.CLIENT_ID_CONFIG, clientid);
        return props;
    }

    public static void main(String[] args) {
        Properties props = initConfig();

        // Subscribe exactly once, attaching a rebalance listener so offsets
        // can be saved/restored around partition reassignment.
        //
        // NOTE(fix): the original code subscribed, then called unsubscribe()
        // twice, re-subscribed to an empty list and assigned an empty
        // partition list — leaving the consumer with nothing to consume, so
        // poll() threw IllegalStateException immediately. It also called
        // subscribe() again AFTER consumer.close(), which always throws.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener() {
                // Invoked before a rebalance begins, after the consumer has
                // stopped fetching: a good place to persist current offsets.
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // store the current offsets to an external store (e.g. db)
                }

                // Invoked after partitions are (re)assigned, before fetching
                // resumes: a good place to seek to restored offsets.
                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                    // restore offsets from the external store and seek()
                }
            });

            while (isRunning.get()) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    // process record.topic() / record.partition() /
                    // record.offset() / record.value() here
                }
            }
        } catch (Exception e) {
            // Demo-level handling; real code would log and decide whether to
            // restart or propagate.
            e.printStackTrace();
        }
        // try-with-resources guarantees consumer.close() runs, leaving the
        // group cleanly.

        /*
         * Alternative pattern 2: explicit partition assignment (no consumer
         * group rebalancing).
         */
/*        TopicPartition tp1 = new TopicPartition("x", 0);
        TopicPartition tp2 = new TopicPartition("y", 0);
        TopicPartition tp3 = new TopicPartition("z", 0);
        List<TopicPartition> tps = Arrays.asList(tp1, tp2, tp3);
        consumer.assign(tps);

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (TopicPartition tp : tps) {
                List<ConsumerRecord<String, String>> rList = records.records(tp);
                for (ConsumerRecord<String, String> r : rList) {
                    // process r.topic()/r.partition()/r.offset()/r.value()
                }
            }
        }*/

        /*
         * Alternative pattern: consume from a specific offset.
         * A poll() is required before seek() so partitions get assigned.
         */
        /*consumer.poll(Duration.ofMillis(1000));

        // Fetch the partitions assigned to this consumer.
        Set<TopicPartition> assignment = consumer.assignment();
        for (TopicPartition topicPartition : assignment) {
            // Set the starting read offset for each assigned partition.
            consumer.seek(topicPartition, 80);
        }
        // Start consuming from the sought offsets.
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // do some process
            }
        }*/

        /*
         * Alternative pattern: manual synchronous offset commit after each
         * record (commits the whole fetched position).
         */
/*        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> r : records) {
                // process the record, then commit
                consumer.commitSync();
            }
        }*/

        /*
         * Alternative pattern: manual synchronous commit of a specific
         * per-partition offset (committed offset = last processed + 1).
         */
/*        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> r : records) {
                long offset = r.offset();

                // process the record, then commit its successor offset
                TopicPartition topicPartition = new TopicPartition(r.topic(), r.partition());
                consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset + 1)));
            }
        }*/

        /*
         * Alternative pattern: asynchronous offset commit with a completion
         * callback for error reporting.
         */
        /*while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> r : records) {
                long offset = r.offset();

                // process the record, then commit its successor offset
                TopicPartition topicPartition = new TopicPartition(r.topic(), r.partition());
                consumer.commitAsync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset + 1)), new OffsetCommitCallback() {
                    @Override
                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> map, Exception e) {
                        if (e == null) {
                            System.out.println(map);
                        } else {
                            System.out.println("error commit offset");
                        }
                    }
                });
            }
        }*/
    }
}
