package hn.cch.kafka;

import hn.cch.KafkaApp;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Properties;
import java.util.Set;

public class ConsumerDemo {

    private static final Logger logger = LoggerFactory.getLogger(ConsumerDemo.class);

    /**
     * Builds the shared consumer configuration: bootstrap servers, String
     * key/value deserializers, a fixed consumer group, and manual offset
     * commits (auto-commit disabled).
     *
     * @return a fresh {@link Properties} instance for {@link KafkaConsumer}
     */
    private static Properties consumerProperties() {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaApp.server);
        // Deserializers mirror the producer's String serializers.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "ConsumerGroup");
        // Manual commit: offsets are committed explicitly in the poll loop.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        return properties;
    }

    /**
     * Demonstrates manual offset commits, both synchronous and asynchronous.
     * Runs an endless poll loop; terminates only via an exception (e.g.
     * thread interruption or broker unavailability).
     */
    @Test
    public void test01() {
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProperties())) {
            int partition = 0;
            // Subscribe ONCE, before the loop: re-subscribing on every
            // iteration forces needless group rebalancing. A consumer may
            // subscribe to several topics, or to a regex pattern:
            // kafkaConsumer.subscribe(Pattern.compile(KafkaApp.topic));
            kafkaConsumer.subscribe(Collections.singletonList(KafkaApp.topic));
            while (true) {
                // poll() blocks up to the given timeout waiting for records.
                ConsumerRecords<String, String> consumerRecords =
                        kafkaConsumer.poll(Duration.ofMillis(KafkaApp.timeout));
                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                    logger.info("partition={},offset={},key={},value={}",
                            consumerRecord.partition(), consumerRecord.offset(),
                            consumerRecord.key(), consumerRecord.value());
                }

                // Inspect the last offset seen for one partition. Guard
                // against an empty poll: calling get(size - 1) unconditionally
                // throws IndexOutOfBoundsException when no records arrived.
                TopicPartition topicPartition = new TopicPartition(KafkaApp.topic, partition);
                List<ConsumerRecord<String, String>> partitionRecords =
                        consumerRecords.records(topicPartition);
                if (!partitionRecords.isEmpty()) {
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    logger.info("last offset for partition {}: {}", partition, lastOffset);
                }

                // Synchronous commit: blocks until the broker acknowledges.
                kafkaConsumer.commitSync();
                // Asynchronous commit: returns immediately; failures surface
                // in the OffsetCommitCallback. Both variants are shown here
                // purely for demonstration — real code would pick one.
                kafkaConsumer.commitAsync((offsets, e) -> {
                    if (e != null) {
                        // Keep the stack trace; getMessage() alone loses it.
                        logger.error("async commit failed for {}", offsets, e);
                    }
                });
            }
        } catch (Exception e) {
            logger.error("consumer loop terminated", e);
        }
    }

    /**
     * Demonstrates seeking every assigned partition to a fixed offset before
     * consuming. Partitions are assigned lazily during poll(), so the seek is
     * performed once, after the first non-empty assignment — seeking on every
     * iteration would rewind to the same offset and re-read the same records
     * forever.
     */
    @Test
    public void test02() {
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProperties())) {
            long offset = 10L;
            // Subscribe once; see test01 for the multi-topic / regex variants.
            kafkaConsumer.subscribe(Collections.singletonList(KafkaApp.topic));
            boolean seeked = false;
            while (true) {
                ConsumerRecords<String, String> consumerRecords =
                        kafkaConsumer.poll(Duration.ofMillis(KafkaApp.timeout));

                // assignment() stays empty until the first rebalance has
                // completed inside poll(); retry until partitions arrive,
                // then seek exactly once.
                if (!seeked) {
                    Set<TopicPartition> assignment = kafkaConsumer.assignment();
                    if (!assignment.isEmpty()) {
                        for (TopicPartition topicPartition : assignment) {
                            kafkaConsumer.seek(topicPartition, offset);
                        }
                        seeked = true;
                    }
                }

                for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                    logger.info("partition={},offset={},key={},value={}",
                            consumerRecord.partition(), consumerRecord.offset(),
                            consumerRecord.key(), consumerRecord.value());
                }
            }
        } catch (Exception e) {
            logger.error("consumer loop terminated", e);
        }
    }

}
