package com.yonyou.findata.kafka.newapi;

import com.google.common.collect.Lists;
import com.yonyou.findata.kafka.KafkaProperties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/**
 *  自动提交offset
 *
 * @author pizhihui
 * @date 2017-11-08
 *
 */
/**
 * Demo consumer that relies on Kafka's automatic offset committing
 * ({@code enable.auto.commit=true}): the client periodically commits the
 * latest polled offsets in the background, so no manual commit calls are made.
 */
public class ConsumerAutoCommit extends BaseConsumer {

    private static final Logger LOGGER = LoggerFactory.getLogger(ConsumerAutoCommit.class);

    /**
     * Creates a consumer with auto-commit enabled, subscribes to the "first"
     * topic and prints every record's offset, key and value until the process
     * is killed.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test11");
        // Enable automatic offset commits.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Interval between background auto-commits (ms). NOTE(review): 10 ms is
        // unusually aggressive — the Kafka default is 5000 ms; kept as-is for the demo.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Behaves like the CLI --from-beginning flag. Valid values: earliest, latest, none.
        // Interview note: to re-consume a topic's data either (1) switch to a new
        // consumer group, or (2) set this to "earliest".
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // KafkaConsumer is NOT thread-safe; it must be used from a single thread.
        // try-with-resources guarantees the consumer is closed (leaving the group
        // cleanly) on any exit path.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Subscribe to the topic(s) to consume.
            consumer.subscribe(Lists.newArrayList("first"));
            while (true) {
                // Duration-based poll; poll(long) is deprecated since Kafka 2.0.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
                }
            }
        }
    }

}
