package com.vincent;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Created by vincent on 2018/7/27.
 */
/**
 * Created by vincent on 2018/7/27.
 *
 * Standalone demo: runs a Kafka consumer on a background thread, printing every
 * record and manually committing each partition's offset after its batch is
 * processed.
 */
public class ConsumerTest {

    /**
     * Entry point. Submits the endless consume loop to a thread pool and
     * returns; the pool's non-daemon worker keeps the JVM alive.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        int availableProcessors = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(availableProcessors * 2);
        executorService.execute(ConsumerTest::consumeForever);
    }

    /**
     * Polls the "test-spring-kafka" topic in an endless loop, printing each
     * record and committing offsets per partition via {@code commitSync}.
     */
    private static void consumeForever() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.131.131:9092");
        props.put("group.id", "test");
        // Start from the beginning of the topic when no committed offset exists.
        props.put("auto.offset.reset", "earliest");
        // Offsets are committed manually below, so auto-commit must be off.
        // (auto.commit.interval.ms was removed: it is ignored when auto-commit is disabled.)
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // try-with-resources ensures the consumer's sockets and buffers are
        // released if the loop ever exits (e.g. on an unrecoverable exception);
        // the original leaked the consumer in that case.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("test-spring-kafka"));
            while (true) {
                // NOTE(review): poll(long) is deprecated since Kafka clients 2.0;
                // switch to poll(Duration.ofMillis(8000)) once the client version allows.
                ConsumerRecords<String, String> records = consumer.poll(8000);
                // partitions() only contains partitions that returned records,
                // so each recordList below is non-empty.
                for (TopicPartition tp : records.partitions()) {
                    List<ConsumerRecord<String, String>> recordList = records.records(tp);
                    recordList.forEach(record ->
                            System.out.println(record.offset() + "     " + record.key() + "     " + record.value()));
                    // The committed offset is the position of the NEXT record to
                    // consume, i.e. the last processed offset plus one.
                    long newOffset = recordList.get(recordList.size() - 1).offset() + 1;
                    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(newOffset)));
                }
            }
        }
    }
}
