package com.mgface.kafka.consumer;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;

/**
 * @author wanyuxiang
 * @version 1.0
 * @project kafkademo
 * @create 2019-01-19 22:25
 **/
@Slf4j
public class ConsumerMain {
    /**
     * Per-partition offset of the NEXT record to process (last processed offset + 1),
     * i.e. exactly what should be committed for each partition.
     */
    private static final Map<TopicPartition, OffsetAndMetadata> currentOffsets = new ConcurrentHashMap<>();

    /**
     * Consumes records from topic {@code topicC} with manual offset management:
     * tracks processed offsets per partition, commits asynchronously every 1000
     * records, and performs a final synchronous commit of the tracked offsets
     * before closing the consumer.
     */
    public static void main(String[] args) {
        Properties ps = new Properties();
        ps.put("bootstrap.servers", "localhost:9092");
        ps.put("key.deserializer", StringDeserializer.class.getName());
        ps.put("value.deserializer", StringDeserializer.class.getName());
        ps.put("group.id", "CGroup1");
        ps.put("fetch.min.bytes", 1024 * 16);           // 16K: minimum bytes the broker accumulates before answering a fetch
        ps.put("fetch.max.wait.ms", 200);               // broker replies after 200ms even if fetch.min.bytes is not yet reached
        ps.put("max.partition.fetch.bytes", 1024 * 1024); // 1M per partition; must be larger than the broker's max message size
        ps.put("session.timeout.ms", 10_000);           // how long the consumer may go silent before being considered dead
        ps.put("heartbeat.interval.ms", 1_000);         // frequency of heartbeats sent by the poll loop
        ps.put("auto.offset.reset", "latest");          // with no (valid) committed offset, start from the newest records
        // Automatic commit (disabled here):
        //ps.put("enable.auto.commit",true);//set to false to control commit timing ourselves and avoid loss/duplication
        //ps.put("auto.commit.interval.ms",5_000);//used with enable.auto.commit=true, default 5s
        // Manual commit: this class controls exactly when offsets are committed.
        ps.put("enable.auto.commit", false);
        //ps.put("partition.assignment.strategy", RoundRobinAssignor.class.getName());
        ps.put("max.poll.records", 1_00);               // maximum number of records returned by a single poll()

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(ps);
        consumer.subscribe(Collections.singletonList("topicC"));
        int count = 0;
        try {
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    count++;
                    log.info("主题:{},分区:{},偏移量:{},key:{},value:{}", record.topic(), record.partition(), record.offset(), record.key(), record.value());
                    // Record the offset of the NEXT message to process for this partition;
                    // committing offset+1 is the Kafka convention (commit = "resume here").
                    currentOffsets.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1, "no metadata"));
                    if (count % 1_000 == 0) {
                        // Periodic async commit of the tracked offsets; no callback needed here.
                        consumer.commitAsync(currentOffsets, null);
                    }
                }
                // ① Synchronous commit alternative:
//                try{
//                    consumer.commitSync();
//                }catch (Exception e){
//                    log.error("commit failed.",e);
//                }
                // ② Asynchronous commit alternative:
//                consumer.commitAsync((offsets,e)->{
//                    log.info("current offsets:{}",offsets.toString());
//                });
            }
        } finally {
            try {
                // FIX: commit the explicitly tracked offsets, not the bare commitSync().
                // The no-arg commitSync() commits poll()'s fetch position, which may
                // include records the loop never processed if an exception interrupted
                // it mid-batch — committing that position would skip (lose) them.
                consumer.commitSync(currentOffsets);
            } finally {
                consumer.close();
            }
        }
    }
}
