package com.tjhk.kafka.demo.listener;


import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.time.LocalDateTime;
import java.util.List;
import java.util.Map;


@Component
public class KafkaBatchConsumer {

    private static final Logger log = LoggerFactory.getLogger(KafkaBatchConsumer.class);


    /**
     * Builds the listener container factory used by {@link #batchListener(List, Acknowledgment)}.
     *
     * <p>Configures batch delivery, asynchronous offset commits with a failure-logging
     * callback, and manual acknowledgment (the listener commits each batch itself).
     *
     * @param consumerFactory the consumer factory supplied by the Spring context
     * @return a batch-enabled container factory registered as "batchContainerFactory"
     */
    @Bean("batchContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, byte[]> listenerContainer(ConsumerFactory<String, byte[]> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, byte[]> container = new ConcurrentKafkaListenerContainerFactory<>();
        container.setConsumerFactory(consumerFactory);
        // Concurrency is configured dynamically via the ${concurrency} property on the
        // @KafkaListener annotation; it must be <= the topic's partition count.
//        container.setConcurrency(2);
//        container.getContainerProperties().setPollTimeout(2000);
        // Commit offsets asynchronously.
        container.getContainerProperties().setSyncCommits(false);
        // onComplete fires for EVERY commit attempt (success and failure alike),
        // so only log when an exception is actually present; passing the exception
        // as the last argument lets SLF4J print the full stack trace.
        container.getContainerProperties().setCommitCallback((offsets, e) -> {
            if (e != null) {
                log.error("offset提交失败，map:{}", offsets, e);
            }
        });
        // Deliver records to the listener in batches.
        container.setBatchListener(true);
        // The listener method declares an Acknowledgment parameter and calls
        // acknowledge() itself, which requires a MANUAL ack mode. AckMode.TIME
        // would auto-commit on a timer and reject the Acknowledgment parameter.
        container.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return container;
    }


    /**
     * Batch listener for "test-topic".
     *
     * <p>Logs each record (value decoded as GBK) and manually commits the whole
     * batch afterwards. Records whose value cannot be decoded are logged and
     * skipped rather than failing the batch.
     *
     * <p>Note: {@code topics} and {@code topicPartitions} are mutually exclusive
     * on {@code @KafkaListener}; the topic is declared inside {@code @TopicPartition},
     * with the partition list and concurrency resolved from configuration properties.
     *
     * @param records the batch of records polled from the assigned partitions
     * @param ack     handle used to manually commit the batch's offsets
     */
    @KafkaListener(topicPartitions = { @TopicPartition(topic = "test-topic", partitions = "#{'${partitions}'.split(',')}") },
            containerFactory = "batchContainerFactory", concurrency = "${concurrency}")
    public void batchListener(List<ConsumerRecord<String, byte[]>> records, Acknowledgment ack) {
        log.info(">>>>>>.TreadID:{},接收消息数量：{}条", Thread.currentThread().getId(), records.size());
        for (ConsumerRecord<String, byte[]> record : records) {
            try {
                // Decode with an explicit Charset (no checked UnsupportedEncodingException).
                log.info("partition={}, offset={}, key ={},value ={}", record.partition(), record.offset(), record.key(), new String(record.value(), Charset.forName("GBK")));
            } catch (Exception e) {
                // Best-effort: drop the undecodable record and keep processing the batch.
                log.error("丢弃消息，数据格式转换错误！-->partition={}, offset={}, key ={},value ={}", record.partition(), record.offset(), record.key(), record.value());
            }
        }
        // Manually commit offsets for the entire batch.
        ack.acknowledge();
    }

}

