package com.springstack.kafka.consumer.config;


import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;


import java.util.HashMap;
import java.util.Map;

/**
 * Kafka consumer configuration.
 *
 * @author qiangfanghao
 * @date 2022/7/24
 */
@Slf4j
@Configuration
@EnableConfigurationProperties(KafkaConsumerProperties.class)
public class KafkaConsumerConfig {

    /**
     * Builds the consumer factory manually instead of relying on the
     * DefaultKafkaConsumerFactory auto-configured by Spring Boot, so that every
     * consumer property is sourced explicitly from {@link KafkaConsumerProperties}.
     *
     * @param kafkaConsumerProperties externalized consumer settings bound from configuration
     * @return a consumer factory producing {@code String} key / {@code String} value consumers
     */
    @Bean("consumerFactory")
    public DefaultKafkaConsumerFactory<String,String> consumerFactory(KafkaConsumerProperties kafkaConsumerProperties){
        Map<String, Object> configs = new HashMap<>();
        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConsumerProperties.getBootstrapServers());
        configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, kafkaConsumerProperties.getKeyDeserializer());
        configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, kafkaConsumerProperties.getValueDeserializer());
        // One shared groupId for all listeners; can also be overridden per listener
        // via the @KafkaListener(groupId = ...) annotation attribute.
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConsumerProperties.getGroupId());
        configs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, kafkaConsumerProperties.getEnableAutoCommit());
        configs.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafkaConsumerProperties.getAutoCommitIntervalMs());
        configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerProperties.getAutoOffsetReset());
        configs.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, kafkaConsumerProperties.getHeartbeatIntervalMs());
        configs.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, kafkaConsumerProperties.getSessionTimeoutMs());
        // Upper bound on records returned by a single poll() — this (not
        // setConcurrency below) is what controls batch size for batch listeners.
        configs.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, kafkaConsumerProperties.getMaxPollRecords());
        // Diamond operator: type arguments are inferred from the declared return type.
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    /**
     * Listener container factory with batch consumption enabled; can also host a
     * record-filtering strategy (see the commented example below).
     * Listeners that do not want batch delivery should reference a separately
     * registered factory bean instead.
     * Referenced via {@code @KafkaListener(containerFactory = "listenerContainerFactory")}.
     *
     * @param consumerFactory the consumer factory defined above
     * @return a container factory for {@code String}/{@code String} batch listeners
     */
    @Bean("listenerContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String,String> listenerContainerFactory(DefaultKafkaConsumerFactory<String,String> consumerFactory) {

        ConcurrentKafkaListenerContainerFactory<String,String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        /*
         * Offset commit mode. Kafka must not have both the consumer client's
         * auto-commit and the listener container's manual ack mode enabled at the
         * same time: to use MANUAL_IMMEDIATE below, set enable-auto-commit=false
         * in KafkaConsumerProperties first.
         */
        //factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);

        // Deliver records to the listener in batches (List<ConsumerRecord>).
        factory.setBatchListener(true);
        // Number of concurrent consumer containers (threads) — NOT the batch size.
        // Batch size is bounded by max.poll.records, configured in consumerFactory().
        // Should not exceed the topic's partition count, or the extra consumers idle.
        factory.setConcurrency(3);

        // Record filter strategy example — returning true discards the record:
        //factory.setRecordFilterStrategy(new RecordFilterStrategy() {
        //    @Override
        //    public boolean filter(ConsumerRecord consumerRecord) {
        //        String msg = consumerRecord.value().toString();
        //        if(Integer.parseInt(msg.substring(msg.length() - 1)) % 2 == 0){
        //            return false;
        //        }
        //        // Returning true means the record is dropped.
        //        return true;
        //    }
        //});

        return factory;
    }

}
