package com.leigq.www.consumer.config;

import org.springframework.boot.autoconfigure.kafka.ConcurrentKafkaListenerContainerFactoryConfigurer;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.SeekToCurrentErrorHandler;

import static com.leigq.www.common.constant.QueueConstant.MAX_FAILURES;

/**
 * 队列消费端相关配置
 *
 * @author leiguoqing
 */
@Configuration
public class KafKaConsumerConfig {
    // NOTE(review): class name has a typo ("KafKa" -> "Kafka"); renaming would change the
    // generated bean name and any external references, so it is intentionally left as-is.

    /**
     * Batch consumer factory.
     * <p>
     * Batch size is controlled by the Kafka consumer property
     * {@code ConsumerConfig.MAX_POLL_RECORDS_CONFIG} (max.poll.records).
     *
     * @param kafkaProperties the Spring Boot Kafka properties used to build the consumer config
     * @return a listener container factory configured for batch consumption
     */
    @Bean("orderBatchFactory")
    public KafkaListenerContainerFactory<?> orderBatchFactory(KafkaProperties kafkaProperties) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties()));
        // Enable batch consumption; the per-poll batch size is set via
        // ConsumerConfig.MAX_POLL_RECORDS_CONFIG in the Kafka consumer configuration.
        factory.setBatchListener(true);
        return factory;
    }


    /**
     * Manual-acknowledgement consumer factory for order messages.
     * <p>
     * The listener is responsible for acknowledging each record; offsets are
     * committed in batches behind the scenes ({@link ContainerProperties.AckMode#MANUAL}).
     *
     * @param kafkaProperties the Spring Boot Kafka properties used to build the consumer config
     * @return a listener container factory configured for single-record, manually-acked consumption
     */
    @Bean("orderManualFactory")
    public KafkaListenerContainerFactory<?> orderManualFactory(KafkaProperties kafkaProperties) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(kafkaProperties.buildConsumerProperties()));
        factory.setBatchListener(false);
        // Ack mode reference:
        // RECORD           - commit after each processed record
        // BATCH (default)  - commit once per poll; frequency depends on how often poll is called
        // TIME             - commit every ackTime interval
        // COUNT            - commit after ackCount acknowledgements have accumulated
        // COUNT_TIME       - commit when either ackTime or ackCount is reached first
        // MANUAL           - the listener acks, but commits are still batched behind the scenes
        // MANUAL_IMMEDIATE - the listener acks, and each ack commits immediately
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }


    /**
     * Error-handling consumer factory with retry and dead-lettering.
     * <p>
     * After {@code MAX_FAILURES} delivery attempts fail, the record is published
     * to a dead-letter topic via {@link DeadLetterPublishingRecoverer}.
     *
     * @param configurer           the Boot-provided configurer that applies application Kafka properties
     * @param kafkaConsumerFactory the auto-configured consumer factory to back the containers
     * @param template             the Kafka template used to publish failed records to the dead-letter topic
     * @return a listener container factory with seek-to-current retry and dead-letter recovery
     */
    @Bean("errorListenerContainerFactory")
    public KafkaListenerContainerFactory<?> errorListenerContainerFactory(ConcurrentKafkaListenerContainerFactoryConfigurer configurer,
                                                                          ConsumerFactory<Object, Object> kafkaConsumerFactory, KafkaTemplate<Object, Object> template) {
        ConcurrentKafkaListenerContainerFactory<Object, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        configurer.configure(factory, kafkaConsumerFactory);
        // Retry up to MAX_FAILURES deliveries, then hand the record to the dead-letter recoverer.
        factory.setErrorHandler(new SeekToCurrentErrorHandler(new DeadLetterPublishingRecoverer(template), MAX_FAILURES));
        return factory;
    }

}
