package com.xuegao.xuegaospringboottest.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ConsumerRecordRecoverer;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.FixedBackOff;

import java.util.HashMap;
import java.util.Map;

@Configuration
public class KafkaConfig {
    private static final Logger log = LoggerFactory.getLogger(KafkaConfig.class);

    /** Kafka broker bootstrap address list. */
    @Value("${spring.kafka.bootstrap-servers:localhost:9092}")
    private String servers;

    @Value("${spring.kafka.producer.acks:1}")
    private String acks;
    @Value("${spring.kafka.producer.retries:3}")
    private String retries;
    @Value("${spring.kafka.producer.key-serializer:org.apache.kafka.common.serialization.StringSerializer}")
    private String producerKeySerializer;
    @Value("${spring.kafka.producer.value-serializer:org.springframework.kafka.support.serializer.JsonSerializer}")
    private String producerValueSerializer;

    @Value("${spring.kafka.consumer.group-id:xue-gao-group-dev}")
    private String groupId;
    @Value("${spring.kafka.consumer.auto-offset-reset:earliest}")
    private String autoOffsetReset;
    @Value("${spring.kafka.consumer.key-deserializer:org.apache.kafka.common.serialization.StringDeserializer}")
    private String consumerKeyDeserializer;
    // Fixed property key: was "value-serializer", which is the producer-side key;
    // the consumer deserializer must be bound to "value-deserializer" or any
    // externalized override is silently ignored and only the default applies.
    @Value("${spring.kafka.consumer.value-deserializer:org.springframework.kafka.support.serializer.JsonDeserializer}")
    private String consumerValueDeserializer;

    /**
     * Builds the producer client configuration map.
     *
     * @return producer properties (bootstrap servers, acks, retries, serializers)
     */
    public Map<String, Object> produceConfig() {
        Map<String, Object> configMap = new HashMap<>(10);
        configMap.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        configMap.put(ProducerConfig.ACKS_CONFIG, acks);
        configMap.put(ProducerConfig.RETRIES_CONFIG, retries);
        // Key serializer
        configMap.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, producerKeySerializer);
        // Value serializer
        configMap.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, producerValueSerializer);
        return configMap;
    }

    /**
     * Builds the consumer client configuration map.
     *
     * @return consumer properties (bootstrap servers, group id, offset reset, deserializers)
     */
    public Map<String, Object> consumerConfig() {
        Map<String, Object> configMap = new HashMap<>(10);
        configMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        configMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        configMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        // Key deserializer
        configMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, consumerKeyDeserializer);
        // Value deserializer. NOTE(review): if JsonDeserializer is used for
        // project payload types, it may additionally need trusted-packages
        // configuration — confirm against the consuming listeners.
        configMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, consumerValueDeserializer);
        return configMap;
    }

    /** Creates the producer factory backing {@link #kafkaTemplate()}. */
    public ProducerFactory<String, Object> producerFactory() {
        return new DefaultKafkaProducerFactory<>(produceConfig());
    }

    /** Creates the consumer factory backing {@link #listenerContainerFactory()}. */
    public ConsumerFactory<String, Object> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfig());
    }

    /**
     * Listener container factory: batch listening enabled, offsets committed
     * manually by the listener (AckMode.MANUAL).
     *
     * @return the configured container factory bean
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> listenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> listenerContainerFactory = new ConcurrentKafkaListenerContainerFactory<>();
        listenerContainerFactory.setConsumerFactory(consumerFactory());
        listenerContainerFactory.setBatchListener(Boolean.TRUE);
        listenerContainerFactory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return listenerContainerFactory;
    }

    /** Template for publishing messages (also used for dead-letter publishing). */
    @Bean("kafkaTemplate")
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /**
     * Creates the {@link DefaultErrorHandler} that drives consumer retry and
     * dead-lettering: the {@link DeadLetterPublishingRecoverer} forwards a record
     * to the dead-letter topic once the retries configured by the {@link BackOff}
     * are exhausted. A {@code FixedBackOff} (10s interval, 3 attempts) is used;
     * {@code ExponentialBackOff} could be substituted for growing intervals.
     *
     * @param template template used to publish failed records to the dead-letter topic
     * @return org.springframework.kafka.listener.DefaultErrorHandler
     * @author xuegao
     * @date 2022/7/31 14:50
     */
    @Bean
    public DefaultErrorHandler kafkaErrorHandler(KafkaTemplate<?, ?> template) {
        log.info("[xue-gao-spring-boot-test][KafkaConfig][kafkaErrorHandler][kafkaErrorHandler begin to Handle]");
        // <1> Recoverer that publishes exhausted records to the dead-letter topic
        ConsumerRecordRecoverer recordRecoverer = new DeadLetterPublishingRecoverer(template);
        // <2> Fixed back-off: retry every 10 seconds, at most 3 times
        BackOff backOff = new FixedBackOff(10 * 1000L, 3L);
        // <3> Error handler wiring recoverer and back-off together
        return new DefaultErrorHandler(recordRecoverer, backOff);
    }
}
