#set( $symbol_pound = '#' )
#set( $symbol_dollar = '$' )
#set( $symbol_escape = '\' )
package ${package}.config.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import ${package}.config.kafka.property.KafkaTopicConfigDTO;
import ${package}.utils.ManualRegistBeanUtil;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.cloud.context.config.annotation.RefreshScope;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.annotation.Order;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaAdmin;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.listener.BatchErrorHandler;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.ErrorHandler;
import org.springframework.kafka.listener.SeekToCurrentBatchErrorHandler;
import org.springframework.kafka.listener.SeekToCurrentErrorHandler;
import org.springframework.kafka.support.serializer.JsonDeserializer;
import org.springframework.util.backoff.BackOff;
import org.springframework.util.backoff.ExponentialBackOff;

import java.util.Map;

/**
 * @version: 1.00.00
 * @description: Kafka configuration — producer/consumer factories, listener
 *               container factories, dynamic topic registration and
 *               dead-letter/retry error handling
 * @copyright: Copyright (c) 2022 Leelen Technology. All Rights Reserved
 * @company: Xiamen Leelen Technology Co., Ltd.
 * @author: hj
 * @date: 2022-05-18 10:21
 */
@Configuration
@RefreshScope
@Slf4j
@EnableKafka
@EnableConfigurationProperties({KafkaProperties.class,KafkaTopicConfigDTO.class})
@ConditionalOnProperty(prefix = "spring.kafka", name = "enable", havingValue = "true")
@Order(1)
public class KafKaMqConfig {

    @Autowired
    private KafkaProperties properties;

    @Autowired
    private KafkaAdmin kafkaAdmin;

//    @Bean
//    @RefreshScope
//    public List<NewTopic> createTopic(KafkaTopicConfigDTO kafkaTopicConfigDTO) throws Exception {
//        if (CollectionUtils.isEmpty(kafkaTopicConfigDTO.getTopicConfigList())) {
//            return null;
//        }
//        AdminClient adminClient = adminClient();
////        List<String> topicList = kafkaTopicConfigDTO.getTopicConfigList().stream()
////                .map(topicConfig -> topicConfig.getTopicName())
////                .collect(Collectors.toList());
////        DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(topicList);
////        deleteTopicsResult.all().get();
////        log.info("deleteTopicsResult:{}", JSON.toJSONString(deleteTopicsResult));
//        ListTopicsResult listTopicsResult = adminClient.listTopics();
//        Collection<TopicListing> topicListings = listTopicsResult.listings().get();
//        List<String> topicNameList = topicListings.stream()
//                .map(topicListing -> topicListing.name())
//                .distinct()
//                .collect(Collectors.toList());
//        List<NewTopic> newTopicList = kafkaTopicConfigDTO.getTopicConfigList().stream()
//                .filter(topicConfig -> !topicNameList.contains(topicConfig.getTopicName()))
//                .map(topicConfig -> new NewTopic(topicConfig.getTopicName(), topicConfig.getPartitionNums(), topicConfig.getRepPartitionNums()))
//                .collect(Collectors.toList());
//        CreateTopicsResult topics = adminClient.createTopics(newTopicList);
//        topics.all().get();
//        log.info("topics:{}", JSON.toJSONString(topics));
//        return newTopicList;
//    }

    @Bean
    public NewTopic initialReTopic(KafkaTopicConfigDTO kafkaTopicConfigDTO, ApplicationContext applicationContext) {
        kafkaTopicConfigDTO.getTopicConfigList().forEach(topicConfig -> {
            ManualRegistBeanUtil.registerBean((ConfigurableApplicationContext) applicationContext, topicConfig.getTopicName(), NewTopic.class, topicConfig.getTopicName(), topicConfig.getPartitionNums(), topicConfig.getRepPartitionNums());
        });
        return null;
    }
//    @Bean
//    public NewTopic initRecordReportTopic(){
//        return new NewTopic("record-report", 20, (short) 1);
//    }
//    @Bean
//    public NewTopic initRecordReportFowardTopic(){
//        return new NewTopic("record-report-forward", 20, (short) 1);
//    }
//    @Bean
//    public NewTopic initRecordReportCommunityTopic(){
//        return new NewTopic("record-report-community", 20, (short) 1);
//    }
    @Bean
    @Primary
    public KafkaTemplate<String, Object> customKafkaTemplate() {
        KafkaTemplate<String, Object> kafkaTemplate = new KafkaTemplate<>(producerFactory());
        return kafkaTemplate;
    }

    @Bean
    @Primary
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> propMap = properties.buildProducerProperties();
        //自定义分区策略
//        propMap.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, myPartitionStategy.class);
        DefaultKafkaProducerFactory<String, Object> producerFactory = new DefaultKafkaProducerFactory<>(propMap);
        return producerFactory;
    }

    @Bean
    @Primary
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin.getConfigurationProperties());
    }

    @Bean
    @Primary
    public ConsumerFactory<String, Object> consumerFactory() {
        Map<String, Object> propMap = this.properties.buildConsumerProperties();
        //设置消费者分区策略
        propMap.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "org.apache.kafka.clients.consumer.StickyAssignor,org.apache.kafka.clients.consumer.RoundRobinAssignor,org.apache.kafka.clients.consumer.RangeAssignor");
        return new DefaultKafkaConsumerFactory<>(propMap, new StringDeserializer(), new JsonDeserializer<>());
    }

    @Bean
    public ConsumerFactory<String, Object> batchConsumerFactory() {
        Map<String, Object> propMap = properties.buildConsumerProperties();
        //设置单次拉取的量，走公网访问时，该参数会有较大影响。
        propMap.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 15728640);
        propMap.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 15728640);
        //设置消费者分区策略
        propMap.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, "org.apache.kafka.clients.consumer.StickyAssignor,org.apache.kafka.clients.consumer.RoundRobinAssignor,org.apache.kafka.clients.consumer.RangeAssignor");
        //每次Poll的最大数量。
        //注意该值不要改得太大，如果Poll太多数据，而不能在下次Poll之前消费完，则会触发一次负载均衡，产生卡顿。
        propMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
        return new DefaultKafkaConsumerFactory<>(propMap);
    }

    @Bean
    public KafkaListenerContainerFactory kafkaListenerContainerFactoryWithRetry() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.setMissingTopicsFatal(false);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        factory.setBatchListener(false);
        factory.setErrorHandler(errorHandler());
        return factory;
    }

    @Bean
    @Primary
    public KafkaListenerContainerFactory kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(10);
        factory.setMissingTopicsFatal(false);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.setBatchListener(false);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    @Bean
    public KafkaListenerContainerFactory batchCustomerContainerFactoryWithRetry() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(batchConsumerFactory());
        factory.setConcurrency(5);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        factory.setBatchListener(true);
        factory.setBatchErrorHandler(batchErrorHandler());
        return factory;
    }

    @Bean
    public KafkaListenerContainerFactory batchCustomerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(batchConsumerFactory());
        factory.setConcurrency(10);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        //设置异步提交
//        factory.getContainerProperties().setSyncCommits(false);
        return factory;
    }

    /**
     * 消息重试失败后，发送到对应死信队列，队列名称默认为对应消费topic+“.DLT”
     *
     * @return
     */
    @Bean
    public ErrorHandler errorHandler() {
        DeadLetterPublishingRecoverer deadLetterPublishingRecoverer = new DeadLetterPublishingRecoverer(customKafkaTemplate());
        BackOff backOff = new ExponentialBackOff(5000L, 2.0D);
        return new SeekToCurrentErrorHandler(deadLetterPublishingRecoverer, backOff);
    }

    /**
     * 消息重试失败后,直接抛出异常,重试3次5/10/30
     *
     * @return
     */
    @Bean
    public BatchErrorHandler batchErrorHandler() {
        BackOff backOff = new ExponentialBackOff(5000L, 2.0D);
        SeekToCurrentBatchErrorHandler seekToCurrentBatchErrorHandler = new SeekToCurrentBatchErrorHandler();
        seekToCurrentBatchErrorHandler.setBackOff(backOff);
        return seekToCurrentBatchErrorHandler;
    }
}
