package com.piece.core.kafka.config;

import com.piece.core.framework.constant.RegisterConstants;
import com.piece.core.framework.support.convert.Convert;
import com.piece.core.framework.util.message.MessageFactory;
import com.piece.core.kafka.consumer.KafkaMessageConsumer;
import com.piece.core.kafka.producer.KafkaMessageProducer;
import com.piece.core.kafka.properties.KafkaProperties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.context.properties.PropertyMapper;
import org.springframework.context.annotation.*;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.*;
import org.springframework.kafka.support.LoggingProducerListener;
import org.springframework.kafka.support.ProducerListener;
import org.springframework.kafka.support.serializer.JsonDeserializer;
import org.springframework.kafka.transaction.KafkaTransactionManager;
import org.springframework.util.unit.DataSize;
import javax.annotation.Resource;
import java.time.Duration;
import java.util.*;

@EnableKafka
@Configuration
@ConditionalOnProperty(prefix = "spring.data.kafka", name = "enable", havingValue = "true")
public class KafkaAutoConfig {

    /** Application name; used as the fallback consumer group id when none is configured. */
    @Value("${spring.application.name:#{null}}")
    private String applicationName;

    /** Bound configuration under {@code spring.data.kafka}. */
    @Resource
    private KafkaProperties kafkaProperties;

    /**
     * Builds the connection properties shared by producers and consumers:
     * bootstrap servers, client id, plus SSL and security settings.
     */
    private Map<String, Object> buildCommonProperties() {
        Map<String, Object> properties = new HashMap<>();
        if (null != kafkaProperties.getBootstrapServers()) {
            properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        }

        if (null != kafkaProperties.getClientId()) {
            properties.put(ProducerConfig.CLIENT_ID_CONFIG, kafkaProperties.getClientId());
        }

        properties.putAll(kafkaProperties.getSsl().buildProperties());
        properties.putAll(kafkaProperties.getSecurity().buildProperties());
        return properties;
    }

    /**
     * Builds the producer configuration on top of the common properties.
     * Keys and values are serialized as plain strings.
     */
    private Map<String, Object> buildProducerProperties() {
        Map<String, Object> props = this.buildCommonProperties();
        // Message acknowledgement:
        // 0 = no ack, 1 = leader ack only, all/-1 = leader and all in-sync followers must ack.
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        // Batch size in bytes (fallback 16384 if conversion fails).
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, Convert.toInt(kafkaProperties.getProducer().getBatchSize().toBytes(), 16384));
        // Producer-side buffer memory in bytes.
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaProperties.getProducer().getBufferMemory().toBytes());
        // Retry count; 0 disables retries (fallback 3 if unset).
        props.put(ProducerConfig.RETRIES_CONFIG, Convert.toInt(kafkaProperties.getProducer().getRetries(), 3));
        // Linger (send delay), left at broker default:
        //props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // Max time the producer blocks when its buffer is full, default 60s:
        //props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 60000);
        // Serializer for record keys.
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Serializer for record values (JsonSerializer would be an alternative).
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    /** Listener that logs send results/failures for all producers. */
    @Bean("kafkaProducerListener")
    public ProducerListener<String, Object> kafkaProducerListener() {
        return new LoggingProducerListener<>();
    }

    /**
     * Producer factory built from {@link #buildProducerProperties()}.
     * Enables transactions when a transaction-id prefix is configured.
     */
    @Bean("kafkaProducerFactory")
    public DefaultKafkaProducerFactory<String, Object> kafkaProducerFactory() {
        DefaultKafkaProducerFactory<String, Object> factory =
                new DefaultKafkaProducerFactory<>(this.buildProducerProperties());
        String transactionIdPrefix = kafkaProperties.getProducer().getTransactionIdPrefix();
        if (null != transactionIdPrefix) {
            factory.setTransactionIdPrefix(transactionIdPrefix);
        }
        return factory;
    }

    /**
     * Kafka transaction manager; only registered when a transaction-id prefix
     * is configured (transactions require one).
     */
    @Bean
    @ConditionalOnProperty(name = {"spring.data.kafka.producer.transaction-id-prefix"})
    public KafkaTransactionManager<String, Object> kafkaTransactionManager(DefaultKafkaProducerFactory<String, Object> kafkaProducerFactory) {
        return new KafkaTransactionManager<>(kafkaProducerFactory);
    }

    /**
     * KafkaTemplate wired with the producer factory, the logging producer
     * listener, the default topic, and an optional transaction-id prefix.
     */
    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        PropertyMapper map = PropertyMapper.get().alwaysApplyingWhenNonNull();
        KafkaTemplate<String, Object> kafkaTemplate = new KafkaTemplate<>(kafkaProducerFactory());
        map.from(kafkaProducerListener()).to(kafkaTemplate::setProducerListener);
        map.from(RegisterConstants.TOPIC).to(kafkaTemplate::setDefaultTopic);
        map.from(kafkaProperties.getTemplate().getTransactionIdPrefix()).to(kafkaTemplate::setTransactionIdPrefix);
        return kafkaTemplate;
    }

    /**
     * Message producer facade over the KafkaTemplate.
     * MessageFactory is injected lazily to break a potential startup cycle.
     */
    @Bean("kafkaMessageProducer")
    public KafkaMessageProducer kafkaMessageProducer(KafkaTemplate<String, Object> kafkaTemplate, @Lazy MessageFactory messageFactory) {
        return new KafkaMessageProducer(messageFactory, kafkaTemplate);
    }

    /**
     * Builds the consumer configuration on top of the common properties.
     * Auto-commit is disabled; offsets are acknowledged manually by the listener container.
     */
    private Map<String, Object> buildConsumerProperties() {
        Map<String, Object> props = this.buildCommonProperties();
        // Auto-commit interval; defaults to 10s when unset.
        // NOTE(review): the Convert fallback below is 1000 ms, inconsistent with the
        // 10000 ms Optional default — confirm which is intended.
        Duration autoCommitInterval = kafkaProperties.getConsumer().getAutoCommitInterval();
        autoCommitInterval = Optional.ofNullable(autoCommitInterval).orElseGet(() -> Duration.ofMillis(10000));
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, Convert.toInt(autoCommitInterval.toMillis(), 1000));
        // Where to start consuming when there is no committed offset:
        // earliest: resume from the committed offset; with no committed offset, start from the beginning of the partition
        // latest: resume from the committed offset; with no committed offset, consume only records produced after the consumer starts
        // none: resume from committed offsets only; throw if any partition has no committed offset
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaProperties.getConsumer().getAutoOffsetReset());
        // Disable auto-commit; acks are issued manually (see listener container factory).
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Max time the broker waits before answering a fetch; defaults to 10s when unset.
        Duration fetchMaxWait = kafkaProperties.getConsumer().getFetchMaxWait();
        fetchMaxWait = Optional.ofNullable(fetchMaxWait).orElseGet(() -> Duration.ofMillis(10000));
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, Convert.toInt(fetchMaxWait.toMillis(), 10000));
        // Minimum bytes a fetch must return; defaults to 1 byte when unset.
        DataSize fetchMinSize = kafkaProperties.getConsumer().getFetchMinSize();
        fetchMinSize = Optional.ofNullable(fetchMinSize).orElseGet(() -> DataSize.ofBytes(1));
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, Convert.toInt(fetchMinSize.toBytes(), 1));
        // Group id falls back to the application name.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, Convert.toStr(kafkaProperties.getConsumer().getGroupId(), applicationName));
        // Cap the number of records returned per poll.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        // Rebalance is triggered when the broker misses consumer heartbeats for this long; default 10s.
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
        // Deserializers for record keys and values.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // JSON deserialization settings (effective if a JsonDeserializer is used downstream).
        props.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
        props.put(JsonDeserializer.USE_TYPE_INFO_HEADERS, "false");

        return props;
    }

    /** Consumer factory built from {@link #buildConsumerProperties()}. */
    @Bean("kafkaConsumerFactory")
    public DefaultKafkaConsumerFactory<String, Object> kafkaConsumerFactory() {
        return new DefaultKafkaConsumerFactory<>(this.buildConsumerProperties());
    }

    /**
     * Listener container factory: concurrent, batch-listening, with manual
     * immediate offset acknowledgement.
     */
    @Bean("kafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, Object>> kafkaListenerContainerFactory(DefaultKafkaConsumerFactory<String, Object> kafkaConsumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(kafkaConsumerFactory);
        // Number of listener threads per container; typically machines * partitions.
        factory.setConcurrency(kafkaProperties.getListener().getConcurrency());
        // By default a missing topic is fatal at startup; ignore it instead.
        factory.setMissingTopicsFatal(false);
        // Auto-commit is disabled, so the listener must acknowledge each record manually.
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // Blocking timeout passed to Consumer.poll() by the container thread.
        // (Not max.poll.interval.ms — the 5-minute rebalance trigger is a separate setting.)
        factory.getContainerProperties().setPollTimeout(kafkaProperties.getListener().getPollTimeout().toMillis());
        // Batch mode: listener methods receive a List of records.
        factory.setBatchListener(true);
        return factory;
    }

    /**
     * Message consumer facade; MessageFactory is injected lazily to break a
     * potential startup cycle.
     */
    @Bean("kafkaMessageConsumer")
    public KafkaMessageConsumer kafkaMessageConsumer(@Lazy MessageFactory messageFactory) {
        return new KafkaMessageConsumer(messageFactory);
    }
}
