package com.xaicode.ctoroad.kafka;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.transaction.KafkaTransactionManager;
import org.springframework.util.ObjectUtils;

import java.util.HashMap;
import java.util.Map;

/**
 * @author Locker cjxia@isoftstone.com
 * @version 1.0
 */
@Slf4j
@Configuration
public class KafkaConfig {

    private final KafkaProperties kafkaProperties;

    private final KafkaProperties.Producer kafkaProducerProperties;

    private final KafkaProperties.Consumer kafkaConsumerProperties;

    private final KafkaProducerCallback kafkaProducerCallback;

    public KafkaConfig(KafkaProperties kafkaProperties, KafkaProducerCallback kafkaProducerCallback) {
        this.kafkaProperties = kafkaProperties;
        this.kafkaProducerProperties = kafkaProperties.getProducer();
        this.kafkaConsumerProperties = kafkaProperties.getConsumer();
        this.kafkaProducerCallback = kafkaProducerCallback;
    }

    /**
     * Whether Kafka transactions are enabled.
     * <p>Defaults to {@code false} so a missing property no longer aborts startup with an
     * unresolvable placeholder, and so the unboxed checks below cannot NPE.</p>
     */
    @Value("${spring.kafka.enable-transaction:false}")
    private Boolean kafkaEnableTransaction;

    /*
     * Producer settings (bound from spring.kafka.producer.*).
     */

    @Value("${spring.kafka.producer.linger}")
    private Integer kafkaProducerLinger;

    @Value("${spring.kafka.producer.batch-size}")
    private Integer kafkaProducerBatchSize;

    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer kafkaProducerBatchMemory;

    /*
     * Consumer settings (bound from spring.kafka.consumer.*).
     */

    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private Integer kafkaConsumerAutoCommitInterval;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer kafkaConsumerMaxPollRecords;

    /**
     * Kafka admin used to create topics declared as beans.
     * <p>
     * Declare a {@link org.apache.kafka.clients.admin.NewTopic} bean to create a topic:
     * <pre>
     *     new NewTopic("test", 10, (short) 2);
     * </pre>
     * First argument is the topic name, second the partition count, third the replication
     * factor. The replication factor cannot exceed the number of brokers; replication is
     * only useful on a cluster.
     */
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> configs = new HashMap<>(1);
        configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        return new KafkaAdmin(configs);
    }

    /**
     * Raw Kafka admin client; inject it to create topics programmatically
     * (e.g. with multiple replicas in a clustered environment).
     */
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }

    /**
     * Producer configuration map shared by the producer factory and the raw producer bean.
     *
     * @return mutable map of {@link ProducerConfig} keys to values
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        if (is(kafkaProducerProperties.getClientId())) {
            props.put(ProducerConfig.CLIENT_ID_CONFIG, kafkaProducerProperties.getClientId());
        }
        if (is(kafkaProducerProperties.getRetries())) {
            props.put(ProducerConfig.RETRIES_CONFIG, kafkaProducerProperties.getRetries());
        }
        // Fix: guard on the same field that is put. The original tested
        // kafkaProducerProperties.getBatchSize()/getBufferMemory() but put the
        // @Value-injected fields, so the guard and the value could disagree.
        if (is(kafkaProducerBatchSize)) {
            props.put(ProducerConfig.BATCH_SIZE_CONFIG, kafkaProducerBatchSize);
        }
        if (is(kafkaProducerBatchMemory)) {
            props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, kafkaProducerBatchMemory);
        }
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        // Producer transactions
        if (Boolean.TRUE.equals(kafkaEnableTransaction)) {
            // Idempotence: the same message is persisted exactly once on the server side
            // (no loss, no duplication) within a single producer session.
            // For cross-session / multi topic-partition guarantees, Kafka transactions are required.
            // When idempotence is true, explicitly setting acks to 0 or -1 other than "all" is rejected.
            props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
            // acks: -1 and "all" both mean the message must be written to the leader
            // AND its replicas before being acknowledged. A single broker could use "1".
            props.put(ProducerConfig.ACKS_CONFIG, "all");
        } else {
            props.put(ProducerConfig.ACKS_CONFIG, kafkaProducerProperties.getAcks());
        }

        // Linger: once this delay elapses, the batch is sent even if batch-size was not reached.
        props.put(ProducerConfig.LINGER_MS_CONFIG, kafkaProducerLinger);

        // Maximum size of a single message in bytes (default 1048576):
        // props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760);
        // Broker response timeout: if the broker does not acknowledge within 60s,
        // the send is considered failed.
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);

        // Compression algorithm (none by default):
        //props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");

        // Interceptors (value is the interceptor class name):
        //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.x.handler.KafkaProducerInterceptor");

        return props;
    }

    /**
     * Producer factory; made transaction-capable only when transactions are enabled.
     */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        DefaultKafkaProducerFactory<String, String> factory = new DefaultKafkaProducerFactory<>(producerConfigs());

        if (Boolean.TRUE.equals(kafkaEnableTransaction)) {
            // Enable transactions on the producer factory.
            factory.transactionCapable();
            // Prefix used to generate each producer's transactional.id.
            factory.setTransactionIdPrefix("vsm-kafka-transaction-");
        }

        return factory;
    }

    /**
     * Kafka transaction manager.
     * <p>Fix: only registered when {@code spring.kafka.enable-transaction=true}. Previously it
     * was created unconditionally, and {@link KafkaTransactionManager} rejects a producer
     * factory that is not transaction-capable, so startup failed whenever transactions
     * were disabled.</p>
     */
    @Bean
    @ConditionalOnProperty(name = "spring.kafka.enable-transaction", havingValue = "true")
    public KafkaTransactionManager<String, String> kafkaTransactionManager(ProducerFactory<String, String> producerFactory) {
        return new KafkaTransactionManager<>(producerFactory);
    }

    /**
     * Raw producer built from the same configuration map.
     * KafkaProducer is Closeable, so Spring's inferred destroy method closes it on shutdown.
     */
    @Bean
    public KafkaProducer<String, String> kafkaProducer() {
        return new KafkaProducer<>(producerConfigs());
    }

    /**
     * Consumer configuration map shared by the consumer factory and the raw consumer bean.
     *
     * @return mutable map of {@link ConsumerConfig} keys to values
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaProperties.getBootstrapServers());
        if (is(kafkaConsumerProperties.getGroupId())) {
            props.put(ConsumerConfig.GROUP_ID_CONFIG, kafkaConsumerProperties.getGroupId());
        }
        if (is(kafkaConsumerProperties.getAutoOffsetReset())) {
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerProperties.getAutoOffsetReset());
        }
        Boolean enableAutoCommit = kafkaConsumerProperties.getEnableAutoCommit();
        if (is(enableAutoCommit)) {
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
            // Fix: auto.commit.interval.ms is only honoured when auto-commit is ENABLED.
            // The original condition was inverted and set the interval only when
            // auto-commit was turned off, where it has no effect.
            if (enableAutoCommit && is(kafkaConsumerAutoCommitInterval)) {
                props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, kafkaConsumerAutoCommitInterval);
            }
        }
        // Session timeout: consumer is considered dead if no heartbeat within this window.
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "15000");
        // Fix: guard on the same field that is put (was guarded on the KafkaProperties getter).
        if (is(kafkaConsumerMaxPollRecords)) {
            props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, kafkaConsumerMaxPollRecords);
        }
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Raw consumer built from the same configuration map.
     * NOTE(review): KafkaConsumer is not thread-safe; a shared singleton bean must only be
     * polled from a single thread — confirm how this bean is used by injectors.
     */
    @Bean
    public KafkaConsumer<String, String> kafkaConsumer() {
        return new KafkaConsumer<>(consumerConfigs());
    }

    /**
     * Listener container factory using the custom consumer configuration.
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());

        // Number of concurrent consumers per listener; raise to drain a backlog faster
        // when production outpaces consumption.
        factory.setConcurrency(1);

        // Manual acknowledgement for listeners.
        // NOTE(review): MANUAL ack mode expects enable.auto.commit=false; if the
        // spring.kafka.consumer.enable-auto-commit property is true the two settings
        // conflict — verify the property value.
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);

        // Batch consumption: batch size is ConsumerConfig.MAX_POLL_RECORDS_CONFIG, e.g.
        // props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50) for at most 50 records per poll.
        // factory.setBatchListener(true);

        // Maximum poll interval (ms):
        // factory.getContainerProperties().setPollTimeout(3000);

        return factory;
    }

    /**
     * String/String KafkaTemplate with the producer send callback attached.
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        KafkaTemplate<String, String> stringStringKafkaTemplate = new KafkaTemplate<>(producerFactory());
        // Register the producer send-result callback.
        stringStringKafkaTemplate.setProducerListener(kafkaProducerCallback);
        return stringStringKafkaTemplate;
    }

    /**
     * @return true when the value is neither null nor empty (per {@link ObjectUtils#isEmpty}).
     */
    private boolean is(Object obj) {
        return !ObjectUtils.isEmpty(obj);
    }

}
