package com.gaff.emp.core.config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.*;

import java.util.HashMap;
import java.util.Map;

/**
 * Spring configuration that wires Kafka producer and consumer beans from
 * {@code kafka.*} properties. Only active when {@code kafka.enable=true}.
 *
 * <p>Exposes a {@link KafkaTemplate} for publishing and a
 * {@link ConcurrentKafkaListenerContainerFactory} for {@code @KafkaListener}
 * consumers, both backed by String key/value (de)serialization by default.
 */
@Configuration
@ConditionalOnProperty(prefix = "kafka", name = "enable", havingValue = "true")
public class KafkaConfig {

    /** Comma-separated bootstrap broker list. */
    @Value("${kafka.servers:localhost:9092}")
    private String servers;

    // Number of retries on send failure; with retries=0 the producer never resends.
    @Value("${kafka.producer.retries:3}")
    private int retries;

    // acks=0   the producer does not wait for any broker acknowledgement; the record
    //          is considered sent as soon as it hits the socket buffer.
    // acks=1   the leader writes the record to its local log and responds without
    //          waiting for follower replication.
    // acks=all the leader waits for the full in-sync replica set to acknowledge.
    @Value("${kafka.producer.acks:1}")
    private String acks;

    @Value("${kafka.producer.compression:gzip}")
    private String compression;

    // Batch size in bytes: the producer accumulates records and sends them in one request.
    @Value("${kafka.producer.batch-size:131072}")
    private int batchSize;

    // Total producer buffer memory in bytes; data is flushed once this is reached.
    @Value("${kafka.producer.buffer-memory:33554432}")
    private int bufferMemory;

    @Value("${kafka.producer.linger-ms:100}")
    private int lingerMs;

    @Value("${kafka.producer.partitioner:org.apache.kafka.clients.producer.internals.DefaultPartitioner}")
    private String partitioner;

    @Value("${kafka.key-serializer:org.apache.kafka.common.serialization.StringSerializer}")
    private String keySerializer;

    @Value("${kafka.value-serializer:org.apache.kafka.common.serialization.StringSerializer}")
    private String valueSerializer;

    @Value("${kafka.key-deserializer:org.apache.kafka.common.serialization.StringDeserializer}")
    private String keyDeserializer;

    @Value("${kafka.value-deserializer:org.apache.kafka.common.serialization.StringDeserializer}")
    private String valueDeserializer;

    // Listener concurrency; usually matched to the topic's partition count
    // to maximize consumption throughput.
    @Value("${kafka.consumer.concurrency:4}")
    private int concurrency;

    @Value("${kafka.consumer.batch-listener:true}")
    private boolean batchListener;

    @Value("${kafka.consumer.max-poll-records:50}")
    private int maxPollRecords;

    @Value("${kafka.consumer.poll-timeout:3000}")
    private int pollTimeout;

    // Default consumer group id. Within one group, each message is delivered
    // to only one consumer of that group.
    @Value("${kafka.consumer.group-id:emp.consumer}")
    private String groupId;

    @Value("${kafka.consumer.enable-auto-commit:true}")
    private boolean autoCommit;

    @Value("${kafka.consumer.auto-commit-interval-ms:100}")
    private int autoCommitIntervalMs;

    // earliest: resume from the committed offset per partition; if none exists,
    //           consume from the beginning.
    // latest:   resume from the committed offset per partition; if none exists,
    //           consume only newly produced records.
    // none:     resume from committed offsets only; throw if any partition has
    //           no committed offset.
    @Value("${kafka.consumer.auto-offset-reset:latest}")
    private String autoOffsetReset;

    @Value("${kafka.consumer.session-timeout-ms:30000}")
    private int sessionTimeoutMs;

    /** Template for sending String-keyed, String-valued records. */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    /** Producer factory backing {@link #kafkaTemplate()}. */
    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Raw producer configuration map assembled from the injected properties.
     *
     * @return mutable map of {@link ProducerConfig} keys to configured values
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>(16);
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.ACKS_CONFIG, acks);
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compression);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.LINGER_MS_CONFIG, lingerMs);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer);
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, partitioner);
        return props;
    }

    /**
     * Listener container factory used by {@code @KafkaListener} methods,
     * configured for concurrent (and optionally batch) consumption.
     */
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        factory.setBatchListener(batchListener);
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        return factory;
    }

    /** Consumer factory backing {@link #kafkaListenerContainerFactory()}. */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    /**
     * Raw consumer configuration map assembled from the injected properties.
     *
     * <p>Fix: the key and value deserializers were previously swapped
     * ({@code keyDeserializer} was assigned to the value deserializer key and
     * vice versa). The bug was masked only because both default to
     * {@code StringDeserializer}; overriding either property would have
     * applied it to the wrong side.
     *
     * @return mutable map of {@link ConsumerConfig} keys to configured values
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>(16);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        if (autoCommit) {
            // Commit interval is only meaningful when auto-commit is on.
            props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitIntervalMs);
        }
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        return props;
    }

}
