package org.hhy.cloud.crawl.config;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;
import java.util.Map;

/**
 * Kafka configuration for the crawler module.
 *
 * <p>Defines two batch listener container factories sharing the same tuning but
 * belonging to different consumer groups (database persistence vs. Elasticsearch
 * indexing), plus an auto-flushing {@link KafkaTemplate} for producing.
 *
 * <p>Auto-commit is disabled and ack mode is {@code MANUAL_IMMEDIATE}: each
 * listener acknowledges explicitly, and every acknowledgment commits offsets
 * immediately (as opposed to {@code MANUAL}, which batches commits).
 *
 * @author LiuHao
 * @date 2020/11/13 0:55
 */
@Configuration
@EnableConfigurationProperties(CrawlKafkaProperties.class)
public class CrawlKafkaConfig {

    /** Spring Boot's baseline Kafka properties (spring.kafka.*). */
    private final KafkaProperties kafkaProperties;

    /** Crawler-specific overrides (bootstrap servers, group ids, tuning). */
    private final CrawlKafkaProperties crawlKafkaProperties;

    // Constructor injection instead of field @Autowired: dependencies are
    // explicit, final, and the class is testable without a Spring context.
    public CrawlKafkaConfig(KafkaProperties kafkaProperties,
                            CrawlKafkaProperties crawlKafkaProperties) {
        this.kafkaProperties = kafkaProperties;
        this.crawlKafkaProperties = crawlKafkaProperties;
    }

    /*********  Kafka consumer configuration  **********/

    /**
     * Listener container factory for the database-persistence consumer group.
     *
     * @return batch, manual-ack container factory bound to the DB group id
     */
    @Bean("dbKafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<?> dbKafkaListenerContainerFactory() {
        final ConcurrentKafkaListenerContainerFactory<String, String> factory = commonKafkaListenerContainerFactoryConfig();
        factory.setConsumerFactory(dbConsumerFactory());
        return factory;
    }

    /**
     * Listener container factory for the Elasticsearch-indexing consumer group.
     *
     * @return batch, manual-ack container factory bound to the ES group id
     */
    @Bean("esKafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<?> esKafkaListenerContainerFactory() {
        final ConcurrentKafkaListenerContainerFactory<String, String> factory = commonKafkaListenerContainerFactoryConfig();
        factory.setConsumerFactory(esConsumerFactory());
        return factory;
    }

    /**
     * Shared container-factory tuning applied to both listener factories:
     * 3 concurrent consumer threads, 3s poll timeout, batch listening, and
     * MANUAL_IMMEDIATE acks (each manual ack commits offsets right away;
     * effective because auto-commit is disabled in {@link #commonConsumerConfigs()}).
     */
    private ConcurrentKafkaListenerContainerFactory<String, String> commonKafkaListenerContainerFactoryConfig() {
        final ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    /**
     * Base consumer properties: Spring Boot defaults overlaid with the
     * crawler-specific overrides. Group id is intentionally absent — each
     * consumer factory sets its own.
     *
     * @return the shared (singleton) base consumer property map
     */
    @Bean
    public Map<String, Object> commonConsumerConfigs() {
        Map<String, Object> propsMap = kafkaProperties.buildConsumerProperties();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, crawlKafkaProperties.getBootstrapServers());
        // Offsets are committed manually via the listener's Acknowledgment.
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // NOTE(review): inert while auto-commit is disabled; kept for parity
        // with the configured properties.
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, crawlKafkaProperties.getConsumer().getAutoCommitIntervalMs());
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, crawlKafkaProperties.getConsumer().getSessionTimeoutMs());
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, crawlKafkaProperties.getConsumer().getAutoOffsetReset());
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, crawlKafkaProperties.getConsumer().getMaxPollRecords());
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return propsMap;
    }

    /**
     * Consumer factory for the database-persistence group.
     *
     * @return consumerFactory keyed to the DB group id
     */
    private ConsumerFactory<String, String> dbConsumerFactory() {
        // Copy before mutating: in a proxied @Configuration class,
        // commonConsumerConfigs() returns the singleton @Bean map, so writing
        // the group id into it directly would leak into the shared bean and
        // race with the other consumer factory.
        Map<String, Object> configs = new HashMap<>(commonConsumerConfigs());
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, crawlKafkaProperties.getConsumer().getDbGroupId());
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    /**
     * Consumer factory for the Elasticsearch-indexing group.
     *
     * @return consumerFactory keyed to the ES group id
     */
    private ConsumerFactory<String, String> esConsumerFactory() {
        // Same defensive copy as dbConsumerFactory(): never mutate the shared
        // singleton config map.
        Map<String, Object> configs = new HashMap<>(commonConsumerConfigs());
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, crawlKafkaProperties.getConsumer().getEsGroupId());
        return new DefaultKafkaConsumerFactory<>(configs);
    }

    /*********  Kafka producer configuration  **********/

    /**
     * Producer template with autoFlush enabled: every send flushes immediately
     * (prompt delivery at the cost of batching throughput).
     *
     * @return the shared KafkaTemplate
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        // Diamond operator instead of a raw type — no unchecked warning to suppress.
        return new KafkaTemplate<>(producerFactory(), true);
    }

    private ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    /**
     * Producer properties: Spring Boot defaults overlaid with the
     * crawler-specific overrides (acks, retries, batching, buffering).
     *
     * @return the producer property map
     */
    @Bean
    public Map<String, Object> producerConfigs() {
        final Map<String, Object> propsMap = kafkaProperties.buildProducerProperties();
        propsMap.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, crawlKafkaProperties.getBootstrapServers());
        propsMap.put(ProducerConfig.ACKS_CONFIG, crawlKafkaProperties.getProducer().getAcks());
        propsMap.put(ProducerConfig.RETRIES_CONFIG, crawlKafkaProperties.getProducer().getRetries());
        propsMap.put(ProducerConfig.BATCH_SIZE_CONFIG, crawlKafkaProperties.getProducer().getBatchSize());
        propsMap.put(ProducerConfig.LINGER_MS_CONFIG, crawlKafkaProperties.getProducer().getLingerMs());
        propsMap.put(ProducerConfig.BUFFER_MEMORY_CONFIG, crawlKafkaProperties.getProducer().getBufferMemory());
        propsMap.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        propsMap.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return propsMap;
    }

    /*
     * AckMode reference (applies only when ENABLE_AUTO_COMMIT_CONFIG = false):
     *
     * RECORD           - commit after each processed record
     * BATCH (default)  - commit once per poll
     * TIME             - commit every ackTime interval
     * COUNT            - commit after ackCount acknowledgments
     * COUNT_TIME       - commit when either ackTime or ackCount is reached first
     * MANUAL           - listener acks; commits are still batched behind the scenes
     * MANUAL_IMMEDIATE - listener acks; each ack commits immediately (used here)
     */
}
