package com.study.kafkademo.kafka.custom.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

/**
 * Kafka consumer configuration for multi-threaded / batch consumption
 * (the "custom" listener container setup).
 *
 * @author kexl
 * @since 2020/7/3
 */
@Configuration
@EnableKafka
@Slf4j
public class CustomKafkaConsumerConfig {

    /** Comma-separated Kafka bootstrap servers. */
    @Value("${spring.kafka.bootstrap-servers}")
    private String service;

    /**
     * Consumer group id from application properties.
     * NOTE(review): injected but never read — {@link #consumerConfig()} hard-codes
     * {@code "topic.group1.custom"} instead; confirm which group is intended.
     */
    @Value("${spring.kafka.consumer.group-id}")
    private String groupid;

    /** Whether offsets are auto-committed (string form of a boolean). */
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String autoCommit;

    /** Auto-commit interval in milliseconds. */
    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private String interval;

    /**
     * Session timeout in milliseconds. The value is a literal (not a property
     * placeholder), so it is always "10000". It must be larger than the
     * consumer heartbeat interval ({@code heartbeat.interval.ms}, whose Kafka
     * default is 3000 ms — the original comment claiming a 10000 ms default
     * heartbeat was incorrect).
     */
    @Value("10000")
    private String timeout;

    /** Fully-qualified class name of the key deserializer. */
    @Value("${spring.kafka.consumer.key-deserializer}")
    private String keyDeserializer;

    /** Fully-qualified class name of the value deserializer. */
    @Value("${spring.kafka.consumer.value-deserializer}")
    private String valueDeserializer;

    /** Offset reset policy when no committed offset exists (earliest/latest/none). */
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String offsetReset;


    /**
     * Builds the raw Kafka consumer configuration.
     *
     * @return mutable map of consumer properties
     */
    private Map<String, Object> consumerConfig() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, service);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "topic.group1.custom");
        // Maximum number of records returned by a single poll().
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 4);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, interval);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, timeout);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offsetReset);
        return props;
    }

    /**
     * Consumer factory backed by {@link #consumerConfig()}.
     *
     * @return Kafka consumer factory for String keys and values
     */
    @Bean
    public ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfig());
    }

    /**
     * Batch-consuming listener container factory. Registered under the default
     * bean name {@code kafkaListenerContainerFactory}, so it is what plain
     * {@code @KafkaListener} methods use.
     *
     * @return listener container factory configured for batch consumption with
     *         manual, per-record offset acknowledgement
     */
    @Bean(name = "kafkaListenerContainerFactory")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
//        factory.setConcurrency(2);  // number of concurrent consumer threads
        // Deliver records to the listener in batches (List<ConsumerRecord>).
        factory.setBatchListener(true);
        // How long poll() blocks waiting for records, in milliseconds.
        factory.getContainerProperties().setPollTimeout(1500);
        // MANUAL_IMMEDIATE: the listener acknowledges explicitly and each ack is
        // committed immediately (offset auto-commit is not relied on).
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    /**
     * Consumer factory intended for the real-time push listener.
     * NOTE(review): currently unreferenced — the bean that used it
     * ({@code infoPushKafkaListenerContainerFactory}) only existed as
     * commented-out code, now removed. The original comment claimed a batch
     * size of 150, but the code sets 5; the comment below reflects the code.
     *
     * @return Kafka consumer factory with a smaller {@code max.poll.records}
     */
    private ConsumerFactory<String, String> infoPushConsumerFactory() {
        Map<String, Object> props = consumerConfig();
        // Override the poll batch size for the push use case: 5 records per poll.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 5);
        return new DefaultKafkaConsumerFactory<>(props);
    }
}
