
package com.showdor.springboot.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String autoCommit;

    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private String maxPollRecords;

    // Session timeout must be greater than the heartbeat interval (default heartbeat: 10000 ms).
    // Previously hard-coded as @Value("20000"); now an overridable property with the same default.
    @Value("${spring.kafka.consumer.session-timeout:20000}")
    private String sessionTimeout;

    @Value("${spring.kafka.consumer.key-deserializer}")
    private String keyDeserializer;

    @Value("${spring.kafka.consumer.value-deserializer}")
    private String valueDeserializer;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String offsetReset;


    /**
     * Builds the common Kafka consumer configuration shared by every factory in this class.
     *
     * @return map of consumer property names to values
     */
    private Map<String, Object> consumerConfig() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, offsetReset);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        // NOTE(review): request.timeout.ms is set equal to session.timeout.ms here; older
        // Kafka clients require request.timeout.ms to be strictly greater than
        // session.timeout.ms — confirm against the client version in use.
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, sessionTimeout);
        return props;
    }


    /**
     * Consumer factory used by the real-time push listener.
     *
     * <p>NOTE(review): currently unreferenced — its listener-container bean was disabled.
     * Retained so the push listener can be re-enabled without rework; delete if that
     * feature is gone for good.
     *
     * @return Kafka consumer factory for real-time push consumption
     */
    private ConsumerFactory<String, String> infoPushConsumerFactory() {
        Map<String, Object> props = consumerConfig();
        // Real-time push pulls at most 150 records per poll.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 150);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    /**
     * General-purpose consumer factory for batch consumption.
     *
     * @return Kafka consumer factory configured with the shared settings plus batch limits
     */
    public ConsumerFactory<String, String> consumerFactory() {
        Map<String, Object> props = consumerConfig();
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "500000000");
        // Records per poll; configurable for batch consumption.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    /**
     * Listener-container factory for batch consumption with manual offset acknowledgement.
     *
     * @return concurrent Kafka listener container factory
     */
    @Bean(name = "kafkaListenerContainerFactory2")
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory2() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(8);  // number of concurrent consumers in the container
        factory.setBatchListener(true); // deliver records to the listener in batches
        factory.getContainerProperties().setPollTimeout(4000); // poll timeout for the topic, ms
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL); // listener commits offsets manually
        return factory;
    }
}