package com.sgcc.dlsc.kafka.consumer.Config;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;

import java.util.HashMap;
import java.util.Map;

/**
 * @Author: 李孟帅
 * @CreateTime: 2019-12-30 15:28
 * @Description: Kafka consumer configuration — builds the consumer property map and
 *               exposes a batch-enabled listener container factory and a raw consumer bean.
 */
@Configuration
@EnableKafka
public class KafkaConsumerConfigs {

    /** Comma-separated list of Kafka broker addresses. */
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    /** Consumer group id shared by all consumers created from this config. */
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;

    /** Offset reset policy when no committed offset exists (e.g. "earliest"/"latest"). */
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    /** Whether offsets are committed automatically ("true"/"false"). */
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String enableAutoCommit;

    // These properties hold DEserializer class names (the keys are *-deserializer),
    // so the fields are named accordingly.
    @Value("${spring.kafka.consumer.key-deserializer}")
    private String keyDeserializer;

    @Value("${spring.kafka.consumer.value-deserializer}")
    private String valueDeserializer;

    /** Maximum number of records returned by a single poll(). */
    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    /** Maximum bytes fetched per partition per request. */
    @Value("${spring.kafka.consumer.properties.max-partition-fetch-bytes-config}")
    private Integer maxPartitionFetchBytes;

    /** Client request timeout in milliseconds. */
    @Value("${spring.kafka.consumer.properties.request-timeout-ms-config}")
    private Integer requestTimeoutMs;

    /** Group-coordination session timeout in milliseconds. */
    @Value("${spring.kafka.consumer.properties.session-timeout-ms-config}")
    private Integer sessionTimeoutMs;

    /**
     * Listener container factory with batch listening enabled, so {@code @KafkaListener}
     * methods wired to this factory receive a {@code List} of records per invocation.
     *
     * @return a batch-enabled container factory backed by {@link #consumerConfig()}
     */
    @Bean // with @EnableKafka present, the listener infrastructure picks this factory up by name
    public KafkaListenerContainerFactory<?> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfig()));
        factory.setBatchListener(true); // deliver records to listeners in batches
        return factory;
    }

    /**
     * Raw Kafka consumer property map assembled from the injected
     * {@code spring.kafka.consumer.*} values.
     *
     * @return mutable map of {@link ConsumerConfig} keys to configured values
     */
    @Bean
    public Map<String, Object> consumerConfig() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords); // cap records per poll()
        props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMs);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
        return props;
    }

    /**
     * Standalone consumer built from the same property map.
     *
     * <p>NOTE(review): {@link KafkaConsumer} is not thread-safe; a singleton bean is only
     * safe if it is used from a single thread — confirm how this bean is injected.
     *
     * @return a new {@code KafkaConsumer<String, String>} using {@link #consumerConfig()}
     */
    @Bean
    public KafkaConsumer<String, String> kafkaConsumer() {
        return new KafkaConsumer<>(consumerConfig());
    }
}
