package com.lz.dsp.task.config.kafka;

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.AbstractMessageListenerContainer;

import com.lz.ad.common.mq.kafka.KafkaAdGroupCons;
import com.lz.dsp.task.common.Config;

import lombok.extern.slf4j.Slf4j;

/**
 * Kafka consumer configuration.
 *
 * <p>Declares the consumer properties shared by all consumers, plus one
 * {@link ConsumerFactory} and one {@link ConcurrentKafkaListenerContainerFactory}
 * per consumer group (DSP creative sync, DSP put-unit device sync, SSP ad-slot
 * RTB sync). Auto-commit is disabled and the containers use MANUAL_IMMEDIATE
 * ack mode, so listeners must acknowledge records explicitly.</p>
 */
@Configuration
@EnableKafka
@Slf4j
public class KafkaConsumerConfig {
    /** Connection-level consumer settings (bootstrap servers, fetch tuning, ...). */
    @Autowired
    private KafkaConsumerConnectionInfo kafkaConsumerConnectionInfo;
    /** Per-group listener settings (concurrency, batch fetching), keyed by group id. */
    @Autowired
    private KafkaConsumerListenerInfo kafkaConsumerListenerInfo;

    /**
     * Builds the consumer properties shared by every consumer factory.
     *
     * <p>Note: each consumer factory adds its own {@code group.id} on top of a
     * fresh copy of this map, so group-specific settings never leak between
     * factories.</p>
     *
     * @return mutable map of Kafka consumer configuration properties
     */
    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        /* Connection settings (externally configured). */
        // Comma-separated broker list used to bootstrap the cluster connection.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConsumerConnectionInfo.getBootstrapServers());
        // Max time (ms) the broker blocks a fetch when fetch.min.bytes is not yet satisfied (Kafka default: 500).
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, kafkaConsumerConnectionInfo.getFetchMaxWaitMs());
        // Minimum amount of data (bytes) the broker returns for a fetch request (Kafka default: 1).
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, kafkaConsumerConnectionInfo.getFetchMinBytes());
        // Maximum amount of data the broker returns for a fetch request.
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, kafkaConsumerConnectionInfo.getFetchMaxBytes());
        // Maximum number of records returned by a single poll() call.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, kafkaConsumerConnectionInfo.getMaxPollRecords());

        /* Application-level settings (application.yml). */
        // Where to start consuming when there is no committed offset, or the committed offset is out of range.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, kafkaConsumerConnectionInfo.getAutoOffsetReset());
        // Optional: partition assignment strategy class used for group management.
        if (StringUtils.isNotBlank(kafkaConsumerConnectionInfo.getPartitionAssignmentStrategy())) {
            props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, kafkaConsumerConnectionInfo.getPartitionAssignmentStrategy());
        }
        // Optional: list of consumer interceptor class names.
        if (StringUtils.isNotBlank(kafkaConsumerConnectionInfo.getInterceptorClasses())) {
            props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, kafkaConsumerConnectionInfo.getInterceptorClasses());
        }

        /* Fixed defaults. */
        // Offsets are committed manually by the listeners (see MANUAL_IMMEDIATE ack mode).
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Key/value deserializers; both implement org.apache.kafka.common.serialization.Deserializer.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        return props;
    }

    /**
     * Builds a listener-container factory for one consumer group, applying the
     * configured concurrency and batch-fetch settings for that group.
     *
     * @param groupId         consumer group id; must have an entry in the listener config map
     * @param consumerFactory the consumer factory backing the containers
     * @return configured listener-container factory
     * @throws IllegalStateException if no listener configuration exists for {@code groupId}
     */
    private ConcurrentKafkaListenerContainerFactory<String, String> buildListenerContainerFactory(
            String groupId, ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // Listeners acknowledge each record explicitly; the offset is committed immediately on ack.
        factory.getContainerProperties().setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);
        Map<String, ListenerDetails> listenerInfoMap = kafkaConsumerListenerInfo.getListenerConfMap();
        // A null/empty map cannot contain the key, so a single containsKey check covers both cases.
        if (listenerInfoMap == null || !listenerInfoMap.containsKey(groupId)) {
            String msg = "kafka listener container factory lack concurrency parameter for group [" + groupId + "].";
            log.error(msg);
            throw new IllegalStateException(msg);
        }
        ListenerDetails details = listenerInfoMap.get(groupId);
        int concurrencyNum = details.getConcurrencyNum();
        // Only override when more than one consumer thread is requested; the container default is 1.
        if (concurrencyNum > 1) {
            factory.setConcurrency(concurrencyNum);
        }
        factory.setBatchListener(details.isBatchFetch());
        log.info("kafka listener container factory for group [{}] is created over.", groupId);
        return factory;
    }

    /**
     * Builds a consumer factory bound to the given consumer group, on top of the
     * shared {@link #consumerConfigs()} properties.
     *
     * @param groupId unique string identifying the consumer group
     * @return consumer factory for {@code groupId}
     */
    private ConsumerFactory<String, String> buildConsumerFactory(String groupId) {
        Map<String, Object> properties = consumerConfigs();
        // Unique string identifying the consumer group this consumer belongs to.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        ConsumerFactory<String, String> factory = new DefaultKafkaConsumerFactory<>(properties);
        log.info("kafka consumer factory for group [{}] is created over.", groupId);
        return factory;
    }

    /**
     * Listener-container factory for the DSP creative sync group.
     *
     * @return configured listener-container factory
     */
    @Bean(name = Config.KAFKA_LISTENER_CONTAINER_FACTORY_SYNC_DSP_CREATIVE)
    ConcurrentKafkaListenerContainerFactory<String, String> syncDspCreativeKafkaListenerContainerFactory() {
        return buildListenerContainerFactory(KafkaAdGroupCons.SYNC_DSP_CREATIVE, consumerDspCreativeFactory());
    }

    /**
     * Consumer factory for the DSP creative sync group.
     *
     * @return consumer factory
     */
    @Bean
    public ConsumerFactory<String, String> consumerDspCreativeFactory() {
        return buildConsumerFactory(KafkaAdGroupCons.SYNC_DSP_CREATIVE);
    }

    /**
     * Listener-container factory for the DSP put-unit device sync group.
     *
     * @return configured listener-container factory
     */
    @Bean(name = Config.KAFKA_LISTENER_CONTAINER_FACTORY_SYNC_DSP_PUT_UNIT_DEV)
    ConcurrentKafkaListenerContainerFactory<String, String> syncDspUnitDevKafkaListenerContainerFactory() {
        return buildListenerContainerFactory(KafkaAdGroupCons.SYNC_DSP_PUT_UNIT_DEV, consumerDspUnitDevFactory());
    }

    /**
     * Consumer factory for the DSP put-unit device sync group.
     *
     * @return consumer factory
     */
    @Bean
    public ConsumerFactory<String, String> consumerDspUnitDevFactory() {
        return buildConsumerFactory(KafkaAdGroupCons.SYNC_DSP_PUT_UNIT_DEV);
    }

    /**
     * Listener-container factory for the SSP ad-slot RTB sync group.
     *
     * @return configured listener-container factory
     */
    @Bean(name = Config.KAFKA_LISTENER_CONTAINER_FACTORY_SYNC_SSPADSLOTRTB)
    ConcurrentKafkaListenerContainerFactory<String, String> syncSspAdSlotRtbKafkaListenerContainerFactory() {
        return buildListenerContainerFactory(KafkaAdGroupCons.SYNC_SSP_AD_SLOT_RTB, consumerSspAdSlotRtbFactory());
    }

    /**
     * Consumer factory for the SSP ad-slot RTB sync group.
     *
     * @return consumer factory
     */
    @Bean
    public ConsumerFactory<String, String> consumerSspAdSlotRtbFactory() {
        return buildConsumerFactory(KafkaAdGroupCons.SYNC_SSP_AD_SLOT_RTB);
    }
}
