package com.sbp.message.consumer;

import com.sbp.message.api.service.MessageService;
import com.sbp.message.conf.KafkaMQConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.InitializingBean;

import java.util.*;

/**
 * Created by wangmin on 2019/4/19.
 */
/**
 * Bootstraps and supervises the Kafka consumer threads for every registered
 * topic/group pair.
 *
 * <p>On {@link #afterPropertiesSet()} it resolves the topic/group wiring from
 * {@link ConsumerServicesManager}, records each pair in the topic registry via
 * {@link MessageService#updateTopicRegistry}, then starts one thread per
 * exclusive topic/group plus two threads per "normal" group. A JVM shutdown
 * hook asks every consumer to stop and waits (bounded) for the threads to exit.
 *
 * <p>Created by wangmin on 2019/4/19.
 */
public class KafkaMessageConsumerMonitor implements InitializingBean {
    private static final Logger logger = LoggerFactory.getLogger(KafkaMessageConsumerMonitor.class);

    /** Every consumer runnable started by this monitor; shared with the shutdown hook. */
    final List<KafkaBaseConsumerThread> consumerThreads = new ArrayList<>();

    private KafkaMQConfig kafkaMQConfig;
    public void setKafkaMQConfig(KafkaMQConfig kafkaMQConfig) {
        this.kafkaMQConfig = kafkaMQConfig;
    }

    private MessageService messageService;
    public void setMessageService(MessageService messageService) {
        this.messageService = messageService;
    }

    private ConsumerServicesManager consumerServicesManager;
    public void setConsumerServicesManager(ConsumerServicesManager consumerServicesManager) {
        this.consumerServicesManager = consumerServicesManager;
    }

    private MessageConsumer messageConsumer;
    public void setMessageConsumer(MessageConsumer messageConsumer) {
        this.messageConsumer = messageConsumer;
    }

    /**
     * Builds the base Kafka client configuration and starts the consumer
     * threads. Invoked by Spring once all setters above have been called.
     */
    @Override
    public void afterPropertiesSet() {
        final Properties kafkaProperties = new Properties();
        kafkaProperties.put("bootstrap.servers", kafkaMQConfig.getBootstrapServers());
        kafkaProperties.put("key.deserializer", kafkaMQConfig.getKeyDeserializer());
        kafkaProperties.put("value.deserializer", kafkaMQConfig.getValueDeserializer());

        // Offsets are committed manually by the consumer threads.
        kafkaProperties.put("enable.auto.commit", false);

        kafkaProperties.put("fetch.min.bytes", 200);
        kafkaProperties.put("fetch.max.bytes", 1 << 20); // 1 MiB
        kafkaProperties.put("fetch.max.wait.ms", 2000);

        register(kafkaProperties);
    }

    /**
     * Resolves the topic/group map, registers each pair with the message
     * service, and partitions groups into "exclusive" (one message at a time)
     * versus "normal" consumption before spawning the consumer threads.
     *
     * @param kafkaProperties base Kafka client settings shared by all consumers
     */
    private void register(Properties kafkaProperties) {
        final Map<String, Map<String, ConsumerServicesManager.ServiceAndMetadata>> topicGroupMap =
                consumerServicesManager.resolveTopicGroupMap();

        // group -> topics, split by consumption mode
        final Map<String, Set<String>> exclusiveGroupTopicMap = new HashMap<>();
        final Map<String, Set<String>> normalGroupTopicMap = new HashMap<>();

        for (Map.Entry<String, Map<String, ConsumerServicesManager.ServiceAndMetadata>> topicEntry : topicGroupMap.entrySet()) {
            final String topic = topicEntry.getKey();

            for (Map.Entry<String, ConsumerServicesManager.ServiceAndMetadata> groupEntry : topicEntry.getValue().entrySet()) {
                final String group = groupEntry.getKey();
                messageService.updateTopicRegistry(topic, group);

                final Map<String, Set<String>> groupTopicMap =
                        groupEntry.getValue().exclusive ? exclusiveGroupTopicMap : normalGroupTopicMap;
                groupTopicMap.computeIfAbsent(group, k -> new HashSet<>()).add(topic);
            }
        }

        if (!exclusiveGroupTopicMap.isEmpty() || !normalGroupTopicMap.isEmpty()) {
            startKafkaConsumerThread(exclusiveGroupTopicMap, normalGroupTopicMap, kafkaProperties);
        }
    }

    /**
     * Returns a copy of {@code base} with the per-category polling overrides
     * applied. Each consumer category must get its own snapshot: the previous
     * implementation mutated one shared Properties instance after handing it to
     * the exclusive consumer threads, so if a thread built its KafkaConsumer
     * lazily it would pick up the "normal" settings instead of its own.
     *
     * @param base             shared base configuration (not modified)
     * @param maxPollRecords   max.poll.records for this category
     * @param sessionTimeoutMs session.timeout.ms for this category
     */
    private static Properties withPollOverrides(Properties base, int maxPollRecords, int sessionTimeoutMs) {
        final Properties props = new Properties();
        props.putAll(base);
        props.put("max.poll.interval.ms", 300000);
        props.put("max.poll.records", maxPollRecords);
        props.put("session.timeout.ms", sessionTimeoutMs);
        props.put("heartbeat.interval.ms", 10000);
        return props;
    }

    /**
     * Creates and starts all consumer threads, then installs a shutdown hook
     * that stops them gracefully.
     */
    private void startKafkaConsumerThread(
            Map<String, Set<String>> exclusiveGroupTopicMap,
            Map<String, Set<String>> normalGroupTopicMap,
            Properties kafkaProperties) {

        // 单独消费的消息 — one record per poll, generous session timeout.
        final Properties exclusiveProps = withPollOverrides(kafkaProperties, 1, 300000);
        for (Map.Entry<String, Set<String>> entry : exclusiveGroupTopicMap.entrySet()) {
            final String group = entry.getKey();
            for (String topic : entry.getValue()) {
                consumerThreads.add(new KafkaExclusiveConsumerThread(messageConsumer, group, topic, exclusiveProps));
            }
        }

        // 不需要单独消费的消息 — batched polls, two threads per group.
        final Properties normalProps = withPollOverrides(kafkaProperties, 20, 30000);
        for (Map.Entry<String, Set<String>> entry : normalGroupTopicMap.entrySet()) {
            for (int i = 0; i < 2; ++i) {
                consumerThreads.add(new KafkaNormalConsumerThread(messageConsumer, entry.getKey(), entry.getValue(), i, normalProps));
            }
        }

        final List<Thread> threads = new ArrayList<>(consumerThreads.size());
        for (KafkaBaseConsumerThread consumerThread : consumerThreads) {
            Thread thread = new Thread(consumerThread, consumerThread.threadName());
            thread.start();
            threads.add(thread);
        }

        // Graceful shutdown: signal every consumer to stop, then wait up to
        // five minutes per thread for in-flight work to finish.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            for (KafkaBaseConsumerThread consumerThread : consumerThreads) {
                consumerThread.shutdown();
            }

            for (Thread thread : threads) {
                try {
                    thread.join(300 * 1000L);
                } catch (InterruptedException e) {
                    // Restore interrupt status so the JVM shutdown sequence sees it.
                    Thread.currentThread().interrupt();
                    logger.error("interrupted while waiting for consumer thread [{}] to stop", thread.getName(), e);
                }
            }
        }, "kafka-consumer-shutdown"));
    }
}

