package cn.com.greatwall.kafka.util;

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import javax.management.InstanceNotFoundException;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.apache.commons.collections4.map.HashedMap;

import cn.com.greatwall.kafka.service.dto.BrokerDto;
import lombok.extern.slf4j.Slf4j;

/**
 * @Author herw
 * @Time 2021-04-16 09:22:41
 * @Version 1.0
 * @Description: Collects Kafka topic log sizes from broker JMX endpoints and merges them per topic.
 */
@Slf4j
public class JmxCollector {
    private static final String FORMAT_URL = "service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi";
    private static Map<String, JMXConnector> jmxMap = new ConcurrentHashMap<>();
    public static final String REGEX = "[a-zA-Z]+";
    
    private static class SingletonHolder {
        private static final JmxCollector INSTANCE = new JmxCollector();
    }
    
    public static JmxCollector getInstance() {
        return JmxCollector.SingletonHolder.INSTANCE;
    }
    
    private JMXConnector getJmxConnector(String host, int port) throws IOException {
        JMXServiceURL jmxUrl = new JMXServiceURL(String.format(FORMAT_URL, host, port));
        if(!jmxMap.containsKey(host)) {
            JMXConnector jmxConnector = this.connect(jmxUrl);
            jmxMap.put(host, jmxConnector);
        }
        return jmxMap.get(host);
    }
    
    private void removeJmxConnector(String host) {
        jmxMap.remove(host);
        log.error("jmx client closed");
    }
    
    private JMXConnector connect(JMXServiceURL jmxUrl) throws IOException {
        Map<String, Object> env = new HashMap<>(1 << 3);
        return JMXConnectorFactory.connect(jmxUrl, env);
    }
    
    /**
     * return the topic size according to cluster`s brokers
     * @param brokerList
     * @param topicMap
     * @return
     */
    public Map<String, Long> topicLogSizeByBroker(List<BrokerDto> brokerList, Map<String, Set<Integer>> topicMap) {
        Map<Integer, Map<String, Long>> brokerMap = new HashedMap<>(1 << 3);
        for (BrokerDto broker : brokerList) {
            try {
                JMXConnector jmxConnector = this.getJmxConnector(broker.getHost(), broker.getJmxPort());
                MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection();
                Map<String, Long> topicSizeMap = getTopicLogSize(topicMap, mBeanServerConnection);
                brokerMap.put(broker.getBid(), topicSizeMap);
            } catch (IOException e) {
                removeJmxConnector(broker.getHost());
                log.error("connect closed:", e);
            } catch (Exception e) {
                throw e;
            }
        }
        return mergeBrokerLogSize(brokerMap);
    }
    
    /**
     * merge every broker according to topicName
     * @param topicLogSizeMap
     * @return
     */
    private Map<String, Long> mergeBrokerLogSize(Map<Integer, Map<String, Long>> topicLogSizeMap) {
        Map<String, Long> sizeMap = new HashMap<>(1 << 3);
        topicLogSizeMap.keySet().forEach(key -> {
            Map<String, Long> map = topicLogSizeMap.getOrDefault(key, new HashMap<>(1));
            map.keySet().forEach(topic -> {
                long oldSize = sizeMap.getOrDefault(topic, 0L);
                sizeMap.put(topic, oldSize + map.getOrDefault(topic, 0L));
            });
        });
        return sizeMap;
    }
    
    /**
     * return the topic file size by topic information and merge every partitions value
     * @param topicMap
     * @param mBeanServerConnection
     * @return
     */
    private Map<String, Long> getTopicLogSize(Map<String, Set<Integer>> topicMap, MBeanServerConnection mBeanServerConnection) {
        Map<String, Long> topicLogSize = new HashMap<>(1 << 3);
        topicMap.keySet().forEach(key -> {
            try {
                Set<Integer> partitions = topicMap.get(key);
                long size = getLogSizeMetricValue(key, partitions, mBeanServerConnection);
                topicLogSize.put(key, size);
            } catch (Exception e) {
                log.error("get topic log size has error",e);
            }
        });
        return topicLogSize;
    }
    
    /**
     * return the topic file size by partitions
     * @param topicName
     * @param partitions
     * @param mBeanServerConnection
     * @return
     */
    private long getLogSizeMetricValue(String topicName, Set<Integer> partitions, MBeanServerConnection mBeanServerConnection) {
        return partitions.stream().mapToLong(partition -> {
            String value = null;
            try {
                value = mBeanServerConnection.getAttribute(getLogSizeObjectName(topicName, partition), "values").toString();
            } catch (InstanceNotFoundException e) {

            }catch (Exception e) {
                log.warn("get topic log size has error",e);
            }
            return value != null ? Long.parseLong(value) : 0L;
        }).sum();
    }
    
    /**
     * return the kafka log size Object Name
     * @param topicName
     * @param partition
     * @return
     */
    private ObjectName getLogSizeObjectName(String topicName, int partition) {
        ObjectName objectName = null;
        try {
            objectName = new ObjectName("kafka.log:type=Log,name=Size,topic=" + topicName + ",partition=" + partition);
        } catch (Exception e) {
            log.debug("Get topic Log Size ObjectName error! " + e.getMessage());
        }
        return objectName;
    }
}
