package org.kafka;

import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Closeable;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

/**
 * Monitoring facade over a Kafka cluster: lists topics and consumer groups,
 * reports per-partition log-end offsets / lag, and sums on-disk topic size.
 *
 * <p>Not thread-safe beyond what the underlying {@link AdminClient} provides.
 * Callers that hold an instance long-term must wire {@link #close()} into
 * their shutdown flow to release the admin-client resources.
 */
public class KafkaMonitorService implements Closeable {

    /** Default timeout, in seconds, applied to admin-client requests. */
    private static final int DEFAULT_TIMEOUT_SECONDS = 3;

    private static final Logger logger = LoggerFactory.getLogger(KafkaMonitorService.class);

    /** Admin client built from the bootstrap address; released in {@link #close()}. */
    private AdminClient client;
    /** Comma-separated bootstrap server list exactly as supplied by the caller. */
    private final String address;

    /**
     * Creates the service and eagerly probes the cluster. A failed probe is only
     * logged (construction still succeeds) so that callers can retry later.
     *
     * <p>When you are done with this instance call {@link #close()}; if you keep it
     * alive for the application's lifetime, put close() in the teardown path instead.
     *
     * @param kafkaAddress Kafka bootstrap address; cluster addresses separated by commas
     */
    public KafkaMonitorService(String kafkaAddress) {
        if (!validateKafka(kafkaAddress)) {
            logger.warn("kafka连接或zookeeper连接失败");
        }
        this.address = kafkaAddress;
    }

    /**
     * Creates the admin client and verifies the cluster is actually reachable.
     *
     * <p>BUGFIX: {@code KafkaAdminClient.create} never returns {@code null}, so the
     * previous {@code client != null} check could not detect a broken connection.
     * We now issue a bounded {@code describeCluster} request as a liveness probe.
     *
     * @param kafkaAddress Kafka bootstrap address
     * @return {@code true} if the cluster answered the probe within the timeout
     */
    private boolean validateKafka(String kafkaAddress) {
        Properties props = new Properties();
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, kafkaAddress);
        client = KafkaAdminClient.create(props);
        try {
            client.describeCluster().nodes().get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            return true;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        } catch (ExecutionException | TimeoutException e) {
            logger.warn("kafka cluster probe failed", e);
            return false;
        }
    }

    private AdminClient getClient() {
        return this.client;
    }

    /**
     * Returns the names of all topics currently known to the cluster.
     *
     * @return topic names; an empty set on any failure (never {@code null})
     */
    public Set<String> getTopics() {
        try {
            return getClient().listTopics().names().get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.error("listTopics interrupted", e);
        } catch (ExecutionException | TimeoutException e) {
            // BUGFIX: failures were previously swallowed without a trace.
            logger.error("listTopics failed", e);
        }
        return Collections.emptySet();
    }

    /**
     * Returns the number of broker addresses in the configured bootstrap list.
     *
     * <p>NOTE(review): this counts comma-separated entries of the address the caller
     * supplied, not live brokers; a true broker count would come from
     * {@code describeCluster().nodes()} (or zookeeper's {@code brokers/ids/*}).
     * Kept as-is to preserve existing behavior.
     */
    public int getBrokerCount() {
        return getBrokers().split(",").length;
    }

    private String getBrokers() {
        return this.address;
    }

    /**
     * Returns all consumer-group ids known to the cluster.
     *
     * @return group ids; an empty list on any failure (never {@code null})
     */
    public List<String> getConsumerGroups() {
        try {
            Collection<ConsumerGroupListing> listings = getClient().listConsumerGroups()
                    .all().get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            return listings.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.error("listConsumerGroups interrupted", e);
        } catch (ExecutionException | TimeoutException e) {
            // BUGFIX: failures were previously swallowed without a trace.
            logger.error("listConsumerGroups failed", e);
        }
        return Collections.emptyList();
    }

    /**
     * Sums the log-end offsets (total messages produced) for every
     * topic/consumer-group pair.
     *
     * @return map keyed by {@code "<topic>.<group>"} to the summed log size
     */
    public Map<String, Long> listProducerLogSizes() {
        final Set<String> topics = getTopics();
        final List<String> consumerGroups = getConsumerGroups();
        // Guard against a zero product so the map still gets a sane capacity.
        Map<String, Long> details = new HashMap<>(Math.max(16, topics.size() * consumerGroups.size()));
        topics.forEach(topic -> consumerGroups.forEach(group -> {
            Map<TopicPartition, Long> logSizeMap = getLogSize(getBrokers(), group, topic);
            long sumLogSize = logSizeMap.values().stream().mapToLong(Long::longValue).sum();
            details.put(String.format("%s.%s", topic, group), sumLogSize);
        }));
        return details;
    }

    /**
     * Lists every consumer group's per-partition state (member, offset,
     * log-end offset and lag) across all topics.
     */
    public List<TopicConsumerGroupState> listConsumerDetail() {
        final Set<String> topics = getTopics();
        final List<String> consumerGroups = getConsumerGroups();
        final int size = topics.size() * consumerGroups.size();
        List<TopicConsumerGroupState> topicConsumerGroupStates =
                describeConsumerGroups(topics, consumerGroups, size);
        // Second pass: fill in logEndOffset and lag per partition.
        topics.forEach(topic -> topicConsumerGroupStates.forEach(topicConsumerGroupState -> {
            String groupId = topicConsumerGroupState.getGroupId();
            List<PartitionAssignmentState> partitionAssignmentStates =
                    topicConsumerGroupState.getPartitionAssignmentStates();
            partitionAssignmentStates.sort(Comparator.comparingInt(PartitionAssignmentState::getPartition));
            Map<Integer, Long> logSizeMap = this.getPartitionLogSizeMap(getBrokers(), groupId, topic);
            for (PartitionAssignmentState partitionAssignmentState : partitionAssignmentStates) {
                int partition = partitionAssignmentState.getPartition();
                // BUGFIX: get() returned null (NPE on unboxing) when the partition was
                // missing from the map; -1 marks "unknown" consistently with getLag().
                long logEndOffset = logSizeMap.getOrDefault(partition, -1L);
                partitionAssignmentState.setLogEndOffset(logEndOffset);
                partitionAssignmentState.setLag(getLag(partitionAssignmentState.getOffset(), logEndOffset));
            }
        }));
        return topicConsumerGroupStates;
    }

    /**
     * Computes consumer lag from a committed offset and a log-end offset.
     *
     * @param offset committed consumer offset; {@code null} or negative means unknown
     * @param leo    log-end offset; {@code null} means unknown
     * @return the lag, clamped to 0; {@code -1} when either input is unknown
     */
    private long getLag(Long offset, Long leo) {
        // BUGFIX: both arguments were unboxed unchecked; guard against null.
        if (offset == null || offset < 0 || leo == null) {
            return -1L;
        }
        long lag = leo - offset;
        return Math.max(lag, 0L);
    }

    /** Re-keys {@link #getLogSize} results by partition number. */
    private Map<Integer, Long> getPartitionLogSizeMap(String brokers, String groupId, String topic) {
        Map<Integer, Long> logSizeTempMap = new HashMap<>();
        Map<TopicPartition, Long> logSizeMap = getLogSize(brokers, groupId, topic);
        if (logSizeMap == null || logSizeMap.isEmpty()) {
            return logSizeTempMap;
        }
        logSizeMap.forEach((k, v) -> logSizeTempMap.put(k.partition(), v));
        return logSizeTempMap;
    }

    /**
     * Builds the per-group state objects for every group that has committed
     * offsets on one of the given topics.
     *
     * <p>NOTE(review): {@code consumerPatitionOffsetMap} is keyed by group id only,
     * so a group consuming several of the given topics keeps only the offsets of
     * the last topic processed — preserved as-is; verify against callers.
     *
     * @param topics         topics to inspect
     * @param consumerGroups candidate consumer groups
     * @param size           capacity hint (topics x groups)
     */
    private List<TopicConsumerGroupState> describeConsumerGroups(Set<String> topics,
            List<String> consumerGroups, int size) {
        Set<String> groupIds = new HashSet<>();
        Map<String, Set<Map.Entry<TopicPartition, OffsetAndMetadata>>> consumerPatitionOffsetMap =
                new HashMap<>(Math.max(16, size));
        final List<TopicConsumerGroupState> topicConsumerGroupStates = new ArrayList<>(Math.max(0, size));
        topics.forEach(topic -> consumerGroups.forEach(group -> {
            try {
                // BUGFIX: this get() previously had no timeout and could hang forever.
                Map<TopicPartition, OffsetAndMetadata> partitionsToOffsetAndMetadataMap = getClient()
                        .listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata()
                        .get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
                Set<Map.Entry<TopicPartition, OffsetAndMetadata>> thisTopicEntries = new HashSet<>();
                for (Map.Entry<TopicPartition, OffsetAndMetadata> entry
                        : partitionsToOffsetAndMetadataMap.entrySet()) {
                    if (topic.equalsIgnoreCase(entry.getKey().topic())) {
                        thisTopicEntries.add(entry);
                    }
                }
                // Only record groups that actually committed offsets on this topic.
                if (!thisTopicEntries.isEmpty()) {
                    groupIds.add(group);
                    consumerPatitionOffsetMap.put(group, thisTopicEntries);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                logger.error("listConsumerGroupOffsets interrupted, group={}", group, e);
            } catch (ExecutionException | TimeoutException e) {
                logger.error("listConsumerGroupOffsets failed, group={}", group, e);
            }
        }));

        try {
            Map<String, ConsumerGroupDescription> groupDetails = getClient().describeConsumerGroups(groupIds)
                    .all().get(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            groupDetails.forEach((groupId, description) -> {
                TopicConsumerGroupState topicConsumerGroupState =
                        new TopicConsumerGroupState(groupId, "broker");
                topicConsumerGroupState.setConsumerGroupState(description.state());
                topicConsumerGroupState.setSimpleConsumerGroup(description.isSimpleConsumerGroup());
                // Flag whether the group currently has live members.
                topicConsumerGroupState.setHasMembers(!description.members().isEmpty());
                // Per-partition committed offsets collected above for this group.
                Set<Map.Entry<TopicPartition, OffsetAndMetadata>> consumerPatitionOffsets =
                        consumerPatitionOffsetMap.getOrDefault(groupId, Collections.emptySet());

                List<PartitionAssignmentState> partitionAssignmentStates;
                if (!description.members().isEmpty()) {
                    // Live consumers exist: map offsets onto member assignments.
                    partitionAssignmentStates = this.withMembers(consumerPatitionOffsets, groupId, description);
                } else {
                    // No live consumers: report committed offsets only.
                    partitionAssignmentStates = this.withNoMembers(consumerPatitionOffsets, groupId);
                }
                // Members may exist but belong to a different topic, leaving the
                // assignment list empty; fall back to the member-less view.
                if (partitionAssignmentStates.isEmpty()) {
                    partitionAssignmentStates = this.withNoMembers(consumerPatitionOffsets, groupId);
                }
                topicConsumerGroupState.setPartitionAssignmentStates(partitionAssignmentStates);
                topicConsumerGroupStates.add(topicConsumerGroupState);
            });
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.error("describeConsumerGroups interrupted", e);
        } catch (Exception e) {
            // BUGFIX: previously logged only e.getMessage(), dropping the stack trace.
            logger.error("describeConsumerGroups failed", e);
        }
        return topicConsumerGroupStates;
    }

    /**
     * Builds partition assignment states for a group with live members,
     * joining each member's assigned partitions with the committed offsets.
     */
    private List<PartitionAssignmentState> withMembers(
            Set<Map.Entry<TopicPartition, OffsetAndMetadata>> consumerPatitionOffsets, String groupId,
            ConsumerGroupDescription description) {
        Set<PartitionAssignmentState> partitionAssignmentStates = new HashSet<>();
        Map<Integer, Long> partitionOffsetMap = new HashMap<>();
        Set<String> topics = new HashSet<>();
        consumerPatitionOffsets.forEach(entryInfo -> {
            TopicPartition topicPartition = entryInfo.getKey();
            OffsetAndMetadata offsetAndMetadata = entryInfo.getValue();
            // BUGFIX: the offsets map may hold null values for partitions without
            // a committed offset; the old code would NPE here.
            if (offsetAndMetadata != null) {
                partitionOffsetMap.put(topicPartition.partition(), offsetAndMetadata.offset());
            }
            topics.add(topicPartition.topic());
        });
        description.members().forEach(memberDescription ->
            memberDescription.assignment().topicPartitions().forEach(topicPartition -> {
                String topic = topicPartition.topic();
                // Skip assignments for topics this offset set does not cover.
                if (topics.contains(topic)) {
                    PartitionAssignmentState state = new PartitionAssignmentState();
                    state.setPartition(topicPartition.partition());
                    state.setTopic(topic);
                    state.setClientId(Optional.ofNullable(memberDescription.clientId()).orElse("-"));
                    state.setGroup(groupId);
                    state.setConsumerId(Optional.ofNullable(memberDescription.consumerId()).orElse("-"));
                    state.setHost(Optional.ofNullable(memberDescription.host()).orElse("-"));
                    // -1 marks "no committed offset", consistent with getLag().
                    state.setOffset(
                            Optional.ofNullable(partitionOffsetMap.get(topicPartition.partition())).orElse(-1L));
                    partitionAssignmentStates.add(state);
                }
            }));
        return new ArrayList<>(partitionAssignmentStates);
    }

    /**
     * Builds partition assignment states for a group with no live members,
     * reporting only the committed offsets.
     */
    private List<PartitionAssignmentState> withNoMembers(
            Set<Map.Entry<TopicPartition, OffsetAndMetadata>> consumerPatitionOffsets, String groupId) {
        List<PartitionAssignmentState> partitionAssignmentStates = new ArrayList<>();
        consumerPatitionOffsets.forEach(entryInfo -> {
            TopicPartition topicPartition = entryInfo.getKey();
            OffsetAndMetadata offsetAndMetadata = entryInfo.getValue();
            PartitionAssignmentState state = new PartitionAssignmentState();
            state.setPartition(topicPartition.partition());
            state.setTopic(topicPartition.topic());
            state.setGroup(groupId);
            // BUGFIX: offset() returns a primitive long, so the old
            // Optional.ofNullable(...).orElse(-1L) could never fire — yet the map
            // value itself may be null. Apply the intended -1 fallback explicitly.
            state.setOffset(offsetAndMetadata == null ? -1L : offsetAndMetadata.offset());
            partitionAssignmentStates.add(state);
        });
        return partitionAssignmentStates;
    }

    /**
     * Sums the on-disk size of a topic's replicas hosted on one broker.
     *
     * @param topic    the topic to measure
     * @param brokerID broker id — {@code broker.id} from Kafka's server.properties
     * @return total size in bytes
     * @throws ExecutionException   if the describe-log-dirs request fails
     * @throws InterruptedException if the calling thread is interrupted
     */
    public long getTopicDiskSizeForSomeBroker(String topic, int brokerID)
            throws ExecutionException, InterruptedException {
        long sum = 0;
        DescribeLogDirsResult ret = getClient().describeLogDirs(Collections.singletonList(brokerID));
        Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> logDirsByBroker = ret.all().get();
        for (Map<String, DescribeLogDirsResponse.LogDirInfo> logDirs : logDirsByBroker.values()) {
            for (DescribeLogDirsResponse.LogDirInfo info : logDirs.values()) {
                for (Map.Entry<TopicPartition, DescribeLogDirsResponse.ReplicaInfo> replicas
                        : info.replicaInfos.entrySet()) {
                    if (topic.equals(replicas.getKey().topic())) {
                        sum += replicas.getValue().size;
                    }
                }
            }
        }
        return sum;
    }

    /**
     * Fetches the log-end offset of every partition of a topic.
     *
     * @param brokers   broker bootstrap list
     * @param groupName consumer group used for the throwaway consumer
     * @param topicName topic to inspect
     * @return log-end offset per partition; empty on failure (never {@code null})
     */
    private Map<TopicPartition, Long> getLogSize(String brokers, String groupName, String topicName) {
        Map<TopicPartition, Long> result = new HashMap<>();
        // try-with-resources replaces the manual close-in-finally of the old code.
        try (KafkaConsumer<String, String> kafkaConsumer =
                generatorKafkaCustomer(brokers, groupName, false)) {
            List<PartitionInfo> partitions = kafkaConsumer.partitionsFor(topicName);
            if (partitions == null) {
                return result;
            }
            List<TopicPartition> topicPartitions = partitions.stream()
                    .map(p -> new TopicPartition(topicName, p.partition()))
                    .collect(Collectors.toList());
            result = kafkaConsumer.endOffsets(topicPartitions);
        } catch (Exception e) {
            logger.error("consumer getLogSize error.", e);
        }
        return result;
    }

    /** Builds consumer properties for the throwaway monitoring consumer. */
    private Properties generatorConsumerProps(String brokers, String group, boolean isAutoCommit) {
        Properties consumerProps = new Properties();
        consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, group);
        consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, isAutoCommit);
        consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        return consumerProps;
    }

    /** Creates a short-lived String/String consumer for offset queries. */
    private KafkaConsumer<String, String> generatorKafkaCustomer(String brokers, String group,
            boolean isAutoCommit) {
        return new KafkaConsumer<>(generatorConsumerProps(brokers, group, isAutoCommit));
    }

    /** Releases the underlying admin client; safe to call when construction failed. */
    @Override
    public void close() throws IOException {
        if (client != null) {
            client.close(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS);
            logger.info("connection has be closed ");
        }
    }
}
