package org.kafka;

import org.apache.kafka.clients.admin.*;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.TopicPartition;
import org.jmx.pojo.BrokerInfo;
import org.zk.ZKUtil;

import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

/**
 * Collects Kafka monitoring metrics (topics, consumer lag, per-broker disk
 * usage) by connecting directly to the cluster via AdminClient and ZooKeeper.
 */
public class MonitorServer {

    /** Bootstrap servers of the monitored Kafka cluster (shared by main and test01). */
    private static final String BOOTSTRAP_SERVERS =
            "192.168.10.102:9092,192.168.10.103:9092,192.168.10.104:9092";

    /** ZooKeeper connect string used to discover broker ids. */
    private static final String ZK_CONNECT = "192.168.10.102:2181";

    /**
     * Entry point: connects to the cluster, then prints topic overview,
     * producer log sizes, consumer offset/lag detail, and per-broker disk usage.
     */
    public static void main(String[] args) throws InterruptedException, ExecutionException, TimeoutException {
        try (KafkaMonitorService monitorService = new KafkaMonitorService(BOOTSTRAP_SERVERS);
             ZKUtil zkClient = new ZKUtil(ZK_CONNECT)) {
            final Set<String> topics = monitorService.getTopics();
            System.out.printf("topics:%s,brokers:%d,consumers:%s\n",
                    topics.toString(),
                    monitorService.getBrokerCount(),
                    monitorService.getConsumerGroups().toString());
            printProducerDetails(monitorService);
            printConsumerDetails(monitorService);
            printDiskDetails(monitorService, zkClient, topics);
        } catch (Exception e) {
            // BUG FIX: the original `catch (Exception ignore) {}` silently swallowed
            // every failure (connection refused, auth errors, timeouts), making the
            // tool appear to succeed while printing nothing. Report the cause.
            System.err.println("Kafka monitoring failed:");
            e.printStackTrace();
        }
    }

    /** Prints the log size per topic (producer-side view). */
    private static void printProducerDetails(KafkaMonitorService monitorService) throws Exception {
        final Map<String, Long> logSizes = monitorService.listProducerLogSizes();
        System.out.println("********生产者详情******* start");
        logSizes.forEach((topic, size) -> System.out.printf("%s->%d\n", topic, size));
        System.out.println("********生产者详情******* end");
    }

    /** Prints offset, lag and log-end offset for every consumer group assignment. */
    private static void printConsumerDetails(KafkaMonitorService monitorService) throws Exception {
        System.out.println("********消费者详情******* start");
        final List<TopicConsumerGroupState> groupStates = monitorService.listConsumerDetail();
        groupStates.forEach(groupState -> groupState.getPartitionAssignmentStates().forEach(s ->
                System.out.printf("topic:%s,groupId:%s,partition:%d,offset:%d,lag:%d,logEndOffset:%d\n",
                        s.getTopic(), s.getGroup(), s.getPartition(),
                        s.getOffset(), s.getLag(), s.getLogEndOffset())));
        System.out.println("********消费者详情******* end");
    }

    /**
     * Prints per-broker disk usage for every topic, using broker ids discovered
     * from ZooKeeper. A failure on one broker/topic pair is reported but does
     * not abort the scan (best-effort, as in the original).
     */
    private static void printDiskDetails(KafkaMonitorService monitorService, ZKUtil zkClient,
                                         Set<String> topics) throws Exception {
        System.out.println("********磁盘详情******* start");
        System.out.println("********zookeeper*******");
        final List<BrokerInfo> brokers = zkClient.getBrokers();
        for (String topic : topics) {
            for (BrokerInfo broker : brokers) {
                try {
                    final int bid = broker.getBid();
                    final long diskSize = monitorService.getTopicDiskSizeForSomeBroker(topic, bid);
                    System.out.printf("broker.id=%d->%s->%d\n", bid, topic, diskSize);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        System.out.println("********磁盘详情******* end");
    }

    /**
     * Exploration of the raw AdminClient API: lists consumer groups, their
     * committed offsets per partition, and each group's state and member
     * assignments. Kept for reference; not invoked from {@link #main(String[])}.
     */
    private static void test01() throws InterruptedException, ExecutionException, TimeoutException {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", BOOTSTRAP_SERVERS);
        try (AdminClient adminClient = KafkaAdminClient.create(properties)) {
            // List all consumer groups, with a bounded wait so we never hang forever.
            ListConsumerGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups();
            final Set<String> groupIds = listConsumerGroupsResult.all().get(60, TimeUnit.SECONDS).stream()
                    .map(ConsumerGroupListing::groupId)
                    .collect(Collectors.toSet());
            // Committed offset per topic-partition for each group.
            groupIds.forEach(gid -> {
                try {
                    final Map<TopicPartition, OffsetAndMetadata> metadataMap =
                            adminClient.listConsumerGroupOffsets(gid).partitionsToOffsetAndMetadata().get();
                    metadataMap.forEach((tp, meta) ->
                            System.out.printf("topic:%s,partition:%s,offset:%s\n",
                                    tp.topic(), tp.partition(), meta.offset()));
                } catch (InterruptedException e) {
                    // BUG FIX: restore the interrupt flag instead of swallowing it,
                    // so callers (and the forEach loop's owner) can observe cancellation.
                    Thread.currentThread().interrupt();
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                }
            });
            // Group state and member/partition assignments.
            Map<String, ConsumerGroupDescription> groupDetails = adminClient.describeConsumerGroups(groupIds).all()
                    .get(60, TimeUnit.SECONDS);
            groupDetails.forEach((groupId, description) -> {
                final ConsumerGroupState state = description.state();
                // BUG FIX: the original format string "state:%s" had no newline,
                // fusing the state with the next line of output.
                System.out.printf("state:%s\n", state.toString());
                final Collection<MemberDescription> members = description.members();
                if (!members.isEmpty()) {
                    // Live consumers exist: print each member's assigned partitions.
                    members.forEach(member -> {
                        final MemberAssignment assignment = member.assignment();
                        for (TopicPartition tp : assignment.topicPartitions()) {
                            // BUG FIX: newline added here as well (was missing).
                            System.out.printf("clientId:%s,consumerId:%s,partition:%s,topic:%s\n",
                                    member.clientId(), member.consumerId(),
                                    tp.partition(), tp.topic());
                        }
                    });
                }
                // NOTE(review): the no-members branch (committed offsets without live
                // consumers) was sketched but never implemented in the original
                // ("withNoMembers"); left unimplemented here too.
            });
            // To also read log-end offsets (for lag), use a KafkaConsumer#endOffsets afterwards.
        }
    }
}
