package personal.wang;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.DescribeLogDirsResult;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.ExecutionException;

/**
 * @className: CheckKafkaDisk
 * @Description:
 * @Author: wangyifei
 * @Date: 2025/4/22 15:57
 */
/**
 * Reports the total on-disk size of all partition replicas hosted by each
 * broker in a fixed list, using the Kafka AdminClient describeLogDirs API.
 * Prints one line per broker: raw byte count and the same value in GiB.
 */
public class CheckKafkaDisk {
    private static final Logger logger = LoggerFactory.getLogger(CheckKafkaDisk.class);

    /** Broker ids to inspect; note id 16 is intentionally absent from the original list. */
    private static final List<Integer> BROKER_IDS =
            Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17);

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                "kafka8:9092,kafka9:9092,kafka10:9092,kafka11:9092,kafka12:9092");
        // Large timeout: describing log dirs on big brokers can be slow.
        props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, 600000);

        try (AdminClient client = AdminClient.create(props)) {
            for (Integer brokerId : BROKER_IDS) {
                long size = describeBrokerSize(client, brokerId);
                // size >> 30 converts bytes to whole GiB (size is non-negative).
                System.out.printf("broker:%s ,size: %s , %s G%n", brokerId, size, size >> 30);
            }
        } catch (ExecutionException e) {
            logger.error("Failed to describe log dirs", e);
            throw new RuntimeException(e);
        } catch (InterruptedException e) {
            // Restore the interrupt status before propagating (Bloch, Item 81 /
            // standard InterruptedException handling).
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }

    /**
     * Sums the sizes of every partition replica across all log directories of one broker.
     *
     * @param client   open AdminClient used to issue the describeLogDirs request
     * @param brokerId the broker to query
     * @return total replica bytes on that broker
     * @throws ExecutionException   if the describeLogDirs request fails
     * @throws InterruptedException if the calling thread is interrupted while waiting
     */
    private static long describeBrokerSize(AdminClient client, Integer brokerId)
            throws ExecutionException, InterruptedException {
        DescribeLogDirsResult ret = client.describeLogDirs(Collections.singleton(brokerId));
        long size = 0L;
        // One map per broker: log-dir path -> LogDirInfo (only one broker was requested).
        for (Map<String, DescribeLogDirsResponse.LogDirInfo> logDirInfoMap : ret.all().get().values()) {
            size += logDirInfoMap.values().stream()
                    .flatMap(logDirInfo -> logDirInfo.replicaInfos.values().stream())
                    .mapToLong(replicaInfo -> replicaInfo.size) // unboxed sum, no intermediate Long stream
                    .sum();
        }
        return size;
    }
}
