package demo.kafka.apache;

import com.alibaba.fastjson2.JSON;
import demo.utils.Constants;
import demo.utils.ThreadUtil;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.config.ConfigResource;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;
import java.util.concurrent.ExecutionException;

/**
 * 一般情况下，我们都习惯使用Kafka中bin目录下的脚本工具来管理查看Kafka，但是有些时候需要将某些管理查看的功能集成到系统（比如Kafka
 * Manager）中，那么就需要调用一些API来直接操作Kafka了。在Kafka0.11.0.0版本之前，可以通过kafka-core包（Kafka的服务端代码，采用Scala编写）下的
 * AdminClient和AdminUtils来实现部分的集群管理操作。在Kafka0.11.0.0版本之后，又多了一个AdminClient，这个是在kafka-client包下的，这是一个抽象类，具体的实现是
 * org.apache.kafka.clients.admin.KafkaAdminClient
 */
public class ApacheAdminClientDemo {

    static Logger logger = LoggerFactory.getLogger(ApacheAdminClientDemo.class);

    // Shared client, created once in beforeClass() and released in afterClass().
    public static AdminClient adminClient;

    /**
     * The AdminClient API supports managing and inspecting topics, brokers, acls, and other Kafka
     * objects. Internally it uses Kafka's own binary wire protocol (see the Kafka protocol guide).
     * The main steps are:
     * <ol>
     * <li>The client builds the protocol request that corresponds to the method call — e.g. the
     * createTopics method sends a CreateTopicRequest.
     * <li>The client sends the request to a Kafka broker.
     * <li>The broker processes the request and replies — CreateTopicRequest is answered by a
     * CreateTopicResponse.
     * <li>The client receives the response and parses it.
     * </ol>
     * The request/response classes live in org.apache.kafka.common.requests; AbstractRequest and
     * AbstractResponse are the two base classes of those request and response types.
     *
     * <p>NOTE: this method is an API survey only — all arguments are {@code null} and the client
     * configuration is empty, so actually invoking it would fail. It exists to document the
     * available operations, and is deliberately not annotated with {@code @Test}.
     */
    public void kafkaAdminClient() {
        // Program to the AdminClient interface — the cast to KafkaAdminClient added nothing.
        // try-with-resources guarantees the demo client is released (AdminClient is AutoCloseable);
        // the original leaked it.
        try (AdminClient client = AdminClient.create(new HashMap<>())) {
            // Create topics
            Collection<NewTopic> newTopics = null;
            client.createTopics(newTopics);
            // Delete topics
            Collection<String> topics = null;
            client.deleteTopics(topics);
            // List all topics
            client.listTopics();
            // Describe topics
            Collection<String> topicNames = null;
            client.describeTopics(topicNames);
            // Describe the cluster
            client.describeCluster();
            // Describe ACLs
            AclBindingFilter filter = null;
            client.describeAcls(filter);
            // Create ACLs
            Collection<AclBinding> acls = null;
            client.createAcls(acls);
            // Delete ACLs
            Collection<AclBindingFilter> filters = null;
            client.deleteAcls(filters);
            // Describe configuration
            Collection<ConfigResource> resources = null;
            client.describeConfigs(resources);
            // Alter configuration
            Map<ConfigResource, Config> configs = null;
            client.alterConfigs(configs);
            // Move replica log directories
            Map<TopicPartitionReplica, String> replicaAssignment = null;
            client.alterReplicaLogDirs(replicaAssignment);
            // Describe broker log directories
            Collection<Integer> brokers = null;
            client.describeLogDirs(brokers);
            // Describe replica log directories
            Collection<TopicPartitionReplica> replicas = null;
            client.describeReplicaLogDirs(replicas);
            // Add partitions
            Map<String, NewPartitions> newPartitions = null;
            client.createPartitions(newPartitions);
        }
    }

    /** Creates the shared AdminClient pointed at the test cluster before any test runs. */
    @BeforeClass
    public static void beforeClass() {
        Properties properties = new Properties();
        properties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, Constants.KAFKA_BOOTSTRAP_SERVERS);
        adminClient = AdminClient.create(properties);
    }

    /** Releases the shared AdminClient after all tests have run. */
    @AfterClass
    public static void afterClass() {
        adminClient.close();
    }

    /**
     * Describes the cluster (logging each node) and lists all topics (logging each listing).
     *
     * @throws InterruptedException if interrupted while waiting on a KafkaFuture
     * @throws ExecutionException   if the broker request fails
     */
    @Test
    public void listTopics() throws InterruptedException, ExecutionException {
        DescribeClusterResult cluster = adminClient.describeCluster();
        logger.info("cluster:{},tojson:{}", cluster, JSON.toJSONString(cluster));
        KafkaFuture<Collection<Node>> nodesFuture = cluster.nodes();
        Collection<Node> nodes = nodesFuture.get();
        for (Node node : nodes) {
            logger.info("node:{}", node);
        }
        ListTopicsResult topics = adminClient.listTopics();
        logger.info("topics:{},tojson:{}", topics, JSON.toJSONString(topics));
        KafkaFuture<Collection<TopicListing>> topicFuture = topics.listings();
        Collection<TopicListing> topicListings = topicFuture.get();
        for (TopicListing topicListing : topicListings) {
            logger.info("topic:{}", topicListing);
        }
    }

    /**
     * Deletes the "pika_log" topic and waits for the broker to confirm each deletion.
     *
     * @throws InterruptedException if interrupted while waiting for a deletion to complete
     */
    @Test
    public void testDeleteTopic() throws InterruptedException {
        Properties props = new Properties();
        // Use the named config constant, consistent with beforeClass().
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, Constants.KAFKA_BOOTSTRAP_SERVERS);
        // try-with-resources: the original created this client and never closed it.
        try (AdminClient client = AdminClient.create(props)) {
            Collection<String> topics = Arrays.asList("pika_log");
            DeleteTopicsResult deleteTopicsResult = client.deleteTopics(topics);
            logger.info("deleteTopicsResult = {}", deleteTopicsResult);
            for (Map.Entry<String, KafkaFuture<Void>> entry : deleteTopicsResult.values().entrySet()) {
                String topic = entry.getKey();
                try {
                    // Block on the future directly instead of the original poll-and-sleep busy wait.
                    entry.getValue().get();
                    logger.info("{} isDone", topic);
                } catch (ExecutionException e) {
                    // Deletion failed broker-side (e.g. topic does not exist) — log with cause.
                    logger.error("deletion of topic {} failed", topic, e);
                }
            }
        }
    }


    /**
     * Creates topic "pika_log" with 4 partitions and a replication factor of 2, blocking until
     * the broker acknowledges the creation.
     */
    @Test
    public void testCreateTopic() {
        NewTopic newTopic = new NewTopic("pika_log", 4, (short) 2);
        Properties props = new Properties();
        // Use the named config constant, consistent with beforeClass().
        props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, Constants.KAFKA_BOOTSTRAP_SERVERS);
        // try-with-resources: the original created this client and never closed it.
        try (AdminClient client = AdminClient.create(props)) {
            CreateTopicsResult result = client.createTopics(Collections.singletonList(newTopic));
            result.all().get();
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            logger.error("interrupted while creating topic", e);
        } catch (ExecutionException e) {
            // Creation failed broker-side (e.g. topic already exists) — log with cause,
            // not printStackTrace().
            logger.error("topic creation failed", e);
        }
    }
}
