package com.bird.admin;

import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.config.ConfigResource;

import java.util.*;
import java.util.concurrent.ExecutionException;

/**
 * @author sijunqiang
 * @date 2021/7/18 10:26
 * @description: Kafka admin client demo (topic CRUD and configuration operations)
 */
public class KafkaAdminClient {


    private final static String TOPIC_NAME = "sijunqiang-topic";


    public static void main(String[] args) throws ExecutionException, InterruptedException {

        // 连接topic
        //AdminClient adminClient = KafkaAdminClient.create();
// System.out.println(adminClient);

        // 创建topic
        //  KafkaAdminClient.createTopic();

        // 删除topic
        //   deleteTopics();

        // 查看topic
        //  topicLists();

        // 查看topic详情
        describeTopics();

        // 修改topic 配置信息
        //  alterConfig();

        // 查看topic-config 的配置信息
        //   describeTopicsConfigs();

        // 增加partition 的数量
        //incrPartitions(2);
    }

    /**
     * 创建kafka的连接
     */
    public static AdminClient create() {

        // kafka的一些配置属性
        Properties properties = new Properties();
        // 配置kafka的集群连接地址
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.223.128:9092,192.168.223.128:9093,192.168.223.128:9094");
        //创建连接
        AdminClient adminClient = AdminClient.create(properties);
        return adminClient;
    }


    /**
     * 创建topic
     */
    public static void createTopic() {

        // 1:先构建kafka客户端连接工具
        AdminClient adminClient = KafkaAdminClient.create();
        // 副本因子
        short re = 1;
        NewTopic topic = new NewTopic(TOPIC_NAME, 1, re);
        // 创建topic 可以创建多topic
        CreateTopicsResult topics = adminClient.createTopics(Arrays.asList(topic));
        System.out.println("CreateTopicsResult: " + topic);
    }


    /**
     * 查看topic
     */
    public static void topicLists() throws ExecutionException, InterruptedException {
        AdminClient adminClient = create();

        // 是否设置查看所有的topic
        ListTopicsOptions options = new ListTopicsOptions();
        options.listInternal(true);

        // 获取kafka内部所有的topic
        ListTopicsResult listTopicsResult = adminClient.listTopics(options);
        Set<String> names = listTopicsResult.names().get();
        names.stream().forEach(System.out::println);
    }


    /**
     * 删除topic
     */
    public static void deleteTopics() {
        AdminClient adminClient = KafkaAdminClient.create();
        DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(Arrays.asList(TOPIC_NAME));
        System.out.println("deleteTopicsResult: " + deleteTopicsResult);
    }


    /**
     * 查看topic详情
     * topic-name: sijunqiang-topic
     * desc: (name=sijunqiang-topic, internal=false,
     * partitions= 可以有多个分区
     * (partition=0, # 分区
     * leader=192.168.223.128:9092 (id: 0 rack: null),  主节点
     * replicas=192.168.223.128:9092 (id: 0 rack: null), 副本节点
     * isr=192.168.223.128:9092 (id: 0 rack: null)),
     * authorizedOperations=null)
     */
    public static void describeTopics() throws ExecutionException, InterruptedException {
        AdminClient adminClient = KafkaAdminClient.create();
        DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(Arrays.asList(TOPIC_NAME));
        Map<String, TopicDescription> map = describeTopicsResult.all().get();
        Set<Map.Entry<String, TopicDescription>> entries = map.entrySet();
        entries.stream().forEach((entry) -> {
            System.out.println("topic-name: " + entry.getKey() + "desc: " + entry.getValue());
        });
    }


    /**
     * 查看topic的配置信息
     * ConfigResource(type=TOPIC, name='sijunqiang-topic') ,
     * Config(
     * entries=[
     * ConfigEntry(
     * name=compression.type,
     * value=producer,
     * source=DEFAULT_CONFIG,
     * isSensitive=false,
     * isReadOnly=false,
     * synonyms=[]),
     * ConfigEntry(
     * name=leader.replication.throttled.replicas,
     * value=,
     * source=DEFAULT_CONFIG,
     * isSensitive=false,
     * isReadOnly=false,
     * synonyms=[]))
     */
    public static void describeTopicsConfigs() throws ExecutionException, InterruptedException {

        AdminClient adminClient = KafkaAdminClient.create();
        // 创建配置实例 param1: topic类型 param2:topic名称
        ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC_NAME);
        DescribeConfigsResult describeConfigsResult = adminClient.describeConfigs(Arrays.asList(configResource));
        Map<ConfigResource, Config> configResourceConfigMap = describeConfigsResult.all().get();
        Set<Map.Entry<ConfigResource, Config>> entries = configResourceConfigMap.entrySet();
        entries.stream().forEach((entry) -> {
            System.out.println(entry.getKey() + " , " + entry.getValue());
        });
    }


    /**
     * 修改topic的配置信息
     */
    public static void alterConfig() throws ExecutionException, InterruptedException {

        AdminClient adminClient = KafkaAdminClient.create();
        // 修改topic-里面的配置
        Map<ConfigResource, Config> configMap = new HashMap<>();
        ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, TOPIC_NAME);
        Config config = new Config(Arrays.asList(new ConfigEntry("preallocate", "false")));
        configMap.put(configResource, config);

        // 执行修改操作
        AlterConfigsResult alterConfigsResult = adminClient.alterConfigs(configMap);
        alterConfigsResult.all().get();
    }

    /**
     * 增加partition的数量，partition只能增加 不能减少
     */
    public static void incrPartitions(int partition) throws ExecutionException, InterruptedException {

        // 构建连接
        AdminClient adminClient = KafkaAdminClient.create();

        // 设置增加的数量
        Map<String, NewPartitions> partitionsMap = new HashMap<>();
        NewPartitions newPartitions = NewPartitions.increaseTo(partition);
        partitionsMap.put(TOPIC_NAME, newPartitions);

        // 执行
        CreatePartitionsResult partitions = adminClient.createPartitions(partitionsMap);
        partitions.all().get();
    }
}
