package com.tiantian.kafka;

import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.KafkaFuture;

import java.util.*;
import java.util.concurrent.ExecutionException;

/**
 * Kafka topic administration utilities built on the {@link AdminClient} API:
 * create topics, grow partition counts, list/describe topics, and delete topics.
 *
 * <p>Every operation opens a short-lived {@link AdminClient} against
 * {@link #BOOTSTRAP_SERVERS} and blocks until the broker responds. Failures are
 * reported via stack trace rather than rethrown, so callers that need to react
 * to errors must adapt these methods.
 *
 * @author tant
 * @since 2025/3/5
 */
public class Constants {

    public static final String BASE_USE_TOPIC = "BASE_USE_TOPIC";
    public static final String CONSUMER_GROUP_TOPIC = "CONSUMER_GROUP_TOPIC";
    public static final String PRODUCER_STRATEGY_TOPIC = "PRODUCER_STRATEGY_TOPIC";
    public static final String CONSUMER_REBALANCE_TOPIC = "CONSUMER_REBALANCE_TOPIC";
    public static final String CONSUMER_STRATEGY_TOPIC = "CONSUMER_STRATEGY_TOPIC";
    public static final String PRODUCER_ACK_TOPIC = "PRODUCER_ACK_TOPIC";
    public static final String IDEMPOTENCE_TOPIC = "IDEMPOTENCE_TOPIC";
    public static final String BOOTSTRAP_SERVERS = "192.168.217.132:9092";

    /** Static utility class — not instantiable. */
    private Constants() {
    }

    public static void main(String[] args) {
        // createTopic is idempotent: it creates the topic when absent, otherwise
        // grows its partition count. (The previous version invoked
        // createTopicAndPartitions twice; the second call always failed with
        // TopicExistsException.)
        createTopic(CONSUMER_GROUP_TOPIC, 2, 1);
        getTopicsInfo();
//        deleteTopic(null);
//        getTopicsInfo();
    }

    /**
     * Builds an {@link AdminClient} connected to {@link #BOOTSTRAP_SERVERS}.
     * The caller is responsible for closing it (use try-with-resources).
     *
     * @return a freshly created AdminClient
     */
    private static AdminClient newAdminClient() {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        return AdminClient.create(props);
    }

    /**
     * Prints every topic in the cluster together with its partition count and
     * partition details to stdout.
     */
    public static void getTopicsInfo() {
        try (AdminClient adminClient = newAdminClient()) {
            // 1. Fetch all topic names.
            Set<String> topics = adminClient.listTopics().names().get();
            // 2. Describe every topic in a single request instead of one RPC per topic.
            Map<String, TopicDescription> descriptions = adminClient.describeTopics(topics).all().get();
            for (Map.Entry<String, TopicDescription> entry : descriptions.entrySet()) {
                TopicDescription description = entry.getValue();
                System.out.println("Topic: " + entry.getKey());
                System.out.println("Partitions: " + description.partitions().size());
                System.out.println("Partition Details: " + description.partitions());
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
    }

    /**
     * Creates a topic with the given partition and replica counts. Fails (and logs
     * the error) when the topic already exists; see {@link #createTopic} for an
     * idempotent variant.
     *
     * @param topic       topic name
     * @param partitions  number of partitions
     * @param replication replication factor (must not exceed the broker count)
     */
    public static void createTopicAndPartitions(String topic, int partitions, int replication) {
        try (AdminClient adminClient = newAdminClient()) {
            NewTopic newTopic = new NewTopic(topic, partitions, (short) replication);
            adminClient.createTopics(Collections.singleton(newTopic)).all().get();
            System.out.println("Topic created successfully.");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
    }

    /**
     * Idempotently ensures {@code topicName} exists: creates it when missing,
     * otherwise grows its partition count to {@code newPartitionCount}.
     *
     * @param topicName         topic name
     * @param newPartitionCount desired partition count
     * @param replication       replication factor, used only when the topic is created
     */
    public static void createTopic(String topicName, int newPartitionCount, int replication) {
        // getTopicsNames() never returns null (empty set on failure), so a plain
        // contains() check is sufficient.
        if (!getTopicsNames().contains(topicName)) {
            createTopicAndPartitions(topicName, newPartitionCount, replication);
        } else {
            incrementPartitions(topicName, newPartitionCount);
        }
    }

    /**
     * Grows the partition count of an existing topic. Kafka only permits
     * increasing the count, so this fails (and logs) when
     * {@code newPartitionCount} is not greater than the current count.
     *
     * @param topicName         topic name
     * @param newPartitionCount new (strictly larger) partition count
     */
    private static void incrementPartitions(String topicName, int newPartitionCount) {
        try (AdminClient adminClient = newAdminClient()) {
            Map<String, NewPartitions> newPartitions =
                    Collections.singletonMap(topicName, NewPartitions.increaseTo(newPartitionCount));
            adminClient.createPartitions(newPartitions).all().get();
            System.out.println("Topic " + topicName + " partitions increased to " + newPartitionCount);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
    }

    /**
     * Deletes the given topic. A {@code null} or empty name is silently ignored.
     *
     * @param topicName name of the topic to delete; may be {@code null}
     */
    public static void deleteTopic(String topicName) {
        if (topicName == null || topicName.isEmpty()) {
            return; // nothing to delete
        }
        try (AdminClient adminClient = newAdminClient()) {
            adminClient.deleteTopics(Collections.singletonList(topicName)).all().get();
            System.out.println("Topic " + topicName + " deleted successfully.");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
    }

    /**
     * Lists the names of all topics in the cluster.
     *
     * @return the topic names, or an empty set when the request fails
     *         (never {@code null})
     */
    private static Set<String> getTopicsNames() {
        try (AdminClient adminClient = newAdminClient()) {
            return adminClient.listTopics().names().get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
        return Collections.emptySet();
    }
}


    