package com.lkx.kafka.monitor.service;

import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.retrytopic.DestinationTopic;
import org.springframework.stereotype.Service;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

/**
 * Administrative operations against a Kafka cluster (topic creation/deletion,
 * broker discovery) backed by a shared {@link AdminClient}.
 *
 * <p>All blocking AdminClient calls are bounded by {@link #ADMIN_TIMEOUT_SECONDS}.
 * Failures are logged and rethrown as {@link RuntimeException} with the cause preserved.
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class KafkaAdminServiceImpl implements KafkaAdminService {

    /** Upper bound, in seconds, for every blocking AdminClient operation. */
    private static final long ADMIN_TIMEOUT_SECONDS = 30;

    // Constructor-injected by Lombok's @RequiredArgsConstructor; previously this
    // was @Autowired field injection, which left @RequiredArgsConstructor a no-op.
    private final AdminClient adminClient;

    /**
     * Creates a Kafka topic.
     *
     * <p>If the requested replication factor exceeds the number of available
     * brokers, it is silently capped at the broker count (with a warning).
     *
     * @param topicName   topic name
     * @param partitions  number of partitions
     * @param replicas    desired replication factor (capped at broker count)
     * @param retentionMs message retention time in milliseconds ({@code retention.ms})
     * @throws RuntimeException if the topic cannot be created within the timeout
     */
    public void createTopic(String topicName, int partitions, int replicas, long retentionMs) {
        try {
            // A replication factor larger than the broker count would make the
            // broker reject the request, so cap it up front.
            int availableBrokers = getAvailableBrokerCount();
            if (replicas > availableBrokers) {
                log.warn("Requested replication factor {} is larger than available brokers {}. Setting replication factor to {}.",
                        replicas, availableBrokers, availableBrokers);
                replicas = availableBrokers;
            }

            // Per-topic config: only retention is exposed by this API.
            Map<String, String> configs = new HashMap<>();
            configs.put("retention.ms", String.valueOf(retentionMs));

            NewTopic newTopic = new NewTopic(topicName, partitions, (short) replicas)
                    .configs(configs);

            // Block until the broker acknowledges creation, bounded by the timeout.
            CreateTopicsResult result = adminClient.createTopics(Collections.singletonList(newTopic));
            result.all().get(ADMIN_TIMEOUT_SECONDS, TimeUnit.SECONDS);

            log.info("Successfully created topic: {}", topicName);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/executors can observe it.
            Thread.currentThread().interrupt();
            log.error("Failed to create topic: {}", topicName, e);
            throw new RuntimeException("Failed to create topic: " + e.getMessage(), e);
        } catch (Exception e) {
            log.error("Failed to create topic: {}", topicName, e);
            throw new RuntimeException("Failed to create topic: " + e.getMessage(), e);
        }
    }

    /**
     * Deletes a Kafka topic.
     *
     * @param topicName topic name
     * @throws RuntimeException if the topic cannot be deleted within the timeout
     */
    public void deleteTopic(String topicName) {
        try {
            DeleteTopicsResult result = adminClient.deleteTopics(Collections.singletonList(topicName));
            result.all().get(ADMIN_TIMEOUT_SECONDS, TimeUnit.SECONDS);

            log.info("Successfully deleted topic: {}", topicName);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers/executors can observe it.
            Thread.currentThread().interrupt();
            log.error("Failed to delete topic: {}", topicName, e);
            throw new RuntimeException("Failed to delete topic: " + e.getMessage(), e);
        } catch (Exception e) {
            log.error("Failed to delete topic: {}", topicName, e);
            throw new RuntimeException("Failed to delete topic: " + e.getMessage(), e);
        }
    }

    /**
     * Saves Kafka connection configuration.
     *
     * <p><b>TODO(review): stub.</b> The values are extracted (and type-checked via
     * the casts, so a malformed map still fails fast with ClassCastException) but
     * are not persisted anywhere yet — the method only logs the incoming map.
     *
     * @param config map expected to contain {@code bootstrapServers} (String),
     *               {@code adminTimeout} (Integer), {@code metricsRefreshInterval}
     *               (Integer) and {@code consumerGroupId} (String)
     * @throws RuntimeException if a value has an unexpected type
     */
    public void saveKafkaConfig(Map<String, Object> config) {
        try {
            // Extract and type-check the known keys; wiring them into live
            // client state is still unimplemented.
            String bootstrapServers = (String) config.get("bootstrapServers");
            Integer adminTimeout = (Integer) config.get("adminTimeout");
            Integer metricsRefreshInterval = (Integer) config.get("metricsRefreshInterval");
            String consumerGroupId = (String) config.get("consumerGroupId");

            log.info("Successfully saved Kafka config: {}", config);
        } catch (Exception e) {
            log.error("Failed to save Kafka config: {}", e.getMessage(), e);
            throw new RuntimeException("Failed to save Kafka config: " + e.getMessage(), e);
        }
    }

    /**
     * Returns the number of brokers currently visible in the cluster.
     *
     * <p>Used to cap the replication factor before creating a topic. Bounded by
     * {@link #ADMIN_TIMEOUT_SECONDS} so a dead cluster cannot block forever.
     *
     * @throws TimeoutException if the cluster does not answer within the timeout
     */
    private int getAvailableBrokerCount()
            throws InterruptedException, ExecutionException, TimeoutException {
        DescribeClusterResult clusterResult = adminClient.describeCluster();
        return clusterResult.nodes().get(ADMIN_TIMEOUT_SECONDS, TimeUnit.SECONDS).size();
    }

}
