package com.inspur.cloud.service.dataspace;

import com.inspur.cloud.configuration.AmbariConfig;
import com.inspur.cloud.configuration.ApiConfig;
import com.inspur.cloud.entity.dataspace.dto.KafkaTopicInfo;
import com.inspur.cloud.exception.KafkaException;
import com.inspur.cloud.util.PrintStackTrace;
import com.inspur.cloud.util.TConstants;
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.ExecutionException;

@Service
public class KafkaService {
    @Autowired
    private ApiConfig apiConfig;

    @Autowired
    private AmbariConfig ambariConfig;

    @Autowired
    private ClusterConfigUtil clusterConfigUtil;

    private static Logger log = LoggerFactory.getLogger(KafkaService.class);
//    private static final String BOOTSTRAPSERVERS = "manager.171.bigdata:6667,master.171.bigdata:6667,worker.171.bigdata:6667";
    private static final String JAVA_SECURITY_KRB5_CONF_KEY = "java.security.krb5.conf";
    private static final String JAVA_SECURITY_LOGIN_CONF_KEY = "java.security.auth.login.config";

    /**
     * 权限认证
     */
    /**
     * Configures the JVM system properties required for Kerberos (SASL) authentication.
     * <p>
     * In dev mode the keytab/krb5/jaas files are resolved from the classpath; in
     * server mode the paths come from the Ambari-managed configuration. The resolved
     * krb5 and jaas paths are published via {@code java.security.krb5.conf} and
     * {@code java.security.auth.login.config} so the Kafka client picks them up.
     */
    private void kafkaAuth() {
        // keytab file path
        String keytabPath;
        // kerberos configuration file path
        String krb5ConfPath;
        // jaas configuration file path
        String jaasPath;

        if (apiConfig.isDev()) {
            // Local debugging: credential files are bundled on the classpath.
            keytabPath = resourceFilePath("tempfiles/dataspace.keytab");
            krb5ConfPath = resourceFilePath("tempfiles/krb5.conf");
            jaasPath = resourceFilePath("tempfiles/module.jaas.conf");
        } else {
            // Server mode: paths are supplied by Ambari configuration.
            keytabPath = ambariConfig.getDataspaceKeytab();
            krb5ConfPath = TConstants.KRB5CONF_PATH;
            jaasPath = ambariConfig.getDataspaceConfigDir() + "/" + ambariConfig.getDataspaceJaasFileName();
        }
        System.setProperty(JAVA_SECURITY_KRB5_CONF_KEY, krb5ConfPath);
        System.setProperty(JAVA_SECURITY_LOGIN_CONF_KEY, jaasPath);

        log.info("keytab路径是:" + keytabPath);
        log.info("krbConf目录:" + krb5ConfPath);
        log.info("jaas配置文件目录是：" + jaasPath);
        log.info("系统" + JAVA_SECURITY_KRB5_CONF_KEY + "目录位于: " + System.getProperty(JAVA_SECURITY_KRB5_CONF_KEY));
        log.info("系统" + JAVA_SECURITY_LOGIN_CONF_KEY + "目录位于： " + System.getProperty(JAVA_SECURITY_LOGIN_CONF_KEY));
    }

    /**
     * Resolves a classpath resource to a filesystem path.
     * <p>
     * {@code URL.getPath()} yields the path without the {@code file:} scheme prefix,
     * replacing the previous {@code toString().substring(5)} magic-number surgery.
     * NOTE(review): like the original, this does not URL-decode the path, so paths
     * containing encoded characters (e.g. {@code %20}) are passed through as-is.
     *
     * @param resource classpath-relative resource name
     * @return filesystem path of the resource
     */
    private String resourceFilePath(String resource) {
        return Thread.currentThread().getContextClassLoader().getResource(resource).getPath();
    }

    /**
     * 获取kafka AdminClient实例
     *
     * @return 返回AdminClient实例
     */
    /**
     * Builds a Kafka {@link AdminClient} from the cluster configuration.
     * <p>
     * Triggers Kerberos setup when the inter-broker protocol is SASL_PLAINTEXT,
     * derives the port from the "listeners" entry (format like
     * {@code PLAINTEXT://host:6667} — the last ":"-separated token), and joins the
     * broker host list into {@code bootstrap.servers}.
     *
     * @return a new AdminClient instance; callers must close it (see {@code closeAdminClient})
     * @throws KafkaException when the broker list or listeners entry is missing/empty
     */
    private AdminClient getAdminClient() {
        // Fetch the kafka configuration map.
        Map kafkaConf = getKafkaConf();

        String protocol = (String) kafkaConf.get("security.inter.broker.protocol");
        if ("SASL_PLAINTEXT".equals(protocol)) {
            log.info("安全模式已开启");
            kafkaAuth();
        }

        // Broker host list; an empty list would yield an unusable bootstrap string.
        List<String> brokers = (List<String>) kafkaConf.get(TConstants.KAFKA_BROKER_LIST);
        if (brokers == null || brokers.isEmpty()) {
            log.error("Kafka broker list is empty!");
            throw new KafkaException("Kafka broker list is empty!");
        }

        String listeners = (String) kafkaConf.get("listeners");
        if (listeners == null || "".equals(listeners)) {
            log.error("Kafka listeners is empty!");
            throw new KafkaException("Kafka listeners is empty!");
        }
        // The port is the last ":"-separated token of the listeners entry.
        String[] split = listeners.split(":");
        String port = split[split.length - 1];

        // Assemble "host1:port,host2:port,..." for bootstrap.servers.
        StringJoiner bootstrapServers = new StringJoiner(",");
        for (String broker : brokers) {
            bootstrapServers.add(broker + ":" + port);
        }

        Properties properties = new Properties();
        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers.toString());
        if (protocol != null && !protocol.isEmpty()) {
            // Properties.put rejects null values (would have thrown NPE before);
            // when no protocol is configured, AdminClient defaults to PLAINTEXT.
            properties.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, protocol);
        }

        return AdminClient.create(properties);
    }

    /**
     * 获取kafka集群broker列表
     *
     * @return 返回kafka集群broker列表
     */
    /**
     * Returns the list of broker hosts from the kafka cluster configuration.
     *
     * @return broker host names as configured for the cluster
     */
    public List<String> getBrokerList() {
        Map kafkaConf = getKafkaConf();
        return (List<String>) kafkaConf.get(TConstants.KAFKA_BROKER_LIST);
    }

    /**
     * 关闭kafka AdminClient实例
     *
     * @param adminClient AdminClient实例
     */
    /**
     * Closes the given AdminClient, tolerating a null reference.
     *
     * @param adminClient the client to close; may be null
     */
    private void closeAdminClient(AdminClient adminClient) {
        if (adminClient == null) {
            return;
        }
        adminClient.close();
    }

    /**
     * 利用AdminClient创建kafka topic
     *
     * @param topicName         待创建的topic name
     * @param partitions        待创建的topic的分区数
     * @param replicationFactor 待创建的分区数的副本数
     */
    /**
     * Creates a kafka topic via AdminClient and blocks until the broker confirms.
     *
     * @param topicName         name of the topic to create
     * @param partitions        number of partitions for the new topic
     * @param replicationFactor replication factor for each partition
     * @throws KafkaException when the creation request fails or is interrupted
     */
    public void createKafkaTopic(String topicName, int partitions, int replicationFactor) {
        AdminClient adminClient = getAdminClient();
        NewTopic newTopic = new NewTopic(topicName, partitions, (short) replicationFactor);
        CreateTopicsResult createTopicsResult = adminClient.createTopics(Collections.singletonList(newTopic));
        try {
            // Block until the broker acknowledges the creation.
            createTopicsResult.all().get();
            log.info("Create topic " + topicName + " success!");
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("Create topic " + topicName + " failed!", e);
            throw new KafkaException("Create topic " + topicName + " failed!" + PrintStackTrace.getStackTrace(e));
        } catch (ExecutionException e) {
            log.error("Create topic " + topicName + " failed!", e);
            throw new KafkaException("Create topic " + topicName + " failed!" + PrintStackTrace.getStackTrace(e));
        } finally {
            closeAdminClient(adminClient);
        }
    }

    /**
     * 利用AdminClient删除kafka topic
     *
     * @param topicName 待删除的topic name
     */
    /**
     * Deletes a kafka topic via AdminClient and blocks until the broker confirms.
     *
     * @param topicName name of the topic to delete
     * @throws KafkaException when the deletion request fails or is interrupted
     */
    public void deleteKafkaTopic(String topicName) {
        AdminClient adminClient = getAdminClient();
        DeleteTopicsResult deleteTopicsResult = adminClient.deleteTopics(Collections.singleton(topicName));
        try {
            // Block until the broker acknowledges the deletion.
            deleteTopicsResult.all().get();
            log.info("Delete topic " + topicName + " success!");
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("Delete topic " + topicName + " failed!", e);
            throw new KafkaException("Delete topic " + topicName + " failed!" + PrintStackTrace.getStackTrace(e));
        } catch (ExecutionException e) {
            log.error("Delete topic " + topicName + " failed!", e);
            throw new KafkaException("Delete topic " + topicName + " failed!" + PrintStackTrace.getStackTrace(e));
        } finally {
            closeAdminClient(adminClient);
        }
    }

    /**
     * 获取kafka topic列表
     *
     * @return 返回kafka topic name列表
     */
    /**
     * Lists all topic names in the cluster, including kafka-internal topics.
     *
     * @return the set of topic names
     * @throws KafkaException when the listing request fails or is interrupted
     */
    public Collection<String> getKafkaTopicNameSet() {
        AdminClient adminClient = getAdminClient();
        ListTopicsOptions options = new ListTopicsOptions();
        // Include internal topics (e.g. __consumer_offsets) in the result.
        options.listInternal(true);
        ListTopicsResult listTopicsResult = adminClient.listTopics(options);

        try {
            Set<String> topicNameSet = listTopicsResult.names().get();
            log.info("Get kafka topic name set success!");
            return topicNameSet;
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("Get kafka topic name set failed!", e);
            throw new KafkaException("Get kafka topic name set failed!" + PrintStackTrace.getStackTrace(e));
        } catch (ExecutionException e) {
            log.error("Get kafka topic name set failed!", e);
            throw new KafkaException("Get kafka topic name set failed!" + PrintStackTrace.getStackTrace(e));
        } finally {
            closeAdminClient(adminClient);
        }
    }

    /**
     * 查询指定topic的信息
     *
     * @param topics topic列表
     * @return 返回topics元信息
     */
    /**
     * Fetches partition-level metadata for the requested topics.
     * <p>
     * Requested topics that do not exist are logged and omitted from the result.
     * Previously this method described EVERY topic in the cluster and then discarded
     * all but the requested ones; it now describes only the requested topics that
     * exist, avoiding a full-cluster metadata fetch while still never asking the
     * broker to describe a non-existent topic (which would fail the whole request).
     *
     * @param topics topic names to look up
     * @return map from topic name to one {@link KafkaTopicInfo} per partition
     * @throws KafkaException when the describe request fails or is interrupted
     */
    public Map<String, List<KafkaTopicInfo>> getKafkaTopicInfo(List<String> topics) {
        Map<String, List<KafkaTopicInfo>> topicsInfoMap = new HashMap<>();
        Collection<String> kafkaTopicNameSet = getKafkaTopicNameSet();

        // Keep only the requested topics that actually exist in the cluster.
        List<String> existingTopics = new ArrayList<>();
        for (String topic : topics) {
            if (kafkaTopicNameSet.contains(topic)) {
                existingTopics.add(topic);
            } else {
                log.info("Topic " + topic + " is not exist!");
            }
        }

        AdminClient adminClient = getAdminClient();
        DescribeTopicsResult describeTopicsResult = adminClient.describeTopics(existingTopics);
        try {
            Map<String, TopicDescription> topicDescriptionMap = describeTopicsResult.all().get();
            for (String topic : existingTopics) {
                TopicDescription description = topicDescriptionMap.get(topic);
                if (description == null) {
                    // Topic vanished between listing and describing; skip it.
                    continue;
                }
                List<TopicPartitionInfo> partitions = description.partitions();
                List<KafkaTopicInfo> topicInfoList = new ArrayList<>(partitions.size());
                for (TopicPartitionInfo partition : partitions) {
                    log.info(partition.toString());
                    topicInfoList.add(buildTopicInfo(topic, partitions.size(), partition));
                }
                topicsInfoMap.put(topic, topicInfoList);
            }
            log.info("Get kafka topics information success!");

            return topicsInfoMap;
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("Get kafka topics information failed!", e);
            throw new KafkaException("Get kafka topics information failed!" + PrintStackTrace.getStackTrace(e));
        } catch (ExecutionException e) {
            log.error("Get kafka topics information failed!", e);
            throw new KafkaException("Get kafka topics information failed!" + PrintStackTrace.getStackTrace(e));
        } finally {
            closeAdminClient(adminClient);
        }
    }

    /**
     * Maps one partition's metadata into a {@link KafkaTopicInfo} DTO.
     *
     * @param topic         topic name the partition belongs to
     * @param partitionsCnt total number of partitions of the topic
     * @param partition     metadata for this partition
     * @return populated DTO
     */
    private KafkaTopicInfo buildTopicInfo(String topic, int partitionsCnt, TopicPartitionInfo partition) {
        KafkaTopicInfo topicInfo = new KafkaTopicInfo();
        topicInfo.setTopicName(topic);
        topicInfo.setTopicPartitionsCnt(partitionsCnt);
        topicInfo.setTopicPartitionIndex(partition.partition());
        // -1 marks a partition whose leader is currently unavailable.
        topicInfo.setLeaderId(partition.leader() != null ? partition.leader().id() : -1);

        // Collect broker ids of all replicas of this partition.
        List<Integer> replicas = new ArrayList<>();
        for (Node replica : partition.replicas()) {
            replicas.add(replica.id());
        }
        topicInfo.setReplicas(replicas);

        // Collect broker ids of the in-sync replicas.
        List<Integer> isr = new ArrayList<>();
        for (Node node : partition.isr()) {
            isr.add(node.id());
        }
        topicInfo.setIsr(isr);

        return topicInfo;
    }

    /**
     * 更新指定topic的分区数
     *
     * @param topicName  待更新分区的topic name
     * @param partitions 指定topic的分区数
     */
    /**
     * Increases the partition count of a topic (kafka only supports increasing).
     *
     * @param topicName  topic whose partition count is updated
     * @param partitions target total number of partitions
     * @throws KafkaException when the request fails or is interrupted
     */
    public void updateTopicPartitions(String topicName, int partitions) {
        AdminClient adminClient = getAdminClient();
        Map<String, NewPartitions> map = new HashMap<>();
        map.put(topicName, NewPartitions.increaseTo(partitions));
        CreatePartitionsResult createPartitionsResult = adminClient.createPartitions(map);
        try {
            // Block until the broker acknowledges the partition increase.
            createPartitionsResult.all().get();
            log.info("Increase kafka topic partitions success!");
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("Increase kafka topic partitions failed!", e);
            throw new KafkaException("Increase kafka topic partitions failed!" + PrintStackTrace.getStackTrace(e));
        } catch (ExecutionException e) {
            log.error("Increase kafka topic partitions failed!", e);
            throw new KafkaException("Increase kafka topic partitions failed!" + PrintStackTrace.getStackTrace(e));
        } finally {
            closeAdminClient(adminClient);
        }
    }

    /**
     * 获取kafka配置信息
     *
     * @return 返回kafka集群配置信息
     */
    /**
     * Fetches the kafka cluster configuration map.
     *
     * @return the kafka configuration; never null
     * @throws KafkaException when no configuration could be obtained
     */
    private Map getKafkaConf() {
        Map kafkaConf = clusterConfigUtil.getKafkaConfig();
        if (kafkaConf != null) {
            return kafkaConf;
        }
        log.error("未获取到kafka配置！");
        throw new KafkaException("未获取到kafka配置！");
    }

}
