package com.ustcinfo.ishare.kafka.monitor.utils;

import java.util.*;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ustcinfo.ishare.kafka.monitor.bean.BrokerInfo;
import com.ustcinfo.ishare.kafka.monitor.bean.MetadataInfo;
import kafka.admin.AdminClient;
import kafka.admin.AdminUtils;
import kafka.admin.RackAwareMode;
import kafka.coordinator.GroupOverview;
import kafka.utils.ZKStringSerializer$;
import kafka.utils.ZkUtils;
import org.I0Itec.zkclient.ZkClient;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.security.JaasUtils;
import org.apache.zookeeper.data.Stat;
import org.springframework.stereotype.Component;
import scala.Option;
import scala.Tuple2;
import scala.collection.*;
import scala.collection.Seq;
import scala.collection.immutable.Map;

import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Created by Shinelon on 2018/3/20.
 */
@Component
public class KafkaInfoUtils {
    /**
     * krb5.conf配置文件
     **/
    private static final String KRB5_CONF = SystemConfigUtils.getProperty("krb5-conf") + "krb5.conf";
    /**
     * JAAS配置文件
     **/
    private static final String KAFKA_JAAS_CONF = SystemConfigUtils.getProperty("kafka-jaas-conf") + "kafka-jaas.conf";
    /**
     * kafka broker地址，多个broker用逗号分开
     **/
    private static final String KAFKA_BROKERS = SystemConfigUtils.getProperty("broker.url");
    private ZkClient zkClient;

    public KafkaInfoUtils() {
        // Register the Kerberos configuration with the JVM once, so every
        // Kafka/ZooKeeper client created by this component can authenticate.
        System.setProperty("java.security.krb5.conf", KRB5_CONF);
        // JAAS login configuration required for SASL/Kerberos authentication.
        System.setProperty("java.security.auth.login.config", KAFKA_JAAS_CONF);
    }

    /**
     * Builds a Kerberos-authenticated Kafka consumer bound to the given
     * consumer group. The group name also selects the JAAS file
     * ("&lt;group&gt;-jaas.conf") installed into the JVM system properties
     * before the consumer is created.
     *
     * @param group consumer group id
     * @return a new String/String {@link KafkaConsumer} using SASL_PLAINTEXT
     */
    private KafkaConsumer<String, String> initKafka(String group) {
        Properties config = new Properties();
        // Kafka cluster addresses and ports; multiple hosts are comma separated.
        config.setProperty("bootstrap.servers", KAFKA_BROKERS);
        config.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        config.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        config.setProperty("session.timeout.ms", "180000");
        config.setProperty("max.poll.records", "1");
        config.setProperty("auto.offset.reset", "earliest");
        config.setProperty("auto.commit.interval.ms", "500");
        config.setProperty("enable.auto.commit", "true");
        config.setProperty("heartbeat.interval.ms", "3000");
        config.setProperty("request.timeout.ms", "300000");
        config.setProperty("max.poll.interval.ms", "300000");
        // Kerberos service name; must match serviceName in kafka-jaas.conf.
        config.setProperty("sasl.kerberos.service.name", "kafka");
        // The brokers require Kerberos authentication, hence SASL_PLAINTEXT.
        config.setProperty("security.protocol", "SASL_PLAINTEXT");
        // Consumer group name.
        config.setProperty("group.id", group);
        // Point the JVM at the per-group JAAS file and the krb5 configuration.
        System.setProperty("java.security.auth.login.config", SystemConfigUtils.getProperty("kafka-jaas-conf") + group + "-jaas.conf");
        System.setProperty("java.security.krb5.conf", KRB5_CONF);

        return new KafkaConsumer<String, String>(config);
    }

    /**
     * (Re)creates the ZooKeeper client used by the zk-based lookups below.
     * Callers are responsible for closing {@code zkClient} when done.
     */
    private void initZkClient() {
        String zookeeperUrl = SystemConfigUtils.getProperty("zookeeper.url");
        zkClient = new ZkClient(zookeeperUrl, Integer.MAX_VALUE, 100000, ZKStringSerializer$.MODULE$);
    }

    /**
     * Reads the live broker registrations from ZooKeeper (/brokers/ids) and
     * converts each one into a {@link BrokerInfo} (id, host, port, status=1).
     * Brokers whose registration JSON cannot be parsed are skipped.
     *
     * @return the set of brokers currently registered in ZooKeeper
     */
    public Set<BrokerInfo> getAllBrokersInfo() {
        Set<BrokerInfo> brokersInfos = new HashSet<BrokerInfo>();
        initZkClient();
        try {
            // Reuse a single ZkUtils wrapper instead of re-wrapping per call.
            ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
            Seq<String> subBrokerIdsPaths = zkUtils.getChildren("/brokers/ids");
            List<String> brokerIds = JavaConversions.seqAsJavaList(subBrokerIdsPaths);
            for (String ids : brokerIds) {
                try {
                    Tuple2<Option<String>, Stat> tuple = zkUtils.readDataMaybeNull("/brokers/ids/" + ids);
                    // Registration JSON holds e.g. {"endpoints":["SASL_PLAINTEXT://host:9092"],...}
                    String endpoints = JSON.parseObject(tuple._1.get()).getString("endpoints");
                    String tmp = endpoints.split("//")[1];
                    // Strip the trailing quote-and-bracket of the serialized array,
                    // leaving "host:port".
                    String hostPort = tmp.substring(0, tmp.length() - 2);
                    String host = hostPort.split(":")[0];
                    int port = Integer.parseInt(hostPort.split(":")[1]);
                    BrokerInfo brokerInfo = new BrokerInfo();
                    brokerInfo.setId(Integer.valueOf(ids));
                    brokerInfo.setHost(host);
                    brokerInfo.setPort(port);
                    brokerInfo.setStatus(1);
                    brokersInfos.add(brokerInfo);
                } catch (Exception ex) {
                    // Best effort: log the full failure and continue with the next broker.
                    ex.printStackTrace();
                }
            }
        } finally {
            // Always release the ZooKeeper connection, even on failure.
            zkClient.close();
        }
        return brokersInfos;
    }

    /**
     * Lists all topic names registered in ZooKeeper.
     *
     * @return the topic names (a read-only view of the underlying Scala list)
     */
    public List<String> getAllTopics() {
        initZkClient();
        try {
            Seq<String> topics = ZkUtils.apply(zkClient, false).getAllTopics();
            return JavaConversions.seqAsJavaList(topics);
        } finally {
            // Close in finally so a ZooKeeper failure cannot leak the connection.
            zkClient.close();
        }
    }

    /**
     * Lists the partition ids of a topic, read from
     * /brokers/topics/&lt;topic&gt;/partitions in ZooKeeper.
     *
     * @param topic topic name
     * @return partition ids as strings (e.g. "0", "1", ...)
     */
    public List<String> getPartitionsByTopic(String topic) {
        initZkClient();
        try {
            Seq<String> brokerTopicsPaths = ZkUtils.apply(zkClient, false).getChildren("/brokers/topics/" + topic + "/partitions");
            return JavaConversions.seqAsJavaList(brokerTopicsPaths);
        } finally {
            // Close in finally so a ZooKeeper failure cannot leak the connection.
            zkClient.close();
        }
    }

    /**
     * Returns the in-sync replica set for one partition of a topic.
     *
     * @param topic       topic name
     * @param partitionid partition id
     * @return the ISR broker ids rendered as a list string, e.g. "[1, 2, 3]"
     */
    public String getReplicasIsr(String topic, int partitionid) {
        initZkClient();
        try {
            Seq<Object> inSyncReplicas = ZkUtils.apply(zkClient, false).getInSyncReplicasForPartition(topic, partitionid);
            return JavaConversions.seqAsJavaList(inSyncReplicas).toString();
        } finally {
            // Close in finally so a ZooKeeper failure cannot leak the connection.
            zkClient.close();
        }
    }

    /**
     * Collects the ISR of every partition of a topic.
     * Result format: key "&lt;topic&gt;-&lt;partition&gt;", value ISR list string.
     *
     * @param topic topic name
     * @return map from "topic-partition" to its in-sync replica list
     */
    public java.util.Map<String,String> getReplicasByTopic(String topic){
        java.util.Map<String, String> replicasMap = new java.util.HashMap<String, String>();
        for (String partition : getPartitionsByTopic(topic)) {
            String isr = getReplicasIsr(topic, Integer.valueOf(partition));
            replicasMap.put(topic + "-" + partition, isr);
        }
        return replicasMap;
    }

    /**
     * Reads per-partition metadata (ISR, leader, partition id) for a topic
     * from the partition state nodes in ZooKeeper. Replicas are filled in
     * separately by {@link #findMetadataInfoByTopic(String)}.
     *
     * @param topic topic name
     * @return one {@link MetadataInfo} per partition; empty if the topic
     *         does not exist in ZooKeeper
     */
    private List<MetadataInfo> findKafkaLeaderWithoutReplicas(String topic) {
        initZkClient();
        List<MetadataInfo> targets = new ArrayList<MetadataInfo>();
        try {
            // Reuse a single ZkUtils wrapper instead of re-wrapping per call.
            ZkUtils zkUtils = ZkUtils.apply(zkClient, false);
            if (zkUtils.pathExists("/brokers/topics")) {
                Seq<String> subBrokerTopicsPaths = zkUtils.getChildren("/brokers/topics");
                List<String> topics = JavaConversions.seqAsJavaList(subBrokerTopicsPaths);
                if (topics.contains(topic)) {
                    Tuple2<Option<String>, Stat> tuple = zkUtils.readDataMaybeNull("/brokers/topics/" + topic);
                    // Topic node JSON: {"partitions":{"0":[...],"1":[...],...},...}
                    JSONObject partitionObject = JSON.parseObject(tuple._1.get()).getJSONObject("partitions");
                    for (String partition : partitionObject.keySet()) {
                        String path = String.format("/brokers/topics/%s/partitions/%s/state", topic, Integer.valueOf(partition));
                        Tuple2<Option<String>, Stat> tuple2 = zkUtils.readDataMaybeNull(path);
                        JSONObject topicMetadata = JSON.parseObject(tuple2._1.get());
                        MetadataInfo metadata = new MetadataInfo();
                        metadata.setIsr(topicMetadata.getString("isr"));
                        metadata.setLeader(topicMetadata.getInteger("leader"));
                        metadata.setPartitionId(Integer.valueOf(partition));
                        targets.add(metadata);
                    }
                }
            }
        } finally {
            // Close in finally so a ZooKeeper failure cannot leak the connection.
            zkClient.close();
        }
        return targets;
    }

    /**
     * Returns the full metadata for a topic: partition id, leader, ISR and
     * replicas, with the topic name stamped on every entry.
     *
     * @param topic topic name
     * @return one {@link MetadataInfo} per partition
     */
    public List<MetadataInfo> findMetadataInfoByTopic(String topic){
        List<MetadataInfo> metadataInfos = findKafkaLeaderWithoutReplicas(topic);
        for (MetadataInfo info : metadataInfos) {
            info.setReplicas(getReplicasIsr(topic, info.getPartitionId()));
            info.setTopic(topic);
        }
        return metadataInfos;
    }

    /**
     * Lists every consumer group that has committed offsets for at least one
     * partition of the given topic.
     *
     * @param topic topic name
     * @return the matching consumer group ids
     */
    public Set<String> getAllGroupsForTopic(String topic) {
        Properties prop = new Properties();
        // Broker bootstrap list plus the Kerberos/SASL settings needed to connect.
        prop.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, KAFKA_BROKERS);
        prop.put("sasl.kerberos.service.name", "kafka");
        prop.put("security.protocol", "SASL_PLAINTEXT");
        System.setProperty("java.security.auth.login.config", KAFKA_JAAS_CONF);
        System.setProperty("java.security.krb5.conf", KRB5_CONF);

        AdminClient client = AdminClient.create(prop);
        try {
            List<GroupOverview> allGroups = scala.collection.JavaConversions.seqAsJavaList(client.listAllGroupsFlattened().toSeq());
            Set<String> groups = new HashSet<String>();
            for (GroupOverview overview : allGroups) {
                String groupID = overview.groupId();
                Map<TopicPartition, Object> offsets = client.listGroupOffsets(groupID);
                Set<TopicPartition> partitions = scala.collection.JavaConversions.setAsJavaSet(offsets.keySet());
                for (TopicPartition tp : partitions) {
                    if (tp.topic().equals(topic)) {
                        groups.add(groupID);
                        // One matching partition is enough; skip the rest.
                        break;
                    }
                }
            }
            return groups;
        } finally {
            client.close();
        }
    }

    /**
     * Returns the log-end offset (total messages written) of one partition,
     * queried as the given consumer group.
     *
     * @param topic       topic name
     * @param groupId     consumer group used for the query
     * @param partitionId partition id
     * @return the partition's end offset
     */
    public long getKafkaLogSize(String topic, String groupId, int partitionId) {
        KafkaConsumer<String, String> kafkaConsumer = initKafka(groupId);
        try {
            TopicPartition tp = new TopicPartition(topic, partitionId);
            kafkaConsumer.assign(Collections.singleton(tp));
            java.util.Map<TopicPartition, Long> logSize = kafkaConsumer.endOffsets(Collections.singleton(tp));
            return logSize.get(tp).longValue();
        } finally {
            // Close in finally so a broker failure cannot leak the consumer.
            kafkaConsumer.close();
        }
    }

    /**
     * Sums the log-end offsets of all partitions of a topic, i.e. the total
     * number of messages written, as seen by the given consumer group.
     *
     * @param topic   topic name
     * @param groupId consumer group used for the query
     * @return total message count across all partitions
     */
    public long getTotalLogSizeByTopic(String topic, String groupId) {
        long total = 0L;
        for (String partition : getPartitionsByTopic(topic)) {
            total += getKafkaLogSize(topic, groupId, Integer.parseInt(partition));
        }
        return total;
    }

    /**
     * Sums the committed offsets of the given consumer group across all
     * partitions of a topic. Partitions with no committed offset contribute 0.
     *
     * @param topicName topic name
     * @param groupId   consumer group id
     * @return total committed offset across all partitions
     */
    public long getOffsets(String topicName, String groupId) {
        long offsets = 0;
        KafkaConsumer<String, String> kafkaConsumer = initKafka(groupId);
        try {
            List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topicName);
            List<TopicPartition> tpList = new ArrayList<TopicPartition>();
            for (PartitionInfo info : partitionInfoList) {
                tpList.add(new TopicPartition(topicName, info.partition()));
            }
            for (TopicPartition tp : tpList) {
                long l;
                try {
                    // committed() returns null when the group has never committed
                    // for this partition; the catch treats that (NPE) as offset 0.
                    l = kafkaConsumer.committed(tp).offset();
                } catch (Exception e) {
                    l = 0;
                }
                offsets += l;
            }
        } finally {
            // Close in finally so a broker failure cannot leak the consumer.
            kafkaConsumer.close();
        }
        return offsets;
    }

    /**
     * Creates a topic via the ZooKeeper-based admin API.
     *
     * @param topicName  topic to create
     * @param partitions partition count (decimal string)
     * @param replic     replication factor (decimal string)
     * @return "true" on success, "false" on any failure
     */
    public String createTopic(String topicName, String partitions, String replic) {
        String flag;
        ZkUtils zkUtils = null;
        try {
            zkUtils = ZkUtils.apply(SystemConfigUtils.getProperty("zookeeper.url"), 30000, 30000, JaasUtils.isZkSecurityEnabled());
            AdminUtils.createTopic(zkUtils, topicName, Integer.parseInt(partitions), Integer.parseInt(replic), new Properties(), RackAwareMode.Enforced$.MODULE$);
            flag = "true";
        } catch (Exception e) {
            flag = "false";
            e.printStackTrace();
        } finally {
            // Close in finally: the original skipped close when createTopic threw.
            if (zkUtils != null) {
                zkUtils.close();
            }
        }
        return flag;
    }

    /**
     * Deletes (marks for deletion) a topic via the ZooKeeper-based admin API.
     *
     * @param topicName topic to delete
     * @return "true" on success, "false" on any failure
     */
    public String deleteTopic(String topicName) {
        String flag;
        ZkUtils zkUtils = null;
        try {
            zkUtils = ZkUtils.apply(SystemConfigUtils.getProperty("zookeeper.url"), 30000, 30000, JaasUtils.isZkSecurityEnabled());
            AdminUtils.deleteTopic(zkUtils, topicName);
            flag = "true";
        } catch (Exception e) {
            flag = "false";
            e.printStackTrace();
        } finally {
            // Close in finally: the original skipped close when deleteTopic threw.
            if (zkUtils != null) {
                zkUtils.close();
            }
        }
        return flag;
    }

//    public static void main(String[] args) {
//        KafkaInfoUtils kafkaInfoUtils = new KafkaInfoUtils();

        // 获取所有主题
//        List<String> topics = kafkaInfoUtils.getAllTopics();
//        System.out.println(topics);
//        for (String topic:topics){
//            获取对应主题的组名
//            Set<String> groups = kafkaInfoUtils.getAllGroupsForTopic("wzb032603");
//            System.out.println(groups.toString());
//        }
//        System.out.println(topics);

        // 获取对应主题 的 分区信息
//        for (String topic : topics){
//            List<String> partitions = kafkaInfoUtils.getPartitionsByTopic("wzb041003");
//            System.out.println(partitions);
//        }

        // 获取 主题对应组de 每个分区的总量
//        System.out.println(kafkaInfoUtils.getKafkaLogSize("wzb032602","kafka",0));
//        System.out.println(kafkaInfoUtils.getKafkaLogSize("wzb032602","kafka",1));
//        System.out.println(kafkaInfoUtils.getKafkaLogSize("wzb032602","kafka",2));

        // 获取 主题对应组 的 消息总量
//        System.out.println(kafkaInfoUtils.getTotalLogSizeByTopic("wzb032603","kafka"));

        // 获取 主题对应组 的消费情况
//        long offsets = kafkaInfoUtils.getOffsets("wzb032603","kafka");
//        System.out.println(offsets);

//        System.out.println(kafkaInfoUtils.getAllBrokersInfo());

//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",0)+"-0");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",1)+"-1");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",2)+"-2");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",3)+"-3");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",4)+"-4");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",5)+"-5");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",6)+"-6");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",7)+"-7");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",8)+"-8");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb032603",9)+"-9");

//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb041101",0)+"-0");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb040201",1)+"-1");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb040201",2)+"-2");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb040201",3)+"-3");
//        System.out.println(kafkaInfoUtils.getReplicasIsr("wzb040201",4)+"-4");

//        System.out.println(kafkaInfoUtils.getReplicasByTopic("wzb032603"));

//        System.out.println(kafkaInfoUtils.findKafkaLeaderWithoutReplicas("wzb040201"));

//        List<MetadataInfo> metadataInfoList = kafkaInfoUtils.findMetadataInfoByTopic("wzb041101");
//        for (MetadataInfo metadataInfo : metadataInfoList) {
//            System.out.println(metadataInfo);
//        }

//        String flag = kafkaInfoUtils.createTopic("wzb041003","2","3");
//        System.out.println(flag);

//        String deleteFlag = kafkaInfoUtils.deleteTopic("wzb041003");
//        System.out.println(deleteFlag);
//    }
}
