package cn.com.greatwall.kafka.job;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;

import com.alibaba.fastjson.JSONObject;

import cn.com.greatwall.common.utils.TimeUtil;
import cn.com.greatwall.kafka.config.InitConfig;
import cn.com.greatwall.kafka.domain.Cluster;
import cn.com.greatwall.kafka.domain.Topic;
import cn.com.greatwall.kafka.service.ClusterService;
import cn.com.greatwall.kafka.service.ElasticsearchService;
import cn.com.greatwall.kafka.service.KafkaService;
import cn.com.greatwall.kafka.service.TopicService;
import cn.com.greatwall.kafka.service.ZooKeeperService;
import cn.com.greatwall.kafka.service.dto.BrokerDto;
import cn.com.greatwall.kafka.service.dto.ClusterDto;
import cn.com.greatwall.kafka.util.JmxCollector;
import cn.com.greatwall.kafka.util.KafkaUtil;
import lombok.extern.slf4j.Slf4j;

/**
 * @Author herw
 * @Time 2021-04-15 13:39:14
 * @Version 1.0
 * @Description: Scheduled job that synchronizes Kafka topic metadata (partition
 *               count, replication factor, retention/TTL and on-disk size) from
 *               each cluster into the topic database table and Elasticsearch.
 */
@Slf4j
@Component
public class CollectTopicJob {
    @Autowired
    ClusterService clusterService;
    @Autowired
    KafkaService kafkaService;
    @Autowired
    ZooKeeperService zooKeeperService;
    @Autowired
    ElasticsearchService elasticsearchService;
    @Autowired
    TopicService topicService;
    @Autowired
    InitConfig initConfig;

    /**
     * Synchronizes cluster topic information into the topic table.
     * <p>
     * For every known cluster: topics that disappeared from the cluster are
     * deleted from the table, new topics are inserted, and existing topics are
     * updated (partition count, replication factor, TTL and file size). A
     * failure on one cluster is logged and does not stop collection for the
     * remaining clusters.
     */
    void collectionTopicData() {
        List<ClusterDto> clusters = clusterService.queryAll(null);
        clusters.forEach(cluster -> {
            try {
                // Optionally restrict collection to a single configured location.
                if (initConfig.isMonitorCollectorIncludeEnable()
                        && !cluster.getLocation().equalsIgnoreCase(initConfig.getMonitorCollectorIncludelocation())) {
                    return;
                }
                log.debug("collect topic data start,{}", cluster);
                long start = System.currentTimeMillis();
                String clusterId = cluster.getId().toString();
                Set<String> clusterTopics = clusterTopic(cluster);
                Map<String, TopicDescription> topicDescription = takeTopicDescription(clusterId, clusterTopics);
                Map<String, Config> topicConfig = takeTopicConfig(clusterId, clusterTopics);
                // Brokers with major version <= 1 lack the describeLogDirs API,
                // so topic sizes are collected through JMX instead.
                String majorVersion = cluster.getKafkaVersion().split("\\.")[0];
                Map<String, Long> topicSizeMap = Integer.parseInt(majorVersion) > 1 ? takeTopicSize(cluster)
                        : unSupportVersionTopicSize(cluster, clusterTopics, topicDescription);
                List<Topic> topicList = dbTopic(cluster);
                Set<Long> needDeleteFromDB = needToDeleteDB(clusterTopics, topicList);
                Set<Topic> needToInsertDB = mergeWithTopicSize(topicSizeMap,
                        needToInsertDB(clusterTopics, topicList, cluster, topicDescription, topicConfig));
                Set<Topic> needToUpdateDb = mergeWithTopicSize(topicSizeMap,
                        needToUpdateDB(clusterTopics, topicList, cluster, topicDescription, topicConfig));
                if (!needDeleteFromDB.isEmpty()) {
                    topicService.delete(needDeleteFromDB);
                }
                if (!needToInsertDB.isEmpty() && !topicService.batchInsert(needToInsertDB)) {
                    log.error("batch insert topic table failed,please check");
                }
                if (!needToUpdateDb.isEmpty() && !topicService.batchUpdate(needToUpdateDb)) {
                    log.error("batch update topic table failed,please check");
                }
                log.debug("collect topic data end,{},cost time is {}", cluster,
                        (System.currentTimeMillis() - start));
            } catch (Exception e) {
                // One broken cluster must not abort collection for the others.
                log.error("cluster:{} collection topic config has error", cluster.getName(), e);
            }
        });
    }

    /**
     * Returns the names of the topics that currently exist on the cluster.
     *
     * @param clusterDto the cluster to query
     * @return the topic names, or an empty set when the listing fails
     */
    private Set<String> clusterTopic(ClusterDto clusterDto) {
        String clusterId = clusterDto.getId().toString();
        KafkaUtil kafkaUtil = kafkaService.getKafkaUtil(clusterId);
        try {
            return kafkaUtil.listTopics();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("list topics interrupted,clusterId:{}", clusterId, e);
        } catch (ExecutionException e) {
            log.error("list topics has error,clusterId:{}", clusterId, e);
        }
        return new HashSet<>();
    }

    /**
     * Fetches the {@link TopicDescription} of every given topic.
     *
     * @param clusterId the cluster identifier
     * @param topics    the topic names to describe
     * @return topic name mapped to its description
     */
    private Map<String, TopicDescription> takeTopicDescription(String clusterId, Set<String> topics) {
        KafkaUtil kafkaUtil = kafkaService.getKafkaUtil(clusterId);
        return kafkaUtil.descTopics(topics);
    }

    /**
     * Fetches the broker-side {@link Config} of every given topic.
     *
     * @param clusterId the cluster identifier
     * @param topics    the topic names to describe
     * @return topic name mapped to its config, or an empty map on failure
     */
    private Map<String, Config> takeTopicConfig(String clusterId, Set<String> topics) {
        KafkaUtil kafkaUtil = kafkaService.getKafkaUtil(clusterId);
        try {
            return kafkaUtil.descConfigs(topics);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            log.error("describe topic configs interrupted,clusterId:{}", clusterId, e);
        } catch (ExecutionException e) {
            log.error("describe topic configs has error,clusterId:{}", clusterId, e);
        }
        return new HashMap<>(8);
    }

    /**
     * Returns the on-disk size of every topic (via the admin client's log-dir
     * description) and writes the sizes to Elasticsearch as a side effect.
     *
     * @param cluster the cluster to measure
     * @return topic name mapped to its total file size in bytes; empty when
     *         the broker does not support the log-dir API
     * @throws Exception when the size collection fails for any other reason
     */
    private Map<String, Long> takeTopicSize(ClusterDto cluster) throws Exception {
        try {
            String clusterId = cluster.getId().toString();
            List<BrokerDto> brokerInfos = zooKeeperService.getZooKeeper(clusterId).getBrokers();
            List<Integer> brokerIds = brokerInfos.stream().map(BrokerDto::getBid).collect(Collectors.toList());
            Map<String, Long> mapSize = kafkaService.getKafkaUtil(clusterId).getTopicDiskSizeForBroker(brokerIds);
            topicFileSizeWriteToEs(mapSize, cluster);
            return mapSize;
        } catch (UnsupportedVersionException e) {
            // The broker rejected describeLogDirs; degrade to "no sizes"
            // instead of failing the whole collection run.
            log.warn("cluster:{} does not support log dir description,skip topic size collection", cluster.getName());
            return new HashMap<>(0);
        } catch (Exception e) {
            throw new Exception("get topic file size has error", e);
        }
    }

    /**
     * Writes one file-size document per topic into the daily monitoring index
     * in Elasticsearch. Failures are logged, never propagated.
     *
     * @param fileSizeMap topic name mapped to its file size in bytes
     * @param cluster     the cluster the sizes belong to
     */
    private void topicFileSizeWriteToEs(Map<String, Long> fileSizeMap, ClusterDto cluster) {
        SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
        // Created once per call instead of once per topic; SimpleDateFormat is
        // not thread-safe but both instances stay confined to this thread.
        SimpleDateFormat timeFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
        String index = elasticsearchService.getMonitorElasticsearchIndexName() + "-" + dateFormat.format(new Date());
        Calendar calendar = TimeUtil.nowCalendar();
        Date now = new Date(calendar.getTimeInMillis());
        List<JSONObject> fileSizeList = new ArrayList<>(fileSizeMap.size());
        fileSizeMap.forEach((topic, size) -> {
            JSONObject obj = new JSONObject();
            obj.put("date", timeFormat.format(now));
            obj.put("timestamp", calendar.getTimeInMillis());
            obj.put("topic", topic);
            obj.put("fileSize", size);
            obj.put("clusterId", cluster.getId());
            obj.put("clusterName", cluster.getName());
            obj.put("type", "fileSize");
            fileSizeList.add(obj);
        });
        try {
            if (elasticsearchService.getESDB() != null) {
                elasticsearchService.getESDB().batchInsertES(fileSizeList, index);
            }
        } catch (Exception e) {
            log.error("file size write to es has error,", e);
        }
    }

    /**
     * Returns the log-dir size per topic for Kafka versions below 2.x, where
     * the admin API is unavailable, by reading the sizes over JMX. Also writes
     * the sizes to Elasticsearch as a side effect.
     *
     * @param cluster             the cluster to measure
     * @param topics              the topic names to measure
     * @param topicDescriptionMap topic name mapped to its description (used to
     *                            enumerate partitions per topic)
     * @return topic name mapped to its total file size in bytes
     * @throws Exception when the JMX collection fails
     */
    public Map<String, Long> unSupportVersionTopicSize(ClusterDto cluster, Set<String> topics,
            Map<String, TopicDescription> topicDescriptionMap) throws Exception {
        Map<String, Set<Integer>> sizeMap = new HashMap<>(8);
        topics.stream().filter(Objects::nonNull).forEach(topic -> {
            TopicDescription topicDescription = topicDescriptionMap.get(topic);
            if (topicDescription == null) {
                // The describe call may have missed this topic; skip it rather
                // than fail the whole size collection with an NPE.
                log.warn("no description for topic {},skip its size collection", topic);
                return;
            }
            Set<Integer> partitions = topicDescription.partitions().stream().map(TopicPartitionInfo::partition)
                    .collect(Collectors.toSet());
            sizeMap.put(topic, partitions);
        });
        try {
            String clusterId = cluster.getId().toString();
            List<BrokerDto> brokers = zooKeeperService.getZooKeeper(clusterId).getBrokers();
            Map<String, Long> mapSize = JmxCollector.getInstance().topicLogSizeByBroker(brokers, sizeMap);
            topicFileSizeWriteToEs(mapSize, cluster);
            return mapSize;
        } catch (Exception e) {
            throw new Exception("get topic file size by jmx has error", e);
        }
    }

    /**
     * Loads the topic rows currently stored in the database for a cluster.
     *
     * @param cluster the cluster whose rows to load
     * @return the stored topics
     */
    private List<Topic> dbTopic(ClusterDto cluster) {
        return topicService.findByClusterId(cluster.getId().toString());
    }

    /**
     * Determines which database rows refer to topics that no longer exist on
     * the cluster.
     *
     * @param topics    the topic names currently on the cluster (may be null)
     * @param topicList the topic rows currently in the database
     * @return the row ids to delete; empty when {@code topics} is null
     */
    private Set<Long> needToDeleteDB(Set<String> topics, List<Topic> topicList) {
        return topicList.stream().filter(topic -> topics != null && !topics.contains(topic.getName())).map(Topic::getId)
                .collect(Collectors.toSet());
    }

    /**
     * Stamps each topic with its collected file size, or -1 when no size is
     * known for it.
     *
     * @param topicSizeMap topic name mapped to its file size in bytes
     * @param topicSet     the topics to stamp (mutated in place)
     * @return the same topics, each with its file size set
     */
    private Set<Topic> mergeWithTopicSize(Map<String, Long> topicSizeMap, Set<Topic> topicSet) {
        return topicSet.stream().map(topic -> {
            topic.setFileSize(topicSizeMap.getOrDefault(topic.getName(), -1L));
            return topic;
        }).collect(Collectors.toSet());
    }

    /**
     * Builds the topic rows that exist on the cluster but not yet in the
     * database. A failure while building one row is logged and that row is
     * skipped.
     *
     * @param topics              the topic names currently on the cluster
     * @param topicList           the topic rows currently in the database
     * @param clusterDto          the owning cluster
     * @param topicDescriptionMap topic name mapped to its description
     * @param topicConfig         topic name mapped to its config
     * @return the rows to insert
     */
    private Set<Topic> needToInsertDB(Set<String> topics, List<Topic> topicList, ClusterDto clusterDto,
            Map<String, TopicDescription> topicDescriptionMap, Map<String, Config> topicConfig) {
        Set<Topic> needInsert = new HashSet<>();
        Set<String> dbTopics = topicList.stream().map(Topic::getName).collect(Collectors.toSet());
        topics.stream().filter(topic -> !dbTopics.contains(topic)).forEach(topic -> {
            try {
                TopicDescription topicDescription = topicDescriptionMap.get(topic);
                List<TopicPartitionInfo> topicPartitionInfos = Objects.isNull(topicDescription) ? new ArrayList<>()
                        : topicDescription.partitions();
                int partition = partitionCount(topicPartitionInfos);
                int replicas = replicationFactor(topicPartitionInfos);
                Topic resource = this.updateTTL(topicConfig, clusterDto.getId().toString(), new Topic(), topic);
                resource.setPartition(partition);
                Cluster cluster = new Cluster();
                cluster.setId(clusterDto.getId());
                resource.setCluster(cluster);
                resource.setReplication((short) replicas);
                resource.setName(topic);
                needInsert.add(resource);
            } catch (Exception e) {
                log.error("need To insert db has error!", e);
            }
        });
        return needInsert;
    }

    /**
     * Builds the topic rows that exist both on the cluster and in the database
     * and therefore need their metadata refreshed. A failure while refreshing
     * one row is logged and that row is skipped.
     *
     * @param topics              the topic names currently on the cluster
     * @param topicList           the topic rows currently in the database
     * @param cluster             the owning cluster
     * @param topicDescriptionMap topic name mapped to its description
     * @param topicConfig         topic name mapped to its config
     * @return the rows to update
     */
    private Set<Topic> needToUpdateDB(Set<String> topics, List<Topic> topicList, ClusterDto cluster,
            Map<String, TopicDescription> topicDescriptionMap, Map<String, Config> topicConfig) {
        String clusterId = cluster.getId().toString();
        return topicList.stream().filter(topic -> topics.contains(topic.getName())).map(topic -> {
            try {
                List<TopicPartitionInfo> topicPartitionInfos = topicDescriptionMap.get(topic.getName()).partitions();
                Topic resource = this.updateTTL(topicConfig, clusterId, topic, topic.getName());
                resource.setPartition(partitionCount(topicPartitionInfos));
                resource.setReplication((short) replicationFactor(topicPartitionInfos));
                return resource;
            } catch (Exception e) {
                // Log instead of silently dropping the failure; the nameless
                // placeholder is filtered out below.
                log.error("need to update db has error,topic:{}", topic.getName(), e);
                return new Topic();
            }
        }).filter(topic -> topic.getName() != null).collect(Collectors.toSet());
    }

    /** Returns the number of partitions, or 0 when the list is empty. */
    private int partitionCount(List<TopicPartitionInfo> topicPartitionInfos) {
        return CollectionUtils.isEmpty(topicPartitionInfos) ? 0 : topicPartitionInfos.size();
    }

    /** Returns the replication factor of the first partition, or 0 when unknown. */
    private int replicationFactor(List<TopicPartitionInfo> topicPartitionInfos) {
        if (CollectionUtils.isEmpty(topicPartitionInfos)) {
            return 0;
        }
        return topicPartitionInfos.get(0).replicas().size();
    }

    /**
     * Sets the topic's TTL from its broker config ({@code delete.retention.ms})
     * when available, otherwise from the topic config stored in ZooKeeper
     * ({@code delete.retention.ms}, then {@code retention.ms}, then 0).
     *
     * @param topicConfig topic name mapped to its broker-side config (may be
     *                    empty or missing this topic)
     * @param clusterId   the cluster identifier, used for the ZooKeeper fallback
     * @param topic       the row to update (mutated in place)
     * @param topicName   the topic whose TTL to resolve
     * @return the same {@code topic} instance with its TTL set
     * @throws Exception when neither source can be read
     */
    private Topic updateTTL(Map<String, Config> topicConfig, String clusterId, Topic topic, String topicName)
            throws Exception {
        try {
            Config config = CollectionUtils.isEmpty(topicConfig) ? null : topicConfig.get(topicName);
            if (config != null) {
                config.entries().forEach(entry -> {
                    if ("delete.retention.ms".equalsIgnoreCase(entry.name())) {
                        long ttl = Long.parseLong(entry.value());
                        topic.setTtl(ttl > 0 ? ttl : 0);
                    }
                });
            } else {
                // No broker config for this topic; fall back to ZooKeeper.
                JSONObject zkConfig = zooKeeperService.getZooKeeper(clusterId).descriptionConfig(topicName);
                if (zkConfig.containsKey("delete.retention.ms")) {
                    long ttl = Long.parseLong(zkConfig.getString("delete.retention.ms").trim());
                    topic.setTtl(ttl > 0 ? ttl : 0);
                } else if (zkConfig.containsKey("retention.ms")) {
                    long ttl = Long.parseLong(zkConfig.getString("retention.ms").trim());
                    topic.setTtl(ttl > 0 ? ttl : 0);
                } else {
                    topic.setTtl(0L);
                }
            }
        } catch (Exception e) {
            throw new Exception("topic collect update has error: " + clusterId + "|" + topicName, e);
        }
        return topic;
    }
}
