package com.ustcinfo.ishare.kafka.monitor.utils;

import com.ustcinfo.ishare.kafka.monitor.bean.KafkaMonitorInfo;
import com.ustcinfo.ishare.kafka.monitor.bean.MetadataInfo;
import com.ustcinfo.ishare.kafka.monitor.service.IKafkaInfoService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Created by Shinelon on 2018/3/21.
 */
/**
 * Periodically collects Kafka consumer metrics (log size and committed offset
 * per topic/consumer-group pair) and persists a sample through
 * {@link IKafkaInfoService} whenever either value changed.
 *
 * <p>One scheduled task ({@link TopicGroupRunnable}) discovers all
 * "topic&amp;group" pairs and feeds them into a shared work queue; several
 * worker tasks ({@link MyRunnable}) drain the queue, read the current metrics
 * and insert a row when the sample differs from the previous one.
 *
 * <p>Thread-safety: the work queue and the last-sample map are shared by all
 * worker threads, hence the concurrent collection types.
 *
 * Created by Shinelon on 2018/3/21.
 */
@Component
public class GetAllKafkaInfo {

    private static final Logger LOG = Logger.getLogger(GetAllKafkaInfo.class.getName());

    /** Separator between topic and group in a queued work item ("topic&group"). */
    private static final String PARAM_SEPARATOR = "&";

    /** Number of collector tasks draining the work queue in parallel. */
    private static final int WORKER_COUNT = 5;

    /** Work queue of "topic&group" items produced by {@link TopicGroupRunnable}. */
    private static final ConcurrentLinkedQueue<String> concurrentLinkedQueue =
            new ConcurrentLinkedQueue<String>();

    private static final ScheduledExecutorService scheduledExecutorService =
            Executors.newScheduledThreadPool(WORKER_COUNT);

    @Autowired
    private IKafkaInfoService kafkaInfoService;

    @Autowired
    KafkaInfoUtils kafkaInfoUtils;

    /**
     * Last persisted sample per "topic&group" key. Read and written
     * concurrently by the worker tasks, so this must be a ConcurrentHashMap
     * (a plain HashMap is unsafe under the multi-threaded scheduler).
     */
    static Map<String, KafkaMonitorInfo> latestMinuteMap =
            new ConcurrentHashMap<String, KafkaMonitorInfo>();

    /**
     * Starts the discovery task and {@value #WORKER_COUNT} collector tasks on
     * the shared scheduler. Intended to be called once at startup; the tasks
     * run until the JVM exits.
     */
    public void getAllInfo() {
        // Discover topic&group pairs every 3s and queue them for the workers.
        scheduledExecutorService.scheduleWithFixedDelay(
                new TopicGroupRunnable(), 500, 3000, TimeUnit.MILLISECONDS);

        // Workers drain the queue and persist changed samples every 5s.
        for (int i = 0; i < WORKER_COUNT; i++) {
            scheduledExecutorService.scheduleWithFixedDelay(
                    new MyRunnable(), 500, 5000, TimeUnit.MILLISECONDS);
        }
    }

    /**
     * Collector task: drains roughly 1/{@value #WORKER_COUNT} of the queue,
     * samples each pair's metrics and persists changed samples.
     */
    class MyRunnable implements Runnable {

        public void run() {
            double size = concurrentLinkedQueue.size();
            int share = (int) Math.ceil(size / WORKER_COUNT);

            for (int i = 0; i < share; i++) {
                try {
                    // poll() + null check instead of isEmpty()+poll(): with
                    // several concurrent workers the queue may drain between
                    // the two calls, which previously produced a swallowed NPE.
                    String param = concurrentLinkedQueue.poll();
                    if (param == null) {
                        break;
                    }
                    collect(param);
                } catch (Exception e) {
                    LOG.log(Level.WARNING, "failed to collect kafka metrics", e);
                }
            }
        }

        /**
         * Samples log size / offset for one "topic&group" item and inserts a
         * row when this is the first sample or when either value changed.
         */
        private void collect(String param) {
            String[] parts = param.split(PARAM_SEPARATOR, 2);
            String topicName = parts[0];
            String groupName = parts[1];

            // Current partition list, total log size and committed offset.
            List<String> partitions = kafkaInfoUtils.getPartitionsByTopic(topicName);
            long logSize = kafkaInfoUtils.getTotalLogSizeByTopic(topicName, groupName);
            long offset = kafkaInfoUtils.getOffsets(topicName, groupName);

            KafkaMonitorInfo kafkaMonitorInfo = latestMinuteMap.get(param);
            if (kafkaMonitorInfo == null) {
                // First sample for this pair: record it with zero increments.
                kafkaMonitorInfo = new KafkaMonitorInfo();
                kafkaMonitorInfo.setLogSize(logSize);
                kafkaMonitorInfo.setLogSizeIncrement(0L);
                kafkaMonitorInfo.setOffset(offset);
                kafkaMonitorInfo.setOffsetIncrement(0L);
                fillCommonFields(kafkaMonitorInfo, topicName, groupName, partitions);
                latestMinuteMap.put(param, kafkaMonitorInfo);

                kafkaInfoService.insert(kafkaMonitorInfo);
            } else {
                // Subsequent sample: compute increments against the previous
                // sample, then persist only when log size or offset moved.
                boolean logSizeChanged = logSize != kafkaMonitorInfo.getLogSize();
                boolean offsetChanged = offset != kafkaMonitorInfo.getOffset();

                kafkaMonitorInfo.setLogSizeIncrement(logSize - kafkaMonitorInfo.getLogSize());
                kafkaMonitorInfo.setLogSize(logSize);
                kafkaMonitorInfo.setOffsetIncrement(offset - kafkaMonitorInfo.getOffset());
                kafkaMonitorInfo.setOffset(offset);
                fillCommonFields(kafkaMonitorInfo, topicName, groupName, partitions);

                if (logSizeChanged || offsetChanged) {
                    kafkaInfoService.insert(kafkaMonitorInfo);
                }
            }
        }

        /** Populates the identifying fields and stamps the sample time. */
        private void fillCommonFields(KafkaMonitorInfo info, String topicName,
                                      String groupName, List<String> partitions) {
            info.setTopic(topicName);
            info.setPartitions(partitions.toString());
            info.setGroupName(groupName);
            info.setGroupJaasConf(groupName + "-jaas.conf");
            info.setCommitTime(System.currentTimeMillis());
        }
    }

    /**
     * Discovery task: enumerates every topic and its consumer groups and
     * enqueues each "topic&group" pair that is not already queued.
     */
    class TopicGroupRunnable implements Runnable {

        public void run() {
            for (String topic : kafkaInfoUtils.getAllTopics()) {
                for (String group : kafkaInfoUtils.getAllGroupsForTopic(topic)) {
                    String item = topic + PARAM_SEPARATOR + group;
                    // contains()+offer() is racy across threads, but a
                    // duplicate item only causes one extra (idempotent) sample.
                    if (!concurrentLinkedQueue.contains(item)) {
                        concurrentLinkedQueue.offer(item);
                    }
                }
            }
        }
    }

    public static Map<String, KafkaMonitorInfo> getLatestMinuteMap() {
        return latestMinuteMap;
    }

    public static void setLatestMinuteMap(Map<String, KafkaMonitorInfo> latestMinuteMap) {
        GetAllKafkaInfo.latestMinuteMap = latestMinuteMap;
    }

    /**
     * @return partition metadata for every topic, flattened into one list.
     */
    public List<MetadataInfo> getAllMetadataInfo() {
        List<MetadataInfo> metadataInfoList = new ArrayList<MetadataInfo>();
        for (String topic : kafkaInfoUtils.getAllTopics()) {
            metadataInfoList.addAll(kafkaInfoUtils.findMetadataInfoByTopic(topic));
        }
        return metadataInfoList;
    }

    /**
     * @return one {@link MetadataInfo} per topic carrying its partition count
     *         and replica count.
     */
    public List<MetadataInfo> getTopicList() {
        List<MetadataInfo> topicList = new ArrayList<MetadataInfo>();
        for (String topic : kafkaInfoUtils.getAllTopics()) {
            MetadataInfo metadataInfo = new MetadataInfo();
            List<String> partitions = kafkaInfoUtils.getPartitionsByTopic(topic);
            // Replica count is derived from the comma-separated replica/ISR
            // list of partition 0 — assumes uniform replication per topic.
            String replication = kafkaInfoUtils.getReplicasIsr(topic, 0);

            metadataInfo.setTopic(topic);
            metadataInfo.setPartitionSize(partitions.size());
            metadataInfo.setReplicasSize(replication.split(",").length);
            topicList.add(metadataInfo);
        }
        return topicList;
    }
}
