package com.nju.crawler.service;

import cn.hutool.core.lang.Pair;
import cn.hutool.json.JSON;
import com.nju.crawler.constants.QueryTopEnum;
import com.nju.crawler.domain.WordCloudPair;
import com.nju.crawler.repository.PersistService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.relational.core.sql.In;
import org.springframework.stereotype.Service;

import java.util.*;

/**
 * Builds the "top" leaderboard response (star, fork, issue, size topics).
 * <p>
 * The crawler persists snapshots in fixed topic-blocked order — size, issue,
 * star, fork — with exactly {@code DEFAULT_BATCH_SIZE} rows per topic, each
 * block sorted ascending by count, so a complete snapshot has
 * {@code DEFAULT_BATCH_SIZE * 4} rows. An incomplete snapshot (lost rows) is
 * treated as abnormal and answered with generated fallback data.
 *
 * @author liyunfei
 * @date 2022/11/19
 **/
@Service
public class QueryTopService {

    @Autowired
    private PersistService persistService;

    /** Expected snapshot size: 4 topics x DEFAULT_BATCH_SIZE rows each. */
    private static final int LIMIT_COUNT = 100;
    /** Response built from generated fallback data. */
    private static final String DEFAULT_TYPE = "default";
    /** Response built from real database rows. */
    private static final String DB_TYPE = "db";
    /** Number of entries exposed in the star/fork top lists. */
    private static final int DEFAULT_TOP_LIMIT_SIZE = 5;
    /** Rows crawled per topic per batch; must stay in sync with the big-data side. */
    private static final int DEFAULT_BATCH_SIZE = 25;

    /** Column key for the repository name in a snapshot row. */
    private static final String REPO_NAME_KEY = "repo_name";
    /** Column key for the count value in a snapshot row. */
    private static final String CNT_KEY = "cnt";

    /**
     * Loads the latest leaderboard snapshot from the database and converts it
     * into the response structure.
     * <p>
     * TODO(review): consider a cache layer (Redis / in-memory) or change
     * notification instead of querying the DB on every call.
     *
     * @return map with keys {@code starTop}, {@code forkTop} and {@code issueHot}
     */
    public Map<String, Map> queryTop() {
        List<Map<String, Object>> topList = persistService.topListQuery(LIMIT_COUNT);
        // A short snapshot means rows were lost; serve fallback data instead of
        // partially-wrong leaderboards. (size != LIMIT_COUNT also covers empty.)
        if (topList.size() != LIMIT_COUNT) {
            return convertToResp(topList, DEFAULT_TYPE);
        }
        return convertToResp(topList, DB_TYPE);
    }

    /**
     * Converts a raw snapshot into the response structure.
     *
     * @param topList snapshot rows; for non-default {@code type} this is assumed
     *                to hold exactly {@code DEFAULT_BATCH_SIZE * 4} topic-blocked
     *                rows (callers enforce this via {@link #queryTop()})
     * @param type    {@code "default"} for generated fallback data, anything
     *                else (normally {@code "db"}) for real rows
     * @return map with keys {@code starTop}, {@code forkTop} and {@code issueHot}
     */
    public Map<String, Map> convertToResp(List<Map<String, Object>> topList, String type) {
        Map<String, Map> rs = new HashMap<>(3);
        List<String> starNameList = new ArrayList<>(DEFAULT_TOP_LIMIT_SIZE);
        List<String> forkNameList = new ArrayList<>(DEFAULT_TOP_LIMIT_SIZE);
        List<Integer> starValList = new ArrayList<>(DEFAULT_TOP_LIMIT_SIZE);
        List<Integer> forkValList = new ArrayList<>(DEFAULT_TOP_LIMIT_SIZE);
        List<WordCloudPair<String, Integer>> issuePairList = new ArrayList<>(DEFAULT_BATCH_SIZE);

        if (!DEFAULT_TYPE.equals(type)) {
            fillFromDb(topList, starNameList, starValList, forkNameList, forkValList, issuePairList);
        } else {
            fillWithRandomDefaults(starNameList, starValList, forkNameList, forkValList, issuePairList);
        }

        Map<String, List<Object>> starTop = new HashMap<>();
        // NOTE(review): singletonList wraps the whole list as ONE element, so the
        // client receives [[...]] rather than [...]. Preserved as-is because the
        // front end may already depend on this shape — confirm before flattening.
        starTop.put("nameList", Collections.singletonList(starNameList));
        starTop.put("valList", Collections.singletonList(starValList));
        Map<String, List<Object>> forkTop = new HashMap<>();
        forkTop.put("nameList", Collections.singletonList(forkNameList));
        forkTop.put("valList", Collections.singletonList(forkValList));

        Map<String, List<WordCloudPair<String, Integer>>> issueHot = new HashMap<>();
        issueHot.put("issuePairList", issuePairList);

        rs.put("starTop", starTop);
        rs.put("forkTop", forkTop);
        rs.put("issueHot", issueHot);
        return rs;
    }

    /**
     * Fills the output lists from real snapshot rows.
     * <p>
     * Row layout (strictly order-bound to the crawler): indices
     * [0, 25) size, [25, 50) issue, [50, 75) star, [75, 100) fork, each block
     * sorted ascending by count. Walking {@code i} from high to low therefore
     * emits entries in descending count order.
     */
    private void fillFromDb(List<Map<String, Object>> topList,
                            List<String> starNameList, List<Integer> starValList,
                            List<String> forkNameList, List<Integer> forkValList,
                            List<WordCloudPair<String, Integer>> issuePairList) {
        final int issueOffset = DEFAULT_BATCH_SIZE;      // 25
        final int starOffset = DEFAULT_BATCH_SIZE * 2;   // 50
        final int forkOffset = DEFAULT_BATCH_SIZE * 3;   // 75
        for (int i = DEFAULT_BATCH_SIZE - 1; i >= 0; i--) {
            // Star/fork leaderboards only expose the top DEFAULT_TOP_LIMIT_SIZE entries.
            if (DEFAULT_BATCH_SIZE - i <= DEFAULT_TOP_LIMIT_SIZE) {
                Map<String, Object> forkMap = topList.get(forkOffset + i);
                Map<String, Object> starMap = topList.get(starOffset + i);
                starNameList.add((String) starMap.get(REPO_NAME_KEY));
                // NOTE(review): assumes the driver maps "cnt" to Integer, not Long —
                // a Long here would throw ClassCastException; confirm column type.
                starValList.add((Integer) starMap.get(CNT_KEY));
                forkNameList.add((String) forkMap.get(REPO_NAME_KEY));
                forkValList.add((Integer) forkMap.get(CNT_KEY));
            }
            // The issue word cloud keeps the full batch of DEFAULT_BATCH_SIZE entries.
            Map<String, Object> issueMap = topList.get(issueOffset + i);
            issuePairList.add(new WordCloudPair<>((String) issueMap.get(REPO_NAME_KEY),
                    (Integer) issueMap.get(CNT_KEY)));
        }
    }

    /**
     * Fills the output lists with randomly generated placeholder data, used when
     * the snapshot is incomplete.
     */
    private void fillWithRandomDefaults(List<String> starNameList, List<Integer> starValList,
                                        List<String> forkNameList, List<Integer> forkValList,
                                        List<WordCloudPair<String, Integer>> issuePairList) {
        // One shared Random instead of allocating a new one per generated value.
        Random random = new Random();
        for (int i = 0; i < DEFAULT_TOP_LIMIT_SIZE; i++) {
            starNameList.add("starRepo-" + random.nextInt(10));
            forkNameList.add("forkRepo" + random.nextInt(10));
            starValList.add(random.nextInt(100));
            forkValList.add(random.nextInt(100));
        }
        for (int i = 0; i < DEFAULT_BATCH_SIZE; i++) {
            issuePairList.add(new WordCloudPair<>("repo-" + i, 100 + random.nextInt(100)));
        }
    }

}
