package com.yuntsg.subject_database.servic;

import cn.hutool.core.collection.ConcurrentHashSet;
import cn.hutool.db.Db;
import cn.hutool.db.Entity;
import cn.hutool.json.JSONUtil;
import com.baidu.bjf.remoting.protobuf.annotation.ProtobufClass;
import com.yuntsg.subject_database.entity.PublicEntity;
import com.yuntsg.subject_database.util.OftenUtils;
import com.yuntsg.subject_database.util.PublicUtils;
import lombok.Data;

import java.io.IOException;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;

// 研究热点、新词发现、赞助基金、年发文量、年被引量五个部分的计算。
public class SubjectDatabaseWord {

    // NOTE: "equals" and "allStr" are not the same: allStr is always the full phrase, while
    // "equals" is the full phrase only for one- or two-word phrases; for a three-word phrase it is
    // [0]+[1] or [1]+[2], and for a four-word phrase it is [0]+[1]+[2] or [1]+[2]+[3].
    // When forKey matches "equals", joins allStr with every member of "set" using the "spl"
    // delimiter and records the joined string in disjointSet.
    public static void addDisjointSet(String forKey, String allStr, Set<String> set, String spl, Set<String> disjointSet, String equals) {
        if (!forKey.equals(equals)) {
            return;
        }
        StringBuilder joined = new StringBuilder(allStr);
        set.forEach(member -> joined.append(spl).append(member));
        disjointSet.add(joined.toString());
    }

    // Groups similar keywords into phrase clusters: a one-word key found inside a two-word key is
    // merged with it, a two-word key inside a three-word key likewise, and a three-word key inside
    // a four-word key; finally the clusters are re-sorted by score and inserted into the table.
    public static void wordGroup(String saveTable, Map<String, Research> researchFocusMap, Integer top, String code, String specialist, String type, Integer jourId, List<PublicEntity.TableField> tableList, boolean isInsertTable) throws IOException, SQLException {
        Set<String> researchFocusSet = new ConcurrentHashSet<>();
        int researchFocusNum = 0;
        Map<Integer, Map<String, Set<String>>> wordGroupMap = new LinkedHashMap<>();
        // Pass 1: build wordGroupMap, structured as phrase length -> sub-phrase -> set of full
        // phrases containing that sub-phrase. e.g. for "covid 19 severity" and "covid 19 etiology"
        // the sub-phrase "covid 19" maps to ["covid 19 severity", "covid 19 etiology"].
        for (Map.Entry<String, Research> entry : researchFocusMap.entrySet()) {
            String[] list = entry.getKey().split(" ");
            if (list.length == 2) {
                OftenUtils.putIntStrSetStr(2, list[0], wordGroupMap, entry.getKey());
                OftenUtils.putIntStrSetStr(2, list[1], wordGroupMap, entry.getKey());
            }
            if (list.length == 3) {
                OftenUtils.putIntStrSetStr(3, list[0] + " " + list[1], wordGroupMap, entry.getKey());
                OftenUtils.putIntStrSetStr(3, list[1] + " " + list[2], wordGroupMap, entry.getKey());
            }
            if (list.length == 4) {
                OftenUtils.putIntStrSetStr(4, list[0] + " " + list[1] + " " + list[2], wordGroupMap, entry.getKey());
                OftenUtils.putIntStrSetStr(4, list[1] + " " + list[2] + " " + list[3], wordGroupMap, entry.getKey());
            }
        }

        Set<String> disjointSet = new HashSet<>();
        // Delimiter used to join phrases into one disjoint-set entry; chosen so it is unlikely to
        // appear inside a keyword.
        String spl = "ĺļ";
        // Pass 2: record matching phrases into the disjoint set. Note: a length-3 phrase is only
        // compared against the length-3 and length-4 groups; against length 3 it matches via its
        // own [0]+[1] and [1]+[2] sub-phrases, while against length 4 it matches via its full
        // string — that is how the pairings line up.
        for (Map.Entry<String, Research> entry : researchFocusMap.entrySet()) {
            String allStr = entry.getKey();
            String[] list = allStr.split(" ");
            if (list.length == 1 && wordGroupMap.containsKey(2)) {
                Map<String, Set<String>> map = wordGroupMap.get(2);
                for (Map.Entry<String, Set<String>> entity : map.entrySet()) {
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, allStr);
                }
            }
            if (list.length == 2 && wordGroupMap.containsKey(3)) {
                Map<String, Set<String>> map = wordGroupMap.get(3);
                for (Map.Entry<String, Set<String>> entity : map.entrySet()) {
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, allStr);
                }
            }
            if (list.length == 3 && wordGroupMap.containsKey(3)) {
                Map<String, Set<String>> map = wordGroupMap.get(3);
                for (Map.Entry<String, Set<String>> entity : map.entrySet()) {
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, list[0] + " " + list[1]);
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, list[1] + " " + list[2]);
                }
            }
            if (list.length == 3 && wordGroupMap.containsKey(4)) {
                Map<String, Set<String>> map = wordGroupMap.get(4);
                for (Map.Entry<String, Set<String>> entity : map.entrySet()) {
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, allStr);
                }
            }
            if (list.length == 4 && wordGroupMap.containsKey(4)) {
                Map<String, Set<String>> map = wordGroupMap.get(4);
                for (Map.Entry<String, Set<String>> entity : map.entrySet()) {
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, list[0] + " " + list[1] + " " + list[2]);
                    addDisjointSet(entity.getKey(), allStr, entity.getValue(), spl, disjointSet, list[1] + " " + list[2] + " " + list[3]);
                }
            }
        }

        List<Set<String>> disjointList = OftenUtils.disjointSet(disjointSet, spl);
        Map<Set<String>, Map<String, Research>> wordResultMap = new ConcurrentHashMap<>();
        Map<Set<String>, Double> wordScoreMap = new ConcurrentHashMap<>();
        Map<Set<String>, Integer> wordNumMap = new ConcurrentHashMap<>();

        // Pass 3: if a keyword appears among the co-words of an earlier keyword, it is suppressed.
        // If the keyword belongs to a disjoint-set cluster, the merged phrases are collected into
        // wordSet and inserted together as one row, and the other cluster members are suppressed
        // below. The "if (!key.equals(s))" guard pairs with "if (!researchFocusSet.contains(key))"
        // further down — without it, that branch would never be entered.
        for (Map.Entry<String, Research> entry : researchFocusMap.entrySet()) {
            String key = entry.getKey();
            Map<String, Set<Integer>> coWordMap = entry.getValue().getCoWordMap();
            for (Map.Entry<String, Set<Integer>> coWord : coWordMap.entrySet()) {
                researchFocusSet.add(coWord.getKey());
            }
            Set<String> wordSet = new HashSet<>();
            for (Set<String> set : disjointList) {
                if (set.contains(key)) {
                    for (String s : set) {
                        wordSet.add(s);
                        if (!key.equals(s)) {
                            researchFocusSet.add(s);
                        }
                    }
                }
            }
            double score = 0.0;
            int paperNum = 0;
            if (!researchFocusSet.contains(key)) {
                wordSet.add(key);
                researchFocusSet.add(key);
                Map<String, Research> wordMap = new ConcurrentHashMap<>();
                for (String s : wordSet) {
                    if (researchFocusMap.containsKey(s)) {
                        score += researchFocusMap.get(s).getScore();
                        paperNum += researchFocusMap.get(s).idList.size();
                        wordMap.put(s, researchFocusMap.get(s));
                    }
                }
                researchFocusNum++;
                wordResultMap.put(wordSet, wordMap);
                wordScoreMap.put(wordSet, score);
                wordNumMap.put(wordSet, paperNum);
            }
            if (researchFocusNum == top) {
                break;
            }
        }
        // Finally, insert the rows; score and paper count were computed above, so a direct map
        // lookup is enough here.
        researchFocusNum = 0;
        Map<Set<String>, Double> setDoubleMap = mapSortValueInt(wordScoreMap);
        for (Map.Entry<Set<String>, Double> entry : setDoubleMap.entrySet()) {
            Set<String> key = entry.getKey();
            researchFocusNum++;
            if (wordResultMap.containsKey(key) && wordNumMap.containsKey(key)) {
                PublicUtils.insertTable(saveTable, JSONUtil.toJsonStr(daiBiaoWord(key)), JSONUtil.toJsonStr(wordResultMap.get(key)), code, type, researchFocusNum, OftenUtils.takeFourDigits(entry.getValue()), wordNumMap.get(key), specialist, jourId, "", "", tableList, isInsertTable);
            }
        }
    }

    // Picks the representative phrase of a cluster. e.g. for "covid 19 severity" and
    // "covid 19 etiology" the representative is "covid 19"; the output carries the representative
    // word first and the original phrase set after it -> covid 19[covid 19 severity,covid 19 etiology].
    public static DaiBiao daiBiaoWord(Set<String> keySet) {
        DaiBiao daiBiao = new DaiBiao();
        // Single-phrase cluster: the phrase represents itself.
        if (keySet.size() == 1) {
            daiBiao.setWord(String.join(",", keySet));
            daiBiao.setWordSet(keySet);
            return daiBiao;
        }
        // Count every leading/trailing sub-phrase across the cluster; the most frequent one
        // (after sorting) becomes the representative.
        Map<String, Integer> subPhraseCounts = new ConcurrentHashMap<>();
        for (String phrase : keySet) {
            String[] words = phrase.split(" ");
            switch (words.length) {
                case 2:
                    subPhraseCounts.merge(words[0], 1, Integer::sum);
                    subPhraseCounts.merge(words[1], 1, Integer::sum);
                    break;
                case 3:
                    subPhraseCounts.merge(words[0] + " " + words[1], 1, Integer::sum);
                    subPhraseCounts.merge(words[1] + " " + words[2], 1, Integer::sum);
                    break;
                case 4:
                    subPhraseCounts.merge(words[0] + " " + words[1] + " " + words[2], 1, Integer::sum);
                    subPhraseCounts.merge(words[1] + " " + words[2] + " " + words[3], 1, Integer::sum);
                    break;
                default:
                    // Phrases of other lengths contribute no sub-phrase candidates.
                    break;
            }
        }
        Map<String, Integer> sorted = OftenUtils.mapSortValueIntLen(subPhraseCounts, 1);
        Iterator<Map.Entry<String, Integer>> it = sorted.entrySet().iterator();
        if (it.hasNext()) {
            daiBiao.setWord(it.next().getKey());
            daiBiao.setWordSet(keySet);
        }
        return daiBiao;
    }

    // Sorts the map by value in descending order (largest first), preserving that order in the
    // returned LinkedHashMap.
    public static Map<Set<String>, Double> mapSortValueInt(Map<Set<String>, Double> mapName) {
        List<Map.Entry<Set<String>, Double>> entries = new ArrayList<>(mapName.entrySet());
        entries.sort(Collections.reverseOrder(Map.Entry.comparingByValue()));
        Map<Set<String>, Double> sorted = new LinkedHashMap<>();
        for (Map.Entry<Set<String>, Double> entry : entries) {
            sorted.put(entry.getKey(), entry.getValue());
        }
        return sorted;
    }

    // Writes out the results: research hotspots, new-word discovery, top units (China/other,
    // university/hospital), top funds (China/other), core authors, yearly article counts and
    // yearly citation counts, each inserted into saveTable with timing logged per section.
    public static void wordOut(boolean isPm, String saveTable, Map<String, RankScore> rankScoreMap, Double startYear, Integer top, String code,
                               String specialist, Integer jourId,
                               Map<Integer, SubjectDatabaseWord.ArticleInfo> allArticleInfoMapAllYearBefore,
                               Map<Integer, SubjectDatabaseWord.ArticleInfo> allArticleInfoMap,
                               Map<String, PublicEntity.TypeAndRank> unitOrAuthAndRankMap,
                               List<PublicEntity.TableField> tableList, boolean isInsertTable, boolean isCoWord) throws SQLException, IOException {
        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        long t0 = System.currentTimeMillis();


        // Per-article citation and partition averages for each fund and unit.
        Map<String, Map<String, List<Double>>> fundAndUnitScoreMap = new ConcurrentHashMap<>();
        Result result = getFundAndUnitScore(isPm, fundAndUnitScoreMap, allArticleInfoMapAllYearBefore, allArticleInfoMap, rankScoreMap, startYear, top, isCoWord);
        long t1 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " result用时：" + OftenUtils.Arith.div((t1 - t0), 1000) + " 秒。");


        Map<String, Research> researchFocusMap = result.getResearchFocusMap();
        Map<String, Research> focusUnitAndIdMap = result.getFocusUnitAndIdMap();
        Map<String, Research> focusFundAndIdMap = result.getFocusFundAndIdMap();
        Map<String, Research> newWordDiscoveryMap = result.getNewWordDiscoveryMap();
        Map<String, Research> CoreAuthorMap = result.getCoreAuthorMap();
        Map<Integer, List<Integer>> yearAndIdListMap = result.getYearAndIdListMap();
        Map<Integer, Integer> yearAndTcMap = result.getYearAndTcMap();

        // Research hotspots and new-word discovery: keywords are saved from highest to lowest
        // together with their co-words; a keyword that is already a co-word of an earlier keyword
        // is not shown again. Upstream, top was already tripled (top = top * 3) so that enough
        // candidates survive filtering to reach the configured count.
        // NOTE(review): the tripling happens outside this method — confirm in getFundAndUnitScore.
        wordGroup(saveTable, researchFocusMap, top, code, specialist, "研究热点", jourId, tableList, isInsertTable);
        wordGroup(saveTable, newWordDiscoveryMap, top, code, specialist, "新词发现", jourId, tableList, isInsertTable);
        long t2 = System.currentTimeMillis();
//        System.out.println(format.format(new Date()) + " 研究热点和新词发现用时：" + OftenUtils.Arith.div((t2 - t1), 1000) + " 秒。");


        // Presumably isPm means "PubMed mode" — in that mode each category is capped at 10 rows.
        // TODO confirm the flag's meaning with the caller.
        if (isPm) {
            top = 10;
        }
        int chinaUniv = 0;
        int chinaHosp = 0;
        int OtherUniv = 0;
        int OtherHosp = 0;
        // Units are bucketed into four categories (China/other x university/hospital); each bucket
        // takes at most "top" rows, and the loop stops early once all four buckets are full.
        for (Map.Entry<String, Research> entry : focusUnitAndIdMap.entrySet()) {
            String unit = entry.getKey();
            String value = JSONUtil.toJsonStr(entry.getValue());
            int pageNum = entry.getValue().getIdList().size();
            double score = entry.getValue().getScore();
            String country = "Other";
            if (isChina(unit)) {
                country = "China";
            }
            String unitType = "univ";
            if (isHosp(unit)) {
                unitType = "hosp";
            }
            if (country.equals("China") && unitType.equals("univ") && chinaUniv < top && isRemoveUnit(unit)) {
                chinaUniv++;
                putUnitOrAuthAndRankMap(unitOrAuthAndRankMap, unit, "中国大学", chinaUniv, code, specialist, jourId);
                PublicUtils.insertTable(saveTable, unit, value, code, "中国大学", chinaUniv, score, pageNum, specialist, jourId, "", "", tableList, isInsertTable);
            }
            if (country.equals("China") && unitType.equals("hosp") && chinaHosp < top && isRemoveUnit(unit)) {
                chinaHosp++;
                putUnitOrAuthAndRankMap(unitOrAuthAndRankMap, unit, "中国医院", chinaHosp, code, specialist, jourId);
                PublicUtils.insertTable(saveTable, unit, value, code, "中国医院", chinaHosp, score, pageNum, specialist, jourId, "", "", tableList, isInsertTable);
            }
            if (country.equals("Other") && unitType.equals("univ") && OtherUniv < top && isRemoveUnit(unit)) {
                OtherUniv++;
                putUnitOrAuthAndRankMap(unitOrAuthAndRankMap, unit, "其它大学", OtherUniv, code, specialist, jourId);
                PublicUtils.insertTable(saveTable, unit, value, code, "其它大学", OtherUniv, score, pageNum, specialist, jourId, "", "", tableList, isInsertTable);
            }
            if (country.equals("Other") && unitType.equals("hosp") && OtherHosp < top && isRemoveUnit(unit)) {
                OtherHosp++;
                putUnitOrAuthAndRankMap(unitOrAuthAndRankMap, unit, "其它医院", OtherHosp, code, specialist, jourId);
                PublicUtils.insertTable(saveTable, unit, value, code, "其它医院", OtherHosp, score, pageNum, specialist, jourId, "", "", tableList, isInsertTable);
            }
            if (chinaUniv == top && chinaHosp == top && OtherUniv == top && OtherHosp == top) {
                break;
            }
        }
        long t3 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 国内国外单位用时：" + OftenUtils.Arith.div((t3 - t2), 1000) + " 秒。");

        // Funds are bucketed into two categories (China/other), "top" rows each.
        int chinaFund = 0;
        int OtherFund = 0;
        for (Map.Entry<String, Research> entry : focusFundAndIdMap.entrySet()) {
            String key = entry.getKey();
            String country = "Other";
            int pageNum = entry.getValue().getIdList().size();
            double score = entry.getValue().getScore();
            if (isChina(key)) {
                country = "China";
            }
            if (country.equals("China") && chinaFund < top) {
                chinaFund++;
                PublicUtils.insertTable(saveTable, key, JSONUtil.toJsonStr(entry.getValue()), code, "中国基金", chinaFund, score, pageNum, specialist, jourId, "", "", tableList, isInsertTable);
            }
            if (country.equals("Other") && OtherFund < top) {
                OtherFund++;
                PublicUtils.insertTable(saveTable, key, JSONUtil.toJsonStr(entry.getValue()), code, "其它基金", OtherFund, score, pageNum, specialist, jourId, "", "", tableList, isInsertTable);
            }
            if (chinaFund == top && OtherFund == top) {
                break;
            }
        }
        long t4 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 计算基金用时：" + OftenUtils.Arith.div((t4 - t3), 1000) + " 秒。");

        // Core authors: every entry in CoreAuthorMap is inserted, ranked by iteration order.
        int rank = 0;
        for (Map.Entry<String, Research> entry : CoreAuthorMap.entrySet()) {
            rank++;
            String enAuth = entry.getKey();
            putUnitOrAuthAndRankMap(unitOrAuthAndRankMap, enAuth, "核心作者", rank, code, specialist, jourId);
            List<String> authList = new ArrayList<>();
//            if (authIdAndAuthMap.containsKey(idAndAuth)) {
//                authList = authIdAndAuthMap.get(idAndAuth);
//            } else {
//                authList.add(entry.getKey());
//            }
            authList.add(entry.getKey());
            PublicUtils.insertTable(saveTable, entry.getKey(), JSONUtil.toJsonStr(entry.getValue()), code, "核心作者", rank, entry.getValue().getScore(), entry.getValue().getIdList().size(), specialist, jourId, "", "", tableList, isInsertTable);
        }
        long t5 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 核心用时：" + OftenUtils.Arith.div((t5 - t4), 1000) + " 秒。");
        // Yearly article counts: one row per year with its article-id list.
        rank = 0;
        for (Map.Entry<Integer, List<Integer>> entry : yearAndIdListMap.entrySet()) {
            rank++;
            PublicUtils.insertTable(saveTable, entry.getKey().toString(), JSONUtil.toJsonStr(entry.getValue()), code, "年发文量", rank, 0.0, entry.getValue().size(), specialist, jourId, "", "", tableList, isInsertTable);
        }
        long t6 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 年发文量用时：" + OftenUtils.Arith.div((t6 - t5), 1000) + " 秒。");
        // Yearly citation counts: one row per year with its total times-cited.
        rank = 0;
        for (Map.Entry<Integer, Integer> entry : yearAndTcMap.entrySet()) {
            rank++;
            PublicUtils.insertTable(saveTable, entry.getKey().toString(), entry.getValue().toString(), code, "年被引量", rank, 0.0, entry.getValue(), specialist, jourId, "", "", tableList, isInsertTable);
        }
        long t7 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 年被引量用时：" + OftenUtils.Arith.div((t7 - t6), 1000) + " 秒。");
        long t8 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 输出部分总用时：" + OftenUtils.Arith.div((t8 - t0), 1000) + " 秒。");

    }

    // Builds a TypeAndRank record for the given unit or author name and stores it under that name
    // in unitOrAuthAndRankMap (overwriting any previous entry for the same name).
    public static void putUnitOrAuthAndRankMap(Map<String, PublicEntity.TypeAndRank> unitOrAuthAndRankMap, String name, String type, Integer rank, String code, String specialist, Integer jourId) {
        PublicEntity.TypeAndRank record = new PublicEntity.TypeAndRank();
        record.setType(type);
        record.setRank(rank);
        record.setCode(code);
        record.setSpecialist(specialist);
        record.setJid(jourId);
        unitOrAuthAndRankMap.put(name, record);
    }

    // Filters out unit names whose normalization is unreliable: null, shorter than four
    // characters, or the generic name "university hospital". Returns true when the unit should be
    // kept, false when it should be removed.
    public static boolean isRemoveUnit(String unit) {
        return unit != null
                && unit.length() >= 4
                && !unit.equals("university hospital");
    }

    // Computes the per-article average score for each fund or unit under the given metric type
    // (e.g. "unit_citation_num", "fund_zky_dalei"). Each value list holds one number per article
    // (a citation count or a partition value); the result maps name -> sum / count.
    //
    // Fix: the original iterated EVERY outer-map key in a parallel stream just to find the single
    // entry whose key equals "type"; map keys are unique, so a direct get(type) is equivalent and
    // avoids both the full scan and the needless parallelism.
    public static Map<String, Double> getSum(Map<String, Map<String, List<Double>>> fundAndUnitScoreMap, String type) {
        Map<String, Double> strDouMap = new ConcurrentHashMap<>();
        Map<String, List<Double>> unitListMap = fundAndUnitScoreMap.get(type);
        if (unitListMap == null) {
            // No data recorded for this metric type: empty result, matching the original's
            // behavior when no outer key equalled "type".
            return strDouMap;
        }
        for (Map.Entry<String, List<Double>> entry : unitListMap.entrySet()) {
            List<Double> scores = entry.getValue();
            double sum = 0.0;
            for (Double score : scores) {
                sum += score;
            }
            if (!scores.isEmpty()) {
                strDouMap.put(entry.getKey(), OftenUtils.Arith.div(sum, scores.size()));
            }
        }
        return strDouMap;
    }

    // 主方法。1、求单位和基金的篇均得分，包含：被引、分区、排行榜。2、求各参数的最大得分（基金和单位部分，按在本篇论文中的累加）。3、获取每一篇论文的得分。4、基于每一篇论文的得分，计算热点、主要机构、作者等，然后输出。新词的计算是近一年内的词，热点的计算是近三年内的词。
    public static Result getFundAndUnitScore(boolean isPm, Map<String, Map<String, List<Double>>> fundAndUnitScoreMap,
                                             Map<Integer, SubjectDatabaseWord.ArticleInfo> allArticleInfoMapAllYearBefore,
                                             Map<Integer, SubjectDatabaseWord.ArticleInfo> articleInfoMap, Map<String, RankScore> rankScoreMap,
                                             Double startYear, Integer top, boolean isCoWord) {
        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        long t0 = System.currentTimeMillis();
        // 区分新词与旧词的分水岭
        double oldKeyWordWatershed = startYear + 5;
        double lengMenWatershed = startYear + 3;
        double fullUnitTc = 0.5;
        double fullUnitZky = 0.5;
        double fullUnitRank = 0.5;
        double fullFundTc = 0.5;
        double fullFundZky = 0.5;
        double fullReference = 0.75;
        double fullCitation = 0.5;
        double fullZkyDaLei = 1.5;
        double fullAltmetric = 0.25;
        double fullScieIf = 0.5;
        double fullTc = 2.0;
        double fullPubDate = 2.0;
        // 单位、基金的被引和分区，是篇均得分，而排行榜只有一个排行榜得分。
        Map<String, Double> unitTcArticleScoreMap = getSum(fundAndUnitScoreMap, "unit_citation_num");
        Map<String, Double> unitZkyArticleScoreMap = getSum(fundAndUnitScoreMap, "unit_zky_dalei");
        Map<String, Double> unitRankUnivScoreMap = new ConcurrentHashMap<>();
        Map<String, Double> unitRankHospScoreMap = new ConcurrentHashMap<>();
        Map<String, Double> unitRankFuDanScoreMap = new ConcurrentHashMap<>();
        Map<String, Double> unitRankXiaoYouHuiScoreMap = new ConcurrentHashMap<>();
        getRankScore(rankScoreMap, unitTcArticleScoreMap, unitRankUnivScoreMap, unitRankHospScoreMap, unitRankFuDanScoreMap, unitRankXiaoYouHuiScoreMap);
        Map<String, Double> fundTcArticleScoreMap = getSum(fundAndUnitScoreMap, "fund_citation_num");
        Map<String, Double> fundZkyArticleScoreMap = getSum(fundAndUnitScoreMap, "fund_zky_dalei");
        long t1 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 篇均用时：" + OftenUtils.Arith.div((t1 - t0), 1000) + " 秒。");

        // 获取单位和基金，按篇计算（即每篇中可能有多个单位或基金，获取其累加值）最大值，并插入两个Map中。
        Map<Integer, Double> idAndUnitTcScoreMap = new ConcurrentHashMap<>();
        Map<Integer, Double> idAndUnitZkyScoreMap = new ConcurrentHashMap<>();
        Map<Integer, Double> idAndUnitRankScoreMap = new ConcurrentHashMap<>();
        Map<Integer, Double> idAndFundTcScoreMap = new ConcurrentHashMap<>();
        Map<Integer, Double> idAndFundZkyScoreMap = new ConcurrentHashMap<>();

        // 获取各单位和基金的总得分，以及各单位和基金相对应的论文ID号集合。此部分用于主要的发文机构和赞助基金，在这里只选取中科院分区的累加分，得分最多的显示出来。
        Map<String, Double> unitAndScoreMap = new ConcurrentHashMap<>();
        Map<String, Double> fundAndScoreMap = new ConcurrentHashMap<>();
        Map<String, Set<Integer>> unitAndIdListMap = new ConcurrentHashMap<>();
        Map<String, Set<Integer>> fundAndIdListMap = new ConcurrentHashMap<>();
        Set<String> oldKeyWordSet = new ConcurrentHashSet<>();
        // 用于存放当前年以前所有词
        Set<String> oldKeyWordSetAllYearBefore = new ConcurrentHashSet<>();
        Set<String> lengMenSet = new ConcurrentHashSet<>();
        Map<Integer, Set<String>> idAndKeyWordSetMap = new ConcurrentHashMap<>();

        // 共词的集合（键为共词A，值为与共词A共现出现的共词B，以及共同出现的次数），以及共词组与论文id号的集合（键为共词A与B的集合，值为共同出现的论文ID号的集合）
        Map<String, Map<String, Integer>> coWordMap = new ConcurrentHashMap<>();
        Map<List<String>, Set<Integer>> coWordAndIdMap = new ConcurrentHashMap<>();
        Map<String, Map<String, Integer>> authCoWordMap = new ConcurrentHashMap<>();
        Map<List<String>, Set<Integer>> authCoWordAndIdMap = new ConcurrentHashMap<>();
        Map<String, Map<String, Integer>> unitCoWordMap = new ConcurrentHashMap<>();
        Map<List<String>, Set<Integer>> unitCoWordAndIdMap = new ConcurrentHashMap<>();
        Map<String, Map<String, Integer>> fundCoWordMap = new ConcurrentHashMap<>();
        Map<List<String>, Set<Integer>> fundCoWordAndIdMap = new ConcurrentHashMap<>();

        // 获取各参数的最大值部分，这两分为两种情况，一种是需要先累加起来再求的（包含：unitTcScore、fundTcScore、unitZkyScore、fundZkyScore、unitRankScore，即程序中的两个for循环），另一种是可以直接获取的。
        double maxAltmetric = 0.0;
        double maxTimeCited = 0.0;
        double maxPubDate = 0.0;
        double maxZkyDaLei = 0.0;
        double maxScieIf = 0.0;
        double maxCitationQuota = 0.0;
        double maxReferenceQuota = 0.0;
        double maxUnitTcScore = 0.0;
        double maxFundTcScore = 0.0;
        double maxUnitZkyScore = 0.0;
        double maxFundZkyScore = 0.0;
        double maxUnitRankScore = 0.0;

        // 存储
        if (allArticleInfoMapAllYearBefore != null) {
            for (Map.Entry<Integer, SubjectDatabaseWord.ArticleInfo> entry : allArticleInfoMapAllYearBefore.entrySet()) {
                ArticleInfo articleInfo = entry.getValue();
                Set<String> keyWordSet = getKeyWordSet(articleInfo.getEnKwMap(), articleInfo.getCnKwList());
                oldKeyWordSetAllYearBefore.addAll(keyWordSet);
            }
        }


        for (Map.Entry<Integer, SubjectDatabaseWord.ArticleInfo> entry : articleInfoMap.entrySet()) {
            int aId = entry.getKey();
            ArticleInfo articleInfo = entry.getValue();
            Set<String> keyWordSet = getKeyWordSet(articleInfo.getEnKwMap(), articleInfo.getCnKwList());
            idAndKeyWordSetMap.put(aId, keyWordSet);
            double pubTime = articleInfo.getPubTime();
            // 如果allArticleInfoMapAllYearBefore 不是空则是只用于计算新词用的 需要排除
            if (pubTime < lengMenWatershed) {
                // 存放了近三年之前的
                lengMenSet.addAll(keyWordSet);
            }


            List<String> unitList = articleInfo.getUnitList();
            List<String> fundList = articleInfo.getFundList();
            double unitTcScore = 0.0;
            double fundTcScore = 0.0;
            double unitZkyScore = 0.0;
            double fundZkyScore = 0.0;
            double unitRankScore = 0.0;
            for (String aUnit : unitList) {
                OftenUtils.putStrSetInt(aUnit, aId, unitAndIdListMap);
                double zky = PublicUtils.addSumDouble(unitZkyArticleScoreMap, aUnit, 0.0);
                unitAndScoreMap.compute(aUnit, (key, value) -> value != null ? (value + zky) : zky);//该单位的分区得分之和
                unitTcScore = PublicUtils.addSumDouble(unitTcArticleScoreMap, aUnit, unitTcScore);
                unitZkyScore = PublicUtils.addSumDouble(unitZkyArticleScoreMap, aUnit, unitZkyScore);
                unitRankScore = PublicUtils.addSumDouble(unitRankUnivScoreMap, aUnit, unitRankScore);
                unitRankScore = PublicUtils.addSumDouble(unitRankHospScoreMap, aUnit, unitRankScore);
                unitRankScore = PublicUtils.addSumDouble(unitRankFuDanScoreMap, aUnit, unitRankScore);
                unitRankScore = PublicUtils.addSumDouble(unitRankXiaoYouHuiScoreMap, aUnit, unitRankScore);
            }
            maxUnitTcScore = OftenUtils.getMaxDouValue(maxUnitTcScore, unitTcScore);
            maxUnitZkyScore = OftenUtils.getMaxDouValue(maxUnitZkyScore, unitZkyScore);
            maxUnitRankScore = OftenUtils.getMaxDouValue(maxUnitRankScore, unitRankScore);
            idAndUnitTcScoreMap.put(aId, unitTcScore);
            idAndUnitZkyScoreMap.put(aId, unitZkyScore);
            idAndUnitRankScoreMap.put(aId, unitRankScore);
            for (String aFund : fundList) {
                // 是否共词，在首页状态时，不要共词
                if (isCoWord) {
                    for (String bFund : fundList) {
                        if (!aFund.equals(bFund)) {
                            List<String> list = OftenUtils.sortList(aFund, bFund);
                            OftenUtils.putListStrListInt(list, aId, fundCoWordAndIdMap);
                            OftenUtils.putStrStrInt(aFund, bFund, fundCoWordMap);
                        }
                    }
                }
                OftenUtils.putStrSetInt(aFund, aId, fundAndIdListMap);
                double zky = PublicUtils.addSumDouble(fundZkyArticleScoreMap, aFund, 0.0);
                fundAndScoreMap.compute(aFund, (key, value) -> value != null ? (value + zky) : zky);//该基金的分区得分之和
                fundTcScore = PublicUtils.addSumDouble(fundTcArticleScoreMap, aFund, fundTcScore);
                fundZkyScore = PublicUtils.addSumDouble(fundZkyArticleScoreMap, aFund, fundZkyScore);
            }
            maxFundTcScore = OftenUtils.getMaxDouValue(maxFundTcScore, fundTcScore);
            maxFundZkyScore = OftenUtils.getMaxDouValue(maxFundZkyScore, fundZkyScore);
            idAndFundTcScoreMap.put(aId, fundTcScore);
            idAndFundZkyScoreMap.put(aId, fundZkyScore);
            // 直接获取最大值部分
            maxAltmetric = OftenUtils.getMaxDouValue(maxAltmetric, articleInfo.getAltmetric());
            maxTimeCited = OftenUtils.getMaxDouValue(maxTimeCited, OftenUtils.strToDouble(articleInfo.getTimeCited().toString()));
            double pubDate = articleInfo.getPubTime();
            maxPubDate = OftenUtils.getMaxDouValue(maxPubDate, pubDate);
            maxZkyDaLei = OftenUtils.getMaxDouValue(maxZkyDaLei, articleInfo.getZkyDaLeiDou());
            maxScieIf = OftenUtils.getMaxDouValue(maxScieIf, articleInfo.getScieIf());
            maxCitationQuota = OftenUtils.getMaxDouValue(maxCitationQuota, articleInfo.getCitationQuota());
            maxReferenceQuota = OftenUtils.getMaxDouValue(maxReferenceQuota, articleInfo.getReferenceQuota());
        }
        long t2 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " 最大用时：" + OftenUtils.Arith.div((t2 - t1), 1000) + " 秒。");

        // 获取每一篇文章的得分部分，并产生核心作者、研究热点、主要的基金和单位、新词发现等的基础数据，这些都是基于第一篇论文的得分。如果出版时间大于设定的开始计算年，给予一定的分值，如果不大于，则不给分。
        Map<String, Set<Integer>> authAndIdListMap = new ConcurrentHashMap<>();//各个作者出现在论文ID中的集合
        Map<String, Double> authAndScoreMap = new ConcurrentHashMap<>();//各个作者的得分
        Map<String, Set<Integer>> kwAndIdListMap = new ConcurrentHashMap<>();//各个关键词出现在论文ID中的集合
        Map<String, Double> kwAndScoreMap = new ConcurrentHashMap<>();//各个关键词的得分

        Map<Integer, List<Integer>> yearAndIdListMap = new ConcurrentHashMap<>();//每年的发文量
        Map<Integer, Integer> yearAndTcMap = new ConcurrentHashMap<>();//每年的被引总次数

        double finalMaxAltmetric = maxAltmetric;
        double finalMaxTimeCited = maxTimeCited;
        double finalMaxZkyDaLei = maxZkyDaLei;
        double finalMaxScieIf = maxScieIf;
        double finalMaxCitationQuota = maxCitationQuota;
        double finalMaxReferenceQuota = maxReferenceQuota;
        double finalMaxUnitTcScore = maxUnitTcScore;
        double finalMaxUnitZkyScore = maxUnitZkyScore;
        double finalMaxUnitRankScore = maxUnitRankScore;
        double finalMaxFundZkyScore = maxFundZkyScore;
        double finalMaxPubDate = maxPubDate;
        articleInfoMap.keySet().parallelStream().forEach(aId -> {
            ArticleInfo articleInfo = articleInfoMap.get(aId);
            int year = articleInfo.getPubYear();
            int tc = articleInfo.getTimeCited();
            if (year > 1000 && year < 3000) {
                OftenUtils.putIntListInt(yearAndIdListMap, year, aId);
                if (tc > 0) {
                    yearAndTcMap.compute(year, (key, value) -> value != null ? (value + tc) : tc);
                }
            }
            double pubDate = 0.0;
            double altmetric = PublicUtils.countScore(articleInfo.getAltmetric(), fullAltmetric, finalMaxAltmetric);
            double timeCited = PublicUtils.countScore(OftenUtils.strToDouble(articleInfo.getTimeCited().toString()), fullTc, finalMaxTimeCited);
            double zkyDaLei = PublicUtils.countScore(articleInfo.getZkyDaLeiDou(), fullZkyDaLei, finalMaxZkyDaLei);
            double scieIf = PublicUtils.countScore(articleInfo.getScieIf(), fullScieIf, finalMaxScieIf);
            double citationQuota = PublicUtils.countScore(articleInfo.getCitationQuota(), fullCitation, finalMaxCitationQuota);
            double referenceQuota = PublicUtils.countScore(articleInfo.getReferenceQuota(), fullReference, finalMaxReferenceQuota);
            double unitTcScore = PublicUtils.countScore(OftenUtils.getMapValue(idAndUnitTcScoreMap, aId), fullUnitTc, finalMaxUnitTcScore);
            double unitZkyScore = PublicUtils.countScore(OftenUtils.getMapValue(idAndUnitZkyScoreMap, aId), fullUnitZky, finalMaxUnitZkyScore);
            double unitRankScore = PublicUtils.countScore(OftenUtils.getMapValue(idAndUnitRankScoreMap, aId), fullUnitRank, finalMaxUnitRankScore);
            double fundTcScore = PublicUtils.countScore(OftenUtils.getMapValue(idAndFundTcScoreMap, aId), fullFundTc, finalMaxUnitTcScore);
            double fundZkyScore = PublicUtils.countScore(OftenUtils.getMapValue(idAndFundZkyScoreMap, aId), fullFundZky, finalMaxFundZkyScore);
            double pubTime = articleInfo.getPubTime();
            if (pubTime > startYear) {
                pubDate = PublicUtils.countScore((pubTime - startYear), fullPubDate, finalMaxPubDate);
            }
            double articleScore = pubDate + altmetric + timeCited + zkyDaLei + scieIf + citationQuota + referenceQuota + unitTcScore + unitZkyScore + unitRankScore + fundTcScore + fundZkyScore;

            // 将所有的中、英文关键词保存到Set中，其中，en_kw_map的要值，即斯坦福后的数据，且全部转为小写。
            if (idAndKeyWordSetMap.containsKey(aId)) {
                Set<String> set = idAndKeyWordSetMap.get(aId);
                Set<String> keyWordSet = new ConcurrentHashSet<>();
                for (String s : set) {
                    if (isPm) {
                        keyWordSet.add(s);
                    } else {
                        if (!lengMenSet.contains(s)) {
                            keyWordSet.add(s);
                        }
                    }

                }
                for (String aKeyWord : keyWordSet) {
                    // 是否共词，在首页状态时，不要共词
                    if (isCoWord) {
                        for (String bKeyWord : keyWordSet) {
                            if (!aKeyWord.equals(bKeyWord)) {
                                List<String> list = OftenUtils.sortList(aKeyWord, bKeyWord);
                                OftenUtils.putListStrListInt(list, aId, coWordAndIdMap);
                                OftenUtils.putStrStrInt(aKeyWord, bKeyWord, coWordMap);
                            }
                        }
                    }
                    if (oldKeyWordSetAllYearBefore != null && oldKeyWordSetAllYearBefore.size() > 0) {
                        // 则是取得最近三年值 需要独立判断
                        if (!oldKeyWordSetAllYearBefore.contains(aKeyWord)) {
                            oldKeyWordSet.add(aKeyWord);
                        }
                    } else {
                        if (pubTime < oldKeyWordWatershed) {
                            oldKeyWordSet.add(aKeyWord);
                        }
                    }

                    kwAndScoreMap.compute(aKeyWord, (key, value) -> value != null ? (value + articleScore) : articleScore);
                    OftenUtils.putStrSetInt(aKeyWord, aId, kwAndIdListMap);
                }
            }

            Set<String> authSet = new HashSet<>();
            List<String> enAuthList = articleInfo.getEnAuthList();
            for (String enAuth : enAuthList) {
                String s = PublicUtils.reName(enAuth);
                if (s.length() > 3) {
                    authSet.add(PublicUtils.reName(enAuth));
                }
            }
            for (String aAuth : authSet) {
                OftenUtils.putStrSetInt(aAuth, aId, authAndIdListMap);
                authAndScoreMap.compute(aAuth, (key, value) -> value != null ? (value + articleScore) : articleScore);
            }
        });

        long t3 = System.currentTimeMillis();
//        System.out.println(format.format(new Date()) + " 每篇得分：" + OftenUtils.Arith.div((t3 - t2), 1000) + " 秒。");

        // 输出研究热点
        Map<String, Research> researchFocusMap = getResearchFocus(kwAndIdListMap, kwAndScoreMap, coWordMap, coWordAndIdMap, top, oldKeyWordSet, "all", 3);
        long t4 = System.currentTimeMillis();
//        System.out.println(format.format(new Date()) + " 研究热点：" + OftenUtils.Arith.div((t4 - t3), 1000) + " 秒。");

        // 输出主要的发文机构和赞助基金
        Map<String, Research> focusUnitAndIdMap = getResearchFocus(unitAndIdListMap, unitAndScoreMap, unitCoWordMap, unitCoWordAndIdMap, top, oldKeyWordSet, "all", 20);
        Map<String, Research> focusFundAndIdMap = getResearchFocus(fundAndIdListMap, fundAndScoreMap, fundCoWordMap, fundCoWordAndIdMap, top, oldKeyWordSet, "all", 10);
        long t5 = System.currentTimeMillis();
//        System.out.println(format.format(new Date()) + " 发文机构：" + OftenUtils.Arith.div((t5 - t4), 1000) + " 秒。");

        // 输出新词发现
        Map<String, Research> newWordDiscoveryMap = getResearchFocus(kwAndIdListMap, kwAndScoreMap, coWordMap, coWordAndIdMap, top, oldKeyWordSet, "new", 3);
        long t6 = System.currentTimeMillis();
//        System.out.println(format.format(new Date()) + " 新词发现：" + OftenUtils.Arith.div((t6 - t5), 1000) + " 秒。");

        // 输出高产作者
        Map<String, Research> CoreAuthorMap = getResearchFocus(authAndIdListMap, authAndScoreMap, authCoWordMap, authCoWordAndIdMap, top, oldKeyWordSet, "all", 1);
        long t7 = System.currentTimeMillis();
//        System.out.println(format.format(new Date()) + " 核心作者：" + OftenUtils.Arith.div((t7 - t6), 1000) + " 秒。");

        Result result = new Result();
        result.setCoreAuthorMap(CoreAuthorMap);
        result.setFocusFundAndIdMap(focusFundAndIdMap);
        result.setFocusUnitAndIdMap(focusUnitAndIdMap);
        result.setNewWordDiscoveryMap(newWordDiscoveryMap);
        result.setResearchFocusMap(researchFocusMap);
        result.setYearAndIdListMap(OftenUtils.mapSortByKeyListInt(yearAndIdListMap));
        result.setYearAndTcMap(OftenUtils.mapSortByKeyInt(yearAndTcMap));
        long t8 = System.currentTimeMillis();
        System.out.println(format.format(new Date()) + " getFundAndUnitScore方法，总共用时：" + OftenUtils.Arith.div((t8 - t0), 1000) + " 秒。");

        return result;
    }

    // Method: sort the candidates by score and walk the top entries (aTop); for each subject keyword take the top 10 of
    // its co-word counts (bTop); sort [aKeyWord, bKeyWord] into a list and, when coWordAndIdMap contains that list, pull
    // out the set of paper ids where the two words co-occur.
    // beiShu ("multiplier") is how many extra entries to keep: units are split four ways (Chinese universities/hospitals,
    // other universities/hospitals) so 20x; funds are split two ways so 10x; hot spots and new words are merged later so 3x;
    // authors need no surplus, so 1x.
    public static Map<String, Research> getResearchFocus(Map<String, Set<Integer>> kwAndIdListMap,
                                                         Map<String, Double> kwAndScoreMap,
                                                         Map<String, Map<String, Integer>> coWordMap,
                                                         Map<List<String>, Set<Integer>> coWordAndIdMap,
                                                         Integer top, Set<String> oldKeyWordSet, String type, Integer beiShu) {
        Map<String, Research> result = new LinkedHashMap<>();
        int limit = top * beiShu;
        int taken = 0;
        // Oversample by 300x so that "new"-word filtering below still leaves enough candidates.
        Map<String, Double> rankedByScore = OftenUtils.mapSortValueDouLen(kwAndScoreMap, top * 300);
        for (Map.Entry<String, Double> candidate : rankedByScore.entrySet()) {
            String keyWord = candidate.getKey();
            // In "new"-word mode, any keyword already present in the old-word set is skipped; "all" mode keeps everything.
            if (type.equals("new") && oldKeyWordSet.contains(keyWord)) {
                continue;
            }
            Research focus = new Research();
            focus.setScore(OftenUtils.takeFourDigits(candidate.getValue()));
            // Paper-id set of the subject keyword itself.
            if (kwAndIdListMap.containsKey(keyWord)) {
                focus.setIdList(kwAndIdListMap.get(keyWord));
            }
            Map<String, Set<Integer>> coWordIdListMap = new LinkedHashMap<>();
            if (coWordMap.containsKey(keyWord)) {
                // Only the top-10 co-words of the subject keyword are needed.
                Map<String, Integer> topCoWords = OftenUtils.mapSortValueIntLen(coWordMap.get(keyWord), 10);
                for (String coWord : topCoWords.keySet()) {
                    List<String> pair = OftenUtils.sortList(keyWord, coWord);
                    if (coWordAndIdMap.containsKey(pair)) {
                        coWordIdListMap.put(coWord, coWordAndIdMap.get(pair));
                    }
                }
            }
            focus.setCoWordMap(coWordIdListMap);
            result.put(keyWord, focus);
            taken++;
            if (taken == limit) {
                break;
            }
        }
        return result;
    }

    // Normalizes the many spellings of COVID-19 (and general dash/whitespace noise) onto the single canonical
    // key word "covid 19"; can also be used to correct other non-standard keywords.
    public static String reKeyWord(String keyWord) {
        String r = "covid 19";
        // Locale.ROOT keeps lower-casing stable regardless of the JVM default locale (e.g. the Turkish dotless-I problem).
        String s = keyWord.toLowerCase(Locale.ROOT).replace("-", " ").replaceAll(" +", " ");
        // Replacement order matters: longer phrases come before their sub-phrases so they are not broken up early.
        String[] aliases = {"the coronavirus disease 2019 (covid 19)", "severe coronavirus disease 2019", "coronavirus 2019",
                "sars cov 2", "sar cov 2", "coronavirus disease 2019", "2019 ncov", "新型冠状病毒肺炎", "covid 19 (covid 19)"};
        for (String alias : aliases) {
            s = s.replace(alias, r);
        }
        return s.trim();
    }

    // Builds the keyword set from a Map (English keywords) and a List (Chinese keywords). A count of all keywords is kept
    // elsewhere — sorted and capped at the top 30,000 — for analyzing and curating stop words.
    public static Set<String> getKeyWordSet(Map<String, String> enKwMap, List<String> cnKwList) {
        Set<String> keyWordSet = new ConcurrentHashSet<>();
        // Both sources go through the identical normalize-and-filter pipeline (was duplicated inline before).
        for (String value : enKwMap.values()) {
            addCleanKeyWord(value, keyWordSet);
        }
        for (String s : cnKwList) {
            addCleanKeyWord(s, keyWordSet);
        }
        return keyWordSet;
    }

    // Normalizes one raw keyword (COVID canonicalization, subheading/publication-type markers, double spaces),
    // applies stop-word filtering, and adds it to the set when more than one character survives.
    private static void addCleanKeyWord(String raw, Set<String> keyWordSet) {
        String str = reKeyWord(raw).replace("[subheading ]", " ").replace("[publication type ]", " ").replace("  ", " ").trim();
        String keyWord = stopWord(str);
        if (keyWord.length() > 1) {
            keyWordSet.add(keyWord);
        }
    }

    // Generic terms that carry no topical meaning; membership is identical to the original equals() chain.
    private static final List<String> STOP_WORDS = Arrays.asList("human", "female", "male", "animal", "adult", "aged", "study", "effect", "adolescent", "child", "mouse",
            "rat", "between", "patient", "method", "research", "analysis of", "analysis", "induce", "system", "the effect");

    // Stop-word handling: strips residual markup, drops generic terms, and keeps only keywords long enough to be
    // meaningful (Chinese: > 2 chars; otherwise multi-word, hyphenated, or > 3 chars). Returns "" for rejected input.
    public static String stopWord(String s) {
        s = s.replaceAll(" +", " ").toLowerCase(Locale.ROOT).replace("< i class = \"italic\" > ", "").replace(" </i>", "").replace("[subheading ]", "").replace("[publication type ]", "").trim();
        if (s.contains("<sub> ") && s.contains(" </sub>")) {
            s = s.replace("<sub> ", "").replace(" </sub>", "");
        }
        if (STOP_WORDS.contains(s)) {
            return "";
        }
        if (OftenUtils.isContainChinese(s) && s.length() > 2) {
            return s;
        }
        if (s.contains(" ") || s.contains("-") || s.length() > 3) {
            return s;
        }
        return "";
    }

    // Copies the four ranking scores (world university, world hospital, FuDan, alumni association) into the four output
    // maps for every unit that appears in unitTimeCited. Only the unit names (key set) of unitTimeCited are used here.
    public static void getRankScore(Map<String, RankScore> rankScoreMap, Map<String, Double> unitTimeCited, Map<String, Double> unitUnivScore, Map<String, Double> unitHospScore, Map<String, Double> unitFuDanScore, Map<String, Double> unitXiaoYouHuiScore) {
        // Iterate the key set directly: the cited-count values are never read by this method.
        for (String unit : unitTimeCited.keySet()) {
            RankScore rankScore = rankScoreMap.get(unit);
            if (rankScore != null) {
                unitUnivScore.put(unit, rankScore.getUnivScore());
                unitHospScore.put(unit, rankScore.getHospScore());
                unitFuDanScore.put(unit, rankScore.getFuDanScore());
                unitXiaoYouHuiScore.put(unit, rankScore.getXiaoYouHuiScore());
            }
        }
    }

    // World-university score spans [65.7, 100]; world-hospital [0.6139, 0.9843]; FuDan hospital rank 1 (best) to 100;
    // alumni-association rank 1 (best) to 504. Each is normalized: world university to a 200-point scale, the other three
    // to 100-point scales, minimum 1; a unit absent from a ranking gets 0.1. Returns specialist -> (unit name -> RankScore).
    public static Map<String, Map<String, RankScore>> getUnitRank(String ruleTable, Map<String, Map<String, Double>> fuDanSpecialistMap) throws SQLException {
        String select = "id`,`branch`,`hospital`,`univ_score`,`hosp_score`,`fudan_score`,`xiao_you_hui_score`,`fudan_name`,`univ_alternative`,`hosp_alternative`,`xiao_you_hui_alternative`,`fudan_alternative";
        // The ranking table does not depend on the specialist, so load it once instead of re-querying per specialist.
        // NOTE(review): ruleTable is concatenated into the SQL; make sure it never comes from untrusted input.
        List<Entity> tableData = Db.use().query("select `" + select + "` from " + ruleTable);
        Map<String, Map<String, RankScore>> result = new ConcurrentHashMap<>();
        for (Map.Entry<String, Map<String, Double>> entity : fuDanSpecialistMap.entrySet()) {
            String specialist = entity.getKey();
            Map<String, RankScore> rankScoreMap = new ConcurrentHashMap<>();
            for (Entity entry : tableData) {
                if (null != entry) {
                    String branch = OftenUtils.getStrField(entry, "branch");
                    String hospital = OftenUtils.getStrField(entry, "hospital");
                    double univScore = 0.1;
                    if (OftenUtils.getStrField(entry, "univ_alternative").contains("1.")) {
                        univScore = OftenUtils.strToDouble(OftenUtils.getStrField(entry, "univ_score"));
                        if (univScore >= 65.7 && univScore <= 100.0) {
                            // Linear map of [65.7, 100] onto [1, 200].
                            univScore = ((univScore - 65.7) * 200 + (100 - univScore) * 1.0) / (100 - 65.7);
                        }
                    }

                    double hospScore = 0.1;
                    if (OftenUtils.getStrField(entry, "hosp_alternative").contains("1.")) {
                        hospScore = OftenUtils.strToDouble(OftenUtils.getStrField(entry, "hosp_score"));
                        if (hospScore >= 0.6139 && hospScore <= 0.9843) {
                            // Linear map of [0.6139, 0.9843] onto [1, 100].
                            hospScore = ((hospScore - 0.6139) * 100 + (0.9843 - hospScore) * 1.0) / (0.9843 - 0.6139);
                        }
                    }

                    double fuDanScore = 0.1;
                    if (OftenUtils.getStrField(entry, "fudan_alternative").contains("1.")) {
                        fuDanScore = OftenUtils.strToDouble(OftenUtils.getStrField(entry, "fudan_score"));
                        String fuDanName = OftenUtils.getStrField(entry, "fudan_name");
                        if (fuDanScore >= 1.0 && fuDanScore <= 100.0) {
                            // Rank 1..100 becomes score 100..1 (smaller rank means higher score).
                            fuDanScore = Math.abs(fuDanScore - 101);
                        }
                        // Specialty reputation of the FuDan hospital ranking only: if this hospital appears under the given
                        // specialist, add that specialty score on top of the base score.
                        if (fuDanName.length() > 3 && fuDanSpecialistMap.containsKey(specialist) && fuDanSpecialistMap.get(specialist).containsKey(fuDanName)) {
                            fuDanScore += fuDanSpecialistMap.get(specialist).get(fuDanName);
                        }
                    }

                    double xiaoYouHuiScore = 0.1;
                    if (OftenUtils.getStrField(entry, "xiao_you_hui_alternative").contains("1.")) {
                        xiaoYouHuiScore = OftenUtils.strToDouble(OftenUtils.getStrField(entry, "xiao_you_hui_score"));
                        if (xiaoYouHuiScore >= 1 && xiaoYouHuiScore <= 504) {
                            // Invert rank 1..504, then map linearly onto [1, 100].
                            double shiJi = Math.abs(xiaoYouHuiScore - 505);
                            xiaoYouHuiScore = ((shiJi - 1) * 100 + (504 - shiJi) * 1.0) / (504 - 1);
                        }
                    }
                    RankScore rankScore = new RankScore();
                    rankScore.setBranch(branch);
                    rankScore.setHospital(hospital);
                    rankScore.setUnivScore(univScore);
                    rankScore.setHospScore(hospScore);
                    rankScore.setFuDanScore(fuDanScore);
                    rankScore.setXiaoYouHuiScore(xiaoYouHuiScore);
                    // Register the same score object under both the branch and the hospital name when they look valid.
                    if (branch.length() > 3) {
                        rankScoreMap.put(branch, rankScore);
                    }
                    if (hospital.length() > 3) {
                        rankScoreMap.put(hospital, rankScore);
                    }
                }
            }
            result.put(specialist, rankScoreMap);
        }
        return result;
    }

    // FuDan hospital ranking, "specialty reputation" rows only. Raw scores run from 0.942 to 11.757, mostly below 10, so
    // the simplest scheme is used: multiply by 20 and floor at 20. Map structure: specialty name -> hospital name -> score.
    // A no-weight placeholder entry "journal" is seeded for single-journal use.
    public static Map<String, Map<String, Double>> readFuDanRank(String fuDanTable) throws SQLException {
        Map<String, Map<String, Double>> specialistMap = new ConcurrentHashMap<>();
        Map<String, Double> placeholder = new ConcurrentHashMap<>();
        placeholder.put("journal", 0.0);
        specialistMap.put("journal", placeholder);
        String columns = "type`,`specialist_name`,`specialist_reputation`,`hospital_alias";
        for (Entity row : Db.use().query("select `" + columns + "` from " + fuDanTable)) {
            // Only "specialty reputation" (专科声誉) rows are relevant; skip everything else.
            if (!OftenUtils.getStrField(row, "type").equals("专科声誉")) {
                continue;
            }
            String specialist = OftenUtils.getStrField(row, "specialist_name");
            String hospName = OftenUtils.getStrField(row, "hospital_alias");
            double score = Math.max(20.0, OftenUtils.strToDouble(OftenUtils.getStrField(row, "specialist_reputation")) * 20);
            if (specialist.length() > 1 && hospName.length() > 3) {
                OftenUtils.putStrStrDou(specialist, hospName, specialistMap, score);
            }
        }
        return specialistMap;
    }

    // Substrings that mark an organization name as a hospital / clinical institution; hoisted so the list is built once
    // instead of on every call.
    private static final List<String> HOSP_MARKERS = Arrays.asList("hospital", "university health network", "癌症研究所", "癌症中心", "妇女保健院", "妇婴保健院", "妇幼保健院", "疾病预防控制中心", "临床中心", "肾病研究所", "卫生中心", "心肺研究所", "血液病研究所", "眼科中心", "医疗中心", "医学中心", "医院", "诊所");

    // Returns true when the (case-insensitive, trimmed) name contains any hospital marker; false for null input.
    public static Boolean isHosp(String str) {
        if (null == str) {
            return false;
        }
        // Locale.ROOT keeps lower-casing stable regardless of the JVM default locale.
        String name = str.toLowerCase(Locale.ROOT).trim();
        for (String marker : HOSP_MARKERS) {
            if (name.contains(marker)) {
                return true;
            }
        }
        return false;
    }

    // Substrings (provinces, major cities in pinyin with a trailing space, plus Chinese place names) that mark an
    // affiliation as Chinese; hoisted so the list is built once instead of on every call.
    private static final List<String> CHINA_MARKERS = Arrays.asList("anhui ", "beijing ", "changchun ", "changsha ", "chengdu ", "china ", "chongqing ", "fujian ", "fuzhou ", "gansu ", "guangdong ", "guangxi ", "guangzhou ", "guiyang ", "guizhou ", "haikou ", "hainan ", "hangzhou ", "harbin ",
            "hebei ", "hefei ", "heilongjiang ", "henan ", "hohhot ", "hong kong ", "hubei ", "hunan ", "inner mongolia ", "jiangsu ", "jiangxi ", "jilin ", "jinan ", "kunming ", "lanzhou ", "lhasa ", "liaoning ", "macao ", "nanchang ", "nanjing ", "nanning ",
            "ningxia ", "qinghai ", "shaanxi ", "shandong ", "shanghai ", "shanxi ", "shenyang ", "shijiazhuang ", "sichuan ", "taipei ", "taiwan ", "taiyuan ", "tianjin ", "tibet ", "urumqi ", "wuhan ", "xi'an ", "xining ", "xinjiang ", "yinchuan ", "yunnan ",
            "zhejiang ", "zhengzhou ", "中国", "中华人民共和国", "北京", "浙江", "天津", "安徽", "上海", "福建", "重庆", "江西", "香港", "山东", "澳门", "河南", "内蒙古", "湖北", "新疆", "湖南", "宁夏", "广东", "西藏", "海南", "广西", "四川", "河北", "贵州", "山西", "云南", "辽宁", "陕西",
            "吉林", "甘肃", "黑龙江", "青海", "江苏", "台湾", "杭州", "合肥", "福州", "南昌", "济南", "郑州", "呼和浩特", "武汉", "乌鲁木齐", "长沙", "银川", "广州", "拉萨", "海口", "南宁", "成都", "石家庄", "贵阳", "太原", "昆明", "沈阳", "西安", "长春", "兰州", "哈尔滨", "西宁", "南京", "台北",
            "解放军", "重大专项", "温州", "深圳");

    // Returns true when the (case-insensitive, trimmed) affiliation string contains any China marker; false for null input.
    public static Boolean isChina(String str) {
        if (null == str) {
            return false;
        }
        // Locale.ROOT keeps lower-casing stable regardless of the JVM default locale.
        String name = str.toLowerCase(Locale.ROOT).trim();
        for (String marker : CHINA_MARKERS) {
            if (name.contains(marker)) {
                return true;
            }
        }
        return false;
    }

    // A word group's "representative": the canonical word plus the set of member words merged under it.
    @Data
    public static class DaiBiao {
        private String word = "";//the canonical (representative) word of the group
        private Set<String> wordSet = new HashSet<>();//the variant words merged under this representative
    }

    // Entity for the overall output: the five ranked result maps plus the per-year publication and citation counts.
    @Data
    public static class Result {
        private Map<String, Research> researchFocusMap = new LinkedHashMap<>();//research hot spots
        private Map<String, Research> focusUnitAndIdMap = new LinkedHashMap<>();//main publishing institutions
        private Map<String, Research> focusFundAndIdMap = new LinkedHashMap<>();//main sponsoring funds
        private Map<String, Research> newWordDiscoveryMap = new LinkedHashMap<>();//newly discovered words
        // Renamed from CoreAuthorMap to follow lowerCamelCase; Lombok still generates get/setCoreAuthorMap for both
        // spellings, so existing callers are unaffected.
        private Map<String, Research> coreAuthorMap = new LinkedHashMap<>();//core (high-output) authors
        private Map<Integer, List<Integer>> yearAndIdListMap = new LinkedHashMap<>();//paper ids published per year
        private Map<Integer, Integer> yearAndTcMap = new LinkedHashMap<>();//total times cited per year
    }

    // Entity for one research hot spot (also reused for institutions, funds, and authors).
    @Data
    public static class Research {
        private Map<String, Set<Integer>> coWordMap = new ConcurrentHashMap<>();//co-word -> ids of papers where it co-occurs with the subject word
        private Set<Integer> idList = new HashSet<>();//paper ids of the subject word itself
        private Double score = 0.0;//accumulated score of the subject word
    }

    // Entity holding the four normalized ranking scores of one unit (see getUnitRank for the normalization rules).
    @Data
    public static class RankScore {
        private String branch = "";//branch name from the ranking table
        private String hospital = "";//hospital name from the ranking table
        private Double univScore = 0.0;//normalized world-university score (200-point scale; 0.1 when unranked)
        private Double hospScore = 0.0;//normalized world-hospital score (100-point scale; 0.1 when unranked)
        private Double fuDanScore = 0.0;//normalized FuDan hospital score, plus specialty reputation when present (0.1 when unranked)
        private Double xiaoYouHuiScore = 0.0;//normalized alumni-association score (100-point scale; 0.1 when unranked)
    }

    // Paper-information entity: one instance per article, merged from several source tables.
    @ProtobufClass
    @Data
    public static class ArticleInfo {
        private Integer aId = 0;//auto-increment primary key id in the paper relation table: basic_core_paper
        private Integer jid = 0;//unique journal id, i.e. the auto-increment primary key of help_jour_relational_table
        private Integer pubYear = 1900;//publication year of the paper
        private Set<String> corrAuthSet = new HashSet<>();//set of corresponding authors
        private Set<String> firstAuthSet = new HashSet<>();//set of first authors
        private Map<String, String> enKwMap = new HashMap<>();//English keywords of the paper: extracted from the title, the original keywords, the subject headings mapped from those keywords, and the subject headings themselves
        private List<String> cnKwList = new ArrayList<>();//Chinese keywords of the paper: extracted from the title plus the original keywords
        private Integer isBeida = 0;//1 if indexed by the Peking University core journal list, otherwise 0
        private Integer isTongJiYuan = 0;//1 if indexed as a statistical-source journal, otherwise 0
        private Integer isCscd = 0;//1 if indexed by CSCD, otherwise 0
        private Integer isPubmed = 0;//1 if indexed by PubMed, otherwise 0
        private Integer isCite = 0;//1 if indexed by the SJR prestige index, otherwise 0
        private Integer pmid = 0;//PMID number
        private Integer isZky = 0;//1 if indexed by the Chinese Academy of Sciences (CAS) journal list, otherwise 0
        private Integer zkyDaleiInt = 0;//best CAS major-category partition value: 1, 2, 3 or 4
        private List<String> zkySubject = new ArrayList<>();//CAS major-category names
        private Integer isZHua = 0;//1 if a Chinese Medical Association journal, otherwise 0 //is_zhong_hua
        private Integer isSixJ = 0;//1 if one of the "six major journals", otherwise 0 //is_six_j
        private Integer timeCited = 0;//number of citing papers, i.e. times cited //citation_num
        private double score = 0;//per-paper score: (currentYear - 2010) / 10 * impactFactor * timesCited * 1 / casPartition; years before 2010 count as 2010; if unpartitioned, core journals count as zone 5 and non-core as zone 6; a missing impact factor counts as 0.01, missing citations as 0.5
        private List<String> zkySmall = new ArrayList<>();//CAS minor-category partitions
        private List<String> fundList = new ArrayList<>();//funds backing this paper
        private List<String> pubMedSubject = new ArrayList<>();//PubMed journal subjects
        private String beiDaDaLei = "";//Peking-core major category
        private String beiDaXiaoLei = "";//Peking-core minor category
        private Integer isChina = 0;//presumably 1 when the paper/affiliation is Chinese — TODO confirm against the loader
        // The fields below were added when merging (specialty, unit scores and counts) so that one entity class serves both uses.
        private List<String> unitList = new ArrayList<>();//list of affiliations (units)
        private List<String> deptList = new ArrayList<>();//list of departments
        private Double pubTime = 0.0;//publication time: year combined with the publication month
        private List<String> enAuthList = new ArrayList<>();//list of English author names
        private List<String> cnAuthList = new ArrayList<>();//list of Chinese author names
        private Double altmetric = 0.0;//Altmetric score
        private Double zkyDaLeiDou = 0.0;//CAS partition converted to a decimal: 1->1.0, 2->0.5, 3->0.33, 4->0.25
        private Double scieIf = 0.0;//impact factor value
        private Double citationQuota = 0.0;//citation indicator
        private Double referenceQuota = 0.0;//reference indicator
        private List<SubjectDatabaseWord.DataInfo> dataInfoList = new ArrayList<>();//previously held in the result column of another table; merged into this one keeping only the core fields
    }

    // Simplified entity for the result column of the former data_auth_same_data table: only the fields this program uses.
    @ProtobufClass
    @Data
    public static class DataInfo {
        private String auth = "";//author name — presumably the English form; confirm against the source table
        private String cnAu = "";//Chinese author name
        private String str = "";//raw string from the source record — NOTE(review): exact meaning not visible here, confirm against data_auth_same_data
        private Set<String> hosp = new HashSet<>();//hospital names
        private Set<String> univ = new HashSet<>();//university names
        private Set<String> dept = new HashSet<>();//department names
        private Set<String> coll = new HashSet<>();//college names
    }


// end
}
