package qf.index;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.util.NumberUtil;
import cn.hutool.db.Db;
import cn.hutool.db.Entity;
import cn.hutool.json.JSON;
import cn.hutool.json.JSONArray;
import cn.hutool.json.JSONUtil;
import com.alibaba.fastjson.JSONObject;

import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;

public class ReadPm {

    /**
     * Entry point: loads the word -> category-code map (drug: 1; gene: 2; pathway: 3;
     * disease: 4) and then streams the PubMed article table into the summary table via
     * {@link #readPm}. Table-name locals that were never referenced have been removed.
     */
    public static void main(String[] args) throws SQLException {

        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        System.out.println(format.format(new Date()) + " 程序开始了~~~");

        int onePageNum = 1000; // page size for id-ranged table reads
        String limit = ""; // optional SQL suffix appended to paging queries, e.g. " limit 1" for a smoke test
//        String limit = " limit 1";

        // All tables live in the same schema prefix; only the tables actually read/written are kept.
        String newDataName = "scholar_25_01.";
        String summaryTable = newDataName + "pm_ut_aid_info_0120";
        String pmArticleTable = newDataName + "spider_pubmed_vip_extend";
        String wordAndCodeSetMapTable = newDataName + "word_and_code_set_map";

        // word -> set of category codes (drug: 1; gene: 2; pathway: 3; disease: 4)
        Map<String, Set<Integer>> wordAndCodeSetMap = Drug.getWordAndCodeSetMap(wordAndCodeSetMapTable, "id", "word`,`code_set", limit);

        String field = "pmid`,`keyword`,`article_title`,`ab_text`,`jour`,`pub_year`,`author_extend`,`pts`,`med_nlm_id`,`pub_date`,`doi`,`volume`,`issue`,`pages`,`mesh_headingy`,`lan";
        readPm(pmArticleTable, onePageNum, limit, "id", field, true, 10, summaryTable, wordAndCodeSetMap);

    }

    // field:vip`,`pmid`,`jour`,`article_title`,`volume`,`issue`,`pages`,`pub_date`,`pub_year`,`med_nlm_id`,`author_extend`,`pmcid`,`doi`,`keyword`,`ab_text`,`mesh_headingy`,`pts
    /**
     * Streams the PubMed article table in parallel id-ranged pages, cleans each article
     * (keywords, abstract HTML, authors, publication types, DOI, citation string, and
     * drug/gene/pathway/disease hits) and updates the matching row of {@code summaryTable}.
     * Only rows whose auto-id appears in the summaryTable mapping (id -> ncbi_id) are processed.
     * NOTE(review): table/column names are concatenated directly into SQL — assumed to be
     * trusted internal configuration, not user input; confirm before exposing externally.
     */
    public static void readPm(String pmArticleTable, int onePageNum, String limit, String autoId, String field, boolean print, int allTotal, String summaryTable, Map<String, Set<Integer>> wordAndCodeSetMap) throws SQLException {
        // pmArticleTable row id -> summaryTable auto-id (built from summaryTable.ncbi_id)
        Map<Integer, Integer> dataIdAndAidMap = getDataIdAndAidMap(summaryTable, "id", "ncbi_id");
        System.out.println("dataIdAndAidMap.size(): " + dataIdAndAidMap.size());
        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
        List<Entity> tableNum = Db.use().query("select count(1) as count from " + pmArticleTable);
        // HTML prefix PubMed wraps around the keyword list; stripped below together with the trailing </p>.
        String s = "<p> <strong class=\"sub-title\"> Keywords: </strong>";
        if (tableNum.get(0).getInt("count") < 1) {
            return;
        }
        Set<String> pinYinSet = PmUnit.getPinYinSet();
        int tableMax = Db.use().queryNumber("select max(" + autoId + ") from " + pmArticleTable).intValue();
        Collection<Integer> tableList = new ArrayList<>();
        // Page start ids: 0, onePageNum, 2*onePageNum, ... covering [0, tableMax].
        NumberUtil.appendRange(0, tableMax + onePageNum, onePageNum, tableList);
        AtomicInteger sumNum = new AtomicInteger(0);
        int showNum = Math.max((tableList.size() / allTotal), 1) + (tableList.size() % allTotal);
        AtomicInteger startNum = new AtomicInteger(1);
        tableList.parallelStream().forEach(startId -> {
            try {
                List<Entity> tableData = Db.use().query("select `" + autoId + "`,`" + field + "` from " + pmArticleTable + " where " + autoId + " between " + startId + " and " + (startId + onePageNum - 1) + limit);
                // Progress logging (println currently disabled); the counters still advance so the cadence is preserved.
                if (print && sumNum.getAndIncrement() % showNum == 0) {
//                    System.out.println("\033[31;1m" + format.format(new Date()) + " 读表现在开始：" + startNum.getAndIncrement() + " 总次数：" + allTotal + " 表名：" + pmArticleTable + Utils.printMemoryStr() + "\033[0m");
                }
                for (Entity entry : tableData) {
                    Integer dataId = entry.getInt(autoId);
                    // Skip articles that have no target row in the summary table.
                    if (!dataIdAndAidMap.containsKey(dataId)) {
                        continue;
                    }
                    Integer pmId = entry.getInt("pmid");
                    String keyword = Utils.getStrField(entry, "keyword");
                    // Unwrap the "<p> ... Keywords: ... </p>" markup around the raw keyword string.
                    keyword = keyword.startsWith(s) && keyword.endsWith("</p>") ? keyword.substring(s.length(), keyword.length() - "</p>".length()).trim() : keyword;
                    String title = Utils.getStrField(entry, "article_title");
                    String abText = Utils.getStrField(entry, "ab_text");
                    // Strip surrounding <p> tags and flatten sub-title markup to plain text.
                    abText = abText.endsWith("</p>") ? abText.substring(0, abText.length() - "</p>".length()).trim() : abText;
                    abText = abText.startsWith("<p>") ? abText.substring("<p>".length()).trim() : abText;
                    abText = abText.replace("<strong class=\"sub-title\">", " ").replace(" </strong>", " ").replace("</p><p>", " ").replaceAll(" +", " ").trim();
                    String journal = Utils.getStrField(entry, "jour");
                    String pubYear = Utils.getStrField(entry, "pub_year");
                    // Fall back to 1900 when the publication year is missing or malformed.
                    int year = (pubYear.trim().length() == 4 && NumberUtil.isInteger(pubYear.trim())) ? Integer.parseInt(pubYear.trim()) : 1900;
                    String authorExtend = entry.getStr("author_extend");
                    Set<String> mailSet = new HashSet<>();
                    List<String> authList = getPmAuthList(authorExtend, mailSet);
                    String pts = entry.getStr("pts");
                    List<En.PubType> pubTypes = JSONObject.parseArray((null == pts || pts.isEmpty()) ? "[]" : pts, En.PubType.class);
                    Set<String> ptSet = new HashSet<>(); // publication types
                    for (En.PubType pubType : pubTypes) {
                        String typeTitle = pubType.getTitle().trim();
                        if (!typeTitle.isEmpty()) {
                            ptSet.add(typeTitle);
                        }
                    }
                    String nlmId = Utils.getStrField(entry, "med_nlm_id");
                    int isCns = Utils.is6DaJournals(nlmId, journal, "", "");
                    String pubDate = Utils.getStrField(entry, "pub_date").replace("--", "-").trim();
                    pubDate = pubDate.endsWith("-") ? pubDate.substring(0, pubDate.length() - 1) : pubDate;
                    String di = Utils.dellEnd(Utils.getStrField(entry, "doi"));
                    String volume = Utils.getStrField(entry, "volume");
                    String issue = Utils.getStrField(entry, "issue");
                    String pages = Utils.getStrField(entry, "pages");
                    di = di.endsWith(".") ? di.substring(0, di.length() - 1) : di;
                    // Citation string in PubMed style: "Journal. date;volume(issue):pages. doi: ...".
                    String info = (journal + ". " + (pubDate + ";") + volume + (issue.isEmpty() ? ":" : "(" + issue + "):") + (pages + ". ") + (di.length() > 6 ? ("doi: " + di + ". ") : ""));
                    List<En.CategoryAndInfo> categoryAndInfoList = Drug.getDrug(title, keyword, abText, wordAndCodeSetMap);
                    List<En.C1Auth> pmC1ListSort = Utils.getPmC1ListSort(authorExtend, pinYinSet);
                    try {
                        Db.use().update(
                                Entity.create().set("pmId", pmId).set("doi", di).set("mesh", Utils.getStrField(entry, "mesh_headingy")).set("ab", abText)
                                        .set("language", Utils.getStrField(entry, "lan")).set("keyword", ReadUt.strToJson(keyword)).set("title", title).set("journal", journal)
                                        .set("c1_au", JSONUtil.toJsonStr(pmC1ListSort)).set("year", year).set("is_cns", isCns).set("pt", JSONUtil.toJsonStr(ptSet))
                                        .set("nlmId", nlmId.toUpperCase().trim()).set("auth", JSONUtil.toJsonStr(authList)).set("is_retraction", getIsRetraction(title, ptSet))
                                        .set("drug_gene_pathway_disease", JSONUtil.toJsonStr(categoryAndInfoList)).set("mail_list", JSONUtil.toJsonStr(mailSet)).set("info", Fusion.reInfo(info)),
                                Entity.create(summaryTable).set("id", dataIdAndAidMap.get(dataId)));
                    } catch (SQLException e) {
                        // Keep processing the remaining rows of this page even if one update fails.
                        e.printStackTrace();
                    }
                }
            } catch (SQLException e) {
                e.printStackTrace();
            }
        });
        dataIdAndAidMap.clear();
    }

    /**
     * Builds a map from business data id (the {@code field} column) to the summary row's
     * auto-increment id, reading the table in parallel id-ranged pages of 1000 rows.
     * Rows where either id is null or non-positive are skipped.
     */
    public static Map<Integer, Integer> getDataIdAndAidMap(String summaryTable, String autoId, String field) throws SQLException {
        int pageSize = 1000;
        Map<Integer, Integer> result = new ConcurrentHashMap<>();
        List<Entity> countRows = Db.use().query("select count(1) as count from " + summaryTable);
        if (countRows.get(0).getInt("count") <= 0) {
            return result;
        }
        int maxId = Db.use().queryNumber("select max(" + autoId + ") from " + summaryTable).intValue();
        Collection<Integer> pageStarts = new ArrayList<>();
        NumberUtil.appendRange(0, maxId + pageSize, pageSize, pageStarts);
        pageStarts.parallelStream().forEach(pageStart -> {
            try {
                String sql = "select `" + autoId + "`,`" + field + "` from " + summaryTable
                        + " where " + autoId + " between " + pageStart + " and " + (pageStart + pageSize - 1);
                for (Entity row : Db.use().query(sql)) {
                    Integer dataId = row.getInt(field);
                    Integer rowId = row.getInt(autoId);
                    if (null != dataId && dataId > 0 && null != rowId && rowId > 0) {
                        result.put(dataId, rowId);
                    }
                }
            } catch (SQLException e) {
                throw new RuntimeException(e);
            }
        });
        return result;
    }

    /**
     * Extracts de-duplicated affiliation strings (case-insensitive, longer than 6 chars,
     * trailing '.' stripped) from the PubMed author-extend JSON, and collects every token
     * that looks like an e-mail address into {@code mailSet}.
     */
    public static List<String> getPmAuMailList(String authorExtend, Set<String> mailSet) {
        List<String> affiliations = new ArrayList<>();
        if (null == authorExtend || !authorExtend.contains("Affiliation")) {
            return affiliations;
        }
        Set<String> seenLower = new HashSet<>();
        for (Object author : JSONUtil.parseArray(authorExtend)) {
            JSON node = JSONUtil.parse(author);
            Object affNode = node.getByPath("Affiliation");
            if (null == affNode) {
                continue;
            }
            List<String> units = com.alibaba.fastjson.JSON.parseArray(affNode.toString(), String.class);
            for (String unit : units) {
                unit = unit.endsWith(".") ? unit.substring(0, unit.length() - ".".length()).trim() : unit.trim();
                // De-duplicate case-insensitively; keep only reasonably long affiliation strings.
                if (!seenLower.contains(unit.toLowerCase()) && unit.length() > 6) {
                    seenLower.add(unit.toLowerCase());
                    affiliations.add(unit);
                }
                for (String token : unit.split(" ")) {
                    if (Utils.fuHeiMail(token)) {
                        mailSet.add(token);
                    }
                }
            }
        }
        return affiliations;
    }


    /**
     * Extracts the author name list ("Last, Fore" — or just the last name when the fore name
     * is the placeholder "-") from the PubMed author-extend JSON, and collects e-mail-looking
     * tokens from each author's affiliation strings into {@code mailSet}.
     * Fix: an author entry without an "Affiliation" key no longer throws a
     * NullPointerException — the outer contains("Affiliation") check only proves that SOME
     * entry has the key. This matches the null guard already used in getPmAuMailList.
     */
    public static List<String> getPmAuthList(String authorExtend, Set<String> mailSet) {
        List<String> authorList = new ArrayList<>();
        if (null != authorExtend && authorExtend.contains("Affiliation")) {
            JSONArray objects = JSONUtil.parseArray(authorExtend);
            for (Object object : objects) {
                JSON parse = JSONUtil.parse(object);
                String lastName = null != parse.getByPath("LastName") ? parse.getByPath("LastName").toString().trim() : "";
                String foreName = null != parse.getByPath("ForeName") ? parse.getByPath("ForeName").toString().trim() : "";
                if ("-".equals(foreName)) {
                    authorList.add(lastName);
                } else {
                    // Skip degenerate names such as ", " with no real content.
                    if ((lastName + ", " + foreName).length() > 3) {
                        authorList.add(lastName + ", " + foreName);
                    }
                }
                // BUG FIX: guard against authors that carry no Affiliation entry at all.
                if (null == parse.getByPath("Affiliation")) {
                    continue;
                }
                String affiliation = parse.getByPath("Affiliation").toString();
                List<String> infoList = com.alibaba.fastjson.JSON.parseArray(affiliation, String.class);
                for (String info : infoList) {
                    String[] split = info.split(" ");
                    for (String mail : split) {
                        if (Utils.fuHeiMail(mail)) {
                            mailSet.add(Utils.dellEnd(mail));
                        }
                    }
                }
            }
        }
        return authorList;
    }


    /**
     * Buckets translated abstract sections into background / discussion / methods / results
     * lists by their normalised label, then stores each bucket as a JSON string on a new
     * {@link En.AbKeyMethod}. Labels are lower-cased and trimmed, leading ordinals such as
     * "1)" or "(1)" are stripped (PubMed keeps them in the text but the label sets do not),
     * and a trailing ':' or '.' is removed before set membership is tested.
     * A section may land in several buckets when its label occurs in several sets.
     * NOTE(review): assumes {@code trans.getLabel()} is non-null — confirm En.Trans
     * initialises the label field.
     */
    public static En.AbKeyMethod getTransStr(Set<String> backgroundSet, Set<String> discussionSet, Set<String> methodsSet, Set<String> resultsSet, List<En.Trans> transList) {
        List<En.Trans> backgroundList = new ArrayList<>();
        List<En.Trans> discussionList = new ArrayList<>();
        List<En.Trans> methodsList = new ArrayList<>();
        List<En.Trans> resultsList = new ArrayList<>();
        for (En.Trans trans : transList) {
            String l = trans.getLabel().toLowerCase().trim();
            // Strip a leading ordinal of the form "N)" then "(N)" (N = 1..9), exactly as the
            // previous explicit startsWith chains did; l is already lower-case and trimmed.
            l = l.replaceFirst("^[1-9]\\)", "").trim();
            l = l.replaceFirst("^\\([1-9]\\)", "").trim();
            l = l.endsWith(":") ? l.substring(0, l.length() - ":".length()).trim() : l;
            l = l.endsWith(".") ? l.substring(0, l.length() - ".".length()).trim() : l;
            if (backgroundSet.contains(l)) {
                backgroundList.add(trans);
            }
            if (discussionSet.contains(l)) {
                discussionList.add(trans);
            }
            if (methodsSet.contains(l)) {
                methodsList.add(trans);
            }
            if (resultsSet.contains(l)) {
                resultsList.add(trans);
            }
        }
        En.AbKeyMethod abKeyMethod = new En.AbKeyMethod();
        abKeyMethod.setBackground(JSONUtil.toJsonStr(backgroundList));
        abKeyMethod.setDiscussion(JSONUtil.toJsonStr(discussionList));
        abKeyMethod.setMethods(JSONUtil.toJsonStr(methodsList));
        abKeyMethod.setResults(JSONUtil.toJsonStr(resultsList));
        return abKeyMethod;
    }


    /**
     * Splits an HTML-ish abstract into paragraph sections and converts each into an
     * {@link En.Trans}. Paragraphs that already carry &lt;strong class="sub-title"&gt; labels
     * are handed to addTrans with the label boundary marked by the "âãä" sentinel; plain
     * paragraphs are first run through addLabel, which inserts "ôõö" section markers, and
     * are split on those markers when more than two sections are detected.
     */
    public static List<En.Trans> transAbstract(String ab, List<String> originalWordList, Set<String> smallWordSet) {
        List<En.Trans> sections = new ArrayList<>();
        if (null == ab || ab.length() <= 3) {
            return sections;
        }
        for (String paragraph : ab.trim().split("</p>")) {
            if (paragraph.startsWith("<p>")) {
                paragraph = paragraph.substring("<p>".length());
            }
            if (paragraph.contains("<strong class=")) {
                // Labelled paragraph: turn the closing tag into the label sentinel.
                if (paragraph.contains("<strong class=\"sub-title\">") && paragraph.contains("</strong>")) {
                    paragraph = paragraph.replace("<strong class=\"sub-title\">", " ").replace("</strong>", "âãä").replaceAll(" +", " ").trim();
                }
                sections.add(addTrans(paragraph));
                continue;
            }
            // Plain paragraph: normalise CJK punctuation, then let addLabel mark section starts.
            String marked = ". " + paragraph.replace("：", ":").replace("。", ". ").replace("（", "(").replace("）", ")").replaceAll(" +", " ").trim();
            marked = addLabel(marked, originalWordList, smallWordSet);
            String[] pieces = marked.split("ôõö");
            if (pieces.length <= 2) {
                // At most one section marker: keep the paragraph whole, sentinels removed.
                sections.add(addTrans(marked.replace("ôõö", "").replace("âãä", "")));
            } else {
                for (String piece : pieces) {
                    // Drop fragments that are only dots/whitespace.
                    if (piece.replace(".", "").trim().length() > 1) {
                        sections.add(addTrans(piece));
                    }
                }
            }
        }
        return sections;
    }

    /**
     * Wraps a sentence into an {@link En.Trans}, splitting off a leading label when the
     * "âãä" sentinel (inserted by transAbstract/addLabel) is present.
     * Leading/trailing '.' characters are stripped first, a single trailing terminator is
     * re-appended, and text containing Chinese characters is skipped entirely (the Trans
     * is returned with no fields set).
     */
    public static En.Trans addTrans(String str) {
        En.Trans trans = new En.Trans();
        // Strip leading/trailing dots. NOTE(review): the bound i < str.length() is re-evaluated
        // against the shrinking string, so stripping stops early when dots make up more than
        // half the input (e.g. "..a.." keeps one trailing dot) — do NOT convert this to
        // while(true) without confirming that difference is unintended.
        for (int i = 0; i < str.length(); i++) {
            str = str.trim();
            if (str.startsWith(".")) {
                str = str.substring(".".length()).trim();
            } else if (str.endsWith(".")) {
                str = str.substring(0, str.length() - ".".length()).trim();
            } else {
                break;
            }
        }
        // Ensure the text ends with a sentence terminator.
        str = !str.endsWith(".") ? str + ". " : str;
        if (!Utils.isContainChinese(str)) {
            if (str.contains("âãä") && str.length() > 5) {
                // Text before the sentinel is the section label, text after it is the English body.
                String label = str.substring(0, str.indexOf("âãä")).trim();
                label = label.endsWith(":") ? label.substring(0, label.length() - ":".length()) : label;
                String en = str.substring(str.indexOf("âãä") + "âãä".length()).trim();
                en = en.startsWith(":") ? en.substring(":".length()).trim() : en;
                trans.setLabel(label);
                trans.setEn(en);
//                trans.setCn(StudyUtils.enToCnTrans((label + ": " + en)).replace("::", ":"));
            } else if (str.trim().length() > 1) {
                trans.setEn(str);
//                trans.setCn(StudyUtils.enToCnTrans(str));
            }
        }
        return trans;
    }


    // 这种是标签后面冒号加空格型的
    /**
     * Detects the "label followed by colon-plus-space" abstract style: true when any frequent
     * label occurs right after a sentence boundary as ".label: " (". " is first collapsed to ".").
     */
    public static boolean isColon(String str, Set<String> smallWordSet) {
        String normalized = str.replace(". ", ".").toLowerCase();
        return smallWordSet.stream()
                .anyMatch(label -> normalized.contains("." + label.toLowerCase() + ": "));
    }

    // 1) PURPOSE: To demonstrate 针对有序号的
    /**
     * Detects the ordinal-prefixed label style, e.g. "1) PURPOSE: To demonstrate": true when
     * at least two labels appear after a sentence boundary as ".N) label: " or ".(N) label: ".
     */
    public static boolean isBracket(String str, Set<String> smallWordSet) {
        String normalized = str.replace("（", "(").replace(". ", ".").replace("）", ")").toLowerCase();
        int hits = 0;
        for (String label : smallWordSet) {
            String tag = label.toLowerCase() + ": ";
            for (int num = 1; num < 10; num++) {
                if (normalized.contains("." + num + ") " + tag) || normalized.contains(".(" + num + ") " + tag)) {
                    hits++;
                }
            }
        }
        return hits > 1;
    }

    // 这种是没有冒号，而是标签后面有一个空格，空格后面必须又是一个大写的字母，但仅限于出现次数在一万条以上的标签。
    /**
     * Detects the "label, space, then capitalised sentence" style (no colon): true when at
     * least two '.'-separated segments start with a frequent label (longer than 2 chars)
     * followed by exactly one space and an upper-case letter.
     */
    public static boolean isBlank(String str, Set<String> smallWordSet) {
        int hits = 0;
        for (String segment : str.split("\\.")) {
            String trimmed = segment.trim();
            for (String label : smallWordSet) {
                boolean prefixed = trimmed.toLowerCase().startsWith(label.toLowerCase())
                        && trimmed.length() > label.length() + 1
                        && label.trim().length() > 2;
                if (prefixed) {
                    String afterLabel = trimmed.substring(label.length(), label.length() + 1);
                    String next = trimmed.substring(label.length() + 1, label.length() + 2);
                    if (" ".equals(afterLabel) && next.matches("[A-Z]")) {
                        hits++;
                    }
                }
            }
        }
        return hits > 1;
    }

    // 如果是标签后面紧跟着大写字母，而不是冒号、句号或空格，这种仅限于出现次数在一万条以上的标签。
    /**
     * Detects the "label immediately followed by an upper-case letter" style (no colon,
     * period, or space in between): true when at least two '.'-separated segments start with
     * a frequent label (longer than 2 chars) whose very next character is A-Z.
     */
    public static boolean isCapital(String str, Set<String> smallWordSet) {
        int hits = 0;
        for (String segment : str.split("\\.")) {
            String trimmed = segment.trim();
            for (String label : smallWordSet) {
                if (trimmed.toLowerCase().startsWith(label.toLowerCase())
                        && trimmed.length() > label.length()
                        && label.trim().length() > 2) {
                    String follower = trimmed.substring(label.length(), label.length() + 1);
                    // An upper-case letter right after the label marks it as a section heading.
                    if (follower.matches("[A-Z]")) {
                        hits++;
                    }
                }
            }
        }
        return hits > 1;
    }

    // originalWordList是从长到短的，匹配上最长的，即停止。simpWordSet是出现次数在一万条以上的，仅127条，全部为小写。
    /**
     * Scans a '.'-separated abstract and, for every segment that starts with a known section
     * label, re-emits the segment as "ôõö" + label + "âãä" + body so that transAbstract can
     * later split sections on "ôõö" and addTrans can split label from body on "âãä".
     * originalWordList is sorted longest-first so the longest matching label wins;
     * smallWordSet holds only the ~127 labels seen 10k+ times (all lower-case) and gates the
     * riskier heuristics. The whole string is classified once up front into exactly one style
     * (bracket / colon / capital / blank / fallback) and every segment is matched with it.
     */
    public static String addLabel(String str, List<String> originalWordList, Set<String> smallWordSet) {
        String[] split = str.split("\\.");
        StringBuilder r = new StringBuilder();
        boolean isColon = isColon(str, smallWordSet);
        boolean isCapital = isCapital(str, smallWordSet);
        boolean isBlank = isBlank(str, smallWordSet);
        boolean isBracket = isBracket(str, smallWordSet);
        for (String s : split) {
            String f = s.startsWith(" ") ? " " : ""; // preserve a single leading space if the segment had one
            s = s.trim();
            boolean notFind = true; // when no label matches, the segment is re-appended unchanged
            if (!s.isEmpty()) {
                for (String o : originalWordList) {
                    o = o.toLowerCase();
                    if (isBracket) { // ordinal-prefixed labels, e.g. "1) PURPOSE: To demonstrate"
                        for (int i = 1; i < 10; i++) {
                            if (s.toLowerCase().startsWith(i + ") " + o + ": ")) {
                                int len = o.length() + ") ".length() + Integer.toString(i).length();
                                r.append(".").append(f).append("ôõö").append(s, 0, len).append("âãä").append(s.substring(len));
                                notFind = false;
                                break;
                            } else if (s.toLowerCase().startsWith(i + ")" + o + ": ")) {
                                int len = o.length() + ")".length() + Integer.toString(i).length();
                                r.append(".").append(f).append("ôõö").append(s, 0, len).append("âãä").append(s.substring(len));
                                notFind = false;
                                break;
                            } else if (s.toLowerCase().startsWith("(" + i + ") " + o + ": ")) {
                                int len = o.length() + ("(" + i + ") ").length();
                                r.append(".").append(f).append("ôõö").append(s, 0, len).append("âãä").append(s.substring(len));
                                notFind = false;
                                break;
                            } else if (s.toLowerCase().startsWith("(" + i + ")" + o + ": ")) {
                                int len = o.length() + ("(" + i + ")").length();
                                r.append(".").append(f).append("ôõö").append(s, 0, len).append("âãä").append(s.substring(len));
                                notFind = false;
                                break;
                            }
                        }
                    } else if (s.toLowerCase().startsWith(o) && s.length() > o.length() + 1) {
                        String e1 = s.substring(o.length(), o.length() + 1);
                        String e2 = s.substring(o.length() + 1, o.length() + 2);
                        if (isColon) { // abstract uses "label: " style, so a colon WITHOUT a following space is not accepted; only the 127 frequent labels drove this choice, so a few are missed — impact is small
                            if (":".equals(e1) && " ".equals(e2)) {
                                r.append(".").append(f).append("ôõö").append(s, 0, o.length()).append("âãä").append(s.substring(o.length()));
                                notFind = false;
                                break;
                            }
                        } else if (isCapital) { // label immediately followed by an upper-case letter or digit (no colon/period/space); frequent labels only
                            if (smallWordSet.contains(o) && Pattern.compile("[A-Z0-9]").matcher(e1).find()) {
                                r.append(".").append(f).append("ôõö").append(s, 0, o.length()).append("âãä").append(s.substring(o.length()));
                                notFind = false;
                                break;
                            }
                        } else if (isBlank) { // label followed by a space, then an upper-case letter; frequent labels only
                            if (smallWordSet.contains(o) && " ".equals(e1) && Pattern.compile("[A-Z]").matcher(e2).find()) {
                                r.append(".").append(f).append("ôõö").append(s, 0, o.length()).append("âãä").append(s.substring(o.length()));
                                notFind = false;
                                break;
                            }
                        } else { // fallback: accept an upper-case letter, ':' or '.' right after the label, but never a space
                            if (Pattern.compile("[A-Z:.]").matcher(e1).find()) {
                                r.append(".").append(f).append("ôõö").append(s, 0, o.length()).append("âãä").append(s.substring(o.length()));
                                notFind = false;
                                break;
                            }
                        }
                    }
                }
            }
            if (notFind) {
                r.append(".").append(f).append(s);
            }
        }
        return r.toString();
    }


    // 获取排序的作者信息，包含原始的作者、单位信息，以及规范后的作者和提取出来的单位信息，主要用于后续的同名同姓处理。
    /**
     * Builds the ordered author list, keeping both the raw author/affiliation text and the
     * normalised author name plus extracted department/unit names — used downstream for
     * same-name-author disambiguation.
     * One En.C1AuthUnit is emitted per (author, affiliation) pair; an author without any
     * affiliation still yields a single entry with no unit list.
     */
    public static List<En.C1AuthUnit> getPmSortAuthList(String authorExtend, Set<String> pinYinSet, Map<String, String> wordAndStandardMap, Set<String> enDeptSet, Map<String, Integer> deptAndSumMap) {
        List<En.C1AuthUnit> c1AuthUnitList = new ArrayList<>();
        if (null != authorExtend && authorExtend.contains("Affiliation")) {
            JSONArray objects = JSONUtil.parseArray(authorExtend);
            List<String> authorList = new ArrayList<>();
            // First pass: collect plain "Last, Fore" names to decide the pinyin name order below.
            for (Object object : objects) {
                JSON parse = JSONUtil.parse(object);
                String lastName = null != parse.getByPath("LastName") ? parse.getByPath("LastName").toString().trim() : "";
                String foreName = null != parse.getByPath("ForeName") ? parse.getByPath("ForeName").toString().trim() : "";
                if ("-".equals(foreName)) {
                    authorList.add(lastName);
                } else {
                    authorList.add(lastName + ", " + foreName);
                }
            }
            // e.g. Chang-Ming Huang -> isOrder = true: normalise to surname-first "Huang ChangMing"
            boolean isOrder = BasisTable.getPiYiOrder(CollUtil.join(authorList, "; "), pinYinSet);
            // Second pass: emit one entry per author/affiliation combination.
            for (Object object : objects) {
                JSON parse = JSONUtil.parse(object);
                String lastName = null != parse.getByPath("LastName") ? parse.getByPath("LastName").toString().trim() : "";
                String foreName = null != parse.getByPath("ForeName") ? parse.getByPath("ForeName").toString().trim() : "";
                List<String> unitList = new ArrayList<>();
                if (null != parse.getByPath("Affiliation")) {
                    String affiliation = parse.getByPath("Affiliation").toString();
                    List<String> units = com.alibaba.fastjson.JSON.parseArray(affiliation, String.class);
                    for (String s : units) {
                        // Strip a trailing period from each affiliation string.
                        s = s.endsWith(".") ? s.substring(0, s.length() - ".".length()).trim() : s.trim();
                        unitList.add(s);
                    }
                }

                String originalAuth = "";
                String orderAuth = "";
                if ("-".equals(foreName)) {
                    originalAuth = lastName;
                    orderAuth = BasisTable.changeAuthOrder(lastName, pinYinSet);
                } else {
                    String auth = lastName + " " + foreName.replace("  ", " ").trim();
                    originalAuth = auth;
                    if (isOrder && auth.lastIndexOf(" ") > 0) {
                        String end = auth.substring(0, auth.lastIndexOf(" "));
                        String start = auth.substring(auth.lastIndexOf(" "));
                        // Some western names have single-letter parts; leave those unconverted.
                        if (end.length() > 1 && start.length() > 1) {
                            auth = start + " " + end;
                        }
                    }
                    orderAuth = BasisTable.changeAuthOrder(auth, pinYinSet);
                }

                if (unitList.isEmpty()) {
                    // Author without affiliation: still record the name, with no unit list.
                    En.C1AuthUnit c1AuthUnit = new En.C1AuthUnit();
                    c1AuthUnit.setInfo(originalAuth);
                    c1AuthUnit.setAuth(orderAuth);
                    c1AuthUnitList.add(c1AuthUnit);
                } else {
                    for (String s : unitList) {
                        En.C1AuthUnit c1AuthUnit = new En.C1AuthUnit();
                        c1AuthUnit.setInfo(originalAuth + "; " + s);
                        c1AuthUnit.setAuth(orderAuth);
                        Set<String> oneC1 = getOneC1Dept(s, "pm", wordAndStandardMap, enDeptSet, deptAndSumMap);
//                        Set<String> oneC1 = ReadUnit.getOneC1(s, "pm", wordAndStandardMap);
                        c1AuthUnit.setUnitList(new ArrayList<>(oneC1));
                        c1AuthUnitList.add(c1AuthUnit);
                    }
                }
            }
        }
        return c1AuthUnitList;
    }

    /**
     * Extracts institution-like segments (university / hospital / medical-school names)
     * from one raw affiliation string.
     * <p>
     * The input is normalized (full-width punctuation to ASCII, all separators unified
     * to ", ", runs of spaces collapsed) and split into comma segments. A segment is
     * kept when it matches any known institution keyword, when it is an all-caps
     * acronym that is not the final segment (the final segment is usually a country
     * code such as UK / USA), or when any of its words starts with "univ"/"hosp".
     *
     * @param c1                 raw affiliation string
     * @param type               source type; "u000t" inputs are pre-cleaned via cList.reArgs
     * @param wordAndStandardMap lowercase garbled-char -&gt; standard-char map (see getStandard)
     * @param enDeptSet          currently unused here; kept so the signature matches callers
     * @param deptAndSumMap      out-parameter: occurrence counts of segments NOT recognized
     *                           as institutions (collected for later review)
     * @return set of segments recognized as institution names
     */
    public static Set<String> getOneC1Dept(String c1, String type, Map<String, String> wordAndStandardMap, Set<String> enDeptSet, Map<String, Integer> deptAndSumMap) {
        String info = "u000t".equals(type) ? cList.reArgs(c1) : c1;
        // Normalize full-width punctuation, unify ; . / , separators to ", ", collapse spaces.
        info = info.replace("，", ",").replace("。", ".").replace("；", ";").replace("“", "\"").replace("”", "\"")
                .replace(";", ",").replace(".", ",").replace("/", ",").replace(",", ", ").replaceAll(" +", " ").trim();
        String[] split = info.contains(", ") ? info.split(", ") : info.split(",");
        Set<String> oneOrdinarySet = new HashSet<>();

        for (int i = 0; i < split.length; i++) {
            String s = getStandard(split[i], wordAndStandardMap).trim(); // case-preserving garbled-character replacement
            boolean notMatched = true;
            if ((s.length() > 4 || isAllDaXie(s)) && s.length() < 255) {
                String l = s.toLowerCase().trim();
                l = (l.endsWith(",") || l.endsWith(".") || l.endsWith(";")) ? l.substring(0, l.length() - 1) : l;
                if (l.contains("sch med") || l.contains("coll med") || l.contains("school medicine") || l.contains("school medical") || l.contains("school ofmedicine") || l.contains("schools of medicine") || l.contains("traditional chinese medicine")
                        || l.contains("tradit chinese medical") || l.contains("university of tcm") || l.contains("university tcm") || l.contains("tcm university") || l.contains("university ") || l.contains(" university") || l.contains("hospital ")
                        || l.contains(" hospital") || l.contains("unversity ") || l.contains(" unversity") || l.contains("mayo clinic") || l.contains("medical college") || l.contains("uniwersytet ") || l.contains(" uniwersytet") || l.contains("medicine college")
                        || l.contains("medical school") || l.contains("medicine school") || l.contains("chinese medicine") || l.contains("chinese medical") || l.contains("chinese academy science") || l.contains("chinese academy of sciences")
                        || l.contains("college of medicine") || l.contains("school of medicine") || l.contains("college of medical") || l.contains("school of medical") || l.contains("faculty medical") || l.contains("med coll")) {
                    oneOrdinarySet.add(s);
                    notMatched = false;
                }
                // All-caps segments are treated as institution acronyms, unless they are the
                // last segment (which is usually a country code such as UK or USA).
                if (isAllDaXie(s) && i != split.length - 1 && s.length() > 2) {
                    oneOrdinarySet.add(s);
                    notMatched = false;
                }
                String[] list = l.split(" ");
                for (String n : list) {
                    if ((n.startsWith("univ") || n.startsWith("hosp")) && !n.contains("@")) {
                        oneOrdinarySet.add(s);
                        notMatched = false;
                        break; // one matching word is enough; further re-adds to the set are no-ops
                    }
                }
                if (notMatched && s.length() > 2) {
                    deptAndSumMap.merge(s, 1, Integer::sum); // tally unrecognized segments for later review
                }
            }
        }
        return oneOrdinarySet;
    }

    /**
     * Replaces garbled characters with their standard form while preserving case.
     * Keys and values of {@code wordAndStandardMap} are both lowercase, e.g.
     * sCh0lar_23_12.unIt_sȁm_ordinȀry -&gt; sCh0lar_23_12.unIt_sam_ordinAry.
     * A mapped character that is neither upper- nor lowercase (digits, symbols)
     * is kept as-is.
     *
     * @param str                input text, possibly containing garbled characters
     * @param wordAndStandardMap lowercase garbled char -&gt; lowercase standard char
     * @return str with every mapped letter replaced, original casing preserved
     */
    public static String getStandard(String str, Map<String, String> wordAndStandardMap) {
        StringBuilder result = new StringBuilder(str.length()); // presized: output length ≈ input length
        for (char ch : str.toCharArray()) {
            String s = String.valueOf(ch);
            // Locale.ROOT makes casing locale-independent (avoids the Turkish dotless-i problem).
            String t = wordAndStandardMap.get(s.toLowerCase(Locale.ROOT)); // single lookup instead of containsKey + get
            if (t == null) {
                result.append(s); // no mapping: keep the character unchanged
            } else if (Character.isUpperCase(ch)) {
                result.append(t.toUpperCase(Locale.ROOT));
            } else if (Character.isLowerCase(ch)) {
                result.append(t.toLowerCase(Locale.ROOT));
            } else {
                result.append(s); // mapped but not a letter: keep the original character
            }
        }
        return result.toString();
    }

    // Returns true only when every character is an ASCII uppercase letter A-Z;
    // spaces, digits, symbols and non-ASCII letters all count as "not uppercase".
    // (An empty string vacuously yields true, matching the original regex form.)
    public static boolean isAllDaXie(String str) {
        return str.chars().allMatch(c -> c >= 'A' && c <= 'Z');
    }

    /**
     * Maps the trailing country segment of each affiliation line to its Chinese name.
     * The text after the last ", " is taken as the country; any trailing sentence such
     * as ". Electronic address: ..." is dropped first. Segments without a translation
     * are tallied into {@code notEnCountryAndSumMap} for later review.
     *
     * @param c1List               affiliation lines (null entries are skipped)
     * @param notEnCountryAndSumMap out-parameter: counts of unmatched country segments
     * @param enAndCnCountryMap    lowercase English country name -&gt; Chinese name
     * @return Chinese country names, in encounter order
     */
    public static List<String> getCnCountryList(List<String> c1List, Map<String, Integer> notEnCountryAndSumMap, Map<String, String> enAndCnCountryMap) {
        List<String> cnCountryList = new ArrayList<>();
        for (String c1 : c1List) {
            if (c1 == null || !c1.contains(", ")) {
                continue;
            }
            // Take what follows the last ", " — normally the country.
            String tail = c1.substring(c1.lastIndexOf(", ") + ", ".length()).toLowerCase().trim();
            // ICN Business School, CEREFIGE, University of Lorraine, Nancy, France. Electronic address: khaled.lahlouh@icn-artem.com
            int sentenceBreak = tail.indexOf(". ");
            if (sentenceBreak >= 0) {
                tail = tail.substring(0, sentenceBreak).trim();
            }
            if (enAndCnCountryMap.containsKey(tail)) {
                cnCountryList.add(enAndCnCountryMap.get(tail));
            } else {
                notEnCountryAndSumMap.merge(tail, 1, Integer::sum);
            }
        }
        return cnCountryList;
    }

    /**
     * Returns 1 when any affiliation line mentions China (contains " china" or
     * "china " case-insensitively), otherwise 0. Null entries are ignored.
     *
     * @param c1List affiliation lines, may contain nulls
     * @return 1 if a China mention is found, else 0
     */
    public static int getIsChina(List<String> c1List) {
        for (String c1 : c1List) {
            if (null != c1) {
                // Hoisted: was lowercased twice per element. Locale.ROOT keeps the
                // match locale-independent (e.g. Turkish 'I' would otherwise break it).
                String l = c1.toLowerCase(Locale.ROOT);
                if (l.contains(" china") || l.contains("china ")) {
                    return 1;
                }
            }
        }
        return 0;
    }

    /**
     * Flags a record as a retraction based on its title or its publication types.
     * The title triggers when it starts with "retraction"/"retracted" or contains
     * the Chinese retraction markers; any publication type matching the known
     * retraction/withdrawal phrases also triggers.
     *
     * @param ti    article title, may be null or empty
     * @param ptSet publication-type strings
     * @return 1 when the record is a retraction, 0 otherwise
     */
    public static int getIsRetraction(String ti, Set<String> ptSet) {
        if (ti != null && !ti.isEmpty()) {
            String title = ti.toLowerCase().trim();
            boolean titleHit = title.startsWith("retraction")
                    || title.startsWith("retracted")
                    || title.contains("撤稿声明")
                    || title.contains("被撤回的出版物");
            if (titleHit) {
                return 1;
            }
        }
        for (String rawPt : ptSet) {
            String pt = rawPt.toLowerCase().trim();
            boolean ptHit = "retraction".equals(pt)
                    || pt.contains("withdrawn publication")
                    || pt.contains("retracted publication")
                    || pt.contains("retraction of publication");
            if (ptHit) {
                return 1;
            }
        }
        return 0;
    }

    /**
     * Splits an abstract into labeled sections ("METHODS:", "RESULTS:", ...).
     * For each label in order, the text between "label:" and the next label's
     * "nextLabel:" marker becomes that label's info; the final label (empty next)
     * takes the remaining tail. A section is only kept when the captured text is
     * longer than 2 characters; if the next label exists but its marker is not
     * found in the remaining text, that section is skipped.
     *
     * @param originalList section labels in the order they appear in the abstract
     * @param ab           the abstract text
     * @return extracted (label, text) pairs
     */
    public static Set<En.MethodAndInfo> getMethodAndInfo(List<String> originalList, String ab) {
        Set<En.MethodAndInfo> sections = new HashSet<>();
        for (int idx = 0; idx < originalList.size(); idx++) {
            String label = originalList.get(idx);
            String marker = label + ":";
            if (!ab.contains(marker)) {
                continue;
            }
            // Consume everything up to and including this label's marker.
            ab = ab.substring(ab.indexOf(marker) + marker.length());
            String nextLabel = (idx + 1 < originalList.size()) ? originalList.get(idx + 1) : "";
            En.MethodAndInfo section = new En.MethodAndInfo();
            String nextMarker = nextLabel + ":";
            if (nextLabel.length() > 1 && ab.contains(nextMarker)) {
                int cut = ab.indexOf(nextMarker);
                String body = ab.substring(0, cut);
                ab = ab.substring(cut);
                if (body.length() > 2) {
                    section.setName(label);
                    section.setInfo(reInfo(body.trim().replace("：", ":")));
                    sections.add(section);
                }
            } else if (nextLabel.isEmpty() && ab.length() > 2) {
                // Last label: the rest of the abstract is its body.
                section.setName(label);
                section.setInfo(reInfo(ab.trim().replace("：", ":")));
                sections.add(section);
            }
        }
        return sections;
    }

    /**
     * Trims trailing paragraph tags from extracted section text: strips one
     * trailing "&lt;p&gt;", then one trailing "&lt;/p&gt;", then one trailing
     * "&lt;p&gt;" again — covering endings like "...&lt;/p&gt;&lt;p&gt;" and
     * "...&lt;p&gt;&lt;/p&gt;". At most one tag is removed per step.
     */
    public static String reInfo(String info) {
        for (String tag : new String[]{"<p>", "</p>", "<p>"}) {
            if (info.endsWith(tag)) {
                info = info.substring(0, info.length() - tag.length());
            }
        }
        return info;
    }

    /**
     * Journal Article|34634382
     * Research Support, Non-U.S. Gov't|8628514
     * Review|3342157
     * Case Reports|2411196
     * Comparative Study|1917859
     * English Abstract|1506884
     * Research Support, U.S. Gov't, P.H.S.|1485883
     * Research Support, N.I.H., Extramural|1447895
     * Letter|1257947
     * Comment|1035855
     * Research Support, U.S. Gov't, Non-P.H.S.|932288
     * Editorial|695984
     * Randomized Controlled Trial|614807
     * Clinical Trial|540033
     * Historical Article|370234
     * Multicenter Study|347681
     * Evaluation Study|262523
     * Systematic Review|261961
     * News|226017
     * Meta-Analysis|201178
     * Biography|181746
     * Published Erratum|156573
     * Observational Study|156409
     * Validation Study|110241
     * Controlled Clinical Trial|95611
     * Congress|67761
     * Research Support, N.I.H., Intramural|67596
     * Introductory Journal Article|58468
     * Portrait|49330
     * Video-Audio Media|41520
     * Clinical Trial, Phase II|41421
     * Practice Guideline|31576
     * Interview|31062
     * Overall|27760
     * Preprint|26364
     * Clinical Trial, Phase I|26001
     * Retraction of Publication|23427
     * Clinical Trial, Phase III|22835
     * Retracted Publication|21703
     * Newspaper Article|18297
     * Guideline|16388
     * Bibliography|16009
     * Clinical Trial Protocol|13402
     * Consensus Development Conference|12425
     * Legal Case|11084
     * Twin Study|9581
     * Study Guide|9456
     * Clinical Conference|7669
     * Lecture|6931
     * Classical Article|6836
     * Directory|6644
     * Personal Narrative|6341
     * Clinical Study|6000
     * Patient Education Handout|5772
     * Address|5474
     * Randomized Controlled Trial, Veterinary|5312
     * Autobiography|3964
     * Dataset|3286
     * Technical Report|3243
     * Clinical Trial, Phase IV|2501
     * Pragmatic Clinical Trial|2391
     * Expression of Concern|2168
     * Research Support, American Recovery and Reinvestment Act|2098
     * Clinical Trial, Veterinary|1789
     * Corrected and Republished Article|1713
     * Festschrift|1701
     * Legislation|1676
     * Equivalence Trial|1294
     * Webcast|1144
     * Duplicate Publication|907
     * Consensus Development Conference, NIH|801
     * Observational Study, Veterinary|640
     * Dictionary|545
     * Periodical Index|330
     * Scientific Integrity Review|284
     * Interactive Tutorial|280
     * Government Publication|175
     * Adaptive Clinical Trial|36
     * Evaluation Studies|26
     */

    /*
      CREATE TABLE `pm_ut_aid_info_pm` (
        `id` int(10) NOT NULL AUTO_INCREMENT,
        `key_word_list` longtext COMMENT '关键词集合，此是基于标题、摘要、关键词三处的整合',
        `pm_ut_doi_list` text COMMENT 'pm、doi、ut三者的集合，用于童工的ES检索',
        `journal` text COMMENT '期刊的名称',
        `aid` int(9) DEFAULT NULL COMMENT '因与原表字段重名，pid改为aid，论文的唯一主键，使用老鲍的论文基础表中的自增主键',
        `citation_quota` double DEFAULT NULL COMMENT '引文的质量（按中科院分区表）',
        `reference_quota` double DEFAULT NULL COMMENT '参文的质量',
        `ti_key` text COMMENT '从标题中提取的关键词',
        `ab_key` longtext COMMENT '从摘要中提取的关键词',
        `kw_key` longtext COMMENT '从关键词中提取的关键词(若是主题词给予转换)',
        `abb_word` longtext COMMENT '从简写中提取出来的关键词（是一对）',
        `zky_dalei` int(1) DEFAULT NULL COMMENT '中科院的期刊分区表最佳的大类',
        `jcr_if` double DEFAULT NULL COMMENT 'jcr(scie)期刊影响因子',
        `year` int(4) DEFAULT NULL COMMENT '论文的出版年',
        `title` text COMMENT '论文的原始标题',
        `keyword` longtext COMMENT '论文的原始关键词',
        `ab` longtext COMMENT '论文的原始摘要',
        `mesh` text COMMENT 'PubMed的主题词表，含主题词和副主题词',
        `jid` int(9) DEFAULT NULL COMMENT '期刊的唯一id号，此为期刊关联表的自增主键',
        `discussion` longtext COMMENT '摘要提取出来的特定字段',
        `results` longtext COMMENT '摘要提取出来的特定字段',
        `methods` longtext COMMENT '摘要提取出来的特定字段',
        `background` longtext COMMENT '摘要提取出来的特定字段',
        `c1_list` longtext COMMENT 'wos表中的c1或PubMed表中的作者单位信息字段，只保留单位信息',
        `c1_unit_list` longtext COMMENT '从c1中提取出来的单位信息',
        `rp_list` longtext COMMENT 'wos表中的rp字段，只保留单位信息',
        `rp_unit_list` longtext COMMENT '从rp中提取出来的单位信息',
        `cite` longtext COMMENT '参文格式（NLM、APA、MLA、AMA四种）',
        `cn_ti` text COMMENT '中文的文献标题',
        `cn_ab` longtext COMMENT '中文的文献摘要',
        `cn_kw` longtext COMMENT '中文的文献关键词',
        `cn_type` text COMMENT '中文的文献的类型，综述等',
        `pt` text COMMENT '英文的文献的类型，综述等，是一个集合',
        `cn_country` text COMMENT '中文的发文的国家，区分第一或通讯以及其它的，此为非第一或通讯',
        `cn_unit` longtext COMMENT '中文的发文的机构，区分第一或通讯以及其它的，此为非第一或通讯',
        `is_cns` int(1) DEFAULT NULL COMMENT '六大顶级刊，是为1，否为0',
        `cn_fund` longtext COMMENT '中文的基金的名称',
        `cn_country_1` text COMMENT '中文的发文的国家，区分第一或通讯以及其它的，此为第一或通讯',
        `cn_unit_1` text COMMENT '中文的发文的机构，区分第一或通讯以及其它的，此为第一或通讯',
        `pmId` int(8) DEFAULT NULL COMMENT '最大8位的pmId号',
        `ut` varchar(15) DEFAULT NULL COMMENT '此为15位的UT号',
        `doi` text COMMENT 'doi号集合',
        `nlmId` varchar(25) DEFAULT NULL COMMENT 'nlmId号，无其它字符',
        `cite_score` double DEFAULT NULL COMMENT '期刊citeScore影响因子值',
        `jcr_quartile` int(1) DEFAULT NULL COMMENT 'jcr(scie)期刊分区',
        `fund` longtext COMMENT '来自wos的基金名称(FU字段)',
        `fund_list` longtext COMMENT '从原始基金信息中提供出来的基金名称，待规范。',
        `method_info` longtext COMMENT '摘要提取出来的特定字段集合',
        `trans_ok` int(1) DEFAULT NULL COMMENT '翻译完成为1，没有完成为0',
        `drug_gene_pathway_disease` longtext COMMENT '药物、基因、通路、疾病',
        `auth` longtext COMMENT '原始的作者信息，全写',
        `sort_auth` longtext COMMENT 'Chang-Ming Huang  英文名字中，新志贾这类属于正序，反之贾新志这类属于反序，程序默认为正序转反序，如果是反序则不用再转换了。',
        `info` longtext COMMENT '备用字段',
        `is_retraction` int(1) DEFAULT NULL COMMENT '是否是撤稿，是为1，否为0；',
        `is_china` int(1) DEFAULT NULL COMMENT '是否是中国人发表的，是为1，否为0；',
        `cn_discussion` longtext COMMENT '中文摘要提取出来的特定字段',
        `cn_results` longtext COMMENT '中文摘要提取出来的特定字段',
        `cn_methods` longtext COMMENT '中文摘要提取出来的特定字段',
        `cn_background` longtext COMMENT '中文摘要提取出来的特定字段',
        PRIMARY KEY (`id`),
        KEY `aid` (`aid`),
        KEY `pmId` (`pmId`),
        KEY `ut` (`ut`),
        KEY `jid` (`jid`),
        KEY `trans_ok` (`trans_ok`)
      ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
     */

}
