package com.yuntsg.ruiijn.paperana.service;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.collection.ConcurrentHashSet;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.io.IoUtil;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import com.yuntsg.ruiijn.paperana.utils.CharUtil;
import com.yuntsg.ruiijn.paperana.utils.SmallTool;
import com.yuntsg.ruiijn.paperana.utils.StrUtils;
import common.util.StringUtil;
import common.util.VerifyUtil;
import common.util.WordUtil;

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;
import lombok.Data;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static com.yuntsg.ruiijn.paperana.test.extractEnCn.extractCnEn;
import static com.yuntsg.ruiijn.paperana.test.extractEnCn.isContainChinese;

/**
 * 统计两篇论文之间的，最大的相同单词的个数
 */

public class ArticleRepeatSummary {


//    public static void main(String[] args) throws SQLException, IOException {
//        SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
//        System.out.println(format.format(new Date()) + " 程序开始了~~~");
//        String directory = "D:\\PDF\\";
////        String a = "analysis of a diffusive virus infection model wit source physica a so 2019 dec 1 535.pdf";
//        String a = "identification of molecular mechanisms for achieving hiv 1 c source life sci 2021 265 118857.pdf";
////        String b = "identification of molecular mechanisms for achieving hiv 1 c source life sci 2021 265 118857.pdf";
//        String b = "UT000704408000019.pdf";
//        double threshold = 0.6;
//        List<String> bFileNameList = new ArrayList<>();
//        bFileNameList.add(b);
////        bFileNameList.add("UT000702905900001.pdf");
////        bFileNameList.add("UT000704408000019.pdf");
////        bFileNameList.add("UT000717245000001.pdf");
////        bFileNameList.add("UT000717592300001.pdf");
////        bFileNameList.add("UT000748485600003.pdf");
////        bFileNameList.add("UT000756689100003.pdf");
////        bFileNameList.add("UT000766804600001.pdf");
////        bFileNameList.add("UT000786261200001.pdf");
////        bFileNameList.add("UT000800386900003.pdf");
////        bFileNameList.add("UT000809204300002.pdf");
////        bFileNameList.add("UT000820484400016.pdf");
////        bFileNameList.add("UT000830779700001.pdf");
////        bFileNameList.add("UT000843504500002.pdf");
////        bFileNameList.add("UT000844301300004.pdf");
////        bFileNameList.add("UT000861226700006.pdf");
////        bFileNameList.add("UT000946187700006.pdf");
//        ComprehensiveInfo comprehensive = comprehensive(directory, a, bFileNameList, threshold);
//        double copyProportion = comprehensive.getCopyProportion();
//        List<CopyInfo> copyTop5List = comprehensive.getCopyTop5List();
//        List<RiskLevelInfo> riskLevelList = comprehensive.getRiskLevelList();
//        System.out.println(copyProportion);
//        for (CopyInfo copyInfo : copyTop5List) {
//            System.out.println(copyInfo);
//        }
//        for (RiskLevelInfo riskLevel : riskLevelList) {
//            System.out.println(riskLevel.getRiskLevel());
//            List<RiskSentence> sentenceList = riskLevel.getSentenceList();
//            for (RiskSentence s : sentenceList) {
//                System.out.println(s);
//            }
//        }
//        //校验查看完毕 可以匹配进行适配新版代码 2024年3月28日20:13:51 八点多了 做多少干多少
//
//// main
//    }

    /**
     * Computes the aggregate plagiarism result for one paper against a set of candidate papers.
     *
     * @param filePath      directory holding the paper files
     * @param aFileName     the paper under inspection
     * @param bFileNameList candidate papers (pre-filtered, e.g. ES hits above the threshold)
     * @param threshold     similarity threshold, typically 0.6
     * @return overall copy proportion, top-copied-file ranking, and per-risk-level sentence pairs
     * @throws IOException if a paper file cannot be read
     */
    public static ComprehensiveInfo comprehensive(String filePath, String aFileName, List<String> bFileNameList, double threshold) throws IOException {
        // Extract paper A's text; the RuiJin deployment uses its own extraction pipeline.
        List<String> strings;
        if (StrUtils.isRuiJin()) {
            strings = pdfToTxtRuijin(filePath, filePath + aFileName);
        } else {
            strings = pdfToTxt(filePath, filePath + aFileName);
        }

//        if (StrUtils.isRuiJin()) {
////            List<Change.TableField> tableFields = exRuijin(strings);
////            strings = tableFields.stream().map(o -> o.getStr()).collect(Collectors.toList());
//        }
        // Split paper A into sentences, each sentence being a list of words.
        List<List<String>> aSentenceList;
        if (StrUtils.isRuiJin()) {
            aSentenceList = pdfToSentenceWordsRuijinTest(strings);
        } else {
            aSentenceList = pdfToSentenceWords(strings);
        }
        // Per candidate file: accumulated shared-word count.
        Map<String, Integer> fileNameAndIntersectionSumMap = new HashMap<>();
        // Per candidate file: its sentences as word lists.
        Map<String, List<List<String>>> fileNameAndSentenceListMap = new HashMap<>();
        // Risk-level label -> set of matched sentence pairs.
        Map<String, Set<RiskSentence>> highRiskAndSentenceSetMap = new HashMap<>();
        for (String bFileName : bFileNameList) {
            if (StrUtils.isRuiJin()) {
                // NOTE(review): A and B go through different extraction pipelines here
                // (pdfToTxtRuijin above vs pdfToTxt below), which the original author
                // observed yields inconsistent sentence splits for Chinese text — verify.
                fileNameAndSentenceListMap.put(bFileName, pdfToSentenceWordsRuijinTest(pdfToTxt(filePath, filePath + bFileName)));
            } else {
                fileNameAndSentenceListMap.put(bFileName, pdfToSentenceWords(pdfToTxt(filePath, filePath + bFileName)));

            }
        }
        // Accumulated shared-word count over all of paper A's sentences.
        int intersectionSum = 0;
        // Total word count of paper A.
        int aWordNumSum = 0;
        for (List<String> aWordList : aSentenceList) {
            // Best (maximum) shared-word count for this sentence across all candidates.
            int current = 0;
            // For each candidate file, find the candidate sentence sharing the most words
            // with this A-sentence; the per-sentence maximum feeds the overall score.
//            if (StrUtils.isRuiJin()) {
//                if (aWordList.size() != 1) {
//                    continue;
//                }
//            }
            for (Map.Entry<String, List<List<String>>> entry : fileNameAndSentenceListMap.entrySet()) {
                String bFileName = entry.getKey();
                List<List<String>> bSentenceList = entry.getValue();
                int maxSentenceWordNum = getMaxSentenceWordNum(aWordList, bSentenceList, threshold, highRiskAndSentenceSetMap, bFileName);
                current = Math.max(current, maxSentenceWordNum);
                fileNameAndIntersectionSumMap.compute(bFileName, (key, value) -> value != null ? (value + maxSentenceWordNum) : maxSentenceWordNum);
            }
            intersectionSum += current;
            // Paper A's total word count includes every word, matched or not.
            if (aWordList.size() == 1) {
                // A single-element "sentence" holds raw (possibly Chinese) text; expand it to words.
                aWordNumSum += StrUtils.cnEnToListWord(aWordList).size();
            } else {
                aWordNumSum += aWordList.size();
            }

        }
        // Ranking of the candidate files sharing the most words with paper A (top 10).
        Map<String, Integer> mapSortValueIntLen = mapSortValueIntLen(fileNameAndIntersectionSumMap, 10);
        int rank = 1;
        List<CopyInfo> copyTop5List = new ArrayList<>();
        for (Map.Entry<String, Integer> entry : mapSortValueIntLen.entrySet()) {
            CopyInfo copyInfo = new CopyInfo();
            copyInfo.setRank(rank++);
            copyInfo.setFileName(entry.getKey());
            copyInfo.setCopyProportion(takeFourDigits((entry.getValue() + 0.0) / (aWordNumSum + 0.0) * 100));
            copyTop5List.add(copyInfo);
        }
        // Risk levels with their matched sentence pairs.
        List<RiskLevelInfo> riskLevelList = new ArrayList<>();
//        for (Map.Entry<String, Set<RiskSentence>> entry : highRiskAndSentenceSetMap.entrySet()) {
//            RiskLevelInfo riskLevel = new RiskLevelInfo();
//            riskLevel.setRiskLevel(entry.getKey());
//            riskLevel.setSentenceList(new ArrayList<>(entry.getValue()));
//            riskLevelList.add(riskLevel);
//        }

        /************/
        // First pass: for every A-sentence, record its highest copy proportion across
        // all risk levels, so each sentence is reported only once (at its worst match).
        Map<String, Double> sentenceAndMaxMap = new HashMap<>();
        for (Map.Entry<String, Set<RiskSentence>> entry : highRiskAndSentenceSetMap.entrySet()) {
            Set<RiskSentence> set = entry.getValue();
            for (RiskSentence riskSentence : set) {
                String aSentence = riskSentence.getASentence();
                double copyProportion = riskSentence.getCopyProportion();
                if (sentenceAndMaxMap.containsKey(aSentence)) {
                    if (copyProportion > sentenceAndMaxMap.get(aSentence)) {
                        sentenceAndMaxMap.put(aSentence, copyProportion);
                    }
                } else {
                    sentenceAndMaxMap.put(aSentence, copyProportion);
                }
            }
        }
        // Second pass: keep only the occurrence that matches the recorded maximum,
        // de-duplicating A-sentences across risk levels via aSentenceSet.
        Set<String> aSentenceSet = new HashSet<>();
        for (Map.Entry<String, Set<RiskSentence>> entry : highRiskAndSentenceSetMap.entrySet()) {
            RiskLevelInfo riskLevel = new RiskLevelInfo();
            riskLevel.setRiskLevel(entry.getKey());
            List<RiskSentence> riskSentenceList = new ArrayList<>();
            Set<RiskSentence> riskSentenceSet = entry.getValue();
            for (RiskSentence riskSentence : riskSentenceSet) {
                String aSentence = riskSentence.getASentence();
                double copyProportion = riskSentence.getCopyProportion();
                if (sentenceAndMaxMap.containsKey(aSentence)) {
                    if (sentenceAndMaxMap.get(aSentence).equals(copyProportion) && !aSentenceSet.contains(aSentence)) {
                        aSentenceSet.add(aSentence);
                        riskSentenceList.add(riskSentence);
                    }
                } else {
                    riskSentenceList.add(riskSentence);
                }
            }
            riskLevel.setSentenceList(riskSentenceList);
            riskLevelList.add(riskLevel);
        }
        ////////////////////////
        ComprehensiveInfo comprehensiveInfo = new ComprehensiveInfo();
        comprehensiveInfo.setCopyProportion(takeFourDigits((intersectionSum + 0.0) / (aWordNumSum + 0.0) * 100));
        comprehensiveInfo.setCopyTop5List(copyTop5List);
        comprehensiveInfo.setRiskLevelList(riskLevelList);
        return comprehensiveInfo;
    }

    /**
     * Computes the shared-word count of two word lists and the resulting copy proportion.
     * A non-empty result means the pair qualified under one of two rules:
     * (1) intersection / longer-list-size reaches the threshold, or
     * (2) at least six shared words have length >= 5, and intersection / shorter-list-size
     * reaches the threshold.
     *
     * @param aList     words of the inspected sentence
     * @param bList     words of the candidate sentence
     * @param threshold similarity threshold
     * @return populated result when the pair qualifies; otherwise a default (zero) result
     */
    public static IntersectionAndCopyProportion getIntersection(List<String> aList, List<String> bList, double threshold) {
        int a = 0; // words of aList found in bList
        int c = 0; // of those, "long" words (length >= 5)
        for (String s : aList) {
            if (bList.contains(s)) {
                a++;
                // Six or more long shared words unlock the relaxed rule below,
                // even when the strict proportion fails.
                if (s.length() > 4) {
                    c++;
                }
            }
        }
        int b = 0; // words of bList found in aList
        for (String s : bList) {
            if (aList.contains(s)) {
                b++;
            }
        }
        // The two directions can disagree (duplicate words); take the smaller,
        // and never more than aList's own size.
        int intersection = Math.min(Math.min(a, b), aList.size());
        IntersectionAndCopyProportion result = new IntersectionAndCopyProportion();
        if (intersection > 0 && aList.size() > 1) {
            // Strict rule: denominator is the longer sentence.
            double current = (intersection + 0.0) / Math.max(aList.size(), bList.size());
            if (current >= threshold) {
                result.setIntersection(intersection);
                result.setCopyProportion(current);
            } else if (c > 5) {
                // Relaxed rule: denominator is the shorter sentence.
                current = (intersection + 0.0) / Math.min(aList.size(), bList.size());
                // Fix: previously the intersection was set before this check, so a
                // rejected pair leaked a non-zero intersection with proportion 0.
                if (current >= threshold) {
                    result.setIntersection(intersection);
                    result.setCopyProportion(current);
                }
            }
        }
        return result;
    }

    /**
     * Chinese-aware variant of the intersection check: re-tokenizes both sides
     * (the A side via mixed cn/en word splitting, the B side by segmenting the raw
     * sentence) and then applies the same two qualification rules as
     * {@code getIntersection}: strict (longer-list denominator) or relaxed
     * (>= 6 shared words of length >= 5, shorter-list denominator).
     *
     * @param aList     words (or a single raw-text element) of the inspected sentence
     * @param bListF    raw candidate sentence containing Chinese text
     * @param threshold similarity threshold
     * @return populated result when the pair qualifies; otherwise a default (zero) result
     */
    public static IntersectionAndCopyProportion getIntersectionChina(List<String> aList, String bListF, double threshold) {
        // Re-tokenize both sides so Chinese text is compared word-by-word.
        List<String> aWords = StrUtils.cnEnToListWord(aList);
        List<String> bWords = strToWordList(bListF);

        int a = 0; // words of aWords found in bWords
        int c = 0; // of those, "long" words (length >= 5)
        for (String s : aWords) {
            if (bWords.contains(s)) {
                a++;
                // Six or more long shared words unlock the relaxed rule below.
                if (s.length() > 4) {
                    c++;
                }
            }
        }
        int b = 0; // words of bWords found in aWords
        for (String s : bWords) {
            if (aWords.contains(s)) {
                b++;
            }
        }
        // The two directions can disagree (duplicate words); take the smaller,
        // and never more than the A side's own size.
        int intersection = Math.min(Math.min(a, b), aWords.size());
        IntersectionAndCopyProportion result = new IntersectionAndCopyProportion();
        if (intersection > 0 && aWords.size() > 1) {
            // Strict rule: denominator is the longer word list.
            double current = (intersection + 0.0) / Math.max(aWords.size(), bWords.size());
            if (current >= threshold) {
                result.setIntersection(intersection);
                result.setCopyProportion(current);
            } else if (c > 5) {
                // Relaxed rule: denominator is the shorter word list.
                current = (intersection + 0.0) / Math.min(aWords.size(), bWords.size());
                // Fix: previously the intersection was set before this check, so a
                // rejected pair leaked a non-zero intersection with proportion 0.
                if (current >= threshold) {
                    result.setIntersection(intersection);
                    result.setCopyProportion(current);
                }
            }
        }
        return result;
    }


    /**
     * Splits a chunk of text into a flat list of tokens: the text is broken into
     * sentences, each sentence is segmented, and single-character tokens and
     * stray leading/trailing commas are discarded.
     *
     * @param str raw text, possibly mixing Chinese and English
     * @return tokens of length &gt; 1, in document order
     */
    public static List<String> strToWordList(String str) {
        List<String> words = new ArrayList<>();
        // Normalize Chinese full stops, then split into sentences on ". ".
        String[] sentences = str.replace("。", ". ").split("\\. ");
        for (String sentence : sentences) {
            if (isContainChinese(sentence)) {
                // NOTE(review): extractCnEn also receives `words` — presumably it
                // accumulates extracted tokens into it as a side effect; confirm.
                sentence = extractCnEn(sentence, words);
            }
            for (Term term : StandardTokenizer.segment(sentence)) {
                String token = term.word.trim();
                if (token.startsWith(",")) {
                    token = token.substring(1).trim();
                }
                if (token.endsWith(",")) {
                    token = token.substring(0, token.length() - 1).trim();
                }
                // Drop empty and single-character tokens.
                if (token.length() > 1) {
                    words.add(token);
                }
            }
        }
        return words;
    }

    /**
     * Intersection check used for the single-sentence Chinese comparison path.
     * Applies the same two qualification rules as {@code getIntersection}:
     * strict (longer-list denominator) or relaxed (>= 6 shared words of
     * length >= 5, shorter-list denominator).
     *
     * @param aList     words of the inspected sentence
     * @param bList     words of the candidate sentence
     * @param threshold similarity threshold
     * @return populated result when the pair qualifies; otherwise a default (zero) result
     */
    public static IntersectionAndCopyProportion getIntersectionChinaV2(List<String> aList, List<String> bList, double threshold) {
        int a = 0; // words of aList found in bList
        int c = 0; // of those, "long" words (length >= 5)
        for (String s : aList) {
            if (bList.contains(s)) {
                a++;
                // Six or more long shared words unlock the relaxed rule below.
                if (s.length() > 4) {
                    c++;
                }
            }
        }
        int b = 0; // words of bList found in aList
        for (String s : bList) {
            if (aList.contains(s)) {
                b++;
            }
        }
        // The two directions can disagree (duplicate words); take the smaller,
        // and never more than aList's own size.
        int intersection = Math.min(Math.min(a, b), aList.size());
        IntersectionAndCopyProportion result = new IntersectionAndCopyProportion();
        if (intersection > 0 && aList.size() > 1) {
            // Strict rule: denominator is the longer sentence.
            double current = (intersection + 0.0) / Math.max(aList.size(), bList.size());
            if (current >= threshold) {
                result.setIntersection(intersection);
                result.setCopyProportion(current);
            } else if (c > 5) {
                // Relaxed rule: denominator is the shorter sentence.
                current = (intersection + 0.0) / Math.min(aList.size(), bList.size());
                // Fix: previously the intersection was set before this check, so a
                // rejected pair leaked a non-zero intersection with proportion 0.
                if (current >= threshold) {
                    result.setIntersection(intersection);
                    result.setCopyProportion(current);
                }
            }
        }
        return result;
    }


    /**
     * For one sentence of the inspected paper, finds the candidate sentence sharing
     * the most words and returns that shared-word count. As a side effect, every
     * candidate sentence whose copy proportion reaches the threshold is recorded in
     * {@code highRiskAndSentenceSetMap} under its risk level.
     *
     * @param aWordList                 words of the inspected sentence (a single element
     *                                  may hold raw Chinese text)
     * @param bSentenceList             candidate paper's sentences as word lists
     * @param threshold                 similarity threshold
     * @param highRiskAndSentenceSetMap risk-level label -> matched sentence pairs (mutated)
     * @param bFileName                 candidate file name, stored on recorded matches
     * @return shared-word count of the best-matching candidate sentence, or 0
     */
    public static int getMaxSentenceWordNum(List<String> aWordList, List<List<String>> bSentenceList, double threshold, Map<String, Set<RiskSentence>> highRiskAndSentenceSetMap, String bFileName) {
        int maxIntersection = 0;
        double maxThreshold = 0.0;
        for (List<String> bWordList : bSentenceList) {
            // Pick the intersection strategy for this candidate sentence; skip
            // sentences that fit neither the regular nor the Chinese path.
            IntersectionAndCopyProportion intersectionAndCopyProportion;
            if (bWordList.size() > 9) {
                if (aWordList.size() == 1 && bSentenceList.size() == 1) {
                    intersectionAndCopyProportion = getIntersectionChinaV2(aWordList, bWordList, threshold);
                } else {
                    intersectionAndCopyProportion = getIntersection(aWordList, bWordList, threshold);
                }
            } else if (bWordList.size() == 1 && StringUtil.isContainsChina(bWordList.get(0))) {
                // A short single-element "sentence" holding Chinese text.
                intersectionAndCopyProportion = getIntersectionChina(aWordList, bWordList.get(0), threshold);
            } else {
                continue;
            }
            int intersection = intersectionAndCopyProportion.getIntersection();
            if (intersection > 0) {
                double current = intersectionAndCopyProportion.getCopyProportion();
                // Track the shared-word count of the best-scoring candidate sentence.
                if (current > maxThreshold) {
                    maxThreshold = current;
                    maxIntersection = intersection;
                }
                if (current >= threshold) {
                    recordRiskSentence(current, threshold, aWordList, bWordList, bFileName, highRiskAndSentenceSetMap);
                }
            }
        }
        return maxIntersection;
    }

    // Records a qualifying sentence pair under its risk level; pairs rejected by
    // StrUtils.checkSentence (e.g. containing over-long tokens) are dropped.
    private static void recordRiskSentence(double current, double threshold, List<String> aWordList, List<String> bWordList, String bFileName, Map<String, Set<RiskSentence>> highRiskAndSentenceSetMap) {
        RiskSentence riskSentence = new RiskSentence();
        riskSentence.setASentence(join(aWordList));
        riskSentence.setBSentence(join(bWordList));
        if (!StrUtils.checkSentence(riskSentence.getASentence(), riskSentence.getBSentence())) {
            return;
        }
        riskSentence.setFileName(bFileName);
        riskSentence.setCopyProportion(takeFourDigits(current * 100));
        // Default is low risk; threshold+0.1 upgrades to medium, threshold+0.2 to high.
        // The order matters: check medium first, then high.
        String riskLevel = "低风险";
        riskLevel = current >= (threshold + 0.1) ? "中风险" : riskLevel;
        riskLevel = current >= (threshold + 0.2) ? "高风险" : riskLevel;
        putStrSetStr(highRiskAndSentenceSetMap, riskLevel, riskSentence);
    }

    /**
     * Joins a word list into a sentence separated by single spaces, guaranteeing
     * a trailing period.
     *
     * @param wordList words to join
     * @return the joined sentence, always ending with "."
     */
    public static String join(List<String> wordList) {
        final String sentence = CollUtil.join(wordList, " ").trim();
        if (sentence.endsWith(".")) {
            return sentence;
        }
        return sentence + ".";
    }

    // 取两个集合的交集的个数
//    public static int getIntersection(List<String> aList, List<String> bList) {
//        int a = 0;
//        for (String s : aList) {
//            a = (bList.contains(s)) ? (a + 1) : a;
//        }
//        int b = 0;
//        for (String s : bList) {
//            b = (aList.contains(s)) ? (b + 1) : b;
//        }
//        return Math.min(a, b);
//    }

    // 获取检测论文的句子中，在对比论文中，包含的最多单词个数的句子的单词的个数。
//    public static int getMaxSentenceWordNum(List<String> aWordList, List<List<String>> bSentenceList, double threshold, Map<String, Set<RiskSentence>> highRiskAndSentenceSetMap, String bFileName) {
//        // todo gcr  最大相同单词数量
//        int maxIntersection = 0;
//        // todo  gcr 相似度
//        double maxThreshold = 0.0;
//        // 在词形还原时，保留了逗号、分号、冒号三个，现在给予删除。
////        String aWords = CollUtil.join(aWordList, " ").replace(",", " ").replace(";", " ").replace(":", " ").replace("(", " ").replace(")", " ").replaceAll(" +", " ").trim();
////        String aWords = CollUtil.join(aWordList, " ");
//        for (List<String> bWordList : bSentenceList) {
//            if (bWordList.size() > 9) {
//                // 计算两个单词集合的交集，即相同的单词的个数。
////                int intersection = 0;
//                int intersection = getIntersection(aWordList, bWordList);
////                for (String bWord : bWordList) {
////                    // 因为在词形还原时，保留了逗号、分号、冒号三个，论文a的给予了删除，论文b的也给予删除。
//////                    intersection = (aWords.contains(bWord.replace(",", "").replace(";", "").replace(":", "").replace("(", "").replace(")", ""))) ? (intersection + 1) : intersection;
////                    intersection = (aWords.contains(bWord)) ? (intersection + 1) : intersection;
////                }
//                // 在特定情况下，交集的数量有可能大于aWordList的数量，所以，当交集的数量大于aWordList的数量时，取aWordList的数量，即最大也不能超过aWordList的数量。
//                intersection = Math.min(intersection, aWordList.size());
//                if (intersection > 0) {
//                    int maxWordListSize = Math.max(aWordList.size(), bWordList.size());
//                    double current = (intersection + 0.0) / (maxWordListSize + 0.0);
//                    // 如果当前的抄袭百分比（分母是最长的即最多单词个数的那个句子）大于设定的阈值，记录下最大的交集值返回。如果不大于阈值，全部是放弃的。
//                    if (current > threshold) {
//                        if (current > maxThreshold) {
//                            maxThreshold = current;
//                            maxIntersection = intersection;
//                        }
//                        RiskSentence riskSentence = new RiskSentence();
//                        riskSentence.setASentence(join(aWordList));
//                        riskSentence.setBSentence(join(bWordList));
//                        riskSentence.setFileName(bFileName);
//                        riskSentence.setCopyProportion(takeFourDigits(current * 100));
//                        String riskLevel = "低风险";
//                        // 默认风险为低风险，如果当前值大于阈值0.1，则为中风险，大于0.2，则为高风险。注意先后顺序，先中风险，再高风险，若弄反，就不会有高风险的了。
//                        riskLevel = current >= (threshold + 0.1) ? "中风险" : riskLevel;
//                        riskLevel = current >= (threshold + 0.2) ? "高风险" : riskLevel;
//                        putStrSetStr(highRiskAndSentenceSetMap, riskLevel, riskSentence);
//                    }
//                }
//            }
//        }
//        return maxIntersection;
//    }

    // Converts a PDF (already read as a list of text lines) into "sentences", where each
    // sentence is itself a list of words (List of Lists). English sentences are
    // lemmatized and split into words; Chinese sentences are kept whole (one string each).
    // 2024-07-26 15:57:14  invalid Chinese content is now detected and handled separately.
    public static List<List<String>> pdfToSentenceWords(List<String> strList) {
        // Join the whole paper into one big lowercase string (collapsing runs of spaces),
        // then split it into sentences on ". " (period + space).
        // Fetch the NSFC grant-template boilerplate sentences to be filtered out at the end.
        List<String> nationFuDel = getNationDel();
        String[] sentenceSplit = CollUtil.join(strList, " ").toLowerCase().replaceAll(" +", " ").split("\\. ");
        List<List<String>> resultList = new ArrayList<>();
        List<List<String>> resultListRes = new ArrayList<>();
        List<String> list = new ArrayList<>();
        for (String sentence : sentenceSplit) {
            if (!isReferences(sentence)) {
                // A sentence goes down the Chinese path only when it BOTH contains Chinese
                // characters AND a full-width period "。" (it is then re-split on "。").
                boolean isChina = false;
                if (StringUtil.isContainsChina(sentence) && sentence.contains("。")) {
                    isChina = true;
                }
                if (!isChina) {
                    // Non-reference English sentence: lemmatize via Stanford first, strip
                    // commas/semicolons/colons/parentheses, then split on spaces.
//                String[] words = getLemma(sentence).replace(",", ", ").replace(";", "; ").replace(":", ": ").replace("(", " (").replace(")", ") ").replaceAll(" +", " ")
//                        .replace(" ,", ",").replace(" ;", ";").replace(" :", ":").replace(" )", ")").replace("( ", "(").replaceAll(" +", " ").split(" ");
                    String[] words = getLemma(sentence).replace(",", " ").replace(";", " ").replace(":", " ").replace("(", " ").replace(")", " ").replaceAll(" +", " ").split(" ");
                    for (String word : words) {
                        if (word.length() > 1) {
                            list.add(word);
                        }
                    }
                    // Once at least 10 words have accumulated, emit them as one "sentence";
                    // shorter fragments keep accumulating into the next batch.
                    if (list.size() > 9) {
                        resultList.add(list);
                        list = new ArrayList<>();
                    }
                } else {
                    // Chinese path: re-split on the full-width period, keep each span whole.
                    String[] split = sentence.split("。");
                    for (String s : split) {
                        String words = getLemma(s);
                        // NOTE(review): this checks the lemmatized STRING length (> 9 chars),
                        // not a count of 10 words as in the English branch — confirm intended.
                        if (words.length() > 9) {
                            ArrayList<String> objects = new ArrayList<>();
                            objects.add(s);
                            resultList.add(objects);
                            // NOTE(review): resetting `list` here discards any English words
                            // accumulated before this Chinese sentence — confirm intended.
                            list = new ArrayList<>();
                        }

                    }
                }

            }
        }
        // Flush any trailing words that never reached the 10-word threshold.
        if (!list.isEmpty()) {
            resultList.add(list);
        }
        // Drop single-string (Chinese) sentences that contain NSFC template boilerplate.
        for (List<String> strings : resultList) {
            if (strings.size() == 1) {
                String s = strings.get(0);
                boolean isAdd = true;
                for (String string : nationFuDel) {
                    if (s.contains(string)) {
                        isAdd = false;
                    }
                }
                if (isAdd) {
                    resultListRes.add(strings);
                }
            } else {
                resultListRes.add(strings);
            }
        }

        return resultListRes;
    }

    // Converts a PDF (as a list of text lines) into sentences of words — Ruijin variant.
    // Ruijin mainly runs Chinese fund-document checks, so the full-width period "。" is
    // normalized into ". " before sentence splitting; any sentence that still contains
    // Chinese is kept whole (one string), while English sentences become word lists.
    // 2024-07-26 15:57:14  invalid Chinese content is handled separately.
    public static List<List<String>> pdfToSentenceWordsRuijin(List<String> strList) {
        // NSFC grant-template boilerplate sentences to filter out at the end.
        List<String> nationFuDel = getNationDel();
        // Join, lowercase, collapse spaces, normalize "。" to ". ", then split into sentences.
        // BUG FIX: this previously called split(". ") — String.split takes a REGEX and "."
        // matches ANY character, so the text was split after every character that preceded
        // a space, truncating the last letter of every word. The period is now escaped,
        // matching the behavior of pdfToSentenceWords.
        String[] sentenceSplit = CollUtil.join(strList, " ").toLowerCase().replaceAll(" +", " ").replace("。", " .").replace(" .", ". ").split("\\. ");
        List<List<String>> resultList = new ArrayList<>();
        List<List<String>> resultListRes = new ArrayList<>();
        List<String> list = new ArrayList<>();
        for (String sentence : sentenceSplit) {
            if (!isReferences(sentence)) {
                // Chinese path when the sentence contains Chinese characters OR a "。".
                boolean isChina = false;
                if (StringUtil.isContainsChina(sentence) || sentence.contains("。")) {
                    isChina = true;
                }
                if (!isChina) {
                    // English sentence: lemmatize with Stanford, strip punctuation, split on spaces.
                    String[] words = getLemma(sentence).replace(",", " ").replace(";", " ").replace(":", " ").replace("(", " ").replace(")", " ").replaceAll(" +", " ").split(" ");
                    for (String word : words) {
                        if (word.length() > 1) {
                            list.add(word);
                        }
                    }
                    // Once at least 10 words have accumulated, emit them as one "sentence";
                    // shorter fragments keep accumulating into the next batch.
                    if (list.size() > 9) {
                        resultList.add(list);
                        list = new ArrayList<>();
                    }
                } else {
                    // Chinese path: re-split on "。"; keep each span longer than 5 chars whole.
                    String[] split = sentence.split("。");
                    for (String s : split) {
                        ArrayList<String> objects = new ArrayList<>();
                        if (s.length() > 5) {
                            objects.add(s);
                            resultList.add(objects);
                        }
                        // NOTE(review): resetting `list` here discards partially accumulated
                        // English words — confirm intended.
                        list = new ArrayList<>();
                    }
                }

            }
        }
        // Flush trailing words that never reached the 10-word threshold.
        if (!list.isEmpty()) {
            resultList.add(list);
        }
        // Drop single-string (Chinese) sentences containing NSFC template boilerplate.
        for (List<String> strings : resultList) {
            if (strings.size() == 1) {
                String s = strings.get(0);
                boolean isAdd = true;
                for (String string : nationFuDel) {
                    if (s.contains(string)) {
                        isAdd = false;
                        break; // boilerplate found — no need to scan the rest
                    }
                }
                if (isAdd) {
                    resultListRes.add(strings);
                }
            } else {
                resultListRes.add(strings);
            }
        }

        return resultListRes;
    }

    // Converts a PDF (as a list of text lines) into sentences of words — Ruijin TEST variant.
    // 2024-07-26 15:57:14  invalid Chinese content is handled separately.
    // 2024-09-10 14:09:59  only the Chinese content really matters here; the rest is ignored.
    public static List<List<String>> pdfToSentenceWordsRuijinTest(List<String> strList) {
        // NSFC grant-template boilerplate sentences to filter out at the end.
        List<String> nationFuDel = getNationDel();
        // NOTE(review): near-duplicate of pdfToSentenceWordsRuijin; only the joining and
        // splitting differ — here "." is mapped to "。", the text is NOT lowercased, and
        // lines are joined without separators. Consider consolidating the three variants.
        String join = CollUtil.join(strList, "");
        join = join.replace(".", "。");
        String[] sentenceSplit = join.split("。");
//        String[] sentenceSplit = CollUtil.join(strList, " ").toLowerCase().replaceAll(" +", " ").replace("。", " .").replace(" .", ". ").split(". ");
        List<List<String>> resultList = new ArrayList<>();
        List<List<String>> resultListRes = new ArrayList<>();
        List<String> list = new ArrayList<>();
        for (String sentence : sentenceSplit) {
            if (!isReferences(sentence)) {
                // Chinese path when the sentence contains Chinese characters OR a "。".
                boolean isChina = false;
                if (StringUtil.isContainsChina(sentence) || sentence.contains("。")) {
                    isChina = true;
                }
                if (!isChina) {
                    // English sentence: lemmatize with Stanford, strip punctuation, split on spaces.
                    String[] words = getLemma(sentence).replace(",", " ").replace(";", " ").replace(":", " ").replace("(", " ").replace(")", " ").replaceAll(" +", " ").split(" ");
                    for (String word : words) {
                        if (word.length() > 1) {
                            list.add(word);
                        }
                    }
                    // Once at least 10 words have accumulated, emit them as one "sentence";
                    // shorter fragments keep accumulating into the next batch.
                    if (list.size() > 9) {
                        resultList.add(list);
                        list = new ArrayList<>();
                    }
                } else {
                    // Chinese path: re-split on "。"; keep each span longer than 5 chars whole.
                    String[] split = sentence.split("。");
                    for (String s : split) {
                        ArrayList<String> objects = new ArrayList<>();
                        if (s.length() > 5) {
                            objects.add(s);
                            resultList.add(objects);
                        }
                        // NOTE(review): resetting `list` here discards partially accumulated
                        // English words — confirm intended.
                        list = new ArrayList<>();
                    }
                }

            }
        }
        // Flush trailing words that never reached the 10-word threshold.
        if (!list.isEmpty()) {
            resultList.add(list);
        }
        // Drop single-string (Chinese) sentences containing NSFC template boilerplate.
        for (List<String> strings : resultList) {
            if (strings.size() == 1) {
                String s = strings.get(0);
                boolean isAdd = true;
                for (String string : nationFuDel) {
                    if (s.contains(string)) {
                        isAdd = false;
                    }
                }
                if (isAdd) {
                    resultListRes.add(strings);
                }
            } else {
                resultListRes.add(strings);
            }
        }

        return resultListRes;
    }

    // Returns the boilerplate sentences copied from NSFC (National Natural Science
    // Foundation of China) grant-application templates. Sentences containing any of
    // these are template text rather than author content, and are filtered out by the
    // pdfToSentenceWords* methods. The list is mutable, in template order.
    public static List<String> getNationDel() {
        List<String> boilerplate = new ArrayList<>();
        boilerplate.add("国家自然科学基金项目资金预算表编制说明");
        boilerplate.add("科学问题源于科研人员的灵感和新思想，且具有鲜明的首创性特征");
        boilerplate.add("科学问题源于世界科技前沿的热点、难点和新兴领域，且具有鲜");
        boilerplate.add("科学问题源于国家重大需求和经济主战场，且具有鲜明的需求导向、");
        boilerplate.add("科学问题源于多学科领域交叉的共性难题，具有鲜明的学科交叉特征");
        boilerplate.add("请按《国家自然科学基金项目资金预算表编制说明》中的要求，对各项支");
        boilerplate.add("参照以下提纲撰写，要求内容翔实、清晰，层次分明，");
        boilerplate.add("项目的研究内容、研究目标，以及拟解决的关键科学问题");
        return boilerplate;
    }

    // Heuristically decides whether a sentence is a bibliography/reference entry.
    // Returns true when it contains an explicit citation marker ("et al.", a DOI, an
    // https URL), or when more than 60% of its comma-separated chunks look like
    // abbreviated author names, or when more than 60% of its tokens are single letters.
    public static boolean isReferences(String str) {
        String lower = str.toLowerCase();
        // Fast path: explicit citation markers are conclusive on their own.
        if (lower.contains(" et al.") || lower.contains(" et al ") || lower.endsWith(" et al")
                || lower.contains(" et al,") || lower.contains("dx.doi.org/10.") || lower.contains("doi10.")
                || lower.contains("doi 10.") || lower.contains("doi:10.") || lower.contains("https //")
                || lower.contains("https: //") || lower.contains("https://")) {
            return true;
        }
        // Count chunks shaped like abbreviated authors: "surname x" / "x surname",
        // or "surname x y" / "x y surname" (one long part plus single letters).
        String[] chunks = lower.replace("(", ", ").replace(")", ", ").replace(";", ", ").replace(":", ", ")
                .replace(",", ", ").replaceAll(" +", " ").split(", ");
        int authorLike = 0;
        for (String chunk : chunks) {
            String[] parts = chunk.split(" ");
            if (parts.length == 2
                    && ((parts[0].length() > 1 && parts[1].length() == 1)
                        || (parts[1].length() > 1 && parts[0].length() == 1))) {
                authorLike++;
            } else if (parts.length == 3
                    && ((parts[0].length() > 1 && parts[1].length() == 1 && parts[2].length() == 1)
                        || (parts[2].length() > 1 && parts[0].length() == 1 && parts[1].length() == 1))) {
                authorLike++;
            }
        }
        // Count single-letter tokens after stripping punctuation to spaces.
        String[] tokens = lower.replace("(", " ").replace(")", " ").replace(";", " ").replace(",", " ").replace(":", " ")
                .replace("-", " ").replace("/", " ").replace("\\", " ").replace(".", " ").replaceAll(" +", " ").split(" ");
        int singleLetters = 0;
        for (String token : tokens) {
            if (token.length() == 1) {
                singleLetters++;
            }
        }
        // A sentence made mostly of abbreviated authors or lone letters is a reference.
        return authorLike / (double) chunks.length > 0.6
                || singleLetters / (double) tokens.length > 0.6;
    }

    // Appends tokens to `list`, dropping runs of consecutive single letters: a single
    // letter is kept only when the NEXT token is a longer word, or when it is the very
    // last token. Multi-letter tokens are always kept.
    public static void addWords(String[] words, List<String> list) {
        int last = words.length - 1;
        for (int i = 0; i <= last; i++) {
            String word = words[i];
            if (word.length() != 1) {
                list.add(word);
                continue;
            }
            // Single letter: keep it when followed by a real word, or at the end.
            if (i == last || words[i + 1].length() > 1) {
                list.add(word);
            }
        }
    }

    // Sorts a map by value in DESCENDING order, keeps only the top `len` entries, and
    // returns them in that order (LinkedHashMap preserves insertion order).
    public static Map<String, Integer> mapSortValueIntLen(Map<String, Integer> mapName, Integer len) {
        Map<String, Integer> sorted = new LinkedHashMap<>();
        List<Map.Entry<String, Integer>> entries = new ArrayList<>(mapName.entrySet());
        entries.sort(Collections.reverseOrder(Map.Entry.comparingByValue()));
        for (Map.Entry<String, Integer> entry : entries) {
            if (sorted.size() >= len) {
                break; // already collected the requested number of entries
            }
            sorted.put(entry.getKey(), entry.getValue());
        }
        return sorted;
    }

    // Adds a RiskSentence to the set stored under `riskLevel`, creating the (thread-safe)
    // set on first use.
    public static void putStrSetStr(Map<String, Set<RiskSentence>> highRiskAndSentenceSetMap, String riskLevel, RiskSentence riskSentence) {
        highRiskAndSentenceSetMap
                .computeIfAbsent(riskLevel, key -> new ConcurrentHashSet<>())
                .add(riskSentence);
    }

    // Extracts the text of a document as a list of non-empty lines.
    // A .pdf is converted via the external `mutool` binary into a sibling .txt file that
    // is then read back; any other extension is treated as a Word document.
    // 2024-07-18 16:35:29  shared by several call sites: Word via WordUtil, PDF via mutool.
    public static List<String> pdfToTxt(String directory, String pdfName) throws IOException {
        if (pdfName.toLowerCase().endsWith(".pdf")) {
            String name = pdfName.substring(0, pdfName.length() - ".pdf".length()); // strip ".pdf" to build the sibling .txt name
            String txtFile = name + ".txt";
            ProcessBuilder processBuilder = new ProcessBuilder();
            processBuilder.directory(new File(directory));
            // Pick the mutool binary by environment (server vs. local dev box).
            if (VerifyUtil.isServer()) {
                processBuilder.command("/usr/local/bin/mutool", "convert", "-o", txtFile, pdfName);
            } else {
                processBuilder.command("D:\\mutool\\mutool", "convert", "-o", txtFile, pdfName);
            }

            processBuilder.redirectErrorStream(true);
            Process process = processBuilder.start();
            // Drain mutool's combined output so the process cannot block on a full pipe.
            // try-with-resources guarantees the stream is closed even if reading fails
            // (the previous code leaked the stream on exception).
            try (InputStream inputStream = process.getInputStream()) {
                IoUtil.read(inputStream, StandardCharsets.UTF_8);
            }
//        Files.deleteIfExists(Paths.get(txtFile)); // delete the intermediate txt file
            // NOTE(review): txtFile is resolved against the JVM working directory here,
            // while mutool wrote it relative to `directory` — confirm the two match.
            List<String> strings = FileUtil.readLines(txtFile, "utf-8");
            List<String> resBack = new ArrayList<>();
            for (String string : strings) {
                if (StringUtil.isNotNull(string)) {
                    resBack.add(string);
                }
            }
            return resBack;
        } else {
            SmallTool.printMess("走word 拆分文字 应该是打印下a 文件名称+" + pdfName);
            List<String> strings = WordUtil.wordToTextStatic(pdfName);
            return strings;
        }
    }

    // Ruijin variant of pdfToTxt: identical extraction, but spurious spaces between
    // adjacent Chinese characters (an artifact of the PDF extraction) are removed from
    // every line before it is returned.
    public static List<String> pdfToTxtRuijin(String directory, String pdfName) throws IOException {
        if (pdfName.toLowerCase().endsWith(".pdf")) {
            String name = pdfName.substring(0, pdfName.length() - ".pdf".length()); // strip ".pdf" to build the sibling .txt name
            String txtFile = name + ".txt";
            ProcessBuilder processBuilder = new ProcessBuilder();
            processBuilder.directory(new File(directory));
            // Pick the mutool binary by environment (server vs. local dev box).
            if (VerifyUtil.isServer()) {
                processBuilder.command("/usr/local/bin/mutool", "convert", "-o", txtFile, pdfName);
            } else {
                processBuilder.command("D:\\mutool\\mutool", "convert", "-o", txtFile, pdfName);
            }

            processBuilder.redirectErrorStream(true);
            Process process = processBuilder.start();
            // Drain mutool's combined output so the process cannot block on a full pipe.
            // try-with-resources guarantees the stream is closed even if reading fails
            // (the previous code leaked the stream on exception).
            try (InputStream inputStream = process.getInputStream()) {
                IoUtil.read(inputStream, StandardCharsets.UTF_8);
            }
//        Files.deleteIfExists(Paths.get(txtFile)); // delete the intermediate txt file
            // NOTE(review): txtFile is resolved against the JVM working directory here,
            // while mutool wrote it relative to `directory` — confirm the two match.
            List<String> strings = FileUtil.readLines(txtFile, "utf-8");
            List<String> resBack = new ArrayList<>();
            for (String string : strings) {
                if (StringUtil.isNotNull(string)) {
                    // Strip extraction-artifact spaces between Chinese characters.
                    resBack.add(removeSpacesBetweenChineseChars(string));
                }
            }
            return resBack;
        } else {
            SmallTool.printMess("走word 拆分文字 应该是打印下a 文件名称+" + pdfName);
            List<String> strings = WordUtil.wordToTextStatic(pdfName);
            return strings;
        }
    }

    // Ad-hoc manual check for removeSpacesBetweenChineseChars.
    public static void main(String[] args) {
        String cleaned = removeSpacesBetweenChineseChars(" 这 是 一 个 测 试 this is a test");
        System.out.println(cleaned);
    }

    // Matches any whitespace run that sits between two CJK ideographs (U+4E00..U+9FFF).
    // Compiled once: Pattern.compile is comparatively expensive and this method is
    // called once per extracted line in pdfToTxtRuijin.
    private static final Pattern CHINESE_GAP_PATTERN =
            Pattern.compile("(?<=[\\u4e00-\\u9fff])\\s+(?=[\\u4e00-\\u9fff])");

    // Removes whitespace between adjacent Chinese characters while leaving all other
    // spacing (e.g. inside English text, or at the string's edges) untouched.
    public static String removeSpacesBetweenChineseChars(String str) {
        return CHINESE_GAP_PATTERN.matcher(str).replaceAll("");
    }

    // Rounds a positive value to 4 decimal places using HALF_UP; non-positive input
    // yields 0.0.
    public static double takeFourDigits(double d) {
        if (d > 0.0) {
            // BigDecimal.valueOf goes through the double's canonical decimal string,
            // avoiding the binary-representation artifacts of new BigDecimal(double)
            // (e.g. new BigDecimal(0.1) == 0.1000000000000000055511151231257827...).
            return BigDecimal.valueOf(d).setScale(4, RoundingMode.HALF_UP).doubleValue();
        }
        // NOTE(review): negative values are deliberately clamped to 0.0 — apparently
        // intended for proportions; confirm before reusing for signed quantities.
        return 0.0;
    }

    // Lazily builds the CoreNLP pipeline exactly once (initialization-on-demand holder).
    // Constructing StanfordCoreNLP loads large models; the previous code rebuilt it on
    // EVERY getLemma call, which dominated runtime. annotate() on a shared pipeline is
    // safe for concurrent use per the CoreNLP documentation.
    private static final class LemmaPipelineHolder {
        static final StanfordCoreNLP PIPELINE;

        static {
            Properties properties = new Properties();
            // Tokenization, sentence splitting, POS tagging and lemmatization.
            properties.put("annotators", "tokenize,ssplit,pos,lemma");
            PIPELINE = new StanfordCoreNLP(properties);
        }
    }

    // Stanford lemmatization: returns the input text with every token replaced by its
    // lemma (space-joined), post-processed by reLemma. Null or texts shorter than
    // 3 characters pass through unchanged.
    public static String getLemma(String text) {
        if (text == null || text.length() < 3) {
            return text;
        }
        StringBuilder lemma = new StringBuilder();
        Annotation document = new Annotation(CharUtil.reKw(text));
        LemmaPipelineHolder.PIPELINE.annotate(document);
        List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
        for (CoreMap sentence : sentences) {
            for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                String originalWord = token.get(CoreAnnotations.LemmaAnnotation.class);
                lemma.append(" ").append(originalWord);
            }
        }
        return reLemma(lemma.toString());
    }

    // Post-processes Stanford-lemmatized text: re-attaches punctuation that the
    // tokenizer padded with spaces, strips an unbalanced outer parenthesis on either
    // end, and restores quoted spans that tokenization pulled apart.
    public static String reLemma(String str) {
        // One long chain of literal replacements gluing punctuation back onto words;
        // the ordering matters (later replacements clean up the output of earlier ones).
        str = str.replace(" / ", "/").replace(" , ", ",").replace(" ' ", "'").replace(" '", "'").replace(" + ", "+").replace(" ; ", ";").replace(" ;", ";").replace(" ^ ", "^").replace(" ] ", "] ").replace(" } ", "} ").replace(" [ ", " [").replace(" { ", " {").replace(" \\ ", "\\").replace(" : ", ":").replace(" $ ", " $").replace(" % ", "% ").replace(" # ", "#").replace(" #", "#").replace(" - ", "-").replace(" . ", ".").replace("( ", "(").replace(" )", ")").replace("()", " ").replace(".)", ")").replaceAll(" +", " ").trim();
        // A leading "(" with no matching ")" is tokenization noise — drop it.
        if (str.startsWith("(") && !str.contains(")")) {
            str = str.substring(1);
        }
        // Likewise a trailing ")" with no "(". ("(".length() is simply 1.)
        if (str.endsWith(")") && !str.contains("(")) {
            str = str.substring(0, str.length() - "(".length());
        }
        // Stanford pads quotes with spaces on both sides; rebuild the string so that
        // segments alternate outside/inside quotes, e.g.:
        // ch " ildren's hos " pi " t " al -> ch "ildren's hos" pi "t" al
        if (str.contains("\"")) {
            List<String> list = Arrays.asList(str.split("\""));
            StringBuilder temp = new StringBuilder();
            for (int i = 0; i < list.size(); i++) {
                if (i % 2 == 0) {
                    // Even segments are outside quotes: trim, then open a quote.
                    temp.append(list.get(i).trim()).append(" \"");
                } else {
                    // Odd segments are inside quotes: trim, close the quote.
                    temp.append(list.get(i).trim()).append("\" ");
                }
            }
            // The loop always appends a quote after the final segment; remove it when
            // the original string did not actually end with one.
            if (!str.endsWith("\"") && temp.toString().endsWith("\"")) {
                temp = new StringBuilder(temp.substring(0, temp.length() - "\"".length()));
            }
            str = temp.toString().trim();
        }
        return str.trim();
    }

    // Result pair for one sentence comparison: the overlapping-word count and the
    // resulting copy proportion.
    @Data
    public static class IntersectionAndCopyProportion {
        private int intersection = 0; // number of words shared between the two sentences
        private double copyProportion = 0.0; // overlap ratio derived from the intersection
    }

    // Overall plagiarism summary: the aggregate copy ratio, the 5 most-copied papers,
    // and the flagged sentences grouped by risk level.
    @Data
    public static class ComprehensiveInfo {
        private List<CopyInfo> copyTop5List = new ArrayList<>(); // top-5 most similar papers
        // todo overall score
        private double copyProportion = 0.0; // aggregate copy ratio across all comparisons
        private List<RiskLevelInfo> riskLevelList = new ArrayList<>(); // sentences bucketed by risk level
    }

    // Plagiarism info for one compared paper: its rank, file name and copy ratio.
    @Data
    public static class CopyInfo {
        private int rank = 0; // 1-based position in the similarity ranking
        private String fileName = ""; // name of the compared paper's file
        // todo similarity
        private double copyProportion = 0.0; // copy ratio against this paper
    }

    // One risk level together with all sentence pairs assigned to that level.
    @Data
    public static class RiskLevelInfo {
        private String riskLevel = ""; // risk-level label (e.g. "低风险"/"中风险"/"高风险")
        private List<RiskSentence> sentenceList = new ArrayList<>(); // flagged sentence pairs at this level
    }

    // One flagged sentence pair: the sentence from the checked paper (A), the matching
    // sentence from the compared paper (B), B's file name, and the pair's copy ratio.
    // (Previous comment was a copy-paste of RiskLevelInfo's.)
    @Data
    public static class RiskSentence {
        private String aSentence = ""; // sentence from the paper under analysis
        private String bSentence = ""; // matching sentence from the compared paper
        private String fileName = ""; // file name of the compared paper
        private double copyProportion = 0.0; // copy ratio for this sentence pair
    }


}
