package com.yuntsg.ruiijn.paperana.utils;


import cn.hutool.core.collection.CollectionUtil;
import cn.hutool.core.io.FileUtil;
import com.hankcs.hanlp.seg.common.Term;
import com.hankcs.hanlp.tokenizer.StandardTokenizer;
import common.util.StringUtil;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;
import lombok.extern.slf4j.Slf4j;

import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static com.yuntsg.ruiijn.paperana.test.extractEnCn.extractCnEn;
import static com.yuntsg.ruiijn.paperana.test.extractEnCn.isContainChinese;

/**
 * @Author: gcr
 * @Date: 2023/8/30 11:14
 */
@Slf4j
public class StrUtils {

    // 2023-08-31: string utilities for the paper plagiarism-check pipeline.

    /** Matches any Chinese (CJK unified ideograph) character; compiled once instead of per call. */
    private static final Pattern ZH_PATTERN = Pattern.compile("[\\u4e00-\\u9fa5]");

    /**
     * Shared Stanford CoreNLP lemmatization pipeline, created lazily.
     * Building a pipeline loads large models and is very expensive, so it must not be
     * recreated on every {@link #getLemma(String)} call (the previous code did exactly that).
     */
    private static volatile StanfordCoreNLP lemmaPipeline;

    /** Returns the shared lemmatization pipeline, creating it on first use (double-checked locking). */
    private static StanfordCoreNLP lemmaPipeline() {
        StanfordCoreNLP pipeline = lemmaPipeline;
        if (pipeline == null) {
            synchronized (StrUtils.class) {
                pipeline = lemmaPipeline;
                if (pipeline == null) {
                    Properties properties = new Properties();
                    // Tokenization, sentence split, POS tagging and lemmatization.
                    properties.put("annotators", "tokenize,ssplit,pos,lemma");
                    pipeline = new StanfordCoreNLP(properties);
                    lemmaPipeline = pipeline;
                }
            }
        }
        return pipeline;
    }

    /**
     * Splits a sentence on single spaces and returns the lemma of every word longer
     * than 3 characters, preserving first-seen order.
     *
     * @param string sentence to split; null/empty yields an empty set
     * @return ordered set of lemmas of the words longer than 3 characters
     */
    public static Set<String> sentenceParseWord(String string) {
        Set<String> lemmas = new LinkedHashSet<>();
        if (StringUtil.isNull(string)) {
            return lemmas;
        }
        for (String word : string.split(" ")) {
            if (word.length() > 3) {
                lemmas.add(getLemma(word));
            }
        }
        return lemmas;
    }

    /**
     * Returns true when the string contains at least one Chinese character.
     * Fixed: the pattern is now a shared constant, and the while-loop that acted
     * as an if is replaced by a direct {@code find()}.
     */
    public static boolean existZH(String str) {
        return ZH_PATTERN.matcher(str).find();
    }

    /**
     * Whether this deployment is the "ruijin" server.
     * Dynamic detection (server path / marker directory probes) is currently
     * disabled and the result is hard-coded to {@code true}.
     */
    public static boolean isRuiJin() {
        // NOTE(review): restore dynamic detection if per-server behavior is needed again.
        return true;
    }

    /**
     * Validates that an ES-retrieved sentence and the original sentence do not differ
     * too much in word count; overly mismatched candidates are discarded.
     *
     * @param sentence  original sentence (space-separated words)
     * @param sentence2 candidate sentence retrieved from ES
     * @return true when the longer/shorter word-count ratio is below 3.5
     */
    public static boolean checkSentence(String sentence, String sentence2) {
        int aWords = sentence.split(" ").length;
        int bWords = sentence2.split(" ").length;
        if (aWords == 0 || bWords == 0) {
            return false;
        }
        // Fixed: the previous code converted the counts through strings ("" + size)
        // just to divide them; a plain double division is equivalent.
        double ratio = (double) Math.max(aWords, bWords) / Math.min(aWords, bWords);
        return ratio < 3.5;
    }

    /** Returns true when the marker directory that identifies our own server exists. */
    public static boolean checkIsMyServer() {
        if (FileUtil.exist("/home/isdao19/")) {
            SmallTool.printMess("是我们的机器isdao19");
            return true;
        }
        return false;
    }

    /**
     * Splits a sentence on single spaces into a mutable word list.
     *
     * @param string sentence; null/empty yields an empty list
     * @return mutable list of the space-separated tokens
     */
    public static List<String> sentenceParseWordList(String string) {
        if (StringUtil.isNull(string)) {
            return new ArrayList<>();
        }
        return new ArrayList<>(Arrays.asList(string.split(" ")));
    }

    /**
     * Replaces common punctuation/separator characters with spaces and collapses
     * runs of spaces, so downstream word splitting sees clean tokens.
     *
     * @param s raw text; null/empty yields ""
     */
    public static String repOtherStr(String s) {
        if (StringUtil.isNull(s)) {
            return "";
        }
        return s.replace(",", " ").replace("(", " ").replace(")", " ").replace("<", " ").replace(">", " ").replace(";", " ")
                .replace(".", " ").replace(":", " ").replace("/", " ").replace("\\", " ").replaceAll(" +", " ").trim();
    }

    /**
     * Intended to strip characters other than letters/digits/Chinese, but the
     * filter is currently disabled: the input is returned unchanged.
     */
    public static String DelOtherLower(String str) {
        // Disabled filter was: str.replaceAll("[^a-zA-Z0-9\\u4E00-\\u9FA5/,\\s/.]", "")
        return str;
    }

    /**
     * Counts the words longer than 3 characters in a space-separated sentence.
     *
     * @param string sentence; null/empty yields 0
     */
    public static Integer sentenceParseWordSize(String string) {
        if (StringUtil.isNull(string)) {
            return 0;
        }
        int count = 0;
        for (String word : string.split(" ")) {
            if (word.length() > 3) {
                count++;
            }
        }
        return count;
    }

    /**
     * Computes the maximum number of shared words between the source token list
     * and any one of the candidate token lists.
     *
     * @param strFrom     tokens of the source sentence
     * @param compareList token lists of the candidate sentences
     * @return largest intersection size (0 when compareList is empty)
     */
    public static int sentenceParseWord(List<String> strFrom, List<List<String>> compareList) {
        int maxSameSize = 0;
        for (List<String> candidate : compareList) {
            int same = CollectionUtil.intersection(strFrom, candidate).size();
            if (same > maxSameSize) {
                maxSameSize = same;
            }
        }
        return maxSameSize;
    }

    /**
     * Counts how many tokens of strFrom (duplicates included) are present in compareList.
     *
     * @param strFrom     tokens of the source sentence
     * @param compareList token set of the comparison text
     */
    public static int sentenceParseWordNew(List<String> strFrom, Set<String> compareList) {
        int sameSize = 0;
        for (String token : strFrom) {
            if (compareList.contains(token)) {
                sameSize++;
            }
        }
        return sameSize;
    }

    /**
     * Lemmatizes both sentences with Stanford CoreNLP and returns the percentage
     * (two decimals) of source lemmas that also occur in the comparison sentence.
     *
     * @param org     original sentence
     * @param compare sentence to compare against
     * @return matched-lemma percentage relative to the source token count
     */
    public static Double sentenceParseWordNewListStanf(String org, String compare) {
        List<String> strFrom = Arrays.asList(lemmatizeWords(org).split(" "));
        List<String> compareList = Arrays.asList(lemmatizeWords(compare).split(" "));
        int sameSize = 0;
        for (String token : strFrom) {
            if (compareList.contains(token)) {
                sameSize++;
            }
        }
        return PublicUtils.takeTwoDigits((double) sameSize / strFrom.size() * 100);
    }

    /** Cleans punctuation from a sentence and lemmatizes every word, space separated. */
    private static String lemmatizeWords(String sentence) {
        // StringBuilder instead of StringBuffer: local-only mutation needs no synchronization.
        StringBuilder lemmas = new StringBuilder();
        for (String word : repOtherStr(sentence).split(" ")) {
            lemmas.append(getLemma(word)).append(" ");
        }
        return lemmas.toString();
    }

    /**
     * Tokenizes both sentence-fragment lists (Chinese/English aware) and returns
     * the percentage (two decimals) of matched tokens, normalized by the token
     * count of the side whose raw input list is larger.
     *
     * @param org     raw fragments of the original sentence
     * @param compare raw fragments of the comparison sentence
     */
    public static Double sentenceParseWordNewList(List<String> org, List<String> compare) {
        List<String> strFrom = cnEnToListWord(org);
        List<String> compareList = cnEnToListWord(compare);
        int sameSize = 0;
        for (String token : strFrom) {
            if (compareList.contains(token)) {
                sameSize++;
            }
        }
        // Normalize by the token count of the longer input so the score stays <= 100.
        int denominator = (org.size() >= compare.size()) ? strFrom.size() : compareList.size();
        return PublicUtils.takeTwoDigits((double) sameSize / denominator * 100);
    }


    /**
     * Splits mixed Chinese/English fragments into word tokens: each fragment is
     * split into sentences ("。" is normalized to ". "), sentences containing
     * Chinese are pre-processed via extractCnEn, then HanLP's StandardTokenizer
     * segments the text. Leading/trailing commas are stripped and one-character
     * tokens are dropped.
     *
     * @param strFrom raw text fragments
     * @return flat token list in encounter order
     */
    public static List<String> cnEnToListWord(List<String> strFrom) {
        List<String> res = new ArrayList<>();
        for (String from : strFrom) {
            for (String sentence : from.replace("。", ". ").split("\\. ")) {
                List<String> tokens = new ArrayList<>();
                if (isContainChinese(sentence)) {
                    // extractCnEn may also add tokens into the list it receives.
                    sentence = extractCnEn(sentence, tokens);
                }
                for (Term term : StandardTokenizer.segment(sentence)) {
                    String word = term.word.trim();
                    if (word.startsWith(",")) {
                        word = word.substring(1).trim();
                    }
                    if (word.endsWith(",")) {
                        word = word.substring(0, word.length() - 1).trim();
                    }
                    if (word.length() > 1) {
                        tokens.add(word);
                    }
                }
                res.addAll(tokens);
            }
        }
        return res;
    }


    /**
     * Counts how many tokens of org (duplicates included) also appear in compare.
     */
    public static Integer sentenceParseWordNewListSameSize(List<String> org, List<String> compare) {
        int sameSize = 0;
        for (String token : org) {
            if (compare.contains(token)) {
                sameSize++;
            }
        }
        return sameSize;
    }


    /**
     * Counts the tokens of strFrom that are also present in compareList.
     *
     * @param strFrom     token set of the source sentence
     * @param compareList token set of the comparison text
     */
    public static int sentenceParseWordNewSet(Set<String> strFrom, Set<String> compareList) {
        int sameSize = 0;
        for (String token : strFrom) {
            if (compareList.contains(token)) {
                sameSize++;
            }
        }
        return sameSize;
    }


    /**
     * Tokenizes both fragment lists and returns the size of their intersection.
     *
     * @param strFrom     raw fragments of the source sentence
     * @param compareList raw fragments of the comparison sentence
     */
    public static Integer sentenceParseWordSin(List<String> strFrom, List<String> compareList) {
        Collection<String> intersection =
                CollectionUtil.intersection(cnEnToListWord(strFrom), cnEnToListWord(compareList));
        return intersection.size();
    }

    /**
     * Lemmatizes every entry of the list with Stanford CoreNLP.
     *
     * @param res words to lemmatize
     * @return new list with each word replaced by its lemma
     */
    public static List<String> standBackStr(List<String> res) {
        List<String> back = new ArrayList<>(res.size());
        for (String word : res) {
            back.add(getLemma(word));
        }
        return back;
    }


    /**
     * Returns the Stanford CoreNLP lemmatized form of the text (lemmas joined by
     * spaces, punctuation spacing repaired by {@link #reLemma(String)}). Strings
     * shorter than 3 characters are returned unchanged.
     *
     * Fixed: the pipeline is created once and reused; the previous code built a
     * new StanfordCoreNLP (reloading all models) on every call.
     */
    public static String getLemma(String text) {
        if (text == null || text.length() < 3) {
            return text;
        }
        Annotation document = new Annotation(CharUtil.reKw(text));
        lemmaPipeline().annotate(document);
        StringBuilder lemma = new StringBuilder();
        for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
            for (CoreLabel token : sentence.get(CoreAnnotations.TokensAnnotation.class)) {
                lemma.append(" ").append(token.get(CoreAnnotations.LemmaAnnotation.class));
            }
        }
        return reLemma(lemma.toString());
    }

    /**
     * Repairs the spacing Stanford CoreNLP inserts around punctuation, e.g.
     * "ganoderma lucidum ( leyss . ex fr . ) karst" -> "ganoderma lucidum (leyss.ex fr) karst",
     * and re-balances double quotes that were padded on both sides.
     */
    public static String reLemma(String str) {
        str = str.replace(" / ", "/")
                .replace(" , ", ",")
                .replace(" ' ", "'")
                .replace(" '", "'")
                .replace(" + ", "+")
                .replace(" ; ", ";")
                .replace(" ;", ";")
                .replace(" ^ ", "^")
                .replace(" ] ", "] ")
                .replace(" } ", "} ")
                .replace(" [ ", " [")
                .replace(" { ", " {")
                .replace(" \\ ", "\\")
                .replace(" : ", ":")
                .replace(" $ ", " $")
                .replace(" % ", "% ")
                .replace(" # ", "#")
                .replace(" #", "#")
                .replace(" - ", "-")
                .replace(" . ", ".")
                .replace("( ", "(")
                .replace(" )", ")")
                .replace("()", " ")
                .replace(".)", ")")
                .replaceAll(" +", " ").trim();
        // Drop an unmatched leading "(" or trailing ")".
        if (str.startsWith("(") && !str.contains(")")) {
            str = str.substring(1);
        }
        if (str.endsWith(")") && !str.contains("(")) {
            str = str.substring(0, str.length() - "(".length());
        }
        // Re-attach text around double quotes: even segments get the quote glued on
        // their right, odd (quoted) segments on their left, e.g.
        // ch " ildren's hos " pi " t " al -> ch "ildren's hos" pi "t" al
        if (str.contains("\"")) {
            List<String> parts = Arrays.asList(str.split("\""));
            StringBuilder temp = new StringBuilder();
            for (int i = 0; i < parts.size(); i++) {
                if (i % 2 == 0) {
                    temp.append(parts.get(i).trim()).append(" \"");
                } else {
                    temp.append(parts.get(i).trim()).append("\" ");
                }
            }
            // The loop always appends a quote; remove it when the input did not end with one.
            if (!str.endsWith("\"") && temp.toString().endsWith("\"")) {
                temp = new StringBuilder(temp.substring(0, temp.length() - "\"".length()));
            }
            str = temp.toString().trim();
        }
        return str.trim();
    }

}
