package operation;

import edu.stanford.nlp.dcoref.CorefChain;
import edu.stanford.nlp.dcoref.CorefCoreAnnotations;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations;
import edu.stanford.nlp.util.CoreMap;
import util.PipelineUtil;
import util.StopWordUtil;

import java.util.*;

/**
 * Extracts candidate noun phrases from English sentences using Stanford CoreNLP
 * POS tags and dependency parses.
 *
 * @author hzs
 * @version 1.0, created 2020/12/12 16:03
 */
public class TextOperation {

    /**
     * Shared CoreNLP pipeline (tokenize, ssplit, pos, lemma, parse) obtained from
     * {@link PipelineUtil}; lazily re-created in {@link #getNounPhraseFromSentence(String)}
     * if the utility returned {@code null}.
     */
    private StanfordCoreNLP pipeline = PipelineUtil.getPipeline("tokenize, ssplit, pos, lemma, parse");

    /**
     * Legacy token-window noun-phrase extraction, superseded by the dependency-tree
     * based {@link #getNounPhraseFromSentence(String)}.
     *
     * <p>The previous implementation was entirely commented out (dead code) and even
     * contained an always-false filter ({@code pos.startsWith("JJ") && pos.startsWith("HYPH")});
     * only this stub is kept so existing callers still compile.
     *
     * @param sentence ignored
     * @return always {@code null}
     * @deprecated use {@link #getNounPhraseFromSentence(String)} instead
     */
    @Deprecated
    public List<String> getNounPhraseFromSentence_old(String sentence) {
        return null;
    }

    /**
     * Extracts candidate noun phrases from {@code sentence}.
     *
     * <p>Each noun or gerund token is grouped with its adjective / noun / gerund /
     * hyphen dependents from the basic dependency graph; groups sharing a token are
     * merged; stop words, bare hyphens, and purely numeric phrases are dropped.
     *
     * @param sentence raw English text (may contain several sentences); {@code null}
     *                 or blank input yields an empty list
     * @return list of space-joined noun phrases, in sentence order
     */
    public List<String> getNounPhraseFromSentence(String sentence) {
        List<String> nouns = new ArrayList<>();
        if (sentence == null || sentence.trim().isEmpty()) {
            return nouns; // nothing to parse
        }
        if (pipeline == null) {
            // Fallback: build a pipeline directly when PipelineUtil supplied none.
            // Annotators: tokenize, ssplit (sentence split), pos, lemma, parse.
            Properties props = new Properties();
            props.put("annotators", "tokenize,ssplit,pos,lemma,parse");
            this.pipeline = new StanfordCoreNLP(props);
        }

        // Run all annotators over the text.
        Annotation document = new Annotation(sentence);
        pipeline.annotate(document);

        List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
        for (CoreMap coreMap : sentences) {
            // TreeMap keeps tokens ordered by sentence index, so the resulting phrase
            // order is deterministic (a HashMap's iteration order is not).
            Map<Integer, CoreLabel> indexCoreLabelMap = new TreeMap<>();
            for (CoreLabel token : coreMap.get(CoreAnnotations.TokensAnnotation.class)) {
                String pos = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
                // Keep nouns (N*) and gerunds (VBG) as phrase heads.
                if (pos.startsWith("N") || pos.startsWith("VBG")) {
                    indexCoreLabelMap.put(token.get(CoreAnnotations.IndexAnnotation.class), token);
                }
            }

            // Group each head with its modifier children from the dependency graph.
            SemanticGraph dependencies = coreMap.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
            List<List<CoreLabel>> lists = divideNounsByTree(indexCoreLabelMap, dependencies);

            // Post-processing: merge groups that share a token so each phrase is unique.
            List<List<CoreLabel>> posLists = new ArrayList<>();
            w:
            while (!lists.isEmpty()) {
                List<CoreLabel> list = lists.get(0);
                for (CoreLabel coreLabel : list) {
                    // Search the remaining groups (from index 1) for a shared token.
                    List<CoreLabel> overlapping = getListInLists(lists, coreLabel, 1);
                    if (overlapping != null) {
                        combineTwoList(list, overlapping);
                        lists.remove(overlapping);
                        continue w; // re-scan: the merged group may now overlap others
                    }
                }
                posLists.add(list);
                lists.remove(0);
            }

            // Post-processing: drop stop words and purely numeric phrases.
            Set<String> stopWords = StopWordUtil.getStopWords();
            for (List<CoreLabel> list : posLists) {
                StringBuilder stringBuilder = new StringBuilder();
                for (CoreLabel coreLabel : list) {
                    String s = coreLabel.get(CoreAnnotations.TextAnnotation.class);
                    if (!"-".equals(s)) { // skip bare hyphen tokens
                        stringBuilder.append(s).append(" ");
                    }
                }
                String s = stringBuilder.toString().trim();
                // Reject stop words and strings that are empty once digits are removed.
                if (!stopWords.contains(s) && !s.replaceAll("\\d", "").isEmpty()) {
                    nouns.add(s);
                }
            }
        }
        return nouns;
    }

    /**
     * Builds one token group per noun/gerund head: the head itself plus any direct
     * dependency children tagged as adjective (JJ*), noun (N*), gerund (VBG), or
     * hyphen (HYPH).
     *
     * @param indexCoreLabelMap head tokens keyed by sentence index
     * @param dependencies      basic dependency graph of the sentence
     * @return one mutable list of tokens per head, in map iteration order
     */
    private List<List<CoreLabel>> divideNounsByTree(Map<Integer, CoreLabel> indexCoreLabelMap, SemanticGraph dependencies) {
        List<List<CoreLabel>> result = new ArrayList<>();
        for (CoreLabel coreLabel : indexCoreLabelMap.values()) {
            List<CoreLabel> list = new ArrayList<>();
            list.add(coreLabel);
            // Pull in modifier children of this head.
            Set<IndexedWord> children = dependencies.getChildren(new IndexedWord(coreLabel));
            for (IndexedWord child : children) {
                String pos = child.get(CoreAnnotations.PartOfSpeechAnnotation.class);
                // No POS tag matches two of these prefixes, so a single test suffices.
                if (pos.startsWith("JJ") || pos.startsWith("N") || pos.startsWith("VBG") || pos.startsWith("HYPH")) {
                    list.add(new CoreLabel(child));
                }
            }
            result.add(list);
        }
        return result;
    }

    /**
     * Returns the first group at position {@code begin} or later that contains a token
     * with the same sentence index as {@code coreLabel}, or {@code null} if none does.
     */
    private List<CoreLabel> getListInLists(List<List<CoreLabel>> lists, CoreLabel coreLabel, int begin) {
        for (int i = begin; i < lists.size(); i++) {
            List<CoreLabel> list = lists.get(i);
            if (containCoreLabel(list, coreLabel)) {
                return list;
            }
        }
        return null;
    }

    /**
     * Tests whether {@code list} already holds a token with the same sentence index as
     * {@code coreLabel} (tokens are matched by IndexAnnotation, not object identity).
     */
    private boolean containCoreLabel(List<CoreLabel> list, CoreLabel coreLabel) {
        Integer index = coreLabel.get(CoreAnnotations.IndexAnnotation.class);
        for (CoreLabel label : list) {
            if (index.equals(label.get(CoreAnnotations.IndexAnnotation.class))) {
                return true;
            }
        }
        return false;
    }

    /**
     * Merges {@code list2} into {@code list1}, skipping tokens already present.
     *
     * @param list1 destination group, mutated in place
     * @param list2 source group, left unchanged
     * @return {@code list1}
     */
    private List<CoreLabel> combineTwoList(List<CoreLabel> list1, List<CoreLabel> list2) {
        for (CoreLabel coreLabel : list2) {
            if (!containCoreLabel(list1, coreLabel)) {
                list1.add(coreLabel);
            }
        }
        return list1;
    }

    /** Ad-hoc demo: extracts noun phrases from a sample sentence and prints them. */
    public static void main(String[] args) {
        final String sentence8 = "Major exclusion criteria were: ejection fraction less than 30%; exercise tolerance not limited by angina, symptomatic heart failure; treatment with more than 80 mg furosemide daily (or equivalent dose of another diuretic); left-ventricular wall thickness less than 8 mm (by echocardiography) in the region targeted for PTMR; renal insufficiency (serum creatinine >177 (μmol/L); aortic stenosis (valve area <1·5 cm ); severe peripheral vascular disease; evidence of left-ventricular thrombus; clinically significant ventricular arrhythmias; unstable angina (angina at rest requiring intravenous glyceryl trinitrate and anticoagulation); need for adjustment of antianginal medications within 2 weeks of screening; transmural myocardial infarction within 3 months; and non-transmural infarction within 6 weeks of study entry. ";
        TextOperation textOperation = new TextOperation();
        textOperation.getNounPhraseFromSentence(sentence8).forEach(System.out::println);
    }

}
