/**
 * @FileName: ExtractTrunk.java
 * @Package: top.swimmer.demo.nlp
 * @author youshipeng
 * @created 2017/4/27 8:32
 * <p>
 * Copyright 2016 ziroom
 */
package top.swimmer.demo.nlp;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.*;
import edu.stanford.nlp.trees.international.pennchinese.ChineseTreebankLanguagePack;
import top.swimmer.demo.entity.NatureWord;
import top.swimmer.demo.entity.SentenceTrunk;

import java.util.*;

import static com.hankcs.hanlp.HanLP.Config.CustomDictionaryPath;
//import static com.hankcs.hanlp.utility.Predefine.HANLP_DATA_ROOT_PATH;
//import static top.swimmer.demo.nlp.QuestionTemplate.getQuestionType;
import static top.swimmer.tools.collection.CollectionUtils.isEmpty;

/**
 * <p>Extracts the sentence trunk (predicate root, subject and object) from a Chinese
 * sentence by combining HanLP word segmentation with Stanford dependency parsing.</p>
 *
 * <PRE>
 * <BR>	修改记录
 * <BR>-----------------------------------------------
 * <BR>	修改日期			修改人			修改内容
 * </PRE>
 *
 * @author youshipeng
 * @since 1.0
 * @version 1.0
 */
public class ExtractTrunk {

    private static final LexicalizedParser lexicalizedParser;
    private static final GrammaticalStructureFactory grammaticalStructureFactory;

    /**
     * POS tags whose {@code dep}-relation dependents may serve as a fallback object
     * when no explicit object relation ({@code dobj} etc.) is present.
     * A {@link Set} gives O(1) membership checks and avoids the double-brace
     * anonymous-subclass antipattern of the original {@code ArrayList}.
     */
    private static final Set<String> depObjectTags = Collections.unmodifiableSet(
            new HashSet<>(Arrays.asList("PN", "NR", "NN", "NT", "PRP")));

    static {
        // Loads the Chinese factored parser model once; both the parser and the
        // grammatical-structure factory are thread-safe to share as statics.
        String modelPath = "chineseFactored.ser";
        lexicalizedParser = LexicalizedParser.loadModel(modelPath);
        grammaticalStructureFactory = new ChineseTreebankLanguagePack().grammaticalStructureFactory();
    }

    /**
     * Extracts the trunk (root predicate, subject, object) of a raw sentence.
     *
     * @param sentence the raw input sentence; all whitespace is stripped before segmentation
     * @return the extracted trunk; an empty {@link SentenceTrunk} when segmentation yields
     *         no words, or {@code null} when the parse contains no ROOT dependency
     */
    public static SentenceTrunk getSentenceTrunk(String sentence) {
        // BUG FIX: String.replace treats its argument as a literal string, so the
        // original replace("\\s+", "") never removed anything. replaceAll applies
        // the regex \s+ and actually strips all whitespace.
        sentence = sentence.replaceAll("\\s+", "");
        List<NatureWord> words = seg(sentence);
        return getSentenceTrunk(words);
    }

    /**
     * Parses the segmented words and walks the collapsed typed dependencies to pick out
     * the subject and object attached to the ROOT word.
     *
     * @param words segmented words with POS natures; may be empty
     * @return the trunk, or {@code null} when no ROOT dependency exists in the parse
     */
    private static SentenceTrunk getSentenceTrunk(List<NatureWord> words) {
        SentenceTrunk sentenceTrunk = new SentenceTrunk();
        if (isEmpty(words)) {
            return sentenceTrunk;
        }

        Tree tree = lexicalizedParser.apply(words);

        GrammaticalStructure grammaticalStructure = grammaticalStructureFactory.newGrammaticalStructure(tree);
        Collection<TypedDependency> dependencies = grammaticalStructure.typedDependenciesCollapsed();

        IndexedWord rootWord = findMainWord(dependencies);
        if (rootWord == null) {
            return null;
        }
        sentenceTrunk = new SentenceTrunk(rootWord);

        for (TypedDependency dependency : dependencies) {
            String relnName = dependency.reln().getShortName();
            IndexedWord gov = dependency.gov();
            IndexedWord dep = dependency.dep();

            // Only relations governed directly by the ROOT word contribute to the trunk.
            if (rootWord.equals(gov)) {
                switch (relnName) {
                    case "nsubjpass":
                    case "dobj":
                    case "attr":
                        sentenceTrunk.object = dep;
                        break;
                    case "dep":
                        // Generic "dep" is only accepted as an object fallback for
                        // noun-like tags, and never overwrites an explicit object.
                        if (depObjectTags.contains(dep.tag()) && sentenceTrunk.object == null) {
                            sentenceTrunk.object = dep;
                        }
                        break;
                    case "nsubj":
                    case "top":
                        sentenceTrunk.subject = dep;
                        break;
                }
            }

            // Both slots filled — no later dependency can change the result.
            if (sentenceTrunk.object != null && sentenceTrunk.subject != null) {
                break;
            }
        }

        combineNN(dependencies, sentenceTrunk.subject);
        combineNN(dependencies, sentenceTrunk.object);
        if (!sentenceTrunk.isDone()) sentenceTrunk.done();

        return sentenceTrunk;
    }

    /**
     * Prepends the first noun-compound ({@code nn}) modifier of {@code target} onto its
     * value, e.g. merging "冰箱" governed by "厨房" into one compound. Mutates
     * {@code target} in place and stops after the first match.
     *
     * @param dependencies all typed dependencies of the parse
     * @param target the word to extend; no-op when {@code null}
     */
    private static void combineNN(Collection<TypedDependency> dependencies, IndexedWord target) {
        if (target == null)
            return;
        for (TypedDependency dependency : dependencies) {
            // Each dependency links a governor node to a dependent node via a relation.
            IndexedWord gov = dependency.gov();
            String shortName = dependency.reln().getShortName();
            IndexedWord dep = dependency.dep();
            // Single-case switch replaced with a plain condition.
            if (target.equals(gov) && "nn".equals(shortName)) {
                target.setValue(dep.toString(CoreLabel.OutputFormat.VALUE) + target.value());
                return;
            }
        }
    }

    /**
     * Finds the dependent of the ROOT relation — the main predicate of the sentence.
     *
     * @param dependencies all typed dependencies of the parse
     * @return the ROOT word, or {@code null} when the parse has no ROOT relation
     */
    private static IndexedWord findMainWord(Collection<TypedDependency> dependencies) {
        for (TypedDependency dependency : dependencies) {
            // GrammaticalRelation.ROOT is a singleton, so identity comparison is safe here.
            if (dependency.reln() == GrammaticalRelation.ROOT) {
                return dependency.dep();
            }
        }

        return null;
    }

    /**
     * Segments a sentence with HanLP into words tagged with their natures (POS).
     *
     * @param sentence the whitespace-free sentence to segment
     * @return one {@link NatureWord} per HanLP term, in sentence order
     */
    private static List<NatureWord> seg(String sentence) {
        List<Term> terms = HanLP.segment(sentence);
        // Plain presized build instead of double-brace initialization, which creates
        // an anonymous subclass capturing the enclosing scope.
        List<NatureWord> words = new ArrayList<>(terms.size());
        for (Term term : terms) {
            words.add(new NatureWord(term.word, term.nature));
        }
        return words;
    }

    /**
     * Ad-hoc smoke test: extracts and prints the trunk of a few repair-request sentences.
     */
    public static void main(String[] args) {
//        HANLP_DATA_ROOT_PATH = "E:\\elasticsearch-2.3.3\\plugins\\HanLP\\";

        CustomDictionaryPath = new String[] {
                "E:\\elasticsearch-2.3.3\\plugins\\HanLP\\data\\dictionary\\custom\\CustomDictionary.txt"
        };

        String[] testCaseArray = {
                "我家里厨房的冰箱坏了",
                "你好，我家壁挂炉坏了",
                "你好，我把洗衣机弄坏了",
                "你好，帮我把衣柜弄好吧",
                "你好，我家的微波炉需要修理了",
                "你好，帮我修理一下我家密码锁吧",
                "我要报修",
                "我要预约维修"
        };
        for (String testCase : testCaseArray) {
            SentenceTrunk sentenceTrunk = getSentenceTrunk(testCase);
            // BUG FIX: getSentenceTrunk returns null when the parse has no ROOT
            // relation; dereferencing sentenceTrunk.result unguarded threw an NPE.
            if (sentenceTrunk == null) {
                System.out.printf("%s\t<no trunk>%n", testCase);
                continue;
            }
            System.out.printf("%s\t%s\t%s\n", testCase, sentenceTrunk, sentenceTrunk.result);
        }
    }
}