package com.zhujiahui.MavenZhuNLP.parser;

import com.zhujiahui.MavenZhuNLP.domain.DependencyItem;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TypedDependency;
import edu.stanford.nlp.trees.international.pennchinese.ChineseGrammaticalStructure;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

/**
 * Dependency parser backed by the Stanford lexicalized parser.
 *
 * <p>By default loads the Chinese (Xinhua) factored model with built-in word
 * segmentation; an alternative model path may be supplied via the overloaded
 * constructor. Each parse produces a flat list of {@link DependencyItem}s, one
 * per typed dependency relation.
 *
 * <p>NOTE(review): the {@code parser(...)} methods presumably implement
 * {@code IDependencyParser}; {@code @Override} not added because the interface
 * declaration is not visible here — confirm and annotate.
 */
public class StanfordDependencyParser implements IDependencyParser {

    /** Classpath location of the default Chinese (Xinhua) segmenting model. */
    private static final String DEFAULT_MODEL_PATH =
            "edu/stanford/nlp/models/lexparser/xinhuaFactoredSegmenting.ser.gz";

    /** Loaded once at construction time; never reassigned. Model loading is expensive. */
    private final LexicalizedParser lexicalizedParser;

    /**
     * Creates a parser using the default Chinese (Xinhua) segmenting model.
     */
    public StanfordDependencyParser() {
        this(DEFAULT_MODEL_PATH);
    }

    /**
     * Creates a parser backed by an arbitrary serialized Stanford model.
     *
     * @param modelPath classpath or file-system location of a serialized
     *                  lexicalized-parser model ({@code .ser.gz})
     * @throws IllegalArgumentException if {@code modelPath} is null or empty
     */
    public StanfordDependencyParser(String modelPath) {
        if (modelPath == null || modelPath.isEmpty()) {
            throw new IllegalArgumentException("modelPath must not be null or empty");
        }
        this.lexicalizedParser = LexicalizedParser.loadModel(modelPath);
    }

    /**
     * Converts a constituency parse tree into a list of dependency items.
     *
     * @param parseTree the parse tree produced by the lexicalized parser
     * @return one {@link DependencyItem} per typed dependency, holding the
     *         relation short name plus governor/dependent words and their
     *         1-based token indices (as strings)
     */
    private List<DependencyItem> dependencyParse(Tree parseTree) {
        ChineseGrammaticalStructure structure = new ChineseGrammaticalStructure(parseTree);
        // Collapsed form folds prepositions/conjunctions into the relation names.
        Collection<TypedDependency> dependencies = structure.typedDependenciesCollapsed();

        // Presize: result has exactly one item per dependency.
        List<DependencyItem> parseResult = new ArrayList<DependencyItem>(dependencies.size());
        for (TypedDependency dependency : dependencies) {
            DependencyItem item = new DependencyItem();
            item.tag = dependency.reln().getShortName();
            item.formerWord = dependency.gov().word();
            item.formerWordPosition = String.valueOf(dependency.gov().index());
            item.latterWord = dependency.dep().word();
            item.latterWordPosition = String.valueOf(dependency.dep().index());
            parseResult.add(item);
        }

        return parseResult;
    }

    /**
     * Parses an already-tokenized sentence.
     *
     * @param tokenList the sentence as a list of tokens
     * @return the sentence's typed dependencies
     */
    public List<DependencyItem> parser(List<String> tokenList) {
        Tree parseTree = this.lexicalizedParser.parseStrings(tokenList);
        return dependencyParse(parseTree);
    }

    /**
     * Parses a raw sentence string; the model performs segmentation itself.
     *
     * @param sentence the raw (unsegmented) sentence
     * @return the sentence's typed dependencies
     */
    public List<DependencyItem> parser(String sentence) {
        Tree parseTree = this.lexicalizedParser.parse(sentence);
        return dependencyParse(parseTree);
    }
}
