package pingce.share.util;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.Word;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreebankLanguagePack;
import edu.stanford.nlp.trees.TypedDependency;
import edu.stanford.nlp.trees.international.pennchinese.ChineseTreebankLanguagePack;

/**
 * Wrapper around the Stanford parser. Sentences shorter than 80 characters are
 * first analysed with the Factored parser; if that attempt fails, or the
 * sentence is 80 characters or longer, the PCFG parser is used instead.
 * Records the raw text of a sentence and the parsed result of that sentence.
 * @author ucai
 *
 */
public class SentenceProcessor {
	public Splitter splitter;
	/** Factored parser: more accurate, but limited to shorter sentences. */
	public LexicalizedParser lp;
	/** PCFG parser: faster fallback for long sentences or Factored failures. */
	public LexicalizedParser pp;
	public GrammaticalStructureFactory gsf;
	/** Matches #...# tagged tokens (no whitespace between the hashes). */
	public static Pattern p = Pattern.compile("#\\S+#");

	/** Sentences shorter than this are tried with the Factored parser first. */
	private static final int FACTORED_MAX_LENGTH = 80;
	/** Maximum sentence length accepted by the PCFG fallback parser. */
	private static final int PCFG_MAX_LENGTH = 140;
	/** Above this length we warn that the sentence is likely too long. */
	private static final int WARN_LENGTH = 120;

	/**
	 * Loads the word splitter, both Chinese grammar models, and the
	 * grammatical-structure factory used for dependency extraction.
	 * Model loading reads from the classpath and may be slow.
	 */
	public SentenceProcessor() {
		splitter = SplitterFactory.getSplitter("ICT", "userdict.txt");

		String grammar = "edu/stanford/nlp/models/lexparser/xinhuaFactored.ser.gz";
		String[] options = { "-maxLength", String.valueOf(FACTORED_MAX_LENGTH), "-MAX_ITEMS", "800000" };
		lp = LexicalizedParser.loadModel(grammar, options);

		String pcfgGrammar = "edu/stanford/nlp/models/lexparser/xinhuaPCFG.ser.gz";
		String[] pcfgOptions = { "-maxLength", String.valueOf(PCFG_MAX_LENGTH) };
		pp = LexicalizedParser.loadModel(pcfgGrammar, pcfgOptions);

		TreebankLanguagePack tlp = new ChineseTreebankLanguagePack();
		gsf = tlp.grammaticalStructureFactory();
	}

	/**
	 * Returns {@code true} when a parse attempt failed: either no tree was
	 * produced, or the tree's score is NaN (which the Factored parser yields
	 * when it cannot find a valid analysis).
	 *
	 * @param parse the parse tree to inspect, possibly {@code null}
	 * @return {@code true} if the parse should be considered a failure
	 */
	public boolean checkFailure(Tree parse) {
		// Test the score directly rather than string-comparing "NaN".
		return parse == null || Double.isNaN(parse.score());
	}

	/**
	 * Parses a pre-segmented sentence. Sentences shorter than
	 * {@value #FACTORED_MAX_LENGTH} characters are tried with the Factored
	 * parser first; on failure (or for longer sentences) the PCFG parser is
	 * used as a fallback.
	 *
	 * @param splitResult the sentence already segmented into words
	 * @param rawStr      the raw (unsegmented) sentence text; its length
	 *                    selects which parser is attempted
	 * @return the parse tree, or {@code null} if the sentence is empty or
	 *         both parsers failed
	 */
	public Tree getParsedTreeRaw(String[] splitResult, String rawStr) {
		List<HasWord> sentence = new ArrayList<HasWord>();
		for (String word : splitResult) {
			sentence.add(new Word(word));
		}

		Tree parse = null;
		if (!sentence.isEmpty()) {
			if (rawStr.length() < FACTORED_MAX_LENGTH) {
				parse = lp.apply(sentence);
			}
			// Covers both cases: the Factored attempt failed (NaN score) and
			// long sentences that skipped the Factored parser (parse == null).
			if (checkFailure(parse)) {
				parse = pp.apply(sentence);
				System.out.println("using pcfg");
			}
		}

		if (rawStr.length() > WARN_LENGTH) {
			System.err.println("sentence is too long：" + rawStr + ":" + rawStr.length());
		}

		return parse;
	}

	/**
	 * Extracts CC-processed typed dependencies from a parse tree.
	 *
	 * @param parse a non-null parse tree produced by {@link #getParsedTreeRaw}
	 * @return the typed-dependency list for the tree
	 */
	public List<TypedDependency> getDependency(Tree parse) {
		GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
		List<TypedDependency> list = gs.typedDependenciesCCprocessed();
		return list;
	}

	/** Smoke test: parse one short segmented sentence and print its tree. */
	public static void main(String[] args) {
		SentenceProcessor sp = new SentenceProcessor();
		try {
			String rawStr = "这是一个简单句";
			String[] arr = { "这", "是", "一个", "简单", "句" };
			Tree tree = sp.getParsedTreeRaw(arr, rawStr);
			tree.pennPrint();
		} catch (Exception e) {
			// Report the failure instead of silently swallowing it.
			System.err.println("failed to parse demo sentence");
			e.printStackTrace();
		}
	}
}
