package net.wanglu.www.zzz.service.sp;

import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;

import net.wanglu.www.zzz.utils.LoggerFactory;
import net.wanglu.www.zzz.utils.OpenNLPUtils;
import net.wanglu.www.zzz.utils.StanfordUtils;
import opennlp.tools.postag.POSTaggerME;
import opennlp.tools.tokenize.Tokenizer;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.TokenizerFactory;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TypedDependency;

public class SentencePreprocessV00 extends SentencePreprocess {

	/** Maximum number of tokens (punctuation included) accepted in one sentence. */
	private static final int MAX_WORDS = 128;

	/**
	 * Preprocesses a single sentence for downstream analysis.
	 * <p>
	 * Pipeline: (1) tokenize with OpenNLP, (2) POS-tag the tokens with OpenNLP,
	 * (3) re-tokenize and parse the raw sentence with the Stanford lexicalized
	 * parser to obtain the parse tree, tagged yield, typed dependencies and
	 * grammatical structure. All intermediate results are returned in a map.
	 *
	 * @param objects varargs; {@code objects[0]} must be the sentence as a
	 *                {@link String} (any further elements are ignored)
	 * @return a {@code HashMap<String,Object>} with keys:
	 *         {@code sentence} (String), {@code words} (String[]),
	 *         {@code tags} (String[]), {@code root} (Tree),
	 *         {@code twl} (ArrayList&lt;TaggedWord&gt;),
	 *         {@code tdl} (Collection&lt;TypedDependency&gt;),
	 *         {@code gs} (GrammaticalStructure)
	 * @throws Exception if tokenization or tagging fails (original failure is
	 *                   attached as the cause), or if the sentence exceeds
	 *                   {@value #MAX_WORDS} tokens
	 */
	@Override
	public Object service(Object... objects) throws Exception {
		String sentence = (String) objects[0];

		// 1) Tokenize with OpenNLP. On failure, log and rethrow with the
		// original exception chained as the cause (previously dropped).
		String[] words;
		try {
			Tokenizer tokenizer = OpenNLPUtils.getTokenizer();
			words = tokenizer.tokenize(sentence);
		} catch (Exception e) {
			LoggerFactory.SelfLogger.error("", e);
			throw new Exception("亲！出错了！", e);
		}
		if (words == null) throw new Exception("亲！出错了！");
		if (words.length > MAX_WORDS)
			throw new Exception("亲！句子太长了，包括标点符号在内不能超过128个单词");

		// 2) POS-tag the OpenNLP tokens.
		String[] tags;
		try {
			POSTaggerME postagMe = OpenNLPUtils.getPOSTaggerME();
			tags = postagMe.tag(words);
		} catch (Exception e) {
			LoggerFactory.SelfLogger.error("", e);
			throw new Exception("亲！出错了！", e);
		}
		if (tags == null) throw new Exception("亲！出错了！");

		// 3) Stanford parse. Note: the sentence is re-tokenized with the
		// Stanford tokenizer, so the parser's token stream may differ from
		// the OpenNLP "words" above.
		LexicalizedParser lp = StanfordUtils.getLexicalizedParser();
		GrammaticalStructureFactory gsf = StanfordUtils
				.getGrammaticalStructureFactory();
		TokenizerFactory<CoreLabel> tokenizerFactory = StanfordUtils.getTokenizerFactory();

		List<CoreLabel> wordList =
				tokenizerFactory.getTokenizer(new StringReader(sentence)).tokenize();
		Tree root = lp.apply(wordList);
		ArrayList<TaggedWord> twl = root.taggedYield();
		GrammaticalStructure gs = gsf.newGrammaticalStructure(root);
		Collection<TypedDependency> tdl = gs.typedDependencies();

		// Bundle all results. Presized for 7 entries (capacity 10 with the
		// default 0.75 load factor avoids a rehash; the old value 4 did not).
		HashMap<String, Object> preprocessData = new HashMap<String, Object>(10);
		preprocessData.put("sentence", sentence);
		preprocessData.put("words", words);
		preprocessData.put("tags", tags);
		preprocessData.put("root", root);
		preprocessData.put("twl", twl);
		preprocessData.put("tdl", tdl);
		preprocessData.put("gs", gs);
		return preprocessData;
	}

}
