package net.wanglu.www.zzz.service.sp;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import net.wanglu.www.zzz.utils.StanfordUtilsCore;
import edu.stanford.nlp.dcoref.CorefChain;
import edu.stanford.nlp.dcoref.CorefCoreAnnotations.CorefChainAnnotation;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.BasicDependenciesAnnotation;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TypedDependency;
import edu.stanford.nlp.trees.TreeCoreAnnotations.TreeAnnotation;
import edu.stanford.nlp.util.CoreMap;

public class SentencePreprocessCore extends SentencePreprocess {

	/**
	 * Runs the Stanford CoreNLP pipeline over the input text and collects the
	 * analysis results for its first sentence into a single map.
	 *
	 * @param objects varargs; {@code objects[0]} must be the input sentence as
	 *                a {@link String}
	 * @return a {@code Map<String, Object>} with keys:
	 *         "sentence" (the raw input String), "words" (List&lt;String&gt;),
	 *         "tags" (POS tags, List&lt;String&gt;), "ners" (NER labels,
	 *         List&lt;String&gt;), "tokens" (List&lt;CoreLabel&gt;), "root"
	 *         (parse {@link Tree}), "twl" (List&lt;TaggedWord&gt;), "tdl"
	 *         (Collection&lt;TypedDependency&gt;), "dependencies"
	 *         ({@link SemanticGraph}), "coreChains" (coreference chains),
	 *         "gs" ({@link GrammaticalStructure})
	 * @throws IllegalArgumentException if the pipeline extracts no sentence
	 *                                  from the input
	 * @throws Exception                propagated from the underlying pipeline
	 */
	@Override
	public Object service(Object... objects) throws Exception {
		String sentence0 = (String) objects[0];

		Annotation document = new Annotation(sentence0);
		StanfordCoreNLP pipeline = StanfordUtilsCore.getStanfordCoreNLP();
		pipeline.annotate(document);

		List<CoreMap> sentences = document.get(SentencesAnnotation.class);
		if (sentences == null || sentences.isEmpty()) {
			// Fail loudly instead of an opaque IndexOutOfBoundsException below.
			throw new IllegalArgumentException(
					"No sentence could be extracted from input: " + sentence0);
		}
		// Only the first sentence of the input is analyzed here.
		CoreMap sentence = sentences.get(0);

		List<String> words = new ArrayList<String>(256);
		List<String> tags = new ArrayList<String>(256);
		List<String> ners = new ArrayList<String>(256);
		List<CoreLabel> tokens = new ArrayList<CoreLabel>(256);
		// 16 slots: enough for the 11 entries below without rehashing.
		Map<String, Object> preprocessData = new HashMap<String, Object>(16);

		// Collect surface form, POS tag, and NER label for every token.
		for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
			tokens.add(token);
			words.add(token.get(TextAnnotation.class));
			tags.add(token.get(PartOfSpeechAnnotation.class));
			ners.add(token.get(NamedEntityTagAnnotation.class));
		}

		// Constituency parse tree and its tagged word yield.
		Tree root = sentence.get(TreeAnnotation.class);
		ArrayList<TaggedWord> twl = root.taggedYield();

		// Basic (non-collapsed) dependency graph for the sentence.
		SemanticGraph dependencies = sentence
				.get(BasicDependenciesAnnotation.class);
		Collection<TypedDependency> tdl = dependencies.typedDependencies();

		// Coreference chains are document-level, not sentence-level.
		Map<Integer, CorefChain> coreChains = document
				.get(CorefChainAnnotation.class);

		GrammaticalStructureFactory gsf = StanfordUtilsCore.getGrammaticalStructureFactory();
		GrammaticalStructure gs = gsf.newGrammaticalStructure(root);

		preprocessData.put("sentence", sentence0);
		preprocessData.put("words", words);
		preprocessData.put("tags", tags);
		// Previously collected but never returned — now exposed to callers.
		preprocessData.put("ners", ners);
		preprocessData.put("tokens", tokens);
		preprocessData.put("root", root);
		preprocessData.put("twl", twl);
		preprocessData.put("tdl", tdl);
		preprocessData.put("dependencies", dependencies);
		preprocessData.put("coreChains", coreChains);
		preprocessData.put("gs", gs);
		return preprocessData;
	}
}
