package hu.farago.wsj.parse.wordprocess;

import hu.farago.wsj.model.entity.Article;

import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import edu.stanford.nlp.dcoref.CorefChain;
import edu.stanford.nlp.dcoref.CorefCoreAnnotations.CorefChainAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations.TreeAnnotation;
import edu.stanford.nlp.util.CoreMap;

/**
 * Splits the given article's plain text into sentences and then into words.
 * Processing is performed by the Stanford CoreNLP library.
 * 
 * @author Balázs
 *
 */
@Service("coreNLP")
public class CoreNLPWordsProcessor implements WordsProcessor {

	private static final Logger LOGGER = LoggerFactory.getLogger(CoreNLPWordsProcessor.class);

	/** Comma-separated CoreNLP annotator list (e.g. "tokenize, ssplit, pos, lemma, ner, parse, dcoref"). */
	@Value("${wsj.receiver.annotators}")
	private String annotators;

	// Built lazily because StanfordCoreNLP construction loads large models and is
	// expensive. volatile + synchronized init keeps lazy creation thread-safe for
	// this singleton-scoped Spring service.
	private volatile StanfordCoreNLP coreNLP;

	/**
	 * Annotates the article's plain text with the configured CoreNLP pipeline and
	 * logs tokens (word/POS/NER), parse trees, dependencies, and coreference chains.
	 *
	 * @param existingArticle article whose {@code plainText} is analyzed
	 */
	@Override
	public void parseArticlePlainTextAndBuildMapOfWords(Article existingArticle) {
		buildCoreNLPIfNull();
		Annotation document = annotatePlainTextOfArticle(existingArticle);

		List<CoreMap> sentences = document.get(SentencesAnnotation.class);
		if (sentences != null) {
			for (CoreMap sentence : sentences) {
				logTokens(sentence);
				logParseTree(sentence);
				logDependencies(sentence);
			}
		}

		logCorefChains(document);
	}

	// Logs word, part-of-speech, and named-entity tag for every token of the sentence.
	private void logTokens(CoreMap sentence) {
		for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
			// Parameterized logging: no string concatenation cost when INFO is disabled.
			LOGGER.info("word: {}, pos: {}, ner: {}",
					token.get(TextAnnotation.class),
					token.get(PartOfSpeechAnnotation.class),
					token.get(NamedEntityTagAnnotation.class));
		}
	}

	// Logs the constituency parse tree; absent (null) unless the "parse" annotator ran.
	private void logParseTree(CoreMap sentence) {
		Tree tree = sentence.get(TreeAnnotation.class);
		if (tree != null) {
			LOGGER.info("===============\ntree: {}\ntree value: {}", tree, tree.value());
		}
	}

	// Logs collapsed-CC-processed dependencies; absent (null) without the "parse" annotator.
	private void logDependencies(CoreMap sentence) {
		SemanticGraph dependencies = sentence
				.get(CollapsedCCProcessedDependenciesAnnotation.class);
		if (dependencies != null) {
			LOGGER.info("===============\ndependencies: {}\ndependencies compact string: {}",
					dependencies, dependencies.toCompactString());
		}
	}

	// Logs coreference chains; absent (null) unless the "dcoref" annotator ran.
	private void logCorefChains(Annotation document) {
		Map<Integer, CorefChain> graph = document.get(CorefChainAnnotation.class);
		if (graph == null) {
			return;
		}
		LOGGER.info("\n\n**********************\nCoref chain\n\n");
		for (CorefChain chain : graph.values()) {
			LOGGER.info("chain: {}", chain);
		}
	}

	// Wraps the article's plain text in an Annotation and runs the pipeline on it.
	private Annotation annotatePlainTextOfArticle(Article existingArticle) {
		final String text = existingArticle.getPlainText();
		Annotation document = new Annotation(text);
		coreNLP.annotate(document);
		return document;
	}

	// Thread-safe lazy initialization: without synchronization two concurrent
	// callers could each build a pipeline (or see a partially published one).
	private synchronized void buildCoreNLPIfNull() {
		if (coreNLP == null) {
			Properties props = new Properties();
			props.setProperty("annotators", annotators);
			coreNLP = new StanfordCoreNLP(props);
		}
	}

}
