/*
 * Parser.java
 * 
 * Copyright 2012 Xinli Jia
 * 
 * Using Stanford parser to extract key expressions from sentences,
 * and then apply Sentiment Analysis to these expressions using the SentiWordNet lexical resource.
 */

package ie.dit.comp.lukejia.fyp.stanford;

import ie.dit.comp.lukejia.fyp.model.TaggedSentence;
import ie.dit.comp.lukejia.fyp.model.TaggedSentence.TAG_TYPE;
import ie.dit.comp.lukejia.fyp.swn.SWN3;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.Sentence;
import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.process.DocumentPreprocessor;
import edu.stanford.nlp.trees.GrammaticalStructure;
import edu.stanford.nlp.trees.GrammaticalStructureFactory;
import edu.stanford.nlp.trees.PennTreebankLanguagePack;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreebankLanguagePack;
import edu.stanford.nlp.trees.TypedDependency;

public class Parser {
	private LexicalizedParser lp;
	private SWN3 swn;
	private final int NUM_OF_SENTENCES = 5;

	public Parser(String modulePath, String pathToSWN) {
		lp = new LexicalizedParser(modulePath);
		swn = new SWN3(pathToSWN);

	};

	public String parsingText(String text) {
		Iterable<List<HasWord>> sentences;
		DocumentPreprocessor dp = new DocumentPreprocessor(
				new BufferedReader(new InputStreamReader(
						new ByteArrayInputStream(text.getBytes()))));
		List<List<HasWord>> tmp = new ArrayList<List<HasWord>>();
		for (List<HasWord> sentence : dp) {
			tmp.add(sentence);
		}
		sentences = tmp;

		StringBuilder sb = new StringBuilder();
		TreebankLanguagePack tlp = new PennTreebankLanguagePack();
		GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
		GrammaticalStructure gs;
		for (List<HasWord> sentence : sentences) {
			Tree parse = lp.apply(sentence);

			gs = gsf.newGrammaticalStructure(parse);
			List<TypedDependency> tdl = gs.typedDependencies(true);// typedDependenciesCCprocessed();
			System.out.println(sentence.toString());
			// System.out.println(tdl);
			sentence = getSubVerbAndObj(tdl, sentence);
			sentence = getDependentAndPrepotion(tdl, sentence);
			sentence = getAdjModifier(tdl, sentence);

			sb.append(Sentence.listToString(sentence, false));
		}
		return sb.toString();
	}

	public List<TaggedSentence> parsingTextToMap(String text) {
		Iterable<List<HasWord>> sentences;
		DocumentPreprocessor dp = new DocumentPreprocessor(
				new BufferedReader(new InputStreamReader(
						new ByteArrayInputStream(text.getBytes()))));
		List<List<HasWord>> tmp = new ArrayList<List<HasWord>>();
		int count = 0;
		for (List<HasWord> sentence : dp) {
			tmp.add(sentence);
			if (++count >= NUM_OF_SENTENCES)
				break;
		}
		sentences = tmp;

		TreebankLanguagePack tlp = new PennTreebankLanguagePack();
		GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
		GrammaticalStructure gs;
		List<TaggedSentence> tagCloud = new ArrayList<TaggedSentence>();
		for (List<HasWord> sentence : sentences) {
			Tree parse = lp.apply(sentence);

			gs = gsf.newGrammaticalStructure(parse);
			List<TypedDependency> tdl = gs.typedDependencies(true);
			// System.out.println(sentence.toString());
			// System.out.println(tdl);
			TaggedSentence ts = getTag(tdl, sentence);
			if (ts != null) {
				tagCloud.add(ts);
				System.out.print(ts.getTagWord() + ", " + ts.getScore());
			}
		}
		System.out.println();
		return tagCloud;
	}

	public List<HasWord> getSubVerbAndObj(List<TypedDependency> tdl,
			List<HasWord> sentence) {
		for (TypedDependency td1 : tdl) {
			if (td1.reln().toString().equals("nsubj")) {
				for (TypedDependency td2 : tdl) {
					if (td2.reln().toString().equals("dobj")
							&& td1.gov().equals(td2.gov())) {
						String subj = td1.dep().toString("value");
						if (subj.equalsIgnoreCase("it")
								|| subj.equalsIgnoreCase("that"))
							continue;

						// System.out.println(td1.dep().toString("value") + " "
						// +
						// td1.gov().toString("value") + " " +
						// td2.dep().toString("value"));
						int n1, n2, n3;
						n1 = td1.dep().index() - 1;
						n2 = td1.gov().index() - 1;
						n3 = td2.dep().index() - 1;
						String word = sentence.get(n1).word();
						sentence.get(n1).setWord("<svao>" + word + "</svao>");
						word = sentence.get(n2).word();
						sentence.get(n2).setWord("<svao>" + word + "</svao>");
						word = sentence.get(n3).word();
						sentence.get(n3).setWord("<svao>" + word + "</svao>");
					}
				}
			}
		}
		return sentence;
	}

	public TaggedSentence getTag(List<TypedDependency> tdl,
			List<HasWord> sentence) {
		List<String> tags = new ArrayList<String>();
		List<String> singleWords = new ArrayList<String>();
		for (TypedDependency td1 : tdl) {
			if (td1.reln().toString().equals("amod")) {
				String tag = td1.dep().toString("value") + " "
						+ td1.gov().toString("value");
				int n1, n2;
				n1 = td1.dep().index() - 1;
				n2 = td1.gov().index() - 1;
				String word = sentence.get(n1).word();
				singleWords.add(word);
				sentence.get(n1).setWord("<amod>" + word + "</amod>");
				word = sentence.get(n2).word();
				singleWords.add(word);
				sentence.get(n2).setWord("<amod>" + word + "</amod>");
				tags.add(tag);
			}
		}
		if (!tags.isEmpty()) {
			String s = Sentence.listToString(sentence, false);
			return new TaggedSentence(tags, s,
					swn.extractSentence(singleWords), TAG_TYPE.ADJ);
		} else
			return null;
	}

	public List<HasWord> getDependentAndPrepotion(List<TypedDependency> tdl,
			List<HasWord> sentence) {
		for (TypedDependency td1 : tdl) {
			if (td1.reln().toString().equals("prep")) {
				for (TypedDependency td2 : tdl) {

					if (td2.reln().toString().equals("pobj")
							&& td1.dep().equals(td2.gov())) {
						// System.out.println(td1.gov().toString("value") + " "+
						// td1.dep().toString("value") + " "+
						// td2.dep().toString("value"));
						int n1, n2, n3;
						n1 = td1.gov().index() - 1;
						n2 = td1.dep().index() - 1;
						n3 = td2.dep().index() - 1;
						String word = sentence.get(n1).word();
						sentence.get(n1).setWord("<dpre>" + word + "</dpre>");
						word = sentence.get(n2).word();
						sentence.get(n2).setWord("<dpre>" + word + "</dpre>");
						word = sentence.get(n3).word();
						sentence.get(n3).setWord("<dpre>" + word + "</dpre>");
					}
				}
			}
		}
		return sentence;
	}

	public void getNumericPhase(List<TypedDependency> tdl) {
		for (TypedDependency td1 : tdl) {
			if (td1.reln().toString().equals("num")) {
				System.out.println(td1.dep().toString("value") + " "
						+ td1.gov().toString("value"));
			}
		}
	}

	public void getPossession(List<TypedDependency> tdl) {
		for (TypedDependency td1 : tdl) {
			if (td1.reln().toString().equals("poss")) {
				System.out.println(td1.dep().toString("value") + " "
						+ td1.gov().toString("value"));
			}
		}
	}

	public List<HasWord> getAdjModifier(List<TypedDependency> tdl,
			List<HasWord> sentence) {
		for (TypedDependency td1 : tdl) {
			if (td1.reln().toString().equals("amod")) {
				// System.out.println(td1.dep().toString("value") + " "+
				// td1.gov().toString("value"));
				int n1, n2;
				n1 = td1.dep().index() - 1;
				n2 = td1.gov().index() - 1;
				String word = sentence.get(n1).word();
				sentence.get(n1).setWord("<amod>" + word + "</amod>");
				word = sentence.get(n2).word();
				sentence.get(n2).setWord("<amod>" + word + "</amod>");
			}
		}
		return sentence;
	}
}
