package com.kyubi.tagger.decorators;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;
import java.util.Vector;

import weka.core.Debug;

import com.kyubi.common.config.ConfigKeys;
import com.kyubi.common.config.Configuration;
import com.kyubi.common.grammar.Tree;
import com.kyubi.common.grammar.TreeNode;
import com.kyubi.tagger.ITagger;
import com.kyubi.tagger.Term;
import com.kyubi.tagger.taggers.StanfordTreeTagger;
import com.kyubi.tagger.taggers.XMLWrapper;

import edu.stanford.nlp.trees.EnglishGrammaticalRelations.PhrasalVerbParticleGRAnnotation;

/**
 * This class represents the decorator which can be used for
 * tag frequency counting on the input stream.
 * @author anand
 *
 */
public class TagCollector implements ITagger {

	/** Wrapped tagger this decorator delegates to. */
	private ITagger tagger;
	/** Tag labels (e.g. phrase/POS heads) whose terms should be collected. */
	private Set<String> tags;
	/** tag -> (phrase -> occurrences); each Term records one sighting of the phrase. */
	private HashMap<String, HashMap<String, Vector<Term>>> stats;
	/** Words filtered out of collected phrases (expected pre-stemmed by the caller). */
	private Set<String> stopWords;

	/**
	 * Creates a collector decorating the given tagger.
	 *
	 * @param tagger    the tagger whose output is analysed
	 * @param tags      the tag labels whose phrases should be counted
	 * @param stopWords stop words to strip from collected phrases
	 */
	public TagCollector(ITagger tagger, String[] tags, Set<String> stopWords) {
		this.tagger = tagger;
		this.stats = new HashMap<String, HashMap<String, Vector<Term>>>();
		this.stopWords = stopWords;

		this.tags = new HashSet<String>();
		Collections.addAll(this.tags, tags);
	}

	/**
	 * @see com.kyubi.tagger.ITagger
	 */
	public Vector<String> getOriginalValues() {
		return tagger.getOriginalValues();
	}

	/**
	 * @see com.kyubi.tagger.ITagger
	 */
	public Vector<String> getTaggedValues() {
		return tagger.getTaggedValues();
	}

	/**
	 * Collects and counts the tagged phrases/terms.
	 * Each tagged value is parsed into a tree; every node whose head tag is in
	 * {@link #tags} contributes one {@code Term} occurrence to {@link #stats},
	 * after stop words have been stripped from the phrase text.
	 */
	public void collectPhrases() {
		Vector<String> tValues = getTaggedValues();

		for (int l = 0; l < tValues.size(); l++) {
			Tree tree = new Tree();
			tree.parse(tValues.get(l));

			for (TreeNode tNode : tree.getNodes()) {
				String tag = tNode.getHead();

				Term t = new Term();
				t.value = tNode.getText();
				t.tag = tag;
				t.lineNum = l;
				t.freq++;

				// skip phrases that are themselves stop words
				if (stopWords.contains(t.value)) {
					continue;
				}

				// drop stop-word tokens from inside the phrase
				String[] tokens = t.value.split("[ ]+");
				StringBuilder cleaned = new StringBuilder();
				for (int i = 0; i < tokens.length; i++) {
					if (!stopWords.contains(tokens[i])) {
						cleaned.append(tokens[i]).append(' ');
					}
				}
				t.value = cleaned.toString().trim();

				// nothing left after stop-word removal
				if (t.value.equals("")) {
					continue;
				}

				if (this.tags.contains(tag)) {
					// the retrieved maps are mutated in place; no re-put needed
					HashMap<String, Vector<Term>> terms = stats.get(tag);
					if (terms == null) {
						terms = new HashMap<String, Vector<Term>>();
						stats.put(tag, terms);
					}
					Vector<Term> vTerms = terms.get(t.value);
					if (vTerms == null) {
						vTerms = new Vector<Term>();
						terms.put(t.value, vTerms);
					}
					vTerms.add(t);
				}
			}
		}
	}

	/**
	 * Returns the top tagged phrases/terms: for each collected tag, every
	 * occurrence of the phrases that appear in more than 1% of the input
	 * sentences.
	 *
	 * @return the top terms
	 */
	public ArrayList<Term> getTopTagWords() {
		ArrayList<Term> retTerms = new ArrayList<Term>();

		// frequency threshold: 1% of the number of input sentences
		// (loop-invariant, so computed once rather than once per tag)
		double onePercent = 0.01 * getOriginalValues().size();

		for (String tag : stats.keySet()) {
			HashMap<String, Vector<Term>> termMap = stats.get(tag);

			// one summary Term per distinct phrase; freq = occurrence count
			ArrayList<Term> phraseCount = new ArrayList<Term>();
			for (String phrase : termMap.keySet()) {
				Term t = new Term();
				t.value = phrase;
				t.tag = tag;
				t.freq = termMap.get(phrase).size();
				phraseCount.add(t);
			}

			Collections.sort(phraseCount);

			// copy the occurrences of each sufficiently frequent phrase;
			// phrases at or below the 1% threshold are discarded
			for (int i = 0; i < phraseCount.size(); i++) {
				Term phrase = phraseCount.get(i);
				if (phrase.freq > onePercent) {
					retTerms.addAll(termMap.get(phrase.value));
				}
			}
		}

		return retTerms;
	}

	/**
	 * Simple driver: stems the configured stop-word list, runs the tagging
	 * pipeline over a sample XML file and reports how many top terms were
	 * collected.
	 *
	 * @param args args[2] must hold the configuration file path
	 */
	public static void main(String[] args) {
		try {
			Configuration c = Configuration.getSingleton(args[2]);
			Set<String> stopWords = c.getUniqLinesFromFile(c.get(ConfigKeys.BASEURL) + c.get(ConfigKeys.STOPWORDS_PATH));

			// stem the stop words so they match the stemmed tagger output
			Set<String> stemmedStopWords = new HashSet<String>();
			net.sf.snowball.ext.PorterStemmer stemmer = new net.sf.snowball.ext.PorterStemmer();
			for (String stWord : stopWords) {
				stemmer.setCurrent(stWord);
				stemmer.stem();
				stemmedStopWords.add(stemmer.getCurrent());
			}

			TagCollector tagr = new TagCollector(
					new StanfordTreeTagger(new PorterStemmer(new Lowercase(
							new XMLWrapper("/Users/anand/kyubi/resources/xml/test2.xml")))),
					c.getArray(ConfigKeys.TAGS_TO_LEARN), stemmedStopWords);
			tagr.collectPhrases();

			ArrayList<Term> topTerms = tagr.getTopTagWords();
			System.out.println(topTerms.size() + " top terms collected");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
