package util.nlp;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.LinkedList;
import java.util.Scanner;

import util.io.FileInput;
import util.nlp.comparator.ByNodeDegree;
import util.nlp.comparator.ByNodeDistance;
import util.string.StringAnalysis;



public class LanguageModel implements Constants {

	/*
	 * Path variables and objects to access data files.
	 * NOTE(review): absolute, machine-specific paths baked in at class-load
	 * time — presumably meant to be externalized to configuration; confirm
	 * before running elsewhere.
	 */
	
	
	static String dict_path= "/home/sergio/Documents/delicious_stats/tags_dict.txt";
	
	static String stop_tags_path= "/home/sergio/projects/data/delicious/experiments/stop_tags.txt";
	
	
	// Dictionaries are loaded eagerly at class initialization, so any I/O
	// failure surfaces as a class-initialization error.
	static HashDictionary tags_dict = new HashDictionary(dict_path,true);
	static HashDictionary stop_dict = new HashDictionary(stop_tags_path,true);
	
	
	
	// Presumably the held-out data path; never assigned in this file.
	String hoPath;

	
	
	// Variables to manage the input of the data sets
	private FileInput data;

	// lang that is going to be processed : 0 for english 1 for czech
	int lang;

	// Total number of samples
	int N_T = 0; // Training data

	int N_HO = 0; // Held Out data

	int N_Test = 0; // Test data

	int V_T = 0; // Size of tag vocabulary
	int V_L = 0; // size of lexicon vocabulary

	// Number of words in test data that were seen in the training data
	int COVERAGE = 0;

	// Running sum of all edge distances of the co-occurrence graph; divided by
	// total_item at the end of iniGraphNodes to become the mean distance.
	static double distance_mean = 0;

	// Number of edges accumulated into distance_mean.
	static double total_item = 0;

	// Lexicon vocabulary
	Hashtable<String, Integer> vocabulary = new Hashtable<String, Integer>(24,
			1f);

	ArrayList<String> lines = new ArrayList<String>(); // Words in training data

	// Hash tables to keep track of the frequency for p(i), p(i,j) and the
	// alphabets

	/*
	 * Hash tables to store the counts of unigrams,bigrams and trigrams of the
	 * tags of the training data
	 * 
	 * uni, bi and tri . store the counts with keys in the order : i, j , k (
	 * for order in the text k j i )
	 * 
	 * bi2 and tri2 contains the same counts but the keys are in inverse order
	 */

	static Hashtable<String, Integer> uni = new Hashtable<String, Integer>(24,
			1f);
	static Hashtable<String, Integer> uniL = new Hashtable<String, Integer>(24,
			1f);
	static Hashtable<String, Integer> uniR = new Hashtable<String, Integer>(24,
			1f);

	static Hashtable<String, Hashtable<String, Integer>> bi = new Hashtable<String, Hashtable<String, Integer>>(
			24, 1f);

	static Hashtable<String, Hashtable<String, Hashtable<String, Integer>>> tri = new Hashtable<String, Hashtable<String, Hashtable<String, Integer>>>(
			24, 1f);

	static Hashtable<String, Hashtable<String, Integer>> bi2 = new Hashtable<String, Hashtable<String, Integer>>(
			24, 1f);

	static Hashtable<String, Hashtable<String, Hashtable<String, Integer>>> tri2 = new Hashtable<String, Hashtable<String, Hashtable<String, Integer>>>(
			24, 1f);

	/*
	 * Clustering and model helper data structures.
	 */

	// Star candidates (one Node per context word), sorted by degree in
	// iniGraphNodes and consumed by getClusters.
	static ArrayList<Node> nodes = new ArrayList<Node>();

	// Marks which words have already been absorbed into some cluster.
	static Hashtable<String, Boolean> nodes_taken = new Hashtable<String, Boolean>();
	// Cluster centre -> member words; filled by getClusters.
	static Hashtable<String, ArrayList<String>> clusters = new Hashtable<String, ArrayList<String>>(
			24, 1f);

	// Context word -> neighbour nodes (sorted by distance); filled by
	// iniGraphNodes.
	static Hashtable<String, ArrayList<Node>> graph_distances = new Hashtable<String, ArrayList<Node>>(
			24, 1f);

	/*
	 * NOTE(review): the comment below refers to bi_lex/Ibi_lex tables that are
	 * not declared anywhere in this file — presumably removed in an earlier
	 * revision.
	 *
	 * This two hash tables contain the tags that generates each word in
	 * training data bi_lex store this info with keys in order : word, tags
	 * Ibi_lex stores the info with keys in order : tags,words
	 */

	/**
	 * cuni stores the counts for singles states( bigrams) cbi stores trigrams
	 * used for the calculation of p(s|s') ctri stores the counts of how many
	 * times each word is produce give a couple of states that is : p(w|s,s')
	 * the order of keys if : word,i,j,k ( where i j k are the tags that appears
	 * in the text in this same order)
	 * 
	 * cbuni.cbbi and so on are used to buffer the previous hashes when we are
	 * re-estimating the counts
	 * 
	 */

	Hashtable<String, Hashtable<String, Double>> cuni = new Hashtable<String, Hashtable<String, Double>>();

	Hashtable<String, Hashtable<String, Hashtable<String, Double>>> cbi = new Hashtable<String, Hashtable<String, Hashtable<String, Double>>>();

	Hashtable<String, Hashtable<String, Hashtable<String, Hashtable<String, Double>>>> ctri = new Hashtable<String, Hashtable<String, Hashtable<String, Hashtable<String, Double>>>>();

	// hash of counts buffer (snapshots of cuni/cbi/ctri while re-estimating)

	Hashtable<String, Hashtable<String, Double>> cbuni = new Hashtable<String, Hashtable<String, Double>>();

	Hashtable<String, Hashtable<String, Hashtable<String, Double>>> cbbi = new Hashtable<String, Hashtable<String, Hashtable<String, Double>>>();

	Hashtable<String, Hashtable<String, Hashtable<String, Hashtable<String, Double>>>> cbtri = new Hashtable<String, Hashtable<String, Hashtable<String, Hashtable<String, Double>>>>();

	/**
	 * Initializes the similarity graph from the bigram counts in {@code bi}.
	 *
	 * For every context word it builds a list of Node entries (one per
	 * co-occurring word) whose distance is the ratio bi[context][word] /
	 * uniL[context], accumulates the global distance mean, and collects one
	 * "star" node per context ranked by degree. Results go into the static
	 * fields graph_distances, nodes, nodes_taken, distance_mean and
	 * total_item.
	 */
	static void iniGraphNodes() {

		Enumeration<String> contexts = bi.keys();

		while (contexts.hasMoreElements()) {

			String context = contexts.nextElement();

			// Hoist the inner table, the context's marginal count and the
			// degree: all three are invariant across the inner loop (the
			// original re-fetched them on every iteration).
			Hashtable<String, Integer> coCounts = bi.get(context);
			double contextTotal = uniL.get(context);
			int degree = coCounts.size();

			ArrayList<Node> list = graph_distances.get(context);
			if (list == null) {
				list = new ArrayList<Node>();
				graph_distances.put(context, list);
			}

			Enumeration<String> words = coCounts.keys();
			while (words.hasMoreElements()) {

				String word = words.nextElement();

				// Edge weight: relative frequency of word under this context.
				double distance = coCounts.get(word) / contextTotal;

				distance_mean += distance;
				total_item++;

				Node node = new Node();
				node.setContext(context);
				node.setWord(word);
				node.setDistance(distance);
				node.setDegree(degree);

				list.add(node);
			}

			// Order the neighbours by distance (comparator order is defined
			// by ByNodeDistance). Guard added: the original called sort on a
			// possibly-null list when a context had no entries.
			if (list != null)
				Collections.sort(list, new ByNodeDistance());
		}

		// One star candidate per context, ranked by degree for getClusters.
		contexts = bi.keys();
		while (contexts.hasMoreElements()) {
			String context = contexts.nextElement();
			Node star = new Node();
			star.setContext(context);
			star.setDegree(bi.get(context).size());
			nodes_taken.put(context, false);

			nodes.add(star);
		}

		Collections.sort(nodes, new ByNodeDegree());

		// Guard against 0/0 = NaN when bi was empty.
		if (total_item > 0)
			distance_mean = distance_mean / total_item;
		System.out.println(distance_mean);
	}

	/**
	 * Greedy star clustering over the co-occurrence graph.
	 *
	 * Repeatedly takes the highest-degree unassigned node as a cluster centre
	 * ("star") and absorbs every not-yet-taken neighbour whose distance is at
	 * least the threshold. Clusters land in the static {@code clusters} map;
	 * clusters with more than 5 members are printed.
	 *
	 * NOTE(review): distance_mean is overwritten with the constant 0.00001
	 * for every star, discarding the corpus mean computed in iniGraphNodes —
	 * looks like an experiment left in; confirm the intended threshold.
	 */
	public static void getClusters() {

		while (nodes != null && nodes.size() > 0) {
			// nodes is sorted by ByNodeDegree, so the head is the next star.
			Node star = nodes.remove(0);

			if (nodes_taken.containsKey(star.getContext())
					&& !nodes_taken.get(star.getContext()).booleanValue()) {

				ArrayList<String> elements = new ArrayList<String>();
				// Satellites are taken from the star's distance-sorted
				// neighbour list while they stay above the threshold.
				ArrayList<Node> list = graph_distances.get(star.getContext());
				distance_mean = 0.00001;

				// Guard added: a context may have no neighbour list at all
				// (the original would NPE on list.size()).
				if (list != null) {
					int i = 0;
					while (i < list.size()
							&& list.get(i).getDistance() >= distance_mean) {

						String word = list.get(i).getWord();

						// Absorb only words not yet claimed by another star.
						if (nodes_taken.containsKey(word)
								&& !nodes_taken.get(word).booleanValue()) {
							elements.add(word);

							nodes_taken.put(word, true);
						}
						i++;
					}
				}

				clusters.put(star.getContext(), elements);
			}
		}

		System.out.println("--------------------Clusters---------------");

		Enumeration<String> keys = clusters.keys();

		while (keys.hasMoreElements()) {
			String key = keys.nextElement();
			ArrayList<String> list = clusters.get(key);

			// Only report non-trivial clusters.
			if (list.size() > 5) {

				System.out.print(key + "\t");
				for (int i = 0; i < list.size(); i++) {
					System.out.print(list.get(i) + "\t");
				}
				System.out.println("");
			}
		}
	}

	/**
	 * End-to-end clustering pipeline: extracts (context, word) pairs from the
	 * query log, accumulates bigram/unigram counts, builds the similarity
	 * graph and emits the clusters.
	 */
	static void generate_clusters(String filePath, int context, int distance) {
		ArrayList<String> pairs = generatePairsPerQuery(filePath, context, distance);
		iniContextCounts(pairs, bi, bi2, uni);
		iniUnigrams(bi, uniL, uniR);
		iniGraphNodes();
		getClusters();
	}

	/**
	 * Derives marginal counts from a nested bigram table.
	 *
	 * For every entry bi[outer][inner] = c, adds c to uniL[outer] and to
	 * uniR[inner].
	 *
	 * (Doc fix: the original javadoc described parameters uniW/uniC that do
	 * not exist in the signature.)
	 *
	 * @param bi
	 *            bigram counts; first-level key is the target j, second-level
	 *            key is its context i (layout produced by iniContextCounts)
	 * @param uniL
	 *            receives the summed counts per first-level key
	 * @param uniR
	 *            receives the summed counts per second-level key
	 */
	static void iniUnigrams(Hashtable<String, Hashtable<String, Integer>> bi,
			Hashtable<String, Integer> uniL, Hashtable<String, Integer> uniR) {

		Enumeration<String> iter = bi.keys();

		while (iter.hasMoreElements()) {

			String key = iter.nextElement();

			// Hoist the inner row; the original re-fetched it per entry.
			Hashtable<String, Integer> row = bi.get(key);

			Enumeration<String> iter2 = row.keys();
			while (iter2.hasMoreElements()) {

				String key2 = iter2.nextElement();
				int count = row.get(key2);

				// Single get-null instead of containsKey + get (two lookups).
				Integer r = uniR.get(key2);
				uniR.put(key2, r == null ? count : r + count);

				Integer l = uniL.get(key);
				uniL.put(key, l == null ? count : l + count);
			}
		}
	}

	/**
	 * Consumes a flat list of (context, word) pairs — as produced by
	 * generatePairsPerQuery, laid out [i0, j0, i1, j1, ...] — and accumulates
	 * unigram and bigram counts.
	 *
	 * BUGFIX: the original ignored the {@code biI} parameter and wrote the
	 * inverted counts into the static field {@code bi2} instead. The counts
	 * now go into the table the caller supplied; the only existing caller
	 * passes {@code bi2}, so behaviour for current callers is unchanged.
	 *
	 * @param words
	 *            flat pair list; element 2k is the context i, 2k+1 the word j
	 *            (a trailing unpaired element is ignored)
	 * @param bi
	 *            bigram counts keyed [j][i]
	 * @param biI
	 *            inverted bigram counts keyed [i][j]
	 * @param uni
	 *            unigram counts; both members of every pair are counted
	 */
	static void iniContextCounts(ArrayList<String> words,
			Hashtable<String, Hashtable<String, Integer>> bi,
			Hashtable<String, Hashtable<String, Integer>> biI,
			Hashtable<String, Integer> uni) {

		for (int k = 0; k + 1 < words.size(); k += 2) {

			String i = words.get(k);
			String j = words.get(k + 1);

			// Unigram counts for both members of the pair.
			Integer cj = uni.get(j);
			uni.put(j, cj == null ? 1 : cj + 1);

			Integer ci = uni.get(i);
			uni.put(i, ci == null ? 1 : ci + 1);

			// Bigram count keyed [j][i].
			incrementPairCount(bi, j, i);

			// Inverted bigram count keyed [i][j] — into the parameter, not
			// the static bi2 (see BUGFIX note above).
			incrementPairCount(biI, i, j);
		}
	}

	/**
	 * Increments table[outer][inner], creating the inner row on demand.
	 */
	private static void incrementPairCount(
			Hashtable<String, Hashtable<String, Integer>> table, String outer,
			String inner) {
		Hashtable<String, Integer> row = table.get(outer);
		if (row == null) {
			row = new Hashtable<String, Integer>(24, 1f);
			table.put(outer, row);
		}
		Integer count = row.get(inner);
		row.put(inner, count == null ? 1 : count + 1);
	}
	
	
	/**
	 * Streams a tab-separated delicious export and, for each run of
	 * consecutive lines sharing the same value in column 2 (a "group"),
	 * feeds every ordered pair of column-3 values within the group to
	 * updateHashes.
	 *
	 * NOTE(review): relies on FileInput.readString() returning null at EOF
	 * (the loop condition depends on it) — confirm against util.io.FileInput.
	 * NOTE(review): the first line is trimmed without a null check, so an
	 * empty input file would throw a NullPointerException below.
	 * NOTE(review): the last group in the file is never flushed — pairs are
	 * only emitted when the group key changes, and EOF exits the loop without
	 * a final emit. Confirm whether that loss is intended.
	 *
	 * @param path path of the tab-separated input file
	 */
	public static void deliciousCounts(String path) {

		FileInput in = new FileInput(path);
		String line = in.readString().trim();

		LinkedList<String> tmp = new LinkedList<String>();

		String previous = "";
		String t[] = line.split("\t");
		previous = t[2];

		// boolean kids = false;

		tmp.add(line);
		while (line != null) {
			
			// Remember the group key (column 2) of the line buffered last.
			if(t.length>2)
			previous = t[2].trim();
			
			
			line = in.readString();
			if (line != null ) {

				line = line.trim();
				t = line.split("\t");

				// Group boundary: the key changed, so emit every pair of the
				// buffered group before starting a new one.
				if (t.length > 2 && !previous.equals(t[2].trim())) {

					// if (kids) {
					// output entry

					for (int i = 0; i < tmp.size() - 1; i++) {

						String tt[] = tmp.get(i).trim().split("\t");
						for (int j = i + 1; j < tmp.size(); j++) {

							String m[] = tmp.get(j).trim().split("\t");

							// Only lines with at least 4 columns carry a tag
							// in column 3.
							if (tt.length >= 4 && m.length >= 4) {

								String ii = tt[3];
								String jj = m[3];

								updateHashes(ii, jj);
							//	System.out.println(tt[3] + "\t" + m[3]);
							}

						}

					}

					/*
					 * for (int i = 0; i < tmp.size(); i++) {
					 * System.out.println(tmp.get(i)); }
					 */
					// }

					// kids = false;
					tmp = new LinkedList<String>();

				}

				/*
				 * for (int i = 0; i < filter.length; i++) { //
				 * System.out.println(line); if (t.length > 3 &&
				 * t[3].trim().equals(filter[i])) {
				 * 
				 * kids = true;
				 * 
				 * }
				 * 
				 * }
				 */

				tmp.add(line);

			}

		}

	}
	
	
	/**
	 * Expands the two raw tags through tags_dict and updates the static
	 * unigram (uni) and bigram (bi / bi2) counts for:
	 *   1. every cross pair (term of a, term of b),
	 *   2. every unordered pair inside a's expansion,
	 *   3. every unordered pair inside b's expansion.
	 *
	 * bi is keyed [second][first] and bi2 [first][second], matching the
	 * layout used by iniContextCounts.
	 *
	 * BUGFIX: the original put three of the bi2 rows back under the wrong key
	 * (e.g. it fetched the row for key i and stored it back under key j),
	 * overwriting unrelated rows of bi2; the row is now always stored under
	 * its own key. The triplicated loop bodies were also collapsed into the
	 * helpers below.
	 *
	 * @param a first raw tag
	 * @param b second raw tag
	 */
	public static void updateHashes(String a, String b) {

		LinkedList<String> i_expanded = tags_dict.getEntries(a);

		LinkedList<String> j_expanded = tags_dict.getEntries(b);

		System.out.println("hash:" + a+ "\t"+ b);

		// 1. cross pairs between the two expansions
		if (i_expanded != null && j_expanded != null) {
			for (String i_term : i_expanded) {
				for (String j_term : j_expanded) {
					countPairOccurrence(i_term, j_term);
				}
			}
		}

		// 2. unordered pairs inside a's expansion
		countInternalPairs(i_expanded);

		// 3. unordered pairs inside b's expansion
		countInternalPairs(j_expanded);
	}

	/**
	 * Counts every unordered pair (terms[x], terms[y]) with x < y of a single
	 * expansion list; a null list is ignored.
	 */
	private static void countInternalPairs(LinkedList<String> terms) {
		if (terms == null)
			return;
		for (int x = 0; x < terms.size(); x++) {
			for (int y = x + 1; y < terms.size(); y++) {
				countPairOccurrence(terms.get(x), terms.get(y));
			}
		}
	}

	/**
	 * Registers one (first, second) co-occurrence: bumps both unigrams, then
	 * bi[second][first] and bi2[first][second].
	 */
	private static void countPairOccurrence(String first, String second) {
		bumpCount(uni, second);
		bumpCount(uni, first);
		bumpNested(bi, second, first);
		bumpNested(bi2, first, second);
	}

	/** Increments counts[key], starting at 1 when absent. */
	private static void bumpCount(Hashtable<String, Integer> counts, String key) {
		Integer c = counts.get(key);
		counts.put(key, c == null ? 1 : c + 1);
	}

	/** Increments table[outer][inner], creating the inner row on demand. */
	private static void bumpNested(
			Hashtable<String, Hashtable<String, Integer>> table, String outer,
			String inner) {
		Hashtable<String, Integer> row = table.get(outer);
		if (row == null) {
			row = new Hashtable<String, Integer>(24, 1f);
			table.put(outer, row);
		}
		Integer c = row.get(inner);
		row.put(inner, c == null ? 1 : c + 1);
	}
		

	// (stale fragment left over from an earlier revision of updateHashes; it
	// documents no code)

		

	/**
	 * Reads a query log (tab-separated, query text in column QUERY_POS) and
	 * produces a flat list of (context, word) pairs laid out as
	 * [context0, word0, context1, word1, ...] for iniContextCounts.
	 *
	 * Stop words are removed from each query before pairing. A DUMMY token is
	 * paired with the first word (left context) and/or the last word (right
	 * context) to mark the query boundary.
	 *
	 * @param path
	 *            path of the query log file (entries divided by \t)
	 * @param context
	 *            0: includes both side context 1: includes left context 2:
	 *            includes right context
	 * @param window
	 *            maximum token distance between the two members of a pair
	 * @return the flat (context, word) pair list
	 */
	public static ArrayList<String> generatePairsPerQuery(String path,
			int context, int window) {

		FileInput in = new FileInput(path);

		ArrayList<String> pairs = new ArrayList<String>();

		String line = "";
		// NOTE(review): `in` can never be null immediately after `new`; this
		// guard is dead code.
		if (in != null)
			line = in.readString();

		while (line != null) {

			String tmp[] = line.trim().split("\t");

			tmp[QUERY_POS] = removeStopWords(tmp[QUERY_POS]);

			String words[] = tmp[QUERY_POS].trim().split("\\s+");

			// Left context: pair each word with every earlier word within the
			// window (context is added first, then the word).
			if (context == 0 || context == 1) {

				if (words.length > 0) {

					pairs.add(Constants.DUMMY);
					pairs.add(words[0]);

				}

				for (int m = 1; m < words.length; m++) {

					for (int n = m - 1; n >= 0; n--) {

						if (m - n <= window) {
							pairs.add(words[n]);
							pairs.add(words[m]);
						}

					}

				}

			}

			// Right context: pair each word with every later word within the
			// window; here the later word plays the context role.
			if (context == 0 || context == 2) {
				for (int m = 0; m < words.length - 1; m++) {

					for (int n = m + 1; n < words.length; n++) {

						if (n - m <= window) {
							pairs.add(words[n]);
							pairs.add(words[m]);
						}

					}

				}

				if (words.length > 0) {
					pairs.add(Constants.DUMMY);
					pairs.add(words[words.length - 1]);

				}

			}

			line = in.readString();

		}

		return pairs;

	}

	/**
	 * Returns {@code string} with stop words removed, remaining words
	 * separated by single spaces.
	 *
	 * Quirks preserved from the original: the last token is appended without
	 * a separator and without trim(), and the result keeps a trailing space
	 * when the final token is a stop word.
	 *
	 * @param string whitespace-separated word list
	 * @return the filtered string
	 */
	private static String removeStopWords(String string) {

		String t[] = string.split("\\s+");

		// StringBuilder instead of repeated String concatenation in a loop;
		// split() never returns null, so the original null checks are gone.
		StringBuilder tmp = new StringBuilder();

		for (int i = 0; i < t.length - 1; i++) {

			if (!StringAnalysis.isStopWord(t[i].trim())) {

				tmp.append(t[i].trim()).append(" ");

			}
		}

		// Last token: appended without a trailing separator.
		if (t.length > 0 && !StringAnalysis.isStopWord(t[t.length - 1].trim())) {

			tmp.append(t[t.length - 1]);

		}

		return tmp.toString();
	}

	/**
	 * Looks up the model probability of the context {@code a} given the word
	 * {@code w}, computed as bi[w][a] / uniR[a] from the tables filled by the
	 * counting passes.
	 *
	 * @return the estimated probability, or 0 when either count is missing
	 */
	public static double getContextProb(String a, String w) {

		// Hashtable stores no null values, so a null row means "w unseen".
		Hashtable<String, Integer> row = bi.get(w);

		boolean unseen = !uniR.containsKey(a) || row == null
				|| !row.containsKey(a);
		if (unseen) {
			return 0;
		}

		double joint = row.get(a);
		double marginal = uniR.get(a);
		return joint / marginal;
	}

	/**
	 * Initializes the counts of unigrams, bigrams and trigrams of the training
	 * data: fills the static tables uni, bi, bi2, tri and tri2 and sets N_T to
	 * the number of samples read.
	 *
	 * Reads one sample per line from {@code data} (tab-separated, token at
	 * QUERY_POS). The sliding window is (k, i, j) = two-back, one-back,
	 * current token; the first two slots start as DUMMY_2 boundary markers.
	 *
	 * Key layouts: bi[j][i], bi2[i][j], tri[j][i][k], tri2[k][i][j].
	 */
	void ini_grams() {
		String j = null;
		N_T = 0;
		String i = null;
		String k = null;

		String tmp[];

		Integer adder;
		Hashtable<String, Integer> htemp;
		Hashtable<String, Hashtable<String, Integer>> htemp2;

		// we create some dummy tags for the first trigrams, bigrams
		j = DUMMY_2;
		i = DUMMY_2;

		while (!data.eof()) {
			tmp = data.readString().trim().split("\t");
			// Shift the window: k <- i <- j <- new token.
			k = i;
			i = j;
			j = tmp[QUERY_POS];

			// Unigram count for the current token j: increment if present,
			// otherwise start at 1.
			if (uni.containsKey(j)) {
				adder = uni.get(j);
				uni.put(j, adder + 1);
			} else {
				uni.put(j, 1);

			}
			// Bigram count bi[j][i].

			if (bi.containsKey(j)) {
				htemp = bi.get(j);
				if (htemp.containsKey(i)) {
					adder = htemp.get(i);
					htemp.put(i, adder + 1);

				} else {
					htemp.put(i, 1);

				}
				bi.put(j, htemp);
			} else {

				htemp = new Hashtable<String, Integer>(24, 1f);
				htemp.put(i, 1);
				bi.put(j, htemp);
			}

			// Inverted bigram count bi2[i][j].

			if (bi2.containsKey(i)) {
				htemp = bi2.get(i);
				if (htemp.containsKey(j)) {
					adder = htemp.get(j);
					htemp.put(j, adder + 1);

				} else {
					htemp.put(j, 1);

				}
				bi2.put(i, htemp);
			} else {

				htemp = new Hashtable<String, Integer>(24, 1f);
				htemp.put(j, 1);
				bi2.put(i, htemp);
			}

			// Trigram count tri[j][i][k].

			if (tri.containsKey(j)) {
				htemp2 = tri.get(j);
				if (htemp2.containsKey(i)) {
					htemp = htemp2.get(i);

					if (htemp.containsKey(k)) {
						adder = htemp.get(k);
						htemp.put(k, adder + 1);
					} else {
						htemp.put(k, 1);
					}

					htemp2.put(i, htemp);
				} else {

					htemp = new Hashtable<String, Integer>(24, 1f);
					htemp.put(k, 1);

					htemp2.put(i, htemp);
				}
				tri.put(j, htemp2);
			} else {

				htemp2 = new Hashtable<String, Hashtable<String, Integer>>(24,
						1f);
				htemp = new Hashtable<String, Integer>(24, 1f);
				htemp.put(k, 1);
				htemp2.put(i, htemp);
				tri.put(j, htemp2);

			}

			// Inverted trigram count tri2[k][i][j].

			if (tri2.containsKey(k)) {
				htemp2 = tri2.get(k);
				if (htemp2.containsKey(i)) {
					htemp = htemp2.get(i);

					if (htemp.containsKey(j)) {
						adder = htemp.get(j);
						htemp.put(j, adder + 1);
					} else {
						htemp.put(j, 1);
					}

					htemp2.put(i, htemp);
				} else {

					htemp = new Hashtable<String, Integer>(24, 1f);
					htemp.put(j, 1);

					htemp2.put(i, htemp);
				}
				tri2.put(k, htemp2);
			} else {

				htemp2 = new Hashtable<String, Hashtable<String, Integer>>(24,
						1f);
				htemp = new Hashtable<String, Integer>(24, 1f);
				htemp.put(j, 1);
				htemp2.put(i, htemp);
				tri2.put(k, htemp2);

			}

			// Size of training data update
			N_T++;

		}// end if tmp equal null

	}
	
	
	
	/**
	 * Builds the aspect model from a delicious export: accumulates the
	 * co-occurrence counts, derives the marginals, then builds the
	 * similarity graph.
	 */
	public static void generate_aspectModel(String path) {
		deliciousCounts(path);
		iniUnigrams(bi, uniL, uniR);
		iniGraphNodes();
	}
	
	
	/**
	 * Returns up to {@code limit} context tags associated with {@code word},
	 * taken in the order stored in graph_distances; an empty set is returned
	 * when the word is unknown.
	 */
	public static HashSet<String> getContextTags(String word, int limit) {
		HashSet<String> result = new HashSet<String>();

		ArrayList<Node> neighbours = graph_distances.get(word);
		if (neighbours != null) {
			int bound = Math.min(limit, neighbours.size());
			for (int idx = 0; idx < bound; idx++) {
				result.add(neighbours.get(idx).getWord());
			}
		}

		return result;
	}
	/**
	 * Interactive driver: builds the aspect model from a fixed input file and
	 * then answers console queries, printing up to 20 graph neighbours per
	 * entered word until "exit" is typed.
	 */
	public static void main(String[] args) {

		String filePath = "/home/sergio/projects/CODFC/queryAnalysis/kids_queries1.txt";

		// generate_clusters(filePath, 0, 2);

		String path = "/home/sergio/projects/data/delicious/experiments/dmoz_del_approx1.txt";
		generate_aspectModel(path);

		Scanner console = new Scanner(System.in);

		String query = "";
		while (!query.equals("exit")) {

			System.out.print(">");
			query = console.nextLine();

			ArrayList<Node> hits = graph_distances.get(query);
			if (hits != null) {

				int shown = Math.min(20, hits.size());
				for (int idx = 0; idx < shown; idx++) {
					System.out.println(hits.get(idx).getWord() + "\t"
							+ hits.get(idx).getContext());
				}

			}
			System.out.println();
		}

		console.close();

	}

}

