package util.nlp;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Scanner;
import java.util.Map.Entry;

import util.io.FileInput;
import util.nlp.comparator.ByNodeDistance;
import util.string.StringAnalysis;

public class LM implements Constants {

	/*
	 * Path variables and objects to access data files
	 */

	// String dict_path =
	// "/home/sergio/Documents/delicious_stats/tags_dict.txt";

	// String stop_tags_path =
	// "/home/sergio/projects/data/delicious/experiments/stop_tags.txt";

	// Mean node distance — NOTE(review): never read or written in this file;
	// confirm external users before removing.
	double distance_mean = 0;

	/**
	 * Returns the global item total, stored in tagsFreq under the special
	 * UNI_TOTAL_TAG key.
	 *
	 * NOTE(review): unboxing throws NullPointerException if the total entry
	 * has not been initialised — confirm callers run initCountsSimple or
	 * initFrequencyHash first.
	 *
	 * @return total number of items recorded for the whole data set
	 */
	public double getTotal_items_global() {
		return this.tagsFreq.get(util.delicious.Constants.UNI_TOTAL_TAG);
	}

	// Raw unigram counts per tag, read straight from the input file
	private Hashtable<String, Integer> uni = new Hashtable<String, Integer>(24,
			1f);

	// Combined unigram counts derived from the bigram table (see iniUnigrams)
	public Hashtable<String, Integer> uni2 = new Hashtable<String, Integer>(24,
			1f);

	// Left (context) / right (target) positional counts from the bigram table
	private Hashtable<String, Integer> uniL = new Hashtable<String, Integer>(
			24, 1f);
	private Hashtable<String, Integer> uniR = new Hashtable<String, Integer>(
			24, 1f);

	// Bigram counts: bi.get(b).get(a) = count of pair (a, b) — see updateHashes
	private Hashtable<String, Hashtable<String, Integer>> bi = new Hashtable<String, Hashtable<String, Integer>>(
			24, 1f);

	// Inverse bigram counts maintained alongside bi
	private Hashtable<String, Hashtable<String, Integer>> bi2 = new Hashtable<String, Hashtable<String, Integer>>(
			24, 1f);

	/*
	 * Clustering and model helper data structures
	 */

	// graph_distances.get(s).get(t) holds p(t|s) — built by iniGraphNodes
	private Hashtable<String, Hashtable<String, Double>> graph_distances = new Hashtable<String, Hashtable<String, Double>>();

	// Adjacency lists of Node objects — NOTE(review): unused in visible code
	private Hashtable<String, ArrayList<Node>> graph = new Hashtable<String, ArrayList<Node>>(
			24, 1f);

	/*
	 * Hashtables containing users frequencies
	 *
	 * (in the whole data set and in the kids data set)
	 */

	// user -> posting frequency in the whole data set
	private Hashtable<String, Double> usersFreq = new Hashtable<String, Double>();
	// user -> posting frequency in the kids data set
	private Hashtable<String, Double> usersFreqKids = new Hashtable<String, Double>();
	// tag -> set of users that used the tag
	private Hashtable<String, HashSet<String>> userTags = new Hashtable<String, HashSet<String>>();

	// tag -> frequency; also stores the grand total under UNI_TOTAL_TAG
	private Hashtable<String, Double> tagsFreq = new Hashtable<String, Double>();

	// tag -> user-based weight, filled by initUserWeights
	public Hashtable<String, Double> userWeights = new Hashtable<String, Double>();

	/** @return tag -> frequency table (includes the UNI_TOTAL_TAG total) */
	public Hashtable<String, Double> getTagsFreq() {
		return this.tagsFreq;
	}

	/** Replaces the tag frequency table. */
	public void setTagsFreq(Hashtable<String, Double> tagsFreq) {
		this.tagsFreq = tagsFreq;
	}

	/** @return user -> posting frequency in the whole data set */
	public Hashtable<String, Double> getUsersFreq() {
		return this.usersFreq;
	}

	/** Replaces the global user frequency table. */
	public void setUsersFreq(Hashtable<String, Double> usersFreq) {
		this.usersFreq = usersFreq;
	}

	/** @return user -> posting frequency in the kids data set */
	public Hashtable<String, Double> getUsersFreqKids() {
		return this.usersFreqKids;
	}

	/** Replaces the kids user frequency table. */
	public void setUsersFreqKids(Hashtable<String, Double> usersFreqKids) {
		this.usersFreqKids = usersFreqKids;
	}

	/** @return tag -> set of users that used it */
	public Hashtable<String, HashSet<String>> getUserTags() {
		return this.userTags;
	}

	/** Replaces the tag -> users table. */
	public void setUserTags(Hashtable<String, HashSet<String>> userTags) {
		this.userTags = userTags;
	}

	// final vocabulary after filtering tags with low (and high) frequency
	private HashSet<String> vocabulary = new HashSet<String>();

	// running total of unigram occurrences (incremented twice per counted pair)
	private double total_unigrams = 0;

	/** @return total number of unigram occurrences counted so far */
	public double getTotal_unigrams() {
		return total_unigrams;
	}

	/** Overrides the unigram total. */
	public void setTotal_unigrams(double totalUnigrams) {
		this.total_unigrams = totalUnigrams;
	}

	/**
	 * Updates the per-tag user sets (userTags) and the kids-data-set user
	 * frequency table (usersFreqKids) from one input line.
	 *
	 * Line format (tab separated): user id at column USER_POS, tags from
	 * column TAGS_POS onwards.
	 *
	 * @param line tab-separated input record
	 * @param min_freq only tags with raw frequency strictly above this are kept
	 * @param max_freq only tags with raw frequency strictly below this are kept
	 */
	private void initUsersPerline(String line, int min_freq, int max_freq) {

		final int TAGS_POS = 4;
		final int USER_POS = 2;

		String[] t = line.split("\t");
		String user = t[USER_POS];

		for (int i = TAGS_POS; i < t.length; i++) {

			// hoisted: the original fetched uni.get(t[i]) three times per tag
			Integer freq = uni.get(t[i]);
			if (freq != null && freq < max_freq && freq > min_freq) {

				HashSet<String> users = userTags.get(t[i]);
				if (users == null) {
					users = new HashSet<String>();
					userTags.put(t[i], users);
				}
				users.add(user);
			}
		}

		// update the user's posting count in the kids data set
		Double current = usersFreqKids.get(user);
		if (current == null) {
			// Double.valueOf over the deprecated new Double(...)
			usersFreqKids.put(user, Double.valueOf(1));
		} else {
			usersFreqKids.put(user, current + 1);
		}
	}

	/**
	 * Loads frequencies from 'pathFile' (tab separated: name, count) into
	 * 'hash', keeping only names present in 'voc' plus the special
	 * UNI_TOTAL_TAG entry. Lines with fewer than two fields are skipped.
	 *
	 * @param pathFile frequency file path
	 * @param hash     destination table
	 * @param voc      vocabulary filter (set membership)
	 */
	private void initFrequencyHash(String pathFile,
			Hashtable<String, Double> hash, HashSet<String> voc) {

		FileInput in = new FileInput(pathFile);

		String line = in.readString();
		while (line != null) {

			String[] t = line.split("\t");

			if (t.length > 1
					&& (voc.contains(t[0]) || t[0]
							.equals(util.delicious.Constants.UNI_TOTAL_TAG))) {
				// Double.valueOf over the deprecated new Double(String)
				hash.put(t[0], Double.valueOf(t[1]));
			}

			line = in.readString();
		}
	}

	/**
	 * Loads frequencies from 'pathFile' (tab separated: name, count) into
	 * 'hash', keeping only names that are keys of 'voc' plus the special
	 * UNI_TOTAL_TAG entry. Lines with fewer than two fields are skipped.
	 *
	 * @param pathFile frequency file path
	 * @param hash     destination table
	 * @param voc      vocabulary filter (key membership)
	 */
	private void initFrequencyHash(String pathFile,
			Hashtable<String, Double> hash, Hashtable<String, Double> voc) {

		FileInput in = new FileInput(pathFile);

		String line = in.readString();
		while (line != null) {

			String[] t = line.split("\t");

			if (t.length > 1
					&& (voc.containsKey(t[0]) || t[0]
							.equals(util.delicious.Constants.UNI_TOTAL_TAG))) {
				// Double.valueOf over the deprecated new Double(String)
				hash.put(t[0], Double.valueOf(t[1]));
			}

			line = in.readString();
		}
	}

	/**
	 * Counts co-occurrence bigrams for every once-ordered pair (i, j), i < j,
	 * of in-frequency-band tags on one line: adds both members to the
	 * vocabulary, bumps total_unigrams twice per pair, and updates bi/bi2 via
	 * updateHashes.
	 *
	 * NOTE(review): appears unused in this file — initCounts calls the
	 * Double variant instead; confirm before removing.
	 *
	 * @param line tab-separated input record (tags from column 4 onwards)
	 * @param min_freq keep tags with raw frequency strictly above this
	 * @param max_freq keep tags with raw frequency strictly below this
	 */
	private void initCountsPerline(String line, int min_freq, int max_freq) {
		final int TAGS_POS = 4;
		String[] t = line.split("\t");

		// ArrayList instead of LinkedList: get(i)/get(j) in the nested loops
		// made the original accidentally quadratic in traversal cost
		ArrayList<String> tags = new ArrayList<String>();
		for (int i = TAGS_POS; i < t.length; i++) {
			tags.add(t[i]);
		}

		for (int i = 0; i < tags.size(); i++) {

			String a = tags.get(i);
			Integer fa = uni.get(a); // hoisted: original looked this up 3x
			if (fa == null || fa <= min_freq || fa >= max_freq) {
				continue;
			}

			vocabulary.add(a);

			for (int j = i + 1; j < tags.size(); j++) {

				String b = tags.get(j);
				Integer fb = uni.get(b);
				if (fb != null && fb > min_freq && fb < max_freq) {

					vocabulary.add(b);

					// one unigram occurrence per pair member
					total_unigrams += 2;

					updateHashes(a, b, bi, bi2);
				}
			}
		}
	}

	/**
	 * Like initCountsPerline but counts each pair in both directions:
	 * updateHashes is called for (a, b) and (b, a), making the bigram tables
	 * symmetric. Adds both pair members to the vocabulary and bumps
	 * total_unigrams twice per pair.
	 *
	 * @param line tab-separated input record (tags from column 4 onwards)
	 * @param min_freq keep tags with raw frequency strictly above this
	 * @param max_freq keep tags with raw frequency strictly below this
	 */
	private void initCountsPerlineDouble(String line, int min_freq, int max_freq) {
		final int TAGS_POS = 4;
		String[] t = line.split("\t");

		// ArrayList instead of LinkedList: get(i)/get(j) in the nested loops
		// made the original accidentally quadratic in traversal cost
		ArrayList<String> tags = new ArrayList<String>();
		for (int i = TAGS_POS; i < t.length; i++) {
			tags.add(t[i]);
		}

		for (int i = 0; i < tags.size(); i++) {

			String a = tags.get(i);
			Integer fa = uni.get(a); // hoisted: original looked this up 3x
			if (fa == null || fa <= min_freq || fa >= max_freq) {
				continue;
			}

			vocabulary.add(a);

			for (int j = i + 1; j < tags.size(); j++) {

				String b = tags.get(j);
				Integer fb = uni.get(b);
				if (fb != null && fb > min_freq && fb < max_freq) {

					vocabulary.add(b);

					// one unigram occurrence per pair member
					total_unigrams += 2;

					updateHashes(a, b, bi, bi2);
					updateHashes(b, a, bi, bi2);
				}
			}
		}
	}

	/**
	 * Simple pair-weighted counting: for every pair (i, j), i < j, of tags on
	 * the line, increments tagsFreq for both pair members (so a tag's count
	 * is weighted by how many pairs it appears in) and bumps total_unigrams
	 * twice.
	 *
	 * NOTE(review): min_freq / max_freq are accepted but never used here —
	 * kept for signature symmetry with the other initCountsPerline variants.
	 *
	 * @param line tab-separated input record (tags from column 4 onwards)
	 */
	private void initCountsPerlineSimple(String line, int min_freq, int max_freq) {
		final int TAGS_POS = 4;
		String[] t = line.split("\t");

		// ArrayList instead of LinkedList: get(i)/get(j) in the nested loops
		// made the original accidentally quadratic in traversal cost
		ArrayList<String> tags = new ArrayList<String>();
		for (int i = TAGS_POS; i < t.length; i++) {
			tags.add(t[i]);
		}

		for (int i = 0; i < tags.size(); i++) {
			for (int j = i + 1; j < tags.size(); j++) {

				// one unigram occurrence per pair member
				total_unigrams += 2;

				incrementTagFreq(tags.get(i));
				incrementTagFreq(tags.get(j));
			}
		}
	}

	// Adds one to tagsFreq for 'tag', creating the entry when absent.
	private void incrementTagFreq(String tag) {
		Double current = tagsFreq.get(tag);
		if (current == null) {
			tagsFreq.put(tag, Double.valueOf(1));
		} else {
			tagsFreq.put(tag, current + 1);
		}
	}

	/**
	 * Alternative user weight for 'tag': sums uFK*log(uFK+1)/uF over the
	 * tag's users, scaled by alpha (1/1000) and betha (|users|/30).
	 *
	 * NOTE(review): alpha and betha are multiplied into w INSIDE the loop, so
	 * the scaling compounds once per user — likely meant to be applied once
	 * after the loop; confirm intent before relying on these values.
	 *
	 * @param tag tag to weight
	 * @return accumulated weight, 0 when the tag has no recorded users
	 */
	public double getUserWeight1(String tag) {

		double w = 0;

		// per-user normalization factor
		double alpha = (double) 1 / (double) 1000;

		if (userTags.containsKey(tag)) {

			HashSet<String> users = userTags.get(tag);

			Iterator<String> iterator = users.iterator();

			// give more importance to tags tagged by more users
			double betha = (double) users.size() / (double) 30;

			while (iterator.hasNext()) {

				String user = iterator.next();
				double uF = getUserFreq(user);
				double uFK = getUserKidsFreq(user);

				// add more importance to users tagging more entrances
				if (uF != 0) {

					double num = (double) uFK * Math.log(uFK + 1);

					w = w + (double) ((double) num / (double) uF);

				}

				// normalization — NOTE(review): applied every iteration
				w = w * alpha;

				// user-count boost — NOTE(review): applied every iteration
				w = w * betha;

			}

			return w;

		}

		return w;
	}

	/**
	 * User weight for 'tag': the average, over all users that used the tag,
	 * of the ratio kids-frequency / non-kids-frequency of the user. Users
	 * with zero non-kids frequency contribute nothing.
	 *
	 * @param tag tag to weight
	 * @return averaged ratio, or 0 when the tag has no recorded users
	 */
	public double getUserWeight(String tag) {

		HashSet<String> users = userTags.get(tag);
		if (users == null) {
			return 0;
		}

		double sum = 0;
		for (String user : users) {
			double uF = getUserFreq(user);
			double uFK = getUserKidsFreq(user);
			if (uF != 0) {
				sum += uFK / uF;
			}
		}

		if (users.size() > 0) {
			return sum / users.size();
		}
		return sum;
	}

	/**
	 * Placeholder for an internal user-weight over a tag set.
	 *
	 * NOTE(review): the loop body is empty, so this always returns 0 —
	 * apparently unfinished.
	 *
	 * @param tags tag set (currently ignored)
	 * @return always 0
	 */
	public double getUserWeightInternal(HashSet<String> tags) {

		double w = 0d;

		Iterator<String> iter = tags.iterator();
		while (iter.hasNext()) {

			String t = iter.next();

		}

		return w;
	}

	/**
	 * Computes and caches a user weight (getUserWeight) for every tag
	 * currently present in tagsFreq, storing the results in userWeights.
	 */
	public void initUserWeights() {

		for (Enumeration<String> tags = tagsFreq.keys(); tags.hasMoreElements();) {
			String tag = tags.nextElement();
			userWeights.put(tag, getUserWeight(tag));
		}
	}

	/**
	 * Get frequency of user 'user' in the kids data set.
	 *
	 * NOTE(review): unboxing throws NullPointerException when the user is
	 * absent from usersFreqKids — callers appear to pass only users seen in
	 * the kids file (via userTags); confirm.
	 *
	 * @param user user id
	 * @return posting frequency in the kids data set
	 */
	private double getUserKidsFreq(String user) {

		return usersFreqKids.get(user);

	}

	/**
	 * Get frequency of user 'user' in the non-kids data set, computed as the
	 * global frequency minus the kids frequency.
	 *
	 * NOTE(review): unboxing throws NullPointerException when the user is
	 * missing from usersFreq or usersFreqKids — confirm both tables are
	 * loaded before this is called.
	 *
	 * @param user user id
	 * @return global frequency minus kids frequency
	 */
	private double getUserFreq(String user) {

		return usersFreq.get(user) - getUserKidsFreq(user);

	}

	/**
	 * Builds the probability graph from the bigram counts.
	 *
	 * For each bigram pair (key, key2) it stores, order-independently,
	 * graph_distances(key2, key) = p(key|key2) = (a + b) / (uniL + uniR of
	 * key2) and the inverse graph_distances(key, key2) = p(key2|key), where a
	 * and b are the counts of the pair in both directions. The inverted
	 * storage is needed because ranking iterates over p(t|s) by s.
	 *
	 * Finally, for every vocabulary tag the diagonal entry
	 * graph_distances(x, x) is set to its unigram probability
	 * (uniL + uniR) / total_unigrams.
	 */
	private void iniGraphNodes() {

		Enumeration<String> iter = bi.keys();

		while (iter.hasMoreElements()) {

			String key = iter.nextElement();

			Enumeration<String> iter2 = bi.get(key).keys();

			Hashtable<String, Double> hash = null;
			while (iter2.hasMoreElements()) {

				String key2 = iter2.nextElement();

				// calculate distance P(key|key2) and inverse P(key2|key)
				// since order is not considered

				// a / b: pair counts in each direction
				double a = 0.0d;
				double b = 0.0d;

				// unigram counts for key2 (denominator of p(key|key2))
				double uni_l = 0.0d;
				double uni_r = 0.0d;

				// unigram counts for key (denominator of the inverse)
				double uni_l_key = 0.0d;
				double uni_r_key = 0.0d;

				if (bi.containsKey(key)) {

					if (bi.get(key).containsKey(key2)) {
						a = bi.get(key).get(key2);

					}
				}

				if (bi.containsKey(key2)) {

					if (bi.get(key2).containsKey(key)) {
						b = bi.get(key2).get(key);

					}
				}

				// uni counts for key
				if (uniL.containsKey(key)) {

					uni_l_key = uniL.get(key);
				}

				if (uniR.containsKey(key)) {

					uni_r_key = uniR.get(key);
				}

				// uni counts for key2
				if (uniL.containsKey(key2)) {

					uni_l = uniL.get(key2);
				}

				if (uniR.containsKey(key2)) {

					uni_r = uniR.get(key2);
				}

				// NOTE(review): division by zero yields Infinity/NaN when a
				// tag has no unigram counts — confirm that cannot happen here
				double temp = (double) (a + b) / (double) (uni_l + uni_r);

				double tempInverse = (double) (a + b)
						/ (double) (uni_l_key + uni_r_key);

				// store p(key|key2) under key2
				hash = graph_distances.get(key2);

				if (hash == null)
					hash = new Hashtable<String, Double>();

				hash.put(key, temp);

				graph_distances.put(key2, hash);

				// adding inverse prob p(key2|key), only if not already set
				if (!graph_distances.containsKey(key)) {

					hash = new Hashtable<String, Double>();
				} else {
					hash = graph_distances.get(key);

				}

				if (!hash.containsKey(key2)) {
					hash.put(key2, tempInverse);

					graph_distances.put(key, hash);

				}

			}

		}

		// unigram probability stored as the diagonal p(x|x) in the graph;
		// we use the vocabulary set for this
		Iterator<String> iter2 = vocabulary.iterator();

		while (iter2.hasNext()) {

			String voc = iter2.next();
			double l = 0;
			double r = 0;
			if (uniL.containsKey(voc))
				l = uniL.get(voc);

			if (uniR.containsKey(voc))
				r = uniR.get(voc);

			double prob = (double) (l + r) / (double) total_unigrams;
			Hashtable<String, Double> hash = null;
			if (graph_distances.containsKey(voc)) {
				hash = graph_distances.get(voc);

			} else {

				hash = new Hashtable<String, Double>();

			}
			hash.put(voc, prob);

			graph_distances.put(voc, hash);
		}

	}

	/**
	 * Reads every line of 'file' and feeds it to the per-line bigram counter
	 * (double-direction variant) and the per-line user initialiser.
	 *
	 * @param file input file path
	 * @param min_freq keep tags with raw frequency strictly above this
	 * @param max_freq keep tags with raw frequency strictly below this
	 */
	private void initCounts(String file, int min_freq, int max_freq) {

		FileInput in = new FileInput(file);

		for (String current = in.readString(); current != null; current = in
				.readString()) {

			initCountsPerlineDouble(current, min_freq, max_freq);
			initUsersPerline(current, min_freq, max_freq);
		}
	}

	/**
	 * Builds tagsFreq from 'file' using the simple pair-counting scheme,
	 * records the grand total under UNI_TOTAL_TAG, and dumps the table to
	 * stdout.
	 *
	 * Fix: the original obtained a key Enumeration and THEN structurally
	 * modified the Hashtable (the UNI_TOTAL_TAG put) before iterating —
	 * Hashtable enumerators give no guarantees under concurrent structural
	 * modification. The keys are snapshotted first instead.
	 *
	 * @param file input file path
	 * @param min_freq forwarded to initCountsPerlineSimple (currently unused there)
	 * @param max_freq forwarded to initCountsPerlineSimple (currently unused there)
	 */
	private void initCountsSimple(String file, int min_freq, int max_freq) {

		FileInput in = new FileInput(file);

		String current = in.readString();
		while (current != null) {
			initCountsPerlineSimple(current, min_freq, max_freq);
			current = in.readString();
		}

		// snapshot keys before inserting the total entry
		ArrayList<String> keys = new ArrayList<String>(tagsFreq.keySet());

		tagsFreq.put(util.delicious.Constants.UNI_TOTAL_TAG,
				this.total_unigrams);

		for (String key : keys) {
			System.out.println(key + "\t" + tagsFreq.get(key));
		}
	}

	/**
	 * Derives unigram counts from the bigram table: for every entry
	 * bi.get(key).get(key2) the count is added to uni2 for both key and key2
	 * (combined counts), to uniR for key2 (target position) and to uniL for
	 * key (context position).
	 *
	 * Fix: the original accumulated uni2.get(key2) into key's slot (copy-paste
	 * from the key2 branch at the line above), corrupting key's combined
	 * count; it now accumulates uni2.get(key).
	 *
	 * @param bi   bigram counts p(i,j) where the outer key refers to j and the
	 *             inner key i is the context of j
	 * @param uniL hash receiving the context-position (i.e. outer key) counts
	 * @param uniR hash receiving the target-position (i.e. inner key) counts
	 */
	void iniUnigrams(Hashtable<String, Hashtable<String, Integer>> bi,
			Hashtable<String, Integer> uniL, Hashtable<String, Integer> uniR) {

		Enumeration<String> iter = bi.keys();

		while (iter.hasMoreElements()) {

			String key = iter.nextElement();

			Hashtable<String, Integer> temp = bi.get(key);

			Enumeration<String> iter2 = temp.keys();
			while (iter2.hasMoreElements()) {

				String key2 = iter2.nextElement();
				int count = temp.get(key2);

				addCount(uni2, key2, count);
				addCount(uni2, key, count); // was uni2.get(key2) + count — bug
				addCount(uniR, key2, count);
				addCount(uniL, key, count);
			}
		}
	}

	// Adds 'count' to hash[key], creating the entry when absent.
	private static void addCount(Hashtable<String, Integer> hash, String key,
			int count) {
		Integer current = hash.get(key);
		if (current == null) {
			hash.put(key, count);
		} else {
			hash.put(key, current + count);
		}
	}

	/**
	 * Increments the bigram counters for the pair (a, b).
	 *
	 * bi keys by the second element — bi.get(b).get(a) counts a in the
	 * context of b — while biInverse keys by the first element:
	 * biInverse.get(a).get(b).
	 *
	 * Fix: the original wrote the inverse table back under key 'b' in both
	 * branches (copy-paste from the bi branch), so entries fetched via
	 * biInverse.get(a) were stored under 'b' and could alias a's inner table
	 * onto b's slot. It now stores under 'a'.
	 *
	 * @param a         first element of the pair
	 * @param b         second element of the pair
	 * @param bi        counts keyed (b -> a -> n); skipped when null
	 * @param biInverse counts keyed (a -> b -> n); skipped when null
	 */
	public void updateHashes(String a, String b,
			Hashtable<String, Hashtable<String, Integer>> bi,
			Hashtable<String, Hashtable<String, Integer>> biInverse) {

		// bigrams update
		if (bi != null) {
			Hashtable<String, Integer> htemp = bi.get(b);
			if (htemp == null) {
				htemp = new Hashtable<String, Integer>(24, 1f);
				bi.put(b, htemp);
			}
			Integer adder = htemp.get(a);
			htemp.put(a, adder == null ? 1 : adder + 1);
		}

		// same for the inverted hash p(i,j)
		if (biInverse != null) {
			Hashtable<String, Integer> htemp = biInverse.get(a);
			if (htemp == null) {
				htemp = new Hashtable<String, Integer>(24, 1f);
				biInverse.put(a, htemp); // was biInverse.put(b, ...) — bug
			}
			Integer adder = htemp.get(b);
			htemp.put(b, adder == null ? 1 : adder + 1);
		}
	}

	/**
	 * Counts raw tag occurrences in 'file' into 'uni'. Each line is tab
	 * separated with tags starting at column 4.
	 *
	 * @param file input file path
	 * @param uni  destination tag -> count table
	 */
	public void initUniRaw(String file, Hashtable<String, Integer> uni) {

		final int TAGS_POS = 4;
		FileInput in = new FileInput(file);

		for (String line = in.readString(); line != null; line = in
				.readString()) {

			String[] fields = line.split("\t");
			for (int i = TAGS_POS; i < fields.length; i++) {
				Integer count = uni.get(fields[i]);
				uni.put(fields[i], count == null ? 1 : count + 1);
			}
		}
	}

	/**
	 * Probability of the context 'a' given the word 'w' under the stored
	 * model: P(a|w) = bi(w, a) / uniR(a), or 0 when either count is missing.
	 *
	 * @param a context token
	 * @param w target word
	 * @return P(a|w), or 0 if the pair or the context count is unseen
	 */
	public double getContextProb(String a, String w) {

		Hashtable<String, Integer> context = bi.get(w);
		if (context != null && context.containsKey(a) && uniR.containsKey(a)) {
			return (double) context.get(a) / (double) uniR.get(a);
		}
		return 0;
	}

	/**
	 * Builds the full aspect model from 'path': raw tag counts, per-line
	 * bigram/user counts, derived unigrams and the probability graph. When
	 * provided, external user / tag frequency files refine the global counts;
	 * a null path nulls the corresponding table.
	 *
	 * Fix: the externalTags else-branch nulled usersFreq (copy-paste from the
	 * branch above), clobbering the user table when only the tag file was
	 * absent — it now nulls tagsFreq.
	 *
	 * @param externalUsers path to global user frequencies, or null
	 * @param externalTags  path to global tag frequencies, or null
	 * @param minFreq       keep tags with raw frequency strictly above this
	 * @param maxFreq       keep tags with raw frequency strictly below this
	 */
	public void generateAspectModel(String path, String externalUsers,
			String externalTags, int minFreq, int maxFreq) {

		System.out
				.println("Initializing model tags (freq and user/tags) counts.....");
		initUniRaw(path, uni);
		initCounts(path, minFreq, maxFreq);

		System.out.println("Initializing model unigrams.....");
		iniUnigrams(bi, uniL, uniR);

		System.out.println("Initializing model graph nodes.....");
		iniGraphNodes();

		// Initialize global count that it is also in the kids data
		if (externalUsers != null) {
			System.out.println("Initializing global user counts.....");
			initFrequencyHash(externalUsers, usersFreq, usersFreqKids);
		} else {
			usersFreq = null;
		}

		if (externalTags != null) {
			System.out.println("Initializing global tag counts.....");
			initFrequencyHash(externalTags, tagsFreq, vocabulary);
		} else {
			tagsFreq = null; // was usersFreq = null — copy-paste bug
		}

		System.out.println("Writting model.....");
	}

	/**
	 * Builds the base model from 'path' without external global counts: raw
	 * tag counts, per-line bigram/user counts, derived unigrams, and the
	 * probability graph.
	 *
	 * @param path    input file (tab separated, tags from column 4)
	 * @param minFreq keep tags with raw frequency strictly above this
	 * @param maxFreq keep tags with raw frequency strictly below this
	 */
	public void generateModel(String path, int minFreq, int maxFreq) {

		System.out
				.println("Initializing model tags (freq and user/tags) counts.....");
		initUniRaw(path, uni);
		initCounts(path, minFreq, maxFreq);

		System.out.println("Initializing model unigrams.....");
		iniUnigrams(bi, uniL, uniR);

		System.out.println("Initializing model graph nodes.....");
		iniGraphNodes();

	}

	/** @return the bigram count table, keyed (b -> a -> count) */
	public Hashtable<String, Hashtable<String, Integer>> getBi() {
		return this.bi;
	}

	/**
	 * Ranks every candidate tag in the graph by the chain probability of the
	 * query tag set, multiplied by the candidate's prior. For each candidate
	 * s and each query tag t the factor is
	 * ALPHA * p(t|s) + BETHA * p(t) + CETHA (linear-interpolation smoothing).
	 *
	 * NOTE(review): graph_distances.get(t).get(t) unboxes and would throw
	 * NullPointerException if a query tag lacks a diagonal self-entry —
	 * iniGraphNodes only adds self-entries for vocabulary members; confirm
	 * query tags are always in the vocabulary.
	 *
	 * @param tags query tag set (the evidence)
	 * @return (candidate, score) entries sorted by score, highest first
	 */
	public LinkedList<Entry<String, Double>> getRankedTags(HashSet<String> tags) {

		LinkedList<Entry<String, Double>> ranked = new LinkedList<Entry<String, Double>>();

		// for each candidate tag
		Enumeration<String> keys = graph_distances.keys();
		int i = 0;

		while (keys.hasMoreElements()) {

			String candidate = keys.nextElement();

			// calculate chain probability based on query tags
			Iterator<String> iterator = tags.iterator();

			double prob = 1;
			double uni_prob = 0;
			i++;
			// row of p(t|candidate) values for this candidate
			Hashtable<String, Double> hash = graph_distances.get(candidate);

			/*
			 * TODO : Enhance more evidence tags hits
			 */
			while (iterator.hasNext()) {

				String t = iterator.next();

				// unigram probability of the query tag (graph diagonal)
				uni_prob = 0;
				if (graph_distances.containsKey(t)) {

					uni_prob = graph_distances.get(t).get(t);
				}

				// bigram probability p(t|candidate), 0 when unseen
				double temp = 0d;
				if (hash.containsKey(t)) {

					temp = hash.get(t);

				}

				// interpolated smoothing: ALPHA*bigram + BETHA*unigram + CETHA
				prob = prob
						* (temp * Constants.ALPHA + uni_prob * Constants.BETHA + Constants.CETHA);

			}

			/*
			 * Now we multiply by the prior probability of s p(s|k)
			 */
			double s = (double) graph_distances.get(candidate).get(candidate)
					/ (double) getUniSize();

			// single-entry map used only to manufacture a Map.Entry
			HashMap<String, Double> map = new HashMap<String, Double>();

			// the synthetic total entry must never be ranked
			if (candidate.equals(util.delicious.Constants.UNI_TOTAL_TAG))
				prob = 0;

			System.out.println("Adding: " + candidate + "\t"
					+ graph_distances.size());
			map.put(candidate, s * prob);

			addInsertInList(map.entrySet().iterator().next(), ranked);

		}

		return ranked;
	}

	/**
	 * Inserts 'element' into 'list' keeping it sorted by value in descending
	 * order: the insertion point is before the first entry whose value is not
	 * greater than the element's.
	 *
	 * Uses iteration instead of repeated list.get(i): on a LinkedList the
	 * original indexed scan was accidentally quadratic.
	 *
	 * @param element entry to insert
	 * @param list    ranked list, highest value first
	 */
	private void addInsertInList(Entry<String, Double> element,
			LinkedList<Entry<String, Double>> list) {

		int index = 0;
		for (Entry<String, Double> current : list) {
			if (current.getValue() > element.getValue()) {
				index++;
			} else {
				break;
			}
		}

		list.add(index, element);
	}

	/**
	 * NOTE(review): unimplemented stub — empty body, does nothing; intended
	 * purpose is not clear from this file.
	 */
	public static void generateUni(String file) {

	}

	/**
	 * Ad-hoc driver: builds simple co-occurrence counts over a local
	 * delicious dump and prints the tag frequency table.
	 *
	 * NOTE(review): paths are hard-coded to a developer machine; filePath and
	 * usersAll are assigned but unused (the calls that used them are
	 * commented out below).
	 */
	public static void main(String[] args) {
		LM model = new LM();
		String filePath = "/home/sergio/projects/CODFC/queryAnalysis/kids_queries1.txt";

		// generate_clusters(filePath, 0, 2);

		String path = "/home/sergio/projects/data/delicious/experiments/dmoz_del_approx1.txt";

		String pathSuffix = "/home/sergio/Dropbox/data/delicious/";

		path = pathSuffix + "delicious_kids_final.txt";

		String usersAll = pathSuffix + "delicious_users.freq.txt";

		// model.generateAspectModel(path,
		// usersAll,Integer.MIN_VALUE,Integer.MAX_VALUE);

		String full_delicious = "/media/sata_/data/delicious/delicious_single.txt";
		model.initCountsSimple(full_delicious, Integer.MIN_VALUE,
				Integer.MAX_VALUE);

	}

	/** @return total number of unigram occurrences counted so far */
	public double getUniSize() {
		return this.total_unigrams;
	}

	/** @return the probability graph, keyed source -> target -> p(target|source) */
	public Hashtable<String, Hashtable<String, Double>> getGraphModel() {
		return this.graph_distances;
	}

	/** Replaces the probability graph with an externally built one. */
	public void setGraphModel(Hashtable<String, Hashtable<String, Double>> graph) {
		this.graph_distances = graph;
	}

	/**
	 * Unigram probability of 'candidate', read from the graph diagonal
	 * p(x|x); 0 when the candidate is not in the graph.
	 *
	 * @param candidate tag to look up
	 * @return diagonal probability, or 0 when unknown
	 */
	public double getUni(String candidate) {

		Hashtable<String, Double> row = graph_distances.get(candidate);
		if (row == null) {
			return 0.0;
		}
		return row.get(candidate);
	}

	/**
	 * Non-kids frequency of the tag: its global frequency minus the model's
	 * unigram value; 0 when the tag is unknown globally.
	 *
	 * @param candidate tag to look up
	 * @return global frequency minus getUni(candidate), or 0 when unknown
	 */
	public double getGlobalUni(String candidate) {

		Double global = tagsFreq.get(candidate);
		if (global == null) {
			return 0.0;
		}
		return global - getUni(candidate);
	}

}
