/*
 * @(#)Resnik.java	1.0 09/01/07
 *
 * Copyright 2007 Fabio Gasparetti. All rights reserved.
 */

package org.tabularium.text.nlp;

import java.io.*;
import java.util.*;

import org.tabularium.text.TermWeightVector;
import org.tabularium.text.nlp.wordnet.*;
// debug
import org.tabularium.text.nlp.wordnet.jwi.*;

/**
 * Provides basic functionalities for Resnik's similarity and word sense
 * disambiguation algorithms.<blockquote>
 * 
 * <pre>
 * // read a word-frequency list from file 
 * ArrayCollectionFrequency f = new ArrayCollectionFrequency();
 * BncParser p = new BncParser();
 * f.load("all.num.o5", p);
 * // initialize Resnik computing the synset probability and N values
 * Stemmer stem = new JwiStemmer();
 * Resnik r = new Resnik(wn, stem, f);
 * r.init();
 * // write probs to file
 * r.writeSynsetProbs("synsetFreqs.txt");
 * ...
 * // initialize Resnik with pre-calculated values 
 * Resnik r1 = new Resnik(wn, stem, f);
 * r1.init(Resnik.RESNIK_N_WN30_BNC4037914, "synsetFreqs.txt");
 * </pre>
 * 
 * </blockquote>
 *  
 * @author Fabio Gasparetti
 * @version 1.0, 09/01/07
 */

public class Resnik {
	/**
	 * Fixed-size LRU cache mapping "term_pos" keys to the synsets retrieved
	 * for that key. Declared static: it needs no reference to the enclosing
	 * Resnik instance.
	 */
	static class SynsetCache extends LinkedHashMap<String, Synset[]> {
		private static final long serialVersionUID = 1L;
		public static final int MAX_ENTRIES = 500;

		SynsetCache() {
			// access-order iteration: get() refreshes an entry's age, so the
			// map behaves as a true LRU cache without manual re-insertion
			super(16, 0.75f, true);
		}

		@Override
		protected boolean removeEldestEntry(Map.Entry<String, Synset[]> eldest) {
			return size() > MAX_ENTRIES;
		}
	}

	/**
	 * RESNIK N value for the configuration WordNet3.0 + BNC all.num.o5 4037914
	 * bytes (BNCWN30) (+ stemmer?).
	 */
	public static final long RESNIK_N_WN30_BNC4037914 = 18072666L;

	// "term_pos" -> Synset[], LRU-bounded (see SynsetCache)
	Map<String, Synset[]> synsetCache;

	// for each synset id (String) the related Resnik probability p(c)
	protected Map<String, Double> synsetProbs = null;

	// Resnik's N: total frequency of collection nouns covered by WordNet
	protected long resnikN = -1;

	// smallest probability in synsetProbs; used to normalize similarities
	// and as a fallback for synsets with no stored probability
	protected double minProb = -1;

	WordNet wordNet = null;

	CollectionFrequency frequencies = null;

	Stemmer stemmer = null;

	/**
	 * Creates a Resnik computer over the given WordNet, stemmer and term
	 * frequency collection.
	 * <p>
	 * After the object instantiation, remember to call {@link #init(String)}.
	 * <p>
	 * To set up a CollectionFrequency check the British National Corpus (BNC)
	 * <ul>
	 * <li><a href="http://www.natcorp.ox.ac.uk/">http://www.natcorp.ox.ac.uk/</a>
	 * <li><a href="http://www.kilgarriff.co.uk/bnc-readme.html">http://www.kilgarriff.co.uk/bnc-readme.html</a>
	 * </ul>
	 *
	 * @param wn   WordNet interface used for synset and hypernym lookups
	 * @param stem stemmer used to expand terms to their roots (may be null)
	 * @param cf   term/frequency collection (e.g. built from the BNC)
	 */
	public Resnik(WordNet wn, Stemmer stem, CollectionFrequency cf) {
		wordNet = wn;
		frequencies = cf;
		stemmer = stem;
		synsetCache = new SynsetCache();
	}

	/**
	 * Computes the <code>Resnik N</code> value and the synset probabilities
	 * according to the Resnik algorithm described in Sect. 3.1 of: <blockquote>
	 *
	 * <pre>
	 * Philip Resnik &quot;Semantic Similarity in a Taxonomy: An Information-Based Measure
	 * and its Application to Problems of Ambiguity in Natural Language&quot;
	 * Journal of Artificial Intelligence Research, Vol.11, pp.95-130, 1999.
	 * http://citeseer.ist.psu.edu/resnik99semantic.html
	 * </pre>
	 *
	 * </blockquote>
	 *
	 * @throws IOException declared for symmetry with {@link #init(long, String)}
	 */
	public void init() throws IOException {
		init(computeResnikN(), null);
	}

	/**
	 * Computes the <code>Resnik N</code> value and reads the synset
	 * probabilities from the given filename.
	 *
	 * @param fn file previously written by {@link #writeSynsetProbs(String)}
	 * @throws IOException if the file cannot be read or parsed
	 */
	public void init(String fn) throws IOException {
		init(computeResnikN(), fn);
	}

	/**
	 * Sets the <code>Resnik N</code> value and reads the synset probabilities
	 * from the given filename.
	 * <p>
	 * If filename is <code>null</code>, the synset probabilities are
	 * computed from scratch. Use {@link #RESNIK_N_WN30_BNC4037914} as
	 * <code>Resnik N</code> value in case you are using
	 * <code>WordNet 3.0</code> and <code>BNC all.num.o5</code> collection
	 * (4037914 bytes)
	 *
	 * @param resnikN the normalization constant N
	 * @param fn      probabilities file, or null to compute them from scratch
	 * @throws IOException if the probabilities file cannot be read or parsed
	 */
	public void init(long resnikN, String fn) throws IOException {
		this.resnikN = resnikN;
		if (fn != null) {
			readSynsetProbs(fn);
		} else {
			computeSynsetProbs();
		}
		minProb = computeMinSynsetProb();
	}

	/**
	 * Returns the synsets of the given term.
	 * <p>
	 * If the stemmer is defined, retrieves all the synsets iterating through
	 * the term's roots (plus the term itself). If stemmer = null, it retrieves
	 * the synsets of the term only. Results are kept in a fixed-size LRU cache.
	 *
	 * @param term the term to look up
	 * @param pos  the part of speech (see PartOfSpeech constants)
	 * @return the synsets found (possibly empty); may be null when no stemmer
	 *         is set and the underlying WordNet lookup returns null
	 */
	public Synset[] getSynsets(String term, int pos) {
		String key = term + "_" + pos;
		// the cache is access-ordered, so this get() also refreshes the age
		Synset[] cached = synsetCache.get(key);
		if (cached != null) {
			return cached;
		}

		Synset[] result;
		if (stemmer != null) {
			Set<String> roots = stemmer.getRoots(term, pos);
			if (roots == null) {
				roots = new HashSet<String>();
			}
			// the term itself always takes part in the lookup
			// NOTE(review): this mutates the set returned by the stemmer —
			// confirm the stemmer does not cache/reuse that set
			roots.add(term);

			List<Synset[]> perRoot = new ArrayList<Synset[]>();
			int total = 0;
			for (String root : roots) {
				Synset[] ss = wordNet.getSynsets(root, pos);
				if (ss.length > 0) {
					perRoot.add(ss);
					total += ss.length;
				}
			}
			// flatten the per-root arrays into a single result array
			result = new Synset[total];
			int idx = 0;
			for (Synset[] ss : perRoot) {
				System.arraycopy(ss, 0, result, idx, ss.length);
				idx += ss.length;
			}
		} else { // no stemmer
			result = wordNet.getSynsets(term, pos);
		}

		if (result != null) {
			synsetCache.put(key, result);
		}
		return result;
	}

	/**
	 * Calculates p(c) for each synset c according to the Resnik algorithm
	 * described in Sect. 3.1 of: <blockquote>
	 *
	 * <pre>
	 * Philip Resnik &quot;Semantic Similarity in a Taxonomy: An Information-Based Measure
	 * and its Application to Problems of Ambiguity in Natural Language&quot;
	 * Journal of Artificial Intelligence Research, Vol.11, pp.95-130, 1999.
	 * </pre>
	 *
	 * </blockquote>
	 */
	protected void computeSynsetProbs() {
		synsetProbs = new HashMap<String, Double>();
		// instead of iterating through the synsets, iterate through the
		// collection of words: every noun contributes its frequency to each
		// synset that contains it and to all of their inherited hypernyms
		int numTerms = frequencies.getNumberTerms();
		int n = 0;
		Iterator iter = frequencies.termFreqIterator();
		while (iter.hasNext()) { // iterate through collection's terms
			CollectionFrequency.TermFreq tf = (CollectionFrequency.TermFreq) iter
					.next();
			System.out.println(++n + " of " + numTerms); // progress feedback
			if (tf.pos != PartOfSpeech.NOUN) {
				continue;
			}
			Synset[] senses = getSynsets(tf.term, tf.pos);
			// hypernyms already credited for this term: through the IS-A
			// traversal each synset receives the term's frequency at most once
			Set<Synset> done = new TreeSet<Synset>();
			for (Synset sense : senses) {
				done.add(sense);
				addSynsetFreq(sense.getId(), tf.freq);
				// collect hypernyms (IS-A) and iterate over the closure
				Stack<Synset> pending = new Stack<Synset>();
				pending.addAll(Arrays.asList(wordNet.getRelatedSynsets(sense,
						Relationship.HYPERNYM)));
				while (!pending.isEmpty()) {
					Synset s = pending.pop();
					if (!done.add(s)) {
						continue; // already credited for this term
					}
					addSynsetFreq(s.getId(), tf.freq);
					pending.addAll(Arrays.asList(wordNet.getRelatedSynsets(s,
							Relationship.HYPERNYM)));
				}
			}
		}

		// normalize the accumulated frequencies by N to obtain probabilities
		for (Map.Entry<String, Double> e : synsetProbs.entrySet()) {
			e.setValue(Double.valueOf(e.getValue().doubleValue()
					/ (double) resnikN));
		}
	}

	/** Adds freq to the running total stored for the given synset id. */
	private void addSynsetFreq(String id, double freq) {
		Double current = synsetProbs.get(id);
		synsetProbs.put(id, Double.valueOf(current == null ? freq
				: current.doubleValue() + freq));
	}

	/**
	 * Returns the lowest synset probability in the loaded set. Useful to
	 * normalize Resnik similarities.
	 *
	 * @return the minimum probability, or Double.MAX_VALUE when no
	 *         probabilities are loaded
	 */
	protected double computeMinSynsetProb() {
		if (minProb > 0) {
			return minProb; // already computed
		}
		double min = Double.MAX_VALUE;
		for (Double d : synsetProbs.values()) {
			if (d.doubleValue() < min) {
				min = d.doubleValue();
			}
		}
		return min;
	}

	/** Returns the minimum synset probability computed during init. */
	public double minSynsetProb() {
		return minProb;
	}

	/**
	 * Returns the Resnik probability of the synset with the given id.
	 * <p>
	 * Note: minProb is returned if no probability is assigned to the synset.
	 *
	 * @param id the synset identifier
	 * @return p(c) for the synset, or the minimum probability when unknown
	 */
	public double synsetProb(String id) {
		Double d = synsetProbs.get(id);
		return d != null ? d.doubleValue() : minProb;
	}

	/**
	 * Returns the inherited hypernyms of a given synset, i.e., the synsets
	 * that encompass the given synset.
	 * <p>
	 * For example: 'house' returns dwelling -> ... -> structure -> ... ->
	 * entity.
	 *
	 * @param s the starting synset (not included in the result)
	 * @return the transitive closure of the HYPERNYM relationship
	 */
	public Set<Synset> inheritedHypernyms(Synset s) {
		TreeSet<Synset> visited = new TreeSet<Synset>();
		Stack<Synset> pending = new Stack<Synset>();
		pending.addAll(Arrays.asList(wordNet.getRelatedSynsets(s,
				Relationship.HYPERNYM)));
		while (!pending.isEmpty()) {
			Synset current = pending.pop();
			if (!visited.add(current)) {
				continue; // already expanded
			}
			pending.addAll(Arrays.asList(wordNet.getRelatedSynsets(current,
					Relationship.HYPERNYM)));
		}
		return visited;
	}

	/**
	 * Computes the Resnik similarity between 2 nouns.
	 * <p>
	 * See (2) in "Semantic Similarity in a Taxonomy" by Resnik (1999).
	 *
	 * @return the most informative common subsumer with its information
	 *         content, or null when no common subsumer with positive
	 *         information content exists
	 */
	public WeightedSynset resnikSimilarity(String w1, String w2) {
		Synset[] ss1 = getSynsets(w1, PartOfSpeech.NOUN);
		Synset[] ss2 = getSynsets(w2, PartOfSpeech.NOUN);
		double max = 0d;
		Synset smax = null;
		for (int i = 0; i < ss1.length; i++) {
			for (int j = 0; j < ss2.length; j++) {
				// common subsumers of the two senses
				Set<Synset> common = inheritedHypernyms(ss1[i]);
				common.retainAll(inheritedHypernyms(ss2[j]));
				for (Synset s : common) {
					// information content -log p(c); synsets without a stored
					// probability fall back to minProb (see synsetProb)
					double ic = -1d * Math.log(synsetProb(s.getId()));
					if (ic > max) {
						max = ic;
						smax = s;
					}
				}
			}
		}
		return smax != null ? new WeightedSynset(smax, max) : null;
	}

	/**
	 * Computes the Resnik similarity between 2 synsets.
	 * <p>
	 * See (1) in "Semantic Similarity in a Taxonomy" by Resnik (1999).
	 * <p>
	 * Unknown common subsumers now fall back to the minimum probability —
	 * consistent with {@link #resnikSimilarity(String, String)} — instead of
	 * raising a NullPointerException.
	 */
	public WeightedSynset resnikSimilarity(Synset s1, Synset s2) {
		Set<Synset> common = inheritedHypernyms(s1);
		common.retainAll(inheritedHypernyms(s2));
		Synset smax = null;
		double max = 0d;
		for (Synset s : common) {
			double ic = -1d * Math.log(synsetProb(s.getId()));
			if (ic > max) {
				max = ic;
				smax = s;
			}
		}
		return smax != null ? new WeightedSynset(smax, max) : null;
	}

	/** Returns the Resnik N value in use. */
	public long resnikN() {
		return this.resnikN;
	}

	/**
	 * Normalizes the Resnik similarity in the [0,1] range.
	 *
	 * @param sim a similarity value returned by resnikSimilarity
	 * @return sim divided by the maximum possible information content
	 */
	public double normalize(double sim) {
		return sim / (-1d * Math.log(minProb));
	}

	/**
	 * Computes Resnik's N: the total frequency of the collection's nouns that
	 * are subsumed by at least one WordNet synset.
	 * <p>
	 * Note: If you want to work with morphological roots, you should use a
	 * stemmer when you build the CollectionFrequency or during the synset
	 * retrieval in the WordNet class.
	 */
	protected long computeResnikN() {
		long freq = 0;
		Iterator iter = frequencies.termFreqIterator();
		while (iter.hasNext()) {
			CollectionFrequency.TermFreq tf = ((CollectionFrequency.TermFreq) iter
					.next());
			if (tf.pos == PartOfSpeech.NOUN) {
				Synset[] ss = getSynsets(tf.term, tf.pos);
				// exclude words not subsumed by any WordNet synset
				if (ss.length > 0) {
					freq += tf.freq;
				}
			}
		}
		return freq;
	}

	// TODO: commonSynsets(String w1, String w2, int pos) — return the synsets
	// shared by the hypernym hierarchies of two words.

	/**
	 * Writes the synset probabilities to the given file, one
	 * "&lt;id&gt; &lt;probability&gt;" pair per line.
	 *
	 * @param filename the destination file
	 * @throws IOException on any I/O failure
	 */
	public void writeSynsetProbs(String filename) throws IOException {
		BufferedWriter bufWriter = new BufferedWriter(new FileWriter(filename));
		try {
			for (Map.Entry<String, Double> e : synsetProbs.entrySet()) {
				bufWriter.write(e.getKey() + " " + e.getValue().doubleValue()
						+ "\n");
			}
		} finally {
			// always release the file handle, even when a write fails
			bufWriter.close();
		}
	}

	/**
	 * Reads the synset probabilities from a file previously written by
	 * {@link #writeSynsetProbs(String)}.
	 *
	 * @param filename the source file
	 * @throws IOException on I/O failure or when a line is malformed
	 */
	public void readSynsetProbs(String filename) throws IOException {
		BufferedReader bufReader = new BufferedReader(new FileReader(filename));
		try {
			synsetProbs = new HashMap<String, Double>();
			String line;
			while ((line = bufReader.readLine()) != null) {
				String[] splits = line.split(" ");
				if (splits.length != 2) {
					throw new IOException("Parsing exception");
				}
				synsetProbs.put(splits[0], Double.valueOf(splits[1]));
			}
		} finally {
			// the reader is now closed even when parsing fails mid-file
			bufReader.close();
		}
	}

	/**
	 * Smoke test: computes the synset probabilities from a local BNC file,
	 * writes them to disk and prints the glosses of "building".
	 */
	public static void main(String args[]) throws Exception {
		try {
			GateTagger tagger = new GateTagger();
			tagger.init();
			JwiWordNet wn = new JwiWordNet();
			wn.init();
			ArrayCollectionFrequency f = new ArrayCollectionFrequency();
			BncParser p = new BncParser();
			f.load("/home/fabio/projects/cues/idfs/all.num.o5", p);
			Stemmer stem = new JwiStemmer();
			Resnik r = new Resnik(wn, stem, f);
			r.init();
			r.writeSynsetProbs("synsetFreqs.txt");
			// r.init("synsetFreqs.txt");
			Synset[] ss = r.getSynsets("building", PartOfSpeech.NOUN);
			for (int i = 0; i < ss.length; i++) {
				System.out.println(i + " " + ss[i].getGloss());
			}
		} catch (Exception ex) {
			System.out.println(ex.toString());
			ex.printStackTrace();
		}
	}
}
