/*
 * @(#)ResnikDisambiguation.java	1.0 10/01/07
 *
 * Copyright 2007 Fabio Gasparetti. All rights reserved.
 */

package org.tabularium.text.nlp;

import java.io.File;
import java.io.IOException;
import java.util.*;

import org.tabularium.text.nlp.wordnet.*;
// debug
import org.tabularium.text.nlp.wordnet.jwi.*;

/**
 * Performs the Resnik disambiguation algorithm retrieving a weighted set of
 * synsets related to a given text.
 * 
 * <p>
 * A typical invocation sequence is: <blockquote>
 * 
 * <pre>
 * // pass TextTagger, WordNet Stemmer, Collocations and Resnik objects to constructor
 * ResnikDisambiguation dis = new ResnikDisambiguation(tagger, wn, coll, r);
 * WeightedSynset[] ss = dis.disambiguate(&quot;the text to disambiguate&quot;);
 * </pre>
 * 
 * </blockquote>
 * 
 * For details about the Resnik disambiguation algorithm see Sect.5.1 of: <blockquote>
 * 
 * <pre>
 * Philip Resnik &quot;Semantic Similarity in a Taxonomy: An Information-Based Measure
 * and its Application to Problems of Ambiguity in Natural Language&quot;
 * Journal of Artificial Intelligence Research, Vol.11, pp.95-130, 1999.
 * http://citeseer.ist.psu.edu/resnik99semantic.html
 * </pre>
 * 
 * 
 * </blockquote>
 * 
 * TODO(doc): this note was left unfinished — complete the explanation of
 * what happens when DEFAULT_RESNIK_N is used, or remove this line.
 * 
 * @author Fabio Gasparetti
 * @version 1.0, 10/01/07
 */
public class ResnikDisambiguation extends Disambiguation {

	/** Splits raw text into POS-tagged tokens. */
	TextTagger tagger = null;

	/** WordNet interface used to retrieve the candidate synsets of a noun. */
	WordNet wordNet = null;

	/** Finder for multi-word (collocation) nouns. */
	Collocations collocations = null;

	/** Computes the Resnik semantic similarity between two nouns. */
	Resnik resnik = null;

	/**
	 * Creates a new Resnik disambiguator.
	 * 
	 * @param t tagger used to tokenize and POS-tag the input text
	 * @param wn WordNet instance for synset lookups
	 * @param c collocation finder
	 * @param r Resnik similarity measure
	 */
	public ResnikDisambiguation(TextTagger t, WordNet wn, Collocations c,
			Resnik r) {
		tagger = t;
		wordNet = wn;
		collocations = c;
		resnik = r;
	}

	/**
	 * Tags the given text and disambiguates the resulting tag sequence.
	 * 
	 * @param text the text to disambiguate
	 * @return the weighted synsets sorted by weight, or {@code null} if
	 *         tagging fails
	 */
	@Override
	public WeightedSynset[] disambiguate(String text) {
		Tag[] tags;
		try {
			tags = tagger.tag(text);
		} catch (Exception ex) {
			// Best-effort contract: tagging failures are reported on stderr
			// and signalled to the caller with a null return.
			ex.printStackTrace();
			return null;
		}
		return disambiguate(tags);
	}

	/**
	 * Collects every noun tag that occurs inside a collocation found in the
	 * given tag sequence. Collocations are searched sentence by sentence.
	 * 
	 * @return the set of noun {@code Tag}s belonging to some collocation
	 */
	private HashSet findCollocationNounTags(Tag[] tags) {
		HashSet collocationNounTags = new HashSet();
		Tag[] sentences = TextTagger.filter(tags, "Sentence");
		for (int i = 0; i < sentences.length; i++) {
			Tag[] sentTags = sentences[i].getEnclosed(tags);
			Iterator iter = collocations.findCollocations(sentTags).iterator();
			while (iter.hasNext()) {
				Tag[] tt = (Tag[]) iter.next();
				for (int k = 0; k < tt.length; k++) {
					if (tt[k].pos == PartOfSpeech.NOUN)
						collocationNounTags.add(tt[k]);
				}
			}
		}
		return collocationNounTags;
	}

	/**
	 * Returns the noun tags (both stand-alone nouns and nouns belonging to
	 * collocations) contained in the given sequence of tags.
	 * <p>
	 * If a noun appears both alone and inside a collocation it is returned
	 * only once.
	 * 
	 * @todo why is it placed here?
	 */
	public Tag[] nouns(Tag[] tags) {
		// Union of collocation noun tags and stand-alone noun tags; the
		// HashSet removes duplicates.
		HashSet result = findCollocationNounTags(tags);
		Tag[] nounTags = TextTagger.filter(tags, PartOfSpeech.NOUN);
		for (int i = 0; i < nounTags.length; i++)
			result.add(nounTags[i]);
		return (Tag[]) result.toArray(new Tag[] {});
	}

	/**
	 * Disambiguates the nouns in the given tag sequence using the Resnik
	 * algorithm (Resnik 1999, Sect.5.1) and returns the weighted synsets
	 * sorted by weight.
	 */
	@Override
	public WeightedSynset[] disambiguate(Tag[] tags) {
		// 1st phase: collect all nouns, separating those that occur inside
		// collocations from the stand-alone ones.
		HashSet collocationNounTags = findCollocationNounTags(tags);
		Tag[] nounTags = TextTagger.filter(tags, PartOfSpeech.NOUN);
		HashSet filteredNouns = new HashSet();
		for (int i = 0; i < nounTags.length; i++) {
			if (!collocationNounTags.contains(nounTags[i]))
				filteredNouns.add(nounTags[i]);
		}

		// 2nd phase: map every noun's content to its candidate synsets and
		// track the largest number of senses (used to size the weight matrix).
		HashMap nounTagSynsetsMap = new HashMap(); // noun content -> Synset[]
		int maxNumSynsets = 0;
		HashSet allNounTags = new HashSet(filteredNouns);
		allNounTags.addAll(collocationNounTags);
		Iterator iter = allNounTags.iterator();
		while (iter.hasNext()) {
			Tag t = (Tag) iter.next();
			Synset[] ss = wordNet.getSynsets(t.getContent(), PartOfSpeech.NOUN);
			if (ss.length > maxNumSynsets)
				maxNumSynsets = ss.length;
			nounTagSynsetsMap.put(t.getContent(), ss);
		}

		String[] nouns = (String[]) nounTagSynsetsMap.keySet().toArray(
				new String[] {});
		double[][] weights = new double[nouns.length][maxNumSynsets];
		double[] normalization = new double[nouns.length];

		// 3rd phase: for every unordered pair of nouns find the most
		// informative subsumer and credit its similarity weight to every
		// sense of each noun that the subsumer covers.
		for (int i = 1; i < nouns.length; i++) {
			for (int j = 0; j < i; j++) {
				// Collocation contents may carry redundant blanks.
				String n1 = Collocations.removeUselessBlanks(nouns[i]);
				String n2 = Collocations.removeUselessBlanks(nouns[j]);
				WeightedSynset ws = resnik.resnikSimilarity(n1, n2);
				if (ws == null)
					continue;
				Synset s = ws.getSynset();
				double w = ws.getWeight();

				Synset[] ss1 = (Synset[]) nounTagSynsetsMap.get(nouns[i]);
				for (int k = 0; k < ss1.length; k++) {
					if (isAncestor(s, ss1[k])) {
						weights[i][k] += w;
						normalization[i] += w;
					}
				}
				Synset[] ss2 = (Synset[]) nounTagSynsetsMap.get(nouns[j]);
				for (int k = 0; k < ss2.length; k++) {
					if (isAncestor(s, ss2[k])) {
						weights[j][k] += w;
						normalization[j] += w;
					}
				}
			}
		}

		// 4th phase: normalize the per-noun weights and accumulate the
		// weights of synsets shared by several nouns; nouns that gathered no
		// evidence get a uniform distribution over their senses.
		HashMap retWSmap = new HashMap(); // synset id -> WeightedSynset
		for (int i = 0; i < nouns.length; i++) {
			Synset[] ss = (Synset[]) nounTagSynsetsMap.get(nouns[i]);
			int numSenses = ss.length;
			for (int k = 0; k < numSenses; k++) {
				double w;
				if (normalization[i] > 0d)
					w = weights[i][k] / normalization[i];
				else
					w = 1d / numSenses;
				WeightedSynset ws = (WeightedSynset) retWSmap
						.get(ss[k].getId());
				if (ws != null)
					w += ws.getWeight();
				if (w > 0d)
					retWSmap.put(ss[k].getId(), new WeightedSynset(ss[k], w));
			}
		}
		WeightedSynset[] wss = (WeightedSynset[]) retWSmap.values().toArray(
				new WeightedSynset[] {});
		Arrays.sort(wss);
		return wss;
	}

	/**
	 * Returns true if s1 is an ancestor of s2 (or coincides with it), i.e. s1
	 * belongs to the set of inherited hypernyms of s2.
	 */
	public boolean isAncestor(Synset s1, Synset s2) {
		return resnik.inheritedHypernyms(s2).contains(s1);
	}

	/**
	 * Returns all the synsets associated to the nouns in the given sequence
	 * of tags, one {@code List} of synsets per noun.
	 * 
	 * Note: collocations are taken into consideration; a noun that occurs in
	 * a collocation contributes only through the collocation.
	 */
	protected List retrieveSynsets(Tag[] tags) {
		List synsets = new ArrayList();
		List collocationNouns = new ArrayList();
		Tag[] sentence = TextTagger.filter(tags, "Sentence");
		for (int i = 0; i < sentence.length; i++) {
			Tag[] sentTags = sentence[i].getEnclosed(tags);
			Iterator iter = collocations.findCollocations(sentTags).iterator();
			while (iter.hasNext()) {
				Tag[] tt = (Tag[]) iter.next();
				StringBuilder sb = new StringBuilder();
				for (int k = 0; k < tt.length; k++) {
					sb.append(tt[k].content);
					if (tt[k].pos == PartOfSpeech.NOUN)
						collocationNouns.add(tt[k]);
				}
				String s = Collocations.removeUselessBlanks(sb.toString());
				/*
				 * It may happen that a token is in the findCollocations' list
				 * but it has no synset associated; findCollocations translates
				 * '-' characters to whitespaces, while getSynsets doesn't.
				 */
				Synset[] ss = wordNet.getSynsets(s, PartOfSpeech.NOUN);
				synsets.add(Arrays.asList(ss));
			}
		}
		Tag[] nouns = TextTagger.filter(tags, PartOfSpeech.NOUN);
		// Skip the nouns that already appeared inside a collocation.
		for (int i = 0; i < nouns.length; i++) {
			if (collocationNouns.contains(nouns[i]))
				continue;
			Synset[] ss = wordNet.getSynsets(nouns[i].getContent(),
					PartOfSpeech.NOUN);
			synsets.add(Arrays.asList(ss));
		}
		return synsets;
	}

	/**
	 * Demonstrates the disambiguation on a sample text. Expects the GATE,
	 * WordNet and frequency resources under the user's home directory and the
	 * collocation/synset-frequency files in the working directory.
	 */
	public static void main(String args[]) throws Exception {
		try {
			GateTagger tagger = new GateTagger();
			File fn = new File(System.getProperty("user.home"),
					"/projects/cues/GATE-4.0");
			System.out.println("Tagger: init");
			tagger.init(fn.toString());

			JwiWordNet wn = new JwiWordNet(new JwiStemmer());
			fn = new File(System.getProperty("user.home"),
					"/projects/cues/WordNet-3.0/dict");
			System.out.println("WordNet: init");
			wn.init(fn.toString());

			ArrayCollectionFrequency f = new ArrayCollectionFrequency();
			BncParser p = new BncParser();
			fn = new File(System.getProperty("user.home"),
					"/projects/cues/idfs/all.num.o5");
			System.out.println("CollectionFrequency: load");
			f.load(fn.toString(), p);

			Stemmer stem = new JwiStemmer();
			System.out.println("Semantic Similarity");
			Collocations coll = new Collocations();
			coll.load("collocations.txt");
			Resnik r = new Resnik(wn, stem, f);
			r.init("synsetFreqs.txt");

			ResnikDisambiguation dis = new ResnikDisambiguation(tagger, wn,
					coll, r);
			System.out.println("Disambiguation: init");
			WeightedSynset[] ss = dis
					.disambiguate("The FBI is looking for suspects in a scam that uses bomb threats to extort money from banks and stores, officials told CNN. For the past week, banks and stores in 12 states have been hit by the scam, in which a caller claims there is a bomb that can be detonated if workers don't wire money to a specific account. ");
			for (int i = 0; i < ss.length; i++) {
				System.out.println("weight: " + ss[i].getWeight() + " synset:["
						+ ss[i].getSynset() + "]");
			}
		} catch (Exception ex) {
			System.out.println(ex.toString());
			ex.printStackTrace();
		}
	}

}
