package coms6111.astbbw;

import java.util.*;
import java.util.regex.*;
import java.util.Map.Entry;
import java.net.URI;
import java.net.URISyntaxException;

/**
 * A term-frequency vector over one or more documents.  Terms are lowercased
 * words and two-word phrases (digrams); a parallel index of stems plus a
 * stem-to-original-word reverse lookup is maintained (stemming itself is
 * currently disabled, see the commented-out updateStems/stem code below).
 * Not thread-safe beyond what Hashtable itself provides.
 */
public class DocumentIndex {
	/*
	 * Tokenizer: a "word" is a run of alphanumerics that may contain
	 * internal '.', '\'' or '-' characters but never starts or ends with
	 * one, so surrounding punctuation is not captured.
	 *
	 * Interesting points about this: do we want possessives?  Contractions?
	 * Most contractions should arguably be stopwords...
	 * and arguably most possessives we would be better stripping.
	 */
	static final Pattern WORD_REGEX =
			Pattern.compile("[A-Za-z0-9](?:[A-Za-z0-9]|[.'-][A-Za-z0-9])+");

	/**
	 * A hashtable to store the current score of each term
	 * that is encountered in the indexing process.
	 */
	Hashtable<String,Double> h = new Hashtable<String,Double>();
	/**
	 * A hashtable to store the current score of each stem.
	 */
	Hashtable<String,Double> stems = new Hashtable<String,Double>();
	/**
	 * A hashtable to store the first term that was encountered that produced
	 * a given stem (necessary for building a useable query again afterward).
	 */
	Hashtable<String,String> stemReverse = new Hashtable<String,String>();

	/** Create an empty index. */
	public DocumentIndex() {}

	/**
	 * Constructor to use if we have already built the various index hashes.
	 * Note: the tables are adopted, not copied.
	 * @param hInput the hash to be used as the raw term index.
	 * @param sInput the stem index.
	 * @param rsInput the reverse-lookup to tell us what the original form
	 * of each stem was.
	 */
	protected DocumentIndex(Hashtable<String,Double> hInput,
			Hashtable<String,Double> sInput,
			Hashtable<String,String> rsInput) {
		this.h = hInput;
		this.stems = sInput;
		this.stemReverse = rsInput;
	}

	/**
	 * Build a DocumentIndex for this DocumentSet
	 * (parsing title, URL, and abstract).
	 * @param in a filled-in DocumentSet.
	 */
	//FIXME took out the constants--some refactoring one way or another is clearly called for
	public DocumentIndex(DocumentSet in) {
		String[] urlwords = parseURL(in.URL);
		//updateHash(urlwords,ProcessConst.URL_WEIGHT);
		updateHash(urlwords, 0);
		// Strip the boilerplate suffix so it cannot dominate every title.
		String titleTmp = in.Title.replace("Wikipedia, the free encyclopedia", "");
		String[] titlewords = parseString(titleTmp);
		//updateHash(titlewords, ProcessConst.TITLE_WEIGHT);
		updateHash(titlewords, 1.1);
		String[] bodywords = parseString(in.Abstract);
		updateHash(bodywords, 1.0);
		//updateStems();
	}

	/**
	 * Build a DocumentIndex from free text, with every term weighted 1.0.
	 * @param text the text to tokenize and index.
	 */
	public DocumentIndex(String text) {
		updateHash(parseString(text), 1.0);
		//FIXME
		//updateStems();
	}

	/**
	 * Subtract one document vector from another, scaling by a parameter (Rocchio approach).
	 * @param a the index subtracted from.
	 * @param b the index being subtracted.
	 * @param beta each term weight in b will be multiplied by this parameter
	 * before being subtracted.
	 * @return a new DocumentIndex holding a - beta*b (terms only in b come
	 * out negative; a's reverse-lookup entries win on conflict).
	 */
	public static DocumentIndex difference(DocumentIndex a, DocumentIndex b, double beta) {
		return new DocumentIndex(
				hashSubtract(a.h, b.h, beta),
				hashSubtract(a.stems, b.stems, beta),
				mergeLookups(a.stemReverse, b.stemReverse));
	}

	/**
	 * Tokenize text into single words plus the digram ending at each word.
	 * @param input the text to tokenize.
	 * @return the words and digrams, in order of appearance.
	 */
	protected static String[] parseString(String input) {
		Matcher m = WORD_REGEX.matcher(input);
		ArrayList<String> wordlist = new ArrayList<String>();
		String lastWord = null;
		while (m.find()) {
			String word = m.group();
			wordlist.add(word);
			// Also index the two-word phrase (digram) ending here.
			if (lastWord != null) {
				wordlist.add(lastWord + " " + word);
			}
			lastWord = word;
		}
		return wordlist.toArray(new String[0]);
	}

	/**
	 * Extract indexable words from a URL's host and path components.
	 * @param URL the URL to parse.
	 * @return the words found, or an empty array if the URL does not parse.
	 */
	protected static String[] parseURL(String URL) {
		ArrayList<String> words = new ArrayList<String>();
		URI parsed;
		try {
			parsed = new URI(URL);
		} catch (URISyntaxException e) {
			/* If it doesn't parse, return nothing, silently
			 * (we're zeroing these out anyway)
			 */
			return new String[0];
		}
		// getHost() is null for relative or opaque URIs (e.g. "mailto:");
		// honor the "return nothing silently" contract instead of NPEing.
		String host = parsed.getHost();
		if (host != null) {
			// we probably never care about the TLD, but perhaps .uk might matter?
			for (String word : host.split("\\.")) {
				// these are worse than stopwords--right out!
				if (word.equals("www") || word.equals("com")) continue;
				words.add(word);
			}
		}
		// getPath() is likewise null for opaque URIs.
		String path = parsed.getPath();
		if (path != null) {
			Matcher m = WORD_REGEX.matcher(path);
			while (m.find()) {
				words.add(m.group());
			}
		}
		return words.toArray(new String[0]);
	}

	/**
	 * Register every term in the list with weight 0 (makes the terms known
	 * to the index without boosting them).
	 * @param in the terms to register.
	 */
	public void addAll(ArrayList<String> in) {
		updateHash(in.toArray(new String[0]), 0);
	}

	/** Lowercase each word and add the given weight to its score. */
	private void updateHash(String[] words, double weight) {
		for (String word : words) {
			if (word.length() == 0) continue;
			// Locale.ROOT keeps indexing locale-independent (avoids the
			// Turkish dotless-i surprise under a non-English default locale).
			this.addToValue(word.toLowerCase(Locale.ROOT), weight);
		}
	}

	// XXX could be improved (digram handling)
	/*
	private void updateStems() {
		Set<Entry<String,Double>> s = h.entrySet();
		Iterator<Entry<String,Double>> i = s.iterator();
		while ( i.hasNext() ) {
			Entry<String,Double> e = i.next();
			String unstemmed = e.getKey();
			double val = e.getValue();
			String stemmed = stem(unstemmed);
			addToStems(stemmed,val);
			if ( !stemReverse.containsKey(stemmed) ) {
				stemReverse.put(stemmed, unstemmed);
			}
		}
	}
	*/

	/** Add v to the score stored under k in t, treating a missing key as 0. */
	private void addToHashVal(Hashtable<String,Double> t, String k, double v) {
		Double cur = t.get(k);
		t.put(k, cur == null ? v : cur + v);
	}
	private void addToValue(String key, double increment) {
		addToHashVal(h, key, increment);
	}
	private void addToStems(String key, double increment) {
		addToHashVal(stems, key, increment);
	}

	/**
	 * Merge another index into this one by summing term and stem scores.
	 * Reverse-lookup entries already present in this index are kept
	 * (first mapping wins, as elsewhere).
	 * @param toAdd the index to merge in; it is not modified.
	 */
	public void add(DocumentIndex toAdd) {
		for (Entry<String,Double> e : toAdd.h.entrySet()) {
			this.addToValue(e.getKey(), e.getValue());
		}
		for (Entry<String,Double> e : toAdd.stems.entrySet()) {
			String key = e.getKey();
			this.addToStems(key, e.getValue());
			if (!stemReverse.containsKey(key)) {
				stemReverse.put(key, toAdd.stemReverse.get(key));
			}
		}
	}

	/**
	 * @param key the raw term to look up.
	 * @return the term's current score, or 0 if it is not in the index.
	 */
	public double get(String key) {
		Double val = h.get(key);
		return val == null ? 0 : val;
	}

	/**
	 * Generate a document-frequency DocumentIndex from a set of
	 * term-frequency DocumentIndexes.
	 * @param alldocs an array of DocumentIndex objects with the term frequencies for all the
	 * documents in the collection
	 * @return a DocumentIndex containing the document frequencies of all terms (words, digrams, and stems).
	 */
	public static DocumentIndex getDocumentFrequencies(DocumentIndex[] alldocs) {
		DocumentIndex df = new DocumentIndex();
		for (DocumentIndex cur : alldocs) {
			// Each document contributes 1 per distinct term/stem it contains.
			for (String key : cur.h.keySet()) {
				df.addToValue(key, 1.0);
			}
			for (String key : cur.stems.keySet()) {
				df.addToStems(key, 1.0);
			}
			for (Entry<String,String> ent : cur.stemReverse.entrySet()) {
				String stem = ent.getKey();
				// BUG FIX: the original tested containsKey(cur) -- the
				// DocumentIndex object, never a valid String key -- so the
				// check was always false and later documents clobbered
				// earlier reverse-lookup entries.  First mapping wins,
				// matching the convention used everywhere else.
				if (!df.stemReverse.containsKey(stem)) {
					df.stemReverse.put(stem, ent.getValue());
				}
			}
		}
		return df;
	}

	/**
	 * Invert the frequencies found in this DocumentIndex (hopefully, they <i>are</i>
	 * document frequencies).  At the moment, this is a simple inverse-log transform,
	 * but it is subject to change if need be.
	 */
	public void dfInvert() {
		invertValues(h);
		invertValues(stems);
	}
	/** Replace each value v in t with 1/ln(v), in place. */
	private static void invertValues(Hashtable<String,Double> t) {
		for (Entry<String,Double> e : t.entrySet()) {
			// NOTE(review): a document frequency of 1 yields 1/log(1) ==
			// +Infinity -- confirm every term occurs in >1 document, or add
			// smoothing here.
			e.setValue(1 / Math.log(e.getValue()));
		}
	}

	/**
	 * Scale all values in this index by a given quantity.
	 * @param scaleFactor the quantity by which each value will be multiplied.
	 */
	public void scale(double scaleFactor) {
		scaleValues(h, scaleFactor);
		scaleValues(stems, scaleFactor);
	}
	/** Multiply every value in t by factor, in place. */
	private static void scaleValues(Hashtable<String,Double> t, double factor) {
		for (Entry<String,Double> e : t.entrySet()) {
			e.setValue(e.getValue() * factor);
		}
	}

	/**
	 * Multiply each value in this DocumentIndex by the corresponding
	 * value in another index (presumably, that value is the IDF for
	 * this term).  Terms absent from the IDF index are weighted 0 rather
	 * than triggering an unboxing NullPointerException (absence can happen
	 * after {@link #difference}).
	 * @param idfs the DocumentIndex containing the inverted document frequencies
	 */
	public void idfMultiply(DocumentIndex idfs) {
		for (Entry<String,Double> e : h.entrySet()) {
			Double idfval = idfs.h.get(e.getKey());
			e.setValue(idfval == null ? 0.0 : e.getValue() * idfval);
		}
		for (Entry<String,Double> e : stems.entrySet()) {
			Double idfval = idfs.stems.get(e.getKey());
			e.setValue(idfval == null ? 0.0 : e.getValue() * idfval);
		}
	}

	/**
	 * What are the N keys in this hash that have the best values?
	 * @param count the number of keys to return.
	 * @return the sorted top N terms (either words or digrams) in the index;
	 * trailing slots are null when fewer than N terms qualify.
	 */
	public String[] bestN(int count) {
		return bestN(count, new DocumentIndex());
	}
	/**
	 * As {@link #bestN(int)} except for the additional argument.
	 * @param exclusions a DocumentIndex containing terms that should not
	 * appear in the top N results (e.g stopwords or words already in the query)
	 */
	public String[] bestN(int count, DocumentIndex exclusions) {
		return bestN(count, exclusions, 0);
	}
	/**
	 * See previous method, except as noted.
	 * @param ngram the number of words per term.  If 0, any term will be returned;
	 * if 1, only single-word terms; if 2, only digrams.  If a larger value, no
	 * results are returned at this time.
	 */
	public String[] bestN(int count, DocumentIndex exclusions, int ngram) {
		return bestN(h, count, exclusions, ngram, false);
	}
	/**
	 * See bestN: this returns stemmed terms instead of raw terms, and is
	 * otherwise identical.
	 */
	public String[] bestNStemmed(int count, DocumentIndex exclusions, int ngram) {
		return bestN(stems, count, exclusions, ngram, true);
	}
	/**
	 * Internal implementation for bestN and bestNStemmed: maintains a
	 * descending-sorted top-N array via insertion.
	 * For unlisted arguments, see the various bestN methods.
	 * @param t the field of the object to be used (h or stems).
	 * @param excludeByLookup if true, use stemReverse to get the actual word corresponding
	 * to the current term, rather than looking up the term itself in the exclusions DocumentIndex.
	 */
	private String[] bestN(Hashtable<String,Double> t, int count,
			DocumentIndex exclusions, int ngram, boolean excludeByLookup) {
		String[] top = new String[count];
		double[] topVals = new double[count];
		for (Entry<String,Double> e : t.entrySet()) {
			String entryKey = e.getKey();
			// If this does not have the right number of words, skip it.
			if (ngram > 0 && entryKey.split(" ").length != ngram) continue;
			String excludeKey = entryKey;
			if (excludeByLookup) {
				String original = this.stemReverse.get(entryKey);
				// Guard: Hashtable.containsKey(null) throws NPE; if no
				// reverse mapping exists, fall back to the stem itself.
				if (original != null) excludeKey = original;
			}
			if (exclusions.h.containsKey(excludeKey)) continue;
			double entryVal = e.getValue();
			for (int j = 0; j < count; j++) {
				if (top[j] == null) {
					// Null slots are always at the tail, so this entry is
					// smaller than everything already placed: append it.
					top[j] = entryKey;
					topVals[j] = entryVal;
					break;
				} else if (topVals[j] < entryVal) {
					// Insert here, shifting lesser entries down one slot
					// (the last one falls off the end).
					for (int k = count - 1; k > j; k--) {
						top[k] = top[k - 1];
						topVals[k] = topVals[k - 1];
					}
					top[j] = entryKey;
					topVals[j] = entryVal;
					break;
				}
			}
		}
		return top;
	}
	/**
	 * Mirror image of the private bestN: the N keys with the <i>lowest</i>
	 * values, sorted ascending.  Exclusions are always looked up directly
	 * (no stemReverse indirection).
	 * @param t the table to rank (h or stems).
	 * @param count the number of keys to return.
	 * @param exclusions terms to leave out of the results.
	 * @param ngram words per term (0 = any), as in bestN.
	 * @return the bottom N terms; trailing slots are null when fewer qualify.
	 */
	public String[] worstN(Hashtable<String,Double> t, int count, DocumentIndex exclusions, int ngram) {
		String[] bottom = new String[count];
		double[] bottomVals = new double[count];
		for (Entry<String,Double> e : t.entrySet()) {
			String entryKey = e.getKey();
			// If this does not have the right number of words, skip it.
			if (ngram > 0 && entryKey.split(" ").length != ngram) continue;
			if (exclusions.h.containsKey(entryKey)) continue;
			double entryVal = e.getValue();
			for (int j = 0; j < count; j++) {
				if (bottom[j] == null) {
					bottom[j] = entryKey;
					bottomVals[j] = entryVal;
					break;
				} else if (bottomVals[j] > entryVal) {
					// Insert here, shifting greater entries down one slot.
					for (int k = count - 1; k > j; k--) {
						bottom[k] = bottom[k - 1];
						bottomVals[k] = bottomVals[k - 1];
					}
					bottom[j] = entryKey;
					bottomVals[j] = entryVal;
					break;
				}
			}
		}
		return bottom;
	}
	/**
	 * A debugging function: print out a list of key/value pairs, each
	 * on a single line, to System.out
	 * @param keys The list of hash keys to look up and print out the values of;
	 * a null entry terminates the list (bestN pads unused slots with null).
	 */
	public void dumpList(String[] keys) {
		for (String key : keys) {
			if (key == null) break;
			System.out.println(key + " " + h.get(key));
		}
	}

	/**
	 * As dumpList, but for stems: prints each stem, the original word it
	 * came from, and its score.
	 * @param keys the stem keys to print; a null entry terminates the list.
	 */
	public void dumpStemmedList(String[] keys) {
		for (String key : keys) {
			if (key == null) break;
			System.out.printf("%s (%s) %.2f\n",
					key, stemReverse.get(key), stems.get(key));
			//System.out.println(key + " " + stems.get(key));
		}
	}
	/*
	protected static String stem(String word) {
		Stemmer s = new Stemmer();
		char wordchars[] = word.toCharArray();
		s.add(wordchars, wordchars.length);
		s.stem();
		return s.toString();
	}
	 */
	/**
	 * Compute a - betaFactor*b over two score tables.
	 * Keys present only in b appear in the result as pure (negative)
	 * subtractions; keys present only in a pass through unchanged.
	 * @return a new table; neither input is modified.
	 */
	protected static Hashtable<String,Double> hashSubtract(
			Hashtable<String,Double> a, Hashtable<String,Double> b, double betaFactor) {
		Hashtable<String,Double> result = new Hashtable<String,Double>();
		for (Entry<String,Double> e : a.entrySet()) {
			String key = e.getKey();
			double subval = b.containsKey(key) ? b.get(key) : 0;
			result.put(key, e.getValue() - betaFactor * subval);
		}
		for (Entry<String,Double> e : b.entrySet()) {
			// Anything already in result has been subtracted above.
			if (!result.containsKey(e.getKey())) {
				result.put(e.getKey(), -betaFactor * e.getValue());
			}
		}
		return result;
	}

	/**
	 * Merge two reverse-lookup tables; on a key collision, a's entry wins.
	 * @return a new table; neither input is modified.
	 */
	@SuppressWarnings("unchecked")
	protected static Hashtable<String,String> mergeLookups(
			Hashtable<String,String> a, Hashtable<String,String> b) {
		Hashtable<String,String> ret = (Hashtable<String,String>) a.clone();
		for (Entry<String,String> e : b.entrySet()) {
			if (!ret.containsKey(e.getKey())) {
				ret.put(e.getKey(), e.getValue());
			}
		}
		return ret;
	}

}
