/**
 * 
 */
package edu.umd.clip.lm.model.decoding;

import java.util.Arrays;

import edu.umd.clip.lm.factors.Dictionary;
import edu.umd.clip.lm.factors.FactorTuple;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.storage.AbstractProbTreeStorage;
import edu.umd.clip.lm.util.LRUCache;

/**
 * @author Denis Filimonov &lt;den@cs.umd.edu&gt;
 *
 */
public class NgramMarginalization {
	// Forest models indexed by (order - 1): forests[0] = unigram, forests[1] = bigram, ...
	// Never reassigned after construction, hence final.
	private final ForestModel forests[];
	// Index of the word (main) factor within a factor tuple.
	private final byte wordFactorIdx;
	// Bit mask selecting the overt factors of a factor tuple.
	private final long overtMask;
	// Memoizes ngram -> marginalized log-probability computations.
	final LRUCache<NgramCacheKey,Double> ngramCache;

	/**
	 * Creates a marginalizer around the given highest-order forest model.
	 * Lower-order forests are collected from the current {@link Experiment},
	 * and all of them are switched to share the top forest's probability storage.
	 *
	 * @param forest the highest-order forest model
	 * @throws Error if the experiment lacks a forest for any order below
	 *         {@code forest.getOrder()}
	 */
	public NgramMarginalization(ForestModel forest) {
		this.forests = new ForestModel[forest.getOrder()];
		this.forests[forest.getOrder()-1] = forest;
		
		AbstractProbTreeStorage storage = forest.getDecoder().getStorage();
		
		// fill lower order forests, making them share the top forest's storage
		Experiment exp = Experiment.getInstance();
		for(ForestModel f : exp.getForests().values()) {
			if (f.getOrder() < forest.getOrder()) {
				forests[f.getOrder()-1] = f;
				f.getDecoder().setStorage(storage);
			}
		}
		
		// every order 1..forest.getOrder() must be available
		for(int i = 0; i < forests.length; ++i) {
			if (forests[i] == null) {
				throw new Error("Cannot produce ngram marginalization: no "+(i+1)+"-gram forest");
			}
		}
		
		wordFactorIdx = exp.getTupleDescription().getMainFactorIndex();
		overtMask = exp.getTupleDescription().getOvertFactorsMask();
		
		ngramCache = new LRUCache<NgramCacheKey,Double>(10000,100000) {
			@Override
			protected Double loadItem(NgramCacheKey key) {
				// cache miss: compute and store the marginalized probability
				return computeNgramProbability(key.words);
			}
		};
	}
	
	/**
	 * Returns the (cached) marginalized log-probability of the given ngram.
	 *
	 * @param ngram factor tuples in the order w_{i-n+1} ... w_{i-1} w_i
	 * @return log p(w_i | w_{i-n+1} ... w_{i-1}) with hidden factors marginalized
	 */
	public double getNgramProbability(long[] ngram) {
		Double prob = ngramCache.getItem(new NgramCacheKey(ngram));
		return prob;
	}

	/**
	 * Computes the probability of an ngram with tags (hidden factors)
	 * marginalized out. The ngram is in the order w_{i-n+1} ... w_{i-1} w_i,
	 * and {@code forests} are in the order unigram, bigram, etc. Leading
	 * sentence-start symbols are stripped, and each stripped symbol bumps the
	 * forest order used for the remaining words.
	 *
	 * NOTE(review): an ngram consisting entirely of sentence-start symbols, or
	 * one longer than the number of available forests, would index past the end
	 * of {@code forests} in the loop below — presumably callers never pass such
	 * input; confirm against call sites.
	 *
	 * @param ngram factor tuples, oldest word first
	 * @return log p(w_i | history) = log p(ngram) - log p(history)
	 */
	public double computeNgramProbability(long[] ngram) {
		
		int order = Math.min(ngram.length, forests.length);
		
		SentenceContext ctx = forests[order-1].getDecoder().createContext();
		double n_1_prob = 0;
		
		// find the first non-<s> word and drop the leading <s> symbols
		byte addedStarts=0;
    	for(; addedStarts<ngram.length; ++addedStarts) {
    		if (!Dictionary.isStart(FactorTuple.getValue(ngram[addedStarts], wordFactorIdx))) {
    			if (addedStarts > 0) {
    				ngram = Arrays.copyOfRange(ngram, addedStarts, ngram.length);
    			}
    			break;
    		}
    	}
		
		// feed the remaining words; each stripped <s> shifts to a higher-order forest
		for(int i=0; i<ngram.length; ++i) {
			ctx = ctx.addWord(ngram[i] & overtMask);
			forests[i+addedStarts].getDecoder().partition(ctx);
			ctx.setLastSliceTotalProb(ctx.getCurrentSlice().getTotalProb());
			if (i == ngram.length-2) {
				// log-probability of the (n-1)-word history, captured before the last word
				n_1_prob = ctx.getLogProb();
			}
		}
		double n_prob = ctx.getLogProb();
		// conditional probability in log space: log p(ngram) - log p(history)
		return n_prob - n_1_prob;
	}
	
	/**
	 * Immutable cache key wrapping an ngram. The array is defensively copied so
	 * that later mutation of the caller's array cannot corrupt the cache (a
	 * mutated key would no longer match the hash bucket it was stored under).
	 */
	static private class NgramCacheKey {
		private final long words[];

		public NgramCacheKey(long[] words) {
			// defensive copy: the key must not change while it lives in the cache
			this.words = words.clone();
		}

		@Override
		public int hashCode() {
			return Arrays.hashCode(words);
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj)
				return true;
			// instanceof is null-safe, so no separate null check is needed
			if (!(obj instanceof NgramCacheKey))
				return false;
			return Arrays.equals(words, ((NgramCacheKey) obj).words);
		}
	}

}
