/**
 * 
 */
package edu.umd.clip.lm.ngram;

import java.io.Serializable;
import java.util.*;

import org.apache.commons.math.util.OpenIntToDoubleHashMap;

import edu.berkeley.nlp.util.Pair;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.factors.Dictionary;
import edu.umd.clip.lm.model.Experiment;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.ngram.Counts.Count;
import edu.umd.clip.lm.util.Long2IntMap;
import edu.umd.clip.smoothing.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class NgramModel implements Serializable {
	private static final long serialVersionUID = 1L;
	
	// Human-readable identifier of this model.
	private final String name;
	
	// Conditioning (history) variables; their number determines the model order (see getOrder()).
	private final CtxVar contextVariables[];
	// The variable whose distribution this model estimates.
	private final CtxVar futureVariable;
	
	// Sorted array of all future-variable values observed in training (rebuilt in estimateModel()).
	private int futureVocab[];
	
	// context variable at the same time index as futureVariable, i.e., T@0 for W@0, or vice versa. Can be null
	private final CtxVar futureTimeVar;
	// if futureTimeVar != null, values of futureTimeVar limit available vocabulary for futureVariable
	// to make sure we allow a word to only have tags that have been observed with it
	private final HashMap<Integer,int[]> limitedFutureVocabs;
	
	/**
	 * One node of the back-off trie: a word -> probability table plus the
	 * back-off weight applied when descending past this node.
	 */
	public static class BONode implements Serializable {
		private static final long serialVersionUID = 1L;

		// word id -> (interpolated) probability stored at this context
		OpenIntToDoubleHashMap probs;
		// back-off weight for contexts extending this node
		double bow;
		
		/**
		 * @param bow  back-off weight for this node
		 * @param size expected number of entries in the probability table
		 */
		public BONode(double bow, int size) {
			this.bow = bow;
			this.probs = new OpenIntToDoubleHashMap(size, 0.0);
		}
		
		public BONode(double bow) {
			// BUG FIX: this used to be this(0.0, 4), silently discarding the argument.
			// NOTE(review): existing call sites pass counts.size() here, so the argument
			// may have been intended as a capacity hint rather than a back-off weight;
			// bow is overwritten during estimation either way, so honoring the parameter
			// is the safe fix — confirm the intended semantics.
			this(bow, 4);
		}
		
		public void putProb(int word, double prob) {
			probs.put(word, prob);
		}
		
		/** @return the stored probability for word, or 0.0 (the map's missing value) if absent. */
		public double getProb(int word) {
			return probs.get(word);
		}
	}

	//private Trie<MutableInteger> discountTrie;
	
	private Trie<BONode> theTrie;
	
	private final int minCounts[];
	/**
	 * Builds an n-gram model over the given variables. If one of the context
	 * variables shares a time offset with the future variable, it is used to
	 * restrict the admissible future vocabulary per context value (see
	 * {@code limitedFutureVocabs}).
	 *
	 * @param name             model name
	 * @param contextVariables conditioning variables; their number fixes the model order
	 * @param futureVariable   the predicted variable
	 * @param futureVocab      initial future vocabulary (replaced in estimateModel())
	 */
	public NgramModel(String name, CtxVar[] contextVariables, CtxVar futureVariable, int[] futureVocab) {
		this.name = name;
		this.contextVariables = contextVariables;
		this.futureVariable = futureVariable;
		this.futureVocab = futureVocab;
		
		// Look for a context variable at the same time offset as the future
		// variable; if one exists it constrains the future vocabulary.
		CtxVar v = null;
		for(CtxVar var : contextVariables) {
			if (var.getOffset() == futureVariable.getOffset()) {
				v = var;
				break;
			}
		}
		if (v == null) {
			this.futureTimeVar = null;
			this.limitedFutureVocabs = null;
		} else {
			System.err.printf("Variable %s limits the vocabulary\n", v);
			this.futureTimeVar = v;
			this.limitedFutureVocabs = new HashMap<Integer, int[]>();
		}
		
		// Minimum count threshold per order. The original loop had an if/else
		// whose branches were identical (both assigned 1), so a fill is equivalent.
		this.minCounts = new int[getOrder()];
		Arrays.fill(minCounts, 1);
	}

	/**
	 * Accumulates n-gram counts for every (context, future) pair in the block.
	 * When a future-time context variable exists, also records on the counts
	 * object which future values were observed with each of its values.
	 */
	public void addTrainingData(Counts counts, TrainingDataBlock block) {
		// Lazily create the per-context-value vocabulary map on the counts object.
		if (futureTimeVar != null && counts.getLimitedVocabs() == null) {
			counts.setLimitedVocabs(new HashMap<Integer, HashSet<Integer>>());
		}
		
		// Indices of context variables at time offset 0: their values come from
		// the future tuple rather than from the history.
		byte zeroOffsetIdx[] = new byte[contextVariables.length];
		int numZeroOffset = 0;
		for(byte i = 0; i < contextVariables.length; ++i) {
			if (contextVariables[i].getOffset() == 0) {
				zeroOffsetIdx[numZeroOffset] = i;
				++numZeroOffset;
			}
		}
		
		for(ContextFuturesPair pair : block) {
			// Pre-fill the context slots that depend only on the history.
			int ctx[] = new int[contextVariables.length];
			for(int i = 0; i < ctx.length; ++i) {
				if (contextVariables[i].getOffset() != 0) {
					ctx[i] = contextVariables[i].getValue(pair.getContext().data);
				}
			}

			for(TupleCountPair tc : pair.getFutures()) {
				// The predicted value lives either in the future tuple or in the history.
				int word = (futureVariable.getOffset() == 0)
						? futureVariable.getValue(tc.tuple)
						: futureVariable.getValue(pair.getContext().data);
				
				// Fill in the offset-0 context slots from the future tuple.
				for(int i = 0; i < numZeroOffset; ++i) {
					byte idx = zeroOffsetIdx[i];
					ctx[idx] = contextVariables[idx].getValue(tc.tuple);
				}
				
				if (futureTimeVar != null) {
					int ctxValue = (futureTimeVar.getOffset() == 0)
							? futureTimeVar.getValue(tc.tuple)
							: futureTimeVar.getValue(pair.getContext().data);
					
					// Record that `word` co-occurred with this value of futureTimeVar.
					HashMap<Integer, HashSet<Integer>> limitedVocabs = counts.getLimitedVocabs();
					HashSet<Integer> vocab = limitedVocabs.get(ctxValue);
					if (vocab == null) {
						vocab = new HashSet<Integer>();
						limitedVocabs.put(ctxValue, vocab);
					}
					vocab.add(word);
				}
				counts.addCount(word, ctx, tc.count);
			}
		}
	}
	
	/**
	 * Estimates the back-off model from the accumulated counts:
	 * 1) freezes the observed future vocabulary as a sorted array,
	 * 2) converts the per-value limited vocabularies to arrays (if used),
	 * 3) fits one modified Kneser-Ney smoother per order,
	 * 4) recursively fills the probability trie via estimateNode().
	 */
	public void estimateModel(Counts counts) {
		Trie<Count> countTrie = counts.getTrie();

		// Materialize the observed vocabulary, sort it, and release the set.
		this.futureVocab = new int[counts.getObservedVocab().size()];
		{
			int pos = 0;
			for(Integer word : counts.getObservedVocab()) {
				futureVocab[pos++] = word;
			}
			Arrays.sort(futureVocab);
			counts.setObservedVocab(null);
		}
		// Same for the limited vocabularies: HashSet -> int[] per context value.
		if (futureTimeVar != null) {
			HashMap<Integer, HashSet<Integer>> limitedVocabs = counts.getLimitedVocabs();
			for(Map.Entry<Integer, HashSet<Integer>> e : limitedVocabs.entrySet()) {
				HashSet<Integer> vocab = e.getValue();
				int arrayVocab[] = new int[vocab.size()];
				int i=0;
				for(int word : vocab) {
					arrayVocab[i++] = word;
				}
				limitedFutureVocabs.put(e.getKey(), arrayVocab);
			}
			counts.setLimitedVocabs(null);
		}
		
		// One smoother per order, each fit from count-of-count statistics.
		KNModSmoother smoothers[] = new KNModSmoother[getOrder()];
		
		for(int o=0; o<getOrder(); ++o) {
			final SmootherProducer producer = new SmootherProducer(minCounts[o]);
			
			// Feeds every raw n-gram count at this trie depth to the producer.
			Trie.Callback<Counts.Count> callback = new Trie.Callback<Counts.Count>() {
				@Override
				public void call(Trie<Count> t) {
					for(Long2IntMap.Iterator it = t.getData().counts.iterator(); it.hasNext(); ) {
						int count = it.next().getValue();
						producer.addCount(count);
					}
				}
			};

			// Feeds the number of distinct continuations of each node one level
			// deeper — KN-style type counts used for the lower orders.
			Trie.Callback<Counts.Count> lowerOrderCallback = new Trie.Callback<Counts.Count>() {
				@Override
				public void call(Trie<Count> t) {
					producer.addCount(t.getData().counts.size());
				}
			};

			// Highest order uses raw counts; lower orders use continuation counts.
			if (o < getOrder() - 1) {
				countTrie.iterate(o+1, lowerOrderCallback);
			} else {
				countTrie.iterate(o, callback);
			}
			
			smoothers[o] = new KNModSmoother(producer, System.err);
			// Degenerate count-of-count statistics can make the estimated
			// discounts NaN; fall back to zero discounts in that case.
			if (Double.isNaN(smoothers[o].getBackoffProb())) {
				System.err.println("Bad counts for KNMod, using dummy discounts");
				smoothers[o] = new KNModSmoother(producer, 0.0, 0.0, 0.0);
			}
			System.err.printf("Order %d: smoother: %s\n", o, smoothers[o]);
		}
		
		// Root node: back-off weight 1.0, sized to the unigram count table.
		theTrie = new Trie<BONode>(new BONode(1.0, countTrie.getData().counts.size()));
		/*
		// estimate the unigram model separately
		for(int word : futureVocab) {
			int count = countTrie.getData().counts.get(word);
			double prob = smoothers[0].getProb(count);

			// interpolate with uniform
			prob += smoothers[0].getBackoffProb() / futureVocab.length;
			theTrie.getData().putProb(word, prob);
		}
		theTrie.getData().bow = 1.0;
		
		verifyContextProbs(new int[0], 1);
		
		if (getOrder() == 1) return;
		*/
		// Recursively estimate every node, reusing one shared context buffer.
		int context[] = new int[getOrder()-1];
		estimateNode(theTrie, countTrie, 1, context, smoothers);
		
		/*
		for(Map.Entry<Integer, Trie<Counts.Count>> entry : countTrie.getSubTries().entrySet()) {
			int nextContext = entry.getKey();
			Trie<Counts.Count> nextCounts = entry.getValue();
			
			Trie<BONode> nextTrie = new Trie<BONode>(new BONode(nextCounts.getData().counts.size()));
			theTrie.addSubtrie(nextContext, nextTrie);
			
			context[0] = nextContext;
			
			estimateNode(nextTrie, nextCounts, 1, context, smoothers);
		}
		*/
	}
	
	/**
	 * Recursively estimates probabilities and a back-off weight for one trie
	 * node and all of its descendants.
	 *
	 * @param trie         output node being filled in
	 * @param counts       count-trie node for the same context
	 * @param currentOrder order of this node (1 = root/unigram)
	 * @param context      shared buffer holding the context path from the root;
	 *                     slot currentOrder-1 is overwritten while recursing
	 * @param smoothers    per-order smoothers fit in estimateModel()
	 */
	private void estimateNode(Trie<BONode> trie, Trie<Counts.Count> counts, int currentOrder, int context[], KNModSmoother smoothers[]) {
		
		// estimate discount for the node:
		SmootherProducer producer = new SmootherProducer();
		for(Long2IntMap.Iterator it = counts.getData().counts.iterator(); it.hasNext();) {
			Long2IntMap.Entry e = it.next();
			int count = e.getValue();
			producer.addCount(count);
		}
		
		KNModSmoother smoother = smoothers[currentOrder-1];
		// use order-global discounts with context-specific counts
		smoother = new KNModSmoother(producer, smoother.getD1(), smoother.getD2(), smoother.getD3());
		
		double totalBackoffProb = 0;
		double totalProb = 0;
		
		// A node backs off to its parent unless it is the root, or the most
		// recently added context variable is the vocabulary-limiting one.
		boolean hasBackoff = currentOrder > 1 && contextVariables[currentOrder-2] != futureTimeVar;
		
		if (hasBackoff) {
			// compute probabilities, interpolating with the lower-order model
			for(Long2IntMap.Iterator it = counts.getData().counts.iterator(); it.hasNext();) {
				Long2IntMap.Entry e = it.next();
				int word = (int) e.getKey();
				int count = e.getValue();
				
				double prob = smoother.getProb(count);
				
				// NOTE(review): the currentOrder == 1 branch below is unreachable
				// here, since hasBackoff already requires currentOrder > 1.
				double boProb;
				if (currentOrder == 1) {
					boProb = 1.0 / futureVocab.length;
				} else {
					boProb = getProb(word, context, currentOrder-1);
				}
				totalBackoffProb += boProb;
				
				// interpolate
				prob += smoother.getBackoffProb() * boProb;
				
				totalProb += prob;
				
				trie.getData().putProb(word, prob);
			}
			
			// Back-off weight: probability mass left at this node divided by the
			// mass the lower-order model assigns to unseen words here.
			double nominator = 1.0 - totalProb;
			double denominator = 1.0 - totalBackoffProb;
			
			double bow = nominator / denominator;
			
			trie.getData().bow = bow;
		} else {
			if (futureTimeVar == null || currentOrder == 1) {
				// No lower-order model: interpolate with the uniform distribution
				// over the full future vocabulary.
				for(int word : futureVocab) {
					int count = counts.getData().counts.get(word);
					double prob = smoother.getProb(count);

					// interpolate with uniform
					prob += smoother.getBackoffProb() / futureVocab.length;
					trie.getData().putProb(word, prob);
				}
				trie.getData().bow = 1.0;
			} else {
				//assert(contextVariables[currentOrder-1] == futureTimeVar);
				
				// Restricted vocabulary case: uniform interpolation over only the
				// words observed with this value of the limiting variable.
				int limitedVocab[] = limitedFutureVocabs.get(context[currentOrder-2]);
				for(int word : limitedVocab) {
					int count = counts.getData().counts.get(word);
					double prob = smoother.getProb(count);

					// interpolate with uniform
					prob += smoother.getBackoffProb() / limitedVocab.length;
					trie.getData().putProb(word, prob);
				}
				trie.getData().bow = 1.0;				
			}
		}
		
		// Currently a no-op (its body is commented out).
		verifyContextProbs(context, currentOrder);
		
		if (counts.getSubTries() == null) return;
		
		// Recurse into every observed extension of this context.
		for(Map.Entry<Integer, Trie<Counts.Count>> entry : counts.getSubTries().entrySet()) {
			int nextContext = entry.getKey();
			Trie<Counts.Count> nextCounts = entry.getValue();
			
			Trie<BONode> nextTrie = new Trie<BONode>(new BONode(nextCounts.getData().counts.size()));
			trie.addSubtrie(nextContext, nextTrie);
			
			context[currentOrder-1] = nextContext;
			
			estimateNode(nextTrie, nextCounts, currentOrder+1, context, smoothers);
		}
	}
	
	/**
	 * @return the model probability of {@code word} given the full-order context.
	 */
	public double getProb(int word, int context[]) {
		return getProb(word, context, getOrder());
	}
	
	/**
	 * Back-off lookup: walks the trie along the context, keeping the deepest
	 * explicit probability found for {@code word} together with the product of
	 * back-off weights accumulated since that probability was seen.
	 *
	 * @return the (possibly backed-off) probability; 0.0 if the word was never
	 *         assigned a probability anywhere along the path
	 */
	private double getProb(int word, int context[], int order) {
		final int ctxLen = order - 1;
		double bestProb = 0.0;
		double bowProduct = 1.0;
		
		Trie<BONode> node = theTrie;
		for(int depth = 0; ; ++depth) {
			double p = node.getData().getProb(word);
			if (p > 0) {
				// Explicit estimate at this depth: restart the back-off product.
				bestProb = p;
				bowProduct = 1.0;
			}
			
			if (depth == ctxLen) break;
			
			node = node.findTrie(context[depth]);
			if (node == null) break;
			
			bowProduct *= node.getData().bow;
		}
		return bestProb * bowProduct;
	}
	
	/**
	 * Sanity check that the probabilities over the future vocabulary sum to one
	 * for the given context. The body is commented out, so this is currently a
	 * no-op kept as a debugging hook.
	 */
	private void verifyContextProbs(int context[], int order) {
		/*
		double totalProb = 0.0;
		for(int word : futureVocab) {
			double prob = getProb(word, context, order);
			totalProb += prob;
		}
		if (!ProbMath.approxEqual(totalProb, 1.0)) {
			System.err.printf("bad node: ctx=%s, probs=%g\n", Arrays.toString(Arrays.copyOf(context, order-1)), totalProb);
		} else {
			//System.err.printf("good node: ctx=%s\n", Arrays.toString(Arrays.copyOf(context, order-1)));
		}
		*/
	}
	
	/**
	 * Computes the total base-10 log-probability of the block under this model,
	 * along with the number of scored words. Sentence-end tokens are excluded
	 * from the word count so they do not inflate perplexity.
	 *
	 * @return pair of (sum of log10 probabilities weighted by counts, word count)
	 */
	public Pair<Double,Long> computeTrainingDataLogprob(TrainingDataBlock block) {
		double logProb = 0;
		long totalWordCount = 0;
		
		byte mainFactorIdx = Experiment.getInstance().getTupleDescription().getMainFactorIndex();
		
		// Indices of context variables at time offset 0: their values come from
		// the future tuple rather than from the history (mirrors addTrainingData()).
		int futureContextSize = 0;
		byte futureContexts[] = new byte[contextVariables.length];

		for(byte i = 0; i<contextVariables.length; ++i) {
			if (contextVariables[i].getOffset() == 0) {
				futureContexts[futureContextSize++] = i;
			}
		}

		for(ContextFuturesPair pair : block) {
			// Pre-fill the context slots that depend only on the history.
			int ctx[] = new int[contextVariables.length];
			for(int i=0; i<ctx.length; ++i) {
				if (contextVariables[i].getOffset() == 0) continue;
				ctx[i] = contextVariables[i].getValue(pair.getContext().data);
			}

			for(TupleCountPair tc : pair.getFutures()) {
				int word;
				if (futureVariable.getOffset() == 0) {
					word = futureVariable.getValue(tc.tuple);
				} else {
					word = futureVariable.getValue(pair.getContext().data);
				}
				
				for(int i=0; i<futureContextSize; ++i) {
					ctx[futureContexts[i]] = contextVariables[futureContexts[i]].getValue(tc.tuple);
				}

				double prob = getProb(word, ctx);
				// Removed a leftover debugging re-computation of getProb() that ran
				// when prob <= 0: the lookup is deterministic, so calling it again
				// could not change the value.
				// NOTE(review): prob == 0 still yields -Infinity below, exactly as
				// before — confirm whether a probability floor is wanted instead.
				logProb += Math.log10(prob) * tc.count;
				
				// don't count </s> as a word in perplexity computation
				if (!Dictionary.isEnd(FactorTuple.getValue(tc.tuple, mainFactorIdx))) {
					totalWordCount += tc.count;
				}
			}
		}
		return new Pair<Double,Long>(logProb, totalWordCount);
	}
	
	/** @return the n-gram order: number of context variables plus one for the future. */
	public int getOrder() {
		return contextVariables.length + 1;
	}

	/**
	 * @return the model name
	 */
	public String getName() {
		return name;
	}

	/**
	 * Returns the admissible future vocabulary for the given context, or the
	 * full vocabulary when no future-time variable restricts it (or when no
	 * context is supplied).
	 *
	 * NOTE(review): may return null when context[0]'s value has no recorded
	 * vocabulary (never seen in training) — callers appear to tolerate this;
	 * confirm before changing it to return an empty array.
	 */
	public int[] getFutureVocab(int context[]) {
		if (futureTimeVar == null || context == null)
			return futureVocab;
		
		// The limiting variable is expected to be the first context variable.
		assert(futureTimeVar == contextVariables[0]);
		// Removed a leftover debugging block that, on a miss, copied and sorted
		// the map's keys into a throwaway array and printed an empty string (a
		// breakpoint anchor); it had no observable effect.
		return limitedFutureVocabs.get(context[0]);
	}

	/**
	 * @return the context (history) variables.
	 * NOTE(review): exposes the internal array without copying, so callers can
	 * mutate the model's configuration — confirm this is intended.
	 */
	public CtxVar[] getContextVariables() {
		return contextVariables;
	}

	/** @return the variable whose distribution this model predicts. */
	public CtxVar getFutureVariable() {
		return futureVariable;
	}
}
