/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.util.*;
import com.sleepycat.bind.tuple.*;
import com.sleepycat.collections.*;
import com.sleepycat.je.*;

import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.ContextFuturesPair;
import edu.umd.clip.lm.model.data.ContextReducingReadableTrainingData;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataNodeReader;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataNodeWriter;
import edu.umd.clip.lm.model.data.ProxyTrainingDataReader;
import edu.umd.clip.lm.model.data.ReadableTrainingDataNode;
import edu.umd.clip.lm.model.data.TrainingDataBlock;
import edu.umd.clip.lm.model.data.TrainingDataNodeReader;
import edu.umd.clip.lm.model.data.TrainingDataNodeWriter;
import edu.umd.clip.lm.model.data.TrainingDataUtil;
import edu.umd.clip.lm.model.data.TupleCountPair;
import edu.umd.clip.lm.model.data.WritableTrainingDataNode;
import edu.umd.clip.lm.model.training.NgramCounts.Trie;
import edu.umd.clip.lm.questions.Question;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;
import edu.umd.clip.smoothing.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class TreeNgramDistribution {
	// the top-level model
	private LanguageModel lm;
	private Map<BinaryTree<HistoryTreePayload>, ClusterLists> node2ClusterLists;
	private StoredMap<DistributionID, Distribution> distStorage;
	
	private Distribution unigramDistribution;
	
	// per-order ngram smoothers, starting with the bigram
	private Smoother[] smoothers;
	
	private File dataDir;
	
	public static boolean dumpNgramCounts = false;
	
	/**
	 * Creates a distribution trainer backed by a Berkeley DB database.
	 *
	 * @param lm the top-level language model whose history tree drives the clustering
	 * @param db open Berkeley DB database used to persist per-cluster distributions
	 * @param dataDir directory where intermediate on-disk training files are written
	 */
	public TreeNgramDistribution(LanguageModel lm, Database db, File dataDir) {
		this.lm = lm;
		// last argument true: the stored map is writable — distributions are
		// inserted and updated during training (see initialize(GenerationData))
		this.distStorage = new StoredMap<DistributionID, Distribution>(db, DistributionIDBinding.instance, DistributionBinding.instance, true);
		this.dataDir = dataDir;
	}

	/**
	 * Holder for the joint-cluster ids assigned to one history-tree node,
	 * one entry per n-gram order: index 0 is the bigram, index order-2 the
	 * order-gram.
	 */
	static class ClusterLists {
		// the cluster ids by the order, starting with the bigram
		final int[] jointClusters;

		/**
		 * @param jointClusters per-order joint cluster ids; the array is kept
		 *        as-is (not copied) and its entries are reassigned when backoff
		 *        clusters are split during training
		 */
		public ClusterLists(int[] jointClusters) {
			this.jointClusters = jointClusters;
		}
	}
	
	/**
	 * Berkeley DB tuple binding that serializes a {@link DistributionID} as a
	 * byte (the n-gram order) followed by an int (the joint cluster id).
	 * Stateless, hence the shared singleton {@link #instance}.
	 */
	public static class DistributionIDBinding extends TupleBinding<DistributionID> {
		public static final DistributionIDBinding instance = new DistributionIDBinding();

		/**
		 * Reads the order byte and then the cluster-id int back into a key.
		 * Java evaluates constructor arguments left to right, so the byte is
		 * consumed from the tuple before the int — the same wire order that
		 * {@link #objectToEntry} produces.
		 */
		@Override
		public DistributionID entryToObject(TupleInput input) {
			return new DistributionID(input.readByte(), input.readInt());
		}

		/** Writes the order byte followed by the cluster-id int. */
		@Override
		public void objectToEntry(DistributionID object, TupleOutput output) {
			output.writeByte(object.order);
			output.writeInt(object.clusterid);
		}
	}
	
	/**
	 * Key identifying a stored distribution: the n-gram order plus the joint
	 * cluster id within that order. Instances are used as keys of the
	 * Berkeley-DB-backed {@code distStorage} map, so both fields are declared
	 * final — mutating a key after insertion would break hash-based lookup.
	 */
	static class DistributionID {
		final byte order;
		final int clusterid;

		/**
		 * @param order n-gram order (2 = bigram, ...)
		 * @param clusterid joint cluster id within that order
		 */
		public DistributionID(byte order, int clusterid) {
			this.order = order;
			this.clusterid = clusterid;
		}

		@Override
		public int hashCode() {
			// same value as the classic two-step 31*result accumulation:
			// 31 * (31 * 1 + clusterid) + order
			final int prime = 31;
			return prime * (prime + clusterid) + order;
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj)
				return true;
			if (obj == null || getClass() != obj.getClass())
				return false;
			DistributionID other = (DistributionID) obj;
			return clusterid == other.clusterid && order == other.order;
		}

		@Override
		public String toString() {
			return "DistributionID[order=" + order + ", clusterid=" + clusterid + "]";
		}
	}
	
	private class GenerationData {
		File file;
		int nrBackoffClusters[];
		BinaryTree<HistoryTreePayload> nodes[];
		ClusterLists clusterLists[];
		NgramCounts ngramCounts;
		// to test something, normally shoulod be 1.0
		private double discountScale;
		
		private void dumpNgramCounts(PrintWriter out) {
			if (ngramCounts.theTrie.subTries != null) {
				for(Trie subTrie : ngramCounts.theTrie.subTries.keySet()) {
					dumpNgramCounts(out, "", subTrie);
				}
			}
		}
		
		private void dumpNgramCounts(PrintWriter out, String prefix, Trie trie) {
			if (trie.subTries == null) {
				out.print(prefix);
				out.println(FactorTuple.toStringNoNull(trie.word) + " " + trie.count);
			} else {
				String newPrefix = prefix + FactorTuple.toStringNoNull(trie.word) + " ";
				for(Trie subTrie : trie.subTries.keySet()) {
					dumpNgramCounts(out, newPrefix, subTrie);
				}
			}
		}
	}
	
	/**
	 * First-stage initialization from raw training data: indexes the history
	 * tree by cluster id, collects n-gram counts, estimates one smoother per
	 * order, builds the unigram distribution interpolated with the uniform
	 * distribution, then delegates to {@code initialize(GenerationData)} for
	 * the per-cluster higher-order distributions.
	 *
	 * @param dataFile on-disk training data, read node-by-node
	 * @param dummySmoother if true use DummySmoother, else modified Kneser-Ney
	 * @param discountScale scale applied to the discounts (normally 1.0; debug knob)
	 * @throws IOException on failure to read the training data
	 */
	@SuppressWarnings({ "unchecked", "null" })
	public void initialize(File dataFile, boolean dummySmoother, double discountScale) throws IOException {
		BinaryTree<HistoryTreePayload> theTree = lm.getHistoryTree();
		// find the largest cluster id so the per-cluster arrays can be sized
		int maxClusterId = 0;
		for(BinaryTreeIterator<HistoryTreePayload> it = theTree.getPostOrderIterator(); it.hasNext(); ) {
			int clusterId = it.next().clusterid;
			if (clusterId > maxClusterId) maxClusterId = clusterId;
		}
		int nrClusters = maxClusterId+1;
		
		GenerationData genData = new GenerationData();
		genData.file = dataFile;
		
		genData.nodes = new BinaryTree[nrClusters];
		genData.clusterLists = new ClusterLists[nrClusters];
		genData.discountScale = discountScale;
		
		/*
		int nrBackoffClusters[] = new int[forest.getOrder()-2];
		Arrays.fill(nrBackoffClusters, 1);
		nrBackoffClusters[nrBackoffClusters.length-1] = nrClusters;
		*/
		// identity map: history-tree nodes are compared by reference
		node2ClusterLists = new IdentityHashMap<BinaryTree<HistoryTreePayload>, ClusterLists>(nrClusters);
		
		// index every tree node by its cluster id and give it an initial
		// cluster list: all lower orders start in cluster 1, the top order
		// in the node's own cluster
		for(BinaryTreeIterator<HistoryTreePayload> it = theTree.getPostOrderIterator(); it.hasNext(); ) {
			BinaryTree<HistoryTreePayload> node = it.nextNode();
			int clusterid = node.getPayload().clusterid;
			genData.nodes[clusterid] = node;
			
			if (lm.getOrder() > 1) {
				int cl[] = new int[lm.getOrder() - 1];
				Arrays.fill(cl, 0, cl.length-1, 1);
				cl[cl.length-1] = clusterid;
				ClusterLists clusterLists = new ClusterLists(cl);
				genData.clusterLists[clusterid] = clusterLists;
				node2ClusterLists.put(node, clusterLists);
			}
		}
		
		// generate NgramCounts
		{
			// NOTE(review): input is not closed on an exception path (no
			// try/finally) — a reader failure leaks the file handle; confirm.
			FileInputStream input = new FileInputStream(dataFile);
			TrainingDataNodeReader reader = new OnDiskTrainingDataNodeReader(input.getChannel());
			NgramCounts ngramCounts = new NgramCounts(lm.getOrder());
			// NOTE(review): the unconditional break below means only the FIRST
			// data node contributes counts — presumably at this stage all data
			// lives in a single root node; confirm against the writer side.
			while(true) {
				ReadableTrainingDataNode dataNode = reader.getNext();
				if (dataNode == null) break;
				ProxyTrainingDataReader proxyReader = new ProxyTrainingDataReader(dataNode.getData(0));
				ContextReducingReadableTrainingData ngramData = new ContextReducingReadableTrainingData(proxyReader, lm.getOrder(), 0, 1000000);
				ngramCounts.populateCounts(ngramData);
				break;
			}
			input.close();
			genData.ngramCounts = ngramCounts;
			
			// estimate the ngram smoothers
			smoothers = new Smoother[lm.getOrder()-1];
			
			Smoother unigramSmoother = null;
			for(byte order=1; order<=lm.getOrder(); ++order) {
				// feed all counts of this order into the smoother estimator
				SmootherProducer producer = new SmootherProducer();
				Iterator<Trie> iterator = ngramCounts.iterateLevel(order);
				while(iterator.hasNext()) {
					producer.addCount(iterator.next().count);
				}
				
				System.err.printf("order %d smoother: ", order);
				Smoother smoother = dummySmoother ? new DummySmoother(producer) : new KNModSmoother(producer, System.err);
				
				// the unigram smoother is kept local; smoothers[] starts at the bigram
				if (order == 1) {
					unigramSmoother = smoother;
				} else {
					smoothers[order-2] = smoother;
				}
			}
			
			// compute the unigram distribution interpolated with the uniform distribution
			Set<FactorTuple> allOvertFactors = Experiment.getInstance().getTupleDescription().getAllOvertFactors().keySet();
			// the smoother's backoff mass is spread uniformly over the vocabulary
			double uniformProb = unigramSmoother.getBackoffProb() / allOvertFactors.size();
			Long2DoubleMap unigramDist = new Long2DoubleMap(allOvertFactors.size());
			for(Iterator<Trie> it = ngramCounts.iterateLevel(1); it.hasNext(); ) {
				Trie trie = it.next();
				unigramDist.addAndGet(trie.word, unigramSmoother.getProb(trie.count));
			}
			
			// add the uniform share for every factor, seen or not, and check normalization
			double totalProb = 0;
			for(FactorTuple tuple : allOvertFactors) {
				totalProb += unigramDist.addAndGet(tuple.getBits(), uniformProb);
			}
			assert(ProbMath.approxEqual(totalProb, 1.0));
			
			unigramDistribution = new Distribution(unigramDist, unigramSmoother.getTotalCount(), 0);
		}
		
		initialize(genData);
	}
	
	/**
	 * Builds the intermediate training-data file name for the given model
	 * order and split level, e.g. "file-3-1".
	 *
	 * Uses plain concatenation instead of String.format: %d is sensitive to
	 * the default locale (it can emit non-ASCII digits), which is wrong for
	 * a file name. Static because it reads no instance state.
	 */
	private static String makeFilename(int order, int level) {
		return "file-" + order + "-" + level;
	}
	
	/**
	 * Second-stage initialization: repeatedly sweeps the on-disk training
	 * data down the history tree. Leaf (non-backoff) nodes contribute
	 * discounted counts to the per-order distributions stored in
	 * {@code distStorage}; internal nodes have their data split between
	 * their two children and written to the next level's file. When a sweep
	 * produces no new nodes, all stored distributions are normalized in
	 * place (probabilities and backoff mass sum to 1).
	 *
	 * @param genData scratch state produced by the first-stage initialize
	 * @throws IOException on failure reading/writing the level files
	 */
	private void initialize(GenerationData genData) throws IOException {
		final long overtMask = Experiment.getInstance().getTupleDescription().getOvertFactorsMask();
		final NgramCounts ngramCounts = genData.ngramCounts;
		
		if (dumpNgramCounts) {
			PrintWriter ngramOut = new PrintWriter(new File("dump-"+lm.getId()+"-counts"));
			genData.dumpNgramCounts(ngramOut);
			ngramOut.close();
			System.err.println("finished printing ngram counts");
		}
		
		int level = 0;
		File inputFile = genData.file;
		File outputFile;
		// one iteration per tree level: read level N, write level N+1
		while(true) {
			FileInputStream input = new FileInputStream(inputFile);
			TrainingDataNodeReader reader = new OnDiskTrainingDataNodeReader(input.getChannel());
			outputFile = new File(dataDir, makeFilename(lm.getOrder(), level+1));
			RandomAccessFile output = new RandomAccessFile(outputFile, "rw");
			TrainingDataNodeWriter writer = new OnDiskTrainingDataNodeWriter(output.getChannel());
			
			int nrNodes = 0;
			while(true) {
				ReadableTrainingDataNode dataNode = reader.getNext();
				if (dataNode == null) break;
				int nodeId = dataNode.getNodeId();
				
				// only consumed by splitData in the internal-node branch below
				Long2IntMap counts = new Long2IntMap();
	
				final BinaryTree<HistoryTreePayload> oldLeaf = genData.nodes[nodeId];
				
				if (oldLeaf == null) {
					System.err.printf("node %d is null!\n", nodeId);
					throw new Error("whoops!");
				}
				
				if (oldLeaf.isLeaf()) {
					// backoff leaves contribute nothing at this stage
					if (oldLeaf.getPayload().isBackoff) {
						dataNode.skipData();
						continue;
					}
					
					// compute and add the counts
					
					// fetch (or create) the distribution accumulator for each order
					Distribution distributions[] = new Distribution[lm.getOrder()-1];
					for(byte order=2; order <= lm.getOrder(); ++order) {
						Distribution dist = distStorage.get(new DistributionID(order, genData.clusterLists[nodeId].jointClusters[order-2]));
						if (dist == null) {
							dist = Distribution.emptyDistribution();
						}
						distributions[order-2] = dist;
					}
					
					while (dataNode.getData(0).hasNext()) {
						TrainingDataBlock block = dataNode.getData(0).next();
						for(ContextFuturesPair pair : block) {
							// keep only the overt factor bits of the context words
							long context[] = new long[lm.getOrder()-1];
							for(int i=0; i<context.length; ++i) {
								context[i] = pair.getContext().data[i] & overtMask;
							}
							// i = 0 is the bigram suffix, i = length-1 the full context
							for(int i=0; i<context.length; ++i) {
								long ngramContext[] = Arrays.copyOfRange(context, context.length-i-1, context.length);
								Trie trie = ngramCounts.findTrie(ngramContext);
								assert(trie != null);
								
								Smoother smoother = smoothers[i];
								Distribution dist = distributions[i];
								
								for(TupleCountPair tc : pair.getFutures()) {
									Trie subTrie = trie.getSubtrie(tc.tuple);
									int ngramCount = subTrie.count;
									
									// discount fraction: 1 - p_smoothed(c) * N / c
									// NOTE(review): divides by ngramCount — assumes every
									// future seen here has a nonzero trie count; confirm.
									double d = 1.0 - smoother.getProb(ngramCount) * smoother.getTotalCount() / ngramCount;
									// apply the discount scale (debug)
									d *= genData.discountScale;
									
									// (1-d) of the mass stays on the word, d goes to backoff
									dist.getDistribution().addAndGet(tc.tuple, tc.count * (1-d));
									dist.setBackoff(dist.getBackoff()
											+ (d * tc.count));
									dist.setTotalCount(dist
											.getTotalCount()
											+ tc.count);
								}
							}
						}
					}
					
					// persist only the orders that actually accumulated counts
					for(byte order=2; order<=lm.getOrder(); ++order) {
						if (distributions[order-2].getTotalCount() > 0) {
							distStorage.put(new DistributionID(order, genData.clusterLists[nodeId].jointClusters[order-2]), distributions[order-2]);
						}
					}
					
				} else {
					// split the data
					BinaryTree<HistoryTreePayload> left = oldLeaf.getLeft();
					BinaryTree<HistoryTreePayload> right = oldLeaf.getRight();

					int leftNodeId = left.getPayload().clusterid;
					int rightNodeId = right.getPayload().clusterid;
					
					// allocate fresh backoff clusters for orders the question can see
					// NOTE(review): nrBackoffClusters is never initialized in this
					// file (only a commented-out block does so) — for lm.getOrder()
					// >= 4 this would NPE; confirm it is set elsewhere or that only
					// lower orders are used in practice.
					Question question = oldLeaf.getPayload().question;
					for(int order=2; order<lm.getOrder()-1; ++order) {
						if (1-question.getTimeOffset() <= order) {
							// split the order
							int leftClusterId = ++genData.nrBackoffClusters[order-2];
							int rightClusterId = ++genData.nrBackoffClusters[order-2];
							genData.clusterLists[leftNodeId].jointClusters[order-2] = leftClusterId;
							genData.clusterLists[rightNodeId].jointClusters[order-2] = rightClusterId;
						}
					}
					
					WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
					WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
					
					writer.add(leftDataNode);
					writer.add(rightDataNode);
					
					TrainingDataUtil.splitData(dataNode.getData(0), oldLeaf.getPayload().question, 
							rightDataNode.getData(0), leftDataNode.getData(0), counts);
		
					nrNodes += 2;
				}
			}
			// NOTE(review): deletes the input file while its stream is still
			// open (works on POSIX, fails on Windows), and the very first
			// iteration deletes the caller-supplied genData.file — confirm
			// both are intended.
			inputFile.delete();
			input.close();
			output.close();
			++level;
			inputFile = outputFile;
			
			// no splits happened: the last output file is empty, remove it and stop
			if (nrNodes == 0) {
				inputFile.delete();
				break;
			}
		}
		
		// normalize the distributions
		StoredIterator<Map.Entry<DistributionID, Distribution>> iterator = ((StoredSortedEntrySet<DistributionID, Distribution>) distStorage.entrySet()).storedIterator();
		while(iterator.hasNext()) {
			Map.Entry<DistributionID, Distribution> entry = iterator.next();
			Distribution dist = entry.getValue();
			double revTotalCount = 1.0 / dist.getTotalCount();
			dist.setBackoff(dist.getBackoff() * revTotalCount);
			double totalProb = 0;
			for(Long2DoubleMap.Iterator it = dist.getDistribution().iterator(); it.hasNext(); ) {
				Long2DoubleMap.Entry e = it.next();
				double prob = e.getValue() * revTotalCount;
				e.setValue(prob);
				totalProb += prob;
			}
			// after normalization the explicit mass plus backoff must be 1
			assert(ProbMath.approxEqual(totalProb + dist.getBackoff(), 1.0));
			
			// write the normalized distribution back through the entry
			entry.setValue(dist);
			//iterator.set(entry);
		}
		iterator.close();
	}
	
	/**
	 * Returns the fully interpolated distribution for a history-tree node,
	 * or the empty distribution when no cluster lists were registered for it
	 * (e.g. a unigram model, where node2ClusterLists is never populated).
	 */
	public Distribution computeDistribution(BinaryTree<HistoryTreePayload> node) {
		final ClusterLists lists = node2ClusterLists.get(node);
		return lists == null
				? Distribution.emptyDistribution()
				: computeDistribution(lists, lm.getOrder());
	}
	
	/**
	 * Recursively builds the interpolated distribution for {@code clusterLists}
	 * at the given order: the stored order-specific probabilities plus the
	 * (order-1) backoff distribution weighted by the stored backoff mass.
	 * Recursion bottoms out at order 1 with the precomputed unigram
	 * distribution. The result carries backoff 0 — all mass is explicit.
	 *
	 * Improvement over the original: the stored distribution for this order
	 * is looked up BEFORE recursing, so a missing distribution returns empty
	 * immediately instead of computing the whole backoff chain (read-only
	 * work) and then discarding it.
	 */
	private Distribution computeDistribution(final ClusterLists clusterLists, byte order) {
		if (order == 1) {
			return unigramDistribution;
		}
		
		Distribution currentDist = distStorage.get(new DistributionID(order, clusterLists.jointClusters[order-2]));
		if (currentDist == null) {
			return Distribution.emptyDistribution();
		}
		
		Distribution boDist = computeDistribution(clusterLists, (byte) (order-1));
		Long2DoubleMap backoffDist = boDist.getDistribution();
		
		Long2DoubleMap theDist = new Long2DoubleMap(backoffDist.size());
		
		// start with the probabilities explicitly stored at this order
		for(Long2DoubleMap.Iterator it = currentDist.getDistribution().iterator(); it.hasNext();) {
			Long2DoubleMap.Entry entry = it.next();
			theDist.addAndGet(entry.getKey(), entry.getValue());
		}
		
		// spread this order's backoff mass according to the lower-order model
		for(Long2DoubleMap.Iterator it = backoffDist.iterator(); it.hasNext(); ) {
			Long2DoubleMap.Entry entry = it.next();
			theDist.addAndGet(entry.getKey(), entry.getValue() * currentDist.getBackoff());
		}
		
		return new Distribution(theDist, currentDist.getTotalCount(), 0);
	}
}
