/**
 * 
 */
package edu.umd.clip.lm.model.decoding;

import edu.berkeley.nlp.util.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.factors.Dictionary;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.Context;
import edu.umd.clip.lm.model.decoding.FuturePredictions.FuturePrediction;
import edu.umd.clip.lm.model.decoding.FuturePredictions.PredictionKey;
import edu.umd.clip.lm.model.decoding.TrellisSlice.SliceItem;
import edu.umd.clip.lm.util.tree.*;
import edu.umd.clip.lm.questions.*;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.storage.*;
import edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key;

import java.util.*;
import java.util.concurrent.atomic.AtomicLong;

import org.w3c.dom.*;
/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class Decoder extends XMLizable {
	/**
	 * Holder for decoding wall-clock statistics (all values in nanoseconds).
	 * The instance fields accumulate per-decoder phase timings (updated from
	 * {@code partition()}); the static {@link AtomicLong}s are process-wide
	 * counters shared by every Decoder instance.
	 */
	public static class TimingData {
		// per-instance phase accumulators
		private long totalPrologTime;
		private long totalPartitionTime;
		private long totalMergeTime;
		private long totalStorageTime;
		private long totalTreeMergingTime;

		// process-wide counters/timers shared across all decoders
		private static AtomicLong totalProbTreeRequestCount = new AtomicLong();
		private static AtomicLong totalProbTreeRequestTime = new AtomicLong();
		private static AtomicLong totalPrePartitionTime = new AtomicLong();
		private static AtomicLong totalSplittingCount = new AtomicLong();
		private static AtomicLong totalSplittingTime = new AtomicLong();
		private static AtomicLong totalCuttingCount = new AtomicLong();
		private static AtomicLong totalCuttingTime = new AtomicLong();
		private static AtomicLong totalQuestionTime = new AtomicLong();
		private static AtomicLong totalPredictionTime = new AtomicLong();
		private static AtomicLong totalQuestionCount = new AtomicLong();
		private static AtomicLong totalFwdSplittingTime = new AtomicLong();

		/**
		 * Creates a timing record pre-loaded with the given accumulator values.
		 */
		public TimingData(long prologTime, long partitionTime,
				long mergeTime, long storageTime,
				long treeMergingTime) {
			totalPrologTime = prologTime;
			totalPartitionTime = partitionTime;
			totalMergeTime = mergeTime;
			totalStorageTime = storageTime;
			totalTreeMergingTime = treeMergingTime;
		}

		/** @return accumulated prolog (setup) time */
		public long getTotalPrologTime() {
			return totalPrologTime;
		}

		/** @param totalPrologTime new prolog-time accumulator value */
		public void setTotalPrologTime(long totalPrologTime) {
			this.totalPrologTime = totalPrologTime;
		}

		/** @return accumulated partitioning time */
		public long getTotalPartitionTime() {
			return totalPartitionTime;
		}

		/** @param totalPartitionTime new partition-time accumulator value */
		public void setTotalPartitionTime(long totalPartitionTime) {
			this.totalPartitionTime = totalPartitionTime;
		}

		/** @return accumulated item-merging time */
		public long getTotalMergeTime() {
			return totalMergeTime;
		}

		/** @param totalMergeTime new merge-time accumulator value */
		public void setTotalMergeTime(long totalMergeTime) {
			this.totalMergeTime = totalMergeTime;
		}

		/** @return accumulated storage-access time */
		public long getTotalStorageTime() {
			return totalStorageTime;
		}

		/** @param totalStorageTime new storage-time accumulator value */
		public void setTotalStorageTime(long totalStorageTime) {
			this.totalStorageTime = totalStorageTime;
		}

		/** @return accumulated tree-merging time */
		public long getTotalTreeMergingTime() {
			return totalTreeMergingTime;
		}

		/** @param totalTreeMergingTime new tree-merging-time accumulator value */
		public void setTotalTreeMergingTime(long totalTreeMergingTime) {
			this.totalTreeMergingTime = totalTreeMergingTime;
		}
	}

	// XML element names used by createXML() and the XML-configured constructor.
	private static final String XML_HARD_CACHE_SIZE_ELEMENT_NAME = "hard-cache-size";
	private static final String XML_SOFT_CACHE_SIZE_ELEMENT_NAME = "soft-cache-size";
	private static final String XML_PRUNING_THRESHOLD_ELEMENT_NAME = "pruning-threshold";
	public static final String XML_ELEMENT_NAME = "decoder";

	// Per-instance debug level; init() mirrors it into the static DEBUG.
	protected int debug = 0;
	protected static int DEBUG = 0;
	
	private LanguageModel lm;
	// Items whose mass falls below bestProb * PRUNING_THRESHOLD are pruned in partition().
	private double PRUNING_THRESHOLD = 1E-6; 
	private int hardCacheSize = 400000;
	private int softCacheSize = 100000;
	
	// When enabled, slices older than lm order + 1 are turned into ghosts in evaluateSentence().
	private boolean useGhosts = true;
	private boolean useBackoff = true;
	
	// Backing store for probability-tree requests (see partition()).
	private AbstractProbTreeStorage storage;
	
	// Selects the interpolation scheme used in computeProbTree().
	private final static boolean USE_CUMULATIVE_BACKOFF = false;
	
	/**
	 * Creates a decoder for the given language model with default cache sizes
	 * and pruning threshold.
	 */
	public Decoder(LanguageModel lm) {
		this.lm = lm;
		init();
	}
	
	/**
	 * Creates a decoder with an explicit hard cache size; the soft cache is
	 * sized at twice the hard cache.
	 * NOTE(review): this inverts the default ratio (hard=400000 &gt; soft=100000)
	 * -- confirm which ordering is intended.
	 */
	public Decoder(LanguageModel lm, int cacheSize) {
		this.lm = lm;
		this.hardCacheSize = cacheSize;
		this.softCacheSize = 2*cacheSize;
		init();
	}
	
	/**
	 * Creates a decoder configured from XML.  Cache sizes are read from the
	 * LM's decoder-config element; the pruning threshold is read from the
	 * supplied {@code xmlnode}.
	 * NOTE(review): looking the threshold up in xmlnode while the cache sizes
	 * come from config looks inconsistent -- confirm this is intentional.
	 */
	public Decoder(LanguageModel lm, Element xmlnode) {
		this.lm = lm;
		Element config = lm.getDecoderConfig();
		if (config != null) {
			Element elem = XMLUtils.getFirstElementByTagName(config, XML_HARD_CACHE_SIZE_ELEMENT_NAME);
			if (elem != null) {
				hardCacheSize = Integer.parseInt(elem.getTextContent());
			}
			
			elem = XMLUtils.getFirstElementByTagName(config, XML_SOFT_CACHE_SIZE_ELEMENT_NAME);
			if (elem != null) {
				softCacheSize = Integer.parseInt(elem.getTextContent());
			}
			
			elem = XMLUtils.getFirstElementByTagName(xmlnode, XML_PRUNING_THRESHOLD_ELEMENT_NAME);
			if (elem != null) {
				PRUNING_THRESHOLD = Double.parseDouble(elem.getTextContent());
			}
		}
		init();
	}
 
	/**
	 * Shared constructor tail: resolves the backoff LM, picks up the debug
	 * level from the Experiment singleton, and lazily installs the shared
	 * empty DecoderCompactProbTree.
	 */
	private void init() {
		Experiment experiment = Experiment.getInstance(); 
		backoffLM = experiment.getLM(lm.getBackoffLM());
		debug = experiment.getDebugDecoder();
		// mirror the instance setting into the static flag
		DEBUG = debug;
		
		// NOTE(review): unsynchronized check-then-set on shared static state;
		// racy if decoders are constructed concurrently -- confirm single-threaded init.
		if (DecoderCompactProbTree.getEmptyTree() == null) {
			DecoderCompactProbTree emptyTree = DecoderCompactProbTree.constructEmptyTree(experiment.getHFT().getTree());
			DecoderCompactProbTree.setEmptyTree(emptyTree);
		}
	}
	
	/**
	 * No-op: the cache size can currently only be set at construction time.
	 * TODO(review): implement or deprecate this setter.
	 */
	public void setCacheSize(int cacheSize) {
	}
	
	/**
	 * Serializes this decoder's configuration (cache sizes and pruning
	 * threshold) as a {@code <decoder>} element.
	 *
	 * @param doc document used as the element factory
	 * @return the populated configuration element
	 */
	@Override
	public Element createXML(Document doc) {
		Element element = doc.createElement(XML_ELEMENT_NAME);
		
		Element e;
		e = doc.createElement(XML_HARD_CACHE_SIZE_ELEMENT_NAME);
		e.setTextContent(Integer.toString(hardCacheSize));
		element.appendChild(e);
		
		e = doc.createElement(XML_SOFT_CACHE_SIZE_ELEMENT_NAME);
		e.setTextContent(Integer.toString(softCacheSize));
		// bug fix: this child was created and populated but never attached,
		// so the soft cache size was silently dropped from the output
		element.appendChild(e);
		
		e = doc.createElement(XML_PRUNING_THRESHOLD_ELEMENT_NAME);
		e.setTextContent(Double.toString(PRUNING_THRESHOLD));
		element.appendChild(e);
		
		return element;
	}

	/** @return the XML tag name ("decoder") this object serializes under */
	@Override
	public String getElementName() {
		return XML_ELEMENT_NAME;
	}

	/**
	 * Immutable hash key pairing a history-cluster id with the overt factors
	 * of a word.
	 */
	static class ProbTreeKey {
		public final int clusterid;
		public final FactorTuple overtFactors;
		
		public ProbTreeKey(int clusterid, FactorTuple overtFactors) {
			this.clusterid = clusterid;
			this.overtFactors = overtFactors;
		}

		@Override
		public boolean equals(Object obj) {
			if (!(obj instanceof ProbTreeKey)) {
				return false;
			}
			ProbTreeKey that = (ProbTreeKey) obj;
			return clusterid == that.clusterid && overtFactors.equals(that.overtFactors);
		}

		@Override
		public int hashCode() {
			return (clusterid * 13) ^ overtFactors.hashCode();
		}

		@Override
		public String toString() {
			return String.format("%d:%s", clusterid, overtFactors.toString());
		}
	}

	/**
	 * Appends a word (given as packed overt factors) to the sentence context.
	 *
	 * @return the updated context
	 */
	public SentenceContext addWord(SentenceContext ctx, long overtFactors) {
		return ctx.addWord(overtFactors);
	}

	/**
	 * Prints diagnostic statistics for the current slice: item counts of the
	 * current and previous slice, back-link fan-in, and the incremental
	 * probability contributed by the last word.
	 */
	public void debugContext(SentenceContext ctx) {
		double prob = ctx.getCurrentSlice().getTotalProb();
		double oldProb = ctx.getSliceByOffset(-1).getTotalProb();
		// probabilities should never be negative; report if they are
		if (prob < 0 || oldProb < 0) {
			System.err.printf("prob=%g, oldProb=%g\n", prob, oldProb);
		}
		double logProb = ctx.getLogProb();
		// log10 probability of the last word, corrected by the slice's rescaling factor
		double logDiff = Math.log10(prob / oldProb) + ctx.getCurrentSlice().getSliceLogScale();
		int totalPrevLinks = 0;
		int maxPrevLinks = 0;
		
		// tally backward-link fan-in over all items of the current slice
		for(ArrayList<SliceItem> prevs : ctx.getCurrentSlice().getPrevious().curr2prev.values()) {
			if (prevs != null) {
				if (prevs.size() > maxPrevLinks) maxPrevLinks = prevs.size();
				totalPrevLinks += prevs.size();
			}
		}
		System.out.printf("Current Word: %s, Items: [%d] - %d, [%d] - %d, backlinks: maxPrev=%d, totalPrev=%d, prob = %g, diff = %g [%g]\n",
				FactorTuple.toStringNoNull(ctx.getCurrentSlice().getOvertFactors()),
				ctx.getCurrentSlice().getPrevious().getTime(),
				ctx.getCurrentSlice().getPrevious().numItems(),
				ctx.getCurrentSlice().getTime(),
				ctx.getCurrentSlice().numItems(),
				maxPrevLinks,
				totalPrevLinks,
				logProb,
				Math.pow(10.0, logDiff),
				logDiff);
	}
	
	/** Evaluates a sentence given as packed factor tuples, without constraints. */
	public double evaluateSentence(long[] sentence) {
		return evaluateSentence(sentence, false);
	}
	
	/**
	 * Wraps each packed factor tuple in a {@link FactorTuple} and delegates to
	 * the collection-based overload.
	 */
	public double evaluateSentence(long[] sentence, boolean useConstraints) {
		ArrayList<FactorTuple> tupleList = new ArrayList<FactorTuple>(sentence.length);
		for (int i = 0; i < sentence.length; ++i) {
			tupleList.add(new FactorTuple(sentence[i]));
		}
		return evaluateSentence(tupleList, useConstraints);
	}
	
	/**
	 * Evaluates a sentence by advancing the trellis one word at a time and
	 * returns its log probability.  When {@code useConstraints} is set, hidden
	 * factors present in the input tuples are applied as constraints on the
	 * corresponding slice.
	 */
	public double evaluateSentence(Collection<FactorTuple> sentence, boolean useConstraints) {
		SentenceContext ctx = createContext();
		final FactorTupleDescription desc = Experiment.getInstance().getTupleDescription(); 
		final long overtMask = desc.getOvertFactorsMask();
		for(FactorTuple tuple : sentence) {
			// split the tuple: hidden bits become an (optional) constraint,
			// only the overt bits are fed to the decoder
			long constraints = tuple.getBits() & desc.getHiddenFactorsMask();
			tuple = new FactorTuple(tuple.getBits() & overtMask);
			ctx = ctx.addWord(tuple.getBits());
			partition(ctx);
			ctx.setLastSliceTotalProb(ctx.getCurrentSlice().getTotalProb());
			if (useConstraints && constraints != 0) {
				applyConstraint(ctx, constraints);
			}
			if (debug > 0) {
				debugContext(ctx);
				
				if (debug > 1) {
					ctx.getTrellis().dumpTrellis(String.format("trellis-%d-%s.dot", 
							ctx.getTrellis().getCurrentSlice().getTime(), tuple.toStringNoNull().replace('/', ' ')));
				}
			}
			if (useGhosts) {
				// slices older than the LM order are no longer needed in full;
				// turn them into ghosts to free memory
				if (ctx.getCurrentSlice().getTime() > lm.getOrder()+1) {
					ctx.getSliceByOffset(- (lm.getOrder() + 1)).makeGhost();
				}
			}
			//assert(ctx.getCurrentSlice().checkLinks());
		}
		return ctx.getLogProb();
	}
	
	/**
	 * Computes the probability of an n-gram with the hidden factors (tags)
	 * marginalized out.  The n-gram is ordered w_{i-n+1}...w_{i-1} w_i and
	 * {@code lms} are ordered unigram, bigram, etc.  Leading sentence-start
	 * symbols are stripped and the LM order offset accordingly.  Returns
	 * logP(ngram) - logP(ngram without its last word), i.e. the conditional
	 * log probability of the final word.
	 */
	public double computeNgramProbability(long[] ngram, LanguageModel[] lms) {
		//assert(ngram.length == lms.length);
		
		SentenceContext ctx = createContext();
		final FactorTupleDescription desc = Experiment.getInstance().getTupleDescription(); 
		final long overtMask = desc.getOvertFactorsMask();
		double n_1_prob = 0;
		
		final byte wordFactorIdx = Experiment.getInstance().getTupleDescription().getMainFactorIndex();
		
		// find the first non-<s> word
		byte addedStarts=0;
    	for(; addedStarts<ngram.length; ++addedStarts) {
    		if (!Dictionary.isStart(FactorTuple.getValue(ngram[addedStarts], wordFactorIdx))) {
    			if (addedStarts > 0) {
    				// drop the leading start symbols from the ngram
    				ngram = Arrays.copyOfRange(ngram, addedStarts, ngram.length);
    			}
    			break;
    		}
    	}

		//int addedStarts = lms.length - ngram.length;
		
		// decode the remaining words with the per-position LM's decoder
		for(int i=addedStarts; i<lms.length; ++i) {
			ctx = ctx.addWord(ngram[i-addedStarts] & overtMask);
			lms[i].getDecoder().partition(ctx);
			ctx.setLastSliceTotalProb(ctx.getCurrentSlice().getTotalProb());
			if (i == lms.length-2) {
				// log probability of the (n-1)-gram prefix
				n_1_prob = ctx.getLogProb();
			}
		}
		double n_prob = ctx.getLogProb();
		return n_prob - n_1_prob;
	}
	
	/**
	 * Restricts the current slice to hypotheses compatible with the given
	 * hidden-factor constraint.  The constraint is first expressed as the
	 * common binary prefix of all matching hidden-factor tuples; each item's
	 * probability tree is then cut to that prefix and items with no remaining
	 * mass are removed.  Probabilities are deliberately not re-scaled, so the
	 * trellis carries a joint (not conditional) probability afterwards.
	 */
	private void applyConstraint(SentenceContext ctx, long constraint) {
		// first, express the constraint in the form of a binary prefix
		BinaryPrefix thePrefix = null;
		final int hiddenFactors[] = FactorTuple.getHiddenValues(constraint);
		
		Map<FactorTuple, BinaryPrefix> binaryPrefixes = Experiment.getInstance().getHiddenPrefixes();
		
		for(Map.Entry<FactorTuple, BinaryPrefix> entry : binaryPrefixes.entrySet()) {
			int hidFactors[] = entry.getKey().getHiddenValues();
			// a tuple matches when every non-null constrained factor agrees
			boolean matched = true;
			for(byte i = 0; i < hiddenFactors.length; ++i) {
				if (hiddenFactors[i] != Dictionary.getNull() && hiddenFactors[i] != hidFactors[i]) {
					matched = false;
					break;
				}
			}
			if (matched) {
				BinaryPrefix prefix = entry.getValue();
				//System.err.printf("(%s) %s matched\n", entry.getKey().toStringNoNull(), prefix);
				// fold all matching prefixes into their longest common prefix
				if (thePrefix == null) {
					thePrefix = prefix;
				} else {
					thePrefix = BinaryPrefix.getCommonPrefix(thePrefix, prefix);
				}
			}
		}
		
		if (thePrefix == null || thePrefix.getSize() == 0) {
			System.err.println("Hidden factors:" + Arrays.toString(hiddenFactors));
			System.err.printf("Can't find a common prefix for %s\n", FactorTuple.toStringNoNull(constraint));
			return;
		}
		
		HashSet<SliceItem> itemsToRemove = new HashSet<SliceItem>();
		
		for(SliceItem item : ctx.getCurrentSlice().getElements()) {
			ProbTree probTree = item.getProbTree();
			double totalProb = probTree.getTotalProb();
			
			ProbTree cutProbTree = probTree.cut(thePrefix);
			if (probTree == cutProbTree) {
				// the constraint is fully matched
				//System.err.printf("item %s fully matched the constraint %s\n", item, thePrefix);
				continue;
			}
			if (cutProbTree == null) {
				// not matched, remove the item
				itemsToRemove.add(item);
				continue;
			}
			// re-scale the remainder
			//System.err.printf("rescaling item %s by %g\n", item.toString(), totalProb / cutProbTree.getTotalProb());
			// do not re-scale (this way we compute joint probability, as opposed to conditional) 
			//cutProbTree.scale(totalProb / cutProbTree.getTotalProb());
			
			item.tree = cutProbTree;
		}
		if (itemsToRemove.size() > 0) {
			System.err.printf("removing %d items due to constraints\n", itemsToRemove.size());
			ctx.getCurrentSlice().removeNodes(itemsToRemove);
		}
	}
	
	/**
	 * @return the backoff weight associated with the cluster at the given
	 *         history-tree node
	 */
	private double getBackoff(BinaryTree<HistoryTreePayload> node) {
		int clusterid = node.getPayload().clusterid;
		return lm.getDecodingRuntime().getBackoff(clusterid);
	}
	
	/**
	 * Materializes the accumulated future predictions of {@code slice} into
	 * concrete slice items: waits for the outstanding probability-tree
	 * requests, builds an interpolated tree per prediction, and wires the
	 * forward/backward links between the new items and their predecessors.
	 */
	private void generatePredictions(SentenceContext ctx, TrellisSlice slice) {
		final int lmSequence[] = Experiment.getInstance().getLmIdSequence(lm);
		FuturePredictions predictions = slice.getPredictions();
		long overtFactors = slice.getOvertFactors();

		Map<Key,OnDiskCompactProbTree> allProbTrees;
		{
			// block until the prob-tree fetches issued during partitioning complete
			RequestBundle<Key, OnDiskCompactProbTree> currentRequests = ctx.getCurrentRequests();
			try {
				currentRequests.sync();
			} catch(InterruptedException e) {
				// restore the interrupt flag so callers can still observe the interruption
				Thread.currentThread().interrupt();
				e.printStackTrace();
			}
			allProbTrees = currentRequests.getResults();
		}
		
		
		for(Map.Entry<PredictionKey, FuturePrediction> entry : predictions.getPredictions().entrySet()) {
			PredictionKey key = entry.getKey();
			FuturePrediction prediction = entry.getValue();
			ClusterId clusterID = new ClusterId();
			
			OnDiskCompactProbTree probTree = computeProbTree(lmSequence, key.nodes, overtFactors, allProbTrees, clusterID);
			
			if (probTree != null) {
				probTree.normalize();
				
				ArrayList<SliceItem> prevLinks = prediction.getPrevLinks();
				
				SliceItem item = new SliceItem(ProbTree.makeProbTree(probTree).compact(), 0, clusterID);
				
				slice.curr2prev.put(item, prevLinks);
				
				// link the new item back to each predecessor and accumulate its mass
				for(SliceItem prevItem : prevLinks) {
					item.addPrevProb(prevItem.getPrevProb() * prevItem.tree.getTotalProb());
					ArrayList<SliceItem> lst = slice.prev2curr.get(prevItem);
					if (lst == null) {
						lst = new ArrayList<SliceItem>(2);
						slice.prev2curr.put(prevItem, lst);
					}
					lst.add(item);
				}
				slice.insertItem(item);
			}
		}
		slice.clearPredictions();
	}
	
	/**
	 * Builds the interpolated probability tree for one prediction: walks the
	 * per-LM history-tree nodes in interpolation order, scales each LM's
	 * distribution by the appropriate interpolation/backoff weight, and merges
	 * the scaled trees.
	 *
	 * @param lmIds LM ids in interpolation order
	 * @param nodes per-LM history-tree positions, indexed by LM id (null entries skipped)
	 * @param overtFactors overt factors of the predicted word
	 * @param allProbTrees pre-fetched trees keyed by (lm, cluster, overt factors)
	 * @param clusterID out-parameter collecting each LM's cluster id (may be null)
	 * @return the merged tree, or null if no LM contributed a distribution
	 */
	public OnDiskCompactProbTree computeProbTree(int lmIds[], BinaryTree<HistoryTreePayload> nodes[], 
			long overtFactors, Map<Key,OnDiskCompactProbTree> allProbTrees, ClusterId clusterID) 
	{
		StringBuilder debugOut = null;
		
		if (debug > 1) {
			// bug fix: was new StringBuilder('[') -- the char promotes to int and
			// becomes the initial *capacity* (91), so the bracket never appeared
			debugOut = new StringBuilder("[");
		}
		
		ArrayList<OnDiskCompactProbTree> probTrees = new ArrayList<OnDiskCompactProbTree>(nodes.length);

		// running interpolation weight left for the remaining (lower-order) LMs
		double scale = 1.0;
		for(int i = 0; i < lmIds.length; ++i) {
			int lmid = lmIds[i];
			
			BinaryTree<HistoryTreePayload> node = nodes[lmid];
			if (node == null) continue;
			int clusterid = node.getPayload().clusterid;
			
			if (clusterID != null) {
				clusterID.setId(lmid, clusterid);
			}
			
			LanguageModel currentLM = Experiment.getInstance().getLmByNumId(lmid);
			Decoder decoder = currentLM.getDecoder();

			if (node.getPayload().isBackoff) {
				if (node.getPayload().lambda == 0.0) {
					// old-style decoding where we only use the backoff
				} else {
					// a backoff cluster, take the grandparent for interpolation
					BinaryTree<HistoryTreePayload> grandparent = node.getParent().getParent();

					Key storageKey = new Key(decoder.getLm().getIdNum(), grandparent.getPayload().clusterid, overtFactors);
					OnDiskCompactProbTree gpProbTree = allProbTrees.get(storageKey);
					
					if (gpProbTree == null && !allProbTrees.containsKey(storageKey)) {
						System.err.println("bad key (backoff)");
					}
					//ProbTree gpProbTree = decoder.getInterpolatedProbTree(grandparent, overtFactors);
					double gpBackoff = decoder.getBackoff(grandparent);
					double lambda = node.getPayload().lambda;
					
					if (gpProbTree != null && gpProbTree.getScale() > 0) {
						// scale
						double currentScale;
						
						if (USE_CUMULATIVE_BACKOFF) {
							currentScale = scale * (1 - lambda) * (1 - gpBackoff);
						} else {
							currentScale = scale * (1 - lambda);
						}
						
						if (debugOut != null) {
							if (USE_CUMULATIVE_BACKOFF) {
								debugOut.append(String.format("%s=%g(%g) ", currentLM.getId(), 
										currentScale * (1 - (lambda + (1 - lambda) * gpBackoff)), gpProbTree.getTotalProb()));
							} else {
								debugOut.append(String.format("%s=%g(%g) ", currentLM.getId(), 
										currentScale * (1 - lambda), gpProbTree.getTotalProb()));								
							}
						}

						// clone before scaling so the cached tree stays untouched
						gpProbTree = (OnDiskCompactProbTree) gpProbTree.clone();
						gpProbTree.scale(currentScale);
						probTrees.add(gpProbTree);

					}
					if (USE_CUMULATIVE_BACKOFF) {
						scale *= lambda + (1 - lambda) * gpBackoff;
					} else {
						scale *= lambda;
					}
				}
			} else {
				Key storageKey = new Key(decoder.getLm().getIdNum(), node.getPayload().clusterid, overtFactors);
				OnDiskCompactProbTree probTree = allProbTrees.get(storageKey);
				//ProbTree probTree = decoder.getInterpolatedProbTree(node, slice.getOvertFactors().getBits());
				if (probTree == null && !allProbTrees.containsKey(storageKey)) {
					System.err.println("bad key");
				}

				double boWeight = decoder.getBackoff(node);
				
				if (probTree != null && probTree.getScale() > 0) {
					// the distribution is already scaled to (1.0 - getBackoff(node)) during the data population, 
					// when USE_CUMULATIVE_BACKOFF 
					double currentScale = scale;
					if (!USE_CUMULATIVE_BACKOFF) {
						currentScale *= (1.0 - boWeight);
					}
					if (debugOut != null) {
						debugOut.append(String.format("%s=%g(%g) ", currentLM.getId(), 
								currentScale * (1-boWeight), probTree.getTotalProb()));
					}
					if (currentScale != 1.0) {
						// clone before scaling so the cached tree stays untouched
						probTree = (OnDiskCompactProbTree) probTree.clone();
						probTree.scale(currentScale);
					}
					probTrees.add(probTree);
				}
				scale *= boWeight;
			}
		}
		if (debug > 2 && clusterID != null) {
			System.out.println(clusterID);
		}
		if (debugOut != null) {
			debugOut.append(']');
			System.out.println(debugOut.toString());
		}
		
		OnDiskCompactProbTree probTree = null;
		if (probTrees.size() == 1) {
			probTree = (OnDiskCompactProbTree) probTrees.get(0).clone();
		} else if (probTrees.size() > 1) {
			probTree = OnDiskCompactProbTree.merge(probTrees);
		}
		
		return probTree;
	}
	
	/**
	 * Key used to bin slice items for merging: two items are considered
	 * equivalent when they share both cluster id and probability tree.
	 */
	private static class MergingKey {
		public final SliceItem item;

		public MergingKey(SliceItem item) {
			assert(item != null);
			this.item = item;
		}

		@Override
		public int hashCode() {
			final int prime = 31;
			int h = 1;
			h = prime * h + item.clusterid.hashCode();
			h = prime * h + item.tree.hashCode();
			return h;
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			if (obj == null || getClass() != obj.getClass()) {
				return false;
			}
			MergingKey that = (MergingKey) obj;
			return item.clusterid.equals(that.item.clusterid)
				&& item.tree.equals(that.item.tree);
		}
	}
	
	/**
	 * Merges items that share the same cluster id and probability tree (and a
	 * single, common forward link) into one item: forward and backward links
	 * are rewired onto the surviving item and the probability mass is summed.
	 *
	 * @param items candidate items to bin and merge
	 * @param iterationCount out-parameter accumulating work done, for stats
	 * @return the number of items merged away
	 */
	private int tryToMerge(Collection<SliceItem> items, MutableInteger iterationCount) {
		int mergedCount = 0;
		// bin items by (clusterid, tree); items in the same bin are mergeable
		HashMap<MergingKey,ArrayList<SliceItem>> bins = new HashMap<MergingKey,ArrayList<SliceItem>>(items.size());
		for(SliceItem item : items) {
			iterationCount.add(1);
			if (item.slice.getNext().prev2curr.get(item).size() > 1) {
				// we can't merge items with multiple forward links
				continue;
			}
			
			MergingKey key = new MergingKey(item);
			ArrayList<SliceItem> lst = bins.get(key);
			if (lst == null) {
				lst = new ArrayList<SliceItem>(3);
				bins.put(key, lst);
			}
			lst.add(item);
		}
		
		// items in one bin can be merged
		for(ArrayList<SliceItem> lst : bins.values()) {
			if (lst.size() == 1) continue;
			
			// the first item of the bin survives; all others merge into it
			SliceItem item = lst.get(0);
			TrellisSlice slice = item.slice;
			TrellisSlice nextSlice = slice.getNext();
			
			ArrayList<SliceItem> itemPrevLinks = slice.getPreviousItems(item);
			ArrayList<SliceItem> itemForwardLinks = nextSlice.prev2curr.get(item);
			assert(itemForwardLinks.size() == 1);
			//System.out.println("merging " + Arrays.toString(lst.toArray()));
			
			for(int i=1; i<lst.size(); ++i) {
				SliceItem mItem = lst.get(i);
				// merge mItem into item
			
				// merge forward links
				ArrayList<SliceItem> mItemForwardLinks = nextSlice.prev2curr.get(mItem);
				assert(mItemForwardLinks.size() == 1 && mItemForwardLinks.get(0) == itemForwardLinks.get(0));
				nextSlice.getPreviousItems(mItemForwardLinks.get(0)).remove(mItem);
				
				// merge backward links
				ArrayList<SliceItem> mItemPrevLinks = slice.getPreviousItems(mItem);
				itemPrevLinks.addAll(mItemPrevLinks);
				for(SliceItem mPrevItem : mItemPrevLinks) {
					// repoint each predecessor's forward link from mItem to item
					ArrayList<SliceItem> l = slice.prev2curr.get(mPrevItem);
					int idx = l.indexOf(mItem);
					l.set(idx, item);
					iterationCount.add(1);
				}
				
				// update probability
				item.addPrevProb(mItem.getPrevProb());
				
				// remove stuff
				nextSlice.prev2curr.remove(mItem);
				slice.curr2prev.remove(mItem);
				//slice.getElements().remove(mItem);
				++mergedCount;
			}
		}
		if (mergedCount > 0) {
			//TrellisSlice slice = items.iterator().next().slice;
			//slice.setElements(new LinkedList<SliceItem>(slice.curr2prev.keySet()));
		}
		return mergedCount;
	}
	
	/**
	 * Creates a fresh sentence context whose trellis is pre-populated with
	 * lm.getOrder()-1 start-symbol slices, each holding a single item that
	 * carries the start-prefix distribution with probability 1, chained by
	 * forward/backward links.
	 */
	@SuppressWarnings("unchecked")
	public SentenceContext createContext() {
		Experiment experiment = Experiment.getInstance();
		
		SentenceContext ctx = new SentenceContext(lm.getOrder()-1);
		ctx.items = new SliceItem[Math.max(1, lm.getOrder()-1)];
		ctx.queues = new LinkQueue[Math.max(1, lm.getOrder()-1)];
		ctx.predictions = new BinaryTree[experiment.getNumLMs()];
		ctx.setDebug(debug);

		// first slice: a single start item with no predecessor
		long startTuple = experiment.getTupleDescription().createStartTuple(); 
		BinaryPrefix startPrefix = experiment.getHiddenPrefix(startTuple);
		ctx.getTrellis().newSlice(startTuple);
		ProbTree startProbTree = new ProbTree(startPrefix);
		SliceItem sliceItem = new SliceItem(startProbTree, 1.0); 
		ctx.getCurrentSlice().insertItem(sliceItem);
		ctx.getCurrentSlice().curr2prev.put(sliceItem, null);
		
		// remaining start slices, each linked 1:1 to its predecessor
		for(int i=1; i<lm.getOrder()-1; ++i) {
			ctx.getTrellis().newSlice(startTuple);
			TrellisSlice currentSlice = ctx.getCurrentSlice();
			
			SliceItem newSliceItem = new SliceItem(startProbTree, 1.0);
			currentSlice.insertItem(newSliceItem);
			ArrayList<SliceItem> lst = new ArrayList<SliceItem>(1);
			lst.add(sliceItem);
			currentSlice.curr2prev.put(newSliceItem, lst);
			
			lst = new ArrayList<SliceItem>(1);
			lst.add(newSliceItem);
			currentSlice.prev2curr.put(sliceItem, lst);
			
			sliceItem = newSliceItem;
			// result intentionally ignored; presumably forces/caches the slice
			// total -- TODO(review) confirm
			currentSlice.getTotalProb();
		}
		ctx.setLastSliceTotalProb(1.0);
		return ctx;
	}

	private static final int QUEUE_SIZE = 256;
	// Orders items by probability, highest first.  Double.compare replaces the
	// subtraction/signum-cast idiom, which is a fragile comparator pattern.
	private static final Comparator<SliceItem> itemComparator = new Comparator<SliceItem>() {
		@Override
		public int compare(SliceItem arg0, SliceItem arg1) {
			// reverse (descending) order
			return Double.compare(arg1.getProb(), arg0.getProb());
		}
	};
	
	// Accumulated wall-clock statistics for this decoder (updated in partition()).
	private TimingData timingData = new TimingData(0, 0, 0, 0, 0);
	
	// Lower-order LM used for backoff; resolved in init().
	private LanguageModel backoffLM;
	
	/**
	 * Core decoding step: advances the trellis by one word.  Prunes the items
	 * of the previous slice, partitions each surviving item against the LM
	 * history trees, materializes the resulting predictions into the new
	 * slice, merges equivalent items in older slices, and rescales the new
	 * slice.  Phase timings are accumulated into {@link #timingData}.
	 */
	void partition(SentenceContext ctx) {
		long realStartTime = Timing.nanoTime();
		
		TrellisSlice currentSlice = null;
		currentSlice = ctx.getSliceByOffset(-1);
		SliceItem[] currentItems = currentSlice.getElements().toArray(new SliceItem[0]);
		
		if (currentItems.length == 0) {
			System.err.printf("no elements at %d in %s\n", currentSlice.getTime(), ctx.getTrellis());
			return;
		}
		// sort descending by probability so pruning can stop at the first miss
		Arrays.sort(currentItems, itemComparator);
		
		int itemsSize = Math.max(lm.getOrder()-1, 1); // NOTE(review): unused local
		double bestProb = currentItems[0].getPrevProb() * currentItems[0].tree.getTotalProb();
		
		//ArrayList<LinkQueue> itemLists = new ArrayList<LinkQueue>(itemsSize);
		LinkedList<SliceItem> currItems = new LinkedList<SliceItem>();
		
		// keep only items within PRUNING_THRESHOLD of the best item's mass
		int itemNum = 0;
		for(SliceItem currentItem : currentItems) {
			++itemNum;
			if (currentItem.getPrevProb() * currentItem.tree.getTotalProb() < bestProb * PRUNING_THRESHOLD) {
				//System.out.println("pruned!");
				break;
			}
			currItems.add(currentItem);
		}
		
		// NOTE(review): this loop has no observable effect -- `s` is never used;
		// looks like leftover from the commented-out queue logic below
		for(int i=2; i<lm.getOrder()-1; ++i) {
			// null out empty queues
			// items in the current slice (-1) have already null queue
			TrellisSlice s = ctx.getSliceByOffset(-i);
		}
		
		int numProbs = 0;
		long startTime = Timing.nanoTime();
		
		// bundle for the asynchronous prob-tree fetches issued during partitioning
		ctx.setCurrentRequests(new RequestBundle<Key,OnDiskCompactProbTree>(storage));
		
		// snapshot of the overt-factor history for answering overt questions
		final Context overtContext = new Context(lm.getOvertOrder());
		for(int i=0; i<overtContext.data.length; ++i) {
			overtContext.data[i] = ctx.getTrellis().getSliceByOffset(i + 1 - lm.getOvertOrder()).getOvertFactors();
		}

		// new decoding
		while(currItems.size() > 0) {
			SliceItem headItem = currItems.remove();
			newPartition(ctx, overtContext, headItem, currItems);
		}
		
		long afterPartionTime = Timing.nanoTime();
		
		int numPredictions = currentSlice.getNext().getPredictions().getPredictions().size();
		
		generatePredictions(ctx, currentSlice.getNext());
		long afterPredictionsTime = Timing.nanoTime();
		
		// now merge the elements
		MutableInteger mergeIterationCount = new MutableInteger();
		int mergedCount = 0;

		// walk back over the hidden-order window and merge equivalent predecessors
		for(int timeOffset = 0; timeOffset > 2 - lm.getHiddenOrder(); --timeOffset) {
			TrellisSlice nextSlice = ctx.getSliceByOffset(timeOffset);
			for(SliceItem item : nextSlice.getElements()) {
				/*
				LinkQueue q = item.getQueue();
				if (q != null) {
					HashSet<SliceItem> prunedItems = q.getPrunedElements();
					if (prunedItems != null && prunedItems.size() > 0) {
						prunedItems.iterator().next().slice.removeNodes(prunedItems);
					}		
					if (debug > 1) {
						System.out.printf("queue for %s, totalProb=%g, pruned=%g\n", item, q.getTotalProb(), q.getPrunedProbability());
					}
					item.setQueue(null);
				}
				*/
				mergedCount += tryToMerge(nextSlice.getPreviousItems(item), mergeIterationCount);
			} 
			//nextSlice.checkLinks();
		}
		
		long afterMergeTime = Timing.nanoTime();
		
		{
			// renormalize the new slice and record the scale in the context
			TrellisSlice nextSlice = currentSlice.getNext();
			double maxScale = nextSlice.getMaxScale();
			if (maxScale != 1.0) {
				nextSlice.scale(1.0 / maxScale);
				ctx.addLogScale(Math.log10(maxScale));
			}
		}
		if (debug > 0) {
			System.out.printf("[%d] : prep time %fs, %d iterations in %fs, %d predictions in %fs, merged %d (%d) in %fs.\n", currentSlice.getNext().getTime(), 
					((double)(startTime - realStartTime))/1000000000, numProbs, ((double)(afterPartionTime - startTime))/1000000000,
					numPredictions, ((double)(afterPredictionsTime - afterPartionTime))/1000000000,
					mergedCount, mergeIterationCount.intValue(), ((double)(afterMergeTime - afterPredictionsTime))/1000000000);
		}
		timingData.setTotalPrologTime(timingData.getTotalPrologTime() + startTime - realStartTime);
		timingData.setTotalPartitionTime(timingData.getTotalPartitionTime()
				+ afterPartionTime - startTime);
		TimingData.totalPredictionTime.addAndGet(afterPredictionsTime - afterPartionTime);
		timingData.setTotalMergeTime(timingData.getTotalMergeTime() + afterMergeTime - afterPredictionsTime);
	}

	/**
	 * Partitions an item's links into those that answered a hidden-factor
	 * question true vs. false.
	 */
	private static class SplitLinks {
		public ArrayList<SliceItem> trueLinks;
		public ArrayList<SliceItem> falseLinks;
		
		public SplitLinks() {
			trueLinks = new ArrayList<SliceItem>(5);
			falseLinks = new ArrayList<SliceItem>(5);
		}
		
		/** @return true when links fall on both sides of the question */
		public boolean isSplit() {
			return hasTrue() && hasFalse();
		}
		
		/** @return true when at least one link answered true */
		public boolean hasTrue() {
			return !trueLinks.isEmpty();
		}
		
		/** @return true when at least one link answered false */
		public boolean hasFalse() {
			return !falseLinks.isEmpty();
		}
	}
	
	// Coarse-to-fine decoding switches; see useCoarse().
	public static boolean USE_COARSE_AND_FINE = false;
	public static double COARSE_THRESHOLD = 1e-5;
	
	/**
	 * Decides whether the coarse pass applies to this item: only when
	 * coarse-and-fine decoding is enabled and the item's probability falls
	 * below COARSE_THRESHOLD relative to the previous slice's total.
	 */
	private boolean useCoarse(SentenceContext ctx, SliceItem headItem) {
		if (!USE_COARSE_AND_FINE) {
			return false;
		}
		return headItem.getProb() < COARSE_THRESHOLD * ctx.getLastSliceTotalProb();
	}
	
	/**
	 * Invoked when this LM's history tree has reached a leaf for headItem:
	 * records the leaf as this LM's prediction, issues an asynchronous request
	 * for the corresponding probability tree, recurses into the backoff LM
	 * when applicable, and finally registers the prediction on the next slice.
	 */
	private void addCluster(SentenceContext ctx, Context overtContext, SliceItem headItem, List<SliceItem> agenda) {
		final BinaryTree<HistoryTreePayload>[] predictions = ctx.predictions;
		final int lmId = lm.getIdNum();
		BinaryTree<HistoryTreePayload> historyTree = headItem.getPosition(lmId);
		if (historyTree == null) {
			// no stored position: start at the root of this LM's history tree
			historyTree = lm.getHistoryTree();
		}

		HistoryTreePayload node = historyTree.getPayload();
		
		TrellisSlice newSlice = headItem.slice.getNext();
		
		predictions[lmId] = historyTree;
		
		{
			int requestClusterId = historyTree.getPayload().clusterid;
			if (node.isBackoff) {
				// request the grandparent
				requestClusterId = historyTree.getParent().getParent().getPayload().clusterid;
			}
			// kick off the asynchronous fetch; generatePredictions() syncs on it later
			Key key = new Key(lmId, requestClusterId, newSlice.getOvertFactors());
			long start = Timing.nanoTime();
			ctx.getCurrentRequests().request(key);
			long end = Timing.nanoTime();
			if (Timing.TIMING) {
				TimingData.totalProbTreeRequestTime.addAndGet(end - start);
				TimingData.totalProbTreeRequestCount.incrementAndGet();
			}
		}
		if (lm.hasBackoffLM()) {
			if (useBackoff || node.isBackoff) {
				/*
				if (node.isBackoff) {
					predictions[lm.getIdNum()] = historyTree.getParent().getParent();
				}
				*/

				// continue partitioning in the lower-order LM
				backoffLM.getDecoder().newPartition(ctx, overtContext, headItem, agenda);
				
				if (node.isBackoff) {
					return;
				}
			}
		}

		// leaf LM in the chain: register the completed prediction vector
		if (!lm.hasBackoffLM() || !useBackoff) {
			headItem.slice.getNext().addPrediction(predictions.clone(), headItem);
		}
		return;

	}
	/**
	 * Walks {@code headItem} down this LM's binary history (decision) tree,
	 * answering the question stored at each interior node, until a leaf is
	 * reached — at which point the clustered prediction is emitted via
	 * {@code addCluster} and the method returns.
	 *
	 * Questions about overt factors are answered directly against
	 * {@code overtContext} and merely select a branch. Questions about hidden
	 * factors may be ambiguous for a trellis item whose ProbTree carries mass
	 * on both answers; such an item is split (its ProbTree is cut), and when
	 * the question refers to a slice more than one step back, the split is
	 * propagated forward slice-by-slice toward {@code headItem}, cloning any
	 * intermediate item linked to both the true and the false partition.
	 *
	 * @param ctx          sentence-level decoding context
	 * @param overtContext overt-factor context used to answer non-hidden questions
	 * @param headItem     trellis item whose history-tree position is being refined
	 * @param agenda       receives newly created items that still require partitioning
	 */
	@SuppressWarnings("unchecked")
	protected void newPartition(SentenceContext ctx, Context overtContext, SliceItem headItem, List<SliceItem> agenda) {
		//SliceItem headItem = agenda.remove(0);
		final int lmId = lm.getIdNum();
		
		// Loop until headItem reaches a leaf (addCluster + return) or
		// useCoarse() aborts further refinement.
		while(true) {
			BinaryTree<HistoryTreePayload> historyTree = headItem.getPosition(lmId);
			if (historyTree == null) {
				// no recorded position for this LM yet: start at the tree root
				historyTree = lm.getHistoryTree();
			}
			
			HistoryTreePayload node = historyTree.getPayload();
			if (historyTree.isLeaf()) {
				// reached an equivalence class of histories: emit the prediction
				addCluster(ctx, overtContext, headItem, agenda);
				return;
			}

			Question question = node.question;
			// getTimeOffset() is negative; questionOffset is the number of slices
			// back from headItem's slice the question refers to
			// (0 == the immediately preceding slice — TODO confirm).
			int questionOffset = - question.getTimeOffset() - 1;
			
			if (!question.isAboutHidden()) {
				// a question about overt factors does not split the context
				final boolean result;
				{
					// timed section: answering the question against the overt context
					long start = Timing.nanoTime();
					result = question.test(overtContext);
					long end = Timing.nanoTime();
					if (Timing.TIMING) {
						TimingData.totalQuestionTime.addAndGet(end - start);
						TimingData.totalQuestionCount.incrementAndGet();
					}
				}
				if (debug > 2) {
					System.out.printf("Question %s, head = %s, result=%s\n", question.toString(), headItem, result);
				}
				// descend: true -> right child, false -> left child
				if (result) {
					headItem.setPosition(lmId, historyTree.getRight());
				} else {
					headItem.setPosition(lmId, historyTree.getLeft());
				}
			} else {
				// questions about hidden factors are more fun...
				// fwdItems maps each item in the slice one step closer to headItem
				// onto the back-links that ended up in the true/false partitions.
				HashMap<SliceItem,SplitLinks> fwdItems = new HashMap<SliceItem,SplitLinks>();
				if (questionOffset > 0) {
					RecursiveSliceItemIterator itemIterator = new RecursiveSliceItemIterator(headItem, questionOffset-1);
					for(; itemIterator.hasNext();) {
						fwdItems.put(itemIterator.next(), new SplitLinks());
					}
				}
				// currentItems: the items in the slice the question is actually about
				LinkedList<SliceItem> currentItems = new LinkedList<SliceItem>();
				
				// beyond offset -2 the recursive iterator can reach the same item via
				// multiple paths, so deduplicate by identity
				final boolean checkDupes = question.getTimeOffset() < -2;
				Map<SliceItem,SliceItem> _currentItems = checkDupes ? new IdentityHashMap<SliceItem,SliceItem>(100) : null;
				
				for(RecursiveSliceItemIterator itemIterator = new RecursiveSliceItemIterator(headItem, questionOffset); itemIterator.hasNext(); ) {
					SliceItem i = itemIterator.next();
					if (checkDupes) {
						if (_currentItems.containsKey(i)) continue;
						_currentItems.put(i, i);
					}
					currentItems.add(i);
				}
				if (checkDupes) {
					// drop the dedupe map so it can be collected
					_currentItems = null;
				}
				
				if (debug > 1) {
					System.out.printf("checking prev links before hidden question, headItem = %s, nrItems = %d\n", 
							headItem, currentItems.size());
					headItem.checkPrevProb();
				}
				
				for(; !currentItems.isEmpty();) {
					SliceItem item = currentItems.remove();
	
					final Question.TestResult result;
					{
						// timed section: answering the question against the hidden item
						long start = Timing.nanoTime();
						result = question.test(item);
						long end = Timing.nanoTime();
						if (Timing.TIMING) {
							TimingData.totalQuestionTime.addAndGet(end - start);
							TimingData.totalQuestionCount.incrementAndGet();
						}
					}
					if (debug > 2) {
						System.out.printf("Question %s, head = %s, result=%s\n", question.toString(), headItem, result);
					}
					if (!result.isSplit()) {
						// unambiguous answer: record the item on the matching link list,
						// or move headItem directly when the question is about the
						// immediately preceding slice (questionOffset == 0)
						if (result.getResult()) {
							//trueItems.add(item);
							if (questionOffset > 0) {
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks links = fwdItems.get(fwd);
									if (links != null) {
										links.trueLinks.add(item);
									}
								}
							} else {
								headItem.setPosition(lmId, historyTree.getRight());
							}
						} else {
							//falseItems.add(item);
							if (questionOffset > 0) {
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks links = fwdItems.get(fwd);
									if (links != null) {
										links.falseLinks.add(item);
									}
								}
							} else {
								headItem.setPosition(lmId, historyTree.getLeft());
							}
						}
						continue;
					}
					
					if (useCoarse(ctx, headItem)) {
						// don't split small items
						addCluster(ctx, overtContext, headItem, agenda);
						return;
					}
					
					//int index = items.indexOf(result.getItem());
					SliceItem sliceItem = (SliceItem) result.getItem();
					
					if (debug > 1) {
						System.out.printf("splitting %s\n", sliceItem);
						sliceItem.checkPrevProb();
					}
					
					// the interesting part: split the sliceItem
					//System.out.printf("old tree prob: %f\n", sliceItem.tree.getTreeProb());
					double oldTreeProb = sliceItem.tree.getTotalProb();
					// probTree receives the "true" part cut off of sliceItem.tree;
					// sliceItem.tree keeps the "false" remainder — TODO confirm
					ProbTree probTree;
					{
						long start = Timing.nanoTime();
						probTree = sliceItem.tree.cut(result.getPrefix());
						long end = Timing.nanoTime();
						if (Timing.TIMING) {
							TimingData.totalCuttingTime.addAndGet(end - start);
							TimingData.totalCuttingCount.incrementAndGet();
						}
					}
					if (probTree == null) {
						// cut() produced nothing: all remaining mass answers "false"
						if (debug > 2) {
							System.out.println("no true branch");
							if (!ProbMath.approxEqual(oldTreeProb, sliceItem.tree.getTotalProb())) {
								System.out.printf("probTree was %g, now %g\n", oldTreeProb, sliceItem.tree.getTotalProb());
							}
						}
						/*
						if (!ProbMath.approxEqual(oldTreeProb, sliceItem.tree.getTotalProb())) {
							// a negligible amount of probability
							sliceItem.slice.updateForwardProbabilities(sliceItem, sliceItem.tree.getTotalProb() - oldTreeProb);
							//System.out.printf("probTree was %g, now %g\n", oldTreeProb, sliceItem.tree.getTotalProb());
						}
						*/
						// no true branch (was cut off during pruning)
						//falseItems.add(item);
						if (questionOffset > 0) {
							for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
								SplitLinks links = fwdItems.get(fwd);
								if (links != null) {
									links.falseLinks.add(item);
								}
							}
						} else {
							headItem.setPosition(lmId, historyTree.getLeft());
						}
						continue;
					} else if (probTree == sliceItem.tree) {
						// cut() returned the whole tree: all mass answers "true"
						if (debug > 2) {
							System.out.println("no false branch");
							if (!ProbMath.approxEqual(oldTreeProb, sliceItem.tree.getTotalProb())) {
								System.out.printf("probTree was %g, now %g\n", oldTreeProb, probTree.getTotalProb());
							}
						}
						/*
						if (!ProbMath.approxEqual(oldTreeProb, sliceItem.tree.getTotalProb())) {
							// a negligible amount of probability
							sliceItem.slice.updateForwardProbabilities(sliceItem, probTree.getTotalProb() - oldTreeProb);
							//System.out.printf("probTree was %g, now %g\n", oldTreeProb, probTree.getTotalProb());
						}
						*/
						// no false branch
						//trueItems.add(item);
						if (questionOffset > 0) {
							for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
								SplitLinks links = fwdItems.get(fwd);
								if (links != null) {
									links.trueLinks.add(item);
								}
							}
						} else {
							headItem.setPosition(lmId, historyTree.getRight());
						}
						continue;
					} else {
						// a genuine two-way split
						if (debug > 2) {
							System.out.printf("new tree probs: %f and %f\n", probTree.getTotalProb(), sliceItem.tree.getTotalProb());
						}
						if (probTree.getTotalProb() < sliceItem.tree.getTotalProb() * PRUNING_THRESHOLD ||
								probTree.getTotalProb() * sliceItem.getPrevProb() < sliceItem.slice.bestProb * PRUNING_THRESHOLD) {
							// the "true" partition is negligible: discard the cut-off tree
							// and remove its mass from the forward probabilities
							// discard the cut off tree
							if (debug > 1) {
								System.out.printf("discarding split subtree %s (1): %e vs. %e\n", sliceItem, probTree.getTotalProb(), sliceItem.tree.getTotalProb());
							}
							double probDiff = probTree.getTotalProb() * sliceItem.getPrevProb();
							if (probDiff > 0) {
								sliceItem.slice.updateForwardProbabilities(sliceItem, -probDiff);
							}
	
							//falseItems.add(item);
							if (questionOffset > 0) {
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks links = fwdItems.get(fwd);
									if (links != null) {
										links.falseLinks.add(item);
									}
								}
							} else {
								headItem.setPosition(lmId, historyTree.getLeft());
							}
							continue;
							//partitionContext(itemLists, items, historyTree.getLeft(), predictions, scale, level+1);
						} else if (sliceItem.tree.getTotalProb() < probTree.getTotalProb() * PRUNING_THRESHOLD ||
								sliceItem.tree.getTotalProb() * sliceItem.getPrevProb() < sliceItem.slice.bestProb * PRUNING_THRESHOLD) {
							// the "false" remainder is negligible: the item keeps only
							// the cut ("true") tree; remove the remainder's mass
							if (debug > 1) {
								System.out.printf("discarding split subtree %s (2): %e vs. %e\n", sliceItem, probTree.getTotalProb(), sliceItem.tree.getTotalProb());
							}
							double probDiff = sliceItem.getPrevProb() * sliceItem.tree.getTotalProb(); 
							sliceItem.tree = probTree;
							
							if (probDiff > 0) {
								sliceItem.slice.updateForwardProbabilities(sliceItem, -probDiff);
							}
							
							//trueItems.add(item);
							if (questionOffset > 0) {
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks links = fwdItems.get(fwd);
									if (links != null) {
										links.trueLinks.add(item);
									}
								}
							} else {
								headItem.setPosition(lmId, historyTree.getRight());
							}
							continue;
						} else {
							// both partitions survive: materialize the "true" part as a
							// new SliceItem and register the split with the slice
							SliceItem newItem = new SliceItem(probTree, item.getPrevProb(), 
									(ClusterId)sliceItem.clusterid.clone());
							sliceItem.slice.split(item, newItem);
	
							if (debug > 1) {
								System.out.printf("cutting %s off %s\n", newItem, item);
							}
							//headItem.setPosition(lmId, historyTree.getLeft());
	
							//falseItems.add(item);
							if (questionOffset > 0) {
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks links = fwdItems.get(fwd);
									if (links != null) {
										links.falseLinks.add(item);
										links.trueLinks.add(newItem);
									}
								}
							} else {
								// the question was about the immediately preceding slice:
								// the new item needs its own partitioning pass from this node
								agenda.add(newItem);
								newItem.setPosition(lmId, historyTree);
								//headItem.setPosition(lmId, historyTree.getRight());
							}
						}
					}
				} // itemIterator
				if (debug > 1) {
					System.out.printf("checking prev links after hidden question, headItem = %s\n", headItem);
					headItem.checkPrevProb();
				}
				
				// for offset 0 headItem's position was already updated inline above;
				// continue descending the history tree
				if (questionOffset == 0) continue;
				
				boolean hasFalse = false;
				boolean hasTrue = false;
				
				for(SplitLinks links : fwdItems.values()) {
					if (links.hasFalse()) hasFalse = true;
					if (links.hasTrue()) hasTrue = true;
					if (hasFalse && hasTrue) break;
				}
				
				if (!hasFalse) {
					// only the true partition received links: no forward split needed
					headItem.setPosition(lmId, historyTree.getRight());
				} else if (!hasTrue) {
					// only the false partition received links
					headItem.setPosition(lmId, historyTree.getLeft());
				} else {
					if (debug > 1) {
						System.out.printf("checking prev links [point 1], headItem = %s\n", headItem);
						headItem.checkPrevProb();
					}

					if (useCoarse(ctx, headItem)) {
						// don't split small items
						addCluster(ctx, overtContext, headItem, agenda);
						return;
					}
					
					// propagate the split forward one slice at a time until it
					// reaches headItem's own slice
					long beforeFwdSplitting = Timing.nanoTime();
					int currentOffset = questionOffset - 1;
					while(currentOffset > 0) {
						HashMap<SliceItem,SplitLinks> newFwdItems = new HashMap<SliceItem,SplitLinks>();
						RecursiveSliceItemIterator iter = new RecursiveSliceItemIterator(headItem, currentOffset-1);
						for(; iter.hasNext();) {
							newFwdItems.put(iter.next(), new SplitLinks());
						}
	
						// copy items in the next slice that have both true and false links
						for(Map.Entry<SliceItem, SplitLinks> entry : fwdItems.entrySet()) {
							SliceItem item = entry.getKey();
							SplitLinks links = entry.getValue();
							
							if (links.isSplit()) {
								// item is linked to both partitions: clone it and move the
								// "true" back-links (and their probability mass) to the clone
								SliceItem newItem = (SliceItem) item.clone();
								item.slice.insertItem(newItem);
								
								// copy links to the following slice if it exists
								{
									TrellisSlice nextSlice = item.slice.getNext();
									if (nextSlice != null) {
										ArrayList<SliceItem> fwdLinks = nextSlice.prev2curr.get(item);
										if (fwdLinks != null) {
											nextSlice.prev2curr.put(newItem, (ArrayList<SliceItem>)fwdLinks.clone());
											for(SliceItem i : fwdLinks) {
												nextSlice.curr2prev.get(i).add(newItem);
											}
										}
									}
								}
	
								// newItem belongs to the "true" branch
								item.slice.curr2prev.put(newItem, links.trueLinks);
								item.slice.curr2prev.put(item, links.falseLinks);
								
								double prevTrueProb = 0;
								for(SliceItem prevTrueItem : links.trueLinks) {
									ArrayList<SliceItem> fwdLinks = item.slice.prev2curr.get(prevTrueItem);
									int idx = fwdLinks.indexOf(item);
									if (idx == -1) {
										if (debug > 1) {
											System.err.printf("can't find forwardLinks %s of %s\n", item, prevTrueItem);
										}
									} else {
										// redirect the predecessor's forward link to the clone
										fwdLinks.set(idx, newItem);
										prevTrueProb += prevTrueItem.getProb() * prevTrueItem.getPrevProb();
									}
								}
								// shift the true-branch mass from item onto the clone
								newItem.setPrevProb(prevTrueProb);
								item.addPrevProb(-prevTrueProb);
								if (debug > 2) {
									System.out.printf("moved %g prevProb from %s to %s\n", prevTrueProb, newItem, item);
								}
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks newLinks = newFwdItems.get(fwd);
									if (newLinks != null) {
										newLinks.falseLinks.add(item);
										newLinks.trueLinks.add(newItem);
									}
								}
							} else {
								// not split: just carry the item forward on whichever side(s)
								// it belongs to
								for(SliceItem fwd : item.slice.getNext().prev2curr.get(item)) {
									SplitLinks newLinks = newFwdItems.get(fwd);
									if (newLinks != null) {
										if (links.hasFalse()) {
											newLinks.falseLinks.add(item);
										}
										if (links.hasTrue()) {
											newLinks.trueLinks.add(item);
										}
									}
								}
							}
						}					
						fwdItems = newFwdItems;
						--currentOffset;
					}
					long afterFwdSplitting = Timing.nanoTime();
					if (Timing.TIMING) {
						TimingData.totalFwdSplittingTime.addAndGet(afterFwdSplitting - beforeFwdSplitting);
					}
					if (debug > 1) {
						System.out.printf("checking prev links [point 2], headItem = %s\n", headItem);
						headItem.checkPrevProb();
					}
					
					// fwdItems should contain just one element: headItem
					assert(fwdItems.size() == 1);
					
					
					// copy items in the next slice that have both true and false links
					// (same propagation as the loop above, but for headItem itself: the
					// clone and the original get opposite history-tree branches, and the
					// clone is queued for its own partitioning pass)
					for(Map.Entry<SliceItem, SplitLinks> entry : fwdItems.entrySet()) {
						SliceItem item = entry.getKey();
						SplitLinks links = entry.getValue();
						
						if (links.isSplit()) {
							SliceItem newItem = (SliceItem) item.clone();
							item.slice.insertItem(newItem);
							
							// copy links to the following slice if it exists
							{
								TrellisSlice nextSlice = item.slice.getNext();
								if (nextSlice != null) {
									ArrayList<SliceItem> fwdLinks = nextSlice.prev2curr.get(item);
									if (fwdLinks != null) {
										nextSlice.prev2curr.put(newItem, (ArrayList<SliceItem>)fwdLinks.clone());
										for(SliceItem i : fwdLinks) {
											nextSlice.curr2prev.get(i).add(newItem);
										}
									}
								}
							}
	
							// newItem belongs to the "true" branch
							item.slice.curr2prev.put(newItem, links.trueLinks);
							item.slice.curr2prev.put(item, links.falseLinks);
							
							double prevTrueProb = 0;
							for(SliceItem prevTrueItem : links.trueLinks) {
								ArrayList<SliceItem> fwdLinks = item.slice.prev2curr.get(prevTrueItem);
								int idx = fwdLinks.indexOf(item);
								// NOTE(review): unlike the propagation loop above, this copy
								// does not guard against indexOf returning -1; set(-1, ...)
								// would throw — confirm the link is always present here
								fwdLinks.set(idx, newItem);
								prevTrueProb += prevTrueItem.getProb() * prevTrueItem.getPrevProb();
							}
							newItem.setPrevProb(prevTrueProb);
							newItem.setPosition(lmId, historyTree.getRight());
							
							item.addPrevProb(-prevTrueProb);
							item.setPosition(lmId, historyTree.getLeft());
							
							agenda.add(newItem);
						} else if (!links.hasFalse()) {
							item.setPosition(lmId, historyTree.getRight());
						} else {
							item.setPosition(lmId, historyTree.getLeft());
						}
					}
	
				}
			}
			//break;
		}
	}
	
	/**
	 * Formats the decoder's accumulated timing counters and storage statistics
	 * into a human-readable, multi-line report.
	 *
	 * @return the formatted statistics report
	 */
	public String getStatistics() {
		StringBuilder report = new StringBuilder(1000);
		report.append(String.format("Prolog time: %.3fs\n", secs(timingData.getTotalPrologTime())));
		report.append(String.format("Partition time: %.3fs\n", secs(timingData.getTotalPartitionTime())));
		report.append(String.format("  Prep time: %.3fs\n", secs(TimingData.totalPrePartitionTime.get())));
		report.append(String.format("  Questions: %d in %.3fs\n",
				TimingData.totalQuestionCount.get(), secs(TimingData.totalQuestionTime.get())));
		report.append(String.format("  Cutting: %d in %.3fs\n",
				TimingData.totalCuttingCount.get(), secs(TimingData.totalCuttingTime.get())));
		report.append(String.format("  Splitting: %d in %.3fs\n",
				TimingData.totalSplittingCount.get(), secs(TimingData.totalSplittingTime.get())));
		report.append(String.format("  Requests: %d in %.3fs\n",
				TimingData.totalProbTreeRequestCount.get(), secs(TimingData.totalProbTreeRequestTime.get())));
		report.append(String.format("  FwdSplit time:  %.3fs\n", secs(TimingData.totalFwdSplittingTime.get())));
		report.append(String.format("Prediction time: %fs\n", secs(TimingData.totalPredictionTime.get())));
		report.append(String.format("  including tree merging: %fs\n", secs(timingData.getTotalTreeMergingTime())));
		report.append(String.format("    merges: leafy %d in %fs, compact %d in %fs\n",
				ProbTree.totalLeafyMergeCount, secs(ProbTree.totalLeafyMergeTime),
				ProbTree.totalCompactMergeCount, secs(ProbTree.totalCompactMergeTime)));
		report.append(String.format("    merges: num %d, cloning %fs, compactifying %fs\n",
				ProbTree.totalMergingCount, secs(ProbTree.totalCloningTime),
				secs(ProbTree.totalCompactifyingTime)));
		report.append(String.format("  including storage: %fs\n", secs(timingData.getTotalStorageTime())));
		report.append(String.format("Merge time: %fs\n", secs(timingData.getTotalMergeTime())));
		report.append("Storage stats: ").append(storage.getStats());
		return report.toString();
	}

	/** Converts a nanosecond duration to seconds for the statistics report. */
	private static double secs(long nanos) {
		return (double) nanos / 1000000000L;
	}

	/**
	 * Returns the probability-tree storage backend used by this decoder.
	 *
	 * @return the current storage instance
	 */
	public AbstractProbTreeStorage getStorage() {
		return this.storage;
	}

	/**
	 * Replaces the probability-tree storage backend used by this decoder.
	 *
	 * @param newStorage the storage instance to use from now on
	 */
	public void setStorage(AbstractProbTreeStorage newStorage) {
		this.storage = newStorage;
	}

	/**
	 * Returns the current debug verbosity level (higher values enable more
	 * diagnostic output).
	 *
	 * @return the debug level
	 */
	public int getDebug() {
		return this.debug;
	}

	/**
	 * Returns the language model this decoder operates on.
	 *
	 * @return the associated language model
	 */
	public LanguageModel getLm() {
		return this.lm;
	}
	
}
