/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.lang.management.MemoryUsage;
import java.nio.channels.*;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.atomic.*;
import java.util.concurrent.locks.*;

import org.apache.commons.math.util.OpenIntToDoubleHashMap;

import com.sleepycat.je.*;

import edu.berkeley.nlp.util.*;
import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.model.training.ContextClusterStorage.ContextClusters;
import edu.umd.clip.lm.storage.*;
import edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class BackoffInterpolation {
	// The language model whose history-tree backoff weights are being estimated.
	private final LanguageModel lm;
	// Root of lm's history tree; nodes are indexed by cluster id in `nodes`.
	private final BinaryTree<HistoryTreePayload> theTree;
	// BDB-backed cache mapping a (reduced) context to its backoff-LM cluster ids.
	private ContextClusterStorage contextClusterStorage;
	// Chain of backoff language models, outermost first (null when noBackoffLM).
	private LanguageModel boLMs[];
	// Cached backoff probabilities: cluster tuple -> (compact word -> prob).
	private Map<ContextClusters, OpenIntToDoubleHashMap> boCluster2WordProb;
	// History-tree nodes indexed by their payload's clusterid.
	private BinaryTree<HistoryTreePayload> nodes[];
	private int nrClusters;
	// Working directory holding the per-level data files and the BDB "db" dir.
	private File dataDir;
	// Number of training-data splits (one file per split per level).
	private int nrDataFiles;
	private Environment env;
	// Per-level storage carrying each node's smoothed distributions to its children.
	private BDBActiveNodeStorage<ParentNodeData> parentNodeDataStorage;
	// Reduces contexts to the backoff LM's (overt, hidden) order.
	private ContextReductionTrainingDataFilter boContextReducer;
	private BackoffType backoff = BackoffType.ONE;
	// True when lm has no backoff LM; interpolation then degenerates to backoff=0.
	private final boolean noBackoffLM;
	@SuppressWarnings("unchecked")
	public BackoffInterpolation(LanguageModel lm) {
		this.lm = lm;
		this.theTree = lm.getHistoryTree();
		this.noBackoffLM = lm.getBackoffLM() == null;

		// First pass: find the largest cluster id used anywhere in the tree.
		int highestId = 0;
		BinaryTreeIterator<HistoryTreePayload> scan = theTree.getPostOrderIterator();
		while (scan.hasNext()) {
			highestId = Math.max(highestId, scan.next().clusterid);
		}

		// Cluster ids double as array indices below; ids not present stay null.
		nrClusters = highestId + 1;
		nodes = new BinaryTree[nrClusters];

		// Second pass: index every tree node by its cluster id for O(1) lookup.
		BinaryTreeIterator<HistoryTreePayload> indexer = theTree.getPostOrderIterator();
		while (indexer.hasNext()) {
			BinaryTree<HistoryTreePayload> treeNode = indexer.nextNode();
			nodes[treeNode.getPayload().clusterid] = treeNode;
		}
	}
	
	/**
	 * Opens the BDB environment and storages under {@code dataDir}, resolves
	 * the chain of backoff LMs, builds the level-0 training data files
	 * (combining and context-reducing the raw interpolation data), and
	 * precomputes the backoff probabilities for every (cluster, word) pair
	 * seen in that data.  Must be called before {@link #interpolate()}.
	 *
	 * @param dataDir working directory for per-level data files and the "db" subdir
	 * @throws IOException if the training data cannot be read or combined
	 * @throws DatabaseException if the BDB environment cannot be opened
	 */
	public void initialize(final File dataDir) throws IOException, DatabaseException {
		this.dataDir = dataDir;
		OnDiskTrainingDataNodeWriter.setTempDir(dataDir);
		
		// Shared BDB environment for context-cluster and parent-node storage.
		EnvironmentConfig envConf = new EnvironmentConfig();
		envConf.setAllowCreate(true);
		envConf.setSharedCache(true);
		envConf.setTransactional(false);
		envConf.setReadOnly(false);
		envConf.setCachePercent(20);
		envConf.setConfigParam("je.log.fileMax", Integer.toString(100*1024*1024));
		
		File dir = new File(dataDir, "db");
		if (!dir.isDirectory()) {
			dir.mkdirs();
		}
		env = new Environment(dir, envConf);
		contextClusterStorage = new ContextClusterStorage(env);
		parentNodeDataStorage = new BDBActiveNodeStorage<ParentNodeData>(env);

		Experiment exp = Experiment.getInstance();
		
		if (!noBackoffLM) {
			// Collect the whole chain of backoff LMs, outermost first.
			ArrayList<Integer> lmIds = new ArrayList<Integer>();
			LanguageModel boLM = exp.getLM(lm.getBackoffLM());
			boContextReducer = new ContextReductionTrainingDataFilter(boLM.getOvertOrder(), boLM.getHiddenOrder());
			System.out.printf("backoff forest: %s, context order: overt=%d, hidden=%d\n", 
					boLM.getId(), boLM.getOvertOrder(), boLM.getHiddenOrder());
	
			while(boLM != null) {
				lmIds.add(boLM.getIdNum());
				if (boLM.getBackoffLM() == null) break;
				boLM = exp.getLM(boLM.getBackoffLM());
			}
			
			boLMs = new LanguageModel[lmIds.size()];
			for(int i=0; i<lmIds.size(); ++i) {
				boLMs[i] = exp.getLmByNumId(lmIds.get(i));
				System.out.printf("boLMs[%d] = %s\n", i, boLMs[i].getId());
			}
		}
		
		final Experiment.Files files = Experiment.getInstance().getFiles();
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("level 0");

		nrDataFiles = files.getInterpolateData().size();
		
		if (!noBackoffLM) {
			long overtMask = Experiment.getInstance().getTupleDescription().getOvertFactorsMask();
			
			// The filter masks futures down to overt factors and, as a side
			// effect, caches the backoff cluster ids of every context it sees.
			final TrainingDataFilter filter = new DataFilter(new MaskedFuturesTrainingDataFilter(overtMask), group, boLMs);
	
			for(int split=0; split<nrDataFiles; ++split) {
				final String[] filenames = files.getInterpolateDataFiles(split);
				final int id = split;
				
				Runnable run = new Runnable() {
					@Override
					public void run() {
						
						try {
							TrainingDataUtil.combineAndReduceContext(filenames, 
									new File(dataDir, makeDataFilename(0, id)).getAbsolutePath(), 
									lm.getOvertOrder(), lm.getHiddenOrder(), 
									lm.getHistoryTree().getPayload().clusterid, filter);
						} catch (IOException e) {
							e.printStackTrace();
						}
						System.out.printf("data #%d done\n", id);
					}
				};
				
				// turns out it takes too much memory to do this in parallel...
				//manager.addJob(group, new Job(run, "data #" + Integer.toString(id)));
				run.run();
			}
			
			group.join();
		}
		
		if (!noBackoffLM) {
			boCluster2WordProb = new HashMap<ContextClusters, OpenIntToDoubleHashMap>(10000);
	
			// Precompute backoff probabilities from the level-0 data files.
			for(byte dId = 0; dId < nrDataFiles; ++dId) {
				File dataFile = new File(dataDir, makeDataFilename(0, dId));
				if (!dataFile.isFile()) {
					System.err.printf("Cannot find file %s, ", dataFile.getAbsolutePath());
					return;
				}
				FileChannel inputChannel = new FileInputStream(dataFile).getChannel();
				TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannel));
				ReadableTrainingDataNode node = reader.getNext();
				
				System.out.printf("computing backoff probabilities in file #%d\n", dId);
				computeBackoffProbabilities(node.getData(0));
				reader.close();
			}
		}
		group.join();
	}
	
	/**
	 * Per-node data carried from one tree level to the next: for every data
	 * split, the node's smoothed word distribution and its raw word counts.
	 * Stored in {@link BDBActiveNodeStorage}, hence Serializable.
	 */
	private static class ParentNodeData implements Serializable {
		private static final long serialVersionUID = 1L;
		final Long2DoubleMap smoothedDists[];
		final Long2IntMap counts[];

		/**
		 * @param smoothedDists smoothed word distributions, one per data split
		 * @param counts raw word counts, one per data split
		 */
		public ParentNodeData(Long2DoubleMap[] smoothedDists,
				Long2IntMap[] counts) {
			this.smoothedDists = smoothedDists;
			this.counts = counts;
		}
	}
	
	/**
	 * Top-level driver of the backoff-weight estimation.  Walks the history
	 * tree level by level: for each level it estimates the backoff weights of
	 * the current nodes ({@link #estimateBackoffLevel}), then splits every
	 * node's data between its two children ({@link #splitData}) to produce the
	 * next level's input files, until no node produces children.  When the
	 * model has no backoff LM, all backoff weights are set to zero and no
	 * optimization is performed.
	 *
	 * @throws IOException if a per-level data file cannot be read or written
	 */
	public void interpolate() throws IOException {
		if (noBackoffLM) {
			// a much simpler case -- no optimization needed
			for(BinaryTreeIterator<HistoryTreePayload> it = theTree.getPostOrderIterator(); it.hasNext();) {
				HistoryTreePayload payload = it.next();
				payload.backoff = 0;
			}
			return;
		}
		
		JobManager manager = JobManager.getInstance();
		
		int level = 0;
		// Holds the previous level's smoothed distributions (null at level 0).
		ActiveNodeStorage<ParentNodeData>.ActiveNodeCollection collection = null;
		while(true) {
			// Open one reader per data split for this level's files.
			File dataFiles[] = new File[nrDataFiles];
			FileChannel inputChannels[] = new FileChannel[nrDataFiles];
			final TrainingDataNodeReader readers[] = new TrainingDataNodeReader[nrDataFiles];
			
			for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
				dataFiles[dataId] = new File(dataDir, makeDataFilename(level, dataId));
				if (!dataFiles[dataId].isFile()) {
					System.err.printf("Cannot find file %s, ", dataFiles[dataId].getAbsolutePath());
					return;
				}
				inputChannels[dataId] = new FileInputStream(dataFiles[dataId]).getChannel();
				readers[dataId] = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannels[dataId]));
	
			}
			// Storage for the data the NEXT level interpolates against.
			ActiveNodeStorage<ParentNodeData>.ActiveNodeCollection nextCollection = parentNodeDataStorage.createCollection("level#"+Integer.toString(level));

			System.out.printf("******* ESTIMATING LEVEL %d ********\n", level);
			
			estimateBackoffLevel(level, readers, collection, nextCollection);

			JobGroup group = manager.createJobGroup("split level");
			
			// Second pass over the same input: split every node's data between
			// its children, writing the next level's data files.
			File newDataFiles[] = new File[nrDataFiles];
			final TrainingDataNodeWriter writers[] = new TrainingDataNodeWriter[nrDataFiles];
			final MutableInteger nextLevelNodeCounts[] = new MutableInteger[nrDataFiles];
			RandomAccessFile outFiles[] = new RandomAccessFile[nrDataFiles];
			for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
				// reset the reader (rewind the channel and wrap it anew)
				inputChannels[dataId].position(0);
				readers[dataId] = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannels[dataId]));

				newDataFiles[dataId] = new File(dataDir, makeDataFilename(level+1, dataId));
				outFiles[dataId] = new RandomAccessFile(newDataFiles[dataId], "rw");
				outFiles[dataId].getChannel().truncate(0);

				writers[dataId] = new OnDiskTrainingDataNodeWriter(outFiles[dataId].getChannel());
				writers[dataId] = new BufferedTrainingDataNodeWriter(writers[dataId]);
				
				nextLevelNodeCounts[dataId] = new MutableInteger();
				
				final byte dId = dataId;
				Runnable run = new Runnable() {

					@Override
					public void run() {
						try {
							nextLevelNodeCounts[dId].set(splitData(readers[dId], writers[dId]));
						} catch (IOException e) {
							e.printStackTrace();
						}
					}
				};
				Job job = new Job(run, "data#" + Byte.toString(dataId));
				manager.addJob(group, job);
			}
			group.join();
			
			// NOTE(review): outFiles are never closed here directly -- it
			// looks like writers[...].close() is expected to close the
			// underlying channels; confirm before changing.
			for(byte dataId=0; dataId<nrDataFiles; ++dataId) {
				readers[dataId].close();
				readers[dataId] = null;
				//outFiles[dataId].close();
				
				writers[dataId].close();
				writers[dataId] = null;
			}
			inputChannels = null;
			outFiles = null;
			
			// Every split must have produced the same set of next-level nodes.
			for(byte dataId=1; dataId<nrDataFiles; ++dataId) {
				assert(nextLevelNodeCounts[dataId].intValue() == nextLevelNodeCounts[0].intValue());
			}
			
			// The previous level's parent data is no longer needed.
			if (collection != null) {
				parentNodeDataStorage.removeCollection(collection.getName());
			}
			collection = nextCollection;

			// delete data files from the previous level
			for(File f : dataFiles) {
				f.delete();
			}
			
			// No children anywhere: done -- clean up the (empty) next-level
			// files and the final collection.
			if (nextLevelNodeCounts[0].intValue() == 0) {
				for(File f : newDataFiles) {
					f.delete();
				}
				if (collection != null) {
					parentNodeDataStorage.removeCollection(collection.getName());
				}
				break;
			}
			
			
			++level;
		}
	}

	/**
	 * Distributes each current-level node's training data between its two
	 * children according to the node's question, writing the children's data
	 * through {@code writer}.  Leaf nodes produce nothing; a backoff leaf
	 * additionally gets its lambda assigned via populateBackoffCluster().
	 *
	 * @param reader source of the current level's per-node data
	 * @param writer sink for the next level's per-node data
	 * @return the number of next-level nodes written
	 * @throws IOException on read/write failure
	 */
	private int splitData(TrainingDataNodeReader reader, TrainingDataNodeWriter writer) throws IOException {
		int childNodeCount = 0;

		ReadableTrainingDataNode current;
		while ((current = reader.getNext()) != null) {
			final BinaryTree<HistoryTreePayload> treeNode = nodes[current.getNodeId()];

			if (treeNode.isLeaf()) {
				// A leaf has no children; consume its data without using it.
				current.skipData();
				if (treeNode.getPayload().isBackoff) {
					populateBackoffCluster(treeNode);
				}
			} else {
				childNodeCount += 2;

				final int leftId = treeNode.getLeft().getPayload().clusterid;
				final int rightId = treeNode.getRight().getPayload().clusterid;

				WritableTrainingDataNode leftOut = writer.createNode(leftId, 1);
				WritableTrainingDataNode rightOut = writer.createNode(rightId, 1);
				writer.add(leftOut);
				writer.add(rightOut);

				// Route each context to the left/right child per the question.
				TrainingDataUtil.splitData(current.getData(0), treeNode.getPayload().question,
						rightOut.getData(0), leftOut.getData(0));
			}
		}
		return childNodeCount;
	}
	
	/**
	 * Assigns the interpolation weight (lambda) of a backoff leaf according
	 * to the configured {@link BackoffType} strategy.
	 */
	private void populateBackoffCluster(BinaryTree<HistoryTreePayload> node) {
		HistoryTreePayload payload = node.getPayload();
		switch (backoff) {
		case ONE:
			payload.lambda = 1.0;
			break;
		case ZERO:
			payload.lambda = 0.0;
			break;
		case HEURISTICS:
			// Weight decays with the depth of the leaf's grandparent.
			int depth = node.getParent().getParent().distanceToRoot();
			payload.lambda = 1.0 / Math.sqrt(1 + depth);
			break;
		default:
			// any other strategy leaves lambda untouched
			break;
		}
		System.out.printf("backoff weight [%s]: %g\n", backoff, payload.lambda);
	}

	/**
	 * Estimates the backoff weight of every node on one level of the tree.
	 * Reads aligned per-node data from all splits, fetches (or synthesizes,
	 * at level 0) the parent's smoothed distribution, and schedules one
	 * {@link #computeNodeDiscount} job per node, throttled by a TaskSequencer.
	 *
	 * @param level tree level being estimated (0 = root)
	 * @param readers per-split readers positioned at the level's first node
	 * @param collection previous level's node data, or null at level 0
	 * @param nextCollection storage receiving this level's node data
	 * @throws IOException on read failure
	 */
	private void estimateBackoffLevel(int level, TrainingDataNodeReader readers[], 
			ActiveNodeStorage<ParentNodeData>.ActiveNodeCollection collection,
			final ActiveNodeStorage<ParentNodeData>.ActiveNodeCollection nextCollection) throws IOException 
	{
		JobManager manager = JobManager.getInstance();
		// NOTE(review): this group is created but never used -- confirm
		// createJobGroup has no required side effects before removing it.
		final JobGroup group = manager.createJobGroup("backoff estimation");
		
		// Deeper levels have more (smaller) nodes, so allow more concurrent
		// tasks -- but never more than about half the worker pool.
		int maxTasks = level / 2 + 1;
		if (maxTasks > manager.getNumWorkers() / 2 + 1) {
			maxTasks = manager.getNumWorkers() / 2 + 1;
		}
		final TaskSequencer sequencer = new TaskSequencer(maxTasks);
		
		while(true) {
			sequencer.grabInput();
			
			// Read the next node from every split; splits are aligned, so
			// reader #0 running dry means they are all exhausted.
			final ReadableTrainingDataNode dataNodes[] = new ReadableTrainingDataNode[readers.length];
			for(byte dataId=0; dataId<readers.length; ++dataId) {
				dataNodes[dataId] = readers[dataId].getNext();
			}
			if (dataNodes[0] == null) {
				sequencer.releaseInput();
				sequencer.finish();
				break;
			}
			final int nodeId = dataNodes[0].getNodeId();
			
			for(byte dataId = 1; dataId < readers.length; ++dataId) {
				assert(dataNodes[dataId].getNodeId() == nodeId);
			}
			
			final ParentNodeData parentNodeData;
			
			if (collection == null) {
				// Level 0 has no parent: interpolate against a uniform
				// distribution over all overt factor tuples.
				Set<FactorTuple> allOvertFactors = Experiment.getInstance().getTupleDescription().getAllOvertFactors().keySet();
				double prob = 1.0 / allOvertFactors.size();
				Long2DoubleMap uniformDist = new Long2DoubleMap(allOvertFactors.size());
				Long2IntMap uniformCounts = new Long2IntMap(allOvertFactors.size());
				for(FactorTuple tuple : allOvertFactors) {
					uniformDist.addAndGet(tuple.getBits(), prob);
					// make sure it's not pruned
					uniformCounts.addAndGet(tuple.getBits(), lm.getPruningThreshold()+1);
				}
				Long2DoubleMap smoothedParentDists[] = new Long2DoubleMap[nrDataFiles];
				Long2IntMap parentCounts[] = new Long2IntMap[nrDataFiles];
				for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
					smoothedParentDists[dataId] = uniformDist;
					parentCounts[dataId] = uniformCounts;
				}
				parentNodeData = new ParentNodeData(smoothedParentDists, parentCounts);
			} else {
				int parentId = nodes[nodeId].getParent().getPayload().clusterid;
				parentNodeData = collection.getNode(parentId);
			}
			
			Runnable run = new Runnable() {

				@Override
				public void run() {
					try {
						computeNodeDiscount(nodeId, dataNodes, parentNodeData, nextCollection, sequencer);
					} catch (IOException e) {
						e.printStackTrace();
					}
					sequencer.finish();
				}
				
			};
			Job job = new Job(run, "a job");
			manager.addJob(job);
			
		}
		sequencer.join();
	}
	
	/**
	 * A pass-through training-data filter that, as a side effect, computes and
	 * caches the backoff-LM cluster ids for every context it sees, then
	 * delegates to a downstream filter.
	 */
	private class DataFilter implements TrainingDataFilter {
		final TrainingDataFilter filter;
		final LanguageModel lms[];

		/**
		 * @param filter downstream filter to delegate to (may be null)
		 * @param group job group (retained for signature compatibility;
		 *        filtering now always runs inline)
		 * @param lms chain of backoff language models, outermost first
		 */
		public DataFilter(TrainingDataFilter filter, JobGroup group, LanguageModel lms[]) {
			this.filter = filter;
			this.lms = lms;
		}

		/* (non-Javadoc)
		 * @see edu.umd.clip.lm.model.training.TrainingDataFilter#filterData(edu.umd.clip.lm.model.training.ContextFuturesPair)
		 */
		@Override
		public ContextFuturesPair filterData(final ContextFuturesPair pair) {
			// Memoize the cluster ids of the reduced context in every backoff
			// LM; skip contexts that were already processed.
			Context ctx = boContextReducer.reduceContext(pair.getContext());
			if (contextClusterStorage.get(ctx) == null) {
				int clusters[] = new int[lms.length];
				for(int i=0; i<lms.length; ++i) {
					clusters[i] = lms[i].context2cluster(ctx);
				}
				contextClusterStorage.putIfAbsent(ctx, new ContextClusters(clusters));
			}

			if (filter != null) {
				return filter.filterData(pair);
			}
			return pair;
		}
		
	}
	
	/**
	 * Precomputes P_bo(word | context clusters) under the backoff LM chain for
	 * every (cluster-set, word) pair occurring in the given training data,
	 * caching the results in {@link #boCluster2WordProb}.  Work is batched per
	 * word and executed as parallel jobs; a single lock guards the shared
	 * cache.
	 *
	 * @param data level-0 training data to scan for (context, word) pairs
	 * @throws IOException if reading the training data fails
	 */
	@SuppressWarnings("unchecked")
	private void computeBackoffProbabilities(ReadableTrainingData data) throws IOException {
		Experiment exp = Experiment.getInstance();
		final FactorTupleDescription desc = exp.getTupleDescription();
		final long overtMask = desc.getOvertFactorsMask();

		final int numLMs = exp.getNumLMs();
		// NOTE, this is different from boLMs since it includes all LMs defined in the experiment
		//final int lmSequence[] = exp.getLmIdSequence(forest);
		final LanguageModel firstBackoff = exp.getLM(lm.getBackoffLM());
		final AbstractProbTreeStorage probTreeStorage = firstBackoff.getDecoder().getStorage();
		
		final int lmIds[] = new int[boLMs.length];
		for(byte i=0; i<boLMs.length; ++i) {
			lmIds[i] = boLMs[i].getIdNum();
		}

		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("backoff probs");
		// guards boCluster2WordProb, which all jobs below share
		final Lock lock = new ReentrantLock();
		
		for(; data.hasNext();) {
			// Invert the block: for every distinct (overt) word, collect the
			// set of backoff cluster tuples it was observed with.
			Map<FactorTuple,HashSet<ContextClusters>> wordsToClusters = new HashMap<FactorTuple,HashSet<ContextClusters>>(1000);
			
			TrainingDataBlock block = data.next();
			for(ContextFuturesPair pair : block) {
				Context boContext = boContextReducer.reduceContext(pair.getContext());
				ContextClusters clusters = contextClusterStorage.get(boContext);
				if (clusters == null) {
					System.err.printf("can't find the clusters for %s\n", boContext);
					continue;
				}
				
				for(TupleCountPair tc : pair.getFutures()) {
					// strip hidden factors: backoff probs are per overt word
					FactorTuple word = new FactorTuple(tc.tuple & overtMask);
					HashSet<ContextClusters> map = wordsToClusters.get(word);
					if (map == null) {
						map = new HashSet<ContextClusters>(); 
						wordsToClusters.put(word, map);
					}
					map.add(clusters);
				}				
			}
			
			// One job per word: batch-request the prob trees for all of its
			// clusters (pass 1), then decode them into probabilities (pass 2).
			for(Map.Entry<FactorTuple,HashSet<ContextClusters>> entry : wordsToClusters.entrySet()) {
				final FactorTuple word = entry.getKey();
				final HashSet<ContextClusters> clusterSet = entry.getValue();
				final int compactWord = desc.packOvertFactorsToInt(word.getBits());
				
				Runnable run = new Runnable() {

					@Override
					public void run() {
						RequestBundle<Key,OnDiskCompactProbTree> bundle = new RequestBundle<Key,OnDiskCompactProbTree>(probTreeStorage);

						long start = System.currentTimeMillis();
						
						// Pass 1: issue storage requests for every cluster/LM
						// pair whose probability is not cached yet.
						// NOTE(review): the lock()/unlock() pairs below are not
						// wrapped in try/finally; an exception between them
						// would leak the lock -- confirm the calls in between
						// cannot throw before relying on this.
						for(ContextClusters clusters : clusterSet) {
							lock.lock();
							OpenIntToDoubleHashMap dist = boCluster2WordProb.get(clusters);
							if (dist == null) {
								dist = new OpenIntToDoubleHashMap();
								boCluster2WordProb.put(clusters, dist);
							}

							if (dist.get(compactWord) > 0) {
								lock.unlock();
								// already computed
								continue;
							}
							lock.unlock();
							
							for(byte i = 0; i<boLMs.length; ++i) {
								int lmId = boLMs[i].getIdNum();
								Key key = new Key(lmId, clusters.getClusters()[i], word.getBits());
								bundle.request(key);
							}
						}
						
						// wait for all storage requests to complete
						try {
							bundle.sync();
						} catch (InterruptedException e) {}
						
						@SuppressWarnings("unused")
						long requestTime = System.currentTimeMillis() - start;
						
						start = System.currentTimeMillis();
						
						// Pass 2: decode the fetched prob trees and cache the
						// resulting probabilities under the lock.
						for(ContextClusters clusters : clusterSet) {
							lock.lock();
							OpenIntToDoubleHashMap dist = boCluster2WordProb.get(clusters);
							if (dist == null) {
								dist = new OpenIntToDoubleHashMap();
								boCluster2WordProb.put(clusters, dist);
							}

							if (dist.get(compactWord) > 0) {
								// already computed
								lock.unlock();
								continue;
							}
							lock.unlock();
							
							BinaryTree<HistoryTreePayload> requestNodes[] = new BinaryTree[numLMs];
							for(byte i=0; i<boLMs.length; ++i) {
								LanguageModel boLM = boLMs[i];
								requestNodes[boLM.getIdNum()] = boLM.getDecodingRuntime().getNode(clusters.getClusters()[i]);
							}
							OnDiskCompactProbTree probTree = firstBackoff.getDecoder().computeProbTree(lmIds, requestNodes, word.getBits(), bundle.getResults(), null);

							if (probTree != null) {
								lock.lock();
								dist.put(compactWord, probTree.getProb());
								lock.unlock();
							}
						}	
						
						@SuppressWarnings("unused")
						long decodingTime = System.currentTimeMillis() - start;
						/*
						System.out.printf("Word: %s (%d clusters) done (%gs requests, %gs decoding)\n", 
								word.toStringNoNull(), clusterSet.size(),
								0.001*requestTime, 0.001*decodingTime); 
						*/
					}
				};
				manager.addJob(group, new Job(run, "computing " + word.toStringNoNull()));
			}
			group.join();
			
		}
		System.out.printf("Total number of backoff clusters: %d\n", boCluster2WordProb.size());
	}
	
	/**
	 * Accumulates statistics from one block of training data: adds the block's
	 * raw counts to {@code dataCounts}, tallies per-backoff-cluster word
	 * counts into {@code boClusters2WordCounts}, and records every future
	 * tuple in {@code devWords}.
	 */
	private void computeNodeDiscount_doBlock(TrainingDataBlock block, 
			Map<ContextClusters, Long2IntMap> boClusters2WordCounts,
			HashSet<Long> devWords,
			Long2IntMap dataCounts) 
	{
		// fold the block's raw counts into the per-split totals
		block.addCounts(dataCounts);

		for (ContextFuturesPair pair : block) {
			Context reduced = boContextReducer.reduceContext(pair.getContext());
			ContextClusters clusters = contextClusterStorage.get(reduced);
			if (clusters == null) {
				System.err.printf("can't find the clusters for %s\n", reduced);
				continue;
			}

			Long2IntMap clusterWordCounts = boClusters2WordCounts.get(clusters);
			if (clusterWordCounts == null) {
				clusterWordCounts = new Long2IntMap(pair.getFutures().length);
				boClusters2WordCounts.put(clusters, clusterWordCounts);
			}

			for (TupleCountPair future : pair.getFutures()) {
				clusterWordCounts.addAndGet(future.tuple, future.count);
				devWords.add(future.tuple);
			}
		}
	}
	
	/**
	 * Streams all blocks of every data split through
	 * {@link #computeNodeDiscount_doBlock}, parallelizing large blocks as
	 * jobs.  The last block of each split is deferred until all input has been
	 * read so the sequencer's input slot can be released as early as possible.
	 *
	 * @param sequencer input sequencer to release once reading is done
	 * @param dataNodes this node's training data, one entry per split
	 * @param dataCounts per-split output: raw word counts
	 * @param devWords per-split output: set of observed future tuples
	 * @param boClusters2WordCounts per-split output: word counts per backoff cluster
	 * @throws IOException on read failure
	 */
	private void computeNodeDiscount_processInput(TaskSequencer sequencer,
			final ReadableTrainingDataNode dataNodes[],
			final Long2IntMap dataCounts[],
			final HashSet<Long> devWords[],
			final Map<ContextClusters, Long2IntMap> boClusters2WordCounts[]) throws IOException 
	{
		// keep the last block unprocessed to release the sequencer sooner
		TrainingDataBlock blocks[] = new TrainingDataBlock[dataNodes.length];
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("counting blocks");
		
		int nrFinishedNodes = 0;
		boolean finishedNodes[] = new boolean[dataNodes.length];
		
		while(nrFinishedNodes < dataNodes.length) {
			for(byte dId = 0; dId < dataNodes.length; ++dId) {
				if (finishedNodes[dId]) continue;
				
				ReadableTrainingData data = dataNodes[dId].getData(0);
				if (data.hasNext()) {
					TrainingDataBlock block = data.next();
					if (blocks[dId] != null) {
						computeNodeDiscount_countBlock(manager, group, blocks[dId],
								boClusters2WordCounts[dId], devWords[dId], dataCounts[dId]);
					}
					blocks[dId] = block;
				} else {
					finishedNodes[dId] = true;
					++nrFinishedNodes;
				}
			}
			group.join();
		}
		
		// all input has been buffered; the readers are no longer needed
		sequencer.releaseInput();
		
		// process the deferred last block of every data split
		for(byte dId = 0; dId < dataNodes.length; ++dId) {
			if (blocks[dId] != null) {
				computeNodeDiscount_countBlock(manager, group, blocks[dId],
						boClusters2WordCounts[dId], devWords[dId], dataCounts[dId]);
			}
		}
		group.join();
	}
	
	/**
	 * Counts one block, as a parallel job when the block is large enough to be
	 * worth the scheduling overhead, inline otherwise.
	 */
	private void computeNodeDiscount_countBlock(JobManager manager, JobGroup group,
			final TrainingDataBlock block,
			final Map<ContextClusters, Long2IntMap> wordCounts,
			final HashSet<Long> wordSet,
			final Long2IntMap dataCounts)
	{
		Runnable run = new Runnable() {
			@Override
			public void run() {
				computeNodeDiscount_doBlock(block, wordCounts, wordSet, dataCounts);
			}
		};
		
		if (block.size() > 1000) {
			manager.addJob(group, new Job(run, "computeNodeDiscount_doBlock"));
		} else {
			run.run();
		}
	}
	
	/**
	 * Estimates the backoff weight of one tree node by leave-one-out
	 * cross-validation over the data splits: for each heldout split, the
	 * node's distribution is built from the remaining splits, smoothed with
	 * the parent's distribution via the node's lambda, and the backoff weight
	 * is optimized on the heldout counts; the per-split weights are then
	 * averaged.  Finally the node's own smoothed distributions are stored for
	 * use by its children on the next level.
	 *
	 * @param clusterid cluster id of the node being estimated
	 * @param dataNodes this node's training data, one entry per data split
	 * @param parentNodeData the parent's smoothed distributions and counts
	 * @param nextCollection storage receiving this node's distributions
	 * @param sequencer input sequencer released once the data has been read
	 * @throws IOException if reading the training data fails
	 */
	@SuppressWarnings("unchecked")
	private void computeNodeDiscount(final int clusterid,  
			ReadableTrainingDataNode dataNodes[], final ParentNodeData parentNodeData,
			final ActiveNodeStorage<ParentNodeData>.ActiveNodeCollection nextCollection,
			/*JobGroup group,*/ TaskSequencer sequencer) throws IOException 
	{
		
		// make train and heldout distributions
		final Long2IntMap dataCounts[] = new Long2IntMap[nrDataFiles];
		//final Long2IntMap devCounts[] = new Long2IntMap[nrDataFiles];
		final HashSet<Long> devWords[] = new HashSet[nrDataFiles];
		// maps lower order models clusters to the future words and counts (heldout data only)
		final Map<ContextClusters, Long2IntMap> boClusters2WordCounts[] = new HashMap[nrDataFiles];
		
		for(byte dataId=0; dataId < nrDataFiles; ++dataId) {
			dataCounts[dataId] = new Long2IntMap(parentNodeData.smoothedDists[0].size());
			//devCounts[dataId] = new Long2IntMap(parentDist.size());
			devWords[dataId] = new HashSet<Long>(parentNodeData.smoothedDists[0].size());
			
			boClusters2WordCounts[dataId] = new HashMap<ContextClusters, Long2IntMap>();
		}
		
		computeNodeDiscount_processInput(sequencer, dataNodes, dataCounts, devWords, boClusters2WordCounts);
		
		// Backoff leaves get their weight from populateBackoffCluster();
		// no optimization is needed here.
		final BinaryTree<HistoryTreePayload> theNode = nodes[clusterid];
		if (theNode.getPayload().isBackoff) {
			theNode.getPayload().backoff = 0.0;
			return;
		}

		final boolean doPrune = lm.getPruningThreshold() > 0;
		/*
		Runnable run = new Runnable() {

			@Override
			public void run() 
			*/
			{
				//Long2IntMap trainCounts[] = new Long2IntMap[nrDataFiles];
				
				double boWeights[] = new double[nrDataFiles];
				Long2DoubleMap smoothedDists[] = new Long2DoubleMap[nrDataFiles];
				
				final int pruningThreshold = lm.getPruningThreshold();
				
				// leave-one-out: each split takes a turn as the heldout (dev) data
				for(byte devDataId=0; devDataId<nrDataFiles; ++devDataId) {
					
					// pool the counts of all splits except the heldout one
					Long2IntMap trainCounts = new Long2IntMap(dataCounts[0].size()*2);
					Long2IntMap parentTrainCounts = new Long2IntMap(dataCounts[0].size()*2);
					for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
						if (dataId != devDataId) {
							trainCounts.addMap(dataCounts[dataId]);
							
							if (doPrune) {
								parentTrainCounts.addMap(parentNodeData.counts[dataId]);
							}
						}
					}
					
					// prune words rare both in this node and in its parent
					if (doPrune) {
						for(Long2IntMap.Iterator it = trainCounts.iterator(); it.hasNext();) {
							Long2IntMap.Entry e = it.next();
							if (e.getValue() <= pruningThreshold && parentTrainCounts.get(e.getKey()) <= pruningThreshold) {
								it.remove();
							}
						}
					}
					
					final long totalTrainCounts = trainCounts.sumValues();
					//long totalDevCounts = dataCounts[devDataId].sumValues();
	
					Long2DoubleMap smoothedDist = new Long2DoubleMap(trainCounts.size());

					// interpolate trainCounts with the parent's smoothed
					// distribution using this node's lambda
					double theLambda = theNode.getPayload().lambda;
					Long2DoubleMap smoothedParentDist = parentNodeData.smoothedDists[devDataId];
					for(long word : devWords[devDataId]) {
						int trainCount = trainCounts.get(word);
						double parentProb = smoothedParentDist.get(word);
						double prob = (1-theLambda) * parentProb;
						if (trainCount > 0 && totalTrainCounts > 0) {
							prob += theLambda * trainCount / totalTrainCounts;
						}
						if (Double.isNaN(prob)) {
							System.err.printf("smth wrong with %s in #%d: trainCount=%d/%d, theLambda=%g, parenProb=%g\n", 
									FactorTuple.toStringNoNull(word), theNode.getPayload().clusterid,
									trainCount, totalTrainCounts, 
									theLambda, parentProb);
							continue;
						}
						smoothedDist.addAndGet(word, prob);
					}
					
					// ta-da! optimize the backoff weight on the heldout counts
					double boWeight = computeBackoffProbability(boClusters2WordCounts[devDataId], smoothedDist);
					
					boWeights[devDataId] = boWeight;
					smoothedDists[devDataId] = smoothedDist;
				}
				
				// interpolate the backoff weight: average over the heldout runs
				double avgWeight = 0;
				for(double w : boWeights) {
					avgWeight += w / boWeights.length;
				}
				System.err.printf("cluster #%d bo=%g, %s\n", clusterid, avgWeight, Arrays.toString(boWeights));
				
				if (avgWeight < 1e-6) {
					// stick to zero
					avgWeight = 0;
				}
				
				theNode.getPayload().backoff = avgWeight;
				
				// publish this node's distributions/counts for its children
				ParentNodeData newNodeData = new ParentNodeData(smoothedDists, dataCounts);
				
				nextCollection.putNode(clusterid, newNodeData);
			}
		/*
		};
		
		Job job = new Job(run, "optimizing cluster #" + Integer.toString(clusterid));
		JobManager.getInstance().addJob(group, job);
		*/
	}
	
	/**
	 * Finds the backoff weight w that maximizes the likelihood of the heldout
	 * counts under the mixture  w * P_bo + (1-w) * P_smoothed,  using an
	 * EM-style fixed-point iteration (at most 100 rounds, relative tolerance
	 * 0.001).  The starting point is randomized in [0.4, 0.6), so repeated
	 * runs may differ slightly before convergence.
	 *
	 * @param boClusters2WordCounts heldout word counts per backoff cluster set
	 * @param smoothedDist this node's smoothed word distribution
	 * @return the optimized backoff weight (1.0 when there is no heldout data)
	 */
	private double computeBackoffProbability(Map<ContextClusters, Long2IntMap> boClusters2WordCounts,
			Long2DoubleMap smoothedDist) 
	{
		long start = System.currentTimeMillis();
		FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
		
		// random start to avoid a degenerate fixed point
		double boWeight = 0.4 + 0.2 * Math.random();
		int iteration = 0;
		while(true) {
			double theSum = 0;
			long totalDevCount = 0;
			
			for(Entry<ContextClusters, Long2IntMap> entry : boClusters2WordCounts.entrySet()) {
				ContextClusters clusters = entry.getKey();
				Long2IntMap devCounts = entry.getValue();
				// NOTE(review): assumes every cluster set seen here was cached
				// by computeBackoffProbabilities -- a missing entry would NPE
				// at the get(compactWord) below; confirm.
				OpenIntToDoubleHashMap boClusterProbs = boCluster2WordProb.get(clusters);
				
				for(Long2IntMap.Iterator it = devCounts.iterator(); it.hasNext(); ) {
					Long2IntMap.Entry e = it.next();
					long word = e.getKey();
					int compactWord = desc.packOvertFactorsToInt(word);
					
					int count = e.getValue();
					totalDevCount += count;
					
					double boProb = boClusterProbs.get(compactWord);
					double smoothedProb = smoothedDist.get(word);
					// posterior share of the backoff component for this word
					// NOTE(review): `addition` is not multiplied by `count`
					// while the denominator sums counts -- confirm this
					// weighting is intended.
					double addition = boProb / (boWeight*boProb + (1-boWeight)*smoothedProb);
					if (Double.isNaN(addition)) {
						System.err.printf("smth wrong with %s: boWeight=%g, boProb=%g, smoothedProb=%g\n", 
								FactorTuple.toStringNoNull(word), boWeight, boProb, smoothedProb);
						continue;
					}
					theSum += addition;
				}
			}

			if (totalDevCount == 0) {
				// no heldout evidence: fall back entirely to the backoff LM
				boWeight = 1.0;
				break;
			}
			
			// fixed-point update; stop on convergence or iteration cap
			double newBoWeight = boWeight * theSum / totalDevCount;
			if (ProbMath.approxEqual(boWeight, newBoWeight, 0.001) || ++iteration > 100) {
				boWeight = newBoWeight;
				break;
			}
			boWeight = newBoWeight;
		}
		long end = System.currentTimeMillis();
		System.err.printf("boWeight = %g, %d iterations, %dms\n", boWeight, iteration, (end-start));
		
		return boWeight;
	}
	
	/**
	 * Builds the on-disk name of a level's data file, e.g. "layer-003-data-1".
	 *
	 * @param level tree level (zero-padded to at least three digits)
	 * @param dataId index of the data split
	 * @return the data file name for the given level and split
	 */
	public static String makeDataFilename(int level, int dataId) {
		final String name = String.format("layer-%03d-data-%d", level, dataId);
		return name;
	}
	/** @return the currently configured backoff weighting strategy */
	public BackoffType getBackoff() {
		return this.backoff;
	}

	/** @param backoff the backoff weighting strategy to use for backoff leaves */
	public void setBackoff(BackoffType backoff) {
		this.backoff = backoff;
	}
}
