/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.lang.ref.SoftReference;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import lbfgsb.*;

import edu.berkeley.nlp.util.MutableInteger;
import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.ForestModel.DecodingRuntime;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;

import com.sleepycat.util.PackedInteger;
/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class ForestInterpolation {
	// Smallest interpolation weight allowed during optimization; keeps weights
	// strictly positive so mixture probabilities never collapse to zero.
	private static final double MIN_WEIGHT = 1e-6;
	
	// The forest of language models whose interpolation weights are trained here.
	private final ForestModel forest;
	// Working directory for intermediate training/heldout data files.
	private File dataDir;
	// Number of data splits (folds) used for cross-validated interpolation.
	private final int nrDataFiles;
	// Number of component language models in the forest.
	private final int nrModels;
	// LBFGS-based optimizer for the inner-node interpolation weights.
	private InnerNodesLBFGS innerNodesEM;
	
	/**
	 * Creates an interpolation trainer for the given forest of models.
	 *
	 * @param forest the forest model whose component LMs will be interpolated
	 */
	public ForestInterpolation(ForestModel forest) {
		this.forest = forest;
		this.nrModels = forest.getModels().size();
		this.nrDataFiles = Experiment.getInstance().getFiles().getInterpolateData().size();
	}
	
	/**
	 * Prepares the trainer's on-disk data.
	 *
	 * @param dataDir     working directory for intermediate data files
	 * @param justLoad    only deserialize previously computed word counts
	 * @param justHeldout reuse previously computed word counts but rebuild the heldout data
	 * @throws IOException if preparing any of the data files fails
	 */
	public void initialize(File dataDir, boolean justLoad, boolean justHeldout) throws IOException {
		this.dataDir = dataDir;
		OnDiskTrainingDataNodeWriter.setTempDir(dataDir);

		Word2ClustersCounts.setNrDataFiles(nrDataFiles);
		Word2ClustersCounts.setNrModels(nrModels);

		innerNodesEM = new InnerNodesLBFGS();

		// full run: build the level-0 training data first
		if (!justLoad && !justHeldout) {
			initializeTrainingData();
		}
		// word counts are recomputed only on a full run, otherwise loaded from disk
		initializeWordCounts(justLoad || justHeldout);
		// heldout data is rebuilt unless we are only loading the counts
		if (!justLoad) {
			initializeHeldoutData();
		}
	}

	/**
	 * Builds the level-0 training data file for every fold by combining that
	 * fold's interpolation data files and reducing contexts to the forest's
	 * overt/hidden orders.
	 *
	 * @throws IOException if reading the inputs or writing the combined data fails
	 */
	private void initializeTrainingData() throws IOException {
		final Experiment exp = Experiment.getInstance();
		final Experiment.Files files = exp.getFiles();

		// keep only the overt factors of the futures
		final TrainingDataFilter filter =
			new MaskedFuturesTrainingDataFilter(exp.getTupleDescription().getOvertFactorsMask());

		for (int split = 0; split < nrDataFiles; ++split) {
			final String outputPath = new File(dataDir, makeDataFilename(0, split, 0)).getAbsolutePath();
			TrainingDataUtil.combineAndReduceContext(files.getInterpolateDataFiles(split),
					outputPath,
					forest.getOvertOrder(), forest.getHiddenOrder(),
					1, filter);
		}
	}
	
	/**
	 * Holds, for every (fold, model) pair, the total count of each cluster plus
	 * on-disk per-word cluster counts, built by streaming the training data
	 * down each model's history tree.
	 *
	 * NOTE(review): the class is Serializable but forest and lock are
	 * transient, so a deserialized instance (initializeWordCounts with
	 * justLoad=true) has them null; populateData must not be called on such an
	 * instance — confirm against callers.
	 */
	private static class ModelWordClusterCounts implements Serializable {
		private static final long serialVersionUID = 1L;
		// file (under baseDir) this object is serialized to by finish()
		public static final String clusterCountsFilename = "cluster-counts.ser";
		// subdirectory (under baseDir) holding one counts file per word
		public static final String wordCountsDirname = "word-counts";

		//private final int dataid;
		//private final int lmNum;
		// total cluster counts, indexed [fold][lmNum][clusterid]
		private final long[][][] clusterCounts;
		private final File baseDir;
		private final transient ForestModel forest;
		
		// per-word cluster counts, keyed by the word's factor tuple
		private HashMap<FactorTuple,Word2ClustersCounts> wordCounts;
		// guards wordCounts; populateData runs concurrently for several (fold, lmNum) pairs
		private final transient ReadWriteLock lock;
		
		/**
		 * Allocates count arrays sized to each model's node table and creates
		 * the word-counts directory under baseDir.
		 */
		public ModelWordClusterCounts(ForestModel forest, int nrDataFiles, File baseDir) {
			this.forest = forest;
			//this.dataid = dataid;
			//this.lmNum = lmNum;
			int nrModels = forest.getModels().size();
			this.clusterCounts = new long[nrDataFiles][][];
			for(int fold=0; fold<nrDataFiles; ++fold) {
				this.clusterCounts[fold] = new long[nrModels][];
				for(int lmNum=0; lmNum<nrModels; ++lmNum) {
					this.clusterCounts[fold][lmNum] = new long[forest.getModels().get(lmNum).getNodes().length];
				}
			}
			this.baseDir = baseDir;
			
			
			wordCounts = new HashMap<FactorTuple,Word2ClustersCounts>(10000);
			
			File theDir = new File(baseDir, wordCountsDirname);
			if (!theDir.isDirectory()) {
				theDir.mkdirs();
			}

			lock = new ReentrantReadWriteLock();
		}
		
		/** Directory name for a (fold, model) pair, e.g. "0-2". */
		public static String makeBaseDirname(int dataid, int lmNum) {
			return Integer.toString(dataid) + "-" + Integer.toString(lmNum);
		}
		
		/**
		 * Streams fold dataid's data down model lmNum's history tree, level by
		 * level: each node's data is either tallied (leaf) or split between its
		 * two children and written to the next level's file. Accumulates
		 * per-cluster totals and per-word cluster counts along the way.
		 *
		 * @param dataDir   directory containing the level-0 data file for this fold
		 * @param dataid    fold index
		 * @param lmNum     model index
		 * @param fileCache notified when word-count files are opened for writing
		 */
		public void populateData(File dataDir, int dataid, int lmNum, Observer fileCache) throws IOException {
			final LanguageModel lm = forest.getModels().get(lmNum);
			final BinaryTree<HistoryTreePayload> nodes[] = lm.getNodes();
			
			int level = 0;
			while(true) {
				File oldFile = new File(dataDir, makeDataFilename(level, dataid, lmNum));
				File newFile = new File(dataDir, makeDataFilename(level+1, dataid, lmNum));
				
				// NOTE(review): the FileInputStream/channel is only released via
				// reader.close() below — confirm the reader closes its channel.
				FileChannel inputChannel = new FileInputStream(oldFile).getChannel();
				TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannel));

				RandomAccessFile outFile = new RandomAccessFile(newFile, "rw");
				// discard any stale content from a previous run
				outFile.getChannel().truncate(0);

				TrainingDataNodeWriter writer = new OnDiskTrainingDataNodeWriter(outFile.getChannel());
				writer = new BufferedTrainingDataNodeWriter(writer);

				// child nodes emitted at this level; 0 means every node was a leaf
				int nodeCount = 0;
				while(true) {
					ReadableTrainingDataNode dataNode = reader.getNext();
					if (dataNode == null) break;
					
					int clusterid = dataNode.getNodeId();
					BinaryTree<HistoryTreePayload> node = nodes[clusterid];
					
					// word -> count within this cluster
					Long2IntMap counts = new Long2IntMap();
					
					if (node.isLeaf()) {
						// leaf: just tally the counts, nothing to split
						ReadableTrainingData data = dataNode.getData(0);
						while(data.hasNext()) {
							TrainingDataBlock block = data.next();
							block.addCounts(counts);
						}
					} else {
						nodeCount += 2;
						
						BinaryTree<HistoryTreePayload> left = node.getLeft();
						BinaryTree<HistoryTreePayload> right = node.getRight();
						
						int leftNodeId = left.getPayload().clusterid;
						int rightNodeId = right.getPayload().clusterid;
						
						WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
						WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
						
						writer.add(leftDataNode);
						writer.add(rightDataNode);
					
						// route data to the children by this node's question,
						// tallying word counts as a side effect
						TrainingDataUtil.splitData(dataNode.getData(0), node.getPayload().question, 
								rightDataNode.getData(0), leftDataNode.getData(0), counts);

					}
					
					if (node.getParent() == null) {
						// the unigram model: add-one smooth over the whole vocabulary,
						// then ensure a Word2ClustersCounts file exists for every word
						counts = adjustUnigramCounts(counts);

						File theDir = new File(baseDir, wordCountsDirname);
						
						for(Long2IntMap.Iterator it = counts.iterator(); it.hasNext(); ) {
							Long2IntMap.Entry e = it.next();
							long word = e.getKey();
							FactorTuple tuple = new FactorTuple(word);
							
							Word2ClustersCounts myWordCounts;
							
							// NOTE(review): unlock is not in a finally block — an exception
							// between lock() and unlock() would leave the write lock held.
							lock.writeLock().lock();
							myWordCounts = wordCounts.get(tuple);
							if (myWordCounts == null) {
								myWordCounts = new Word2ClustersCounts(new File(theDir, Long.toString(word)));
								myWordCounts.openForWriting(fileCache);
								wordCounts.put(tuple, myWordCounts);
							}
							lock.writeLock().unlock();
						}
					}
					
					long totalClusterCount = 0;
					for(Long2IntMap.Iterator it = counts.iterator(); it.hasNext(); ) {
						Long2IntMap.Entry e = it.next();
						
						long word = e.getKey();
						int count = e.getValue();
						
						totalClusterCount += count;
						
						lock.readLock().lock();
						Word2ClustersCounts myWordCounts = wordCounts.get(new FactorTuple(word));
						lock.readLock().unlock();
						
						// presumably non-null because every word is registered at the root
						// level before deeper levels are processed — confirm ordering
						myWordCounts.addClusterCount(dataid, lmNum, clusterid, count);
					}
					clusterCounts[dataid][lmNum][clusterid] = totalClusterCount;
				}
				writer.close();
				reader.close();
				
				if (level > 0) {
					// intermediate files are temporary; keep only the level-0 input
					oldFile.delete();
				}
				if (nodeCount == 0) {
					// no children produced: tree fully traversed
					newFile.delete();
					break;
				}
				++level;
			}
		}
		
		/**
		 * Flushes all per-word count files and serializes this object so a later
		 * run can restart interpolation without recounting.
		 */
		private void finish() throws IOException {
			for(Word2ClustersCounts w2c : wordCounts.values()) {
				w2c.finishWriting();
			}
			
			// serialize myself, just in case we want to restart interpolation later
			ObjectOutputStream ous = new ObjectOutputStream(IO.getOutputStream(new File(baseDir, clusterCountsFilename)));
			ous.writeObject(this);
			ous.close();			
		}
	}
	
	/**
	 * Applies add-one smoothing over the entire overt vocabulary: every word
	 * (seen or unseen) gets its count incremented by one, except the null and
	 * sentence-start tuples which are excluded entirely.
	 *
	 * @param counts observed word counts
	 * @return *all* (including the unseen) counts with 1 added
	 */
	public static Long2IntMap adjustUnigramCounts(Long2IntMap counts) {
		final FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
		final Long2IntMap smoothed = new Long2IntMap(counts.size());

		final long nullWord = desc.createTuple();
		final long startWord = desc.createStartTuple() & desc.getOvertFactorsMask();

		for (FactorTuple overtFactors : desc.getAllOvertFactors().keySet()) {
			final long word = overtFactors.getBits();
			if (word == nullWord || word == startWord) {
				continue; // never emit counts for the null/start pseudo-words
			}
			smoothed.put(word, counts.get(word) + 1);
		}

		return smoothed;
	}

	/**
	 * Deletes all serialized word/cluster count files and their directories for
	 * every (fold, model) pair under dataDir. Best-effort: failures to delete
	 * individual files are ignored, matching File.delete() semantics.
	 */
	private void clearWordCounts() {
		for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
			for (int dataid = 0; dataid < nrDataFiles; ++dataid) {
				File baseDir = new File(dataDir, ModelWordClusterCounts.makeBaseDirname(dataid, lmNum));

				File clusterCountsFile = new File(baseDir, ModelWordClusterCounts.clusterCountsFilename);
				clusterCountsFile.delete();

				File wordCountsDir = new File(baseDir, ModelWordClusterCounts.wordCountsDirname);
				// listFiles() returns null when the directory does not exist or cannot
				// be read; the original code would NPE here on a partial/clean tree
				File files[] = wordCountsDir.listFiles();
				if (files != null) {
					for (File file : files) {
						file.delete();
					}
				}
				wordCountsDir.delete();

				baseDir.delete();
			}
		}
	}
	// Shared word/cluster count statistics for all (dataid, lmNum) pairs
	// (internally indexed [fold][lmNum][clusterid]).
	private ModelWordClusterCounts dataModelCounts;
	// Caps the number of simultaneously open output files at 800, to stay
	// under OS file-descriptor limits — TODO confirm the chosen limit.
	private final DetachableOutputFileCache fileCache = new DetachableOutputFileCache(800);
	
	/**
	 * Builds (or reloads) the per-word cluster count statistics.
	 *
	 * When justLoad is true the previously serialized ModelWordClusterCounts is
	 * deserialized from dataDir; otherwise the counts are recomputed by running
	 * populateData for every (fold, model) pair in parallel jobs.
	 *
	 * @param justLoad whether to deserialize previously computed counts instead of recomputing
	 * @throws IOException on I/O failure, or if the serialized counts cannot be deserialized
	 */
	private void initializeWordCounts(boolean justLoad) throws IOException {
		if (justLoad) {
			ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, ModelWordClusterCounts.clusterCountsFilename)));
			try {
				dataModelCounts = (ModelWordClusterCounts) ois.readObject();
			} catch (ClassNotFoundException e) {
				// previously only logged, which left dataModelCounts null and
				// caused an NPE later; surface it as an I/O failure instead
				IOException ioe = new IOException("cannot deserialize cluster counts from " + dataDir);
				ioe.initCause(e);
				throw ioe;
			} finally {
				ois.close();
			}
			return;
		}

		dataModelCounts = new ModelWordClusterCounts(forest, nrDataFiles, dataDir);

		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("count initialization");

		for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
			final int _lmNum = lmNum;

			for (int dataid = 0; dataid < nrDataFiles; ++dataid) {
				final int fold = dataid;

				Runnable run = new Runnable() {
					@Override
					public void run() {
						try {
							dataModelCounts.populateData(dataDir, fold, _lmNum, fileCache);
						} catch (IOException e) {
							// runs on a worker thread; log and continue with partial counts
							e.printStackTrace();
						}
					}
				};
				manager.addJob(group, new Job(run, "populating word counts"));
			}
		}
		group.join();
		dataModelCounts.finish();
		fileCache.clear();
	}
	
	/**
	 * Builds the heldout data file for every fold: clusters each context under
	 * every model, caches (word -> context -> count) maps in memory, spills
	 * them to per-word temp files when the cache exceeds MAX_HELDOUT_RECORDS,
	 * and finally merges each word's spilled records into a single aggregated
	 * HeldoutDataRecord written to the fold's heldout file.
	 *
	 * @throws IOException if reading fold data or writing the heldout files fails
	 */
	private void initializeHeldoutData() throws IOException {
		Experiment exp = Experiment.getInstance();
		final int vocabSize = exp.getTupleDescription().getAllOvertFactors().size();
		
		JobManager manager = JobManager.getInstance();
		// contexts clustered per parallel job
		final int MAX_CHUNK_SIZE = 10000;
		// spill threshold for the in-memory (word, context) -> count cache
		final int MAX_HELDOUT_RECORDS = 1000000;
		// how many words' cluster counts are prefetched ahead of the write loop
		final int MAX_PRELOADED_COUNTS = 5;
		
		File tempHeldoutDataDir = new File(dataDir, "temp-heldout");
		if (!tempHeldoutDataDir.isDirectory()) {
			tempHeldoutDataDir.mkdirs();
		}
		
		//final DetachableOutputFileCache fileCache = new DetachableOutputFileCache(400);
		for(int split=0; split<nrDataFiles; ++split) {
			File dataFile = new File(dataDir, makeDataFilename(0, split, 0));
			
			FileChannel inputChannel = new FileInputStream(dataFile).getChannel();
			TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannel));

			// access-ordered LRU: least recently touched words are spilled first
			final LinkedHashMap<FactorTuple, Map<ClusteredContext,MutableInteger>> cachedHeldoutData = 
				new LinkedHashMap<FactorTuple, Map<ClusteredContext,MutableInteger>>(vocabSize, 0.75f, true);
			
			// number of distinct (word, context) pairs currently cached
			final AtomicInteger cachedHeldoutRecords = new AtomicInteger();
			
			// open temp output streams, one per word
			final HashMap<FactorTuple, DetachableOutputStream> tempHeldoutData = new HashMap<FactorTuple, DetachableOutputStream>(); 
			
			while(true) {
				ReadableTrainingDataNode dataNode = reader.getNext();
				if (dataNode == null) break;

				ReadableTrainingData data = dataNode.getData(0);
				while(data.hasNext()) {
					TrainingDataBlock block = data.next();
					

					int remaining = block.size(); 
					Iterator<ContextFuturesPair> it = block.iterator();
					while(remaining > 0) {
						// take the next chunk and cluster its contexts in parallel
						final ContextFuturesPair chunk[] = new ContextFuturesPair[Math.min(MAX_CHUNK_SIZE, remaining)];
						for(int i=0; i<chunk.length; ++i) {
							chunk[i] = it.next();
						}
						remaining -= chunk.length;
						
						JobGroup chunkGroup = manager.createJobGroup("chunk group");
						ClusteredContext[] clusteredContext = clusterContext(chunk, chunkGroup);
						chunkGroup.join();
						
						// merge the chunk's future counts into the cache
						int chunkCount = 0;
						for(int i=0; i<chunk.length; ++i) {
							ClusteredContext ctx = clusteredContext[i];
							TupleCountPair futures[] = chunk[i].getFutures();
							
							for(TupleCountPair tc : futures) {
								FactorTuple word = new FactorTuple(tc.tuple);
								Map<ClusteredContext,MutableInteger> map = cachedHeldoutData.get(word);
								if (map == null) {
									map = new HashMap<ClusteredContext,MutableInteger>();
									cachedHeldoutData.put(word, map);
								}
								
								MutableInteger count = map.get(ctx);
								if (count == null) {
									count = new MutableInteger();
									map.put(ctx, count);
									++chunkCount;
								}
								count.add(tc.count);
							}
						}
						cachedHeldoutRecords.addAndGet(chunkCount);
						
						if (cachedHeldoutRecords.intValue() > MAX_HELDOUT_RECORDS) {
							// cache too large: spill least-recently-used words to temp files
							Iterator<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>> iter = 
								cachedHeldoutData.entrySet().iterator();
							ArrayList<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>> entries = new ArrayList<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>>();
							
							// NOTE(review): cachedHeldoutRecords is only decremented in the
							// write loop below, so the condition never changes inside this
							// loop and ALL cached entries are removed — confirm intent.
							while(iter.hasNext() && cachedHeldoutRecords.intValue() > MAX_HELDOUT_RECORDS) {
								Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>> entry = iter.next();
								iter.remove();
								entries.add(entry);
								
							}
							
							// prefetch the first few words' cluster counts on background threads
							@SuppressWarnings("unchecked")
							final SoftReference<CompactReadOnlyInt2IntHashMap[][]> preloadedCounts[] = new SoftReference[entries.size()];
							for(int i=1; i<MAX_PRELOADED_COUNTS && i<entries.size(); ++i) {
								preloadWordClusterCounts(preloadedCounts, i, entries.get(i).getKey());
							}
							
							for(int i=0; i<entries.size(); ++i) {
								Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>> entry = entries.get(i);
								FactorTuple tuple = entry.getKey();
								
								HeldoutDataRecord rec = makeHeldoutDataRecord(tuple.getBits(), split, entry.getValue());

								if (i >= MAX_PRELOADED_COUNTS) {
									// clear the old ones (sliding prefetch window)
									for(int j=i-MAX_PRELOADED_COUNTS; j<i; ++j) {
										preloadedCounts[j] = null;
									}
								}
								
								if (i + MAX_PRELOADED_COUNTS < entries.size()) {
									// start the new one 
									preloadWordClusterCounts(preloadedCounts, i+MAX_PRELOADED_COUNTS, entries.get(i+MAX_PRELOADED_COUNTS).getKey());
								}
								
								DetachableOutputStream out = tempHeldoutData.get(tuple);
								if (out == null) {
									out = new DetachableOutputStream(new File(tempHeldoutDataDir, Long.toString(tuple.getBits())), 8192, 4096);
									out.addFileOpenListener(fileCache);
									tempHeldoutData.put(tuple, out);
								}
								rec.write(out);
								//rec.writeExternal(output);
								
								cachedHeldoutRecords.addAndGet(- entry.getValue().size());
							}
						}
					}
				}
			}
			reader.close();
			
			{
				// final flush of whatever is still cached
				Iterator<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>> iter = 
					cachedHeldoutData.entrySet().iterator();

				ArrayList<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>> entries = new ArrayList<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>>();
				
				// NOTE(review): this condition mirrors the mid-stream spill above, but
				// here it means nothing is flushed (and the cache is then cleared) when
				// cachedHeldoutRecords <= MAX_HELDOUT_RECORDS — suspected bug; a final
				// flush would normally be just while(iter.hasNext()). Confirm intent.
				while(iter.hasNext() && cachedHeldoutRecords.intValue() > MAX_HELDOUT_RECORDS) {
					Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>> entry = iter.next();
					entries.add(entry);
				}
				cachedHeldoutData.clear();
				
				@SuppressWarnings("unchecked")
				final SoftReference<CompactReadOnlyInt2IntHashMap[][]> preloadedCounts[] = new SoftReference[entries.size()];
				for(int i=1; i<MAX_PRELOADED_COUNTS && i<entries.size(); ++i) {
					preloadWordClusterCounts(preloadedCounts, i, entries.get(i).getKey());
				}
				
				for(int i=0; i<entries.size(); ++i) {
					Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>> e = entries.get(i);
					FactorTuple tuple = e.getKey();
	
					HeldoutDataRecord rec = makeHeldoutDataRecord(tuple.getBits(), split, e.getValue());
					
					if (i >= MAX_PRELOADED_COUNTS) {
						// clear the old ones (sliding prefetch window)
						for(int j=i-MAX_PRELOADED_COUNTS; j<i; ++j) {
							preloadedCounts[j] = null;
						}
					}
					
					if (i + MAX_PRELOADED_COUNTS < entries.size()) {
						// start the new one 
						preloadWordClusterCounts(preloadedCounts, i+MAX_PRELOADED_COUNTS, entries.get(i+MAX_PRELOADED_COUNTS).getKey());
					}
					
					DetachableOutputStream out = tempHeldoutData.get(tuple);
					if (out == null) {
						out = new DetachableOutputStream(new File(tempHeldoutDataDir, Long.toString(tuple.getBits())), 8192, 4096);
						out.addFileOpenListener(fileCache);
						tempHeldoutData.put(tuple, out);
					}
					rec.write(out);
					// NOTE(review): this stream is closed again in the loop just below —
					// a double close; harmless for most streams but worth confirming.
					out.close();
					//rec.writeExternal(output);				
				}
			}
			for(DetachableOutputStream out : tempHeldoutData.values()) {
				out.close();
			}
			
			FactorTuple tuples[] = tempHeldoutData.keySet().toArray(new FactorTuple[tempHeldoutData.size()]);
			tempHeldoutData.clear();
			
			File outputFile = new File(dataDir, makeHeldoutFilename(split));
			ObjectOutputStream output = new ObjectOutputStream(IO.getOutputStream(outputFile));

			// now aggregate heldout records
			for(FactorTuple tuple : tuples) {
				File file = new File(tempHeldoutDataDir, Long.toString(tuple.getBits()));
				ArrayList<HeldoutDataRecord> records = new ArrayList<HeldoutDataRecord>();
				FileInputStream in = new FileInputStream(file);
				int totalContexts = 0;
				// read records until EOF (signaled by IOException)
				while(true) {
					try {
						HeldoutDataRecord rec = new HeldoutDataRecord();
						rec.read(in);
						records.add(rec);
						totalContexts += rec.contexts.length;
						
					} catch(IOException e) {
						// end of file reached
						break;
					}
				}
				in.close();
				file.delete();
				
				// merge duplicate contexts across this word's records
				HashMap<ClusteredContext, Integer> context2num = new HashMap<ClusteredContext, Integer>(totalContexts);
				
				ClusteredContext contexts[] = new ClusteredContext[totalContexts];
				int counts[] = new int[totalContexts];
				double[][] probs = new double[totalContexts][];
				
				for(HeldoutDataRecord rec : records) {
					for(int i=0; i<rec.contexts.length; ++i) {
						ClusteredContext ctx = rec.contexts[i];
						Integer num = context2num.get(ctx);
						if (num == null) {
							num = new Integer(context2num.size());
							context2num.put(ctx, num);
							probs[num.intValue()] = rec.probabilities[i];
							contexts[num.intValue()] = ctx;
						}
						counts[num.intValue()] += rec.counts[i];
					}
				}
				//System.err.printf("merging contexts (%s): %d to %d\n", tuple.toStringNoNull(), totalContexts, context2num.size());
				
				if (context2num.size() < totalContexts) {
					// duplicates were merged; trim the arrays to the unique count
					contexts = Arrays.copyOf(contexts, context2num.size());
					counts = Arrays.copyOf(counts, context2num.size());
					probs = Arrays.copyOf(probs, context2num.size());
				}
				HeldoutDataRecord rec = new HeldoutDataRecord(tuple.getBits(), contexts, probs, counts);
				rec.writeExternal(output);
			}
			
			output.close();
		}		
	}
	
	/**
	 * Packs a word's heldout (context -> count) map into a HeldoutDataRecord,
	 * computing per-model probabilities from the folded data (all folds except
	 * the given split).
	 *
	 * @param word  the word's factor tuple bits
	 * @param split the fold excluded from probability estimation
	 * @param map   heldout contexts and their observation counts
	 */
	private HeldoutDataRecord makeHeldoutDataRecord(long word, int split, Map<ClusteredContext,MutableInteger> map) {
		final int size = map.size();
		final ClusteredContext[] contexts = new ClusteredContext[size];
		final int[] counts = new int[size];

		int pos = 0;
		for (Map.Entry<ClusteredContext, MutableInteger> entry : map.entrySet()) {
			contexts[pos] = entry.getKey();
			counts[pos] = entry.getValue().intValue();
			++pos;
		}

		final double[][] probabilities = computeProbabilities(word, split, contexts);
		return new HeldoutDataRecord(word, contexts, probabilities, counts);
	}
	
	/**
	 * Asynchronously loads the given word's cluster counts and stores them as a
	 * SoftReference at preloadedCounts[pos], so the main writer loop finds them
	 * already in memory (unless the GC reclaimed them first).
	 */
	private void preloadWordClusterCounts(final SoftReference<CompactReadOnlyInt2IntHashMap[][]> preloadedCounts[], final int pos, final FactorTuple word) {
		Runnable loader = new Runnable() {
			@Override
			public void run() {
				CompactReadOnlyInt2IntHashMap[][] counts =
					dataModelCounts.wordCounts.get(word).getClusterCounts();
				preloadedCounts[pos] = new SoftReference<CompactReadOnlyInt2IntHashMap[][]>(counts);
			}
		};
		new Thread(loader).start();
	}
	
	/**
	 * Estimates, for the given word, its probability under every model for each
	 * context, using counts pooled from all folds except the given split
	 * (cross-validation). Per model, the probability is the lambda-weighted
	 * mixture of ML estimates along the path from the context's cluster up to
	 * the tree root, with (1 - lambda) mass backing off to each parent.
	 *
	 * @param word     the word (factor tuple bits) whose probabilities are computed
	 * @param split    the fold to exclude from the counts
	 * @param contexts the clustered contexts to evaluate
	 * @return probabilities for each model estimated from folded data (all but @param split),
	 *         indexed [context][model]
	 */
	private double[][] computeProbabilities(final long word, final int split, ClusteredContext contexts[]) {
		// unique clusters by model
		final Long2DoubleMap cluster2probByModel[] = new Long2DoubleMap[nrModels];
		for(int i=0; i<nrModels; ++i) {
			cluster2probByModel[i] = new Long2DoubleMap();
		}
		
		// register every distinct cluster (addAndGet with 0 just creates the entry)
		for(ClusteredContext ctx : contexts) {
			for(int i=0; i<nrModels; ++i) {
				cluster2probByModel[i].addAndGet(ctx.clusters[i], 0);
			}
		}
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("compute probabilities");
		
		final Word2ClustersCounts word2ClustersCounts = dataModelCounts.wordCounts.get(new FactorTuple(word));
		final CompactReadOnlyInt2IntHashMap[][] clusterCounts = word2ClustersCounts.getClusterCounts();
		
		for(int lmNum=0; lmNum<nrModels; ++lmNum) {
			final int _lmNum = lmNum;
			Long2DoubleMap cluster2prob = cluster2probByModel[lmNum];
			final BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
			
			for(Long2DoubleMap.Iterator it = cluster2prob.iterator(); it.hasNext(); ) {
				final Long2DoubleMap.Entry e = it.next();
				final int clusterid = (int) it.getKey();
				
				// one job per (model, cluster): writes its result into entry e
				Runnable run = new Runnable() {
					@Override
					public void run() {
						BinaryTree<HistoryTreePayload> node = nodes[clusterid];
						
						// walk from the cluster up to the root, accumulating
						// lambda-weighted ML probabilities
						double currentWeight = 1.0;
						double prob = 0.0;
						while(node != null) {
							HistoryTreePayload payload = node.getPayload();
							int clusterCount = 0;
							long clusterTotalCount = 0;
							
							// pool counts over all folds except the heldout split
							for(int dataId=0; dataId<nrDataFiles; ++dataId) {
								if (dataId == split) continue;
								
								clusterCount += clusterCounts[dataId][_lmNum].get(payload.clusterid);
								
								clusterTotalCount += dataModelCounts.clusterCounts[dataId][_lmNum][payload.clusterid];
							}
							if (clusterCount > 0) {
								double clusterProb = (double) clusterCount / clusterTotalCount;
								if (Double.isNaN(clusterProb)) {
									// debugger hook: clusterTotalCount was 0 despite clusterCount > 0
									System.err.print("");
								}
								prob += clusterProb * currentWeight * payload.lambda;
							}
							// remaining probability mass backs off to the parent
							currentWeight *= (1.0 - payload.lambda);
							
							node = node.getParent();
						}
						
						e.setValue(prob);
					}
					
				};
				
				Job job = new Job(run, "");
				manager.addJob(group, job);
				// NOTE(review): joining inside the submit loop — presumably throttles
				// submission to the worker count; confirm JobGroup.join(int) semantics.
				group.join(manager.getNumWorkers());
			}
		}
		
		group.join();
		
		// gather per-context probability vectors from the per-model maps
		final double probs[][] = new double[contexts.length][];
		for(int i=0; i<contexts.length; ++i) {
			double contextProbs[] = new double[nrModels];
			probs[i] = contextProbs;
			int clusters[] = contexts[i].clusters; 
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				contextProbs[lmNum] = cluster2probByModel[lmNum].get(clusters[lmNum]);
			}
		}
		return probs;
	}

	/**
	 * Maps each context in data to its cluster id under every model, one
	 * parallel job per model. Jobs are submitted to the given group; the caller
	 * must join the group before reading the returned array.
	 *
	 * @param data  context/futures pairs to cluster
	 * @param group job group the clustering jobs are added to
	 * @return one ClusteredContext per input pair (filled in once the group is joined)
	 */
	private ClusteredContext[] clusterContext(final ContextFuturesPair[] data, JobGroup group) {
		JobManager manager = JobManager.getInstance();

		final ClusteredContext[] result = new ClusteredContext[data.length];
		for (int i = 0; i < result.length; ++i) {
			result[i] = new ClusteredContext(nrModels);
		}

		for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
			final int modelIdx = lmNum;
			Runnable task = new Runnable() {
				@Override
				public void run() {
					final LanguageModel lm = forest.getModels().get(modelIdx);
					for (int i = 0; i < result.length; ++i) {
						result[i].clusters[modelIdx] = lm.context2cluster(data[i].getContext());
					}
				}
			};
			manager.addJob(group, new Job(task, "clusterContext"));
		}
		return result;
	}
	
	/**
	 * A fixed tuple of cluster ids, one per model, identifying a clustered
	 * context. Used as a hash key, so equality/hashing are over the array
	 * contents.
	 */
	private static class ClusteredContext {
		final int clusters[];

		/** Allocates an all-zero cluster tuple of the given width. */
		public ClusteredContext(int nrModels) {
			clusters = new int[nrModels];
		}

		/** Reads the tuple written by writeExternal: a byte length, then ints. */
		public ClusteredContext(ObjectInput in) throws IOException {
			final byte len = in.readByte();
			clusters = new int[len];
			for (int i = 0; i < len; ++i) {
				clusters[i] = in.readInt();
			}
		}

		/** Writes the tuple as a byte length followed by one int per cluster. */
		public void writeExternal(ObjectOutput out) throws IOException {
			assert(clusters.length <= Byte.MAX_VALUE);
			out.writeByte(clusters.length);
			for (int i = 0; i < clusters.length; ++i) {
				out.writeInt(clusters[i]);
			}
		}

		@Override
		public int hashCode() {
			// same value as the conventional prime*1 + Arrays.hashCode form
			return 31 + Arrays.hashCode(clusters);
		}

		@Override
		public boolean equals(Object obj) {
			if (obj == this) {
				return true;
			}
			if (!(obj instanceof ClusteredContext)) {
				return false;
			}
			return Arrays.equals(clusters, ((ClusteredContext) obj).clusters);
		}
	}
	
	/**
	 * Optimizes the interpolation weights of the models' inner (non-leaf)
	 * history-tree nodes with bounded L-BFGS over the heldout data.
	 */
	private class InnerNodesLBFGS {
		BitSet leafClusters[]; // [lmNum] — marks clusters whose nodes are leaves (no tunable weight)
		int clusters2paramsMap[][]; // [lmNum][clusterid] -> flat parameter index, or -1 for leaf clusters
		double params[]; // flat parameter vector handed to the optimizer
		
		/**
		 * Indexes every non-leaf cluster of every model into a flat parameter
		 * vector; leaf clusters are recorded in a BitSet and mapped to -1.
		 */
		private InnerNodesLBFGS() {
			leafClusters = new BitSet[nrModels];
			clusters2paramsMap = new int[nrModels][];

			int paramCount = 0;
			for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
				final BinaryTree<HistoryTreePayload>[] nodes = forest.getModels().get(lmNum).getNodes();
				final BitSet leaves = new BitSet(nodes.length);
				final int[] map = new int[nodes.length];

				for (int clusterid = 0; clusterid < nodes.length; ++clusterid) {
					final BinaryTree<HistoryTreePayload> node = nodes[clusterid];
					if (node != null && node.isLeaf()) {
						// leaves carry no tunable weight
						leaves.set(clusterid);
						map[clusterid] = -1;
					} else {
						map[clusterid] = paramCount++;
					}
				}

				clusters2paramsMap[lmNum] = map;
				leafClusters[lmNum] = leaves;
			}

			params = new double[paramCount];
		}
		
		/**
		 * Runs bounded L-BFGS for at most nrIterations iterations, minimizing
		 * the heldout cross-entropy; on success the optimized point is copied
		 * back into params. On optimizer failure the initial uniform 0.1
		 * weights are left in place.
		 *
		 * @param nrIterations maximum number of optimizer iterations
		 */
		private void doOptimize(int nrIterations) {
			Minimizer minimizer = new Minimizer();

			ArrayList<Bound> bounds = new ArrayList<Bound>(params.length);
			// use a small positive lower bound since 0 appears to mean no bound;
			// Double.valueOf replaces the deprecated new Double(...) constructors
			Bound bound = new Bound(Double.valueOf(1e-7), Double.valueOf(1.0));
			for (int i = 0; i < params.length; ++i) {
				bounds.add(bound);
			}

			minimizer.setBounds(bounds);
			StopConditions conditions = minimizer.getStopConditions();
			conditions.setMaxIterations(nrIterations);

			DifferentiableFunction fun = new DifferentiableFunction() {
				@Override
				public FunctionValues getValues(double[] point) {
					double gradient[] = new double[params.length];
					double crossEntropy = computeGradientAndCrossEntropy(point, gradient);
					FunctionValues val = new FunctionValues(crossEntropy, gradient);
					System.err.printf("crossEntropy = %f\n", crossEntropy);
					return val;
				}
			};

			try {
				// start from a uniform small weight
				Arrays.fill(params, 0.1);
				Result result = minimizer.run(fun, params);
				System.arraycopy(result.point, 0, params, 0, params.length);

				System.err.println(result.iterationsInfo.toString());
			} catch (LBFGSBException e) {
				e.printStackTrace();
			}
		}
		
		/**
		 * Copies the optimized parameters back into the decoding runtime's
		 * per-cluster weight arrays; leaf clusters (mapped to -1) are skipped.
		 */
		private void updateWeights() {
			for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
				final double[] weights = forest.getDecodingRuntime().getWeights(lmNum);
				final int[] map = clusters2paramsMap[lmNum];

				for (int clusterid = 0; clusterid < map.length; ++clusterid) {
					final int paramIdx = map[clusterid];
					if (paramIdx >= 0) {
						weights[clusterid] = params[paramIdx];
					}
				}
			}
		}
		
		private double computeGradientAndCrossEntropy(final double params[], final double gradient[]) {
			assert(params.length == gradient.length);
			
			JobManager manager = JobManager.getInstance();
			JobGroup group = manager.createJobGroup("cross entropy");
			
			// [dataid][param-position]
			final double totalEntropies[][] = new double[nrDataFiles][];
			final double partialGradients[][] = new double[nrDataFiles][];
			final long totalCounts[][] = new long[nrDataFiles][];

			final DecodingRuntime runtime = forest.getDecodingRuntime();

			for(int fold=0; fold<nrDataFiles; ++fold) {
				final int dataid = fold;
				totalCounts[dataid] = new long[params.length];
				totalEntropies[dataid] = new double[params.length];
				partialGradients[dataid] = new double[gradient.length];

				
				final ObjectInputStream ois;
				try {
					ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
				} catch (IOException e1) {
					e1.printStackTrace();
					continue;
				}

				Runnable run = new Runnable() {
					@Override
					public void run() {
						long myTotalCounts[] = new long[params.length];
						double myTotalEntropies[] = new double[params.length];
						double myPartialGradient[] = new double[params.length];
						int nrRecords = 0;
						
						try {
							while(true) {
								try {
									HeldoutDataRecord rec;
									
									synchronized(ois) {
										rec = new HeldoutDataRecord(ois);
										++nrRecords;
									}
									
									long word = rec.word;
									FactorTuple tuple = new FactorTuple(word);
									
									Word2ClustersCounts theWordCounts = dataModelCounts.wordCounts.get(tuple);
									CompactReadOnlyInt2IntHashMap[] theCounts = theWordCounts.getClusterCounts()[dataid];

									CompactReadOnlyInt2DoubleHashMap cluster2MLprobs[] = new CompactReadOnlyInt2DoubleHashMap[nrModels];
									CompactReadOnlyInt2DoubleHashMap cluster2probs[] = new CompactReadOnlyInt2DoubleHashMap[nrModels];
									
									
									for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
										CompactReadOnlyInt2IntHashMap clusters2counts = theCounts[lmNum];
										long totalClusterCounts[] = dataModelCounts.clusterCounts[dataid][lmNum];
										
										int clusters[] = clusters2counts.keys();
										int counts[] = clusters2counts.values();
										
										
										CompactReadOnlyInt2DoubleHashMap cluster2MLprob = new CompactReadOnlyInt2DoubleHashMap(clusters2counts.getKeysHashSet(), 0.0);
										
										for(int i=0; i<clusters.length; ++i) {
											int clusterid = clusters[i];
											if (counts[i] > totalClusterCounts[clusterid]) {
												System.err.print("");
											}
											cluster2MLprob.set(clusterid, (double)counts[i] / totalClusterCounts[clusterid]);
										}
										CompactReadOnlyInt2DoubleHashMap cluster2prob = new CompactReadOnlyInt2DoubleHashMap(clusters2counts.getKeysHashSet(), 0.0);
										cluster2MLprobs[lmNum] = cluster2MLprob;
										cluster2probs[lmNum] = cluster2prob;
									}
									
									for(int i=0; i<rec.contexts.length; ++i) {
										int clusters[] = rec.contexts[i].clusters;
										double probs[] = rec.probabilities[i];
										int count = rec.counts[i];
										
										for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
											double sumProbs = 0;
											double sumWeights = 0;
											
											for(int _lmNum = 0; _lmNum <nrModels; ++_lmNum) {
												if (_lmNum != lmNum) {
													double weight = runtime.getWeights(_lmNum)[clusters[_lmNum]];
													sumWeights += weight;
													sumProbs += weight * probs[_lmNum];
												}
											}
											
											LanguageModel lm = forest.getModels().get(lmNum);
											BinaryTree<HistoryTreePayload> nodes[] = lm.getNodes();
											
											int leafClusterid = clusters[lmNum];
											BinaryTree<HistoryTreePayload> parent = nodes[leafClusterid].getParent();
											if (parent == null) continue;
											
											CompactReadOnlyInt2DoubleHashMap cluster2MLprob = cluster2MLprobs[lmNum];
											CompactReadOnlyInt2DoubleHashMap cluster2prob = cluster2probs[lmNum];
											
											fillInterpolatedProbabilities(cluster2MLprob, cluster2prob, parent);

											BinaryTree<HistoryTreePayload> node = parent;
											while(node != null) {
												int clusterid = node.getPayload().clusterid;

												int pos = clusters2paramsMap[lmNum][clusterid];
												myTotalCounts[pos] += count;
												
												double weight = params[pos];
												if (weight < MIN_WEIGHT) {
													params[pos] = MIN_WEIGHT;
													weight = MIN_WEIGHT;
													//System.err.print("");
												}
												double myProb = cluster2prob.get(clusterid);
												double mySumWeights = sumWeights + weight;
												double mySumProbs = sumProbs + weight * myProb;

												if (mySumProbs <= 0) {
													// oops!
													StringBuilder sb = new StringBuilder();
													sb.append(String.format("sumProbs=%f, sumWeights=%f, word=%s, lmNum=%d\n", mySumProbs, mySumWeights, FactorTuple.toStringNoNull(rec.word), lmNum));
													System.err.print(sb.toString());
												}
												
												if (mySumWeights > 0) {
													double prob = mySumProbs / mySumWeights;
													double addition = count * ProbMath.log2(prob);
													if (Double.isNaN(addition)) {
														System.err.print("");
													}
													myTotalEntropies[pos] -= addition;
													
													double update = count / mySumWeights * (myProb/prob - 1);
													myPartialGradient[pos] -= update;
												} else {
													System.err.printf("sumWeights = %f\n", mySumWeights);
												}
												
												node = node.getParent();
											}
										}
									}
								} catch(IOException e) {
									break;
								}
							}
							ois.close();
						} catch (IOException e) {
							e.printStackTrace();
						}
						
						synchronized(totalCounts[dataid]) {
							//System.err.printf("dataid=%d, nrRecords = %d\n", dataid, nrRecords);
							
							for(int i=0; i<params.length; ++i) {
								totalCounts[dataid][i] += myTotalCounts[i];
							}
							for(int i=0; i<params.length; ++i) {
								totalEntropies[dataid][i] += myTotalEntropies[i];
							}
							for(int i=0; i<params.length; ++i) {
								partialGradients[dataid][i] += myPartialGradient[i];
							}							
						}
					}
				};

				for(int i=0; i<Math.ceil(manager.getNumWorkers() / (double) nrDataFiles); ++i) {
					Job job = new Job(run, "cross entropy");
					manager.addJob(group, job);
				}
			}
			group.join();
			
			long superTotalCounts[] = new long[params.length];
			for(long totalCount[] : totalCounts) {
				for(int i=0; i<params.length; ++i) {
					superTotalCounts[i] += totalCount[i];
				}
			}
			
			Arrays.fill(gradient, 0);
			double entropy = 0;
			for(int fold=0; fold<nrDataFiles; ++fold) {
				double partialGradient[] = partialGradients[fold];
				for(int i=0; i<params.length; ++i) {
					long totalCount = superTotalCounts[i];
					if (totalCount > 0) {
						entropy += totalEntropies[fold][i] / totalCount;
						gradient[i] += partialGradient[i] / totalCount;
					}
				}
			}

			return entropy;
		}
		
		/**
		 * Recursively computes (and memoizes into {@code cluster2prob}) the interpolated
		 * probability of a history-tree node: its own lambda-weighted ML probability plus
		 * (1 - lambda) times the parent's interpolated probability.
		 *
		 * @return the interpolated probability for {@code node}'s cluster
		 */
		private double fillInterpolatedProbabilities(CompactReadOnlyInt2DoubleHashMap cluster2MLprob, CompactReadOnlyInt2DoubleHashMap cluster2prob, BinaryTree<HistoryTreePayload> node) {
			final int clusterid = node.getPayload().clusterid;

			// already computed for this cluster? (0 is the map's "absent" default)
			final double cached = cluster2prob.get(clusterid);
			if (cached > 0) {
				return cached;
			}

			final double lambda = node.getPayload().lambda;
			double interpolated = lambda * cluster2MLprob.get(clusterid);

			BinaryTree<HistoryTreePayload> parent = node.getParent();
			if (parent != null) {
				interpolated += (1 - lambda) * fillInterpolatedProbabilities(cluster2MLprob, cluster2prob, parent);
			}
			if (!(interpolated >= 0.0 && interpolated <= 1.0)) {
				// out-of-range probability (or NaN) — kept as a no-op breakpoint anchor
				System.err.print("");
			}

			cluster2prob.set(clusterid, interpolated);
			return interpolated;
		}
		
	}
	/** Directory name holding the inner-node data of fold {@code dataid}, e.g. "inner-3". */
	static String makeInnerNodesDirname(int dataid) {
		return "inner-" + dataid;
	}
	
	/**
	 * One heldout-data record: a word plus, for each clustered context it occurred in,
	 * an occurrence count and the per-model probabilities.
	 *
	 * Two serialization formats are supported: Java's {@link Externalizable}
	 * (writeExternal/readExternal) and a compact length-prefixed binary format
	 * (read/write) that packs small integers with {@link PackedInteger}.
	 */
	private static class HeldoutDataRecord implements Externalizable {
		long word;
		ClusteredContext contexts[];
		// [context list][lmNum]
		double probabilities[][];
		int counts[];
		
		public HeldoutDataRecord(long word, ClusteredContext[] contexts, double[][] probabilities, int[] counts) {
			this.word = word;
			this.contexts = contexts;
			this.probabilities = probabilities;
			this.counts = counts;
		}
		
		/** Required by {@link Externalizable}. */
		public HeldoutDataRecord() {}
		
		/** Deserializing constructor; throws IOException("eof") at end of stream. */
		public HeldoutDataRecord(ObjectInput in) throws IOException {
			readExternal(in);
		}
		
		/**
		 * Reads one record in the compact binary format: a 4-byte big-endian length
		 * prefix followed by that many payload bytes.
		 *
		 * @throws IOException "eof" when the stream ends cleanly before a record,
		 *         or another IOException on a truncated/unreadable record
		 */
		public void read(InputStream in) throws IOException {
			byte buf[] = new byte[4];
			ByteBuffer buffer = ByteBuffer.wrap(buf);
			// BUG FIX: InputStream.read may return fewer than 4 bytes (short read);
			// the original did a single read() guarded only by an assert. Loop until
			// the full prefix is in. EOF before any byte means "no more records".
			int prefixRead = 0;
			while (prefixRead < 4) {
				int r = in.read(buf, prefixRead, 4 - prefixRead);
				if (r == -1) {
					if (prefixRead == 0) {
						throw new IOException("eof");
					}
					throw new IOException("truncated length prefix");
				}
				prefixRead += r;
			}
			int len = buffer.getInt(0);
			buf = new byte[len];
			buffer = ByteBuffer.wrap(buf);
			
			// read the full payload, tolerating short reads
			int position = 0;
			while(position < len) {
				int read = in.read(buf, position, len - position);
				if (read > 0) {
					position += read;
				} else {
					throw new IOException("failed to read stream");
				}
			}
			
			word = buffer.getLong();
			position = buffer.position();
			
			int nrModels = PackedInteger.readInt(buf, position);
			position += PackedInteger.getReadIntLength(buf, position);
			int nrContexts = PackedInteger.readInt(buf, position);
			position += PackedInteger.getReadIntLength(buf, position);
			
			contexts = new ClusteredContext[nrContexts];
			counts = new int[nrContexts];
			probabilities = new double[nrContexts][];
			
			for(int i=0; i<nrContexts; ++i) {
				ClusteredContext ctx = new ClusteredContext(nrModels);
				contexts[i] = ctx;
				for(int m=0; m<nrModels; ++m) {
					ctx.clusters[m] = PackedInteger.readInt(buf, position);
					position += PackedInteger.getReadIntLength(buf, position);
				}
				counts[i] = PackedInteger.readInt(buf, position);
				position += PackedInteger.getReadIntLength(buf, position);
				
				// doubles are read through the ByteBuffer; keep its position in sync
				buffer.position(position);
				double probs[] = new double[nrModels];
				for(int m=0; m<nrModels; ++m) {
					probs[m] = buffer.getDouble();
				}
				probabilities[i] = probs;
				position = buffer.position();
			}
		}
		
		/**
		 * Writes this record in the compact binary format (see {@link #read(InputStream)}).
		 */
		public void write(OutputStream out) throws IOException {
			int nrModels = probabilities[0].length; 
			
			// worst-case buffer size
			int maxLen = 4; // the length prefix
			// BUG FIX: the original budgeted only the cluster ids
			// (contexts.length * nrModels packed ints); each context additionally
			// writes its count as a packed int, so budget nrModels + 1 per context.
			maxLen += PackedInteger.MAX_LENGTH * (2 + contexts.length * (nrModels + 1)); // nrModels, nrContexts, clusters, counts
			maxLen += 8; // the word
			maxLen += 8 * contexts.length * nrModels; // the probabilities
			
			byte buf[] = new byte[maxLen];
			ByteBuffer buffer = ByteBuffer.wrap(buf);
			
			buffer.position(4); // reserve space for the length
			buffer.putLong(word);
			int position = buffer.position();
			
			position = PackedInteger.writeInt(buf, position, nrModels);
			position = PackedInteger.writeInt(buf, position, contexts.length);

			for(int i=0; i<contexts.length; ++i) {
				ClusteredContext ctx = contexts[i];
				for(int clusterid : ctx.clusters) {
					position = PackedInteger.writeInt(buf, position, clusterid);
				}
				position = PackedInteger.writeInt(buf, position, counts[i]);
				
				buffer.position(position);
				for(double prob : probabilities[i]) {
					buffer.putDouble(prob);
				}
				position = buffer.position();
			}
			
			buffer.putInt(0, position - 4); // the size of the data block
			
			out.write(buf, 0, position);
		}
		
		/* (non-Javadoc)
		 * @see java.io.Externalizable#writeExternal(java.io.ObjectOutput)
		 */
		@Override
		public void writeExternal(ObjectOutput out) throws IOException {
			out.writeLong(word);
			out.writeInt(contexts.length);
			
			int nrModels = probabilities[0].length; 
			assert(nrModels <= Byte.MAX_VALUE);
			out.writeByte(nrModels);
			
			for(int i=0; i<contexts.length; ++i) {
				contexts[i].writeExternal(out);
				out.writeInt(counts[i]);
				for(double prob : probabilities[i]) {
					out.writeDouble(prob);
				}
			}
			
		}
		/* (non-Javadoc)
		 * @see java.io.Externalizable#readExternal(java.io.ObjectInput)
		 */
		@Override
		public void readExternal(ObjectInput in) throws IOException {
			word = in.readLong();
			int len = in.readInt();
			byte nrModels = in.readByte();
			
			contexts = new ClusteredContext[len];
			counts = new int[len];
			probabilities = new double[len][];
			
			for(int i=0; i<len; ++i) {
				contexts[i] = new ClusteredContext(in);
				counts[i] = in.readInt();
				double probs[] = new double[nrModels];
				for(byte lmNum=0; lmNum<nrModels; ++lmNum) {
					probs[lmNum] = in.readDouble();
				}
				probabilities[i] = probs;
			}
		}
	}
	
	/**
	 * Flattened vector of the per-leaf interpolation weights of all models, used as the
	 * parameter vector for L-BFGS-B optimization on the heldout data.
	 *
	 * {@code modelCluster2pos[lmNum][clusterid]} maps a leaf cluster id to its position
	 * in {@code params} (-1 for non-leaf clusters); {@code params} holds one weight per
	 * leaf across all models.
	 */
	private class Parameters {
		final int modelCluster2pos[][];
		final double params[];
		
		public Parameters(boolean useOldValues) {
			this(useOldValues, true);
		}
		
		/**
		 * @param useOldValues initialize from the forest's current decoding weights
		 * @param random       when not using old values, jitter the 0.5 default by +-0.1
		 */
		public Parameters(boolean useOldValues, boolean random) {
			modelCluster2pos = new int[nrModels][];
			Random rnd = new Random();
			
			int totalLeafCount = 0;
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				totalLeafCount += (forest.getModels().get(lmNum).getNodes().length /*cluster ids start with 1, no need for +1 */) / 2;
			}
			params = new double[totalLeafCount];
			
			int pos = 0;
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
				double weights[] = forest.getDecodingRuntime().getWeights(lmNum);
				
				int cluster2pos[] = new int[nodes.length];
				Arrays.fill(cluster2pos, -1); // -1 marks non-leaf clusters
				
				int leafNum = 0;
				for(BinaryTree<HistoryTreePayload> node : nodes) {
					if (node != null && node.isLeaf()) {
						int clusterid = node.getPayload().clusterid;
						cluster2pos[clusterid] = pos;
						
						double value;
						if (useOldValues) {
							value = weights[clusterid];
						} else {
							if (clusterid == 1) {
								// cluster#1 is a leaf only in the unigram model
								// set lower weight for faster convergence
								value = 0.01;
							} else {
								// random: 0.5 +- 0.1
								value = random ? rnd.nextDouble() * 0.2 + 0.4 : 0.5;
							}
						}
						params[pos] = value;
						++pos;
						++leafNum;
					}
				}
				modelCluster2pos[lmNum] = cluster2pos;
			}
		}
		
		/** @return the number of optimized parameters (one per leaf across all models) */
		public int nrParams() {
			return params.length;
		}
		
		/**
		 * Computes the total heldout cross entropy under the current {@code params},
		 * one parallel job per data fold.
		 */
		@SuppressWarnings("unused")
		public double computeCrossEntropy() {
			JobManager manager = JobManager.getInstance();
			JobGroup group = manager.createJobGroup("cross entropy");

			final double totalEntropies[] = new double[nrDataFiles];
			for(int fold=0; fold<nrDataFiles; ++fold) {
				final int dataid = fold;
				Runnable run = new Runnable() {
					@Override
					public void run() {
						try {
							ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
							long totalCount = 0;
							double foldEntropy = 0;
							while(true) {
								try {
									HeldoutDataRecord rec = new HeldoutDataRecord(ois);
									for(int i=0; i<rec.contexts.length; ++i) {
										int clusters[] = rec.contexts[i].clusters;
										double probs[] = rec.probabilities[i];
										
										double sumProbs = 0;
										double sumWeights = 0;

										for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
											// BUG FIX: the weight is the parameter value at this
											// position; the original used the position index itself
											// as the weight (cf. computeGradientAndCrossEntropy).
											int pos = modelCluster2pos[lmNum][clusters[lmNum]];
											double weight = params[pos];
											sumWeights += weight;
											sumProbs += weight * probs[lmNum];
										}
										
										assert(sumWeights > 0);
										double prob = sumProbs / sumWeights;
										totalCount += rec.counts[i];
										foldEntropy -= rec.counts[i] * ProbMath.log2(prob);
									}
								} catch(IOException e) {
									// the record constructor throws IOException("eof") at end of stream
									break;
								}
							}
							foldEntropy /= totalCount;
							totalEntropies[dataid] = foldEntropy;
							ois.close();
						} catch (IOException e) {
							e.printStackTrace();
						}

					}
				};
				Job job = new Job(run, "cross entropy");
				manager.addJob(group, job);
			}
			group.join();
			
			double entropy = 0;
			for(int fold=0; fold<nrDataFiles; ++fold) {
				entropy += totalEntropies[fold];
			}
			return entropy;
		}
		
		@SuppressWarnings("unused")
		public double computeGradientAndCrossEntropy(final double[] gradient) {
			return computeGradientAndCrossEntropy(this.params, gradient);
		}
		
		/**
		 * Computes the heldout cross entropy and its gradient w.r.t. {@code params},
		 * one parallel job per data fold; both are normalized by the total token count.
		 * Weights below MIN_WEIGHT are clamped in place (note: {@code params} is mutated).
		 *
		 * @param params   the point to evaluate (same layout as {@link #params})
		 * @param gradient output array, filled with the gradient
		 * @return the normalized cross entropy
		 */
		public double computeGradientAndCrossEntropy(final double params[], final double[] gradient) {
			assert(params.length == gradient.length);
			
			JobManager manager = JobManager.getInstance();
			JobGroup group = manager.createJobGroup("cross entropy");
			
			final double totalEntropies[] = new double[nrDataFiles];
			final double partialGradients[][] = new double[nrDataFiles][];
			final long totalCounts[] = new long[nrDataFiles];
			
			for(int fold=0; fold<nrDataFiles; ++fold) {
				final int dataid = fold;
				Runnable run = new Runnable() {
					@Override
					public void run() {
						try {
							final double partialGradient[] = new double[gradient.length];
							partialGradients[dataid] = partialGradient;
							
							ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
							long totalCount = 0;
							double foldEntropy = 0;
							while(true) {
								try {
									HeldoutDataRecord rec = new HeldoutDataRecord(ois);
									for(int i=0; i<rec.contexts.length; ++i) {
										int clusters[] = rec.contexts[i].clusters;
										double probs[] = rec.probabilities[i];
										
										double sumProbs = 0;
										double sumWeights = 0;

										for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
											int pos = modelCluster2pos[lmNum][clusters[lmNum]];
											double weight = params[pos];
											if (weight < MIN_WEIGHT) {
												// clamp tiny/negative weights to keep probabilities valid
												params[pos] = MIN_WEIGHT;
												weight = MIN_WEIGHT;
												//System.err.print("");
											}
											sumWeights += weight;
											sumProbs += weight * probs[lmNum];
										}
										
										if (sumProbs <= 0) {
											// oops! dump diagnostics but keep going
											StringBuilder sb = new StringBuilder();
											sb.append(String.format("sumProbs=%f, sumWeights=%f, word=%s\n", sumProbs, sumWeights, FactorTuple.toStringNoNull(rec.word)));
											for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
												int pos = modelCluster2pos[lmNum][clusters[lmNum]];
												double weight = params[pos];
												sb.append(String.format(" [%d]: cluster=%d, weight=%f, prob=%f\n",
														lmNum, clusters[lmNum], weight, probs[lmNum]));
											}
											System.err.print(sb.toString());
										}
										
										if (sumWeights > 0) {
											double prob = sumProbs / sumWeights;
											int count = rec.counts[i];
											totalCount += count;
											double addition = count * ProbMath.log2(prob);
											if (Double.isNaN(addition)) {
												System.err.print("");
											}
											foldEntropy -= addition;
											
											// d(-log2 p)/d(weight_lm), up to the common log(2) factor
											for(int lmNum=0; lmNum<nrModels; ++lmNum) {
												double update = count / sumWeights * (probs[lmNum]/prob - 1);
												partialGradient[modelCluster2pos[lmNum][clusters[lmNum]]] -= update;
											}
										} else {
											System.err.printf("sumWeights = %f\n", sumWeights);
										}
									}
								} catch(IOException e) {
									// end of fold stream
									break;
								}
							}
							totalCounts[dataid] = totalCount;
							
							//double revTotalCount = 1.0 / totalCount / nrDataFiles;
							
							//foldEntropy *= revTotalCount;
							totalEntropies[dataid] = foldEntropy;
							
							/*
							for(int i=0; i<partialGradient.length; ++i) {
								partialGradient[i] *= revTotalCount;
							}
							*/
							ois.close();
						} catch (IOException e) {
							e.printStackTrace();
						}

					}
				};
				Job job = new Job(run, "cross entropy");
				manager.addJob(group, job);
			}
			group.join();
			
			Arrays.fill(gradient, 0);
			
			long superTotalCount = 0;
			for(long totalCount : totalCounts) {
				superTotalCount += totalCount;
			}
			double revTotalCount = 1.0 / superTotalCount;
			
			double entropy = 0;
			for(int fold=0; fold<nrDataFiles; ++fold) {
				
				entropy += totalEntropies[fold] * revTotalCount;

				double partialGradient[] = partialGradients[fold];
				for(int i=0; i<gradient.length; ++i) {
					gradient[i] += partialGradient[i] * revTotalCount;
				}
			}
			
			/*
			for(int i=0; i<gradient.length; ++i) {
				if (params[i] == MIN_WEIGHT && gradient[i] < 0) {
					gradient[i] = 0;
				}
			}
			*/
			return entropy;
		}

		/**
		 * Copies the optimized weights back into the forest's decoding runtime,
		 * expanding the flat parameter vector into per-model per-cluster arrays.
		 */
		public void unpackParams() {
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
				
				int cluster2pos[] = modelCluster2pos[lmNum];
				double newWeights[] = new double[nodes.length];
				
				for(int clusterid = 0; clusterid<cluster2pos.length; ++clusterid) {
					int pos = cluster2pos[clusterid];
					if (pos >= 0) {
						newWeights[clusterid] = params[pos];
					}
				}
				forest.getDecodingRuntime().setWeights(lmNum, newWeights);
			}
		}
		
	}
	
	/**
	 * L-BFGS-B driver that minimizes the heldout cross entropy over the leaf
	 * interpolation weights, with every weight bounded to [1e-7, 1].
	 */
	private class MyLBFGSB {
		Parameters params;
		Minimizer minimizer;
		
		public MyLBFGSB(boolean useOldValues) {
			params = new Parameters(useOldValues);

			final int nrParams = params.nrParams();
			ArrayList<Bound> bounds = new ArrayList<Bound>(nrParams);
			// use a small number since 0 appears to mean no bound
			// (autoboxing replaces the deprecated new Double(...) constructors)
			Bound bound = new Bound(1e-7, 1.0);
			for(int i=0; i<nrParams; ++i) {
				bounds.add(bound);
			}
			
			minimizer = new Minimizer();
			minimizer.setBounds(bounds);
			StopConditions conditions = minimizer.getStopConditions();
			conditions.setMaxIterations(200);
			//conditions.setMaxGradientNormInactive();
		}
		
		/**
		 * Runs the optimizer; on success copies the optimum back into {@link #params}
		 * and returns the minimized cross entropy, otherwise Double.MAX_VALUE.
		 */
		public double run() {
			DifferentiableFunction fun = new DifferentiableFunction() {
				@Override
				public FunctionValues getValues(double[] point) {
					double gradient[] = new double[params.nrParams()];
					double crossEntropy = params.computeGradientAndCrossEntropy(point, gradient);
					FunctionValues val = new FunctionValues(crossEntropy, gradient);
					System.err.printf("crossEntropy = %f\n", crossEntropy);
					return val;
				}
			};
			
			try {
				Result result = minimizer.run(fun, params.params);
				System.arraycopy(result.point, 0, params.params, 0, params.nrParams());
				
				System.err.println(result.iterationsInfo.toString());
				return result.functionValue;
				
			} catch (LBFGSBException e) {
				// fall through and report failure via the sentinel return value
				e.printStackTrace();
			}
			return Double.MAX_VALUE;
		}
	}
	
	/**
	 * Optimizes the leaf interpolation weights with {@code nrRuns} independent
	 * L-BFGS-B restarts, keeps the best run's parameters, then optimizes the
	 * inner-node weights via EM and installs all weights into the forest.
	 */
	public void interpolate(boolean useOldValues, int nrRuns) throws Exception {
		Parameters bestParams = null;
		double bestEntropy = Double.MAX_VALUE;

		for (int run = 0; run < nrRuns; ++run) {
			final long startMs = System.currentTimeMillis();
			MyLBFGSB optimizer = new MyLBFGSB(useOldValues);
			final double crossEntropy = optimizer.run();
			final long elapsedMs = System.currentTimeMillis() - startMs;
			System.out.printf("Run %d took %ds: entropy=%f, %d parameters\n", 
					run + 1, elapsedMs / 1000, crossEntropy, optimizer.params.params.length);
			if (crossEntropy < bestEntropy) {
				bestEntropy = crossEntropy;
				bestParams = optimizer.params;
			}
		}

		// install the best leaf weights before tuning the inner nodes
		if (bestParams != null) {
			bestParams.unpackParams();
		}

		innerNodesEM.doOptimize(100);
		innerNodesEM.updateWeights();
	}
	
	/**
	 * Sets all interpolation weights to fixed constants: 0 for backoff nodes,
	 * 0.5 for leaves, and 1e-7 for inner nodes. When {@code innerOnly} is true,
	 * leaf weights are left untouched.
	 */
	public void setConst(boolean innerOnly) {
		final double LEAF_CONST_WEIGHT = 0.5;
		final double INNER_CONST_WEIGHT = 1e-7;
		DecodingRuntime runtime = forest.getDecodingRuntime();

		final int nrLms = forest.getModels().size();
		for (int lmNum = 0; lmNum < nrLms; ++lmNum) {
			LanguageModel lm = forest.getModels().get(lmNum);
			double weights[] = runtime.getWeights(lmNum);

			BinaryTreeIterator<HistoryTreePayload> it = lm.getHistoryTree().getPostOrderIterator();
			while (it.hasNext()) {
				BinaryTree<HistoryTreePayload> node = it.nextNode();
				if (innerOnly && node.isLeaf()) {
					continue; // leaves keep their current weights
				}
				HistoryTreePayload payload = node.getPayload();
				if (payload.isBackoff) {
					weights[payload.clusterid] = 0;
				} else if (node.isLeaf()) {
					weights[payload.clusterid] = LEAF_CONST_WEIGHT;
				} else {
					weights[payload.clusterid] = INNER_CONST_WEIGHT;
				}
			}
		}
	}
	
	/**
	 * Debug helper: for each model, samples {@code nrClusters} random non-leaf,
	 * non-backoff clusters from the upper half of the node array and runs the
	 * per-cluster probability check on each.
	 */
	@SuppressWarnings("unused")
	private void checkHeldoutData(int nrClusters) {
		Random rnd = new Random();
		for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
			BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
			final int half = nodes.length / 2;

			for (int i = 0; i < nrClusters; ++i) {
				// rejection-sample until we hit a usable inner node
				int clusterid;
				while (true) {
					clusterid = rnd.nextInt(half) + half;
					BinaryTree<HistoryTreePayload> node = nodes[clusterid];
					if (node != null && !node.isLeaf() && !node.getPayload().isBackoff) {
						break;
					}
				}
				checkHeldoutData(lmNum, clusterid);
			}
		}
	}

	/**
	 * Debug check: for one model's cluster, sums the interpolated probability of every
	 * overt word under leave-one-out counts (all folds except {@code split}) and prints
	 * the per-fold totals, which should each be close to 1.
	 */
	private void checkHeldoutData(final int lmNum, final int clusterid) {
		final double totalProbs[] = new double[nrDataFiles];
		final BinaryTree<HistoryTreePayload> leaf = forest.getModels().get(lmNum).getNodes()[clusterid];
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("check probabilities");
	
		for(int d = 0; d<nrDataFiles; ++d) {
			final int split = d;
			for(final FactorTuple tuple : Experiment.getInstance().getTupleDescription().getAllOvertFactors().keySet()) {
				
				Runnable run = new Runnable() {
					@Override
					public void run() {
						BinaryTree<HistoryTreePayload> node = leaf;
						
						// walk from the leaf to the root, interpolating with lambda weights
						double currentWeight = 1.0;
						double prob = 0.0;
						while(node != null) {
							HistoryTreePayload payload = node.getPayload();
							int clusterCount = 0;
							long clusterTotalCount = 0;
							
							for(int dataId=0; dataId<nrDataFiles; ++dataId) {
								if (dataId == split) continue;
								
								Word2ClustersCounts word2ClustersCounts = dataModelCounts.wordCounts.get(tuple);
								if (word2ClustersCounts != null) {
									// BUG FIX: accumulate the held-in fold's counts (dataId);
									// the original indexed with the held-out fold (split),
									// making the "continue" above pointless.
									CompactReadOnlyInt2IntHashMap counts = word2ClustersCounts.getClusterCounts()[dataId][lmNum];
									clusterCount += counts.get(payload.clusterid);
								}
								clusterTotalCount += dataModelCounts.clusterCounts[dataId][lmNum][payload.clusterid];
							}
							if (clusterCount > 0) {
								double clusterProb = (double) clusterCount / clusterTotalCount;
								if (Double.isNaN(clusterProb)) {
									System.err.print("");
								}
								prob += clusterProb * currentWeight * payload.lambda;
							}
							currentWeight *= (1.0 - payload.lambda);
							
							node = node.getParent();
						}
						
						synchronized(totalProbs) {
							totalProbs[split] += prob;
						}
					}
				};
				Job job = new Job(run, "");
				manager.addJob(group, job);
				// throttle job submission to the number of workers
				group.join(manager.getNumWorkers());
			}
		}
		group.join();
		
		System.err.printf("lm=%d, cluster #%d: probs: %s\n", lmNum, clusterid, Arrays.toString(totalProbs));
	}

	/**
	 * File name for layer {@code level} training data of fold {@code dataId}.
	 * Layer 0 shares one file per fold across models; deeper layers are per-model.
	 */
	public static String makeDataFilename(int level, int dataId, int lmNum) {
		return level == 0
				? String.format("layer-0-data-%d", dataId)
				: String.format("layer-%03d-data-%d-lm-%d", level, dataId, lmNum);
	}

	/** Heldout file name for fold {@code dataId} with no model suffix, e.g. "heldout-3". */
	public static String makeHeldoutFilename(int dataId) {
		return makeHeldoutFilename(dataId, -1);
	}
	
	/**
	 * Heldout file name for fold {@code dataId}; a "-model-N" suffix is appended
	 * when {@code model} is non-negative.
	 */
	public static String makeHeldoutFilename(int dataId, int model) {
		StringBuilder name = new StringBuilder("heldout-").append(dataId);
		if (model >= 0) {
			name.append("-model-").append(model);
		}
		return name.toString();
	}
}
