/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.commons.math.util.OpenIntToDoubleHashMap;

import lbfgsb.*;

import edu.berkeley.nlp.util.MutableInteger;
import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.ForestModel.DecodingRuntime;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;

import com.sleepycat.je.util.*;
import com.sleepycat.util.PackedInteger;
/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class ForestInterpolation {
	// Floor applied to interpolation weights during gradient computation to
	// avoid division by zero / log(0) in the cross-entropy objective.
	private static final double MIN_WEIGHT = 1e-6;
	
	// The forest of decision-tree language models being interpolated.
	private final ForestModel forest;
	// Working directory for all intermediate training/heldout files.
	private File dataDir;
	// Number of data splits (folds) used for the interpolation.
	private int nrDataFiles;
	// Number of component language models in the forest.
	private final int nrModels;
	// Optimizer for the inner-node interpolation weights.
	private InnerNodesLBFGS innerNodesEM;
	
	/**
	 * @param forest the forest model whose component LMs will be interpolated
	 */
	public ForestInterpolation(ForestModel forest) {
		this.forest = forest;
		this.nrModels = forest.getModels().size();
	}
	
	/**
	 * Prepares everything needed for interpolation: training data, per-fold
	 * word/cluster counts, heldout data, and inner-node heldout data.
	 *
	 * @param dataDir working directory for all intermediate files
	 * @param justLoad when true, skip (re)building the data files; counts can
	 *                 be reloaded later via initializeWordCounts(true)
	 * @throws IOException on read/write failure
	 */
	public void initialize(File dataDir, boolean justLoad) throws IOException {
		this.dataDir = dataDir;
		OnDiskTrainingDataNodeWriter.setTempDir(dataDir);

		Experiment exp = Experiment.getInstance();
		nrDataFiles = exp.getFiles().getInterpolateData().size();

		innerNodesEM = new InnerNodesLBFGS();
		
		if (!justLoad) {
			// Order matters: word counts are built from the training data,
			// heldout probabilities are computed from the word counts, and the
			// inner-node data is derived from the heldout data.
			initializeTrainingData();
			
			initializeWordCounts(justLoad);

			initializeHeldoutData();

			initializeInnerHeldoutData();
			
			// counts are no longer needed once the heldout data is on disk
			dataModelCounts = null;
			
			// delete the on-disk count files in the background
			Runnable run = new Runnable() {
				@Override
				public void run() {
					clearWordCounts();
				}
			};
			new Thread(run, "clear word counts").start();
		}
		
	}

	/**
	 * Builds the level-0 training data file for each fold by combining the
	 * configured input files and reducing contexts to the forest's overt and
	 * hidden orders, filtering futures through the overt-factors mask.
	 */
	private void initializeTrainingData() throws IOException {
		final Experiment exp = Experiment.getInstance();
		final Experiment.Files files = exp.getFiles();
		
		final long overtMask = exp.getTupleDescription().getOvertFactorsMask();
		final TrainingDataFilter filter = new MaskedFuturesTrainingDataFilter(overtMask);
		
		for (int fold = 0; fold < nrDataFiles; ++fold) {
			String[] inputs = files.getInterpolateDataFiles(fold);
			String output = new File(this.dataDir, makeDataFilename(0, fold)).getAbsolutePath();
			TrainingDataUtil.combineAndReduceContext(inputs, output,
					forest.getOvertOrder(), forest.getHiddenOrder(),
					1, filter);
		}
	}
	
	/**
	 * Word and cluster counts for one (fold, model) pair. populateData() walks
	 * the model's history tree level by level, splitting each node's training
	 * data into its children and accumulating, per word, the counts in every
	 * cluster it appears in. The result is serialized to disk so interpolation
	 * can be restarted later.
	 */
	private static class ModelWordClusterCounts implements Serializable {
		private static final long serialVersionUID = 1L;
		public static final String clusterCountsFilename = "cluster-counts.ser";
		public static final String wordCountsDirname = "word-counts";

		// fold index this instance belongs to
		private final int dataid;
		// component model index this instance belongs to
		private final int lmNum;
		// total count of all words per cluster, indexed by cluster id
		private final long[] clusterCounts;
		// directory holding this instance's serialized state
		private final File baseDir;
		// transient: a deserialized instance has forest == null, so populateData
		// must not be called on a reloaded instance — TODO confirm
		private final transient ForestModel forest;
		
		// word -> per-cluster counts, backed by files under baseDir/word-counts
		private CompactReadOnlyLong2ObjectHashMap<Word2ClustersCounts> wordCounts;
		
		public ModelWordClusterCounts(ForestModel forest, int dataid, int lmNum, int nrClusters, File baseDir) {
			this.forest = forest;
			this.dataid = dataid;
			this.lmNum = lmNum;
			this.clusterCounts = new long[nrClusters];
			this.baseDir = baseDir;
		}
		
		/** Directory name for a (fold, model) pair, e.g. "2-0". */
		public static String makeBaseDirname(int dataid, int lmNum) {
			return Integer.toString(dataid) + "-" + Integer.toString(lmNum);
		}
		
		/**
		 * Builds the word/cluster counts by repeatedly splitting each level's
		 * data file into the next level's (one pass per tree depth), then
		 * serializes this object to baseDir/cluster-counts.ser.
		 *
		 * @param dataDir directory holding the per-level data files
		 * @param fileCache observer notified when word-count files are opened
		 * @throws IOException on read/write failure
		 */
		public void populateData(File dataDir, Observer fileCache) throws IOException {
			final LanguageModel lm = forest.getModels().get(lmNum);
			final BinaryTree<HistoryTreePayload> nodes[] = lm.getNodes();
			
			int level = 0;
			while(true) {
				// read level's data, write the children's data to level+1
				File oldFile = new File(dataDir, makeDataFilename(level, dataid));
				File newFile = new File(dataDir, makeDataFilename(level+1, dataid));
				
				FileChannel inputChannel = new FileInputStream(oldFile).getChannel();
				TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannel));

				RandomAccessFile outFile = new RandomAccessFile(newFile, "rw");
				outFile.getChannel().truncate(0);

				TrainingDataNodeWriter writer = new OnDiskTrainingDataNodeWriter(outFile.getChannel());
				writer = new BufferedTrainingDataNodeWriter(writer);

				// number of child nodes emitted at this level; 0 means all
				// remaining nodes were leaves and the loop can stop
				int nodeCount = 0;
				while(true) {
					ReadableTrainingDataNode dataNode = reader.getNext();
					if (dataNode == null) break;
					
					int clusterid = dataNode.getNodeId();
					BinaryTree<HistoryTreePayload> node = nodes[clusterid];
					
					// per-word counts observed at this node
					Long2IntMap counts = new Long2IntMap();
					
					if (node.isLeaf()) {
						// leaf: just tally the counts, nothing to split
						ReadableTrainingData data = dataNode.getData(0);
						while(data.hasNext()) {
							TrainingDataBlock block = data.next();
							block.addCounts(counts);
						}
					} else {
						nodeCount += 2;
						
						BinaryTree<HistoryTreePayload> left = node.getLeft();
						BinaryTree<HistoryTreePayload> right = node.getRight();
						
						int leftNodeId = left.getPayload().clusterid;
						int rightNodeId = right.getPayload().clusterid;
						
						WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
						WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
						
						writer.add(leftDataNode);
						writer.add(rightDataNode);
					
						// route each record to the left or right child by the
						// node's question, tallying counts along the way
						TrainingDataUtil.splitData(dataNode.getData(0), node.getPayload().question, 
								rightDataNode.getData(0), leftDataNode.getData(0), counts);

					}
					
					if (node.getParent() == null) {
						// the unigram model
						counts = adjustUnigramCounts(counts);
					}
					
					if (wordCounts == null) {
						// create the directory if necessary
						File theDir = new File(baseDir, wordCountsDirname);
						if (!theDir.isDirectory()) {
							theDir.mkdirs();
						}
						
						// initialize counts from the unigram model
						long words[] = new long[counts.size()];
						Word2ClustersCounts wordClusterCounts[] = new Word2ClustersCounts[counts.size()];
						
						int pos = 0;
						for(Long2IntMap.Iterator it = counts.iterator(); it.hasNext(); ) {
							Long2IntMap.Entry e = it.next();
							long word = e.getKey();
							words[pos] = word;
							
							// one on-disk count file per word
							wordClusterCounts[pos] = new Word2ClustersCounts(new File(theDir, Long.toString(word)));
							wordClusterCounts[pos].openForWriting(fileCache);
							
							++pos;
						}
						
						wordCounts = new CompactReadOnlyLong2ObjectHashMap<Word2ClustersCounts>(words, wordClusterCounts);
					}
					
					// record this node's per-word counts and the cluster total
					long totalClusterCount = 0;
					for(Long2IntMap.Iterator it = counts.iterator(); it.hasNext(); ) {
						Long2IntMap.Entry e = it.next();
						
						long word = e.getKey();
						int count = e.getValue();
						
						totalClusterCount += count;
						wordCounts.get(word).addClusterCount(clusterid, count);
					}
					clusterCounts[clusterid] = totalClusterCount;
				}
				writer.close();
				reader.close();
				
				// level 0 is the shared input file, so only delete deeper levels
				if (level > 0) {
					oldFile.delete();
				}
				if (nodeCount == 0) {
					newFile.delete();
					break;
				}
				++level;
			}
			
			for(Object w2c : wordCounts.values()) {
				((Word2ClustersCounts)w2c).finishWriting();
			}
			
			// serialize myself, just in case we want to restart interpolation later
			ObjectOutputStream ous = new ObjectOutputStream(IO.getOutputStream(new File(baseDir, clusterCountsFilename)));
			ous.writeObject(this);
			ous.close();

		}
	}
	
	/**
	 * @param counts
	 * @return *all* (including the unseen) counts with 1 added
	 */
	/**
	 * @param counts observed unigram counts
	 * @return *all* (including the unseen) counts with 1 added; the null word
	 *         and the start word are excluded
	 */
	public static Long2IntMap adjustUnigramCounts(Long2IntMap counts) {
		FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
		Long2IntMap smoothed = new Long2IntMap(counts.size());
		
		long nullWord = desc.createTuple();
		long startWord = desc.createStartTuple() & desc.getOvertFactorsMask();
		
		for (FactorTuple overtFactors : desc.getAllOvertFactors().keySet()) {
			long theWord = overtFactors.getBits();
			if (theWord == nullWord || theWord == startWord) {
				continue;
			}
			smoothed.put(theWord, counts.get(theWord) + 1);
		}
		
		return smoothed;
	}

	/**
	 * Deletes the per-(fold, model) count files and directories created by
	 * initializeWordCounts(). Best-effort: failures to delete are ignored.
	 */
	private void clearWordCounts() {
		for(int lmNum=0; lmNum<nrModels; ++lmNum) {
			for(int dataid=0; dataid < nrDataFiles; ++dataid) {
				File baseDir = new File(dataDir, ModelWordClusterCounts.makeBaseDirname(dataid, lmNum));
				
				File clusterCountsFile = new File(baseDir, ModelWordClusterCounts.clusterCountsFilename);
				clusterCountsFile.delete();
				
				File wordCountsDir = new File(baseDir, ModelWordClusterCounts.wordCountsDirname);
				// BUG FIX: listFiles() returns null when the directory does not
				// exist or cannot be read; the original code NPE'd here.
				File files[] = wordCountsDir.listFiles();
				if (files != null) {
					for(File file : files) {
						file.delete();
					}
				}
				wordCountsDir.delete();
				
				baseDir.delete();
			}
		}
	}
	// Per-model, per-fold word/cluster counts, indexed [lmNum][dataid];
	// set to null after the heldout data is built to free memory.
	private ModelWordClusterCounts[][] dataModelCounts;
	
	/**
	 * Builds (or, with justLoad, deserializes) the per-(model, fold)
	 * word/cluster counts. Building is parallelized through the JobManager,
	 * one job per (model, fold) pair; the job group is joined after each model.
	 *
	 * @param justLoad when true, reload previously serialized counts instead
	 *                 of repopulating them from the training data
	 * @throws IOException on read/write failure, or when a serialized counts
	 *                     file cannot be deserialized
	 */
	private void initializeWordCounts(boolean justLoad) throws IOException {
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("count initialization");
		
		dataModelCounts = new ModelWordClusterCounts[nrModels][];
		
		final DetachableOutputFileCache fileCache = new DetachableOutputFileCache();
		
		for(int lmNum=0; lmNum<nrModels; ++lmNum) {
			dataModelCounts[lmNum] = new ModelWordClusterCounts[nrDataFiles];
			
			final int nrClusters = forest.getModels().get(lmNum).getNodes().length;
			
			for(int dataid=0; dataid < nrDataFiles; ++dataid) {
				File baseDir = new File(dataDir, ModelWordClusterCounts.makeBaseDirname(dataid, lmNum));
				if (justLoad) {
					ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(baseDir, ModelWordClusterCounts.clusterCountsFilename)));
					try {
						dataModelCounts[lmNum][dataid] = (ModelWordClusterCounts) ois.readObject();
					} catch (ClassNotFoundException e) {
						// BUG FIX: previously swallowed (printStackTrace), which left
						// a null entry behind and caused an NPE later; fail fast.
						throw new IOException("cannot deserialize " + ModelWordClusterCounts.clusterCountsFilename, e);
					} finally {
						// close the stream even when deserialization fails
						ois.close();
					}
				} else {
					final ModelWordClusterCounts wordClusterCounts = new ModelWordClusterCounts(forest, dataid, lmNum, nrClusters, baseDir);
					
					dataModelCounts[lmNum][dataid] = wordClusterCounts;
					
					Runnable run = new Runnable() {
						@Override
						public void run() {
							try {
								wordClusterCounts.populateData(dataDir, fileCache);
							} catch (IOException e) {
								e.printStackTrace();
							}
						}
					};
					Job job = new Job(run, "");
					manager.addJob(group, job);
				}
			}
			
			group.join();
		}
	}
	
	/**
	 * Turns each fold's level-0 data file into a heldout file of serialized
	 * HeldoutDataRecords (word, clustered contexts, per-model probabilities,
	 * counts). Context clustering is parallelized in chunks of up to
	 * MAX_CHUNK_SIZE records; accumulated (word -> context -> count) data is
	 * kept in an access-ordered LinkedHashMap and the least-recently-touched
	 * words are flushed to disk once more than MAX_HELDOUT_RECORDS
	 * (context, count) entries are buffered.
	 *
	 * @throws IOException on read/write failure
	 */
	private void initializeHeldoutData() throws IOException {
		Experiment exp = Experiment.getInstance();
		final int vocabSize = exp.getTupleDescription().getAllOvertFactors().size();
		
		JobManager manager = JobManager.getInstance();
		final int MAX_CHUNK_SIZE = 10000;
		final int MAX_HELDOUT_RECORDS = 1000000;
		
		for(int split=0; split<nrDataFiles; ++split) {
			File dataFile = new File(this.dataDir, makeDataFilename(0, split));
			File outputFile = new File(dataDir, makeHeldoutFilename(split));
			
			FileChannel inputChannel = new FileInputStream(dataFile).getChannel();
			TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannel));

			ObjectOutputStream output = new ObjectOutputStream(IO.getOutputStream(outputFile));

			// accessOrder=true: iteration visits least-recently-used words first,
			// which is what the flush loop below relies on
			final LinkedHashMap<FactorTuple, Map<ClusteredContext,MutableInteger>> cachedHeldoutData = 
				new LinkedHashMap<FactorTuple, Map<ClusteredContext,MutableInteger>>(vocabSize, 0.75f, true);
			
			// number of (context, count) entries currently buffered
			final AtomicInteger cachedHeldoutRecords = new AtomicInteger();
			
			while(true) {
				ReadableTrainingDataNode dataNode = reader.getNext();
				if (dataNode == null) break;

				ReadableTrainingData data = dataNode.getData(0);
				while(data.hasNext()) {
					TrainingDataBlock block = data.next();
					

					int remaining = block.size(); 
					Iterator<ContextFuturesPair> it = block.iterator();
					while(remaining > 0) {
						// cluster up to MAX_CHUNK_SIZE contexts in parallel
						final ContextFuturesPair chunk[] = new ContextFuturesPair[Math.min(MAX_CHUNK_SIZE, remaining)];
						for(int i=0; i<chunk.length; ++i) {
							chunk[i] = it.next();
						}
						remaining -= chunk.length;
						
						JobGroup chunkGroup = manager.createJobGroup("chunk group");
						ClusteredContext[] clusteredContext = clusterContext(chunk, chunkGroup);
						chunkGroup.join();
						
						// merge the chunk's futures into the word->context->count cache
						int chunkCount = 0;
						for(int i=0; i<chunk.length; ++i) {
							ClusteredContext ctx = clusteredContext[i];
							TupleCountPair futures[] = chunk[i].getFutures();
							
							for(TupleCountPair tc : futures) {
								FactorTuple word = new FactorTuple(tc.tuple);
								Map<ClusteredContext,MutableInteger> map = cachedHeldoutData.get(word);
								if (map == null) {
									map = new HashMap<ClusteredContext,MutableInteger>();
									cachedHeldoutData.put(word, map);
								}
								
								MutableInteger count = map.get(ctx);
								if (count == null) {
									count = new MutableInteger();
									map.put(ctx, count);
									// only new (word, context) pairs grow the buffer
									++chunkCount;
								}
								count.add(tc.count);
							}
						}
						cachedHeldoutRecords.addAndGet(chunkCount);
						
						// flush least-recently-used words until under the limit
						if (cachedHeldoutRecords.intValue() > MAX_HELDOUT_RECORDS) {
							Iterator<Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>>> iter = 
								cachedHeldoutData.entrySet().iterator();
							
							while(iter.hasNext() && cachedHeldoutRecords.intValue() > MAX_HELDOUT_RECORDS) {
								Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>> entry = iter.next();
								iter.remove();
								
								HeldoutDataRecord rec = makeHeldoutDataRecord(entry.getKey().getBits(), split, entry.getValue());
								rec.writeExternal(output);
								
								cachedHeldoutRecords.addAndGet(- entry.getValue().size());
							}
						}
					}
				}

			}
			
			// flush whatever is still buffered
			for(Map.Entry<FactorTuple, Map<ClusteredContext,MutableInteger>> e : cachedHeldoutData.entrySet()) {
				HeldoutDataRecord rec = makeHeldoutDataRecord(e.getKey().getBits(), split, e.getValue());
				rec.writeExternal(output);				
			}
			output.close();
			
			reader.close();
		}		
	}
	
	/**
	 * Packs a word's (context -> count) map into a HeldoutDataRecord,
	 * computing the per-model folded probabilities for each context.
	 */
	private HeldoutDataRecord makeHeldoutDataRecord(long word, int split, Map<ClusteredContext,MutableInteger> map) {
		final int size = map.size();
		ClusteredContext contexts[] = new ClusteredContext[size];
		int counts[] = new int[size];
		
		int pos = 0;
		for (Map.Entry<ClusteredContext, MutableInteger> entry : map.entrySet()) {
			contexts[pos] = entry.getKey();
			counts[pos] = entry.getValue().intValue();
			++pos;
		}

		return new HeldoutDataRecord(word, contexts, computeProbabilities(word, split, contexts), counts);
	}
	
	/**
	 * Converts every fold's heldout file into inner-node data, running one
	 * job per fold and waiting for all of them to finish.
	 */
	private void initializeInnerHeldoutData() {
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("inner data");
		
		for (int fold = 0; fold < nrDataFiles; ++fold) {
			final int dataid = fold;
			Runnable task = new Runnable() {
				@Override
				public void run() {
					try {
						innerNodesEM.heldoutData2InnerData(dataid);
					} catch (IOException e) {
						e.printStackTrace();
					}
				}
			};
			manager.addJob(group, new Job(task, ""));
		}
		group.join();
	}
	/**
	 * @param word
	 * @param split
	 * @param contexts
	 * @return probabilities for each model estimated from folded data (all but @param split)
	 */
	/**
	 * @param word
	 * @param split
	 * @param contexts
	 * @return probabilities for each model estimated from folded data (all but @param split)
	 */
	private double[][] computeProbabilities(final long word, final int split, ClusteredContext contexts[]) {
		// unique clusters by model
		final Long2DoubleMap cluster2probByModel[] = new Long2DoubleMap[nrModels];
		for(int i=0; i<nrModels; ++i) {
			cluster2probByModel[i] = new Long2DoubleMap();
		}
		
		// register every cluster that occurs (adding 0 just creates the entry)
		for(ClusteredContext ctx : contexts) {
			for(int i=0; i<nrModels; ++i) {
				cluster2probByModel[i].addAndGet(ctx.clusters[i], 0);
			}
		}
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("compute probabilities");
		
		// one job per unique (model, cluster): each job fills in its entry's value
		for(int lmNum=0; lmNum<nrModels; ++lmNum) {
			Long2DoubleMap cluster2prob = cluster2probByModel[lmNum];
			final ModelWordClusterCounts dataCounts[] = dataModelCounts[lmNum];
			final BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
			
			for(Long2DoubleMap.Iterator it = cluster2prob.iterator(); it.hasNext(); ) {
				final Long2DoubleMap.Entry e = it.next();
				final int clusterid = (int) it.getKey();
				
				Runnable run = new Runnable() {
					@Override
					public void run() {
						BinaryTree<HistoryTreePayload> node = nodes[clusterid];
						
						// walk from the leaf cluster up to the root, mixing each
						// node's ML estimate with weight lambda and passing the
						// remaining (1 - lambda) mass up the tree
						double currentWeight = 1.0;
						double prob = 0.0;
						while(node != null) {
							HistoryTreePayload payload = node.getPayload();
							int clusterCount = 0;
							long clusterTotalCount = 0;
							
							// pool counts over all folds except the heldout one
							for(int dataId=0; dataId<nrDataFiles; ++dataId) {
								if (dataId == split) continue;
								
								Word2ClustersCounts word2ClustersCounts = dataCounts[dataId].wordCounts.get(word);
								if (word2ClustersCounts != null) {
									CompactReadOnlyInt2IntHashMap counts = word2ClustersCounts.getClusterCounts();
									clusterCount += counts.get(payload.clusterid);
								}
								clusterTotalCount += dataCounts[dataId].clusterCounts[payload.clusterid];
							}
							if (clusterCount > 0) {
								double clusterProb = (double) clusterCount / clusterTotalCount;
								if (Double.isNaN(clusterProb)) {
									// debugger hook: clusterTotalCount was 0 — presumably
									// should not happen when clusterCount > 0
									System.err.print("");
								}
								prob += clusterProb * currentWeight * payload.lambda;
							}
							currentWeight *= (1.0 - payload.lambda);
							
							node = node.getParent();
						}
						
						e.setValue(prob);
					}
					
				};
				
				Job job = new Job(run, "");
				manager.addJob(group, job);
				// presumably bounds the number of queued jobs — confirm
				// JobGroup.join(int) semantics
				group.join(manager.getNumWorkers());
			}
		}
		
		group.join();
		
		// gather the per-cluster results back into per-context arrays
		final double probs[][] = new double[contexts.length][];
		for(int i=0; i<contexts.length; ++i) {
			double contextProbs[] = new double[nrModels];
			probs[i] = contextProbs;
			int clusters[] = contexts[i].clusters; 
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				contextProbs[lmNum] = cluster2probByModel[lmNum].get(clusters[lmNum]);
			}
		}
		return probs;
	}

	/**
	 * Maps each context to its cluster id in every component model, one job
	 * per model. Jobs are added to the supplied group; the caller must join
	 * the group before reading the returned array.
	 */
	private ClusteredContext[] clusterContext(final ContextFuturesPair[] data, JobGroup group) {
		JobManager manager = JobManager.getInstance();
		
		final ClusteredContext result[] = new ClusteredContext[data.length];
		for (int i = 0; i < result.length; ++i) {
			result[i] = new ClusteredContext(nrModels);
		}
		
		for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
			final int modelIdx = lmNum;
			Runnable task = new Runnable() {
				@Override
				public void run() {
					LanguageModel lm = forest.getModels().get(modelIdx);
					for (int i = 0; i < result.length; ++i) {
						result[i].clusters[modelIdx] = lm.context2cluster(data[i].getContext());
					}
				}
			};
			manager.addJob(group, new Job(task, "clusterContext"));
		}
		return result;
	}
	
	/**
	 * A context represented as one cluster id per component model. Supports
	 * manual externalization (length byte followed by one int per model) and
	 * value-based equality so it can serve as a hash-map key.
	 */
	private static class ClusteredContext {
		final int clusters[];
		
		public ClusteredContext(int nrModels) {
			this.clusters = new int[nrModels];
		}

		public ClusteredContext(ObjectInput in) throws IOException {
			final byte len = in.readByte();
			clusters = new int[len];
			for (byte i = 0; i < len; ++i) {
				clusters[i] = in.readInt();
			}
		}
		
		public void writeExternal(ObjectOutput out) throws IOException {
			assert(clusters.length <= Byte.MAX_VALUE);
			out.writeByte(clusters.length);
			for (int cluster : clusters) {
				out.writeInt(cluster);
			}
		}
		
		@Override
		public int hashCode() {
			// equivalent to 31 * 1 + Arrays.hashCode(clusters)
			return 31 + Arrays.hashCode(clusters);
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			if (!(obj instanceof ClusteredContext)) {
				return false;
			}
			return Arrays.equals(clusters, ((ClusteredContext) obj).clusters);
		}
	}
	
	/**
	 * Externalizable record holding, for one word, the inner-node cluster ids
	 * and the corresponding (count, total count) pairs for every component
	 * model. The cluster ids and counts are packed with PackedInteger into a
	 * length-prefixed byte block per model. In this file it is serialized
	 * manually (the ObjectInput constructor / writeExternal), not through
	 * ObjectOutputStream.writeObject.
	 */
	private static class InnerNodesDataRecord implements Externalizable {
		long word;
		
		// [lmNum][list of inner clusters]
		int clusters[][];
		// [lmNum][word counts]
		long clusterCounts[][];
		long clusterTotalCounts[][];
		
		/**
		 * Public no-arg constructor required by the Externalizable contract
		 * (ObjectInputStream instantiates Externalizable classes through it);
		 * the original class lacked one.
		 */
		public InnerNodesDataRecord() {
		}
		
		/**
		 * @param word
		 * @param clusters
		 * @param clusterCounts
		 * @param clusterTotalCounts
		 */
		public InnerNodesDataRecord(long word, int[][] clusters, long[][] clusterCounts, long[][] clusterTotalCounts) {
			this.word = word;
			this.clusters = clusters;
			this.clusterCounts = clusterCounts;
			this.clusterTotalCounts = clusterTotalCounts;
		}

		public InnerNodesDataRecord(ObjectInput in) throws IOException {
			readExternal(in);
		}
		
		/* (non-Javadoc)
		 * @see java.io.Externalizable#writeExternal(java.io.ObjectOutput)
		 */
		@Override
		public void writeExternal(ObjectOutput out) throws IOException {
			out.writeLong(word);
			// nrModels
			out.writeByte(clusters.length);
			
			for(int lmNum=0; lmNum<clusters.length; ++lmNum) {
				//nrClusters
				int nrClusters = clusters[lmNum].length;
				out.writeInt(nrClusters);
				
				// BUG FIX: the buffer previously reserved MAX_LENGTH bytes for
				// only ONE packed int instead of one per cluster id, which could
				// overflow the buffer when many cluster ids need multi-byte
				// encodings.
				byte buf[] = new byte[PackedInteger.MAX_LENGTH * nrClusters + PackedInteger.MAX_LONG_LENGTH * nrClusters * 2];
				int pos = 0;
				
				for(int clusterid : clusters[lmNum]) {
					pos = PackedInteger.writeInt(buf, pos, clusterid);
				}

				for(long count : clusterCounts[lmNum]) {
					pos = PackedInteger.writeLong(buf, pos, count);
				}
				for(long count : clusterTotalCounts[lmNum]) {
					pos = PackedInteger.writeLong(buf, pos, count);
				}
				
				// write the actual packed size, then the packed bytes
				out.writeInt(pos);
				out.write(buf, 0, pos);
			}
			
		}

		/* (non-Javadoc)
		 * @see java.io.Externalizable#readExternal(java.io.ObjectInput)
		 */
		@Override
		public void readExternal(ObjectInput in) throws IOException {
			word = in.readLong();
			byte nrModels = in.readByte();
			
			clusters = new int[nrModels][];
			clusterCounts = new long[nrModels][];
			clusterTotalCounts = new long[nrModels][];
			
			for(int lmNum=0; lmNum<clusters.length; ++lmNum) {
				int nrClusters = in.readInt();
				int bufSize = in.readInt();
				
				byte buf[] = new byte[bufSize];
				in.readFully(buf);
				
				clusters[lmNum] = new int[nrClusters];
				clusterCounts[lmNum] = new long[nrClusters];
				clusterTotalCounts[lmNum] = new long[nrClusters];
				
				// decode in the same order writeExternal packed: ids, counts, totals
				int pos = 0;
				for(int i=0; i<nrClusters; ++i) {
					clusters[lmNum][i] = PackedInteger.readInt(buf, pos);
					pos += PackedInteger.getReadIntLength(buf, pos);
				}
				
				for(int i=0; i<nrClusters; ++i) {
					clusterCounts[lmNum][i] = PackedInteger.readLong(buf, pos);
					pos += PackedInteger.getReadLongLength(buf, pos);
				}
			
				for(int i=0; i<nrClusters; ++i) {
					clusterTotalCounts[lmNum][i] = PackedInteger.readLong(buf, pos);
					pos += PackedInteger.getReadLongLength(buf, pos);
				}
			}
		}
	}
	
	private class InnerNodesLBFGS {
		BitSet leafClusters[]; // [lmNum] -> set of leaf cluster ids for that model
		// [lmNum][clusterid] -> index into params, or -1 for leaf clusters
		int clusters2paramsMap[][];
		// one interpolation weight per inner (non-leaf) cluster across all models
		double params[];
		
		/**
		 * Assigns one parameter slot to every inner (non-leaf) cluster of every
		 * model and records which clusters are leaves.
		 */
		private InnerNodesLBFGS() {
			leafClusters = new BitSet[nrModels];
			clusters2paramsMap = new int[nrModels][];
			
			int nrParams = 0;
			for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
				final BinaryTree<HistoryTreePayload>[] nodes = forest.getModels().get(lmNum).getNodes();
				final BitSet leaves = new BitSet(nodes.length);
				final int[] cluster2param = new int[nodes.length];
				
				for (int clusterid = 0; clusterid < nodes.length; ++clusterid) {
					BinaryTree<HistoryTreePayload> node = nodes[clusterid];
					if (node != null && node.isLeaf()) {
						// leaves carry no tunable weight
						leaves.set(clusterid);
						cluster2param[clusterid] = -1;
					} else {
						cluster2param[clusterid] = nrParams++;
					}
				}
				
				clusters2paramsMap[lmNum] = cluster2param;
				leafClusters[lmNum] = leaves;
			}
			
			params = new double[nrParams];
		}
		
		/**
		 * Optimizes the inner-node weights with bounded L-BFGS-B, minimizing
		 * the heldout cross-entropy. Weights are constrained to (1e-7, 1.0]
		 * and initialized to 0.1; the result is copied back into params.
		 *
		 * @param nrIterations maximum number of optimizer iterations
		 */
		private void doOptimize(int nrIterations) {
			Minimizer minimizer = new Minimizer();
			
			ArrayList<Bound> bounds = new ArrayList<Bound>(params.length);
			// use a small number since 0 appears to mean no bound
			Bound bound = new Bound(Double.valueOf(1e-7), Double.valueOf(1.0));
			for(int i=0; i<params.length; ++i) {
				bounds.add(bound);
			}

			minimizer.setBounds(bounds);
			StopConditions conditions = minimizer.getStopConditions();
			// BUG FIX: the iteration limit was hard-coded to 200, silently
			// ignoring the nrIterations argument.
			conditions.setMaxIterations(nrIterations);

			
			DifferentiableFunction fun = new DifferentiableFunction() {
				@Override
				public FunctionValues getValues(double[] point) {
					double gradient[] = new double[params.length];
					double crossEntropy = computeGradientAndCrossEntropy(point, gradient);
					FunctionValues val = new FunctionValues(crossEntropy, gradient);
					System.err.printf("crossEntropy = %f\n", crossEntropy);
					return val;
				}
			};
			
			try {
				Arrays.fill(params, 0.1);
				Result result = minimizer.run(fun, params);
				System.arraycopy(result.point, 0, params, 0, params.length);
				
				System.err.println(result.iterationsInfo.toString());
			} catch (LBFGSBException e) {
				e.printStackTrace();
			}
		}
		
		/**
		 * Copies the optimized parameters back into the decoder's per-cluster
		 * weight arrays; leaf clusters (mapped to -1) are left untouched.
		 */
		private void updateWeights() {
			for (int lmNum = 0; lmNum < nrModels; ++lmNum) {
				final double weights[] = forest.getDecodingRuntime().getWeights(lmNum);
				final int map[] = clusters2paramsMap[lmNum];
				
				for (int clusterid = 0; clusterid < map.length; ++clusterid) {
					final int pos = map[clusterid];
					if (pos < 0) {
						continue; // leaf cluster: no tunable weight
					}
					weights[clusterid] = params[pos];
				}
			}
		}
		
		/**
		 * Computes the per-count-normalized cross-entropy of the heldout data
		 * under the weights in params, and fills gradient with its partial
		 * derivatives. One job per fold accumulates entropies, gradient terms,
		 * and counts into per-fold arrays; the results are combined after the
		 * job group joins.
		 *
		 * NOTE(review): weights below MIN_WEIGHT are clamped by writing back
		 * into the caller-supplied params array — confirm this side effect is
		 * intended by the optimizer.
		 *
		 * @param params current weight vector, one entry per inner cluster
		 * @param gradient output array, same length as params; overwritten
		 * @return the cross-entropy value
		 */
		private double computeGradientAndCrossEntropy(final double params[], final double gradient[]) {
			assert(params.length == gradient.length);
			
			JobManager manager = JobManager.getInstance();
			JobGroup group = manager.createJobGroup("cross entropy");
			
			// [dataid][param-position]
			final double totalEntropies[][] = new double[nrDataFiles][];
			final double partialGradients[][] = new double[nrDataFiles][];
			final long totalCounts[][] = new long[nrDataFiles][];
			
			for(int fold=0; fold<nrDataFiles; ++fold) {
				final int dataid = fold;
				totalCounts[dataid] = new long[params.length];
				totalEntropies[dataid] = new double[params.length];

				Runnable run = new Runnable() {
					@Override
					public void run() {
						try {
							File innerDir = new File(dataDir, makeInnerNodesDirname(dataid));

							final double partialGradient[] = new double[gradient.length];
							partialGradients[dataid] = partialGradient;

							DecodingRuntime runtime = forest.getDecodingRuntime();
							
							ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
							
							double foldEntropy = 0; // this is actually the sum of entropies of all lmNum's
							// records are read until an IOException signals end of stream
							while(true) {
								try {
									HeldoutDataRecord rec = new HeldoutDataRecord(ois);
									long word = rec.word;
									
									// load this word's precomputed inner-node counts
									File innerNodesFile = new File(innerDir, Long.toString(word));
									ObjectInputStream innerData = new ObjectInputStream(IO.getInputStream(innerNodesFile));
									InnerNodesDataRecord innerRec = new InnerNodesDataRecord(innerData);
									innerData.close();

									assert(word == innerRec.word);

									// per-model memo tables (default value 0.0 means "unset")
									OpenIntToDoubleHashMap cluster2MLprobs[] = new OpenIntToDoubleHashMap[nrModels];
									OpenIntToDoubleHashMap cluster2probs[] = new OpenIntToDoubleHashMap[nrModels];
									
									for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
										int innerClusters[] = innerRec.clusters[lmNum];
										
										// ML probability of the word in each inner cluster
										OpenIntToDoubleHashMap cluster2MLprob = new OpenIntToDoubleHashMap(innerClusters.length, 0.0);
										for(int i=0; i<innerClusters.length; ++i) {
											cluster2MLprob.put(innerClusters[i], (double)innerRec.clusterCounts[lmNum][i] / innerRec.clusterTotalCounts[lmNum][i]);
										}
										OpenIntToDoubleHashMap cluster2prob = new OpenIntToDoubleHashMap(innerClusters.length, 0.0);
										cluster2MLprobs[lmNum] = cluster2MLprob;
										cluster2probs[lmNum] = cluster2prob;
									}
									
									for(int i=0; i<rec.contexts.length; ++i) {
										int clusters[] = rec.contexts[i].clusters;
										double probs[] = rec.probabilities[i];
										int count = rec.counts[i];
										
										for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
											// contribution of all OTHER models is fixed while
											// this model's weight varies
											double sumProbs = 0;
											double sumWeights = 0;
											
											for(int _lmNum = 0; _lmNum <nrModels; ++_lmNum) {
												if (_lmNum != lmNum) {
													double weight = runtime.getWeights(_lmNum)[clusters[_lmNum]];
													sumWeights += weight;
													sumProbs += weight * probs[_lmNum];
												}
											}
											
											LanguageModel lm = forest.getModels().get(lmNum);
											BinaryTree<HistoryTreePayload> nodes[] = lm.getNodes();
											
											int leafClusterid = clusters[lmNum];
											BinaryTree<HistoryTreePayload> parent = nodes[leafClusterid].getParent();
											if (parent == null) continue;
											
											OpenIntToDoubleHashMap cluster2MLprob = cluster2MLprobs[lmNum];
											OpenIntToDoubleHashMap cluster2prob = cluster2probs[lmNum];
											
											fillInterpolatedProbabilities(cluster2MLprob, cluster2prob, parent);

											// walk the inner nodes from the leaf's parent to
											// the root, updating each node's parameter
											BinaryTree<HistoryTreePayload> node = parent;
											while(node != null) {
												int clusterid = node.getPayload().clusterid;

												int pos = clusters2paramsMap[lmNum][clusterid];
												totalCounts[dataid][pos] += count;
												
												// clamp tiny weights (mutates params; see javadoc)
												double weight = params[pos];
												if (weight < MIN_WEIGHT) {
													params[pos] = MIN_WEIGHT;
													weight = MIN_WEIGHT;
													//System.err.print("");
												}
												double myProb = cluster2prob.get(clusterid);
												double mySumWeights = sumWeights + weight;
												double mySumProbs = sumProbs + weight * myProb;

												if (mySumProbs <= 0) {
													// oops!
													StringBuilder sb = new StringBuilder();
													sb.append(String.format("sumProbs=%f, sumWeights=%f, word=%s, lmNum=%d\n", mySumProbs, mySumWeights, FactorTuple.toStringNoNull(rec.word), lmNum));
													System.err.print(sb.toString());
												}
												
												if (mySumWeights > 0) {
													double prob = mySumProbs / mySumWeights;
													double addition = count * ProbMath.log2(prob);
													if (Double.isNaN(addition)) {
														// debugger hook for NaN entropies
														System.err.print("");
													}
													totalEntropies[dataid][pos] -= addition;
													
													double update = count / mySumWeights * (myProb/prob - 1);
													partialGradient[pos] -= update;
												} else {
													System.err.printf("sumWeights = %f\n", mySumWeights);
												}
												
												node = node.getParent();
											}
											
										}
										
									}
								} catch(IOException e) {
									// end of the heldout stream
									break;
								}
							}
							ois.close();
						} catch (IOException e) {
							e.printStackTrace();
						}


					}
				};
				Job job = new Job(run, "cross entropy");
				manager.addJob(group, job);
			}
			group.join();
			
			// total count per parameter, summed over all folds
			long superTotalCounts[] = new long[params.length];
			for(long totalCount[] : totalCounts) {
				for(int i=0; i<params.length; ++i) {
					superTotalCounts[i] += totalCount[i];
				}
			}
			
			// normalize each parameter's entropy and gradient by its total count
			Arrays.fill(gradient, 0);
			double entropy = 0;
			for(int fold=0; fold<nrDataFiles; ++fold) {
				double partialGradient[] = partialGradients[fold];
				for(int i=0; i<params.length; ++i) {
					long totalCount = superTotalCounts[i];
					if (totalCount > 0) {
						entropy += totalEntropies[fold][i] / totalCount;
						gradient[i] += partialGradient[i] / totalCount;
					}
				}
			}

			return entropy;
		}
		
		/**
		 * Recursively computes the tree-interpolated probability for node's
		 * cluster: lambda * ML(cluster) + (1 - lambda) * P(parent), memoizing
		 * results in cluster2prob.
		 *
		 * @param cluster2MLprob per-cluster ML probabilities (default 0.0)
		 * @param cluster2prob memo table for interpolated probabilities
		 * @param node the tree node whose cluster probability is wanted
		 * @return the interpolated probability of node's cluster
		 */
		private double fillInterpolatedProbabilities(OpenIntToDoubleHashMap cluster2MLprob, OpenIntToDoubleHashMap cluster2prob, BinaryTree<HistoryTreePayload> node) {
			int clusterid = node.getPayload().clusterid;
			
			// already computed? (0.0 is the map's "missing" default)
			double prob = cluster2prob.get(clusterid);
			if (prob > 0) return prob;
			
			double weight = node.getPayload().lambda;

			prob = weight * cluster2MLprob.get(clusterid);
			
			BinaryTree<HistoryTreePayload> parent = node.getParent();
			if (parent != null) {
				double parentProb = fillInterpolatedProbabilities(cluster2MLprob, cluster2prob, parent);
				prob += (1 - weight) * parentProb;
			}
			if (Double.isNaN(prob)) {
				// debugger hook for NaN probabilities
				System.err.print("");
			}
			
			cluster2prob.put(clusterid, prob);
			return prob;
		}
		
		/**
		 * Converts one heldout fold into per-word inner-node data records.
		 * For every heldout word it seeds the leaf clusters of each model with
		 * the heldout probabilities and with counts aggregated over all folds
		 * EXCEPT this one, propagates probabilities/counts bottom-up through each
		 * model's history tree, and writes the non-empty inner-node data to a
		 * file named after the word under the fold's inner-nodes directory.
		 *
		 * @param dataid the heldout fold being processed; its own counts are
		 *               excluded from the aggregated cross-validation counts
		 * @throws IOException if the heldout data file cannot be opened
		 */
		private void heldoutData2InnerData(int dataid) throws IOException {
			ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
			//LanguageModel lm = forest.getModels().get(lmNum);
			//BinaryTree<HistoryTreePayload>[] nodes = lm.getNodes();
			
			//final ModelWordClusterCounts dataCounts[] = dataModelCounts[lmNum];
			
			File innerDir = new File(dataDir, makeInnerNodesDirname(dataid));
			if (!innerDir.isDirectory()) {
				innerDir.mkdirs();
			}
			
			// one record per heldout word; end-of-stream is signaled by the
			// IOException thrown from the record constructor (caught below)
			while(true) {
				try {
					HeldoutDataRecord rec = new HeldoutDataRecord(ois);
					long word = rec.word;
					//Long2DoubleMap leafProbs = new Long2DoubleMap();
					//Long2LongMap leafCounts = new Long2LongMap();
					
					// per model: the frontier of nodes still to be merged upwards
					// (starts as a copy of the model's leaf-cluster bit set)
					BitSet currentLeavesModel[] = new BitSet[nrModels];
					// in-tree interpolated probabilities
					double clusterProbsModel[][] = new double[nrModels][];
	
					// per model, indexed by clusterid: cross-validated (word, cluster)
					// counts and cluster totals, heldout fold excluded
					long clusterCountsModel[][] = new long[nrModels][];
					long clusterTotalCountsModel[][] = new long[nrModels][];
	
					for(int lmNum=0; lmNum<nrModels; ++lmNum) {
						BinaryTree<HistoryTreePayload>[] nodes = forest.getModels().get(lmNum).getNodes();
						currentLeavesModel[lmNum] = (BitSet) leafClusters[lmNum].clone();
						
						clusterProbsModel[lmNum] = new double[nodes.length];
						clusterCountsModel[lmNum] = new long[nodes.length];
						clusterTotalCountsModel[lmNum] = new long[nodes.length];
					}
					//long heldoutClusterCounts[] = new long[nodes.length];
					
					// seed the leaf clusters observed in the heldout contexts
					for(int i=0; i<rec.contexts.length; ++i) {
						ClusteredContext context = rec.contexts[i];
						double probabilities[] = rec.probabilities[i];
						
						for(int lmNum=0; lmNum<nrModels; ++lmNum) {
							int clusterid = context.clusters[lmNum];
							
							clusterProbsModel[lmNum][clusterid] = rec.probabilities[i][lmNum];
							//heldoutClusterCounts[clusterid] = rec.counts[i];
							
							int clusterCount = 0;
							long clusterTotalCount = 0;
							
							// aggregate counts over all training folds except the heldout one
							for(int fold=0; fold<nrDataFiles; ++fold) {
								if (fold == dataid) continue;
								
								Word2ClustersCounts word2ClustersCounts = dataModelCounts[lmNum][fold].wordCounts.get(word);
								if (word2ClustersCounts != null) {
									CompactReadOnlyInt2IntHashMap counts = word2ClustersCounts.getClusterCounts();
									clusterCount += counts.get(clusterid);
								}
								clusterTotalCount += dataModelCounts[lmNum][fold].clusterCounts[clusterid];
							}
							clusterCountsModel[lmNum][clusterid] = clusterCount;
							clusterTotalCountsModel[lmNum][clusterid] = clusterTotalCount;
						}
					}
					
					// per model: sorted list of inner-node cluster ids with non-zero counts
					int clusters[][] = new int[nrModels][];
					
					for(int lmNum=0; lmNum<nrModels; ++lmNum) {
						BitSet currentLeaves = currentLeavesModel[lmNum];
						BitSet nonEmptyInnerNodes = new BitSet(currentLeaves.length()/2 + 1);
						
						double clusterProbs[] = clusterProbsModel[lmNum];
						long clusterCounts[] = clusterCountsModel[lmNum];
						long clusterTotalCounts[] = clusterTotalCountsModel[lmNum];
						
						// bottom-up sweep: repeatedly merge sibling pairs whose data is
						// ready into their parent until the frontier is exhausted
						while(!currentLeaves.isEmpty()) {
							// iterate high-to-low: children have larger ids than parents
							for(int clusterid = currentLeaves.length()-1; clusterid > 0; --clusterid) {
								if (!currentLeaves.get(clusterid)) continue;
								currentLeaves.clear(clusterid);
								
								BinaryTree<HistoryTreePayload>[] nodes = forest.getModels().get(lmNum).getNodes();
								
								BinaryTree<HistoryTreePayload> node = nodes[clusterid];
								BinaryTree<HistoryTreePayload> parent = node.getParent();
								if (parent == null) continue;
								
								BinaryTree<HistoryTreePayload> left = parent.getLeft();
								BinaryTree<HistoryTreePayload> right = parent.getRight();
								int leftId = left.getPayload().clusterid;
								int rightId = right.getPayload().clusterid;
								
								// both siblings must be ready before merging into the parent;
								// NOTE(review): only the LEFT child may be satisfied by being a
								// backoff node — presumably backoff nodes are always left
								// children; confirm against the tree-building code
								if (!(left == node || currentLeaves.get(leftId) || left.getPayload().isBackoff)) continue;
								if (!(right == node || currentLeaves.get(rightId))) continue;
								
								int parentClusterid = parent.getPayload().clusterid;
								
								double prob = clusterProbs[clusterid];
								double lambda = node.getPayload().lambda;
		
								// invert the interpolation prob = lambda*ML + (1-lambda)*parentProb
								// to recover the parent's interpolated probability
								double parentProb;
								if (lambda == 1.0) {
									parentProb = prob;
								} else {
									parentProb = (prob - lambda * clusterCounts[clusterid]/clusterTotalCounts[clusterid])  / (1 - lambda);
								}
								clusterProbs[parentClusterid] = parentProb;
		
								clusterCounts[parentClusterid] = clusterCounts[leftId] + clusterCounts[rightId];

								if (clusterCounts[parentClusterid] > 0) {
									nonEmptyInnerNodes.set(parentClusterid);
								}
								
								clusterTotalCounts[parentClusterid] = clusterTotalCounts[leftId] + clusterTotalCounts[rightId];
								//heldoutClusterCounts[parentClusterid] = heldoutClusterCounts[leftId] + heldoutClusterCounts[rightId];
								
								// parent joins the frontier; both children leave it
								currentLeaves.set(parentClusterid);
								currentLeaves.clear(left.getPayload().clusterid);
								currentLeaves.clear(right.getPayload().clusterid);
							}
						}
						// collect the non-empty inner-node ids into a compact sorted array
						clusters[lmNum] = new int[nonEmptyInnerNodes.length()];
						int pos = 0;
						for(int clusterid=0; clusterid<clusters[lmNum].length; ++clusterid) {
							if (nonEmptyInnerNodes.get(clusterid)) {
								clusters[lmNum][pos++] = clusterid;
							}
						}
						if (pos < clusters[lmNum].length) {
							clusters[lmNum] = Arrays.copyOf(clusters[lmNum], pos);
						}
					}
					currentLeavesModel = null;
					
					// compact the (sparse, clusterid-indexed) count arrays down to
					// arrays parallel to clusters[lmNum]
					long compactClusterCounts[][] = new long[nrModels][];
					long compactClusterTotalCounts[][] = new long[nrModels][];
					
					for(int lmNum=0; lmNum<nrModels; ++lmNum) {
						compactClusterCounts[lmNum] = new long[clusters[lmNum].length];
						compactClusterTotalCounts[lmNum] = new long[clusters[lmNum].length];
						int pos = 0;
						for(int clusterid : clusters[lmNum]) {
							compactClusterCounts[lmNum][pos] = clusterCountsModel[lmNum][clusterid];
							compactClusterTotalCounts[lmNum][pos] = clusterTotalCountsModel[lmNum][clusterid];
							++pos;
						}
					}
					// release the sparse arrays before serializing (they can be large)
					clusterCountsModel = null;
					clusterTotalCountsModel = null;
					
					InnerNodesDataRecord innerData = new InnerNodesDataRecord(word, clusters, compactClusterCounts, compactClusterTotalCounts);
					
					// one output file per word, named by the word's packed bits
					File innerNodesFile = new File(innerDir, Long.toString(word));
					ObjectOutputStream oos = new ObjectOutputStream(IO.getOutputStream(innerNodesFile));
					innerData.writeExternal(oos);
					oos.close();
					
				} catch(IOException e) {
					// end of heldout stream
					break;
				}
			}
			ois.close();
		}
		
	}
	/**
	 * Builds the name of the per-fold directory that holds inner-node data records.
	 *
	 * @param dataid the data fold id
	 * @return a directory name of the form {@code inner-<dataid>}
	 */
	static String makeInnerNodesDirname(int dataid) {
		return "inner-" + dataid;
	}
	
	/**
	 * One heldout-data entry: a word together with the clustered contexts it was
	 * seen in, the per-model probabilities for each context, and the heldout
	 * occurrence counts. Serialized compactly via {@link Externalizable}.
	 */
	private static class HeldoutDataRecord implements Externalizable {
		long word;
		ClusteredContext contexts[];
		// [context list][lmNum]
		double probabilities[][];
		int counts[];
		
		/**
		 * No-arg constructor required by the {@link Externalizable} contract
		 * (deserialization via {@code ObjectInputStream.readObject} needs it).
		 */
		public HeldoutDataRecord() {
		}
		
		public HeldoutDataRecord(long word, ClusteredContext[] contexts, double[][] probabilities, int[] counts) {
			this.word = word;
			this.contexts = contexts;
			this.probabilities = probabilities;
			this.counts = counts;
		}
		
		/** Reads a record directly from the stream (EOF surfaces as IOException). */
		public HeldoutDataRecord(ObjectInput in) throws IOException {
			readExternal(in);
		}
		
		/* (non-Javadoc)
		 * @see java.io.Externalizable#writeExternal(java.io.ObjectOutput)
		 */
		@Override
		public void writeExternal(ObjectOutput out) throws IOException {
			out.writeLong(word);
			out.writeInt(contexts.length);
			
			// guard the empty-record case: probabilities[0] would throw otherwise
			int nrModels = contexts.length == 0 ? 0 : probabilities[0].length;
			// the model count is stored as a single byte
			assert(nrModels <= Byte.MAX_VALUE);
			out.writeByte(nrModels);
			
			for(int i=0; i<contexts.length; ++i) {
				contexts[i].writeExternal(out);
				out.writeInt(counts[i]);
				for(double prob : probabilities[i]) {
					out.writeDouble(prob);
				}
			}
			
		}
		/* (non-Javadoc)
		 * @see java.io.Externalizable#readExternal(java.io.ObjectInput)
		 */
		@Override
		public void readExternal(ObjectInput in) throws IOException {
			word = in.readLong();
			int len = in.readInt();
			byte nrModels = in.readByte();
			
			contexts = new ClusteredContext[len];
			counts = new int[len];
			probabilities = new double[len][];
			
			for(int i=0; i<len; ++i) {
				contexts[i] = new ClusteredContext(in);
				counts[i] = in.readInt();
				double probs[] = new double[nrModels];
				for(byte lmNum=0; lmNum<nrModels; ++lmNum) {
					probs[lmNum] = in.readDouble();
				}
				probabilities[i] = probs;
			}
		}
	}
	
	/**
	 * Packed vector of per-leaf interpolation weights across all models, plus
	 * the machinery to evaluate the heldout cross-entropy and its gradient with
	 * respect to those weights (one parallel job per data fold).
	 *
	 * <p>Layout: {@code params[i]} is the weight of one leaf cluster;
	 * {@code modelCluster2pos[lmNum][clusterid]} maps a model's cluster id to
	 * its index in {@code params}, or -1 for non-leaf clusters.
	 */
	private class Parameters {
		// clusterid -> position in params[], or -1 if the cluster is not a leaf
		final int modelCluster2pos[][];
		// one weight per leaf cluster, concatenated over all models
		final double params[];
		
		public Parameters(boolean useOldValues) {
			this(useOldValues, true);
		}
		
		/**
		 * @param useOldValues initialize from the current decoding-runtime weights
		 * @param random       when not using old values, jitter the 0.5 default by +-0.1
		 */
		public Parameters(boolean useOldValues, boolean random) {
			modelCluster2pos = new int[nrModels][];
			Random rnd = new Random();
			
			// leaves make up half the nodes of each (binary) history tree
			int totalLeafCount = 0;
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				totalLeafCount += (forest.getModels().get(lmNum).getNodes().length /*cluster ids start with 1, no need for +1 */) / 2;
			}
			params = new double[totalLeafCount];
			
			int pos = 0;
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
				double weights[] = forest.getDecodingRuntime().getWeights(lmNum);
				
				int cluster2pos[] = new int[nodes.length];
				Arrays.fill(cluster2pos, -1);
				
				int leafNum = 0;
				for(BinaryTree<HistoryTreePayload> node : nodes) {
					if (node != null && node.isLeaf()) {
						int clusterid = node.getPayload().clusterid;
						cluster2pos[clusterid] = pos;
						
						double value;
						if (useOldValues) {
							value = weights[clusterid];
						} else {
							if (clusterid == 1) {
								// cluster#1 is a leaf only in the unigram model
								// set lower weight for faster convergence
								value = 0.01;
							} else {
								// random: 0.5 +- 0.1
								value = random ? rnd.nextDouble() * 0.2 + 0.4 : 0.5;
							}
						}
						params[pos] = value;
						++pos;
						++leafNum;
					}
				}
				modelCluster2pos[lmNum] = cluster2pos;
			}
		}
		
		/** @return the total number of leaf weights across all models */
		public int nrParams() {
			return params.length;
		}
		
		/**
		 * Computes the heldout cross-entropy under the current weights,
		 * one parallel job per data fold; each fold's entropy is normalized
		 * by that fold's own token count, then the folds are summed.
		 */
		@SuppressWarnings("unused")
		public double computeCrossEntropy() {
			JobManager manager = JobManager.getInstance();
			JobGroup group = manager.createJobGroup("cross entropy");

			final double totalEntropies[] = new double[nrDataFiles];
			for(int fold=0; fold<nrDataFiles; ++fold) {
				final int dataid = fold;
				Runnable run = new Runnable() {
					@Override
					public void run() {
						try {
							ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
							long totalCount = 0;
							double foldEntropy = 0;
							// EOF is signaled by IOException from the record constructor
							while(true) {
								try {
									HeldoutDataRecord rec = new HeldoutDataRecord(ois);
									for(int i=0; i<rec.contexts.length; ++i) {
										int clusters[] = rec.contexts[i].clusters;
										double probs[] = rec.probabilities[i];
										
										double sumProbs = 0;
										double sumWeights = 0;

										for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
											// BUG FIX: look the weight up in params[]; the original
											// mistakenly used the position index itself as the weight
											int pos = modelCluster2pos[lmNum][clusters[lmNum]];
											double weight = params[pos];
											sumWeights += weight;
											sumProbs += weight * probs[lmNum];
										}
										
										assert(sumWeights > 0);
										double prob = sumProbs / sumWeights;
										totalCount += rec.counts[i];
										foldEntropy -= rec.counts[i] * ProbMath.log2(prob);
									}
								} catch(IOException e) {
									break;
								}
							}
							foldEntropy /= totalCount;
							totalEntropies[dataid] = foldEntropy;
							ois.close();
						} catch (IOException e) {
							e.printStackTrace();
						}

					}
				};
				Job job = new Job(run, "cross entropy");
				manager.addJob(group, job);
			}
			group.join();
			
			double entropy = 0;
			for(int fold=0; fold<nrDataFiles; ++fold) {
				entropy += totalEntropies[fold];
			}
			return entropy;
		}
		
		@SuppressWarnings("unused")
		public double computeGradientAndCrossEntropy(final double[] gradient) {
			return computeGradientAndCrossEntropy(this.params, gradient);
		}
		
		/**
		 * Computes the heldout cross-entropy and its gradient w.r.t. the given
		 * parameter vector, one parallel job per data fold.
		 *
		 * <p>NOTE: weights below {@link ForestInterpolation#MIN_WEIGHT} are clamped
		 * IN PLACE in {@code params} (the optimizer's point vector is mutated).
		 *
		 * @param params   weight vector to evaluate (may be clamped in place)
		 * @param gradient output array (same length as {@code params}), overwritten
		 * @return the cross-entropy normalized by the total token count over all folds
		 */
		public double computeGradientAndCrossEntropy(final double params[], final double[] gradient) {
			assert(params.length == gradient.length);
			
			JobManager manager = JobManager.getInstance();
			JobGroup group = manager.createJobGroup("cross entropy");
			
			final double totalEntropies[] = new double[nrDataFiles];
			final double partialGradients[][] = new double[nrDataFiles][];
			final long totalCounts[] = new long[nrDataFiles];
			
			for(int fold=0; fold<nrDataFiles; ++fold) {
				final int dataid = fold;
				Runnable run = new Runnable() {
					@Override
					public void run() {
						try {
							final double partialGradient[] = new double[gradient.length];
							partialGradients[dataid] = partialGradient;
							
							ObjectInputStream ois = new ObjectInputStream(IO.getInputStream(new File(dataDir, makeHeldoutFilename(dataid))));
							long totalCount = 0;
							double foldEntropy = 0;
							// EOF is signaled by IOException from the record constructor
							while(true) {
								try {
									HeldoutDataRecord rec = new HeldoutDataRecord(ois);
									for(int i=0; i<rec.contexts.length; ++i) {
										int clusters[] = rec.contexts[i].clusters;
										double probs[] = rec.probabilities[i];
										
										double sumProbs = 0;
										double sumWeights = 0;

										for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
											int pos = modelCluster2pos[lmNum][clusters[lmNum]];
											double weight = params[pos];
											// clamp tiny weights in place to keep the model mixture valid
											if (weight < MIN_WEIGHT) {
												params[pos] = MIN_WEIGHT;
												weight = MIN_WEIGHT;
												//System.err.print("");
											}
											sumWeights += weight;
											sumProbs += weight * probs[lmNum];
										}
										
										if (sumProbs <= 0) {
											// oops! dump full diagnostics for the degenerate mixture
											StringBuilder sb = new StringBuilder();
											sb.append(String.format("sumProbs=%f, sumWeights=%f, word=%s\n", sumProbs, sumWeights, FactorTuple.toStringNoNull(rec.word)));
											for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
												int pos = modelCluster2pos[lmNum][clusters[lmNum]];
												double weight = params[pos];
												sb.append(String.format(" [%d]: cluster=%d, weight=%f, prob=%f\n",
														lmNum, clusters[lmNum], weight, probs[lmNum]));
											}
											System.err.print(sb.toString());
										}
										
										if (sumWeights > 0) {
											double prob = sumProbs / sumWeights;
											int count = rec.counts[i];
											totalCount += count;
											double addition = count * ProbMath.log2(prob);
											// NaN here means a bad probability slipped through;
											// leave the debugger breakpoint anchor in place
											if (Double.isNaN(addition)) {
												System.err.print("");
											}
											foldEntropy -= addition;
											
											// d(-count*log2(prob))/d(weight_lm), up to the 1/ln(2) constant
											for(int lmNum=0; lmNum<nrModels; ++lmNum) {
												double update = count / sumWeights * (probs[lmNum]/prob - 1);
												partialGradient[modelCluster2pos[lmNum][clusters[lmNum]]] -= update;
											}
										} else {
											System.err.printf("sumWeights = %f\n", sumWeights);
										}
									}
								} catch(IOException e) {
									break;
								}
							}
							totalCounts[dataid] = totalCount;
							
							//double revTotalCount = 1.0 / totalCount / nrDataFiles;
							
							//foldEntropy *= revTotalCount;
							totalEntropies[dataid] = foldEntropy;
							
							/*
							for(int i=0; i<partialGradient.length; ++i) {
								partialGradient[i] *= revTotalCount;
							}
							*/
							ois.close();
						} catch (IOException e) {
							e.printStackTrace();
						}

					}
				};
				Job job = new Job(run, "cross entropy");
				manager.addJob(group, job);
			}
			group.join();
			
			Arrays.fill(gradient, 0);
			
			// normalize entropy and gradient by the grand-total token count
			long superTotalCount = 0;
			for(long totalCount : totalCounts) {
				superTotalCount += totalCount;
			}
			double revTotalCount = 1.0 / superTotalCount;
			
			double entropy = 0;
			for(int fold=0; fold<nrDataFiles; ++fold) {
				
				entropy += totalEntropies[fold] * revTotalCount;

				double partialGradient[] = partialGradients[fold];
				for(int i=0; i<gradient.length; ++i) {
					gradient[i] += partialGradient[i] * revTotalCount;
				}
			}
			
			/*
			for(int i=0; i<gradient.length; ++i) {
				if (params[i] == MIN_WEIGHT && gradient[i] < 0) {
					gradient[i] = 0;
				}
			}
			*/
			return entropy;
		}

		/**
		 * Writes the packed weights back into the forest's decoding runtime,
		 * expanding them to per-cluster weight arrays (non-leaf clusters keep 0).
		 */
		public void unpackParams() {
			for(int lmNum=0; lmNum<nrModels; ++lmNum) {
				BinaryTree<HistoryTreePayload> nodes[] = forest.getModels().get(lmNum).getNodes();
				
				int cluster2pos[] = modelCluster2pos[lmNum];
				double newWeights[] = new double[nodes.length];
				
				for(int clusterid = 0; clusterid<cluster2pos.length; ++clusterid) {
					int pos = cluster2pos[clusterid];
					if (pos >= 0) {
						newWeights[clusterid] = params[pos];
					}
				}
				forest.getDecodingRuntime().setWeights(lmNum, newWeights);
			}
		}
		
	}
	
	/**
	 * Thin wrapper around the lbfgsb {@link Minimizer}: minimizes the heldout
	 * cross-entropy over the leaf interpolation weights, with every weight
	 * box-constrained to [1e-7, 1.0].
	 */
	private class MyLBFGSB {
		Parameters params;
		Minimizer minimizer;
		
		public MyLBFGSB(boolean useOldValues) {
			params = new Parameters(useOldValues);

			final int nrParams = params.nrParams();
			ArrayList<Bound> bounds = new ArrayList<Bound>(nrParams);
			// use a small number since 0 appears to mean no bound
			// (autoboxing replaces the deprecated new Double(...) constructor)
			Bound bound = new Bound(1e-7, 1.0);
			for(int i=0; i<nrParams; ++i) {
				bounds.add(bound);
			}
			
			minimizer = new Minimizer();
			minimizer.setBounds(bounds);
			StopConditions conditions = minimizer.getStopConditions();
			conditions.setMaxIterations(200);
			//conditions.setMaxGradientNormInactive();
		}
		
		/**
		 * Runs the optimizer; on success copies the optimum back into
		 * {@code params.params} and returns the minimized cross-entropy.
		 *
		 * @return the minimized function value, or {@link Double#MAX_VALUE}
		 *         if the optimizer threw an exception
		 */
		public double run() {
			DifferentiableFunction fun = new DifferentiableFunction() {
				@Override
				public FunctionValues getValues(double[] point) {
					double gradient[] = new double[params.nrParams()];
					double crossEntropy = params.computeGradientAndCrossEntropy(point, gradient);
					FunctionValues val = new FunctionValues(crossEntropy, gradient);
					System.err.printf("crossEntropy = %f\n", crossEntropy);
					return val;
				}
			};
			
			try {
				Result result = minimizer.run(fun, params.params);
				System.arraycopy(result.point, 0, params.params, 0, params.nrParams());
				
				System.err.println(result.iterationsInfo.toString());
				return result.functionValue;
				
			} catch (LBFGSBException e) {
				e.printStackTrace();
			}
			return Double.MAX_VALUE;
		}
	}
	
	/**
	 * Optimizes the leaf interpolation weights with L-BFGS-B, keeping the best
	 * of {@code nrRuns} restarts, then runs the inner-node EM optimization and
	 * pushes the resulting weights into the decoding runtime.
	 *
	 * @param useOldValues seed each run from the current runtime weights instead
	 *                     of random initialization
	 * @param nrRuns       number of independent optimizer restarts
	 */
	public void interpolate(boolean useOldValues, int nrRuns) throws Exception {
		Parameters bestParams = null;
		double bestEntropy = Double.MAX_VALUE;
		
		for (int run = 0; run < nrRuns; ++run) {
			long startMillis = System.currentTimeMillis();
			MyLBFGSB optimizer = new MyLBFGSB(useOldValues);
			double entropy = optimizer.run();
			long elapsedSec = (System.currentTimeMillis() - startMillis) / 1000;
			System.out.printf("Run %d took %ds: entropy=%f, %d parameters\n", 
					run + 1, elapsedSec, entropy, optimizer.params.params.length);
			// keep the restart with the lowest heldout cross-entropy
			if (entropy < bestEntropy) {
				bestEntropy = entropy;
				bestParams = optimizer.params;
			}
		}
		
		if (bestParams != null) {
			bestParams.unpackParams();
		}
		
		innerNodesEM.doOptimize(10);
		innerNodesEM.updateWeights();
	}
	
	/**
	 * Resets interpolation weights to fixed constants: 0.5 for leaves, 1e-7 for
	 * inner nodes, 0 for backoff nodes.
	 *
	 * @param innerOnly when true, leaf weights are left untouched
	 */
	public void setConst(boolean innerOnly) {
		final double LEAF_CONST_WEIGHT = 0.5;
		final double INNER_CONST_WEIGHT = 1e-7;
		DecodingRuntime runtime = forest.getDecodingRuntime();
		
		int nrLMs = forest.getModels().size();
		for(int lmNum = 0; lmNum < nrLMs; ++lmNum) {
			double weights[] = runtime.getWeights(lmNum);
			BinaryTree<HistoryTreePayload> root = forest.getModels().get(lmNum).getHistoryTree();
			
			BinaryTreeIterator<HistoryTreePayload> it = root.getPostOrderIterator();
			while (it.hasNext()) {
				BinaryTree<HistoryTreePayload> node = it.nextNode();
				boolean leaf = node.isLeaf();
				// in innerOnly mode, leave the leaf weights untouched
				if (innerOnly && leaf) continue;
				
				HistoryTreePayload payload = node.getPayload();
				double w;
				if (payload.isBackoff) {
					w = 0;
				} else {
					w = leaf ? LEAF_CONST_WEIGHT : INNER_CONST_WEIGHT;
				}
				weights[payload.clusterid] = w;
			}
		}
	}
	
	/**
	 * Sanity check: for each model, picks {@code nrClusters} random clusters
	 * from the upper half of the node array that are non-null, non-leaf and
	 * non-backoff, and verifies the heldout data for each.
	 */
	@SuppressWarnings("unused")
	private void checkHeldoutData(int nrClusters) {
		Random rnd = new Random();
		for(int lmNum = 0; lmNum < nrModels; ++lmNum) {
			BinaryTree<HistoryTreePayload>[] nodes = forest.getModels().get(lmNum).getNodes();
			int half = nodes.length / 2;
			
			for(int i = 0; i < nrClusters; ++i) {
				int clusterid;
				// rejection-sample until we hit a valid inner node
				while (true) {
					clusterid = half + rnd.nextInt(half);
					BinaryTree<HistoryTreePayload> candidate = nodes[clusterid];
					if (candidate != null && !candidate.isLeaf() && !candidate.getPayload().isBackoff) {
						break;
					}
				}
				checkHeldoutData(lmNum, clusterid);
			}
		}
	}

	/**
	 * Sanity check for one (model, cluster) pair: for every overt word and every
	 * data fold, walks from the cluster's node up to the root, accumulating the
	 * lambda-interpolated cross-validated probability (heldout fold excluded),
	 * and prints the per-fold probability sums. For a proper distribution each
	 * sum should be close to 1.
	 */
	private void checkHeldoutData(final int lmNum, final int clusterid) {
		final double totalProbs[] = new double[nrDataFiles];
		final ModelWordClusterCounts dataCounts[] = dataModelCounts[lmNum];
		final BinaryTree<HistoryTreePayload> leaf = forest.getModels().get(lmNum).getNodes()[clusterid];
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("check probabilities");
	
		for(int d = 0; d<nrDataFiles; ++d) {
			final int split = d;
			for(FactorTuple tuple : Experiment.getInstance().getTupleDescription().getAllOvertFactors().keySet()) {
				final long word = tuple.getBits();
				
				Runnable run = new Runnable() {
					@Override
					public void run() {
						BinaryTree<HistoryTreePayload> node = leaf;
						
						// walk up the chain: prob = sum over ancestors of
						// ML(word|cluster) * lambda * prod of (1-lambda) below
						double currentWeight = 1.0;
						double prob = 0.0;
						while(node != null) {
							HistoryTreePayload payload = node.getPayload();
							int clusterCount = 0;
							long clusterTotalCount = 0;
							
							// cross-validated counts: all folds except the heldout split
							for(int dataId=0; dataId<nrDataFiles; ++dataId) {
								if (dataId == split) continue;
								
								Word2ClustersCounts word2ClustersCounts = dataCounts[dataId].wordCounts.get(word);
								if (word2ClustersCounts != null) {
									CompactReadOnlyInt2IntHashMap counts = word2ClustersCounts.getClusterCounts();
									clusterCount += counts.get(payload.clusterid);
								}
								clusterTotalCount += dataCounts[dataId].clusterCounts[payload.clusterid];
							}
							if (clusterCount > 0) {
								double clusterProb = (double) clusterCount / clusterTotalCount;
								// debugger breakpoint anchor for NaN probabilities (no-op otherwise)
								if (Double.isNaN(clusterProb)) {
									System.err.print("");
								}
								prob += clusterProb * currentWeight * payload.lambda;
							}
							currentWeight *= (1.0 - payload.lambda);
							
							node = node.getParent();
						}
						
						// totalProbs is shared across jobs; guard the accumulation
						synchronized(totalProbs) {
							totalProbs[split] += prob;
						}
					}
				};
				Job job = new Job(run, "");
				manager.addJob(group, job);
				// NOTE(review): called after EVERY submission — presumably throttles
				// the queue to the worker-pool size; confirm JobGroup.join(int) semantics
				group.join(manager.getNumWorkers());
			}
		}
		group.join();
		
		System.err.printf("lm=%d, cluster #%d: probs: %s\n", lmNum, clusterid, Arrays.toString(totalProbs));
	}

	/**
	 * Walks from the given cluster's node up to the root, running
	 * {@link #checkCluster(int, int)} on every cluster along the way.
	 */
	@SuppressWarnings("unused")
	private void checkClusterChain(final int lmNum, final int clusterid) {
		System.err.printf("Checking cluster chain starting with #%d\n", clusterid);
		
		for (BinaryTree<HistoryTreePayload> node = forest.getModels().get(lmNum).getNodes()[clusterid];
				node != null;
				node = node.getParent()) {
			checkCluster(lmNum, node.getPayload().clusterid);
		}
		System.err.printf("End of cluster chain  #%d\n", clusterid);
	}

	/**
	 * Consistency check for one (model, cluster) pair: verifies, for every data
	 * fold, that the cached per-cluster total count equals the sum of that
	 * cluster's per-word counts, and prints OK/FAILED with both vectors.
	 *
	 * <p>Fix: the per-fold result assignments were inside the per-word loop and
	 * executed once per word; they are loop-invariant and now run once per fold.
	 */
	private void checkCluster(final int lmNum, final int clusterid) {
		final ModelWordClusterCounts dataCounts[] = dataModelCounts[lmNum];
		final Set<FactorTuple> allFactors = Experiment.getInstance().getTupleDescription().getAllOvertFactors().keySet();
		
		final long totalCounts[] = new long[nrDataFiles];
		final long actualTotalCounts[] = new long[nrDataFiles];
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("cluster "+Integer.toString(clusterid)+" check");
		
		// one job per fold; each job writes only its own index, so no locking needed
		for(int dataId=0; dataId<nrDataFiles; ++dataId) {
			final int split = dataId;
			
			Runnable run = new Runnable() {
				@Override
				public void run() {
					// sum this cluster's count over every overt word in the vocabulary
					long totalCount = 0;
					for(FactorTuple tuple : allFactors) {
						Word2ClustersCounts word2ClustersCounts = dataCounts[split].wordCounts.get(tuple.getBits());
						if (word2ClustersCounts != null) {
							CompactReadOnlyInt2IntHashMap counts = word2ClustersCounts.getClusterCounts();
							totalCount += counts.get(clusterid);
						}
					}
					// hoisted out of the word loop: these are per-fold results
					actualTotalCounts[split] = totalCount;
					totalCounts[split] = dataCounts[split].clusterCounts[clusterid];
				}
				
			};
			Job job = new Job(run, "q job");
			manager.addJob(group, job);
		}
		group.join();
		
		boolean clusterOK = true;
		for(int i=0; i<totalCounts.length; ++i) {
			if (totalCounts[i] != actualTotalCounts[i]) {
				clusterOK = false;
				break;
			}
		}
		System.err.printf("cluster #%d %s: total=%s, actual=%s\n", clusterid, (clusterOK?"OK":"FAILED"),
				Arrays.toString(totalCounts), Arrays.toString(actualTotalCounts));
	}

	/**
	 * Builds the training-data filename for a given layer and data fold.
	 *
	 * @param level  tree layer, zero-padded to three digits
	 * @param dataId data fold id
	 * @return a filename of the form {@code layer-NNN-data-D}
	 */
	public static String makeDataFilename(int level, int dataId) {
		return "layer-" + String.format("%03d", level) + "-data-" + dataId;
	}

	/**
	 * Builds the heldout-data filename for a data fold with no model suffix.
	 */
	public static String makeHeldoutFilename(int dataId) {
		return makeHeldoutFilename(dataId, -1);
	}
	
	/**
	 * Builds the heldout-data filename for a data fold, optionally scoped to a
	 * single model. A negative {@code model} omits the model suffix.
	 *
	 * @param dataId data fold id
	 * @param model  model number, or negative for the fold-level file
	 * @return {@code heldout-D} or {@code heldout-D-model-M}
	 */
	public static String makeHeldoutFilename(int dataId, int model) {
		String suffix = model < 0 ? "" : "-model-" + model;
		return "heldout-" + dataId + suffix;
	}
}
