/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.Map.Entry;

import com.sleepycat.je.*;

import edu.berkeley.nlp.util.Pair;
import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.model.training.AbstractQuestionEstimator.Result;
import edu.umd.clip.lm.model.training.metrics.*;
import edu.umd.clip.lm.questions.*;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class NewTrainer {
	//ArrayList<ActiveTreeNode> newActiveNodes;
	//ArrayList<BinaryTree<HistoryTreePayload>> newLeaves;
	/** Enumerates all candidate questions used to split history-tree nodes. */
	private final QuestionGenerator questionGenerator;
	/** The language model being trained; supplies orders, metrics and tuning parameters. */
	private final LanguageModel lm;
	/** Directory holding temporary per-iteration data files and the BDB environment. */
	private final File tmpDir;
	private int nbestQuestions = 1; // 1 means don't use nbest
	private AbstractQuestionEstimator estimator1; // selects n best questions
	private AbstractQuestionEstimator estimator2; // final selection, ignored if nbestQuestion == 1
	/** Persistent (BDB-backed) storage for the per-iteration active tree nodes. */
	private ActiveNodeStorage<ActiveTreeNode> activeNodeStorage;
	/** Temporary Berkeley DB environment backing {@link #activeNodeStorage}. */
	private Environment tmpEnv;
	// used by processActiveNodes to throttle task creation when the heap runs low
    private final MemoryMXBean memoryBean = ManagementFactory.getMemoryMXBean();
	/** Scoring metric for choosing which context variable to split on. */
	private final ContextVariableMetrics ctxVarMetrics;
	/** Stopping rule that can veto splitting on a context variable. */
	private final ContextVariableStoppingRule ctxVarStoppingMetrics;
	/** Per-split entropies of the current tree; updated after each iteration in train(). */
	private double[] entropies;
	/** Per-split entropy reductions achieved by the current iteration. */
	private double[] entropyReductions;
	/** All questions produced by {@link #questionGenerator}; ActiveTreeNode.possibleQuestions indexes into this array. */
	private final Question[] allQuestions;
	/** factorsForExchangeAlgo[f] is true iff overt factor f participates in the Exchange algorithm. */
	private boolean[] factorsForExchangeAlgo;
	
	/** Maximum BDB log file size in bytes (passed to je.log.fileMax). */
	public static int MAX_LOG_FILE_SIZE = 64*1024*1024;

	/**
	 * Accumulates the true/false future-word count distributions produced by
	 * applying one question to one data split. Future words are mapped to
	 * dense array positions through a fixed read-only index.
	 */
	protected static class Distributions {
		final CompactReadOnlyLongHashSet futureWordIndex;
		int[] trueDist;
		int[] falseDist;
		long totalTrueCount;
		long totalFalseCount;
		
		/**
		 * @param futureWordIndex index mapping each possible future word to a dense position
		 */
		public Distributions(CompactReadOnlyLongHashSet futureWordIndex) {
			this.futureWordIndex = futureWordIndex;
			final int numWords = futureWordIndex.size();
			trueDist = new int[numWords];
			falseDist = new int[numWords];
			totalTrueCount = 0L;
			totalFalseCount = 0L;
		}
		
		/** Adds {@code count} observations of {@code word} to the true-branch distribution. */
		public void addTrueCount(long word, int count) {
			final int pos = futureWordIndex.getKeyPosition(word);
			trueDist[pos] += count;
			totalTrueCount += count;
		}
		
		/** Adds {@code count} observations of {@code word} to the false-branch distribution. */
		public void addFalseCount(long word, int count) {
			final int pos = futureWordIndex.getKeyPosition(word);
			falseDist[pos] += count;
			totalFalseCount += count;
		}
		
		/** @return total count accumulated on the true branch */
		public long getTotalTrueCount() {
			return totalTrueCount;
		}
		
		/** @return total count accumulated on the false branch */
		public long getTotalFalseCount() {
			return totalFalseCount;
		}
	}
	
	/**
	 * A candidate question paired with its per-split estimator results,
	 * ordered by the sum of per-split costs (ascending: lower total cost
	 * sorts first).
	 */
	private static class NbestQuestionsEntry implements Comparable<NbestQuestionsEntry> {
		final public Question question;
		final public AbstractQuestionEstimator.Result results[];
		
		public NbestQuestionsEntry(Question question, Result results[]) {
			this.question = question;
			this.results = results;
		}

		/** @return the sum of the per-split costs for this entry */
		private double totalCost() {
			double total = 0;
			for(Result result : results) {
				total += result.getCost();
			}
			return total;
		}

		/* (non-Javadoc)
		 * @see java.lang.Comparable#compareTo(java.lang.Object)
		 */
		@Override
		public int compareTo(NbestQuestionsEntry o) {
			// FIX: Double.compare gives a consistent total order (including NaN),
			// unlike (int) Math.signum(a - b) which maps a NaN difference to 0
			return Double.compare(totalCost(), o.totalCost());
		}
	}
	
	/**
	 * Sets up the trainer: pulls metrics and stopping rule from the model,
	 * marks overt factors for the Exchange algorithm, generates the full
	 * question inventory, and opens a temporary BDB environment for storing
	 * active tree nodes.
	 *
	 * @param lm the language model configuration for this training run
	 * @param tmpDir directory for temporary training files and the BDB environment
	 */
	public NewTrainer(LanguageModel lm, File tmpDir) {
		this.lm = lm;
		this.tmpDir = tmpDir;
		this.ctxVarMetrics = lm.getContextVariableMetrics();
		this.ctxVarStoppingMetrics = lm.getContextVariableStoppingRule();
		
		System.err.println("Context Variable Metrics: " + this.ctxVarMetrics.toString());
		// FIX: corrected "Varirable" typo in the log message
		System.err.println("Context Variable Stopping Rule: " + this.ctxVarStoppingMetrics.toString());
		
		OnDiskTrainingDataNodeWriter.setTempDir(tmpDir);
		
		// only overt factors participate in the Exchange algorithm
		FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
		factorsForExchangeAlgo = new boolean[desc.numFactors()];
		for(byte factorIdx : desc.getOvertFactors()) {
			factorsForExchangeAlgo[factorIdx] = true;
		}
		
		questionGenerator = new QuestionGenerator();
		questionGenerator.initialize(lm, false);
		List<Question> questions = questionGenerator.getQuestions();
		allQuestions = questions.toArray(new Question[questions.size()]);
		
		estimator1 = new EntropyQuestionEstimator((int) lm.getMIN_TOTAL_COUNT(), lm.getMIN_COST_DECREASE());
		try {
			File file = new File(tmpDir, lm.getId() + "-active-nodes");
			String path = file.getAbsolutePath();
			this.tmpEnv = makeTempEnvironment(path);
			activeNodeStorage = new BDBActiveNodeStorage<ActiveTreeNode>(tmpEnv);
		} catch (DatabaseException e) {
			// NOTE(review): on failure activeNodeStorage stays null and later calls
			// will NPE -- consider rethrowing instead of only logging
			e.printStackTrace();
		}
	}

	/**
	 * Creates a non-transactional, writable Berkeley DB environment at the
	 * given path (creating the directory if necessary), capped at 20% cache
	 * and {@link #MAX_LOG_FILE_SIZE} per log file.
	 *
	 * @param path directory for the environment; created if missing
	 * @return the opened environment
	 * @throws DatabaseException if the environment cannot be opened
	 * @throws IllegalStateException if the directory cannot be created
	 */
	private static Environment makeTempEnvironment(String path) throws DatabaseException {
		EnvironmentConfig envConf = new EnvironmentConfig();
		envConf.setAllowCreate(true);
		envConf.setTransactional(false);
		envConf.setReadOnly(false);
		envConf.setCachePercent(20);
		envConf.setConfigParam("je.log.fileMax", Integer.toString(MAX_LOG_FILE_SIZE));
		
		File dir = new File(path);
		// FIX: fail fast when the directory cannot be created instead of
		// ignoring the mkdirs() result (the second isDirectory() guards
		// against a concurrent creation race)
		if (!dir.isDirectory() && !dir.mkdirs() && !dir.isDirectory()) {
			throw new IllegalStateException("Cannot create temp environment directory: " + path);
		}
		Environment env = new Environment(dir, envConf);
		return env;
	}
	
	/**
	 * Builds the root of the history tree: reads every training split,
	 * reduces its context to the model's overt/hidden orders, computes the
	 * initial per-split entropies, and registers the root as the single
	 * active leaf of a fresh {@link TrainingContext}.
	 *
	 * @return the initialized training context containing the root node
	 * @throws IOException if training data cannot be read or written
	 */
	public TrainingContext initialize() throws IOException {
		// root payload: no question asked yet
		HistoryTreePayload payload = new HistoryTreePayload((Question)null);
		payload.top = true;
		
		activeNodeStorage.open();
		Experiment exp = Experiment.getInstance();
		FactorTupleDescription desc = exp.getTupleDescription();
		
		Experiment.Files files = exp.getFiles();
		
		int numSplits = files.getTrainData().size();
		
		TrainingContext ctx = createNewContext(0, 1, new BinaryTree<HistoryTreePayload>(payload));
		int nodeId = ctx.getNextNodeId(); 
		payload.clusterid = nodeId;

		
		OnDiskTrainingDataNodeWriter writer = new OnDiskTrainingDataNodeWriter(ctx.getFile().getChannel());
		
		WritableTrainingDataNode node = writer.createNode(nodeId, numSplits);
		writer.add(node);
		WritableTrainingData[] newData = node.getData();
		
		// at the root only overt factors are visible in the futures
		long overtMask = desc.getOvertFactorsMask();
		TrainingDataFilter filter = new MaskedFuturesTrainingDataFilter(overtMask);

		Long2IntMap dataCounts[] = new Long2IntMap[numSplits];

		for(int i=0; i<numSplits; ++i) {
			dataCounts[i] = new Long2IntMap();
			for(String fname : files.getTrainDataFiles(i)) {
				FileChannel channel = new FileInputStream(fname).getChannel();
				// FIX: close the channel in a finally block so the underlying
				// stream is not leaked when reduceContext throws
				try {
					TrainingDataReader reader = new OnDiskTrainingDataReader(channel);
					ReadableTrainingData inputData = new ReadableTrainingData(reader);
					TrainingDataUtil.reduceContext(inputData, newData[i], lm.getOvertOrder(), lm.getHiddenOrder(), filter, dataCounts[i]);
				} finally {
					channel.close();
				}
			}
			newData[i].finish();
		}
		writer.close();
		
		ctx.setDataWriter(writer);
		
		// the root starts with every question still possible
		ActiveTreeNode activeNode = new ActiveTreeNode(numSplits);
		activeNode.possibleQuestions = new int[allQuestions.length];
		for(int i=0; i<allQuestions.length; ++i) {
			activeNode.possibleQuestions[i] = i;
		}
		
		this.entropies = new double[numSplits];
		
		long totalCounts[] = new long[numSplits];
		
		for(int i=0; i<numSplits; ++i) {
			// convert training counts to a distribution and compute the entropy
			long totalCount = 0;
			for(Long2IntMap.Iterator it = dataCounts[i].iterator(); it.hasNext();) {
				totalCount += it.next().getValue();
			}
			totalCounts[i] = totalCount;
			double revTotalCount = 1.0 / totalCount;
			Long2DoubleMap trainingDist = new Long2DoubleMap(dataCounts[i].size());
			for(Long2IntMap.Iterator it = dataCounts[i].iterator(); it.hasNext();) {
				Long2IntMap.Entry entry = it.next();
				trainingDist.put(entry.getKey(), entry.getValue() * revTotalCount);
			}
			
			double entropy = ProbMath.computeEntropy(dataCounts[i]);
			entropies[i] = entropy;
		}
		activeNode.nodeCosts = Arrays.copyOf(entropies, entropies.length);
		// all overt factor tuples are possible future words at the root
		activeNode.possibleWords = new long[desc.getAllOvertFactors().size()];
		{
			int i=0;
			for(FactorTuple tuple : desc.getAllOvertFactors().keySet()) {
				activeNode.possibleWords[i++] = tuple.getBits();
			}
		}
		AbstractQuestionEstimator.setTotalCounts(totalCounts);
		
		ctx.getActiveNodes().putNode(nodeId, activeNode);
		ctx.putLeaf(nodeId, ctx.getTree());
		
		return ctx;
	}
	
	/**
	 * Grows the history tree one level per iteration until no node is split.
	 * Each iteration: (1) selects and applies the best question for every
	 * active node, (2) rewrites the training data partitioned by the new
	 * splits, (3) discards the previous iteration's data and active-node
	 * collection, and (4) periodically snapshots the tree.
	 *
	 * @param ctx the context produced by {@link #initialize()}
	 * @throws IOException if temporary training data cannot be read or written
	 */
	public void train(TrainingContext ctx) throws IOException {
		final int levelsBetweenSnapshort = 10;
		while(true) {
			TrainingContext newCtx = createNewContext(ctx);
			// reopen the previous iteration's data read-only
			ctx.setFile(new RandomAccessFile(ctx.getFilename(), "r"));
			//ctx.getFile().getChannel().position(0);
			TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(ctx.getFile().getChannel()));
			ctx.setDataReader(reader);
			
			TrainingDataNodeWriter writer = new OnDiskTrainingDataNodeWriter(newCtx.getFile().getChannel()); 
			writer = new BufferedTrainingDataNodeWriter(writer);
			newCtx.setDataWriter(writer);

			// node-id numbering continues from the previous iteration
			newCtx.getLastNodeId().set(ctx.getLastNodeId().get());
			
			// grow the tree by one level
			System.out.printf("Starting iteration %d, entropies: %s\n", 
					ctx.getIteration(), Arrays.toString(entropies));
			
			this.entropyReductions = new double[entropies.length];
			processActiveNodes(ctx, newCtx);
			
			System.out.printf("iteration %d reduced entropies by %s\n", 
					ctx.getIteration(), Arrays.toString(entropyReductions));

			for(int i=0; i<entropies.length; ++i) {
				this.entropies[i] -= entropyReductions[i];
			}
			
			// rearrange the training data: reopen and re-read the old data,
			// splitting each node's events between its new children
			ctx.setFile(new RandomAccessFile(ctx.getFilename(), "r"));
			reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(ctx.getFile().getChannel()));
			ctx.setDataReader(reader);
			prepareNextData(ctx, newCtx);
			
			reader.close();
			writer.close();
			
			// discard data from the previous iteration
			//ctx.getFile().setLength(0);
			ctx.getFilename().delete();
			//ctx.getFile().close();
			
			activeNodeStorage.removeCollection(ctx.getActiveNodes().getName());
			
			newCtx.setDataWriter(null);
			ctx = newCtx;
			int newNodes = ctx.getNodeCount();
			if (newNodes == 0) {
				// no node was split this iteration: the tree is complete
				break;
			}
			if (ctx.getIteration() % levelsBetweenSnapshort == 0) {
				try {
					makeSnapshot(ctx);
				} catch (Exception e) {
					// a failed snapshot is not fatal; training continues
					e.printStackTrace();
				}
			}
		}
		saveTrainedTree(ctx);
	}
	
	/**
	 * Resumes training from a previously saved tree snapshot: installs the
	 * snapshot tree into the context, then replays the data-splitting phase
	 * level by level (without re-selecting questions) until every leaf of
	 * the snapshot has its own data node.
	 *
	 * @param ctx a context whose data file corresponds to the snapshot's root
	 * @param snapshot the saved history tree to fast-forward through
	 * @return the context positioned at the snapshot's leaves
	 * @throws IOException if temporary training data cannot be read or written
	 */
	public TrainingContext loadSnapshot(TrainingContext ctx, BinaryTree<HistoryTreePayload> snapshot) throws IOException {
		ctx.setTree(snapshot);
		ctx.putLeaf(snapshot.getPayload().clusterid, snapshot);
		
		// scan the snapshot to find the largest cluster id so that
		// new node ids don't collide with existing ones
		int numNodes = 0;
		int maxClusterId = 0;
		for(BinaryTreeIterator<HistoryTreePayload> it = snapshot.getPreOrderIterator(); it.hasNext(); ) {
			HistoryTreePayload payload = it.next();
			++numNodes;
			if (payload.clusterid > maxClusterId) {
				maxClusterId = payload.clusterid;
			}
		}
		System.err.printf("snapshot: %d nodes, max-cluster-id = %d\n", numNodes, maxClusterId);
		ctx.getLastNodeId().set(maxClusterId);
		
		while(true) {
			TrainingContext newCtx = createNewContext(ctx);
			ctx.setFile(new RandomAccessFile(ctx.getFilename(), "r"));
			TrainingDataNodeReader reader = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(ctx.getFile().getChannel()));
			ctx.setDataReader(reader);
			newCtx.getLastNodeId().set(maxClusterId);

			TrainingDataNodeWriter writer = new OnDiskTrainingDataNodeWriter(newCtx.getFile().getChannel()); 
			writer = new BufferedTrainingDataNodeWriter(writer);
			newCtx.setDataWriter(writer);
			
			// split one more level of data according to the snapshot's questions
			int nonLeafNodes = fastForwardOneLevel(ctx, newCtx);
			
			// discard data from the previous iteration
			ctx.getFilename().delete();
			
			activeNodeStorage.removeCollection(ctx.getActiveNodes().getName());
			
			reader.close();
			writer.close();
			
			newCtx.setDataWriter(null);
			ctx = newCtx;
			if (nonLeafNodes == 0) {
				// every remaining node is a snapshot leaf: fast-forward done
				break;
			}
		}
		return ctx;
	}
	
	/**
	 * Convenience overload: builds the context for the next iteration,
	 * doubling the node-id budget and reusing the current tree.
	 */
	private TrainingContext createNewContext(TrainingContext ctx) throws IOException {
		final int nextIteration = ctx.getIteration() + 1;
		final int nodeBudget = ctx.getLastNodeId().intValue() * 2;
		return createNewContext(nextIteration, nodeBudget, ctx.getTree());
	}
	
	/**
	 * Creates a fresh training context for the given iteration with a new
	 * active-node collection and a new temporary data file (deleted on JVM
	 * exit).
	 *
	 * @param iteration the tree level this context represents
	 * @param count initial node count hint for the context
	 * @param tree the history tree shared across iterations
	 * @return the new context with its temp file opened read-write
	 * @throws IOException if the temp file cannot be opened
	 */
	private TrainingContext createNewContext(int iteration, int count, BinaryTree<HistoryTreePayload> tree) throws IOException {
		ActiveNodeStorage<ActiveTreeNode>.ActiveNodeCollection collection = activeNodeStorage.createCollection("iter-" + iteration);
		
		TrainingContext newCtx = new TrainingContext(iteration, count, tree, collection);
		File tempOutput = new File(tmpDir, lm.getId() + "-forest-iter-" + Integer.toString(iteration) + ".data");
		// FIX: report when a stale temp file cannot be removed instead of
		// silently ignoring the delete() result
		if (tempOutput.exists() && !tempOutput.delete()) {
			System.err.println("Warning: could not delete stale temp file: " + tempOutput.getAbsolutePath());
		}
		tempOutput.deleteOnExit();
		newCtx.setFilename(tempOutput);
		newCtx.setFile(new RandomAccessFile(tempOutput, "rw"));
		return newCtx;
	}
	
	/** Heap-usage fraction above which processActiveNodes delays starting new node tasks. */
	private final static float MAX_MEMORY_USE_FRACTION = 0.7f;
	
	/**
	 * Dispatches one job per active node, choosing the best question for
	 * each. Concurrency is bounded by a {@link TaskSequencer} whose task
	 * budget scales with the tree level and worker count; before each new
	 * node is started, heap usage is checked and, if above
	 * {@link #MAX_MEMORY_USE_FRACTION}, GC is attempted and the start is
	 * delayed until a running task finishes.
	 *
	 * @param ctx the current iteration's context (source of node data)
	 * @param newCtx the next iteration's context (receives the splits)
	 * @throws IOException if node data cannot be read
	 */
	private void processActiveNodes(final TrainingContext ctx, final TrainingContext newCtx) throws IOException {
		TrainingDataNodeReader reader = ctx.getDataReader();
		
		JobManager manager = JobManager.getInstance();
		MemoryUsage memuse;
		
		// allow more concurrent node tasks at deeper levels, capped by workers
		int maxTasks = 1;
		int level = ctx.getIteration();
		if (level >= lm.getForceHiddenQuestions() || level > 8) {
			maxTasks = level / 2 + 1;
		}
		if (maxTasks > manager.getNumWorkers() / 2 + 1) {
			maxTasks = manager.getNumWorkers() / 2 + 1;
		}
		
		final TaskSequencer sequencer = new TaskSequencer(maxTasks);
		while(true) {
			sequencer.grabInput();
			
			
			// do not start next node if the memory usage is high or until there are no other nodes being processed
			// note: sequencer.runningTasks cannot increase while we are holding the input
			while(sequencer.getRunningTasks() > 1) {
				memuse = memoryBean.getHeapMemoryUsage();
				if (memuse.getUsed() > memuse.getMax() * MAX_MEMORY_USE_FRACTION) {
					System.err.printf("Too little memory to start the next node: runningTasks=%d, mem used: %dM/%dM, running GC\n", 
							sequencer.getRunningTasks(), memuse.getUsed()/1048576, memuse.getMax()/1048576);
					
					System.gc();
					memuse = memoryBean.getHeapMemoryUsage();
					
					if (memuse.getUsed() > memuse.getMax() * MAX_MEMORY_USE_FRACTION) {
						System.err.printf("Delaying starting the next node: runningTasks=%d, mem used: %dM/%dM\n", 
								sequencer.getRunningTasks(), memuse.getUsed()/1048576, memuse.getMax()/1048576);
						if (!sequencer.waitForSomething(1)) {
							// no nodes left to wait for
							break;
						}
					} else {
						System.err.printf("GC helped! : runningTasks=%d, mem used: %dM/%dM\n", 
								sequencer.getRunningTasks(), memuse.getUsed()/1048576, memuse.getMax()/1048576);
					}
				} else {
					break;
				}
			}
			
			final ReadableTrainingDataNode data = reader.getNext();
			if (data == null) {
				// no more nodes: release the input slot and the task budget
				sequencer.releaseInput();
				sequencer.finish();
				break;
			}

			Runnable run = new Runnable() {
				@Override
				public void run() {
					try {
						processActiveNode(ctx, newCtx, data, sequencer);
					} finally {
						// always signal completion so the sequencer cannot deadlock
						sequencer.finish();
					}
				}
			};
			Job job = new Job(run, "a job");
			manager.addJob(job);
		}
		sequencer.join();
		reader.close();
	}
	
	/**
	 * Rewrites the training data for the next iteration: every node whose
	 * leaf was split this iteration is partitioned into its false (left) and
	 * true (right) children according to the question at the old leaf; data
	 * belonging to nodes that remained leaves is skipped.
	 *
	 * @param ctx the current iteration's context (reader already positioned at 0)
	 * @param newCtx the next iteration's context (receives the split data)
	 * @throws IOException if node data cannot be read or written
	 */
	private void prepareNextData(TrainingContext ctx, TrainingContext newCtx) throws IOException {
		final TrainingDataNodeReader reader = ctx.getDataReader();
		final TrainingDataNodeWriter writer = newCtx.getDataWriter();
		
		int sourceNodes = 0;
		int producedNodes = 0;
		
		ReadableTrainingDataNode source;
		while((source = reader.getNext()) != null) {
			++sourceNodes;
			final BinaryTree<HistoryTreePayload> leaf = ctx.getLeaf(source.getNodeId());
			if (leaf.isLeaf()) {
				// this node was not split; its data is no longer needed
				source.skipData();
				continue;
			}
			producedNodes += 2;
			
			final BinaryTree<HistoryTreePayload> falseBranch = getFalseBranch(leaf);
			final BinaryTree<HistoryTreePayload> trueBranch = getTrueBranch(leaf);
			
			final int numData = source.getNumData();
			final WritableTrainingDataNode falseNode = writer.createNode(falseBranch.getPayload().clusterid, numData);
			final WritableTrainingDataNode trueNode = writer.createNode(trueBranch.getPayload().clusterid, numData);
			
			writer.add(falseNode);
			writer.add(trueNode);
			
			for(int i=0; i<numData; ++i) {
				TrainingDataUtil.splitData(source.getData(i), leaf.getPayload().question, trueNode.getData(i), falseNode.getData(i));
			}
		}
		reader.close();
		writer.close();
		
		System.err.printf("old node count: %d, new node count: %d\n", sourceNodes, producedNodes);
	}
	
	
	/** The right child receives the data for which the node's question answered true. */
	protected static BinaryTree<HistoryTreePayload> getTrueBranch(BinaryTree<HistoryTreePayload> oldLeaf) {
		return oldLeaf.getRight();
	}
	
	/**
	 * Returns the child receiving the question's false answers. Normally
	 * this is the left child; however, when both this node and its left
	 * child ask IN_SET questions and the grandchild is a backoff node, the
	 * false data goes to the left child's right branch instead.
	 */
	protected static BinaryTree<HistoryTreePayload> getFalseBranch(BinaryTree<HistoryTreePayload> oldLeaf) {
		final BinaryTree<HistoryTreePayload> falseChild = oldLeaf.getLeft();
		if (oldLeaf.getPayload().question.getQuestionType() != Question.IN_SET_QUESTION) {
			return falseChild;
		}
		final Question childQuestion = falseChild.getPayload().question;
		if (childQuestion == null || childQuestion.getQuestionType() != Question.IN_SET_QUESTION) {
			return falseChild;
		}
		if (!falseChild.getLeft().getPayload().isBackoff) {
			return falseChild;
		}
		return falseChild.getRight();
	}
	
	/**
	 * Decides whether a question may be considered at the current tree
	 * level: hidden questions require HFT mode; in HFT mode, overt questions
	 * are deferred until the forced-hidden-question levels are done; and at
	 * shallow levels (< 7) long binary prefixes are excluded.
	 */
	private boolean filterQuestions(TrainingContext ctx, Question question) {
		final boolean hidden = question.isAboutHidden();
		if (!lm.useHFTQuestions() && hidden) {
			return false;
		}
		final int treeLevel = ctx.getIteration();
		if (!hidden && lm.useHFTQuestions() && treeLevel < lm.getForceHiddenQuestions()) {
			return false;
		}
		if (treeLevel < 7 && question.getQuestionType() == Question.BINARY_PREFIX_QUESTION
				&& ((BinaryPrefixQuestion) question).getPrefix().getSize() > 3 + treeLevel * 2) {
			return false;
		}
		return true;
	}
	
	/**
	 * Overt questions are allowed unless HFT mode forces hidden questions
	 * first and the tree has not yet reached that level.
	 */
	private boolean allowOvertQuestions(TrainingContext ctx) {
		int treeLevel = ctx.getIteration();
		// FIX: removed the redundant "true &&" constant from the expression
		return !lm.useHFTQuestions() || treeLevel >= lm.getForceHiddenQuestions();
	}
	
	/**
	 * Accumulates Exchange-algorithm statistics over one training data
	 * block: for each context/futures pair, the factor value at the given
	 * context offset selects a per-word map into which the future tuple
	 * counts are added.
	 */
	private static class CollectExchangeStatsJob implements Runnable {
		private final TrainingDataBlock block;
		private final byte offset;
		private final HashMap<Integer, Long2IntMap> eventsByWord;
		private final byte wordIndex;
		
		/**
		 * @param block the training data block to process
		 * @param offset position within the context to read the factor from
		 * @param eventsByWord output map: factor value -> future tuple counts
		 * @param wordIndex index of the factor to extract from the context tuple
		 */
		public CollectExchangeStatsJob(TrainingDataBlock block, byte offset,
				HashMap<Integer, Long2IntMap> eventsByWord,
				byte wordIndex) {
			this.block = block;
			this.offset = offset;
			this.eventsByWord = eventsByWord;
			this.wordIndex = wordIndex;
		}

		/* (non-Javadoc)
		 * @see java.lang.Runnable#run()
		 */
		@Override
		public void run() {
			for(final ContextFuturesPair pair : block) {
				final Integer word = FactorTuple.getValue(pair.getContext().data[offset], wordIndex);
				Long2IntMap counts = eventsByWord.get(word);
				if (counts == null) {
					counts = new Long2IntMap();
					eventsByWord.put(word, counts);
				}
				for(final TupleCountPair tc : pair.getFutures()) {
					counts.addAndGet(tc.tuple, tc.count);
				}
			}
		}
	}
	
	/**
	 * Accumulates the true/false future-word distributions for a single
	 * question over one training data block: each context is tested against
	 * the question and its future counts are added to the matching branch.
	 */
	private static class CollectQuestionStatsJob implements Runnable {
		private final TrainingDataBlock block;
		private final Question question;
		private final Distributions distributions;
		
		/**
		 * @param block the training data block to process
		 * @param question the candidate question to evaluate
		 * @param distributions output accumulator for the two branches
		 */
		public CollectQuestionStatsJob(TrainingDataBlock block, Question question,
				Distributions distributions) {
			this.block = block;
			this.question = question;
			this.distributions = distributions;
		}

		/* (non-Javadoc)
		 * @see java.lang.Runnable#run()
		 */
		@Override
		public void run() {
			for(final ContextFuturesPair pair : block) {
				final boolean answer = question.test(pair.getContext());
				for(final TupleCountPair tc : pair.getFutures()) {
					if (answer) {
						distributions.addTrueCount(tc.tuple, tc.count);
					} else {
						distributions.addFalseCount(tc.tuple, tc.count);
					}
				}
			}
		}
	}
	
	/**
	 * Feeds one training data block into a {@link ContextVariableStats}
	 * accumulator for a single context variable. The accumulator gathers
	 * w -> count, x -> count, and x -> w -> count statistics used later to
	 * compute Chi square and related metrics.
	 */
	private static class CollectContextVariableStats implements Runnable {
		private final TrainingDataBlock block;
		private final ContextVariable ctxVar;
		private final ContextVariableStats stats;

		/**
		 * @param block the training data block to process
		 * @param ctxVar the context variable whose statistics are collected
		 * @param stats the accumulator updated by this job
		 */
		public CollectContextVariableStats(TrainingDataBlock block,
				ContextVariable ctxVar, ContextVariableStats stats) 
		{
			this.block = block;
			this.ctxVar = ctxVar;
			this.stats = stats;
		}

		/* (non-Javadoc)
		 * @see java.lang.Runnable#run()
		 */
		@Override
		public void run() {
			// all the accumulation logic lives in ContextVariableStats
			stats.processBlock(ctxVar, block);
		}
	}
	
	/** Hidden-prefix questions farther than this distance (per BinaryPrefix.distance) from every previously asked prefix are skipped; <= 0 disables the filter. */
	private static final int MAX_QUESTION_DISTANCE = 6;
	// presumably the number of random initializations for the Exchange algorithm -- not referenced in the visible portion of this file; confirm before changing
	private static int NR_RANDOM_EXCHANGE_INIT = 4;
	
	@SuppressWarnings("unchecked")
	private void processActiveNode(TrainingContext ctx, TrainingContext newCtx, 
			ReadableTrainingDataNode nodeData, TaskSequencer sequencer) 
	{
		final int nodeId = nodeData.getNodeId(); 
		final ReadableTrainingData data[] = nodeData.getData();
		data[0].start();
		final byte contextSize = data[0].getContextSize();
		
		final BinaryTree<HistoryTreePayload> currentLeaf = ctx.getLeaf(nodeId);

		final ActiveTreeNode activeNode = ctx.getActiveNodes().getNode(nodeId);
		
		final CompactReadOnlyLongHashSet futureWordIndex = new CompactReadOnlyLongHashSet(activeNode.possibleWords);
		
		final JobManager manager = JobManager.getInstance();
		
		// data for the Exchange algorithm
		//final byte wordIndex = exp.getTupleDescription().getMainFactorIndex();
		byte minOffset = (byte) (contextSize + 1 - lm.getOvertOrder());
		final HashMap<Integer, Long2IntMap> eventsByWord[][] = new HashMap[factorsForExchangeAlgo.length][];
		
		final HashMap<Integer, Long2IntMap> eventsByWordAndData[][][] = new HashMap[factorsForExchangeAlgo.length][][];
		for(int factorIdx=0; factorIdx<factorsForExchangeAlgo.length; ++factorIdx) {
			if (factorsForExchangeAlgo[factorIdx]) {
				eventsByWordAndData[factorIdx] = new HashMap[data.length][];
				eventsByWord[factorIdx] = new HashMap[contextSize];
			}
		}
		
		// enumerate all questions (except Exchange questions, which will be constructed later)
		final ArrayList<Question> questions = new ArrayList<Question>();
		
		final HashMap<Question,Distributions[]> allDistributions = new HashMap<Question,Distributions[]>(questions.size());
		//final HashMap<ContextVariable, HashMap<Long,CountDistribution>> distributionsByContextVariable = new HashMap<ContextVariable, HashMap<Long,CountDistribution>>(2*(forest.getOrder()-1));
		//final HashMap<ContextVariable, Pair<CountDistribution, CountDistribution>> marginalDistsByContextVariable = new HashMap<ContextVariable, Pair<CountDistribution, CountDistribution>>(2*(forest.getOrder()-1));
		
		final HashMap<ContextVariable, ContextVariableStats> ctxVarStats = new HashMap<ContextVariable, ContextVariableStats>(2*(lm.getOrder()-1)); 
		final HashMap<ContextVariable, Double> ctxVarScores = new HashMap<ContextVariable, Double>(2*(lm.getOrder()-1)); 
		ArrayList<Question> trueQuestions;
		ArrayList<Question> falseQuestions;
		
		try {
			{
				trueQuestions = new ArrayList<Question>();
				falseQuestions = new ArrayList<Question>();
				/*
				 * don't need this anymore since the list of sensible questions is stored in ActiveTreeNode explicitly
				 * 
				Pair<ArrayList<Question>,ArrayList<Question>> pair = getUselessQuestions(currentLeaf);
				trueQuestions = pair.getFirst();
				falseQuestions = pair.getSecond();
				System.err.printf("node #%d has %d useless true and %d useless false questions\n", 
						nodeId, trueQuestions.size(), falseQuestions.size());
				*/
			}
			
			{
				// OPTIMIZATION: filter out irrelevant or unlikely questions
				constructTrueAndFalseQuestions(currentLeaf, trueQuestions, falseQuestions);
				
				ArrayList<BinaryPrefix> askedPrefixes[] = new ArrayList[contextSize];
				for(int i=0; i<askedPrefixes.length; ++i) {
					askedPrefixes[i] = new ArrayList<BinaryPrefix>();
					askedPrefixes[i].add(new BinaryPrefix()); // add an empty prefix
				}
				
				BinaryTree<HistoryTreePayload> node = currentLeaf.getParent();
				while(node != null) {
					Question q = node.getPayload().question;
					if (q.isAboutHidden()) {
						askedPrefixes[askedPrefixes.length+q.getTimeOffset()].add(((BinaryPrefixQuestion) q).getPrefix());
					}
					node = node.getParent();
				}
				
				for(int questionIdx : activeNode.possibleQuestions) {
					Question question = allQuestions[questionIdx];
					
					if (filterQuestions(ctx, question)) {
						boolean allowed = false;
						if (MAX_QUESTION_DISTANCE > 0 && question.isAboutHidden()) {
							// don't ask questions that are farther than MAX_QUESTION_DISTANCE away from a previously asked question
							// particularly important for higher order models
							BinaryPrefixQuestion bpq = (BinaryPrefixQuestion) question;
							ArrayList<BinaryPrefix> prefixes = askedPrefixes[askedPrefixes.length + bpq.getIndex()];
							if (prefixes.size() < 10) {
								for(BinaryPrefix prefix : prefixes) {
									if (BinaryPrefix.distance(prefix, bpq.getPrefix()) <= MAX_QUESTION_DISTANCE) {
										allowed = true;
										break;
									}
								}
							} else {
								allowed = true; // don't bother
							}
						} else {
							allowed = true;
						}
						if (allowed) {
							questions.add(question);
						}
					}
				}
				askedPrefixes = null;
			}
			
			// create distributions for every question
			//final Long2DoubleMap parentTrainDist = new Long2DoubleMap();
			//final Long2DoubleMap parentDevDist = new Long2DoubleMap();
			
			for(Question q : questions) {
				Distributions dists[] = new Distributions[data.length];
				for(int i=0; i<dists.length; ++i) {
					dists[i] = new Distributions(futureWordIndex);
				}
				//d.parentTrain = parentTrainDist;
				//d.parentDev = parentDevDist;
				allDistributions.put(q, dists);
				
				ContextVariable ctxVar = new ContextVariable(q);
				
				if (!ctxVarStats.containsKey(ctxVar)) {
					boolean hasWordCounts = false;
					if (ctxVarMetrics.needWordCounts() || ctxVarStoppingMetrics.needWordCounts()) {
						hasWordCounts = true;
					}
					boolean hasContextCounts = false;
					if (ctxVarMetrics.needContextVarCounts() || ctxVarStoppingMetrics.needContextVarCounts()) {
						hasContextCounts = true;
					}
					boolean hasW2X = false;
					if (ctxVarMetrics.needWordToVarCounts() || ctxVarStoppingMetrics.needWordToVarCounts()) {
						hasW2X = true;
					}
					boolean hasX2W = false;
					if (ctxVarMetrics.needVarToWordCounts() || ctxVarStoppingMetrics.needVarToWordCounts()) {
						hasX2W = true;
					}
					ContextVariableStats stats = new ContextVariableStats(hasWordCounts, hasContextCounts, hasW2X, hasX2W);
					ctxVarStats.put(ctxVar, stats);
				}
			}
			
			for(byte factorIdx=0; factorIdx<factorsForExchangeAlgo.length; ++factorIdx) {
				if (factorsForExchangeAlgo[factorIdx]) {
					for(byte i=minOffset; i<contextSize; ++i) {
						eventsByWord[factorIdx][i] = new HashMap<Integer, Long2IntMap>();
						
						// add context variable for word questions explicitly because they are not created yet
						ContextVariable ctxVar = new ContextVariable(false, (byte) (i-contextSize), factorIdx);
			
						if (!ctxVarStats.containsKey(ctxVar)) {
							boolean hasWordCounts = false;
							if (ctxVarMetrics.needWordCounts() || ctxVarStoppingMetrics.needWordCounts()) {
								hasWordCounts = true;
							}
							boolean hasContextCounts = false;
							if (ctxVarMetrics.needContextVarCounts() || ctxVarStoppingMetrics.needContextVarCounts()) {
								hasContextCounts = true;
							}
							boolean hasW2X = false;
							if (ctxVarMetrics.needWordToVarCounts() || ctxVarStoppingMetrics.needWordToVarCounts()) {
								hasW2X = true;
							}
							boolean hasX2W = false;
							if (ctxVarMetrics.needVarToWordCounts() || ctxVarStoppingMetrics.needVarToWordCounts()) {
								hasX2W = true;
							}
							ContextVariableStats stats = new ContextVariableStats(hasWordCounts, hasContextCounts, hasW2X, hasX2W);
							ctxVarStats.put(ctxVar, stats);
						}
					}
				}
			}
			
			for(int factorIdx=0; factorIdx<factorsForExchangeAlgo.length; ++factorIdx) {
				if (eventsByWordAndData[factorIdx] != null) {
					for(int d =0; d<data.length; ++d) {
						eventsByWordAndData[factorIdx][d] = new HashMap[contextSize];
						for(byte i=minOffset; i<contextSize; ++i) {
							eventsByWordAndData[factorIdx][d][i] = new HashMap<Integer, Long2IntMap>();
						}
					}
				}
			}
			// process the training data
			try {
				for(int dataNum = 0; dataNum < data.length; ++dataNum) {
					long dataCount = 0;
					for( ; data[dataNum].hasNext(); ) {
						final TrainingDataBlock block = data[dataNum].next();
						dataCount += block.getTotalCount();
						
						JobGroup group = manager.createJobGroup("training data");
		
						Runnable run;
						Job job;
						ArrayList<Job> jobs = new ArrayList<Job>();
						/*
						run = new CollectParentDistStatsJob(block, parentTrainDist);
						job = new Job(run, "parent stats");
						manager.addJob(group, job);
						*/
						
						// collect stats for the Exchange algorithm
						for(byte factorIdx=0; factorIdx<factorsForExchangeAlgo.length; ++factorIdx) {
							if (eventsByWordAndData[factorIdx] != null) {
								for(byte i=minOffset; i<contextSize; ++i) {
									run = new CollectExchangeStatsJob(block, i, eventsByWordAndData[factorIdx][dataNum][i], factorIdx);
									job = new Job(run, "exchange stats");
									jobs.add(job);
								}
							}
						}
						// split distributions for each question
						for(Question question : questions) {
							Distributions distributions[] = allDistributions.get(question);
		
							run = new CollectQuestionStatsJob(block, question, distributions[dataNum]);
							job = new Job(run, "question stats");
							jobs.add(job);
						}
						
						// collect statistics to select which context variable to split
						for(Map.Entry<ContextVariable, ContextVariableStats> e : ctxVarStats.entrySet()) {
							run = new CollectContextVariableStats(block, e.getKey(), e.getValue());
							job = new Job(run, "context var stats: " + e.getKey().toString());
							jobs.add(job);
						}
						manager.addBatch(group, jobs);
						group.join();
					}
					//System.err.printf("data #%d totalCount: %d\n", dataNum, dataCount);
				}
			} catch(IOException e) {
				e.printStackTrace();
			}
		} finally {
			sequencer.releaseInput();
		}
		
		//System.err.println("Number of questions: " + questions.size());
		
		// compute the scores for the context variables
		{
			JobGroup group = manager.createJobGroup("context variable scores");
			for(Map.Entry<ContextVariable, ContextVariableStats> e : ctxVarStats.entrySet()) {
				final ContextVariable var = e.getKey();
				final ContextVariableStats stats = e.getValue();
				//nbestByContext.put(var, new NbestDataSplits<NbestQuestionsEntry>(nbestQuestions));
				Runnable run = new Runnable() {
					@Override
					public void run() {
						if (ctxVarStoppingMetrics != null) {
							if (!ctxVarStoppingMetrics.isGood(var, stats)) {
								return;
							}
						}
						double score = ctxVarMetrics.computeScore(var, stats);
						if (Double.isNaN(score)) {
							System.err.printf("NaN score (%s), var=%s\n", ctxVarMetrics.getName(), var.toString());
							return;
						}
						synchronized(ctxVarScores) {
							ctxVarScores.put(var, score);
						}
					}
				};
				Job job = new Job(run, "");
				manager.addJob(group, job);
			}
			group.join();
			ctxVarStats.clear();
		}
		
		if (ctxVarScores.isEmpty()) {
			return;
		}
		
		final HashMap<ContextVariable, NbestDataSplits<NbestQuestionsEntry>> nbestByContext = new HashMap<ContextVariable, NbestDataSplits<NbestQuestionsEntry>>();
		final ArrayList<ContextVariable> orderOfContextVariables = new ArrayList<ContextVariable>();
		{
			// compute the mapping ContextVariable -> NbestDataSplits according to ctxVarScores
			// note that some ContextVariables may share NbestDataSplits iff they have the same scores
			Map.Entry<ContextVariable, Double> sortedVars[] = ctxVarScores.entrySet().toArray(new Map.Entry[ctxVarScores.size()]);
			Arrays.sort(sortedVars, 
					new Comparator<Map.Entry<ContextVariable, Double>>() {
						@Override
						public int compare(Entry<ContextVariable, Double> o1,
								Entry<ContextVariable, Double> o2) {
							return (int) Math.signum(o1.getValue() - o2.getValue());
						}
					});
			
			double lastScore = Double.NEGATIVE_INFINITY;
			ContextVariable lastVar = null;
			for(Map.Entry<ContextVariable, Double> entry : sortedVars) {
				double score = entry.getValue();
				ContextVariable ctxVar = entry.getKey();
				orderOfContextVariables.add(ctxVar);

				// merge nbest lists of questions when the ctxVar scores are the same (e.g. in the case of DummyMetrics)
				// except the case when we use Bernoulli trials to select the best context variable later on
				if (score == lastScore && lm.getRandomness() == 0) {
					nbestByContext.put(ctxVar, nbestByContext.get(lastVar));
				} else {
					nbestByContext.put(ctxVar, new NbestDataSplits<NbestQuestionsEntry>(nbestQuestions));
					lastScore = score;
					lastVar = ctxVar;
				}
			}
		}
		
		List<Question> uselessTrueQuestions = new LinkedList<Question>();
		List<Question> uselessFalseQuestions = new LinkedList<Question>();
		
		// select nbest questions
		//NbestDataSplits<NbestQuestionsEntry> nbest = new NbestDataSplits<NbestQuestionsEntry>(nbestQuestions);
		for(Question question : questions) {
			Distributions dists[] = allDistributions.get(question);
			AbstractQuestionEstimator.Result[] results = new AbstractQuestionEstimator.Result[data.length];
			boolean badQuestion = false;

			ContextVariable contextVar = new ContextVariable(question);
			NbestDataSplits<NbestQuestionsEntry> nbest = nbestByContext.get(contextVar);
			if (nbest == null) {
				// failed the stopping rule
				allDistributions.remove(question);
				continue;
			}

			//double randomness = Math.random() * lm.getRandomness();
			
			for(int i=0; i<data.length; ++i) {
				Distributions d = dists[i];
				AbstractQuestionEstimator.Result result = estimator1.estimateQuestion(i, activeNode, d);
				// add randomness
				//result.cost *= 1.0 + randomness;
				
				results[i] = result;
				
				if (d.getTotalFalseCount() < lm.getMIN_TOTAL_COUNT()) {
					boolean doAdd = true;
					for(ListIterator<Question> it = uselessTrueQuestions.listIterator(); it.hasNext();) {
						Question q = it.next();
						if (question.implication(q, true) == Boolean.TRUE) {
							// q => question
							// already covered
							doAdd = false;
							break;
						}
						if (q.implication(question, true) == Boolean.TRUE) {
							// question => q
							it.remove();
							//System.err.println(q + " removed from useless true (question="+question.toShortString()+")");
						}
					}
					if (doAdd || uselessTrueQuestions.size() == 0) {
						uselessTrueQuestions.add(question);
						//System.err.println(question + " added to useless true");
					}
					badQuestion = true;
				} else if (d.getTotalTrueCount() < lm.getMIN_TOTAL_COUNT()) {
					boolean doAdd = true;
					for(ListIterator<Question> it = uselessFalseQuestions.listIterator(); it.hasNext();) {
						Question q = it.next();
						if (question.implication(q, false) == Boolean.FALSE) {
							// not(q) => not(question)
							// already covered
							doAdd = false;
							break;
						}
						if (q.implication(question, false) == Boolean.FALSE) {
							// not(question) => not(q)
							it.remove();
							//System.err.println(q + " removed from useless false (question="+question.toShortString()+")");
						}
					}
					if (doAdd || uselessFalseQuestions.size() == 0) {
						uselessFalseQuestions.add(question);
						//System.err.println(question + " added to useless false");
					}
					badQuestion = true;
				}
			}
			
			if (badQuestion || !estimator1.isGood(results, activeNode)) {
				badQuestion = true;
			}
			
			if (!badQuestion) {
				NbestQuestionsEntry removed = nbest.add(new NbestQuestionsEntry(question, results));
				if (removed != null) {
					allDistributions.remove(removed.question);
				}
			}
		}
		
		/*
		{
			TreeNode treeNode = new TreeNode();
			treeNode.uselessTrueQuestions = new ArrayList<Question>(uselessTrueQuestions);
			treeNode.uselessFalseQuestions = new ArrayList<Question>(uselessFalseQuestions);
			nodeStorage.putNode(nodeId, treeNode);
		}
		*/
		//activeNode.trueQuestions.addAll(uselessTrueQuestions);
		//activeNode.falseQuestions.addAll(uselessFalseQuestions);
		
		
		int totalNbestSize = 0;
		for(NbestDataSplits<NbestQuestionsEntry> nbest : nbestByContext.values()) {
			totalNbestSize += nbest.size();
		}
		
		// make the Exchange questions
		final HashMap<Question, Pair<HashSet<Integer>, HashSet<Integer>>> wordSets = new HashMap<Question, Pair<HashSet<Integer>, HashSet<Integer>>>(); 
		
		if (totalNbestSize == 0 || allowOvertQuestions(ctx)) {
			JobGroup group = manager.createJobGroup("exchange algo");
			//final NbestDataSplits<NbestQuestionsEntry> _nbest = nbest;
			ArrayList<Job> jobs = new ArrayList<Job>();
			
			for(byte factorIdx=0; factorIdx<factorsForExchangeAlgo.length; ++factorIdx) {
				if (eventsByWordAndData[factorIdx] != null) {
					final HashMap<Integer, Long2IntMap>[][] eventsByFactor = eventsByWordAndData[factorIdx];
					for(byte i=minOffset; i<contextSize; ++i) {
						eventsByWord[factorIdx][i] = new HashMap<Integer, Long2IntMap>(eventsByFactor[0][i].size());
						final HashMap<Integer, Long2IntMap> events = eventsByWord[factorIdx][i];
						final byte offset = i;
						Runnable run = new Runnable() {
							@Override
							public void run() {
								for(int i=0; i<data.length; ++i) {
									for(Map.Entry<Integer, Long2IntMap> entry : eventsByFactor[i][offset].entrySet()) {
										Long2IntMap map = events.get(entry.getKey());
										if (map == null) {
											events.put(entry.getKey(), (Long2IntMap) entry.getValue().clone());
										} else {
											ExchangeAlgo.addWordToSet(map, entry.getValue());
										}
									}
								}
							}
						};
						Job job = new Job(run, "populate exchange counts");
						jobs.add(job);
					}
				}
			}
			manager.addBatch(group, jobs);
			group.join();
			jobs.clear();
			
			for(byte factorIdx=0; factorIdx<factorsForExchangeAlgo.length; ++factorIdx) {
				if (!factorsForExchangeAlgo[factorIdx]) continue;
				final byte factorIndex = factorIdx;
				
				for(byte i=minOffset; i<contextSize; ++i) {
					final HashMap<Integer, Long2IntMap> events = eventsByWord[factorIdx][i];
					final byte offset = i;
						
					if (events.size() < 2) continue;
					
					final int nr_exchange_inits = events.keySet().size() > 6 ? NR_RANDOM_EXCHANGE_INIT : 1;
					
					//System.err.printf("node #%d has %d exchange inits (offset %d)\n", nodeId, nr_exchange_inits, i);
					
					final Question[] exchangeQuestions = new Question[nr_exchange_inits];
					final Result[][] exchangeResults = new Result[nr_exchange_inits][];
					for(int exchangeRun=0; exchangeRun < nr_exchange_inits; ++exchangeRun) {
						final int myExchangeRun = exchangeRun;
						
						final Set<Integer> exchangeInit = new HashSet<Integer>(events.keySet().size()/2);
						Random rnd = new Random();
						exchangeInit: do {
							for(Integer word : events.keySet()) {
								if (rnd.nextFloat() < 0.5) {
									exchangeInit.add(word);
									if (exchangeInit.size() > events.keySet().size() - 3) break exchangeInit;
								}
							}
						} while(exchangeInit.size() < 3);
						
						Runnable run = new Runnable() {
							@SuppressWarnings("unused")
							@Override
							public void run() {
								Pair<HashSet<Integer>, HashSet<Integer>> wordSet = 
									ExchangeAlgo.getExchangeWordSplit(events.keySet(), events, exchangeInit);
								
								Question question = new InSetQuestion((byte) (offset-contextSize), factorIndex, wordSet.getFirst());
								
								Distributions dists[] = new Distributions[data.length];
								AbstractQuestionEstimator.Result results[] = new AbstractQuestionEstimator.Result[data.length];
	
								ContextVariable contextVar = new ContextVariable(question);
								NbestDataSplits<NbestQuestionsEntry> nbest = nbestByContext.get(contextVar);
								if (nbest == null) {
									// failed the stopping rule
									// TODO: move up, before the exchange algorithm
									return;
								}
								/*
								for(Map.Entry<ContextVariable, NbestDataSplits<NbestQuestionsEntry>> e : nbestByContext.entrySet()) {
									if (contextVar.equals(e.getKey())) {
										contextVar = e.getKey();
										nbest = e.getValue();
										break;
									}
								}
								*/
								
								for(int i=0; i<dists.length; ++i) {
									// construct left and right distributions for the split
									Distributions d = new Distributions(futureWordIndex);
									//d.parentTrain = parentTrainDist;
									//d.parentDev = parentDevDist;
				
									// train true distributions
									//d.trueDist = makeDistribution(wordSet.getFirst(), eventsByWordAndData[factorIndex][i][offset]);
									{
										Map<Integer,Long2IntMap> ev = eventsByWordAndData[factorIndex][i][offset];
										for(Integer word : wordSet.getFirst()) {
											Long2IntMap e = ev.get(word);
											if (e != null) {
												for(Long2IntMap.Iterator it = e.iterator(); it.hasNext(); ) {
													Long2IntMap.Entry entry = it.next();
													
													d.addTrueCount(entry.getKey(), entry.getValue());
												}
											}
										}
									}
									// train false distributions
									//d.falseDist = makeDistribution(wordSet.getSecond(), eventsByWordAndData[factorIndex][i][offset]);
									{
										Map<Integer,Long2IntMap> ev = eventsByWordAndData[factorIndex][i][offset];
										for(Integer word : wordSet.getSecond()) {
											Long2IntMap e = ev.get(word);
											if (e != null) {
												for(Long2IntMap.Iterator it = e.iterator(); it.hasNext(); ) {
													Long2IntMap.Entry entry = it.next();
													
													d.addFalseCount(entry.getKey(), entry.getValue());
												}
											}
										}
									}
									
									AbstractQuestionEstimator.Result result = estimator1.estimateQuestion(i, activeNode, d);
									
									results[i] = result;
									dists[i] = d;
								}
								
								synchronized(allDistributions) {
									allDistributions.put(question, dists);
									questions.add(question);
									wordSets.put(question, wordSet);
									
									if (estimator1.isGood(results, activeNode)) {
										NbestQuestionsEntry removed = nbest.add(new NbestQuestionsEntry(question, results));
										if (removed != null) {
											allDistributions.remove(removed.question);
										}
									}
									
									if (false && nr_exchange_inits > 1) {
										// debug staff only
										exchangeQuestions[myExchangeRun] = question;
										exchangeResults[myExchangeRun] = results;
										boolean allDone = true;
										for(Question q : exchangeQuestions) {
											if (q == null) {
												allDone = false;
												break;
											}
										}
										
										if (allDone) {
											StringBuilder sb = new StringBuilder();
											sb.append("Exchage runs: ");
											sb.append(events.keySet().size());
											sb.append(" words\n");
											int run = 1;
											for(Result[] exResults : exchangeResults) {
												sb.append(String.format("#%d:", run));
												for(Result r : exResults) {
													sb.append(' ');
													sb.append(r.getCost());
												}
												sb.append("\n");
												++run;
											}
											System.err.print(sb.toString());
										}
									}
								}
								
							}
						};
						
						Job job = new Job(run, "do exchange algo");
						jobs.add(job);
					}
				}
			}
			manager.addBatch(group, jobs);
			group.join();
		}

		{
			StringBuilder sb = new StringBuilder("Context order:");
			for(ContextVariable var : orderOfContextVariables) {
				NbestDataSplits<NbestQuestionsEntry> myNbest = nbestByContext.get(var);
				sb.append(' ');
				sb.append(var.toString());
				sb.append(" [");
				sb.append(ctxVarScores.get(var));
				sb.append(" #");
				sb.append(myNbest.size());
				sb.append('@');
				sb.append(myNbest.hashCode());
				sb.append("]");
			}
			sb.append("\n");
			System.err.print(sb.toString());
		}
		
		NbestDataSplits<NbestQuestionsEntry> nbest = null;
		
		if (lm.getRandomness() > 0.0) {
			// first check if there's anything good at all
			ArrayList<ContextVariable> goodVars = new ArrayList<ContextVariable>(orderOfContextVariables.size());
			for(ContextVariable var : orderOfContextVariables) {
				NbestDataSplits<NbestQuestionsEntry> myNbest = nbestByContext.get(var);
				if (myNbest.size() > 0) {
					goodVars.add(var);
				}
			}
			
			switch(goodVars.size()) {
			case 0: 
				return;
			case 1: 
				nbest = nbestByContext.get(goodVars.get(0));
				break;
			default:
				Random rnd = new Random();
				StringBuilder sb = new StringBuilder("Ctx vars: [");
				do {
					for(ContextVariable var : goodVars) {
						if (rnd.nextDouble() < lm.getRandomness()) {
							NbestDataSplits<NbestQuestionsEntry> myNbest = nbestByContext.get(var);
							sb.append(var.toString());
							sb.append(' ');
							if (nbest == null) {
								nbest = myNbest;
							} else {
								for(NbestQuestionsEntry split : myNbest.getSplits()) {
									nbest.add(split);
								}
							}
						}
					}
				} while(nbest == null);
				sb.append(']');
				System.err.println(sb.toString());
			}
		} else {
			for(ContextVariable var : orderOfContextVariables) {
				NbestDataSplits<NbestQuestionsEntry> myNbest = nbestByContext.get(var);
				if (myNbest.size() > 0) {
					nbest = myNbest;
					break;
				}
			}
		}
		
		if (nbest == null) {
			return;
		}

		// apply the second estimator if necessary
		if (nbestQuestions > 1 && estimator2 != null && nbest.size() > 1) {
			NbestQuestionsEntry[] entries = nbest.toArray(new NbestQuestionsEntry[nbest.size()]);
			nbest = new NbestDataSplits<NbestQuestionsEntry>(1);
			for(NbestQuestionsEntry entry : entries) {
				Distributions dists[] = allDistributions.get(entry.question);
				AbstractQuestionEstimator.Result results[] = new AbstractQuestionEstimator.Result[data.length];
				for(int i=0; i<data.length; ++i) {
					AbstractQuestionEstimator.Result result = estimator2.estimateQuestion(0, activeNode, dists[i]);
					results[i] = result;
				}
				nbest.add(new NbestQuestionsEntry(entry.question, results));				
			}
		}
		
		if (nbest.size() == 0) return;
		
		AbstractQuestionEstimator estimator = estimator2 == null ? estimator1 : estimator2;
		
		NbestQuestionsEntry theBest = nbest.getSplits().first();

		// check the stopping criterion
		if (!estimator.isGood(theBest.results, activeNode)) {
			System.out.printf("post check failed on %s\n", theBest.question.toShortString());
			return;
		}
			
		{
			StringBuilder sb = new StringBuilder();
			sb.append(String.format("Node #%d the best question: %s", nodeId, theBest.question.toShortString()));
			for(Result res : theBest.results) {
				sb.append(' ');
				sb.append(res.getCost());
			}
			sb.append('\n');
			System.err.print(sb.toString());
		}
		
		Distributions theBestDistributions[] = allDistributions.get(theBest.question);
		allDistributions.clear();
		//allDistributions = null;
		
		long trueBranchWords[];
		long falseBranchWords[];
		{
			Long2IntMap trueWords = new Long2IntMap(futureWordIndex.size());
			Long2IntMap falseWords = new Long2IntMap(futureWordIndex.size());
			
			long keys[] = futureWordIndex.toArray();
			for(int i=0; i<data.length; ++i) {
				Distributions d = theBestDistributions[i];
				for(int idx=0; idx<keys.length; ++idx) {
					if (d.trueDist[idx] > 0) {
						trueWords.addAndGet(keys[idx], 1);
					}
					if (d.falseDist[idx] > 0) {
						falseWords.addAndGet(keys[idx], 1);
					}
				}
			}
			trueBranchWords = new long[trueWords.size()];
			int i=0;
			for(Long2IntMap.Iterator it=trueWords.iterator(); it.hasNext(); ) {
				Long2IntMap.Entry e = it.next();
				trueBranchWords[i++] = e.getKey();
			}
			i = 0;
			falseBranchWords = new long[falseWords.size()];
			for(Long2IntMap.Iterator it=falseWords.iterator(); it.hasNext(); ) {
				Long2IntMap.Entry e = it.next();
				falseBranchWords[i++] = e.getKey();
			}
		}
		// grow the tree and add create new active nodes
		currentLeaf.getPayload().question = theBest.question;
		
		BinaryTree<HistoryTreePayload> rightLeaf = new BinaryTree<HistoryTreePayload>(new HistoryTreePayload((Question)null));
		int rightId = newCtx.getNextNodeId();
		int leftId = newCtx.getNextNodeId();
		
		rightLeaf.getPayload().clusterid = rightId;
		currentLeaf.attachRight(rightLeaf);
		
		double newCosts[] = new double[data.length];
		
		ActiveTreeNode rightNode = new ActiveTreeNode(data.length);
		rightNode.possibleWords = trueBranchWords;
		for(int i=0; i<data.length; ++i) {
			rightNode.counts[i] = theBestDistributions[i].getTotalTrueCount();
			theBest.results[i].setupTrueActiveNode(i, rightNode, activeNode, theBestDistributions[i]);
			newCosts[i] = rightNode.nodeCosts[i];
		}

		{
			int newNodeQuestions[] = new int[activeNode.possibleQuestions.length];
			int pos = 0;
			for(int i=0; i<activeNode.possibleQuestions.length; ++i) {
				Question q = allQuestions[activeNode.possibleQuestions[i]];
				/*
				if (q == null || theBest.question == null) {
					System.err.print("");
				}
				*/
				if (QuestionGenerator.isImplied(q, trueQuestions, falseQuestions)) continue;
				if (QuestionGenerator.isImplied(q, uselessTrueQuestions, uselessFalseQuestions)) continue;
				if (q.implication(theBest.question, true) != null) continue;
				
				newNodeQuestions[pos++] = activeNode.possibleQuestions[i];
			}
			if (pos < newNodeQuestions.length) {
				newNodeQuestions = Arrays.copyOf(newNodeQuestions, pos);
			}
			//System.err.printf("Node #%d possibleQuestions: %d, parent %d\n", rightId, pos, activeNode.possibleQuestions.length);
			rightNode.possibleQuestions = newNodeQuestions;
		}
		// TODO: check the type of theBest.result and get the lambdas if they are there
		//rightNode.smoothedDistribution = getTrimmedDistribution(activeNode.smoothedDistribution, 
		//		theBestDistributions.trueTrain, theBestDistributions.trueDev, trueLambda);
		newCtx.putLeaf(rightId, rightLeaf);
		
		newCtx.getActiveNodes().putNode(rightId, rightNode);
		
		inSetQuestion: {
			if (theBest.question.getQuestionType() == Question.IN_SET_QUESTION) {

				Set<Integer> complement = wordSets.get(theBest.question).getSecond();
				if (complement.size() > 0) {
					byte factorIdx = theBest.question.getIndex();
					
					Collection<Integer> possibleWords = ExchangeAlgo.getPossibleFactors(factorIdx, theBest.question.getTimeOffset(), currentLeaf);

					if (possibleWords.size() > complement.size()) {
						//System.out.printf("Complement is NOT full: %s (%d/%d)\n", 
						//		theBest.question.toShortString(), complement.size(), possibleWords.size());

						HistoryTreePayload falsePayload = new HistoryTreePayload((Question)null);
						falsePayload.question = new InSetQuestion(theBest.question.getTimeOffset(), factorIdx, complement);
						falsePayload.clusterid = leftId;
						BinaryTree<HistoryTreePayload> falseBranch = new BinaryTree<HistoryTreePayload> (falsePayload); 
						currentLeaf.attachLeft(falseBranch);

						int newLeftId = newCtx.getNextNodeId();
						int newRightId = newCtx.getNextNodeId();
						
						HistoryTreePayload truePayload = new HistoryTreePayload((Question)null);
						truePayload.clusterid = newRightId;

						BinaryTree<HistoryTreePayload> trueBranch = new BinaryTree<HistoryTreePayload> (truePayload);
						falseBranch.attachRight(trueBranch);

						ActiveTreeNode leftNode = new ActiveTreeNode(data.length);
						leftNode.possibleWords = falseBranchWords;
						
						for(int i=0; i<data.length; ++i) {
							leftNode.counts[i] = theBestDistributions[i].getTotalFalseCount();
							theBest.results[i].setupFalseActiveNode(i, leftNode, activeNode, theBestDistributions[i]);
							newCosts[i] += leftNode.nodeCosts[i];
						}
						{
							int newNodeQuestions[] = new int[activeNode.possibleQuestions.length];
							int pos = 0;
							for(int i=0; i<activeNode.possibleQuestions.length; ++i) {
								Question q = allQuestions[activeNode.possibleQuestions[i]];
								
								if (QuestionGenerator.isImplied(q, trueQuestions, falseQuestions)) continue;
								if (QuestionGenerator.isImplied(q, uselessTrueQuestions, uselessFalseQuestions)) continue;
								if (q.implication(theBest.question, false) != null) continue;
								
								newNodeQuestions[pos++] = activeNode.possibleQuestions[i];
							}
							if (pos < newNodeQuestions.length) {
								newNodeQuestions = Arrays.copyOf(newNodeQuestions, pos);
							}
							//System.err.printf("Node #%d possibleQuestions: %d, parent %d\n", leftId, pos, activeNode.possibleQuestions.length);
							leftNode.possibleQuestions = newNodeQuestions;
						}

						newCtx.putLeaf(newRightId, trueBranch);
						newCtx.getActiveNodes().putNode(newRightId, leftNode);

						// the backoff node
						BinaryTree<HistoryTreePayload> backoffNode = new BinaryTree<HistoryTreePayload>(new HistoryTreePayload((Question)null));
						backoffNode.getPayload().isBackoff = true;
						backoffNode.getPayload().clusterid = newLeftId;
						falseBranch.attachLeft(backoffNode);
						break inSetQuestion;
					}
					//System.out.printf("Complement is full: %s (%d/%d)\n", 
					//		theBest.question.toShortString(), complement.size(), possibleWords.size());
				}
			}
			// not an in-set question or the complement is full
			ActiveTreeNode leftNode = new ActiveTreeNode(data.length);
			leftNode.possibleWords = falseBranchWords;

			for(int i=0; i<data.length; ++i) {
				leftNode.counts[i] = theBestDistributions[i].getTotalFalseCount();
				theBest.results[i].setupFalseActiveNode(i, leftNode, activeNode, theBestDistributions[i]);
				newCosts[i] += leftNode.nodeCosts[i];
			}
			{
				int newNodeQuestions[] = new int[activeNode.possibleQuestions.length];
				int pos = 0;
				for(int i=0; i<activeNode.possibleQuestions.length; ++i) {
					Question q = allQuestions[activeNode.possibleQuestions[i]];
					
					if (QuestionGenerator.isImplied(q, trueQuestions, falseQuestions)) continue;
					if (QuestionGenerator.isImplied(q, uselessTrueQuestions, uselessFalseQuestions)) continue;
					if (q.implication(theBest.question, false) != null) continue;
					
					newNodeQuestions[pos++] = activeNode.possibleQuestions[i];
				}
				if (pos < newNodeQuestions.length) {
					newNodeQuestions = Arrays.copyOf(newNodeQuestions, pos);
				}
				//System.err.printf("Node #%d possibleQuestions: %d, parent %d\n", leftId, pos, activeNode.possibleQuestions.length);
				leftNode.possibleQuestions = newNodeQuestions;
			}

			BinaryTree<HistoryTreePayload> leftLeaf = new BinaryTree<HistoryTreePayload>(new HistoryTreePayload((Question)null));
			
			leftLeaf.getPayload().clusterid = leftId;
			currentLeaf.attachLeft(leftLeaf);

			newCtx.putLeaf(leftId, leftLeaf);
			newCtx.getActiveNodes().putNode(leftId, leftNode);
		}
		
		synchronized(this.entropyReductions) {
			for(int i = 0; i<entropyReductions.length; ++i) {
				entropyReductions[i] += activeNode.nodeCosts[i] - newCosts[i];
			}
		}
		
		return;
	}
	
	private static void constructTrueAndFalseQuestions(BinaryTree<HistoryTreePayload> leaf, 
			List<Question> trueQuestions, List<Question> falseQuestions) 
	{
		// Walk from the leaf up to the root. At each step, the parent's stored
		// question was answered "false" on this path if we arrived from the
		// parent's left child, and "true" if we arrived from the right child.
		for(BinaryTree<HistoryTreePayload> child = leaf, parent = leaf.getParent();
				parent != null;
				child = parent, parent = parent.getParent()) {
			Question question = parent.getPayload().question;
			if (parent.getLeft() == child) {
				falseQuestions.add(question);
			} else {
				trueQuestions.add(question);
			}
		}
	}
	
	/**
	 * Replays ("fast-forwards") one already-grown tree level over the training data:
	 * every node read from {@code ctx} is either copied through unchanged (leaf
	 * nodes) or has its data split along its stored question into its two
	 * children, recreating the children's active-node statistics in
	 * {@code newCtx} without searching for new questions.
	 *
	 * @param ctx    the context whose data and tree level are being replayed
	 * @param newCtx the context receiving the split data and new active nodes
	 * @return the number of non-leaf child nodes, i.e. nodes that still need
	 *         growing in subsequent iterations
	 * @throws IOException on failure reading or writing the training data
	 */
	private int fastForwardOneLevel(TrainingContext ctx, TrainingContext newCtx) throws IOException {
		TrainingDataNodeReader reader = ctx.getDataReader();
		TrainingDataNodeWriter writer = newCtx.getDataWriter();
		
		//reader.reset();
		
		int oldNodeCount = 0;
		//int newNodeCount = 0;
		int maxId = ctx.getLastNodeId().intValue();
		int nonLeafNodes = 0;
		
		// Use the final-selection estimator when n-best reranking is configured,
		// matching the estimator that originally scored these splits.
		final AbstractQuestionEstimator estimator;
		if (nbestQuestions > 1 && estimator2 != null) {
			estimator = estimator2;
		} else {
			estimator = estimator1;
		}
		
		for(ReadableTrainingDataNode nodeData = reader.getNext(); nodeData != null; nodeData = reader.getNext()) {
			// track the largest node id seen so newCtx can continue numbering after it
			if (nodeData.getNodeId() > maxId) {
				maxId = nodeData.getNodeId();
			}
			
			++oldNodeCount;
			BinaryTree<HistoryTreePayload> oldLeaf = ctx.getLeaf(nodeData.getNodeId());
			
			ActiveTreeNode activeNode = ctx.getActiveNodes().getNode(nodeData.getNodeId());
			// NOTE(review): activeNode is dereferenced here unconditionally, yet it is
			// null-checked in the leaf branch below — either that check is dead or this
			// line can throw NPE for nodes without an active entry; confirm which.
			final CompactReadOnlyLongHashSet futureWordIndex = new CompactReadOnlyLongHashSet(activeNode.possibleWords);

			if (oldLeaf.isLeaf()) {
				if (!oldLeaf.getPayload().isBackoff) {
					// simply copy the node
					//System.out.printf("copying leaf node %d\n", nodeData.nodeId);
					WritableTrainingDataNode newNode = writer.createNode(nodeData.getNodeId(), nodeData.getNumData());
					writer.add(newNode);

					// stream each data set through unchanged
					for(int i=0; i<nodeData.getNumData(); ++i) {
						ReadableTrainingData in = nodeData.getData(i);
						WritableTrainingData out = newNode.getData(i);
						
						in.start();
						out.setContextSize(in.getContextSize());
						out.start();
						while(in.hasNext()) {
							out.add(in.next());
						}
						out.finish();
					}
					
					if (activeNode != null) {
						newCtx.getActiveNodes().putNode(nodeData.getNodeId(), activeNode);
					}
					newCtx.putLeaf(nodeData.getNodeId(), oldLeaf);
				} else {
					// backoff leaves carry no forward data: just read past them
					nodeData.skipData();
				}
				continue;
			}
			
			//newNodeCount += 2;
			
			// non-leaf node: split its data along the stored question
			BinaryTree<HistoryTreePayload> left = getFalseBranch(oldLeaf);
			BinaryTree<HistoryTreePayload> right = getTrueBranch(oldLeaf);
			
			// children that are themselves non-leaves will need further growing
			if (!left.isLeaf()) {
				++nonLeafNodes;
			}
			if (!right.isLeaf()) {
				++nonLeafNodes;
			}
			
			int leftNodeId = left.getPayload().clusterid;
			int rightNodeId = right.getPayload().clusterid;
			
			WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, nodeData.getNumData());
			WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, nodeData.getNumData());
			
			writer.add(leftDataNode);
			writer.add(rightDataNode);
			
			Distributions dists[] = new Distributions[nodeData.getNumData()];
			AbstractQuestionEstimator.Result results[] = new AbstractQuestionEstimator.Result[nodeData.getNumData()];
			
			// per-word document frequency across data sets, to rebuild possibleWords
			Long2IntMap trueWords = new Long2IntMap(futureWordIndex.size());
			Long2IntMap falseWords = new Long2IntMap(futureWordIndex.size());
			
			for(int i=0; i<dists.length; ++i) {
				Distributions dist = new Distributions(futureWordIndex);
				
				Long2IntMap trueCounts = new Long2IntMap();
				Long2IntMap falseCounts = new Long2IntMap();
				
				// route each event to the true (right) or false (left) child,
				// collecting per-branch word counts as a side effect
				TrainingDataUtil.splitData(nodeData.getData(i), oldLeaf.getPayload().question, 
						rightDataNode.getData(i), leftDataNode.getData(i), null, trueCounts, falseCounts);
				
				for(Long2IntMap.Iterator it = trueCounts.iterator(); it.hasNext();) {
					Long2IntMap.Entry e = it.next();
					dist.addTrueCount(e.getKey(), e.getValue());
					trueWords.addAndGet(e.getKey(), 1);
				}
				for(Long2IntMap.Iterator it = falseCounts.iterator(); it.hasNext();) {
					Long2IntMap.Entry e = it.next();
					dist.addFalseCount(e.getKey(), e.getValue());
					falseWords.addAndGet(e.getKey(), 1);
				}
				
				// re-estimate the split so the children's node costs can be restored
				AbstractQuestionEstimator.Result result = estimator.estimateQuestion(i, activeNode, dist);
				results[i] = result;
				dists[i] = dist;
			}
			
			
			// rebuild the right ("true") child's active node
			ActiveTreeNode rightNode = new ActiveTreeNode(nodeData.getNumData());
			for(int i=0; i<dists.length; ++i) {
				rightNode.counts[i] = dists[i].getTotalTrueCount();
				results[i].setupTrueActiveNode(i, rightNode, activeNode, dists[i]);
			}
			rightNode.possibleWords = new long[trueWords.size()];
			{
				int i=0;
				for(Long2IntMap.Iterator it=trueWords.iterator(); it.hasNext(); ) {
					Long2IntMap.Entry e = it.next();
					rightNode.possibleWords[i++] = e.getKey();
				}
			}
			newCtx.getActiveNodes().putNode(rightNodeId, rightNode);
			newCtx.putLeaf(rightNodeId, right);

			// rebuild the left ("false") child's active node
			ActiveTreeNode leftNode = new ActiveTreeNode(nodeData.getNumData());
			for(int i=0; i<dists.length; ++i) {
				leftNode.counts[i] = dists[i].getTotalFalseCount();
				results[i].setupFalseActiveNode(i, leftNode, activeNode, dists[i]);
			}
			leftNode.possibleWords = new long[falseWords.size()];
			{
				int i=0;
				for(Long2IntMap.Iterator it=falseWords.iterator(); it.hasNext(); ) {
					Long2IntMap.Entry e = it.next();
					leftNode.possibleWords[i++] = e.getKey();
				}
			}
			newCtx.getActiveNodes().putNode(leftNodeId, leftNode);
			newCtx.putLeaf(leftNodeId, left);
		}		
		reader.close();
		writer.close();

		newCtx.getLastNodeId().set(maxId);
		System.err.printf("old node count: %d, non-leaf node count: %d\n", oldNodeCount, nonLeafNodes);
		return nonLeafNodes;
	}
	
	/**
	 * Persists the trained history tree: installs the tree from the given
	 * training context into the language model, then asks the model to save it.
	 */
	private void saveTrainedTree(TrainingContext ctx) {
		final BinaryTree<HistoryTreePayload> trainedTree = ctx.getTree();
		lm.setHistoryTree(trainedTree);
		lm.saveHistoryTree();
	}
	
	/**
	 * Aggregates the per-word event counts of every word in {@code wordSet}
	 * into a single count distribution. Words with no recorded events are skipped.
	 *
	 * @param wordSet the words whose event counts are merged
	 * @param events  per-word event-count maps
	 * @return the merged distribution over event keys
	 */
	private static ConstCountDistribution makeDistribution(Collection<Integer> wordSet, Map<Integer, Long2IntMap> events) {
		Long2LongMap aggregated = new Long2LongMap();
		
		for(Integer word : wordSet) {
			Long2IntMap wordEvents = events.get(word);
			if (wordEvents == null) continue; // no events recorded for this word
			
			for(Long2IntMap.Iterator it = wordEvents.iterator(); it.hasNext(); ) {
				Long2IntMap.Entry counted = it.next();
				aggregated.addAndGet(counted.getKey(), counted.getValue());
			}
		}
		
		return new ConstCountDistribution(aggregated);
	}
	
	/**
	 * Writes a serialized snapshot of the history tree trained so far.
	 * No-op when the context has no tree yet. The file name encodes the LM id
	 * and the zero-padded iteration number.
	 */
	private void makeSnapshot(TrainingContext ctx) throws Exception {
		final BinaryTree<HistoryTreePayload> tree = ctx.getTree();
		if (tree != null) {
			final String snapshotFile = String.format("snapshot-%s-%03d.ser.gz", lm.getId(), ctx.getIteration());
			LanguageModel.saveSerializedHistoryTree(tree, snapshotFile);
		}
	}

	/**
	 * Returns the number of random initializations used per exchange-algorithm run.
	 */
	public static int getNR_RANDOM_EXCHANGE_INIT() {
		return NR_RANDOM_EXCHANGE_INIT;
	}

	/**
	 * Sets the number of random initializations used per exchange-algorithm run.
	 * Mutates static state; not synchronized — call before training starts.
	 */
	public static void setNR_RANDOM_EXCHANGE_INIT(int nRRANDOMEXCHANGEINIT) {
		NR_RANDOM_EXCHANGE_INIT = nRRANDOMEXCHANGEINIT;
	}

	/**
	 * Excludes the given factor from the exchange algorithm's question search
	 * (clears its flag in {@code factorsForExchangeAlgo}).
	 *
	 * @param factorIdx index into the per-factor flag array; must be a valid factor index
	 */
	public void ignoreOvertFactor(byte factorIdx) {
		factorsForExchangeAlgo[factorIdx] = false;
	}
}
