/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.nio.channels.*;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.atomic.*;
import java.util.concurrent.locks.*;

import com.sleepycat.je.*;

import de.jtem.numericalMethods.calculus.function.RealFunctionOfSeveralVariables;
import de.jtem.numericalMethods.calculus.minimizing.Powell;

import edu.berkeley.nlp.util.*;
import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.Context;
import edu.umd.clip.lm.model.data.ContextFuturesPair;
import edu.umd.clip.lm.model.data.ContextReductionTrainingDataFilter;
import edu.umd.clip.lm.model.data.MaskedFuturesTrainingDataFilter;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataNodeReader;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataNodeWriter;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataReader;
import edu.umd.clip.lm.model.data.ReadableTrainingData;
import edu.umd.clip.lm.model.data.ReadableTrainingDataNode;
import edu.umd.clip.lm.model.data.TrainingDataBlock;
import edu.umd.clip.lm.model.data.TrainingDataFilter;
import edu.umd.clip.lm.model.data.TrainingDataNodeReader;
import edu.umd.clip.lm.model.data.TrainingDataNodeWriter;
import edu.umd.clip.lm.model.data.TrainingDataReader;
import edu.umd.clip.lm.model.data.TrainingDataUtil;
import edu.umd.clip.lm.model.data.TupleCountPair;
import edu.umd.clip.lm.model.data.WritableTrainingData;
import edu.umd.clip.lm.model.data.WritableTrainingDataNode;
import edu.umd.clip.lm.model.training.ContextClusterStorage.ContextClusters;
import edu.umd.clip.lm.storage.*;
import edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;
import edu.umd.clip.smoothing.*;



/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class NewClusterPopulator {
	private final LanguageModel lm;                          // the language model whose clusters are being populated
	private final BinaryTree<HistoryTreePayload> theTree;    // lm's history decision tree (source of cluster ids)
	SmoothingDataStorage distStorage;
	ContextClusterStorage contextClusterStorage;             // cache: reduced backoff context -> cluster ids in all backoff LMs
	LanguageModel boLMs[];                                   // chain of backoff LMs, starting from lm's immediate backoff
	Map<ContextClusters, Long2DoubleMap> boCluster2WordProb; // backoff cluster tuple -> (word bits -> backoff probability)
	private AbstractProbTreeStorage probTreeStorage;         // destination storage for the estimated probability trees
	private DiscountWeights discountWeights[];               // per-cluster smoothing parameters, indexed by cluster id
	private BinaryTree<HistoryTreePayload> nodes[];          // tree node for each cluster id, indexed by cluster id
	int nrClusters;                                          // number of clusters = max cluster id in theTree + 1
	private File dataDir;                                    // working directory for temporary training data files
	private int nrDataFiles;                                 // number of interpolation data splits
	private Environment env;                                 // BDB environment under dataDir/db
	BDBActiveNodeStorage<Long2DoubleMap[]> dotPdistStorage;  // per-level storage of dot-P distributions, one map per split
	ContextReductionTrainingDataFilter boContextReducer;     // reduces contexts to the backoff LM's overt/hidden order
	
	@SuppressWarnings("unchecked")
	public NewClusterPopulator(LanguageModel lm, AbstractProbTreeStorage storage) {
		this.lm = lm;
		this.theTree = lm.getHistoryTree();
		this.probTreeStorage = storage;

		// First pass over the tree: the largest cluster id determines how
		// many clusters exist.
		int largestId = 0;
		BinaryTreeIterator<HistoryTreePayload> scan = theTree.getPostOrderIterator();
		while (scan.hasNext()) {
			largestId = Math.max(largestId, scan.next().clusterid);
		}
		nrClusters = largestId + 1;

		// Second pass: index every tree node by its cluster id and give each
		// cluster a fresh (empty) set of discount weights.
		discountWeights = new DiscountWeights[nrClusters];
		nodes = new BinaryTree[nrClusters];
		BinaryTreeIterator<HistoryTreePayload> walk = theTree.getPostOrderIterator();
		while (walk.hasNext()) {
			BinaryTree<HistoryTreePayload> treeNode = walk.nextNode();
			int id = treeNode.getPayload().clusterid;
			nodes[id] = treeNode;
			discountWeights[id] = new DiscountWeights();
		}
	}
	
	/**
	 * Prepares everything needed before population: opens the BDB environment
	 * under {@code dataDir/db}, resolves the chain of backoff LMs, combines
	 * and context-reduces the interpolation data files (both with and without
	 * future masking), and precomputes backoff word probabilities for every
	 * data split.
	 *
	 * Fix: the throws clause declared {@code DatabaseException} twice; the
	 * duplicate is removed (no behavioral change).
	 *
	 * @param dataDir working directory for temporary training data files
	 * @throws IOException if a data file cannot be read or written
	 * @throws DatabaseException on BDB environment/storage errors
	 */
	public void initialize(final File dataDir) throws IOException, DatabaseException {
		this.dataDir = dataDir;
		OnDiskTrainingDataNodeWriter.setTempDir(dataDir);
		
		EnvironmentConfig envConf = new EnvironmentConfig();
		envConf.setAllowCreate(true);
		envConf.setSharedCache(true);
		envConf.setTransactional(false);
		envConf.setReadOnly(false);
		envConf.setCachePercent(20);
		// 100MB log files
		envConf.setConfigParam("je.log.fileMax", Integer.toString(100*1024*1024));
		
		File dir = new File(dataDir, "db");
		if (!dir.isDirectory()) {
			dir.mkdirs();
		}
		env = new Environment(dir, envConf);
		contextClusterStorage = new ContextClusterStorage(env);
		dotPdistStorage = new BDBActiveNodeStorage<Long2DoubleMap[]>(env);

		Experiment exp = Experiment.getInstance();
		
		// walk the backoff chain starting from this LM's immediate backoff
		ArrayList<Integer> lmIds = new ArrayList<Integer>();
		LanguageModel boLM = exp.getLM(lm.getBackoffLM());
		boContextReducer = new ContextReductionTrainingDataFilter(boLM.getOvertOrder(), boLM.getHiddenOrder());
		System.out.printf("backoff forest: %s, context order: overt=%d, hidden=%d\n", 
				boLM.getId(), boLM.getOvertOrder(), boLM.getHiddenOrder());

		while(boLM != null) {
			lmIds.add(boLM.getIdNum());
			if (boLM.getBackoffLM() == null) break;
			boLM = exp.getLM(boLM.getBackoffLM());
		}
		
		boLMs = new LanguageModel[lmIds.size()];
		for(int i=0; i<lmIds.size(); ++i) {
			boLMs[i] = exp.getLmByNumId(lmIds.get(i));
			System.out.printf("boLMs[%d] = %s\n", i, boLMs[i].getId());
		}
		
		final Experiment.Files files = Experiment.getInstance().getFiles();

		nrDataFiles = files.getInterpolateData().size();
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("level 0");
		
		long overtMask = Experiment.getInstance().getTupleDescription().getOvertFactorsMask();
		
		// DataFilter also schedules context->cluster lookup jobs as a side effect
		final TrainingDataFilter filter = new DataFilter(new MaskedFuturesTrainingDataFilter(overtMask), group, boLMs);
		
		for(int split=0; split<nrDataFiles; ++split) {
			final String[] filenames = files.getInterpolateDataFiles(split);
			final int id = split;
			
			Runnable run = new Runnable() {
				@Override
				public void run() {
					
					try {
						TrainingDataUtil.combineAndReduceContext(filenames, 
								new File(dataDir, makeDataFilename(0, id)).getAbsolutePath(), 
								lm.getOvertOrder(), lm.getHiddenOrder(), 
								lm.getHistoryTree().getPayload().clusterid, filter);
					} catch (IOException e) {
						e.printStackTrace();
					}
					System.out.printf("data #%d done\n", id);
				}
			};
			
			// turns out it takes too much memory to do this in parallel...
			// so the runnable is executed synchronously instead of via the job manager
			//manager.addJob(group, new Job(run, "data #" + Integer.toString(id)));
			run.run();
		}

		// wait for the context->cluster jobs scheduled by the filter above
		group.join();
		
		// prepare data with non-reduced futures for population (filter == null)
		Runnable run = new Runnable() {
			@Override
			public void run() {
				try {
					for(int split=0; split<nrDataFiles; ++split) {
						final String[] filenames = files.getInterpolateDataFiles(split);
						TrainingDataUtil.combineAndReduceContext(filenames, 
								new File(dataDir, makeDataFilename(0, split, true)).getAbsolutePath(), 
								lm.getOvertOrder(), lm.getHiddenOrder(), 
								lm.getHistoryTree().getPayload().clusterid, null);
					}
				} catch(IOException e) {
					e.printStackTrace();
				}
			}
		};
		manager.addJob(group, new Job(run, "prepare full data"));
		
		boCluster2WordProb = new HashMap<ContextClusters, Long2DoubleMap>(10000);

		// precompute backoff word probabilities from the level-0 data of each split
		for(byte dId = 0; dId < nrDataFiles; ++dId) {
			File dataFile = new File(dataDir, makeDataFilename(0, dId));
			if (!dataFile.isFile()) {
				// NOTE(review): early return leaves env open and "prepare full
				// data" possibly still running -- presumably treated as fatal
				System.err.printf("Cannot find file %s, ", dataFile.getAbsolutePath());
				return;
			}
			FileChannel inputChannel = new FileInputStream(dataFile).getChannel();
			TrainingDataNodeReader reader = new OnDiskTrainingDataNodeReader(inputChannel);
			ReadableTrainingDataNode node = reader.getNext();
			
			System.out.printf("computing backoff probabilities in file #%d\n", dId);
			computeBackoffProbabilities(node.getData(0));
			inputChannel.close();
		}
		group.join();
	}
	
	/**
	 * Drives the level-by-level population of the history tree: for each
	 * level it estimates backoff discounts, splits the training data of every
	 * split according to the tree questions, and schedules asynchronous
	 * population of the level's data.  Terminates when a split round produces
	 * no nodes for the next level.
	 *
	 * @throws IOException if reading/writing the per-level data files fails
	 */
	public void populate() throws IOException {
		JobManager manager = JobManager.getInstance();
		JobGroup popGroup = manager.createJobGroup("pop group");
		
		int level = 0;
		// dot-P distributions of the previous level; null on the first iteration
		ActiveNodeStorage<Long2DoubleMap[]>.ActiveNodeCollection collection = null;
		while(true) {
			File dataFiles[] = new File[nrDataFiles];
			FileChannel inputChannels[] = new FileChannel[nrDataFiles];
			final TrainingDataNodeReader readers[] = new TrainingDataNodeReader[nrDataFiles];
			
			// open this level's data file for every split
			for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
				dataFiles[dataId] = new File(dataDir, makeDataFilename(level, dataId));
				if (!dataFiles[dataId].isFile()) {
					System.err.printf("Cannot find file %s, ", dataFiles[dataId].getAbsolutePath());
					return;
				}
				inputChannels[dataId] = new FileInputStream(dataFiles[dataId]).getChannel();
				readers[dataId] = new OnDiskTrainingDataNodeReader(inputChannels[dataId]);
	
			}
			ActiveNodeStorage<Long2DoubleMap[]>.ActiveNodeCollection nextCollection = dotPdistStorage.createCollection("level#"+Integer.toString(level));

			System.out.printf("******* ESTIMATING LEVEL %d ********\n", level);
			
			// first pass over the data: estimate discounts, filling nextCollection
			estimateBackoffLevel(readers, collection, nextCollection);

			JobGroup group = manager.createJobGroup("split level");
			
			File newDataFiles[] = new File[nrDataFiles];
			final TrainingDataNodeWriter writers[] = new TrainingDataNodeWriter[nrDataFiles];
			final MutableInteger nextLevelNodeCounts[] = new MutableInteger[nrDataFiles];
			RandomAccessFile outFiles[] = new RandomAccessFile[nrDataFiles];
			for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
				// rewind and re-open the reader for the second (splitting) pass
				inputChannels[dataId].position(0);
				readers[dataId] = new OnDiskTrainingDataNodeReader(inputChannels[dataId]);
				
				newDataFiles[dataId] = new File(dataDir, makeDataFilename(level+1, dataId));
				outFiles[dataId] = new RandomAccessFile(newDataFiles[dataId], "rw");
				outFiles[dataId].getChannel().truncate(0);

				writers[dataId] = new OnDiskTrainingDataNodeWriter(outFiles[dataId].getChannel());
				nextLevelNodeCounts[dataId] = new MutableInteger();
				
				final byte dId = dataId;
				Runnable run = new Runnable() {

					@Override
					public void run() {
						try {
							nextLevelNodeCounts[dId].set(splitData(readers[dId], writers[dId]));
						} catch (IOException e) {
							e.printStackTrace();
						}
					}
				};
				Job job = new Job(run, "data#" + Byte.toString(dataId));
				manager.addJob(group, job);
			}
			group.join();
			
			// close this level's I/O and drop references so they can be GC'd
			for(byte dataId=0; dataId<nrDataFiles; ++dataId) {
				inputChannels[dataId].close();
				readers[dataId] = null;
				outFiles[dataId].close();
				writers[dataId] = null;				
			}
			inputChannels = null;
			outFiles = null;
			
			// all splits must agree on the number of next-level nodes
			for(byte dataId=1; dataId<nrDataFiles; ++dataId) {
				assert(nextLevelNodeCounts[dataId].intValue() == nextLevelNodeCounts[0].intValue());
			}
			
			if (collection != null) {
				dotPdistStorage.removeCollection(collection.getName());
			}
			collection = nextCollection;

			// delete data files from the previous level
			for(File f : dataFiles) {
				f.delete();
			}

			// make sure the previous population round has terminated by now
			popGroup.join();
			
			{
				// schedule population of this level's (non-reduced) data
				final int lev = level;
				Runnable run = new Runnable() {

					@Override
					public void run() {
						try {
							populateDataLevel(lev);
						} catch (IOException e) {
							e.printStackTrace();
						}
					}
				};
				manager.addJob(popGroup, new Job(run, "data pop level #"+Integer.toString(level)));
			}
			
			// no next-level nodes: clean up the now-unneeded files/collection and stop
			if (nextLevelNodeCounts[0].intValue() == 0) {
				for(File f : newDataFiles) {
					f.delete();
				}
				if (collection != null) {
					dotPdistStorage.removeCollection(collection.getName());
				}
				break;
			}
			
			
			++level;
		}
		// wait for the last population job
		popGroup.join();
		
	}

	/**
	 * Opens the non-reduced ("full") data files of the given level for every
	 * split, populates them via {@link #populateData}, then deletes the input
	 * files and closes all channels.
	 *
	 * Fix: on the early return when a data file is missing, channels and
	 * output files already opened in previous loop iterations were leaked;
	 * they are now closed before returning.
	 *
	 * @param level tree level whose data files should be populated
	 * @throws IOException if reading/writing the data files fails
	 */
	private void populateDataLevel(int level) throws IOException {
		File dataFiles[] = new File[nrDataFiles];
		File newDataFiles[] = new File[nrDataFiles];
		FileChannel inputChannels[] = new FileChannel[nrDataFiles];
		RandomAccessFile outFiles[] = new RandomAccessFile[nrDataFiles];
		final TrainingDataNodeReader readers[] = new TrainingDataNodeReader[nrDataFiles];
		final TrainingDataNodeWriter writers[] = new TrainingDataNodeWriter[nrDataFiles];
		
		for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
			dataFiles[dataId] = new File(dataDir, makeDataFilename(level, dataId, true));
			if (!dataFiles[dataId].isFile()) {
				System.err.printf("Cannot find file %s, ", dataFiles[dataId].getAbsolutePath());
				// close whatever was opened in earlier iterations before bailing out
				for(byte i = 0; i < dataId; ++i) {
					inputChannels[i].close();
					outFiles[i].close();
				}
				return;
			}
			inputChannels[dataId] = new FileInputStream(dataFiles[dataId]).getChannel();
			readers[dataId] = new OnDiskTrainingDataNodeReader(inputChannels[dataId]);

			newDataFiles[dataId] = new File(dataDir, makeDataFilename(level+1, dataId, true));
			outFiles[dataId] = new RandomAccessFile(newDataFiles[dataId], "rw");
			outFiles[dataId].getChannel().truncate(0);

			writers[dataId] = new OnDiskTrainingDataNodeWriter(outFiles[dataId].getChannel());
			
		}

		populateData(readers, writers);
		
		// delete data files from the previous level and release all handles
		for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
			dataFiles[dataId].delete();
			inputChannels[dataId].close();
			outFiles[dataId].close();
		}
		
	}
	
	/**
	 * Streams node records from all splits in lock step.  For leaf clusters
	 * the joint counts are accumulated directly; for internal clusters the
	 * data is split by the node's question into left/right child nodes for
	 * the next level (counts are gathered as a by-product of the split).
	 * Every visited cluster except backoff leaves is then populated.
	 *
	 * @param readers one aligned reader per split (same node sequence in each)
	 * @param writers one writer per split receiving next-level child nodes
	 * @throws IOException if reading or writing the data fails
	 */
	private void populateData(TrainingDataNodeReader readers[], TrainingDataNodeWriter writers[]) throws IOException {
		while(true) {
			ReadableTrainingDataNode dataNodes[] = new ReadableTrainingDataNode[readers.length];
			for(byte dataId=0; dataId<readers.length; ++dataId) {
				dataNodes[dataId] = readers[dataId].getNext();
			}
			// splits are aligned, so split 0 running out means all are done
			if (dataNodes[0] == null) return;
			int nodeId = dataNodes[0].getNodeId();
			
			for(byte dataId = 1; dataId < readers.length; ++dataId) {
				assert(dataNodes[dataId].getNodeId() == nodeId);
			}
			
			Long2IntMap counts = new Long2IntMap();

			final BinaryTree<HistoryTreePayload> oldLeaf = nodes[nodeId];
			
			if (oldLeaf.isLeaf()) {
				// leaf cluster: accumulate joint counts across all splits
				for(byte dataId = 0; dataId < readers.length; ++dataId) {
					ReadableTrainingData data = dataNodes[dataId].getData(0);
					while(data.hasNext()) {
						TrainingDataBlock block = data.next();
						block.addCounts(counts);
					}
				}
				if (oldLeaf.getPayload().isBackoff) {
					// backoff leaves are skipped (not populated here)
					continue;
				}
			} else {
				// internal node: route each split's data to the two children
				for(byte dataId = 0; dataId < readers.length; ++dataId) {
					TrainingDataNodeWriter writer = writers[dataId];
					
					BinaryTree<HistoryTreePayload> left = oldLeaf.getLeft();
					BinaryTree<HistoryTreePayload> right = oldLeaf.getRight();
					
					int leftNodeId = left.getPayload().clusterid;
					int rightNodeId = right.getPayload().clusterid;
					
					WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
					WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
					
					writer.add(leftDataNode);
					writer.add(rightDataNode);
					
					// split by the node's question; counts are filled as a side effect
					TrainingDataUtil.splitData(dataNodes[dataId].getData(0), oldLeaf.getPayload().question, 
							rightDataNode.getData(0), leftDataNode.getData(0), counts);
		
				}				
			}
			
			populateCluster(nodeId, counts);
		}
	}
	
	/**
	 * Splits one data file to the next tree level: each internal node's data
	 * is routed by its question to its two children; leaf nodes are drained
	 * without producing output.  For backoff leaves a default interpolation
	 * lambda is assigned based on the grandparent's depth.
	 *
	 * @param reader source of this level's node records
	 * @param writer destination for the next level's child node records
	 * @return the number of nodes written for the next level (2 per split node)
	 * @throws IOException if reading or writing fails
	 */
	private int splitData(TrainingDataNodeReader reader, TrainingDataNodeWriter writer) throws IOException {
		int nextLevelNodeCount = 0;
		
		for(ReadableTrainingDataNode nodeData = reader.getNext(); nodeData != null; nodeData = reader.getNext()) {
			final BinaryTree<HistoryTreePayload> oldLeaf = nodes[nodeData.getNodeId()];

			
			if (oldLeaf.isLeaf()) {
				// skip node -- the data must still be consumed to keep the stream aligned
				while(nodeData.getData(0).hasNext()) {
					nodeData.getData(0).next();
				}
				if (oldLeaf.getPayload().isBackoff) {
					// lambda shrinks with the depth of the grandparent: 1/sqrt(1+distance)
					BinaryTree<HistoryTreePayload> grandparent = oldLeaf.getParent().getParent();
					int distance = grandparent.distanceToRoot();
					oldLeaf.getPayload().lambda = 1.0 / Math.sqrt(1 + distance);
					continue;
				}
			} else {
				nextLevelNodeCount += 2;
				
				BinaryTree<HistoryTreePayload> left = oldLeaf.getLeft();
				BinaryTree<HistoryTreePayload> right = oldLeaf.getRight();
				
				int leftNodeId = left.getPayload().clusterid;
				int rightNodeId = right.getPayload().clusterid;
				
				WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
				WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
				
				writer.add(leftDataNode);
				writer.add(rightDataNode);
				
				// route data to the children according to the node's question
				TrainingDataUtil.splitData(nodeData.getData(0), oldLeaf.getPayload().question, 
						rightDataNode.getData(0), leftDataNode.getData(0));
	
			}
		}
		return nextLevelNodeCount;
	}
	
	/**
	 * Estimates the probability trees for one cluster from its joint counts
	 * and immediately writes them to storage.
	 */
	private void populateCluster(int clusterid, Long2IntMap counts) {
		saveCluster(clusterid, estimateClusterProbabilities(clusterid, counts));
	}
	
	/**
	 * Normalizes each per-word probability tree and stores it under this
	 * LM's numeric id and the given cluster.
	 */
	private void saveCluster(int clusterid, Map<FactorTuple, OnDiskCompactProbTree> probTrees) {
		for (Entry<FactorTuple, OnDiskCompactProbTree> e : probTrees.entrySet()) {
			OnDiskCompactProbTree tree = e.getValue();
			tree.normalize();
			probTreeStorage.setProbTree(lm.getIdNum(), clusterid, e.getKey(), tree);
		}

	}
	
	/**
	 * Estimates the smoothed probability trees for one cluster from its joint
	 * (word x hidden-factor) counts: applies modified Kneser-Ney discounting
	 * over word counts, prunes low-probability trees, redistributes mass to
	 * unseen events at the root, and updates the node's cumulative backoff
	 * weight.  The returned map (word overt factors -> hidden-factor prob
	 * tree) is normalized to sum to 1 over the cluster.
	 *
	 * @param clusterid the cluster (tree node) being estimated
	 * @param counts joint counts keyed by packed factor tuples
	 * @return per-word probability trees; empty if the cluster's alpha is 0
	 */
	private Map<FactorTuple, OnDiskCompactProbTree> estimateClusterProbabilities(
			int clusterid, 
			Long2IntMap counts) 
	{
		Experiment experiment = Experiment.getInstance();
		FactorTupleDescription desc = experiment.getTupleDescription();

		final long overtMask = desc.getOvertFactorsMask();
		final long hiddenMask = desc.getHiddenFactorsMask();
		
		Map<FactorTuple, OnDiskCompactProbTree> data = new HashMap<FactorTuple, OnDiskCompactProbTree>();

		BinaryTree<HistoryTreePayload> node = nodes[clusterid];
		DiscountWeights weights = discountWeights[clusterid];
		
		// alpha == 0: all mass goes to the backoff; nothing to estimate here
		if (weights.alpha == 0) {
			node.getPayload().backoff = 1.0;
			return data;
		}
		
		// convert joint counts to word counts
		Long2IntMap wordCounts = new Long2IntMap(counts.size());
		for(Long2IntMap.Iterator i=counts.iterator(); i.hasNext();) {
			Long2IntMap.Entry e = i.next();
			wordCounts.addAndGet(e.getKey() & overtMask, e.getValue());
		}
		// build the modified Kneser-Ney smoother from the word-count histogram
		SmootherProducer producer = new SmootherProducer();
		for(Long2IntMap.Iterator i=wordCounts.iterator(); i.hasNext();) {
			producer.addCount(i.next().getValue());
		}
		KNModSmoother smoother = new KNModSmoother(producer, weights.d1, weights.d2, weights.d3);

		Map<FactorTuple,FactorTuple> allOvertFactors = experiment.getTupleDescription().getAllOvertFactors();

		// populate distributions at leaves
		FactorTuple dummyTuple = new FactorTuple();

		for(Long2IntMap.Iterator iter = counts.iterator(); iter.hasNext();) {
			Long2IntMap.Entry entry = iter.next();
			long tuple = entry.getKey();
			int count = entry.getValue();

			// canonicalize the overt part so map lookups share FactorTuple instances
			dummyTuple.setBits(tuple & overtMask);
			FactorTuple overtFactors = allOvertFactors.get(dummyTuple);
			if (overtFactors == null) {
				overtFactors = new FactorTuple(tuple & overtMask);
				System.out.printf("unkown overt factors: %s\n", overtFactors.toStringNoNull());
			}
			//FactorTuple hiddenFactors = new FactorTuple(tupleCount.tuple & hiddenMask);
			int packedHiddenFactors = desc.packHiddenFactorsToInt(tuple & hiddenMask);

			// accumulate the count under the word's hidden-factor tree
			OnDiskCompactProbTree probTree = data.get(overtFactors);
			if (probTree == null) {
				probTree = new OnDiskCompactProbTree(packedHiddenFactors, count);
				data.put(overtFactors, probTree);
			} else { 
				probTree.addProbability(packedHiddenFactors, count);
			} 
			assert(probTree.checkTree());
		}

		// convert counts to distributions; prune trees below the LM's threshold
		double totalCount = 0;
		for(Iterator<Map.Entry<FactorTuple, OnDiskCompactProbTree>> iterator = data.entrySet().iterator(); iterator.hasNext(); ) {
			OnDiskCompactProbTree probTree = iterator.next().getValue();
			if (probTree.getTotalProb() > lm.getPruningThreshold()) { 
				//AbstractCompactProbTree compactTree = probTree.getTree();
				//compactTree.scale(revTotalCount);
				//probTree.setTree(compactTree.compact());
				totalCount += probTree.getTotalProb();
			} else {
				iterator.remove();
			}
		}

		// NOTE(review): exact double equality; relies on totals being integer-valued -- confirm
		assert(smoother.getTotalCount() == totalCount);

		// replace each word's raw count mass by alpha * smoothed probability
		for(OnDiskCompactProbTree probTree : data.values()) {
			long count = Math.round(probTree.getTotalProb());
			double prob = smoother.getProb(count);
			probTree.scale(weights.alpha * prob / count);
		}

		// only the root node distributes probability mass to unseen events
		boolean estimateUnseen = node.getParent() == null;
		unseen: if (estimateUnseen) {
			HashSet<FactorTuple> unseenFactors = new HashSet<FactorTuple>(10);

			// words never seen in this cluster, excluding <NULL> and sentence-start
			FactorTuple nullEvent = allOvertFactors.get(new FactorTuple(0));
			FactorTuple startEvent = allOvertFactors.get(new FactorTuple(desc.createStartTuple() & overtMask));
			for(FactorTuple overtFactors : allOvertFactors.keySet()) {
				if (!data.containsKey(overtFactors) && !(overtFactors == nullEvent || overtFactors == startEvent)) {
					unseenFactors.add(overtFactors);
					System.out.printf("unseen event: %s\n", overtFactors.toStringNoNull());
				}
			}
			if (unseenFactors.size() == 0) {
				break unseen;
			}

			// map each hidden-factor assignment to the set of distinct words carrying it
			HashMap<FactorTuple, HashSet<FactorTuple>> hidden2overt = new HashMap<FactorTuple, HashSet<FactorTuple>>(100);
			for(Long2IntMap.Iterator iter = counts.iterator(); iter.hasNext();) {
				Long2IntMap.Entry entry = iter.next();
				long tuple = entry.getKey();
				dummyTuple.setBits(tuple & overtMask);

				FactorTuple overtFactors = allOvertFactors.get(dummyTuple);
				FactorTuple hiddenFactors = new FactorTuple(tuple & hiddenMask);
				HashSet<FactorTuple> set = hidden2overt.get(hiddenFactors);
				if (set == null) {
					set = new HashSet<FactorTuple>();					
					hidden2overt.put(hiddenFactors, set);
				}
				set.add(overtFactors);
			}

			// the probability of an unseen event to have a tag is propotional to the number of distinct words with that tag
			OnDiskCompactProbTree unseenTree = new OnDiskCompactProbTree(hidden2overt.size());
			int count = 0;
			for(Map.Entry<FactorTuple,HashSet<FactorTuple>> entry : hidden2overt.entrySet()) {
				int size = entry.getValue().size();
				count += size;
				FactorTuple hiddenFactors = entry.getKey();
				int compactHiddenFactors = desc.packHiddenFactorsToInt(hiddenFactors.getBits());
				if (compactHiddenFactors > 0) {
					// ignore <NULL> should they appear
					unseenTree.addProbability(compactHiddenFactors, size);
				}
			}
			double unseenProbability = unseenFactors.size() / totalCount;
			unseenTree.scale(weights.alpha * unseenProbability / (count * unseenFactors.size()));

			// make sure the distribution sums to 1.0 after adding unseen probabilities
			for(OnDiskCompactProbTree probTree : data.values()) {
				probTree.scale(1.0 - unseenProbability);
			}

			// every unseen word gets its own copy of the shared unseen tree
			for(FactorTuple overtFactors : unseenFactors) {
				OnDiskCompactProbTree probTree = (OnDiskCompactProbTree)unseenTree.clone();
				data.put(overtFactors, probTree);
			}			
			System.out.printf("distributing %e probability among %d unseen events\n", 
					unseenProbability, unseenFactors.size());
		}
		
		node.getPayload().backoff = weights.alpha * smoother.getBackoffProb() + 1 - weights.alpha;
		
		// according to the new semantics
		// backoff weights are cumulative
		// and the probabilities in a cluster sum to 1
		
		if (node.getParent() != null) {
			node.getPayload().backoff = node.getPayload().backoff * node.getPayload().lambda +
				(1 - node.getPayload().lambda) * node.getParent().getPayload().backoff;
		}
		
		// renormalize so the cluster's trees sum to 1
		double totalProb = 0;
		for(OnDiskCompactProbTree probTree : data.values()) {
			totalProb += probTree.getTotalProb();
		}

		for(OnDiskCompactProbTree probTree : data.values()) {
			probTree.scale(1.0/totalProb);
		}
		
		return data;
	}
	
	/**
	 * First pass over one tree level: for every node record (aligned across
	 * all splits) schedules a discount-estimation job, feeding it the
	 * parent's dot-P distributions from the previous level's collection (or
	 * fresh empty maps at the root level) and collecting this level's results
	 * into {@code nextCollection}.
	 *
	 * @param readers one aligned reader per split
	 * @param collection previous level's dot-P distributions; null at level 0
	 * @param nextCollection receives this level's dot-P distributions
	 * @throws IOException if reading the node records fails
	 */
	private void estimateBackoffLevel(TrainingDataNodeReader readers[], 
			ActiveNodeStorage<Long2DoubleMap[]>.ActiveNodeCollection collection,
			ActiveNodeStorage<Long2DoubleMap[]>.ActiveNodeCollection nextCollection) throws IOException 
	{
		ReadableTrainingDataNode dataNodes[] = new ReadableTrainingDataNode[readers.length];
		
		int count = 0;
		JobGroup group = JobManager.getInstance().createJobGroup("backoff estimation");
		
		while(true) {
			for(byte dataId=0; dataId<readers.length; ++dataId) {
				dataNodes[dataId] = readers[dataId].getNext();
			}
			// splits are aligned; split 0 exhausted means the level is done
			if (dataNodes[0] == null) break;
			int nodeId = dataNodes[0].getNodeId();
			
			for(byte dataId = 1; dataId < readers.length; ++dataId) {
				assert(dataNodes[dataId].getNodeId() == nodeId);
			}
			
			ReadableTrainingData data[] = new ReadableTrainingData[readers.length];
			for(byte dataId = 0; dataId < readers.length; ++dataId) {
				data[dataId] = dataNodes[dataId].getData(0);
				data[dataId].start();
			}
			// parent distributions: empty maps at the root, otherwise fetched
			// from the previous level's collection by the parent's cluster id
			Long2DoubleMap parentDotPdists[];
			if (collection == null) {
				parentDotPdists = new Long2DoubleMap[nrDataFiles];
				for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
					parentDotPdists[dataId] = new Long2DoubleMap();
				}
			} else {
				int parentId = nodes[nodeId].getParent().getPayload().clusterid;
				parentDotPdists = collection.getNode(parentId);
			}
			
			computeNodeDiscount(nodeId, data, parentDotPdists, nextCollection, group);
			
			// periodically drain the job group to bound memory usage
			if (++count % 100 == 0) {
				group.join();
			}
		}
		group.join();
	}
	
	/**
	 * Wraps another {@link TrainingDataFilter} and, as a side effect of
	 * filtering, schedules asynchronous jobs that map each reduced backoff
	 * context to its cluster ids in every backoff LM, caching the result in
	 * {@code contextClusterStorage}.
	 *
	 * Fix: the periodic-synchronization {@code if} had an entirely
	 * commented-out body (dead conditional); it is removed while keeping the
	 * job counter so the batching logic can be re-enabled later.
	 */
	private class DataFilter implements TrainingDataFilter {
		final TrainingDataFilter filter;   // downstream filter; may be null
		final JobGroup group;              // group receiving the lookup jobs
		final AtomicInteger submittedJobs; // running count of scheduled jobs
		final LanguageModel lms[];         // backoff LM chain
		final static int MAX_JOBS = 100000; // batching threshold (synchronization currently disabled)
		final JobManager manager;
		/**
		 * @param filter downstream filter to delegate to (may be null)
		 * @param group job group for the context-to-cluster jobs
		 * @param lms the chain of backoff language models
		 */
		public DataFilter(TrainingDataFilter filter, JobGroup group, LanguageModel lms[]) {
			this.filter = filter;
			this.group = group;
			this.lms = lms;
			this.submittedJobs = new AtomicInteger();
			this.manager = JobManager.getInstance();
		}

		/* (non-Javadoc)
		 * @see edu.umd.clip.lm.model.training.TrainingDataFilter#filterData(edu.umd.clip.lm.model.training.ContextFuturesPair)
		 */
		@Override
		public ContextFuturesPair filterData(final ContextFuturesPair pair) {
			
			Runnable run = new Runnable() {
				@Override
				public void run() {
					Context ctx = boContextReducer.reduceContext(pair.getContext());
					if (contextClusterStorage.get(ctx) != null) {
						// already done it
						return;
					}
					
					// look the context up in every backoff LM of the chain
					int clusters[] = new int[lms.length];
					for(int i=0; i<lms.length; ++i) {
						clusters[i] = lms[i].context2cluster(ctx);
					}
					
					// putIfAbsent: another job may have raced on the same context
					contextClusterStorage.putIfAbsent(ctx, new ContextClusters(clusters));
				}
			};
			
			Job job = new Job(run, "context-cluster");
			manager.addJob(group, job);
			// The periodic group.join()/progress report that used to fire every
			// MAX_JOBS submissions is disabled; the counter is kept so it can be
			// re-enabled easily.
			submittedJobs.incrementAndGet();
			
			if (filter != null) {
				return filter.filterData(pair);
			} 
			return pair;
		}
		
	}
	
	/**
	 * Per-cluster smoothing parameters: the interpolation weight {@code alpha},
	 * the three modified Kneser-Ney discounts {@code d1}-{@code d3}, and
	 * {@code gamma}.
	 *
	 * Fix: {@code clone()} now follows the standard idiom of delegating to
	 * {@code super.clone()} instead of hand-copying fields; all fields are
	 * primitives, so the copy is identical.
	 */
	private static class DiscountWeights implements Cloneable {
		double alpha;
		double d1;
		double d2;
		double d3;
		double gamma;
		@Override
		protected Object clone() {
			try {
				return super.clone();
			} catch (CloneNotSupportedException e) {
				// cannot happen: this class implements Cloneable
				throw new AssertionError(e);
			}
		}
	}
	
	/**
	 * Precomputes backoff word probabilities for every (word, backoff-cluster)
	 * pair seen in the given training data and caches them in
	 * {@code boCluster2WordProb}.  Work is parallelized per word: each job
	 * first batches all prob-tree requests into a {@link RequestBundle},
	 * syncs, then decodes the probabilities.  Access to the shared cache is
	 * guarded by a single lock.
	 *
	 * @param data level-0 training data of one split
	 * @throws IOException if reading the data fails
	 */
	@SuppressWarnings("unchecked")
	private void computeBackoffProbabilities(ReadableTrainingData data) throws IOException {
		Experiment exp = Experiment.getInstance();
		final long overtMask = exp.getTupleDescription().getOvertFactorsMask();

		// NOTE, this is different from boLMs since it includes all LMs defined in the expriment
		final int lmSequence[] = exp.getLmIdSequence(lm);
		final LanguageModel firstBackoff = exp.getLM(lm.getBackoffLM());
		final int lmIds[] = new int[boLMs.length];
		for(byte i=0; i<boLMs.length; ++i) {
			lmIds[i] = boLMs[i].getIdNum();
		}

		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("backoff probs");
		// guards boCluster2WordProb and the per-cluster distributions
		final Lock lock = new ReentrantLock();
		
		for(; data.hasNext();) {
			// gather, per word, the set of backoff cluster tuples it occurs with
			Map<FactorTuple,HashSet<ContextClusters>> wordsToClusters = new HashMap<FactorTuple,HashSet<ContextClusters>>(1000);
			
			TrainingDataBlock block = data.next();
			for(ContextFuturesPair pair : block) {
				Context boContext = boContextReducer.reduceContext(pair.getContext());
				ContextClusters clusters = contextClusterStorage.get(boContext);
				if (clusters == null) {
					System.err.printf("can't find the clusters for %s\n", boContext);
					continue;
				}
				
				for(TupleCountPair tc : pair.getFutures()) {
					FactorTuple word = new FactorTuple(tc.tuple & overtMask);
					HashSet<ContextClusters> map = wordsToClusters.get(word);
					if (map == null) {
						map = new HashSet<ContextClusters>(); 
						wordsToClusters.put(word, map);
					}
					map.add(clusters);
				}				
			}
			
			// one job per word: batch requests, then decode
			for(Map.Entry<FactorTuple,HashSet<ContextClusters>> entry : wordsToClusters.entrySet()) {
				final FactorTuple word = entry.getKey();
				final HashSet<ContextClusters> clusterSet = entry.getValue();
				
				Runnable run = new Runnable() {

					@Override
					public void run() {
						RequestBundle<Key,OnDiskCompactProbTree> bundle = new RequestBundle<Key,OnDiskCompactProbTree>(probTreeStorage);

						long start = System.currentTimeMillis();
						
						// phase 1: queue prob-tree requests for all uncomputed clusters
						for(ContextClusters clusters : clusterSet) {
							lock.lock();
							Long2DoubleMap dist = boCluster2WordProb.get(clusters);
							if (dist == null) {
								dist = new Long2DoubleMap();
								boCluster2WordProb.put(clusters, dist);
							}

							if (dist.get(word.getBits()) > 0) {
								lock.unlock();
								// already computed
								continue;
							}
							lock.unlock();
							
							for(byte i = 0; i<boLMs.length; ++i) {
								int lmId = boLMs[i].getIdNum();
								Key key = new Key(lmId, clusters.getClusters()[i], word.getBits());
								bundle.request(key);
							}
						}
						
						try {
							bundle.sync();
						// NOTE(review): interrupt is swallowed without re-interrupting -- confirm intentional
						} catch (InterruptedException e) {}
						
						long requestTime = System.currentTimeMillis() - start;
						
						start = System.currentTimeMillis();
						
						// phase 2: decode the probability for each remaining cluster
						for(ContextClusters clusters : clusterSet) {
							lock.lock();
							Long2DoubleMap dist = boCluster2WordProb.get(clusters);
							if (dist == null) {
								dist = new Long2DoubleMap();
								boCluster2WordProb.put(clusters, dist);
							}

							if (dist.get(word.getBits()) > 0) {
								// already computed
								lock.unlock();
								continue;
							}
							lock.unlock();
							
							// indexed by numeric LM id within the full LM sequence
							BinaryTree<HistoryTreePayload> requestNodes[] = new BinaryTree[lmSequence.length];
							for(byte i=0; i<boLMs.length; ++i) {
								LanguageModel boLM = boLMs[i];
								requestNodes[boLM.getIdNum()] = boLM.getDecodingRuntime().getNode(clusters.getClusters()[i]);
							}
							OnDiskCompactProbTree probTree = firstBackoff.getDecoder().computeProbTree(lmIds, requestNodes, word.getBits(), bundle.getResults(), null);

							if (probTree != null) {
								lock.lock();
								dist.put(word.getBits(), probTree.getProb());
								lock.unlock();
							}
						}	
						
						long decodingTime = System.currentTimeMillis() - start;
						/*
						System.out.printf("Word: %s (%d clusters) done (%gs requests, %gs decoding)\n", 
								word.toStringNoNull(), clusterSet.size(),
								0.001*requestTime, 0.001*decodingTime); 
						*/
					}
				};
				manager.addJob(group, new Job(run, "computing " + word.toStringNoNull()));
			}
			// synchronize after each block so memory stays bounded
			group.join();
			
		}
		System.out.printf("Total number of backoff clusters: %d\n", boCluster2WordProb.size());
	}
	
	// minimum count threshold; not referenced in this part of the file -- TODO confirm usage
	private static final long MIN_COUNTS = 5;
	
	/**
	 * Estimates the discounting parameters (alpha, D1, D2, D3, gamma) for the
	 * history-tree node identified by {@code clusterid} and schedules the
	 * optimization as a job in {@code group}.
	 *
	 * The counting phase (run synchronously) aggregates per data shard: the
	 * future-word counts, the set of words seen, and per-backoff-cluster word
	 * counts. The queued job then runs K-fold cross-validation (each shard is
	 * heldout once) minimizing heldout entropy with Powell's method, averages
	 * the per-fold weights, recomputes gamma, stores the result in
	 * {@code discountWeights[clusterid]}, and publishes the per-fold dotP
	 * distributions into {@code nextCollection}.
	 *
	 * @param clusterid      index of the node/cluster being optimized
	 * @param data           one training-data reader per shard (fully consumed here)
	 * @param parentDists    per-shard dotP distributions of the parent node
	 * @param nextCollection sink for this node's per-shard dotP distributions
	 * @param group          job group the optimization job is added to
	 * @throws IOException propagated from the training-data readers
	 */
	@SuppressWarnings("unchecked") // generic array creation below is unavoidable in Java
	private void computeNodeDiscount(final int clusterid,
			ReadableTrainingData data[], final Long2DoubleMap parentDists[],
			final ActiveNodeStorage<Long2DoubleMap[]>.ActiveNodeCollection nextCollection,
			JobGroup group) throws IOException
	{
		// Per-shard future-word counts over all events in the shard.
		final Long2IntMap dataCounts[] = new Long2IntMap[nrDataFiles];
		// Per-shard set of words observed (support for the dotP distributions).
		final HashSet<Long> devWords[] = new HashSet[nrDataFiles];
		// Per-shard map: backoff-model context clusters -> future-word counts
		// (used as the heldout data during cross-validation).
		final Map<ContextClusters, Long2IntMap> boClusters2WordCounts[] = new HashMap[nrDataFiles];

		for(byte dataId=0; dataId < nrDataFiles; ++dataId) {
			dataCounts[dataId] = new Long2IntMap(parentDists[0].size());
			devWords[dataId] = new HashSet<Long>(parentDists[0].size());
			boClusters2WordCounts[dataId] = new HashMap<ContextClusters, Long2IntMap>();
		}

		for(byte dId = 0; dId < data.length; ++dId) {
			while (data[dId].hasNext()) {
				TrainingDataBlock block = data[dId].next();
				block.addCounts(dataCounts[dId]);
				Map<ContextClusters, Long2IntMap> wordCounts = boClusters2WordCounts[dId];
				HashSet<Long> wordSet = devWords[dId];

				for(ContextFuturesPair pair : block) {
					// Map the full context to the backoff model's reduced context
					// and look up its precomputed clusters.
					Context boContext = boContextReducer.reduceContext(pair.getContext());
					ContextClusters clusters = contextClusterStorage.get(boContext);
					if (clusters == null) {
						System.err.printf("can't find the clusters for %s\n", boContext);
						continue;
					}
					Long2IntMap map = wordCounts.get(clusters);
					if (map == null) {
						map = new Long2IntMap(pair.getFutures().length);
						wordCounts.put(clusters, map);
					}
					for(TupleCountPair tc : pair.getFutures()) {
						map.addAndGet(tc.tuple, tc.count);
						wordSet.add(tc.tuple);
					}
				}
			}
		}

		Runnable run = new Runnable() {

			@Override
			public void run() {
				DiscountWeights tmpWeights[] = new DiscountWeights[nrDataFiles];
				BinaryTree<HistoryTreePayload> theNode = nodes[clusterid];
				Long2DoubleMap dotPdists[] = new Long2DoubleMap[nrDataFiles];
				Long2IntMap trainCounts[] = new Long2IntMap[nrDataFiles];

				// Cross-validation: each shard serves as heldout data once,
				// with all remaining shards pooled as training counts.
				for(byte devDataId=0; devDataId<nrDataFiles; ++devDataId) {

					DiscountWeights weights = (DiscountWeights) discountWeights[clusterid].clone();
					tmpWeights[devDataId] = weights;
					trainCounts[devDataId] = new Long2IntMap(dataCounts[0].size()*2);
					for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
						if (dataId != devDataId) {
							trainCounts[devDataId].addMap(dataCounts[dataId]);
						}
					}
					long totalTrainCounts = trainCounts[devDataId].sumValues();
					long totalDevCounts = dataCounts[devDataId].sumValues();

					EntropyFunction function = new EntropyFunction();
					function.boClusters2WordCounts = boClusters2WordCounts[devDataId];
					function.devWords = devWords[devDataId];
					function.trainCounts = trainCounts[devDataId];
					function.parentDotPdist = parentDists[devDataId];
					if (theNode.getParent() == null) {
						// root of the history tree: no parent gamma to inherit
						function.parentGamma = 0;
					} else {
						function.parentGamma = discountWeights[theNode.getParent().getPayload().clusterid].gamma;
					}
					function.lambda = nodes[clusterid].getPayload().lambda;
					function.initialize();

					if (totalTrainCounts > MIN_COUNTS && totalDevCounts > MIN_COUNTS) {
						// Minimize heldout entropy; the best parameters are tracked
						// inside the function and restored by setMinimum(), so the
						// return value (the minimal entropy) is not needed here.
						Powell.search(function.params2vector(), 0.01, 100, function);
						function.setMinimum();
					} else {
						// too few items -- unreliable
						function.alpha = 0.0;
					}
					weights.alpha = function.alpha;
					weights.gamma = function.gamma;
					// Fall back to default discounts when the corresponding
					// count-of-count bucket is empty or discounting is off (alpha == 0).
					weights.d1 = function.N1 > 0 && function.alpha > 0 ? function.D1 : EntropyFunction.DEFAULT_D1;
					weights.d2 = function.N2 > 0 && function.alpha > 0 ? function.D2 : EntropyFunction.DEFAULT_D2;
					weights.d3 = function.N3plus > 0 && function.alpha > 0 ? function.D3 : EntropyFunction.DEFAULT_D3;
				}

				// Average the per-fold weights and track their spread for reporting.
				double minAlpha = 1;
				double minD1 = 1;
				double minD2 = 2;
				double minD3 = 3;
				double maxAlpha = 0;
				double maxD1 = 0;
				double maxD2 = 0;
				double maxD3 = 0;

				DiscountWeights weights = new DiscountWeights();
				for(byte dataId=0; dataId < nrDataFiles; ++dataId) {
					weights.alpha += tmpWeights[dataId].alpha / nrDataFiles;
					weights.d1 += tmpWeights[dataId].d1 / nrDataFiles;
					weights.d2 += tmpWeights[dataId].d2 / nrDataFiles;
					weights.d3 += tmpWeights[dataId].d3 / nrDataFiles;

					if (tmpWeights[dataId].alpha < minAlpha) minAlpha = tmpWeights[dataId].alpha;
					if (tmpWeights[dataId].alpha > maxAlpha) maxAlpha = tmpWeights[dataId].alpha;

					if (tmpWeights[dataId].d1 < minD1) minD1 = tmpWeights[dataId].d1;
					if (tmpWeights[dataId].d1 > maxD1) maxD1 = tmpWeights[dataId].d1;

					if (tmpWeights[dataId].d2 < minD2) minD2 = tmpWeights[dataId].d2;
					if (tmpWeights[dataId].d2 > maxD2) maxD2 = tmpWeights[dataId].d2;

					if (tmpWeights[dataId].d3 < minD3) minD3 = tmpWeights[dataId].d3;
					if (tmpWeights[dataId].d3 > maxD3) maxD3 = tmpWeights[dataId].d3;
				}

				System.out.printf("alpha: %g/%g/%g, D1: %g/%g/%g, D2: %g/%g/%g, D3: %g/%g/%g\n",
						minAlpha, weights.alpha, maxAlpha,
						minD1, weights.d1, maxD1,
						minD2, weights.d2, maxD2,
						minD3, weights.d3, maxD3);

				{
					// computing the averaged gamma
					Long2IntMap allTrainCounts = new Long2IntMap(dataCounts[0].size()*2);
					for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
						allTrainCounts.addMap(dataCounts[dataId]);
					}

					EntropyFunction function = new EntropyFunction();
					function.trainCounts = allTrainCounts;
					function.totalCount = allTrainCounts.sumValues();
					if (theNode.getParent() == null) {
						function.parentGamma = 0;
					} else {
						function.parentGamma = discountWeights[theNode.getParent().getPayload().clusterid].gamma;
					}
					function.lambda = nodes[clusterid].getPayload().lambda;
					function.alpha = weights.alpha;
					function.D1 = weights.d1;
					function.D2 = weights.d2;
					function.D3 = weights.d3;
					function.computeBeta();
					function.computeGamma();

					weights.gamma = function.gamma;
					discountWeights[clusterid] = weights;

					// Build the per-fold dotP distributions to be consumed by the
					// next level.
					// NOTE(review): each iteration overwrites
					// discountWeights[clusterid].gamma, so after the loop it holds
					// the gamma of the LAST fold, replacing the pooled gamma
					// computed above -- TODO confirm this is intended.
					for(byte devDataId=0; devDataId<nrDataFiles; ++devDataId) {
						function.devWords = devWords[devDataId];
						function.trainCounts = trainCounts[devDataId];
						function.totalCount = trainCounts[devDataId].sumValues();
						function.parentDotPdist = parentDists[devDataId];

						function.computeBeta();
						function.computeGamma();

						dotPdists[devDataId] = function.makeDotPdist();
						discountWeights[clusterid].gamma = function.gamma;
					}
					nextCollection.putNode(clusterid, dotPdists);
				}
			}
		};

		Job job = new Job(run, "optimizing cluster #" + Integer.toString(clusterid));
		JobManager.getInstance().addJob(group, job);

	}
	
	/**
	 * Objective function minimized by Powell's method when optimizing the
	 * discounting parameters of a single history-tree node.
	 *
	 * The free parameters are alpha (weight of the discounted distribution)
	 * and the three absolute discounts D1, D2, D3 applied to words seen once,
	 * twice, and three-or-more times in the training counts. eval() returns
	 * the base-10 heldout entropy for the current parameters; the best
	 * parameters seen so far are tracked in minAlpha/minD1/minD2/minD3 and
	 * restored by setMinimum().
	 *
	 * Non-static inner class: eval() reads the enclosing populator's
	 * boCluster2WordProb map.
	 */
	private class EntropyFunction implements RealFunctionOfSeveralVariables {
		// gamma of the parent node's weights; callers set 0 at the tree root.
		private double parentGamma;
		// Count-of-counts over trainCounts: number of words seen exactly once,
		// exactly twice, and three or more times (computed in initialize()).
		private long N1;
		private long N2;
		private long N3plus;
		// Total number of training tokens (sum of trainCounts values).
		private long totalCount;
		// Interpolation weight between this node's estimate and its parent's.
		private double lambda;
		// Weight given to the discounted training distribution.
		private double alpha;
		// Absolute discounts for counts of 1, 2, and >= 3 (see discount()).
		private double D1;
		private double D2;
		private double D3;
		// Fraction of probability mass freed by discounting (computeBeta()).
		private double beta;
		// Backoff weight derived from alpha/beta/lambda (computeGamma()).
		private double gamma;
		// Heldout data: backoff context clusters -> future-word counts.
		private Map<ContextClusters, Long2IntMap> boClusters2WordCounts;
		// Pooled training counts (all shards except the heldout one).
		private Long2IntMap trainCounts;
		private Long2DoubleMap parentDotPdist; // \dot{p}_{n'}
		// Words observed in the data; support of the dotP distribution.
		private HashSet<Long> devWords;
		
		// Best (lowest-entropy) parameters seen across eval() invocations.
		private double minAlpha;
		private double minD1;
		private double minD2;
		private double minD3;
		private double minEntropy;
		private int nrInvocations;
		
		// Fallback discounts used when they cannot be estimated from the data.
		private static final double DEFAULT_D1 = 0.8;
		private static final double DEFAULT_D2 = 1.2;
		private static final double DEFAULT_D3 = 1.6;
		
		/**
		 * Restores the best parameters found during the search (values below
		 * 1e-7 are clamped to 0) and recomputes the dependent beta/gamma.
		 */
		void setMinimum() {
			alpha = minAlpha < 1e-7 ? 0 : minAlpha;
			D1 = minD1 < 1e-7 ? 0 : minD1;
			D2 = minD2 < 1e-7 ? 0 : minD2;
			D3 = minD3 < 1e-7 ? 0 : minD3;
			computeBeta();
			computeGamma();
			System.out.printf("powell min at %04d: E=%g, alpha=%g, D1=%g*%d, D2=%g*%d, D3=%g*%d, gamma=%g\n", 
					nrInvocations, minEntropy, alpha, D1, N1, D2, N2, D3, N3plus, gamma);
		}
		
		/**
		 * Computes the count-of-count statistics from trainCounts and seeds the
		 * search with smoother-estimated (or default) discounts and alpha = 0.5.
		 * Must be called after trainCounts is assigned and before eval() runs.
		 */
		void initialize() {
			nrInvocations = 0;
			minEntropy = Double.POSITIVE_INFINITY;
			
			SmootherProducer producer = new SmootherProducer();
			for(Long2IntMap.Iterator it = trainCounts.iterator(); it.hasNext(); ) {
				producer.addCount(it.next().getValue());
			}
			KNModSmoother smoother = new KNModSmoother(producer);
			{
				// counts[i] = number of words with training count exactly i.
				int counts[] = producer.getCounts();
				N1 = counts[1];
				N2 = counts[2];
				N3plus = 0;
				for(int i=3; i<counts.length; ++i) {
					N3plus += counts[i];
				}
			}
			if (Double.isNaN(smoother.getBackoffProb())) {
				// set some initial values if cannot be estimated
				D1 = DEFAULT_D1;
				D2 = DEFAULT_D2;
				D3 = DEFAULT_D3;
			} else {
				D1 = smoother.getD1();
				D2 = smoother.getD2();
				D3 = smoother.getD3();
			}
			alpha = 0.5;

			totalCount = producer.getTotalCount();
			
			computeBeta();
			computeGamma();
		}
		
		/** Maps an unbounded x into the open interval (yMin, yMax). */
		public double logistic(double x, double yMin, double yMax) {
			double y = (yMax - yMin) / (1 + Math.exp(-x)) + yMin;
			return y;
		}
		
		/** Inverse of logistic(): maps y in (yMin, yMax) back to the real line. */
		public double invLogistic(double y, double yMin, double yMax) {
			double x = - Math.log((yMax - yMin)/(y - yMin) - 1);
			return x;
		}
		
		/**
		 * Packs the constrained parameters into an unconstrained vector so
		 * Powell's method can search freely:
		 * alpha in (0,1), D1 in (0,1), D2 in (0,2), D3 in (0,3).
		 */
		private double[] params2vector() {
			double[] v = new double[4];
			v[0] = invLogistic(alpha, 0, 1);
			v[1] = invLogistic(D1, 0, 1);
			v[2] = invLogistic(D2, 0, 2);
			v[3] = invLogistic(D3, 0, 3);
			return v;
		}
		
		/** Inverse of params2vector(); also refreshes beta and gamma. */
		private void vector2params(double v[]) {
			alpha = logistic(v[0], 0, 1);
			D1 = logistic(v[1], 0, 1);
			D2 = logistic(v[2], 0, 2);
			D3 = logistic(v[3], 0, 3);
			computeBeta();
			computeGamma();
		}
		
		/** beta = fraction of the training mass removed by the discounts. */
		void computeBeta() {
			if (totalCount > 0) {
				beta = (D1*N1 + D2*N2 + D3*N3plus) / totalCount;
			} else {
				beta = 0;
			}
		}
		
		/** Backoff weight: this node's freed mass blended with the parent's gamma. */
		void computeGamma() {
			gamma = lambda * (alpha*beta + 1 - alpha) + (1 - lambda)*parentGamma;
		}

		/* (non-Javadoc)
		 * @see de.jtem.numericalMethods.calculus.function.RealFunctionOfSeveralVariables#eval(double[])
		 */
		@Override
		public double eval(double[] x) {
			// Unpack the candidate parameters, then score them as per-token
			// heldout entropy (negative base-10 log-likelihood).
			vector2params(x);
			++nrInvocations;
			
			// pre-compute \dot{p}_n since it doesn't depend on the context
			Long2DoubleMap dotPdist = makeDotPdist();
			
			double logLikelihood = 0;
			long totalDevCount = 0;
			
			for(Entry<ContextClusters, Long2IntMap> entry : boClusters2WordCounts.entrySet()) {
				ContextClusters clusters = entry.getKey();
				Long2IntMap devCounts = entry.getValue();
				// NOTE(review): no null check -- assumes every heldout cluster
				// is present in boCluster2WordProb; verify against the code
				// that populates that map.
				Long2DoubleMap boClusterProbs = boCluster2WordProb.get(clusters);
				
				for(Long2IntMap.Iterator it = devCounts.iterator(); it.hasNext(); ) {
					Long2IntMap.Entry e = it.next();
					long word = e.getKey();
					int count = e.getValue();
					totalDevCount += count;
					
					// Interpolated probability: node's dotP plus gamma-weighted
					// backoff-cluster probability.
					double tildeP = dotPdist.get(word) + gamma * boClusterProbs.get(word);
					if (tildeP > 0) {
						logLikelihood += count * Math.log10(tildeP);
					}
				}
			}
			
			// NOTE(review): yields NaN if totalDevCount == 0; callers appear to
			// guard with MIN_COUNTS before searching -- confirm.
			double entropy = - logLikelihood / totalDevCount;
			if (entropy < minEntropy) {
				// Remember the best parameters so setMinimum() can restore them.
				minAlpha = alpha;
				minD1 = D1;
				minD2 = D2;
				minD3 = D3;
				minEntropy = entropy;
			}

			return entropy;
		}

		/* (non-Javadoc)
		 * @see de.jtem.numericalMethods.calculus.function.RealFunctionOfSeveralVariables#getNumberOfVariables()
		 */
		@Override
		public int getNumberOfVariables() {
			// alpha, D1, D2, D3
			return 4;
		}
		
		/** Applies the absolute discount for the count's bucket; 0 for count <= 0. */
		private double discount(long count) {
			if (count == 1) return count - D1;
			if (count == 2) return count - D2;
			if (count >= 3) return count - D3;
			return 0;
		}
		
		/**
		 * \dot{p}_n(word): discounted relative frequency at this node, scaled
		 * by lambda*alpha, plus the parent's dotP weighted by (1 - lambda).
		 * Assumes trainCounts.get returns 0 for absent words -- TODO confirm.
		 */
		private double computeDotP(long word) {
			double result = 0;
			if (alpha > 0) {
				result += lambda * alpha * discount(trainCounts.get(word)) / totalCount; 
			}
			result += (1 - lambda) * parentDotPdist.get(word);
			return result;
		}
		
		/** Builds the dotP distribution over devWords, keeping only positive entries. */
		private Long2DoubleMap makeDotPdist() {
			Long2DoubleMap dotPdist = new Long2DoubleMap(devWords.size());
			for(long word : devWords) {
				double prob = computeDotP(word);
				if (prob > 0) {
					dotPdist.put(word, prob);
				}
			}
			return dotPdist;
		}
	}
	
	/**
	 * Convenience overload of {@link #makeDataFilename(int, int, boolean)} that
	 * names the regular (non-"full") data file for the given layer and shard.
	 */
	public static String makeDataFilename(int level, int dataId) {
		final boolean fullVariant = false;
		return makeDataFilename(level, dataId, fullVariant);
	}
	
	/**
	 * Builds the on-disk name for a layer's training-data file, e.g.
	 * "layer-003-data-0" or "layer-003-data-full-0".
	 *
	 * @param level  layer number, zero-padded to three digits
	 * @param dataId shard id appended at the end
	 * @param full   if true, inserts the "full-" marker before the shard id
	 * @return the formatted file name
	 */
	public static String makeDataFilename(int level, int dataId, boolean full) {
		String fullTag = full ? "full-" : "";
		return String.format("layer-%03d-data-%s%d", level, fullTag, dataId);
	}
}
