/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.io.*;
import java.nio.channels.*;
import java.util.*;

import com.sleepycat.je.*;

import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.storage.*;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;



/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
/**
 * Populates per-cluster probability distributions by walking the history
 * decision tree one level at a time over on-disk training data: at each
 * level, every non-leaf node's data is split by the node's question into
 * the next level's files, joint (word, hidden-factor) counts are
 * accumulated, and the resulting normalized distributions are written to
 * an {@link AbstractProbTreeStorage}.
 *
 * <p>Not thread-safe itself; internal parallelism is delegated to
 * {@link JobManager} job groups.
 *
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class YetAnotherClusterPopulator {
	private final LanguageModel lm;
	/** History decision tree whose nodes define the clusters. */
	private final BinaryTree<HistoryTreePayload> theTree;
	/** Destination for the populated probability trees. */
	private AbstractProbTreeStorage probTreeStorage;
	/** Lookup table: cluster id -> history-tree node carrying that id. */
	private BinaryTree<HistoryTreePayload> nodes[];
	private int nrClusters;
	/** Working directory holding the per-level on-disk training data. */
	private File dataDir;
	/** Number of parallel data splits (one training data file per split and level). */
	private int nrDataFiles;
	/** BDB environment backing the pruning-set storage. */
	private Environment env;
	//private BDBActiveNodeStorage<ParentNodeData> parentNodeDataStorage;
	/** Per-level sets of low-count words (packed overt-factor bits) eligible for pruning, keyed by cluster id. */
	private BDBActiveNodeStorage<HashSet<Long>> wordsToPruneStorage;
	//private ContextReductionTrainingDataFilter boContextReducer;
	private BackoffType backoff = BackoffType.ONE; // NOTE(review): never read within this class — confirm before removing
	
	/**
	 * @param lm      model providing the history tree and configuration
	 * @param storage destination for the populated probability trees
	 */
	@SuppressWarnings("unchecked")
	public YetAnotherClusterPopulator(LanguageModel lm, AbstractProbTreeStorage storage) {
		this.lm = lm;
		this.theTree = lm.getHistoryTree();
		this.probTreeStorage = storage;
		
		// First pass: find the largest cluster id so the index array can be sized.
		int maxClusterId = 0;
		for(BinaryTreeIterator<HistoryTreePayload> it = theTree.getPostOrderIterator(); it.hasNext(); ) {
			int clusterId = it.next().clusterid;
			if (clusterId > maxClusterId) maxClusterId = clusterId;
		}
		
		nrClusters = maxClusterId+1;
		nodes = new BinaryTree[nrClusters];
		// Second pass: build the cluster-id -> node lookup table.
		for(BinaryTreeIterator<HistoryTreePayload> it = theTree.getPostOrderIterator(); it.hasNext(); ) {
			BinaryTree<HistoryTreePayload> node = it.nextNode();
			int clusterid = node.getPayload().clusterid;
			nodes[clusterid] = node;
			//discountWeights[clusterid].lambda = node.getPayload().lambda;
		}

	}
	
	/**
	 * Sets up the working directory and the BDB environment used for the
	 * pruning sets, then prepares the level-0 training data (full,
	 * non-reduced futures) by combining and context-reducing the
	 * interpolation data files, one job per split.
	 *
	 * @param dataDir directory for temporary data files; a "db" subdirectory
	 *                is created for the BDB environment
	 * @throws IOException       if data preparation fails
	 * @throws DatabaseException if the BDB environment cannot be created
	 */
	public void initialize(final File dataDir) throws IOException, DatabaseException {
		this.dataDir = dataDir;
		OnDiskTrainingDataNodeWriter.setTempDir(dataDir);
		
		EnvironmentConfig envConf = new EnvironmentConfig();
		envConf.setAllowCreate(true);
		envConf.setSharedCache(true);
		envConf.setTransactional(false);
		envConf.setReadOnly(false);
		envConf.setCachePercent(20);
		// 100 MiB log files for the BDB JE environment.
		envConf.setConfigParam("je.log.fileMax", Integer.toString(100*1024*1024));
		
		File dir = new File(dataDir, "db");
		if (!dir.isDirectory()) {
			dir.mkdirs(); // failure surfaces below when the Environment is opened
		}
		env = new Environment(dir, envConf);
		//contextClusterStorage = new ContextClusterStorage(env);
		//parentNodeDataStorage = new BDBActiveNodeStorage<ParentNodeData>(env);
		wordsToPruneStorage = new BDBActiveNodeStorage<HashSet<Long>>(env);
		
		final Experiment.Files files = Experiment.getInstance().getFiles();
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("level 0");
		
		nrDataFiles = files.getInterpolateData().size();
		
		// prepare data that with non-reduced futures for population
		Runnable run = new Runnable() {
			@Override
			public void run() {
				try {
					for(int split=0; split<nrDataFiles; ++split) {
						final String[] filenames = files.getInterpolateDataFiles(split);
						TrainingDataUtil.combineAndReduceContext(filenames, 
								new File(dataDir, makeDataFilename(0, split, true)).getAbsolutePath(), 
								lm.getOvertOrder(), lm.getHiddenOrder(), 
								lm.getHistoryTree().getPayload().clusterid, null);
					}
				} catch(IOException e) {
					// Jobs cannot rethrow checked exceptions; at least record the failure.
					e.printStackTrace();
				}
			}
		};
		manager.addJob(group, new Job(run, "prepare full data"));
		
		group.join();
	}

	/**
	 * Runs the population loop: processes level after level until a level
	 * produces no new nodes (all branches have reached leaves). The pruning
	 * sets produced by each level are handed to the next one and removed
	 * once consumed.
	 */
	public void populate() throws IOException {
		int level = 0;
		while(true) {
			
			int numNodes = 0;
			
			// Pruning sets written by the previous level (null at level 0),
			// plus a fresh collection for the sets produced by this level.
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection wordsToPruneCollection = wordsToPruneStorage.getCollection("words2prune level#"+Integer.toString(level));
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection nextWordsToPruneCollection = wordsToPruneStorage.createCollection("words2prune level#"+Integer.toString(level+1));
			numNodes = populateDataLevel(level, wordsToPruneCollection, nextWordsToPruneCollection);
			if (wordsToPruneCollection != null) {
				wordsToPruneStorage.removeCollection(wordsToPruneCollection.getName());
			}
			

			if (numNodes == 0) {
				// No node was split at this level: the tree is fully populated.
				if (nextWordsToPruneCollection != null) {
					wordsToPruneStorage.removeCollection(nextWordsToPruneCollection.getName());
				}
				break;
			}
			
			
			++level;
		}
		
	}

	/**
	 * Opens the data files of {@code level} for all splits, creates the
	 * output files for {@code level+1}, populates all clusters at this level
	 * and finally deletes the consumed input files.
	 *
	 * @return the number of child nodes written to the next level; 0 ends the
	 *         population loop (also returned when a data file is missing)
	 */
	private int populateDataLevel(int level,
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection wordsToPruneCollection,
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection nextWordsToPruneCollection) throws IOException 
	{
		File dataFiles[] = new File[nrDataFiles];
		File newDataFiles[] = new File[nrDataFiles];
		FileChannel inputChannels[] = new FileChannel[nrDataFiles];
		RandomAccessFile outFiles[] = new RandomAccessFile[nrDataFiles];
		final TrainingDataNodeReader readers[] = new TrainingDataNodeReader[nrDataFiles];
		final TrainingDataNodeWriter writers[] = new TrainingDataNodeWriter[nrDataFiles];
		
		// int (not byte) index: a byte counter would wrap past 127 splits.
		for(int dataId = 0; dataId < nrDataFiles; ++dataId) {
			dataFiles[dataId] = new File(dataDir, makeDataFilename(level, dataId, true));
			if (!dataFiles[dataId].isFile()) {
				System.err.printf("Cannot find file %s, ", dataFiles[dataId].getAbsolutePath());
				// Fix: close the resources already opened for earlier splits
				// before bailing out (previously they were leaked).
				for(int d = 0; d < dataId; ++d) {
					writers[d].close();
					readers[d].close();
				}
				return 0;
			}
			// Closing the reader closes the channel (and its underlying stream).
			inputChannels[dataId] = new FileInputStream(dataFiles[dataId]).getChannel();
			readers[dataId] = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannels[dataId]));

			newDataFiles[dataId] = new File(dataDir, makeDataFilename(level+1, dataId, true));
			outFiles[dataId] = new RandomAccessFile(newDataFiles[dataId], "rw");
			outFiles[dataId].getChannel().truncate(0); // start empty even if the file already existed

			writers[dataId] = new OnDiskTrainingDataNodeWriter(outFiles[dataId].getChannel());
			writers[dataId] = new BufferedTrainingDataNodeWriter(writers[dataId]);
		}

		int numNodes = populateData(readers, writers, wordsToPruneCollection, nextWordsToPruneCollection);
		
		// delete data files from the previous level
		for(int dataId = 0; dataId < nrDataFiles; ++dataId) {
			writers[dataId].close();
			readers[dataId].close();
			
			dataFiles[dataId].delete();
			//inputChannels[dataId].close();
			//outFiles[dataId].close();
		}
		
		return numNodes;
	}
	
	/**
	 * Iterates over the data nodes of the current level (all split readers
	 * advance in lock-step and are expected to yield nodes in the same
	 * order). Leaf nodes only accumulate counts; non-leaf nodes additionally
	 * have their data split by the node's question into the next level's
	 * files, one parallel job per split. Cluster population itself is
	 * scheduled asynchronously in a separate job group.
	 *
	 * @return number of child nodes written to the next level
	 */
	private int populateData(final TrainingDataNodeReader readers[], final TrainingDataNodeWriter writers[],
			final ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection wordsToPruneCollection,
			final ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection nextWordsToPruneCollection) throws IOException 
	{
		int numNodes = 0;
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("full data split");
		JobGroup populateGroup = manager.createJobGroup("populate clusters");
		
		while(true) {
			// Read the same node from every split.
			final ReadableTrainingDataNode dataNodes[] = new ReadableTrainingDataNode[readers.length];
			for(int dataId=0; dataId<readers.length; ++dataId) {
				dataNodes[dataId] = readers[dataId].getNext();
			}
			if (dataNodes[0] == null) break;
			final int nodeId = dataNodes[0].getNodeId();
			
			for(int dataId = 1; dataId < readers.length; ++dataId) {
				assert(dataNodes[dataId].getNodeId() == nodeId);
			}
			
			final Long2IntMap counts = new Long2IntMap();

			final BinaryTree<HistoryTreePayload> oldLeaf = nodes[nodeId];
			
			if (oldLeaf.isLeaf()) {
				// Terminal node: just accumulate its counts, nothing to split.
				for(int dataId = 0; dataId < readers.length; ++dataId) {
					ReadableTrainingData data = dataNodes[dataId].getData(0);
					while(data.hasNext()) {
						TrainingDataBlock block = data.next();
						block.addCounts(counts);
					}
				}
				if (oldLeaf.getPayload().isBackoff) {
					// Backoff leaves are not populated; zero out their weight.
					oldLeaf.getPayload().lambda = 0;
					continue;
				}
			} else {
				// Split every data split in parallel. Each job writes into its
				// own counts map to avoid synchronization; maps are summed below.
				final Long2IntMap countsByData[] = new Long2IntMap[readers.length];
				countsByData[0] = counts;
				for(int d=1; d<countsByData.length; ++d) {
					countsByData[d] = new Long2IntMap();
				}
				for(int dataId = 0; dataId < readers.length; ++dataId) {
					final int myDataId = dataId;
					Runnable run = new Runnable() {

						@Override
						public void run() {
							try {
								TrainingDataNodeWriter writer = writers[myDataId];
								
								BinaryTree<HistoryTreePayload> left = oldLeaf.getLeft();
								BinaryTree<HistoryTreePayload> right = oldLeaf.getRight();
								
								int leftNodeId = left.getPayload().clusterid;
								int rightNodeId = right.getPayload().clusterid;
								
								WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
								WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
								
								writer.add(leftDataNode);
								writer.add(rightDataNode);
							
								// Route each record left/right according to the node's question.
								TrainingDataUtil.splitData(dataNodes[myDataId].getData(0), oldLeaf.getPayload().question, 
										rightDataNode.getData(0), leftDataNode.getData(0), countsByData[myDataId]);
				
							} catch (IOException e) {
								// Jobs cannot rethrow checked exceptions; at least record the failure.
								e.printStackTrace();
							}
						}
					};
					Job job = new Job(run, "");
					manager.addJob(group, job);
				}	
				group.join();
				numNodes += 2;
				// sum up all counts (counts == countsByData[0])
				for(int d=1; d<countsByData.length; ++d) {
					counts.addMap(countsByData[d]);
				}
			}
			
			// Estimate and store this cluster's distributions asynchronously.
			Runnable run = new Runnable() {
				@Override
				public void run() {
					populateCluster(nodeId, counts, wordsToPruneCollection, nextWordsToPruneCollection);
				}
			};
			Job job = new Job(run, "");
			manager.addJob(populateGroup, job);
			// Throttle: cap the backlog of population jobs so the splitting
			// jobs above are not starved of workers.
			populateGroup.join(manager.getNumWorkers()/2 + 1);
		}
		
		// wait for the last populateCluster()
		populateGroup.join();
		
		return numNodes;
	}
	
	/**
	 * Estimates, stores the total count for, and saves the probability trees
	 * of a single cluster. Runs on worker threads.
	 */
	private void populateCluster(int clusterid, Long2IntMap counts,
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection wordsToPruneCollection,
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection nextWordsToPruneCollection) 
	{
		Map<FactorTuple, OnDiskCompactProbTree> probTrees = estimateClusterProbabilities(clusterid, counts, wordsToPruneCollection, nextWordsToPruneCollection);
		
		long totalCount = counts.sumValues();
		if (clusterid == 1) {
			// the root of the tree is smoothed with the uniform distribution, thus counts are not integer
			// more importantly, the unseen counts will get less than 1 counts, which will round down to 0
			totalCount *= 1000;
		}
		probTreeStorage.setTotalClusterCount(lm.getIdNum(), clusterid, totalCount);
		
		saveCluster(clusterid, probTrees);
	}
	
	/**
	 * Normalizes each probability tree and writes it to the storage under
	 * its overt-factor key.
	 */
	private void saveCluster(int clusterid, Map<FactorTuple, OnDiskCompactProbTree> probTrees) {
		for(Map.Entry<FactorTuple, OnDiskCompactProbTree> entry : probTrees.entrySet()) {
			entry.getValue().normalize();
			probTreeStorage.setProbTree(lm.getIdNum(), clusterid, entry.getKey(), entry.getValue());
		}
	}
	
	/**
	 * Builds, from the joint counts of one cluster, a probability tree over
	 * hidden factors for every observed overt-factor tuple (word). Words
	 * whose count is at or below the pruning threshold are recorded in
	 * {@code nextWordsToPruneCollection} and skipped entirely when the
	 * parent cluster already flagged them. At the root (the node with no
	 * parent), probability mass is additionally distributed over events in
	 * the vocabulary that were never observed.
	 *
	 * @return map from overt-factor tuple to its (unnormalized) probability tree
	 */
	private Map<FactorTuple, OnDiskCompactProbTree> estimateClusterProbabilities(
			int clusterid, Long2IntMap counts,
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection wordsToPruneCollection,
			ActiveNodeStorage<HashSet<Long>>.ActiveNodeCollection nextWordsToPruneCollection) 
	{
		Experiment experiment = Experiment.getInstance();
		FactorTupleDescription desc = experiment.getTupleDescription();

		final long overtMask = desc.getOvertFactorsMask();
		final long hiddenMask = desc.getHiddenFactorsMask();
		
		Map<FactorTuple, OnDiskCompactProbTree> data = new HashMap<FactorTuple, OnDiskCompactProbTree>();

		BinaryTree<HistoryTreePayload> node = nodes[clusterid];
		// Only the root of the history tree distributes mass to unseen events.
		final boolean estimateUnseen = node.getParent() == null;
		HashSet<Long> wordsToPrune = null;
		HashSet<Long> newWordsToPrune = null;
		if (lm.getPruningThreshold() > 0 ) {
			if (wordsToPruneCollection != null && node.getParent() != null) { 
				// Low-count words flagged by this node's parent.
				wordsToPrune = wordsToPruneCollection.getNode(node.getParent().getPayload().clusterid);
			}
			newWordsToPrune = new HashSet<Long>();
		}
		
		// convert joint counts to word counts
		Long2IntMap wordCounts = new Long2IntMap(counts.size());
		for(Long2IntMap.Iterator i=counts.iterator(); i.hasNext();) {
			Long2IntMap.Entry e = i.next();
			wordCounts.addAndGet(e.getKey() & overtMask, e.getValue());
		}
		/*
		SmootherProducer producer = new SmootherProducer();
		for(Long2IntMap.Iterator i=wordCounts.iterator(); i.hasNext();) {
			producer.addCount(i.next().getValue());
		}
		*/
		Map<FactorTuple,FactorTuple> allOvertFactors = experiment.getTupleDescription().getAllOvertFactors();

		// populate distributions at leaves
		FactorTuple dummyTuple = new FactorTuple(); // reused probe key to avoid per-entry allocation

		double totalCount = 0;
		for(Long2IntMap.Iterator iter = counts.iterator(); iter.hasNext();) {
			Long2IntMap.Entry entry = iter.next();
			long tuple = entry.getKey();
			int count = entry.getValue();

			dummyTuple.setBits(tuple & overtMask);
			FactorTuple overtFactors = allOvertFactors.get(dummyTuple);
			if (overtFactors == null) {
				overtFactors = new FactorTuple(tuple & overtMask);
				System.out.printf("unkown overt factors: %s\n", overtFactors.toStringNoNull());
			}
			
			// Flag low-count words as pruning candidates for the children.
			if (newWordsToPrune != null && lm.getPruningThreshold() >= count) {
				newWordsToPrune.add(overtFactors.getBits());
			}
			
			// Skip words that are low-count here AND were already flagged by the parent.
			if (wordsToPrune != null && lm.getPruningThreshold() >= count && wordsToPrune.contains(overtFactors.getBits())) {
				continue;
			}
			
			totalCount += count;
			
			//FactorTuple hiddenFactors = new FactorTuple(tupleCount.tuple & hiddenMask);
			int packedHiddenFactors = desc.packHiddenFactorsToInt(tuple & hiddenMask);

			OnDiskCompactProbTree probTree = data.get(overtFactors);
			if (probTree == null) {
				probTree = new OnDiskCompactProbTree(packedHiddenFactors, count);
				data.put(overtFactors, probTree);
			} else { 
				probTree.addProbability(packedHiddenFactors, count);
			} 
			assert(probTree.checkTree());
		}

		if (newWordsToPrune != null && newWordsToPrune.size() > 0) {
			nextWordsToPruneCollection.putNode(clusterid, newWordsToPrune);
		}
		
		// Turn raw counts into relative frequencies.
		for(OnDiskCompactProbTree probTree : data.values()) {
			probTree.scale(1.0 / totalCount);
		}

		unseen: if (estimateUnseen) {
			HashSet<FactorTuple> unseenFactors = new HashSet<FactorTuple>(10);

			// <NULL> and the sentence-start event never receive unseen mass.
			FactorTuple nullEvent = allOvertFactors.get(new FactorTuple(0));
			FactorTuple startEvent = allOvertFactors.get(new FactorTuple(desc.createStartTuple() & overtMask));
			for(FactorTuple overtFactors : allOvertFactors.keySet()) {
				if (!data.containsKey(overtFactors) && !(overtFactors == nullEvent || overtFactors == startEvent)) {
					unseenFactors.add(overtFactors);
					System.out.printf("unseen event: %s\n", overtFactors.toStringNoNull());
				}
			}
			if (unseenFactors.size() == 0) {
				break unseen;
			}

			// Group the observed words by their hidden-factor tuples.
			HashMap<FactorTuple, HashSet<FactorTuple>> hidden2overt = new HashMap<FactorTuple, HashSet<FactorTuple>>(100);
			for(Long2IntMap.Iterator iter = counts.iterator(); iter.hasNext();) {
				Long2IntMap.Entry entry = iter.next();
				long tuple = entry.getKey();
				dummyTuple.setBits(tuple & overtMask);

				FactorTuple overtFactors = allOvertFactors.get(dummyTuple);
				FactorTuple hiddenFactors = new FactorTuple(tuple & hiddenMask);
				HashSet<FactorTuple> set = hidden2overt.get(hiddenFactors);
				if (set == null) {
					set = new HashSet<FactorTuple>();					
					hidden2overt.put(hiddenFactors, set);
				}
				set.add(overtFactors);
			}

			// the probability of an unseen event to have a tag is propotional to the number of distinct words with that tag
			OnDiskCompactProbTree unseenTree = new OnDiskCompactProbTree(hidden2overt.size());
			int count = 0;
			for(Map.Entry<FactorTuple,HashSet<FactorTuple>> entry : hidden2overt.entrySet()) {
				int size = entry.getValue().size();
				count += size;
				FactorTuple hiddenFactors = entry.getKey();
				int compactHiddenFactors = desc.packHiddenFactorsToInt(hiddenFactors.getBits());
				if (compactHiddenFactors > 0) {
					// ignore <NULL> should they appear
					unseenTree.addProbability(compactHiddenFactors, size);
				}
			}
			double unseenProbability = unseenFactors.size() / totalCount;
			unseenTree.scale(unseenProbability / (count * unseenFactors.size()));

			// make sure the distribution sums to 1.0 after adding unseen probabilities
			for(OnDiskCompactProbTree probTree : data.values()) {
				probTree.scale(1.0 - unseenProbability);
			}

			// Every unseen event receives its own copy of the shared unseen tree.
			for(FactorTuple overtFactors : unseenFactors) {
				OnDiskCompactProbTree probTree = (OnDiskCompactProbTree)unseenTree.clone();
				data.put(overtFactors, probTree);
			}			
			System.out.printf("distributing %e probability among %d unseen events\n", 
					unseenProbability, unseenFactors.size());
		}
		return data;
	}

	/**
	 * Convenience overload of {@link #makeDataFilename(int, int, boolean)}
	 * for non-"full" data files.
	 */
	public static String makeDataFilename(int level, int dataId) {
		return makeDataFilename(level, dataId, false);
	}
	
	/**
	 * Builds the data file name for a given tree level and split, e.g.
	 * {@code layer-002-data-full-0} when {@code full} is set.
	 */
	public static String makeDataFilename(int level, int dataId, boolean full) {
		return String.format("layer-%03d-data-%s%d", level, (full?"full-":""), dataId);
	}

}
