/**
 * 
 */
package edu.umd.clip.lm.tools;

import java.io.*;
import java.nio.channels.*;
import java.util.*;

import edu.berkeley.nlp.util.*;

import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.Experiment.Files;
import edu.umd.clip.lm.model.data.*;
import edu.umd.clip.lm.questions.Question;
import edu.umd.clip.lm.util.*;
import edu.umd.clip.lm.util.tree.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class CountQuestionsByOffset2 {
	/** Command-line options, populated by {@code OptionParser} from {@code args}. */
	public static class Options {
        @Option(name = "-config", required = true, usage = "XML config file")
		public String config;
        @Option(name = "-input", required = false, usage = "Training data file (Default: stdin)")
		public String input;
        // NOTE(review): flag is named "-forest" but the usage text and field describe an LM id —
        // confirm the flag name is intentional.
        @Option(name = "-forest", required = false, usage = "LM ID to train (default: " + LanguageModel.PRIMARY_LM_ID + ")")
		public String lm = LanguageModel.PRIMARY_LM_ID;        
        @Option(name = "-datadir", required = false, usage = "the directory for the temporary files")
		public String datadir = null;        
        @Option(name = "-jobs", required = false, usage = "number of concurrent jobs (default: 1)")
        public int jobs = 1;

	}
	
	// Language model whose history tree is being walked.
	LanguageModel lm;
	// Number of data splits; one reader/writer pair per split at every level.
	int nrDataFiles;
	// Directory holding the temporary per-level data files (see makeDataFilename).
	File dataDir;
	// History-tree node for each cluster, indexed by the payload's clusterid.
	BinaryTree<HistoryTreePayload> nodes[];
	// Count accumulated at the root node; denominator of the rank computation in populateData.
	double totalCount;
	// Number of time-offset slots tracked per rank row.
	final int MAX_OFFSET=5;
	// get(rank)[offset] = number of questions at that rank asked about that time offset,
	// kept separately for overt and hidden questions.
	ArrayList<int []> overtQuestionCountsByRank = new ArrayList<int []>(100);
	ArrayList<int []> hiddenQuestionCountsByRank = new ArrayList<int []>(100);
	Options opts;
	
	/**
	 * Processes one level of the history tree: opens the data files produced for
	 * {@code level}, splits each node's data into its two children (written to the
	 * files for {@code level+1}), then deletes the input files.
	 *
	 * @param level tree depth whose data files are read
	 * @return number of nodes processed, or 0 when no file exists for this level
	 *         (used by main() as the termination signal)
	 * @throws IOException on read/write failure
	 */
	private int populateDataLevel(int level) throws IOException 
	{
		File dataFiles[] = new File[nrDataFiles];
		File newDataFiles[] = new File[nrDataFiles];
		FileChannel inputChannels[] = new FileChannel[nrDataFiles];
		RandomAccessFile outFiles[] = new RandomAccessFile[nrDataFiles];
		final TrainingDataNodeReader readers[] = new TrainingDataNodeReader[nrDataFiles];
		final TrainingDataNodeWriter writers[] = new TrainingDataNodeWriter[nrDataFiles];
		
		// NOTE(review): loop counters are byte — assumes nrDataFiles <= 127; confirm.
		for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
			dataFiles[dataId] = new File(dataDir, makeDataFilename(level, dataId));
			if (!dataFiles[dataId].isFile()) {
				// Missing input file means the previous level produced nothing: we are done.
				// NOTE(review): channels/files opened in earlier iterations are not closed here — leak on this path.
				System.err.printf("Cannot find file %s, ", dataFiles[dataId].getAbsolutePath());
				return 0;
			}
			inputChannels[dataId] = new FileInputStream(dataFiles[dataId]).getChannel();
			readers[dataId] = new EagerTrainingDataNodeReader(new OnDiskTrainingDataNodeReader(inputChannels[dataId]));

			// Output file for the next level; truncate in case a stale file exists.
			newDataFiles[dataId] = new File(dataDir, makeDataFilename(level+1, dataId));
			outFiles[dataId] = new RandomAccessFile(newDataFiles[dataId], "rw");
			outFiles[dataId].getChannel().truncate(0);

			writers[dataId] = new OnDiskTrainingDataNodeWriter(outFiles[dataId].getChannel());
			writers[dataId] = new BufferedTrainingDataNodeWriter(writers[dataId]);
		}

		int nrNodes = populateData(readers, writers);
		
		// delete data files from the previous level
		for(byte dataId = 0; dataId < nrDataFiles; ++dataId) {
			writers[dataId].close();
			
			dataFiles[dataId].delete();
			inputChannels[dataId].close();
			outFiles[dataId].close();
		}
		
		return nrNodes;
	}
	
	/**
	 * Streams nodes from all split readers in lockstep, splits each non-leaf node's
	 * data by its question (one concurrent job per split), and tallies the question
	 * by rank (log2 of root-count / node-count) and by the question's time offset.
	 *
	 * @param readers one reader per data split, all positioned at the same sequence of nodes
	 * @param writers one writer per data split for the next level's files
	 * @return number of non-leaf nodes whose data was split
	 * @throws IOException on read/write failure
	 */
	private int populateData(final TrainingDataNodeReader readers[], final TrainingDataNodeWriter writers[]) throws IOException 
	{
		int nrNodes = 0;
		
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("full data split");
		while(true) {
			// Read the next node from every split; the splits must stay aligned.
			final ReadableTrainingDataNode dataNodes[] = new ReadableTrainingDataNode[readers.length];
			for(byte dataId=0; dataId<readers.length; ++dataId) {
				dataNodes[dataId] = readers[dataId].getNext();
			}
			// Reader 0 exhausted == all exhausted (splits are parallel); we are done.
			if (dataNodes[0] == null) return nrNodes;
			int nodeId = dataNodes[0].getNodeId();
			
			// Sanity check: every split must be looking at the same tree node.
			for(byte dataId = 1; dataId < readers.length; ++dataId) {
				assert(dataNodes[dataId].getNodeId() == nodeId);
			}
			

			final BinaryTree<HistoryTreePayload> oldLeaf = nodes[nodeId];
			long totalNodeCount = 0;
			
			// Leaves have no question and no children: discard their data and move on.
			if (oldLeaf.isLeaf()) {
				for(byte dataId = 0; dataId < readers.length; ++dataId) {
					dataNodes[dataId].skipData();
				}
				continue;
			}
			
			// Per-split count maps filled by splitData below.
			final Long2IntMap countsByData[] = new Long2IntMap[readers.length];
			for(byte d=0; d<countsByData.length; ++d) {
				countsByData[d] = new Long2IntMap();
			}
			// One job per split: route this node's data to the left/right child
			// according to the node's question.
			for(byte dataId = 0; dataId < readers.length; ++dataId) {
				final byte myDataId = dataId;
				Runnable run = new Runnable() {

					@Override
					public void run() {
						try {
							TrainingDataNodeWriter writer = writers[myDataId];
							
							BinaryTree<HistoryTreePayload> left = oldLeaf.getLeft();
							BinaryTree<HistoryTreePayload> right = oldLeaf.getRight();
							
							int leftNodeId = left.getPayload().clusterid;
							int rightNodeId = right.getPayload().clusterid;
							
							WritableTrainingDataNode leftDataNode = writer.createNode(leftNodeId, 1);
							WritableTrainingDataNode rightDataNode = writer.createNode(rightNodeId, 1);
							
							writer.add(leftDataNode);
							writer.add(rightDataNode);
						
							TrainingDataUtil.splitData(dataNodes[myDataId].getData(0), oldLeaf.getPayload().question, 
									rightDataNode.getData(0), leftDataNode.getData(0), countsByData[myDataId]);
			
						} catch (IOException e) {
							// NOTE(review): failure is only printed; the node's data is silently
							// lost for this split. Consider propagating the error.
							e.printStackTrace();
						}
					}
				};
				Job job = new Job(run, "q");
				manager.addJob(group, job);
			}	
			// Wait for all splits of this node before reading counts.
			group.join();
			// sum up all counts
			// NOTE(review): loop starts at d=1, so split 0's counts are excluded from
			// totalNodeCount — confirm this is intentional (e.g. split 0 held out) and
			// not a copy-paste from the nodeId-assertion loop above.
			for(byte d=1; d<countsByData.length; ++d) {
				totalNodeCount += countsByData[d].sumValues();
			}
			
			if (totalNodeCount == 0) continue;
			
			// The root node (no parent) establishes the denominator for all ranks.
			if (oldLeaf.getParent() == null) {
				totalCount = totalNodeCount;
			}
			
			// Rank = rounded log2 of how much rarer this node is than the root.
			int rank = (int)Math.round(ProbMath.log2(totalCount / totalNodeCount));
			
			Question question = oldLeaf.getPayload().question;

			ArrayList<int []> questionCountsByRank = question.isAboutHidden() ? hiddenQuestionCountsByRank : overtQuestionCountsByRank;
			
			// Map time offset to an array index: assumes offsets are negative
			// (-1 -> 0, -2 -> 1, ...) and > -(MAX_OFFSET+1) — TODO confirm.
			byte offset = question.getTimeOffset();
			offset = (byte) -(offset+1);
			// Grow the rank table on demand.
			while(questionCountsByRank.size() <= rank) {
				questionCountsByRank.add(new int[MAX_OFFSET]);
			}
			questionCountsByRank.get(rank)[offset] += 1;

			++nrNodes;
		}
	}

	/**
	 * Parses options, starts the job manager, loads the experiment/LM from the
	 * config, prepares the temp data directory, and indexes the history-tree
	 * nodes by cluster id into {@link #nodes}.
	 */
	@SuppressWarnings("unchecked")
	CountQuestionsByOffset2(String[] args) {
        OptionParser optParser = new OptionParser(Options.class);
        
        opts = (Options) optParser.parse(args, true);

		// Daemon job-manager thread so it does not block JVM exit.
		JobManager.initialize(opts.jobs, opts.jobs + 2);
		Thread thread = new Thread(JobManager.getInstance(), "Job Manager");
		thread.setDaemon(true);
		thread.start();

		Experiment.initialize(opts.config);
		final Experiment experiment = Experiment.getInstance();
		experiment.buildPrefixes();
		experiment.buildWordPrefixes();
		
		lm  = experiment.getLM(opts.lm);

		// Default temp dir is derived from the LM id; create it if missing.
		File tmpDir = opts.datadir == null ? new File("count-questions-" + lm.getId()) : new File(opts.datadir);
		if (!tmpDir.isDirectory()) {
			tmpDir.mkdirs();
		}
		dataDir = tmpDir;
		
		// First pass: find the largest cluster id to size the nodes[] array.
		int maxClusterId = 0;
		for(BinaryTreeIterator<HistoryTreePayload> it = lm.getHistoryTree().getPostOrderIterator(); it.hasNext(); ) {
			int clusterId = it.next().clusterid;
			if (clusterId > maxClusterId) maxClusterId = clusterId;
		}
		
		// Second pass: record each tree node under its cluster id.
		int nrClusters = maxClusterId+1;
		nodes = new BinaryTree[nrClusters];
		for(BinaryTreeIterator<HistoryTreePayload> it = lm.getHistoryTree().getPostOrderIterator(); it.hasNext(); ) {
			BinaryTree<HistoryTreePayload> node = it.nextNode();
			int clusterid = node.getPayload().clusterid;
			nodes[clusterid] = node;
			//discountWeights[clusterid].lambda = node.getPayload().lambda;
		}
	}
	
	/**
	 * Builds the level-0 data files: combines each interpolation split's files,
	 * reduces context, and writes the result into {@link #dataDir}. Runs the
	 * splits sequentially (parallel version used too much memory).
	 *
	 * @throws IOException on read/write failure
	 */
	void initialize() throws IOException {
		final Experiment.Files files = Experiment.getInstance().getFiles();
		nrDataFiles = files.getInterpolateData().size();
		
		// mask everything to <NULL>
		long overtMask = 0;
		
		final TrainingDataFilter filter = new MaskedFuturesTrainingDataFilter(overtMask);

		for(int split=0; split<nrDataFiles; ++split) {
			final String[] filenames = files.getInterpolateDataFiles(split);
			final int id = split;
			
			Runnable run = new Runnable() {
				@Override
				public void run() {
					
					try {
						TrainingDataUtil.combineAndReduceContext(filenames, 
								new File(dataDir, makeDataFilename(0, id)).getAbsolutePath(), 
								lm.getOvertOrder(), lm.getHiddenOrder(), 
								lm.getHistoryTree().getPayload().clusterid, filter);
					} catch (IOException e) {
						e.printStackTrace();
					}
					System.out.printf("data #%d done\n", id);
				}
			};
			
			// turns out it takes too much memory to do this in parallel...
			//manager.addJob(group, new Job(run, "data #" + Integer.toString(id)));
			run.run();
		}
	}
	
	/**
	 * Prints a CSV table to stdout: one row per rank, columns alternating the
	 * overt and hidden fraction of questions at each time offset, normalized by
	 * the rank's total (overt + hidden) question count.
	 */
	void print() {
		// Pad the shorter of the two tables so both cover the same set of ranks.
		while (overtQuestionCountsByRank.size() < hiddenQuestionCountsByRank.size()) {
			overtQuestionCountsByRank.add(new int[MAX_OFFSET]);
		}
		
		while (overtQuestionCountsByRank.size() > hiddenQuestionCountsByRank.size()) {
			hiddenQuestionCountsByRank.add(new int[MAX_OFFSET]);
		}
		
		int nrRanks = overtQuestionCountsByRank.size();
		
		for(int i = 0; i<nrRanks; ++i) {
			
			// Row normalizer: all questions (overt + hidden, all offsets) at this rank.
			// NOTE(review): shadows the double field totalCount — intentional but confusing.
			int totalCount = 0;
			int overtCounts[] = overtQuestionCountsByRank.get(i);
			for(int count : overtCounts) {
				totalCount += count;
			}
			
			int hiddenCounts[] = hiddenQuestionCountsByRank.get(i);
			for(int count : hiddenCounts) {
				totalCount += count;
			}
			
			System.out.print(i);
			// NOTE(review): iterates lm.getOrder()-1 times over arrays of length
			// MAX_OFFSET (5) — throws ArrayIndexOutOfBoundsException if order-1 > 5; confirm.
			for(int j=0; j<lm.getOrder()-1; ++j) {
				System.out.print(',');
				System.out.print(totalCount == 0 ? 0 : (float)overtCounts[j]/totalCount);
				System.out.print(',');
				System.out.print(totalCount == 0 ? 0 : (float)hiddenCounts[j]/totalCount);
			}
			System.out.println();
		}

	}
	/**
	 * Entry point: builds level-0 data, then splits level by level until a level
	 * produces no nodes, and finally prints the rank/offset table.
	 *
	 * @param args command-line arguments (see {@link Options})
	 * @throws IOException 
	 */
	public static void main(String[] args) throws IOException {
		CountQuestionsByOffset2 c = new CountQuestionsByOffset2(args);
		c.initialize();
		int level = 0;
		while(true) {
			int nrNodes = c.populateDataLevel(level);
			if (nrNodes == 0) break;
			++level;
		}
		c.print();
	}
	
	/**
	 * Builds the temp-file name for a given tree level and data split,
	 * e.g. "count-layer-003-data-1" (level zero-padded to 3 digits).
	 */
	public static String makeDataFilename(int level, int dataId) {
		return String.format("count-layer-%03d-data-%d", level, dataId);
	}

}
