/**
 * 
 */
package edu.umd.clip.lm.tools;

import java.io.*;
import java.nio.channels.FileChannel;
import java.util.*;

import edu.berkeley.nlp.util.*;

import edu.umd.clip.lm.factors.FactorTuple;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.ContextFuturesPair;
import edu.umd.clip.lm.model.data.MaskedFuturesTrainingDataFilter;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataReader;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataWriter;
import edu.umd.clip.lm.model.data.ReadableTrainingData;
import edu.umd.clip.lm.model.data.TrainingDataBlock;
import edu.umd.clip.lm.model.data.TrainingDataFilter;
import edu.umd.clip.lm.model.data.TrainingDataReader;
import edu.umd.clip.lm.model.data.TrainingDataUtil;
import edu.umd.clip.lm.model.data.TupleCountPair;
import edu.umd.clip.lm.model.data.WritableTrainingData;
import edu.umd.clip.lm.model.training.*;
import edu.umd.clip.lm.util.*;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class DumpNgrams {

	/** Command-line options, populated by {@link OptionParser}. */
	public static class Options {
		@Option(name = "-config", required = true, usage = "XML config file")
		public String config;
		@Option(name = "-file1", required = false, usage = "Training data file 1")
		public String file1;
		@Option(name = "-file2", required = false, usage = "Training data file 2")
		public String file2;
		@Option(name = "-file3", required = false, usage = "Training data file 3")
		public String file3;
		@Option(name = "-file4", required = false, usage = "Training data file 4")
		public String file4;
		@Option(name = "-file5", required = false, usage = "Training data file 5")
		public String file5;

		// NOTE(review): declared but never read below — output always goes to stdout.
		@Option(name = "-output", required = false, usage = "Output file (Default: stdout)")
		public String output;
		// NOTE(review): declared but never read below — processing is single-threaded.
		@Option(name = "-jobs", usage = "number of concurrent jobs (default: 1)")
		public int jobs = 1;
		@Option(name = "-forest", required = false, usage = "LM ID to train (default: " + LanguageModel.PRIMARY_LM_ID + ")")
		public String lm = LanguageModel.PRIMARY_LM_ID;
		// NOTE(review): declared but never read below.
		@Option(name = "-cache-size", usage = "maximum number of elements to keep in memory")
		public int maxCacheSize = 2000000;
	}

	/** Collects the non-null {@code -fileN} arguments, in order. */
	private static List<String> gatherInputFiles(Options opts) {
		List<String> files = new ArrayList<String>(5);
		for (String f : new String[] { opts.file1, opts.file2, opts.file3, opts.file4, opts.file5 }) {
			if (f != null) {
				files.add(f);
			}
		}
		return files;
	}

	/**
	 * Merges up to five on-disk training data files into a context-reduced
	 * temporary file, then dumps every "context future count" line to stdout.
	 * Aggregate count totals are reported on stderr after each pass.
	 *
	 * @param args command line, parsed into {@link Options}
	 * @throws IOException on any failure reading the inputs or writing the temp file
	 */
	public static void main(String[] args) throws IOException {
		OptionParser optParser = new OptionParser(Options.class);
		Options opts = (Options) optParser.parse(args, true);

		Experiment.initialize(opts.config);
		Experiment experiment = Experiment.getInstance();
		experiment.buildPrefixes();

		LanguageModel lm = experiment.getLM(opts.lm);

		// -file1 is optional too: skip nulls instead of passing a null
		// filename to FileInputStream (which would throw NPE).
		List<String> dataFiles = gatherInputFiles(opts);
		if (dataFiles.isEmpty()) {
			System.err.println("No input files given (-file1 .. -file5); nothing to do.");
			return;
		}

		// Temporary file that receives the merged, context-reduced data.
		File tmpFile = File.createTempFile("tmp-file", "data");
		tmpFile.deleteOnExit();
		RandomAccessFile outFile = new RandomAccessFile(tmpFile, "rw");
		try {
			outFile.getChannel().truncate(0);

			OnDiskTrainingDataWriter writer = new OnDiskTrainingDataWriter(outFile.getChannel());
			WritableTrainingData popData = writer.createData();

			// Only overt factors participate in the futures mask.
			long overtMask = Experiment.getInstance().getTupleDescription().getOvertFactorsMask();
			TrainingDataFilter filter = new MaskedFuturesTrainingDataFilter(overtMask);

			Long2IntMap counts = new Long2IntMap(20000);
			// Pass 1: aggregate multiple files into one.
			for (String fname : dataFiles) {
				FileChannel channel = new FileInputStream(fname).getChannel();
				try {
					TrainingDataReader reader = new OnDiskTrainingDataReader(channel);
					ReadableTrainingData smoothingData = new ReadableTrainingData(reader);
					TrainingDataUtil.reduceContext(smoothingData, popData, lm.getOvertOrder(), 1, filter, counts);
				} finally {
					// Close even if reduceContext throws.
					channel.close();
				}
			}
			popData.finish();

			System.err.printf("counts (1): %d\n", counts.sumValues());

			counts.clear();

			// Pass 2: re-read the merged data from the start and dump every n-gram.
			outFile.seek(0);
			TrainingDataReader reader = new OnDiskTrainingDataReader(outFile.getChannel());
			ReadableTrainingData data = new ReadableTrainingData(reader);
			while (data.hasNext()) {
				TrainingDataBlock block = data.next();

				for (ContextFuturesPair pair : block) {
					// StringBuilder avoids O(n^2) concatenation over the context words.
					StringBuilder contextString = new StringBuilder();
					for (long word : pair.getContext().data) {
						contextString.append(FactorTuple.toStringNoNull(word)).append(' ');
					}
					for (TupleCountPair tc : pair.getFutures()) {
						counts.addAndGet(tc.tuple, tc.count);
						System.out.printf("%s%s %d\n", contextString, FactorTuple.toStringNoNull(tc.tuple), tc.count);
					}
				}
			}
			System.err.printf("counts (2): %d\n", counts.sumValues());
		} finally {
			// Close before delete: deleting a still-open file fails on some platforms.
			outFile.close();
			tmpFile.delete();
		}
	}

}
