/**
 * Command-line tool that converts factored-LM training text into the
 * on-disk binary training-data format.
 */
package edu.umd.clip.lm.tools;

import java.io.*;
import java.nio.channels.FileChannel;
import java.nio.charset.Charset;

import edu.berkeley.nlp.util.Option;
import edu.umd.clip.lm.model.Experiment;
import edu.umd.clip.lm.model.LanguageModel;
import edu.umd.clip.lm.model.data.AbstractTrainingData;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataWriter;
import edu.umd.clip.lm.model.data.TrainingDataWriter;
import edu.umd.clip.lm.model.data.WritableTrainingData;
import edu.umd.clip.lm.model.training.*;
import edu.umd.clip.lm.util.*;

import edu.berkeley.nlp.util.*;
/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class FLM2TrainingData {

    /** Command-line options, parsed reflectively by {@code OptionParser}. */
    public static class Options {
        @Option(name = "-input", required = false, usage = "Training data file (Default: stdin)")
        public String input;
        @Option(name = "-output", required = false, usage = "Output file (Default: stdout)")
        public String output;
        @Option(name = "-config", required = true, usage = "XML config file")
        public String config;
        @Option(name = "-jobs", usage = "number of concurrent jobs (default: 1)")
        public int jobs = 1;
        @Option(name = "-min", required = false, usage = "pruning threshold (default: 0)")
        public int threshold = 0;
        @Option(name = "-forest", required = false, usage = "LM ID to train (default: " + LanguageModel.PRIMARY_LM_ID + ")")
        public String lm = LanguageModel.PRIMARY_LM_ID;
        @Option(name = "-cache-size", usage = "maximum number of elements to keep in memory")
        public int maxCacheSize = 2000000;
    }

    /**
     * Entry point: initializes the {@code Experiment} from the XML config,
     * reads training text from {@code -input} (or stdin), and writes the
     * binary training data to {@code -output} (or stdout) via
     * {@link OnDiskTrainingDataWriter}.
     *
     * <p>Exits with status 1 if an I/O error occurs, so calling scripts can
     * detect failure.
     *
     * @param args command-line arguments; see {@link Options}
     */
    public static void main(String[] args) {
        OptionParser optParser = new OptionParser(Options.class);
        Options opts = (Options) optParser.parse(args, true);

        Experiment.initialize(opts.config);
        Experiment experiment = Experiment.getInstance();
        experiment.buildPrefixes();

        // try-with-resources closes both the reader and the output channel
        // even if parsing fails part-way through; the original code leaked
        // them on the error path and never closed the reader at all.
        try (BufferedReader reader =
                     IO.getReader(opts.input == null ? System.in : IO.getInputStream(opts.input));
             FileChannel output =
                     (opts.output == null
                             ? new FileOutputStream(FileDescriptor.out)
                             : new FileOutputStream(opts.output)).getChannel()) {
            TrainingDataWriter writer = new OnDiskTrainingDataWriter(output);
            WritableTrainingData data = writer.createData();
            AbstractTrainingData.parse(data, experiment.getLM(opts.lm), reader, opts.jobs, opts.maxCacheSize);
            data.finish();
        } catch (IOException e) {
            e.printStackTrace();
            // Non-zero exit so shell pipelines/scripts notice the failure
            // (previously the tool exited 0 even when conversion failed).
            System.exit(1);
        }
    }
}
