/**
 * 
 */
package edu.umd.clip.lm.programs;

import java.io.*;
import java.nio.channels.FileChannel;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.*;

import edu.berkeley.nlp.util.*;
import edu.umd.clip.jobs.Job;
import edu.umd.clip.jobs.JobGroup;
import edu.umd.clip.jobs.JobManager;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.factors.Dictionary;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.AbstractTrainingData;
import edu.umd.clip.lm.model.data.OnDiskTrainingDataWriter;
import edu.umd.clip.lm.model.data.TrainingDataWriter;
import edu.umd.clip.lm.model.data.WritableTrainingData;
import edu.umd.clip.lm.model.training.*;
import edu.umd.clip.lm.tools.HFTTrainerMDI;
import edu.umd.clip.lm.util.IO;
import edu.umd.clip.lm.util.RandomizedQueue;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class InitModel {

	/**
	 * Command-line options, populated reflectively from the {@code @Option}
	 * annotations by {@code OptionParser} in {@link #main(String[])}.
	 */
	public static class Options {
        @Option(name = "-config", required = true, usage = "Output XML config file")
		public String config;
        @Option(name = "-overt-factors", required = true, usage = "Overt factors, comma separated. (Ex.: W,M-edu.umd.clip.lm.factors.SnowballEnglishSuffix)")
		public String overtFactors;
        @Option(name = "-main-factor", required = false, usage = "The main factor (surface word). Default: W")
		public String mainFactor = "W";
        @Option(name = "-hidden-factors", required = true, usage = "Hidden factors, comma separated. Dependent tags can be specified with slash. (Ex.: T,S/T -- tags T and S, S depends on T)")
		public String hiddenFactors;
        @Option(name = "-order", required = false, usage = "The maximum order of the model. Default: 3")
		public int order = 3;

        @Option(name = "-vocab-limit", required = false, usage = "Approximate vocabulary limit, ignored if -vocab is used. (Default: 20000)")
		public int vocabLimit = 20000;
        @Option(name = "-vocab", required = false, usage = "The vocabulary file")
		public String vocab;
        
        @Option(name = "-data-format", required = false, usage = "the parser for the data. (Default: edu.umd.clip.lm.factors.FLMInputParser)")
		public String dataFormat = edu.umd.clip.lm.factors.FLMInputParser.class.getCanonicalName();
        
        @Option(name = "-data-dir", required = true, usage = "The directory to store binary data files in")
		public String dataDir;
        @Option(name = "-data", required = true, usage = "Comma-separated list of FLM data files")
		public String data;
        @Option(name = "-splits", usage = "number of data splits (default: 4)")
        public int splits = 4;
        
        @Option(name = "-jobs", usage = "number of concurrent jobs (default: 1)")
        public int jobs = 1;
        
        @Option(name = "-pruning-threshold", required = false, usage = "pruning threshold (default: 0)")
		public int pruningThreshold = 0;

        @Option(name = "-db", required = false, usage = "data storage location (default: data)")
		public String db;
        
        @Option(name = "-merge-lines", usage = "merge lines (default: false)")
        public boolean mergeLines = false;
        
        @Option(name = "-templates", usage = "a comma-separated list of template shell scripts")
        public String templates;

        @Option(name = "-randomness", usage = "add randomness to the tree growing algorithm (default: 0)")
        public float randomness = 0f;
	}
	
	// Parsed command-line options; set once in the constructor.
	private Options opts;
	// Matches "NAME-classname": an overt factor whose values are derived from the word by the named class.
	private static Pattern overtFactorRE = Pattern.compile("^([a-z]+)-([a-z.]+)$", Pattern.CASE_INSENSITIVE);
	// Matches "CHILD/PARENT": a hidden factor whose dictionaries depend on the parent factor's value.
	private static Pattern hiddenFactorRE = Pattern.compile("^([a-z]+)/([a-z]+)$", Pattern.CASE_INSENSITIVE);
	// Total number of tokens seen in the data; set by createVocabs().
	private long totalWordCount = 0;
	// The FLM input files from -data, split on commas; set by run().
	private String[] dataFiles;
	
	/**
	 * Private: instances are created only from {@link #main(String[])}.
	 *
	 * @param opts parsed command-line options, retained for the whole run
	 */
	private InitModel(Options opts) {
		this.opts = opts;
	}
	
	/**
	 * Builds a {@link FactorDescription} for every factor named by the
	 * -main-factor, -overt-factors and -hidden-factors options.
	 *
	 * Overt factors are either plain names ("W") or "NAME-classname" pairs
	 * whose values are derived from the surface word by the named
	 * {@link WordToOvertFactorInterface}. Hidden factors are plain names or
	 * "CHILD/PARENT" pairs; a dependent child keeps one dictionary per parent
	 * value.
	 *
	 * @return factor id mapped to its description
	 * @throws ClassNotFoundException if an overt-factor converter class is missing
	 * @throws InstantiationException if the converter cannot be instantiated
	 * @throws IllegalAccessException if the converter constructor is inaccessible
	 */
	private Map<String,FactorDescription> createDescriptions() throws InstantiationException, IllegalAccessException, ClassNotFoundException {
		Map<String,FactorDescription> descriptions = new HashMap<String,FactorDescription>();
		// The main (surface word) factor always exists; its dictionary is filled later by createVocabs().
		descriptions.put(opts.mainFactor, new FactorDescription(opts.mainFactor, "surface word"));
		
		String factors[] = opts.overtFactors.split(",");
		for(String factor : factors) {
			if (descriptions.containsKey(factor)) continue;
			if (factor.matches("^[a-zA-Z]+$")) {
				// Plain overt factor: values come directly from the input tokens.
				FactorDescription d = new FactorDescription(factor, factor + " factor");
				d.setDictionary(new Dictionary(1));
				descriptions.put(factor, d);
			} else {
				// "NAME-classname" form: values computed from the word by the named converter.
				Matcher matcher = overtFactorRE.matcher(factor);
				if (matcher.matches()) {
					String f = matcher.group(1);
					String classname = matcher.group(2);
					WordToOvertFactorInterface cl = (WordToOvertFactorInterface) Class.forName(classname).newInstance();
					FactorDescription d = new FactorDescription(f, f + " factor");
					d.setDictionary(new Dictionary(1));
					d.setWordToOvertFactor(cl);
					descriptions.put(f, d);
				} else {
					System.err.printf("Failed to recognize overt factor '%s'\n", factor);
				}
			}
		}
		
		factors = opts.hiddenFactors.split(",");
		for(String factor : factors) {
			if (descriptions.containsKey(factor)) continue;
			if (factor.matches("^[a-zA-Z]+$")) {
				FactorDescription d = new FactorDescription(factor, factor + " factor");
				d.setOvert(false);
				d.setDictionary(new Dictionary(1));
				descriptions.put(factor, d);
			} else {
				Matcher matcher = hiddenFactorRE.matcher(factor);
				if (matcher.matches()) {
					factor = matcher.group(1);
					String parent = matcher.group(2);
					// The parent must have been declared earlier in the -hidden-factors list.
					FactorDescription parentDesc = descriptions.get(parent);
					if (parentDesc == null) {
						System.err.printf("undefined parent hidden factor: %s\n", parent);
						continue;
					}
					FactorDescription d = new FactorDescription(factor, factor + " factor", parentDesc);
					// Pre-create dictionaries for the reserved parent values; dictionaries
					// for normal parent values are created lazily in createVocabs().
					Vector<Dictionary> dictionaries = new Vector<Dictionary>(Dictionary.FIRST_NORMAL_VALUE);
					for(int i=0; i<Dictionary.FIRST_NORMAL_VALUE; ++i) {
						dictionaries.add(new Dictionary(0));
					}
					d.setDictionaries(dictionaries);
					d.setOvert(false);
					descriptions.put(factor, d);
				} else {
					// BUGFIX: malformed hidden-factor specs used to be dropped silently;
					// report them just like unrecognized overt factors above.
					System.err.printf("Failed to recognize hidden factor '%s'\n", factor);
				}
			}
		}
		return descriptions;
	}
	
	/**
	 * Linear search for {@code item} in {@code array}.
	 *
	 * Package-private (was private) for testability; null-safe.
	 *
	 * @param item the element to look for (may be null)
	 * @param array the array to scan
	 * @return the index of the first matching element, or -1 if absent
	 */
	static final <T> int indexOf(T item, T[] array) {
		for(int idx = 0; idx < array.length; ++idx) {
			// BUGFIX: the original called item.equals() unconditionally and
			// would NPE on a null item; compare null-safely instead.
			if (item == null ? array[idx] == null : item.equals(array[idx])) return idx;
		}
		return -1;
	}
	
	/**
	 * Scans every input data file to populate the vocabulary of each factor,
	 * then locks all dictionaries and assembles them into a
	 * {@link FactorTupleDescription}.
	 *
	 * If -vocab was given, the main-factor dictionary is read from that file;
	 * otherwise word frequencies are counted and the top ~vocabLimit words
	 * are kept. Side effect: sets {@link #totalWordCount}.
	 *
	 * @param descriptions factor id -> description, as built by createDescriptions()
	 * @return the tuple description covering all factors, with locked dictionaries
	 * @throws IOException if the vocabulary file or a data file cannot be read
	 */
	@SuppressWarnings("unchecked")
	private FactorTupleDescription createVocabs(Map<String,FactorDescription> descriptions) throws IOException {
		// Parallel arrays over the factors: ids, current token's values, descriptions.
		final String factors[] = descriptions.keySet().toArray(new String[descriptions.size()]);
		final String values[] = new String[factors.length];
		final FactorDescription descs[] = new FactorDescription[factors.length];
		// Frequency counts for surface words -- only used when no -vocab file was given.
		final ConcurrentHashMap<String,AtomicLong> mainVocab = opts.vocab == null ? new ConcurrentHashMap<String,AtomicLong>(opts.vocabLimit) : null;
		
		int mainFactorIdx = -1;
		{
			// Fill the parallel arrays and locate the main (surface word) factor.
			int idx = 0;
			for(Map.Entry<String, FactorDescription> entry : descriptions.entrySet()) {
				factors[idx] = entry.getKey();
				descs[idx] = entry.getValue();
				if (entry.getKey().equals(opts.mainFactor)) {
					mainFactorIdx = idx;
				}
				++idx;
			}
		}
		
		if (mainVocab == null) {
			// Load the user-specified vocabulary file: one word per line, blanks ignored.
			BufferedReader reader = IO.getReader(IO.getInputStream(opts.vocab));
			Dictionary dict = new Dictionary(20000);
			for(String line = reader.readLine(); line != null; line = reader.readLine()) {
				line = line.trim();
				if (!line.isEmpty()) {
					dict.add(line);
				}
			}
			dict.lock();
			descs[mainFactorIdx].setDictionary(dict);
		}
		AtomicLong totalWordCount = new AtomicLong();
		for (String dataFile : dataFiles) {
			BufferedReader reader = IO.getReader(IO.getInputStream(dataFile));
			for(String line = reader.readLine(); line != null; line = reader.readLine()) {
				String tokens[] = InputParser.splitLine(line);
				totalWordCount.addAndGet(tokens.length);
				
				for(String token : tokens) {
					// Reset the per-token factor values.
					Arrays.fill(values, null);
					
					// Parse the token's factor/value pairs; factors not in `factors` are skipped.
					for(String f : FLMInputParser.factorDelimiter.split(token)) {
						Matcher matcher = FLMInputParser.factorRE.matcher(f);
						if (matcher.matches()) {
							String factor = matcher.group(1);
							String value = FLMInputParser.unquote(matcher.group(2));
							int factorIdx = indexOf(factor, factors);
							if (factorIdx == -1) continue;
							values[factorIdx] = value;
						}
					}
					// First pass: count main-factor words and grow the dictionaries of
					// parentless (non-dependent) factors.
					for(int factorIdx = 0; factorIdx < factors.length; ++factorIdx) {
						String value = values[factorIdx];
						FactorDescription desc = descs[factorIdx];
						
						if (factorIdx == mainFactorIdx) {
							if (mainVocab != null) {
								// NOTE(review): value may be null if a token lacks the main
								// factor -- ConcurrentHashMap.get(null) would throw; presumably
								// the input format guarantees its presence -- confirm.
								AtomicLong count = mainVocab.get(value);
								if (count == null) {
									count = mainVocab.putIfAbsent(value, new AtomicLong(1));
									if (count != null) {
										// inserted by someone else, increment then
										count.incrementAndGet();
									}
								} else {
									count.incrementAndGet();
								}
							}
						} else {
							FactorDescription parentDesc = desc.getParent();
							if (parentDesc == null) {
								if (value == null) {
									// A missing overt value must be derivable from the word;
									// anything else is a malformed-input hard failure.
									if(desc.getWordToOvertFactor() == null) {
										System.err.printf("No parent? token='%s', value='%s', factor=%s\n", token, value, desc.getId());
										System.err.printf("line: %s\n", line);
										System.err.printf("tokens: %s\n", Arrays.toString(tokens));
										throw new Error("whoopsie!");
									}
									value = desc.getWordToOvertFactor().wordToOvertFactor(values[mainFactorIdx]);
									values[factorIdx] = value;
								}
								Dictionary dict = desc.getDictionary();
								if (Dictionary.isUnk(dict.getId(value))) {
									dict.add(value);
								}
							}
						}
					}
					
					// Second pass: grow the per-parent-value dictionaries of dependent factors
					// (needs the parent values fixed up by the first pass).
					for(int factorIdx = 0; factorIdx < factors.length; ++factorIdx) {
						if (factorIdx == mainFactorIdx && mainVocab == null) continue;
						
						String value = values[factorIdx];
						FactorDescription desc = descs[factorIdx];
						FactorDescription parentDesc = desc.getParent();
						if (parentDesc != null) {
							int parentIdx = indexOf(parentDesc.getId(), factors);
							int parentValue = parentDesc.getDictionary().getId(values[parentIdx]);
							
							// Lazily create the child dictionary for this parent value.
							Dictionary dict = desc.getDictionary(parentValue);
							if (dict == null) {
								dict = new Dictionary(1);
								desc.setDictionary(parentValue, dict);
							}
							if (Dictionary.isUnk(dict.getId(value))) {
								dict.add(value);
							}
						}
					}
				}
			}
		}
		
		this.totalWordCount = totalWordCount.longValue();
		
		if (mainVocab != null) {
			// Build the main vocabulary from the most frequent words.
			Map.Entry<String,AtomicLong> wordCounts[] = mainVocab.entrySet().toArray(new Map.Entry[mainVocab.entrySet().size()]);
			Comparator<Map.Entry<String,AtomicLong>> cmp = new Comparator<Map.Entry<String,AtomicLong>>() {
				@Override
				public int compare(Entry<String, AtomicLong> o1,
						Entry<String, AtomicLong> o2) {
					long v1 = o1.getValue().longValue();
					long v2 = o2.getValue().longValue();
					// reverse order
					return (int) Math.signum(v2 - v1);
				}
			};
			
			Arrays.sort(wordCounts, cmp);
			ArrayList<String> vocab = new ArrayList<String>((int)(opts.vocabLimit * 1.2));
			int nrWords = 0;
			// Take the top vocabLimit words, extending past the limit only while the
			// next word's count ties the last one taken (no arbitrary cut among ties).
			while(nrWords < wordCounts.length) {
				vocab.add(wordCounts[nrWords].getKey());
				if (nrWords >= opts.vocabLimit && nrWords+1 < wordCounts.length) {
					 long prevCount = wordCounts[nrWords].getValue().longValue();
					 long nextCount = wordCounts[nrWords+1].getValue().longValue();
					 if (prevCount > nextCount) break;
				}
				++nrWords;
			}
			// Re-sort alphabetically before inserting (presumably so dictionary ids
			// are deterministic across runs -- confirm).
			String[] arrayVocab = vocab.toArray(new String[vocab.size()]);
			Arrays.sort(arrayVocab);
			Dictionary wordDict = new Dictionary(arrayVocab.length);
			for(String word : arrayVocab) {
				wordDict.add(word);
			}
			descriptions.get(opts.mainFactor).setDictionary(wordDict);
		}
		
		// make the vocabularies unmodifiable
		for(FactorDescription desc : descs) {
			FactorDescription parent = desc.getParent();
			if (parent == null) {
				desc.getDictionary().lock();
			} else {
				// Dependent factor: lock each per-parent-value dictionary.
				DictionaryIterator it = parent.getDictionary().iterator(true);
				for(; it.hasNext(); ) {
					int parentValue = it.next();
					Dictionary dict = desc.getDictionary(parentValue);
					if (dict != null) {
						dict.lock();
					}
				}
			}
		}
		
		FactorTupleDescription tupleDescription = new FactorTupleDescription(descs);
		return tupleDescription;
	}
	
	/**
	 * Randomly partitions the ids [0, total) into {@code nrSplits} disjoint groups.
	 *
	 * BUGFIX: the original relied on {@code assert(total % nrSplits == 0)} --
	 * disabled in production -- and silently dropped the last
	 * {@code total % nrSplits} items when the counts did not divide evenly.
	 * Now every item is assigned; group sizes differ by at most one.
	 * Package-private (was private) for testability.
	 *
	 * @param total number of items to distribute
	 * @param nrSplits number of groups to create
	 * @return nrSplits arrays that together contain each id in [0, total) exactly once
	 * @throws IllegalArgumentException if nrSplits is not positive
	 */
	static ArrayList<int[]> makeSplits(int total, int nrSplits) {
		if (nrSplits <= 0) {
			throw new IllegalArgumentException("nrSplits must be positive: " + nrSplits);
		}
		// Shuffle the ids so each split is a uniform random sample.
		ArrayList<Integer> shuffled = new ArrayList<Integer>(total);
		for(int i=0; i<total; ++i) {
			shuffled.add(i);
		}
		Collections.shuffle(shuffled);
		
		ArrayList<int[]> splits = new ArrayList<int[]>(nrSplits);
		int pos = 0;
		for(int split=0; split<nrSplits; ++split) {
			// The first (total % nrSplits) groups absorb one extra item each.
			int size = total / nrSplits + (split < total % nrSplits ? 1 : 0);
			int[] items = new int[size];
			for(int i=0; i<size; ++i) {
				items[i] = shuffled.get(pos++);
			}
			splits.add(items);
		}
		return splits;
	}
	
	/**
	 * Converts every FLM data file to binary format in parallel via the job
	 * manager, registers the resulting files with the experiment, and assigns
	 * them to the train/smooth/interpolate splits.
	 *
	 * @throws IOException propagated from I/O performed on the calling thread
	 */
	private void convertFLMs() throws IOException {
		{
			// Make sure opts.dataDir exists.
			// BUGFIX: mkdirs() instead of mkdir(), so missing parent directories
			// are created too instead of failing.
			File dir = new File(opts.dataDir);
			if (!dir.isDirectory()) {
				if (!dir.mkdirs()) {
					throw new Error("failed to create '"+opts.dataDir+"'");
				}
			}
		}
		Experiment.Files files = Experiment.getInstance().getFiles();
		
		final ArrayList<String> fileNames = new ArrayList<String>(dataFiles.length);
		JobManager manager = JobManager.getInstance();
		JobGroup group = manager.createJobGroup("flms");
		for(final String fname : dataFiles) {
			Runnable run = new Runnable() {

				@Override
				public void run() {
					try {
						String flm = convertFLM(fname);
						// fileNames is shared across worker threads.
						synchronized(fileNames) {
							fileNames.add(flm);
						}
					} catch (IOException e) {
						// NOTE(review): a failed conversion is only logged and the file is
						// omitted from fileNames, so the splits below silently shrink --
						// confirm this best-effort behavior is intended.
						e.printStackTrace();
					}
				}
			};
			Job job = new Job(run, "flm "+fname);
			manager.addJob(group, job);
		}
		// Block until every conversion job has finished.
		group.join();
		
		files.setDataFiles(fileNames);

		// Assign the converted files to the three data roles, opts.splits ways each.
		files.setTrainData(makeSplits(fileNames.size(), opts.splits));
		files.setSmoothData(makeSplits(fileNames.size(), opts.splits));
		files.setInterpolateData(makeSplits(fileNames.size(), opts.splits));
	}
	
	/**
	 * Converts one FLM text file into the on-disk binary training-data format.
	 *
	 * The output file is placed in opts.dataDir, named after the input's
	 * basename with any ".gz"/".flm" suffixes stripped and ".data" appended.
	 *
	 * @param fname path of the FLM text input (possibly gzipped)
	 * @return the path of the binary file written
	 * @throws IOException if reading, parsing, or writing fails
	 */
	private String convertFLM(String fname) throws IOException {
		// Basename of the input; the original used a regex ("^(?:.*/)?([^/]*)$")
		// that matches every string, so this is equivalent and simpler.
		String outfile = fname.substring(fname.lastIndexOf('/') + 1);
		if (outfile.endsWith(".gz")) {
			outfile = outfile.substring(0, outfile.length() - 3);
		}
		if (outfile.endsWith(".flm")) {
			outfile = outfile.substring(0, outfile.length() - 4);
		}
		outfile = opts.dataDir + "/" + outfile + ".data";
		
		// BUGFIX: the reader was never closed and the channel leaked if parsing
		// threw; close both deterministically.
		BufferedReader reader = IO.getReader(IO.getInputStream(fname));
		try {
			FileChannel output = new FileOutputStream(outfile).getChannel();
			try {
				TrainingDataWriter writer = new OnDiskTrainingDataWriter(output);
				WritableTrainingData data = writer.createData();
				// 3000000: batch size handed to the parser -- TODO confirm units (lines?).
				AbstractTrainingData.parse(data, opts.order, opts.order, reader, opts.jobs, 3000000, opts.mergeLines);
				data.finish();
			} finally {
				output.close();
			}
		} finally {
			reader.close();
		}
		
		return outfile;
	}
	
	/**
	 * Copies {@code template} to {@code output} line by line, replacing every
	 * occurrence of each key in {@code variables} with its value. Keys are
	 * literal strings, not regexes.
	 *
	 * @param template the template file to read
	 * @param output the file to write
	 * @param variables placeholder -> replacement map
	 * @throws IOException if reading or writing fails
	 */
	private static void processTemplate(File template, File output, Map<String,String> variables) throws IOException {
		// BUGFIX: the reader and writer were leaked if an exception occurred
		// mid-copy; close both in finally blocks.
		BufferedReader reader = IO.getReader(IO.getInputStream(template));
		try {
			BufferedWriter writer = IO.getWriter(IO.getOutputStream(output));
			try {
				for(String line = reader.readLine(); line != null; line = reader.readLine()) {
					// Apply every substitution to the line; iteration order of the map
					// is stable within a run, matching the original snapshot behavior.
					for(Map.Entry<String, String> e : variables.entrySet()) {
						line = line.replace(e.getKey(), e.getValue());
					}
					writer.write(line);
					writer.newLine();
				}
			} finally {
				writer.close();
			}
		} finally {
			reader.close();
		}
	}
	
	/**
	 * Instantiates the -templates shell scripts: each non-"FOREST" template is
	 * written once per language model (MODEL placeholder substituted), each
	 * "FOREST" template once per forest (FOREST placeholder substituted).
	 * CONFIGFILE and JOBS are substituted in all of them, and every output
	 * file is marked executable. No-op when -templates was not given.
	 *
	 * @throws IOException if a template cannot be read or an output written
	 */
	private void processTemplates() throws IOException {
		if (opts.templates == null) return;
		
		// Validate up front that every template file exists.
		String templates[] = opts.templates.split(",");
		File templateFiles[] = new File[templates.length];
		for(int i=0; i<templates.length; ++i) {
			File file = new File(templates[i]);
			if (!file.isFile()) {
				throw new Error("Can't open template file " + templates[i]);
			}
			templateFiles[i] = file;
		}
		
		HashMap<String,String> variables = new HashMap<String,String>();
		
		// Substitutions common to all templates.
		variables.put("CONFIGFILE", opts.config);
		variables.put("JOBS", Integer.toString(opts.jobs));
		
		// Per-LM pass: every template whose name does NOT contain "FOREST".
		Experiment exp = Experiment.getInstance();
		int lmNum = exp.getNumLMs();
		for(int lmid = 0; lmid < lmNum; ++lmid) {
			LanguageModel lm = exp.getLmByNumId(lmid);
			if (lm == null) continue;
			
			variables.put("MODEL", lm.getId());
			
			for(int i=0; i<templates.length; ++i) {
				String templateName = templateFiles[i].getName();
				if (templateName.contains("FOREST")) {
					// don't touch forest templates
					continue;
				}
				// The output name substitutes the model id into the template's name.
				String filename = templateName.replace("MODEL", lm.getId()); 
				File output = new File(filename);
				processTemplate(templateFiles[i], output, variables);
				output.setExecutable(true);
			}
		}
		
		// Per-forest pass: only templates whose name contains "FOREST".
		for(ForestModel forest : exp.getForests().values()) {
			// MODEL (left over from the LM pass) must not leak into forest scripts.
			variables.remove("MODEL");
			variables.put("FOREST", forest.getName());
			for(int i=0; i<templates.length; ++i) {
				String templateName = templateFiles[i].getName();
				if (templateName.contains("FOREST")) {
					// do only the forest templates
					String filename = templateName.replace("FOREST", forest.getName()); 
					File output = new File(filename);
					processTemplate(templateFiles[i], output, variables);
					output.setExecutable(true);
				}
			}			
		}
	}
	
	/**
	 * Applies hand-tuned training hyper-parameters to one language model:
	 * a count floor for models of order > 1, a minimum cost decrease for the
	 * stopping criterion, and the -randomness option for tree growing.
	 *
	 * @param lm the language model to configure
	 */
	private void tuneLM(LanguageModel lm) {
		// Unigram models get no count floor; everything higher does.
		if (lm.getOrder() > 1) {
			lm.setMIN_TOTAL_COUNT(2);
		}
		lm.setMIN_COST_DECREASE(1e-7);
		lm.setRandomness(opts.randomness);
	}
	
	public final static String[] LM_NAMES = new String[6];
	static {
		LM_NAMES[1] = "unigram";
		LM_NAMES[2] = "bigram";
		LM_NAMES[3] = "trigram";
		LM_NAMES[4] = "fourgram";
		LM_NAMES[5] = "fivegram";
	}
	
	/**
	 * The full initialization pipeline: builds factor descriptions and
	 * vocabularies, registers LMs and forests for orders 1..opts.order,
	 * writes the training shell scripts, trains the HFT, converts the data
	 * to binary, and finally saves the XML config.
	 *
	 * @throws Exception on any configuration, I/O, or reflection failure
	 */
	private void run() throws Exception {
		// LM_NAMES covers orders 1..5; an order-1-only model is not supported.
		if (opts.order < 2 || opts.order > 5) {
			System.err.println("LM order must be between 2 and 5");
			return;
		}
		
		dataFiles = opts.data.split(",");
		
		// create the hierarchy of descriptions
		Map<String,FactorDescription> descriptions = createDescriptions();
		
		// populate vocabularies and create tuple description
		FactorTupleDescription tupleDescription = createVocabs(descriptions);
		
		// initialize the experiment
		Experiment.initialize();
		Experiment exp = Experiment.getInstance();
		exp.setTupleDescription(tupleDescription);
		
		// Set up one LM per order; the forest for order o contains LMs of orders 1..o
		// (lms is kept highest-order-first via addFirst).
		LinkedList<LanguageModel> lms = new LinkedList<LanguageModel>();
		for(byte o=1; o<=opts.order; ++o) {
			String lmName = LM_NAMES[o];
			LanguageModel lm = new LanguageModel(lmName + "-lm", o, o);
			exp.addLM(lm);
			lms.addFirst(lm);
			if (lm.getOrder() > 1) {
				tuneLM(lm);
			}
			// The highest-order forest becomes the primary LM.
			String forestName = o == opts.order ? LanguageModel.PRIMARY_LM_ID : LM_NAMES[o];
			ForestModel forest = new ForestModel(forestName);
			for(LanguageModel _lm : lms) {
				forest.addLM(_lm);
			}
			exp.addForest(forest);
		}
		
		// create shell scripts to train the LM
		processTemplates();
		
		// Train the hidden-factor tree (HFT) on all data files.
		{
			
			BufferedReader[] readers = new BufferedReader[dataFiles.length];
			for(int i=0; i<dataFiles.length; ++i) {
				readers[i] = IO.getReader(IO.getInputStream(dataFiles[i]));
			}
			// NOTE(review): the readers are never closed here -- presumably trainHFT
			// consumes and releases them; confirm.
			HFTTrainerMDI.trainHFT(readers, true, null, opts.jobs);
		}
		
		exp.buildPrefixes();
		
		// convert FLM data into binary format
		convertFLMs();
		
		exp.saveConfig(opts.config);
	}
	
	/**
	 * @param args
	 * @throws Exception 
	 */
	/**
	 * Entry point: parses the command-line options, starts the job manager's
	 * daemon thread, and runs the initialization pipeline.
	 *
	 * @param args command-line arguments (see {@link Options})
	 * @throws Exception on any initialization failure
	 */
	public static void main(String[] args) throws Exception {
		OptionParser optParser = new OptionParser(Options.class);
		Options opts = (Options) optParser.parse(args, true);

		// Start the worker pool before any jobs can be queued.
		JobManager.initialize(opts.jobs);
		Thread managerThread = new Thread(JobManager.getInstance(), "Job Manager");
		managerThread.setDaemon(true);
		managerThread.start();

		new InitModel(opts).run();
	}

}
