/**
 * 
 */
package edu.umd.clip.lm.programs;

import java.io.IOException;
import java.net.*;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

import com.sleepycat.je.DatabaseException;
import com.sleepycat.je.Environment;

import edu.berkeley.nlp.util.*;
import edu.umd.clip.jobs.*;
import edu.umd.clip.lm.factors.*;
import edu.umd.clip.lm.factors.Dictionary;
import edu.umd.clip.lm.model.*;
import edu.umd.clip.lm.model.data.Context;
import edu.umd.clip.lm.model.decoding.*;
import edu.umd.clip.lm.model.training.*;
import edu.umd.clip.lm.model.training.ContextClusterStorage.ContextClusters;

import edu.umd.clip.lm.storage.*;
import edu.umd.clip.lm.storage.AbstractProbTreeStorage.Key;
import edu.umd.clip.lm.storage.remote.*;
import edu.umd.clip.lm.util.LRUCache;
import edu.umd.clip.lm.util.tree.BinaryTree;

/**
 * @author Denis Filimonov <den@cs.umd.edu>
 *
 */
public class GenerateText {
	/** Command-line options (parsed by {@link OptionParser}). */
	public static class Options {
		@Option(name = "-config", required = true, usage = "XML config file")
		public String config;
		@Option(name = "-jobs", required = false, usage = "number of concurrent jobs (default: 1)")
		public int jobs = 1;
		@Option(name = "-forest", required = false, usage = "LM ID to train (default: " + LanguageModel.PRIMARY_LM_ID + ")")
		public String lm = LanguageModel.PRIMARY_LM_ID;
		@Option(name = "-debug", usage = "decoder debug level (default: 0)")
		public int debug = 0;
		@Option(name = "-host", usage = "remote storage hostname or IP address (default: use local storage)")
		public String host = null;
		// FIX: usage text said "remove storage port".
		@Option(name = "-port", usage = "remote storage port (default: 2332)")
		public int port = 0;
		// FIX: usage text said "approxiate".
		@Option(name = "-words", usage = "approximate number of words to generate (default: 1000)")
		public int words = 1000;
		@Option(name = "-jerboa", required = false, usage = "use Jerboa storage (default: false)")
		public boolean useJerboa = false;
		@Option(name = "-compact", required = false, usage = "use Compact storage (default: false)")
		public boolean useCompact = false;
		@Option(name = "-bdb", required = false, usage = "use Berkeley DB storage (default: false)")
		public boolean useBDB = false;
	}

	// Backoff chain of LMs: lms[0] is the selected primary model, followed by
	// its backoff models in order (populated in main()).
	static LanguageModel lms[];
	// The last (at most two) models of the backoff chain; used to pre-order
	// candidate words by a lower-order model's probability (see makeWordList()).
	static LanguageModel boLMs[];
	// The primary language model selected via the -forest option.
	static LanguageModel lm;
	// Probability-tree storage backing the primary model's decoder.
	static AbstractProbTreeStorage storage;
	// LRU cache: backoff-context clusters -> vocabulary ordered by decreasing
	// backoff-model probability.
	static WordListCache boContextMap;
	
	/**
	 * LRU cache mapping backoff-context clusters to the full overt vocabulary
	 * ordered by decreasing probability under the backoff models (the list is
	 * computed lazily by {@link GenerateText#makeWordList} on a cache miss).
	 */
	static class WordListCache extends LRUCache<ContextClusters, long[]> {

		/**
		 * @param hardCapacity hard capacity bound forwarded to {@link LRUCache}
		 * @param softCapacity soft capacity bound forwarded to {@link LRUCache}
		 */
		public WordListCache(int hardCapacity, int softCapacity) {
			super(hardCapacity, softCapacity);
		}

		/* (non-Javadoc)
		 * @see edu.umd.clip.lm.util.LRUCache#loadItem(java.lang.Object)
		 */
		// Cache-miss handler: compute the probability-ordered word list.
		@Override
		protected long[] loadItem(ContextClusters key) {
			return makeWordList(key);
		}
		
	}
	
	/**
	 * Computes the entire overt vocabulary ordered by decreasing probability
	 * under the backoff models ({@link #boLMs}) for the given context clusters.
	 * For each candidate word, probability trees are fetched from storage in a
	 * bundle and combined by the primary decoder; the words are kept sorted by
	 * probability via binary-search insertion and finally reversed.
	 *
	 * @param clusters context clusters of the backoff models
	 * @return all overt factor tuples (packed bits), most probable first
	 */
	@SuppressWarnings("unchecked")
	private static long[] makeWordList(ContextClusters clusters) {
		final Experiment exp = Experiment.getInstance();
		final int nrWords = allOvertFactors.length;
		long words[] = new long[nrWords];
		double probs[] = new double[nrWords];
		
		final int lmSequence[] = exp.getLmIdSequence(lm);
		final int lmIds[] = new int[boLMs.length];
		for(byte i=0; i<boLMs.length; ++i) {
			lmIds[i] = boLMs[i].getIdNum();
		}
		
		for(int pos = 0; pos < nrWords; ++pos) {
			long word = allOvertFactors[pos];
			
			// One storage request per backoff LM for this (cluster, word) pair.
			RequestBundle<Key,OnDiskCompactProbTree> bundle = new RequestBundle<Key,OnDiskCompactProbTree>(storage);
			BinaryTree<HistoryTreePayload> requestNodes[] = new BinaryTree[lmSequence.length];
			for(byte i=0; i<boLMs.length; ++i) {
				LanguageModel boLM = boLMs[i];
				requestNodes[boLM.getIdNum()] = boLM.getDecodingRuntime().getNode(clusters.getClusters()[i]);
				
				Key key = new Key(boLMs[i].getIdNum(), clusters.getClusters()[i], word);
				bundle.request(key);
			}
			try {
				bundle.sync();
			} catch (InterruptedException e) {
				// FIX: was silently swallowed; preserve the thread's interrupt
				// status so callers can observe the interruption.
				Thread.currentThread().interrupt();
			}
			
			OnDiskCompactProbTree probTree = lm.getDecoder().computeProbTree(lmIds, requestNodes, word, bundle.getResults(), null);
			
			// Insert (prob, word) keeping probs[0..pos) sorted ascending.
			double prob = probTree == null ? 0 : probTree.getProb();
			int idx = Arrays.binarySearch(probs, 0, pos, prob);
			if (idx < 0) {
				idx = -idx - 1;
			}
			if (idx < pos) {
				System.arraycopy(probs, idx, probs, idx+1, pos-idx);
				System.arraycopy(words, idx, words, idx+1, pos-idx);
			}
			probs[idx] = prob;
			words[idx] = word;
		}
		// Reverse so the most probable word comes first.
		long w[] = new long[nrWords];
		for(int i=0; i<nrWords; ++i) {
			w[i] = words[nrWords-1-i];
		}
		return w;
	}
	
	// Total number of words successfully sampled, across all worker threads.
	static AtomicLong nrWords = new AtomicLong();
	// Total number of candidate words examined before a sample was accepted;
	// nrWordsTried / nrWords gives the average tries per generated word.
	static AtomicLong nrWordsTried = new AtomicLong();
	
	/**
	 * Samples the next word tuple given the history context.
	 *
	 * Draws a point uniformly in [0,1) and walks the candidate words in
	 * decreasing backoff-model probability order (so likely words are tried
	 * first), accumulating each word's probability until the point is covered;
	 * hidden factors are then sampled proportionally to their probabilities in
	 * the word's probability tree.
	 *
	 * @param ctx history context (previous order-1 tuples)
	 * @return packed bits of the sampled tuple (overt word | hidden factors),
	 *         or the end-of-sentence tuple if no word covered the point
	 */
	private static long generateWord(Context ctx) {
		Experiment exp = Experiment.getInstance();
		FactorTupleDescription desc = exp.getTupleDescription();

		int clusters[] = new int[lms.length];
		for(int i=0; i<lms.length; ++i) {
			clusters[i] = lms[i].context2cluster(ctx);
		}
		final int lmSequence[] = exp.getLmIdSequence(lm);
		final int lmIds[] = new int[lms.length];
		for(byte i=0; i<lms.length; ++i) {
			lmIds[i] = lms[i].getIdNum();
		}

		// Clusters of the backoff LMs are the tail of the full cluster array.
		int boClusters[] = new int[boLMs.length];
		for(byte i=0; i<boLMs.length; ++i) {
			boClusters[i] = clusters[lms.length - boLMs.length + i];
		}
		
		// The words in decreasing probability order according to a lower order model
		long wordOrder[] = boContextMap.getItem(new ContextClusters(boClusters));
		
		Random rnd = new Random();

		double total = 0;
		double point = rnd.nextDouble();
		
		for(int pos = 0; pos < wordOrder.length; ++pos) {
			long word = wordOrder[pos];
			
			RequestBundle<Key,OnDiskCompactProbTree> bundle = new RequestBundle<Key,OnDiskCompactProbTree>(storage);
			BinaryTree<HistoryTreePayload> requestNodes[] = new BinaryTree[lmSequence.length];
			for(byte i=0; i<lms.length; ++i) {
				LanguageModel boLM = lms[i];
				requestNodes[boLM.getIdNum()] = boLM.getDecodingRuntime().getNode(clusters[i]);
				
				Key key = new Key(lms[i].getIdNum(), clusters[i], word);
				bundle.request(key);
			}
			try {
				bundle.sync();
			} catch (InterruptedException e) {
				// FIX: was silently swallowed; preserve the interrupt status.
				Thread.currentThread().interrupt();
			}
			
			OnDiskCompactProbTree probTree = lm.getDecoder().computeProbTree(lmIds, requestNodes, word, bundle.getResults(), null);
			
			if (probTree == null) continue;
			
			double prob = probTree.getProb();
			
			if (prob + total >= point) {
				nrWords.incrementAndGet();
				nrWordsTried.addAndGet(pos + 1);
				
				// Sample hidden factors proportionally to their probabilities.
				// FIX: was Math.random(); use the local RNG for consistency and
				// to avoid contention on the shared Math.random() generator.
				double factorPoint = rnd.nextDouble();
				double factorTotal = 0;
				for(int i = 0; i<probTree.getSize(); ++i) {
					factorTotal += probTree.getProbabilities()[i];
					if (factorTotal >= factorPoint) {
						long hiddenFactors = desc.unpackHiddenFactorsFromInt(probTree.getCompactHiddenFactors()[i]);
						return word | hiddenFactors;
					}
				}
			} else {
				total += prob; 
			}
		}
		// No word covered the sampled point; end the sentence.
		System.err.printf("nothing found? total = %g, nrWords = %d\n", total, wordOrder.length);
		return desc.createEndTuple();
	}
	
	// Number of word tuples emitted so far; worker threads stop once it
	// reaches the -words target.
	private static final AtomicInteger wordCount = new AtomicInteger();
	// Packed bits of every overt factor tuple (the candidate vocabulary).
	private static long allOvertFactors[];
	
	/**
	 * Generates one random sentence: pads with order-1 start tuples, then
	 * repeatedly samples the next tuple from the preceding context until the
	 * end-of-sentence marker is drawn. Each emitted word bumps
	 * {@link #wordCount}.
	 *
	 * @param lm the language model whose order determines the context length
	 * @return the generated sentence, including the leading start tuples and
	 *         the trailing end tuple
	 */
	private static ArrayList<FactorTuple> generateSentence(LanguageModel lm) {
		Experiment exp = Experiment.getInstance();
		FactorTupleDescription tupleDesc = exp.getTupleDescription();
		byte order = lm.getOrder();
		byte mainFactor = tupleDesc.getMainFactorIndex();

		ArrayList<FactorTuple> result = new ArrayList<FactorTuple>(100);
		FactorTuple start = new FactorTuple(tupleDesc.createStartTuple());
		for (int pad = order - 1; pad > 0; --pad) {
			result.add(start);
		}

		boolean reachedEnd = false;
		while (!reachedEnd) {
			// Context = the last order-1 tuples generated so far.
			Context history = new Context(order);
			int base = result.size() - (order - 1);
			for (byte slot = 0; slot < order - 1; ++slot) {
				history.data[slot] = result.get(base + slot).getBits();
			}
			FactorTuple generated = new FactorTuple(generateWord(history));
			result.add(generated);
			wordCount.incrementAndGet();
			reachedEnd = Dictionary.isEnd(generated.getValue(mainFactor));
		}
		return result;
	}
	
	/**
	 * Entry point: parses options, initializes LM decoding and the backoff
	 * chain, then spawns {@code -jobs} worker threads that generate and print
	 * random sentences until roughly {@code -words} words have been produced.
	 *
	 * @param args command-line arguments; see {@link Options}
	 * @throws DatabaseException on Berkeley DB storage errors
	 * @throws IOException if the configuration or model data cannot be read
	 * @throws ClassNotFoundException if a serialized model class is missing
	 */
	public static void main(String[] args) throws DatabaseException, IOException, ClassNotFoundException {
		OptionParser optParser = new OptionParser(Options.class);
		final Options opts = (Options) optParser.parse(args, true);

		LMDecodingOptions lmOpts = new LMDecodingOptions();
		lmOpts.config = opts.config;
		lmOpts.debug = opts.debug;
		if (opts.jobs > 0) lmOpts.jobs = opts.jobs;
		if (opts.host != null) lmOpts.host = opts.host;
		if (opts.port > 0) lmOpts.port = opts.port;
		if (opts.lm != null) lmOpts.forest = opts.lm;

		// Storage backend selection; later flags take precedence.
		if (opts.host != null && opts.port > 0) lmOpts.storage = LMDecodingOptions.Storage.REMOTE;
		if (opts.useJerboa) lmOpts.storage = LMDecodingOptions.Storage.JERBOA;
		if (opts.useBDB) lmOpts.storage = LMDecodingOptions.Storage.BDB;
		if (opts.useCompact) lmOpts.storage = LMDecodingOptions.Storage.COMPACT;

		LanguageModel.initDecoding(lmOpts);

		Experiment experiment = Experiment.getInstance();

		{
			// Collect the packed bits of every overt (surface) factor tuple;
			// this is the candidate vocabulary for generation.
			Map<FactorTuple,FactorTuple> factors = experiment.getTupleDescription().getAllOvertFactors();
			allOvertFactors = new long[factors.size()];
			int i = 0;
			for (FactorTuple word : factors.keySet()) {
				allOvertFactors[i++] = word.getBits();
			}
		}

		lm = experiment.getLM(opts.lm);
		storage = lm.getDecoder().getStorage();

		{
			// Walk the backoff chain starting from the selected LM.
			ArrayList<Integer> lmIds = new ArrayList<Integer>();
			LanguageModel boLM = lm;
			while (boLM != null) {
				lmIds.add(boLM.getIdNum());
				if (boLM.getBackoffLM() == null) break;
				boLM = experiment.getLM(boLM.getBackoffLM());
			}

			lms = new LanguageModel[lmIds.size()];
			for (int i = 0; i < lmIds.size(); ++i) {
				lms[i] = experiment.getLmByNumId(lmIds.get(i));
			}

			// The last (at most two) models of the chain pre-order candidate words.
			boLMs = new LanguageModel[Math.min(2, lmIds.size() - 1)];
			for (int i = 0; i < boLMs.length; ++i) {
				boLMs[i] = lms[lms.length - boLMs.length + i];
			}
		}

		boContextMap = new WordListCache(2000, 5000);

		Runnable run = new Runnable() {
			@Override
			public void run() {
				while (wordCount.intValue() < opts.words) {
					ArrayList<FactorTuple> sentence = generateSentence(lm);
					StringBuilder sb = new StringBuilder();
					sb.append(sentence.get(0).toStringNoNull());
					for (int i = 1; i < sentence.size(); ++i) {
						sb.append(' ');
						sb.append(sentence.get(i).toStringNoNull());
					}
					sb.append("\n");
					System.out.print(sb.toString());

					long tries = nrWordsTried.longValue();
					long words = nrWords.longValue();
					// FIX: guard against ArithmeticException — nrWords can still
					// be 0 if generateWord never accepted a sample.
					if (words > 0) {
						System.err.printf("%d words generated, %d tries per word\n", words, tries / words);
					}
				}
			}
		};

		for (int i = 0; i < opts.jobs; ++i) {
			new Thread(run).start();
		}
	}

}
