	package geppetto.phraseHMM.phraseExtraction;


/**
 * Static utility holder for phrase-pair extraction via posterior pruning:
 * phrase pairs whose model posterior exceeds a threshold are collected into a
 * phrase table.
 *
 * <p>NOTE(review): the entire implementation below is commented out and depends
 * on project types not visible from this file ({@code PhraseTable},
 * {@code BilingualCorpus}, {@code AbstractModel}, {@code WordTrie},
 * {@code HMM}, ...). It is preserved verbatim as a record of the intended
 * algorithm until it can be restored against those types. The class name is a
 * misspelling of "PosteriorPruning" but is kept for source compatibility with
 * any existing references.
 */
public class PosteriorPrunning {

	/** Non-instantiable: this class is intended to hold only static methods. */
	private PosteriorPrunning() {
	}

	
	/*
	// Todo should receive an abstract class model. The model should be able to
	// output the word by word posteriors
	public static PhraseTable build(BilingualCorpus corpus,WordTrie sourcePhraseVocab, WordTrie foreingPhraseVocab, int maxSourcePhraseLen, int maxTargetPhraseLen) {
		PhraseTable pt = new PhraseTable(corpus,maxSourcePhraseLen,maxTargetPhraseLen,sourcePhraseVocab,foreingPhraseVocab);
		return pt;
	}

	public static void init(PhraseTable pt, BilingualCorpus corpus,
			int maxTargetDuration, int maxSourceDuration, AbstractModel model,
			double threshold) {
		long initTime, initMem;
		long endMem;
		long endTime;
		initTime = System.currentTimeMillis();
		System.gc();
		initMem = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime()
				.freeMemory())
				/ (1024 * 1024);
		for (int i = 0; i < corpus.getNumberOfTrainingSentences(); i++) {
			int[] foreingSentence = corpus.getForeignSentence(i,
					BilingualCorpus.TRAIN_CORPUS);
			int[] sourceSentence = corpus.getSourceSentence(i,
					BilingualCorpus.TRAIN_CORPUS);
			System.out.println("source "
					+ corpus.getSourceSentenceString(i,
							BilingualCorpus.TRAIN_CORPUS));
			System.out.println("Target "
					+ corpus.getForeignSentenceString(i,
							BilingualCorpus.TRAIN_CORPUS));
			ArrayList<Phrase> phrases[] = processSentence(pt, i,
					BilingualCorpus.TRAIN_CORPUS, foreingSentence,
					sourceSentence, maxTargetDuration, maxSourceDuration,
					model, threshold);
			
			pt.addPhrases(processSentence(pt, i, BilingualCorpus.TRAIN_CORPUS,
					foreingSentence, sourceSentence, maxTargetDuration,
					maxSourceDuration, model, threshold));
		}
		endTime = System.currentTimeMillis();
		System.gc();
		System.gc();
		System.gc();
		endMem = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime()
				.freeMemory())
				/ (1024 * 1024);
		System.out.println("Spent in procssing sentence Time "
				+ (endTime - initTime) / 1000 + " mem " + (endMem - initMem));
		System.out.println("Total number of distinct phrases "
				+ pt._numberOfPhrases);

		// Normalize
		initTime = System.currentTimeMillis();
		System.gc();
		initMem = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime()
				.freeMemory())
				/ (1024 * 1024);

		pt.normalizePhrases();
		endTime = System.currentTimeMillis();
		System.gc();
		System.gc();
		System.gc();
		endMem = (Runtime.getRuntime().totalMemory() - Runtime.getRuntime()
				.freeMemory())
				/ (1024 * 1024);
		System.out.println("Spent in normalizing phrases "
				+ (endTime - initTime) / 1000 + " mem " + (endMem - initMem));
	}


	public static ArrayList<Phrase>[] processSentence(PhraseTable pt,
			int sentenceNumber, byte sentenceSource, int[] foreingSentence,
			int[] sourceSentence, int maxTargetDuration, int maxSourceDuration,
			AbstractModel model, double threshold) {
		
		//+1 is for null phrases
		ArrayList<Phrase>[] phrases = new ArrayList[maxSourceDuration+1];
		for(int i = 0; i < maxSourceDuration+1; i++){
			phrases[i] = new ArrayList<Phrase>(); 
		}
		int fSize = foreingSentence.length;
		int sSize = sourceSentence.length;

		for (int foreignStartPos = 0; foreignStartPos < fSize; foreignStartPos++) {
			for (int foreingDuration = 1; foreingDuration <= maxTargetDuration; foreingDuration++) {
				int foreignEndPos = foreignStartPos + foreingDuration - 1;
				if (foreignEndPos < fSize) {
					// TODO Note target phrase is being added even if its not
					// going to contribute to any phrase pair
					int[] foreing = new int[foreingDuration];
					for (int f = 0; f < foreingDuration; f++) {
						foreing[f] = foreingSentence[foreignStartPos + f];
					}
					int targetPhraseId = pt._targetPhrasesVocab.getPhraseId(foreing);
					if(targetPhraseId == -1) continue; //Phrase does not exist
					for (int sourceStartPos = 0; sourceStartPos < sSize; sourceStartPos++) {
						for (int sourceDuration = 1; sourceDuration <= maxSourceDuration; sourceDuration++) {
							int sourceEndPos = sourceStartPos + sourceDuration
									- 1;
							if (sourceEndPos < sSize) {
								int[] source = new int[sourceDuration];
								for (int s = 0; s < sourceDuration; s++) {
									source[s] = sourceSentence[sourceStartPos
											+ s];
								}
								int sourcePhraseId = pt._sourcePhrasesVocab.getPhraseId(source);
								if(sourcePhraseId == -1)continue;
								double prob = model.getPhrasePosterior(
										sentenceNumber, sentenceSource,
										foreingSentence, sourceSentence,
										sourceStartPos, sourceEndPos,
										foreignStartPos, foreignEndPos);
								if (prob > threshold) {
									// Add phrase
									phrases[sourceDuration-1].add(new Phrase(sourcePhraseId,
											targetPhraseId, prob));
								}
							}
						}
					}
					// Add null phrase
					double prob = model.getNullPhrasePosterior(sentenceNumber,
							sentenceSource, foreingSentence, sourceSentence,
							foreignStartPos, foreignEndPos);
					if (prob > threshold) {
						// Add null phrase
						phrases[maxSourceDuration].add(new Phrase(targetPhraseId, prob));
					}
				}
			}
		}
		return phrases;
	}

	
	public static void printPhrasesWithAlignments(PhraseTable pt,
			AbstractModel model, double thres, AlignmentsSet al,
			AlignmentsSet gold, BilingualCorpus corpus, PrintStream stream)
			throws UnsupportedEncodingException {
	//	System.out.println(AlignmentEvaluator.evaluate(al, gold));
		BaseExp.printBegin(stream);

		for (int i = 0; i < al.getAlignments().size(); i++) {

			Alignment a = al.getAlignmentByPos(i);
			Alignment g = gold.getAlignmentByPos(i);
			if (a.getForeignLenght() > 10 || a.getSourceLenght() > 10)
				continue;
			ArrayList<Phrase>[] phrases = processSentence(pt, i,
					BilingualCorpus.TEST_CORPUS, corpus.getForeignSentence(i,
							BilingualCorpus.TEST_CORPUS), corpus
							.getSourceSentence(i, BilingualCorpus.TEST_CORPUS),
					3, 3, model, thres);
		//	System.out.println("Got " + phrases.size() + "phrases");
			AlignerOutputLatex.outputWithGoldPostLatex(a, g, corpus, stream,
					"Sentence nr " + i);
			stream.println("\\begin{verbatim}");
			//TODO Caution with maxphrase len if different maxsourcelen
			for(int len = 0; len < pt._maxSourcePhraseLen; len ++){
				for (int j = 0; j < phrases[len].size(); j++) {
					stream.println(new String(phrases[len].get(j).stringRep(corpus,
							pt._sourcePhrasesVocab, pt._targetPhrasesVocab).getBytes("utf8")));
				}
			}
			stream.println("\\end{verbatim}");
			stream.println("\\clearpage\\pagebreak");
		}
		stream.println("\\clearpage\\pagebreak");
		BaseExp.printEnd(stream);
		stream.close();
	}

	
	public static void main(String[] args) throws IOException {
		String corpusDescription = args[0];
		int size = Integer.parseInt(args[1]); // 100k
		int maxSentenceSize = Integer.parseInt(args[2]); // 40
		double thresh = Double.parseDouble(args[3]); // 40
		String modelDir = args[4];
		String latexDir = args[5];
		System.out.println("Corpus " + corpusDescription);
		System.out.println("Size " + size);
		System.out.println("Max Sentence size " + maxSentenceSize);
		System.out.println("MTrehs " + thresh);
		System.out.println("Model dir" + modelDir);

		BilingualCorpus corpus = BilingualCorpus.getCorpusFromFileDescription(
				corpusDescription, size, maxSentenceSize);
		modelDir = modelDir + "/" + "baseline" + "/" + corpus.getName() + "/"
				+ size + "/model/" + "/MHMM/forward";
		latexDir = latexDir + "/" + "baseline" + "/" + corpus.getName() + "/"
				+ size + "/model/" + "/MHMM/forward";

		System.out.println("Latex dir" + latexDir);
		HMM hmm = HMM.loadModel(corpus, modelDir);
		System.out.println("Starting init of phrase table");
		int maxPhraseSize =3;
		WordTrie sourcePhrasesVocab = new WordTrie(maxPhraseSize);
		sourcePhrasesVocab.addPhrasesAllSentences(corpus._trainSourceSentences);
		WordTrie foreignPhrasesVocab = new WordTrie(maxPhraseSize);
		foreignPhrasesVocab.addPhrasesAllSentences(corpus._trainForeignSentences);
		
		PhraseTable pt = PosteriorPrunning.build(corpus,sourcePhrasesVocab,foreignPhrasesVocab,3,3);
		StaticTools.createDir(latexDir);
		PrintStream printer = new PrintStream(latexDir
				+ "/posteriorPrunnig.tex");
		printPhrasesWithAlignments(pt, hmm, thresh, hmm
				.viterbiAlignments(BilingualCorpus.TEST_CORPUS), corpus
				.getGold(), corpus, printer);
	}
	*/

	

	

}
