package geppetto.cat.models;



import geppetto.cat.alignments.Alignment;
import geppetto.cat.alignments.AlignmentEvaluator;
import geppetto.cat.alignments.AlignmentsSet;
import geppetto.cat.constrains.ConstrainedProjectionStats;
import geppetto.cat.corpus.BilingualCorpus;
import geppetto.cat.models.stats.EStepStats;

import java.io.IOException;
import java.io.PrintStream;




/**
 * Implementation of IBM Model 1
 * @author javg
 *
 */
public class M1 extends AbstractModel {

	/** Expected counts accumulated during the E-step; allocated in
	 *  {@link #initializeTrain()} and released in {@link #finalizeTrain()}. */
	protected SparseCountTable _count;
	/** Translation table t(f|e); public so sibling models can reuse it directly. */
	public AbstractSparseTranslationTable _tb;
	/** Additive smoothing value used when (re)initializing the count table. */
	public double _smoothing;

	public BilingualCorpus getCorpus() {
		return _corpus;
	}

	public String getName() {
		return "IBM Model 1";
	}

	/** No-arg constructor; used only by {@link #loadModel(BilingualCorpus, String)}. */
	public M1() {

	}

	public M1(BilingualCorpus corpus) {
		this(corpus, 0);
	}

	public M1(BilingualCorpus corpus, AbstractSparseTranslationTable tb) {
		this(corpus, tb, 0);
	}

	public M1(BilingualCorpus corpus, double smoothing) {
		this(corpus, null, smoothing);
	}

	/**
	 * Main constructor.
	 *
	 * @param corpus    bilingual training corpus
	 * @param tb        translation table to reuse, or {@code null} to create a
	 *                  fresh {@link SparseTranslationTable} from the corpus
	 * @param smoothing additive smoothing applied to the count table
	 */
	public M1(BilingualCorpus corpus, AbstractSparseTranslationTable tb,
			double smoothing) {
		// Only claim we are reusing a table when one was actually supplied;
		// the delegating constructors pass null here.
		System.out.println("Started the Initialization of " + getName()
				+ (tb != null ? " reusing translation table" : ""));
		_corpus = corpus;
		_sourceVocabSize = corpus.getSourceSize();
		_foreignVocabSize = corpus.getForeignSize();
		_nSentences = corpus.getNumberOfTrainingSentences();
		if (tb == null) {
			_tb = new SparseTranslationTable(corpus);
		} else {
			_tb = tb;
		}
		_smoothing = smoothing;
		System.out.println("Ended the Initialization of " + getName());
	}

	/** Writes the model's hyper-parameters to {@code file} for reproducibility. */
	public void printStamp(PrintStream file) {
		file.println("Smooth value: " + _smoothing);
		file.println("Iterations: " + _numberTrainingIterations);
	}

	/** Persists the model (super-class metadata plus the translation table). */
	public void saveModel(String directory) {
		super.saveModel(directory);
		// NOTE(review): assumes _tb is a SparseTranslationTable; a custom
		// AbstractSparseTranslationTable passed to the constructor would
		// throw ClassCastException here — confirm no other implementations
		// are saved through this path.
		((SparseTranslationTable) _tb).saveTable(directory);
	}

	/**
	 * Loads a previously saved model from {@code directory}. Exits the JVM
	 * if the corpus description on disk does not match {@code corpus}.
	 */
	public static M1 loadModel(BilingualCorpus corpus, String directory) {
		M1 model = new M1();
		model._corpus = corpus;
		if (!corpus.checkDescription(directory)) {
			System.out.println("Corpus is not the same");
			System.exit(1);
		}
		model._tb = SparseTranslationTable.LoadTranslationTable(corpus, directory);
		model._trained = true;
		return model;
	}

	/**
	 * Caches t(f|e) for every (foreign, source) word pair of a sentence pair
	 * so the E-step does not repeatedly query the sparse table.
	 *
	 * @param f foreign sentence (word ids)
	 * @param s source sentence (word ids)
	 * @return matrix [f.length][s.length + 1]; the extra last column holds
	 *         the null-word probability for each foreign word
	 */
	public double[][] makeProbCache(int[] f, int[] s) {
		final int sSize = s.length;
		final int fSize = f.length;
		// Cache the probabilities for speed.
		double[][] probCache = new double[fSize][sSize + 1];
		for (int fi = 0; fi < fSize; fi++) {
			for (int si = 0; si < sSize; si++) {
				probCache[fi][si] = _tb.getProbability(s[si], f[fi]);
			}
			// Last column: probability of aligning f[fi] to the null word.
			probCache[fi][sSize] = _tb.getNullProbability(f[fi]);
		}
		return probCache;
	}

	/**
	 * Probability that the foreign span [startForeignIndex, endForeignIndex]
	 * is generated entirely by the null word (product of per-word null
	 * probabilities). The phrase identifiers are unused by Model 1.
	 */
	public double getNullPhrasePosterior(int phraseNumber, byte phraseSource,
			int[] foreingSentence, int[] sourceSentence, int startForeignIndex,
			int endForeignIndex) {
		double observationProb = 1;
		for (int foreignIndex = startForeignIndex; foreignIndex <= endForeignIndex; foreignIndex++) {
			observationProb *= _tb
					.getNullProbability(foreingSentence[foreignIndex]);
		}
		return observationProb;
	}

	/**
	 * Model 1 probability of the foreign span being generated by the source
	 * span: for each foreign word, sum of t(f|e) over the source span,
	 * normalized by (source span length)^(foreign span length).
	 */
	public double getPhrasePosterior(int phraseNumber, byte phraseSource,
			int[] foreingSentence, int[] sourceSentence, int startSourceIndex,
			int endSourceIndex, int startForeignIndex, int endForeignIndex) {
		double observationProb = 1;
		for (int foreignIndex = startForeignIndex; foreignIndex <= endForeignIndex; foreignIndex++) {
			double probAux = 0;
			for (int sourceIndex = startSourceIndex; sourceIndex <= endSourceIndex; sourceIndex++) {
				probAux += _tb.getProbability(sourceSentence[sourceIndex],
						foreingSentence[foreignIndex]);
			}
			observationProb *= probAux;
		}
		// Uniform alignment prior: each foreign word picks uniformly among
		// the source-span positions.
		double divider = Math.pow((endSourceIndex - startSourceIndex + 1),
				(endForeignIndex - startForeignIndex + 1));
		observationProb = observationProb / divider;
		return observationProb;
	}

	/**
	 * Builds the posterior probability of a given sentence pair. Fills
	 * {@code posteriors[s][f]} with p(a_f = s | f, e); row
	 * {@code source.length} holds the null-alignment posterior.
	 *
	 * @return the sentence's log-likelihood contribution
	 */
	public double calculatePosteriors(int[] source, int[] foreign,
			double[][] posteriors, double[][] probCache) {
		double likelihood = 0;
		for (int f = 0; f < foreign.length; f++) {
			double sum = 0.0;
			for (int s = 0; s < source.length; s++) {
				sum += probCache[f][s];
			}
			// Assume there is a null word in each sentence.
			// This might be changed to test.
			sum += probCache[f][source.length];
			assert !Double.isNaN(sum) : getName() + " calculate posterior " + ": Sum is NaN";
			if (sum != 0) { // If sum = 0 don't divide; posterior row stays 0.
				likelihood += Math.log(sum);
				for (int s = 0; s < source.length; s++) {
					double newProb = probCache[f][s] / sum;
					posteriors[s][f] = newProb;
				}
				double newNullProb = probCache[f][source.length] / sum;
				posteriors[source.length][f] = newNullProb;
			}
		}
		return likelihood;
	}

	/**
	 * One EM E-step over the training corpus: computes posteriors per
	 * sentence pair and accumulates expected counts.
	 */
	public EStepStats eStep() {
		double totalLikelihood = 0.0;
		clearCounts();
		for (int i = 0; i < _nSentences; i++) {
			// Calculate posteriors for sentence pair i.
			int[] s = _corpus.getSourceSentence(i, BilingualCorpus.TRAIN_CORPUS);
			int[] f = _corpus.getForeignSentence(i,
					BilingualCorpus.TRAIN_CORPUS);
			// Extra source row for the null word.
			double[][] posteriors = new double[s.length + 1][f.length];
			double[][] probCache = makeProbCache(f, s);
			totalLikelihood += calculatePosteriors(s, f, posteriors, probCache);
			// Add expected counts.
			addCounts(s, f, posteriors);
		}
		EStepStats d = new EStepStats();
		d.logLikelihood = totalLikelihood;
		d.numSents = _nSentences;
		return d;
	}

	/** Allocates the count table used during EM training. */
	public void initializeTrain() {
		_count = new SparseCountTable(_corpus, _smoothing);
	}

	/** Releases the count table so it can be garbage-collected after training. */
	public void finalizeTrain() {
		_count = null;
	}

	/** Resets all expected counts to the smoothing value before an E-step. */
	public void clearCounts() {
		_count.initializeToSmoothingValue();
	}

	/** Accumulates the sentence's posteriors into the expected-count table. */
	public void addCounts(int[] source, int[] foreign, double[][] posteriors) {
		for (int f = 0; f < foreign.length; f++) {
			for (int s = 0; s < source.length; s++) {
				_count.addToCount(source[s], foreign[f], posteriors[s][f]);
			}
			_count.addToNullCount(foreign[f], posteriors[source.length][f]);
		}
	}

	/**
	 * M-step: normalizes the expected counts into translation probabilities.
	 * Source words whose normalizer is (numerically) zero are skipped and
	 * reported; null-word probabilities are normalized separately.
	 */
	public void updateTranslationProbabilities(AbstractSparseTranslationTable tb,
			SparseCountTable count) {
		tb.clear();
		int numZeroNorm = 0;
		// Remember the first few offending source words for the diagnostic.
		int[] zeroNormSents = new int[5];
		for (int si = 0; si < _sourceVocabSize; si++) {
			// Iterate over all non-zero counts, divide by their sum and add
			// to the translation table.
			int[] poss = count.getNotZeroCountsByWord(si);
			double lambdaE = count.getNormalizing(si);
			assert !Double.isNaN(lambdaE) : "lambdaE is NaN";
			if (lambdaE > 1.0e-200) {
				for (int pos = 0; pos < poss.length; pos++) {
					double prob = count.getCountByIndex(poss[pos]) / lambdaE;
					tb.setProbabilityByPos(poss[pos], prob);
				}
			} else {
				if (zeroNormSents.length > numZeroNorm)
					zeroNormSents[numZeroNorm] = si;
				numZeroNorm += 1;
			}
		}
		if (numZeroNorm > 0) {
			System.out.print(getName() + " Normalizer close to zero in "
					+ numZeroNorm + " cases ");
			System.out.println(((100.0 * numZeroNorm) / _sourceVocabSize) + "%");
			System.out.println("First few are ");
			for (int i = 0; i < numZeroNorm && i < zeroNormSents.length; i++) {
				System.out.print(" " + zeroNormSents[i]);
			}
			System.out.println();
		}
		double nullNormalizer = count.getNormalizingNull();
		assert !Double.isNaN(nullNormalizer);
		for (int j = 0; j < _foreignVocabSize; j++) {
			double aux = count.getNullCounts(j);
			assert !Double.isNaN(aux) : "UpdateTranslationProbabilities: Null Counts is NaN";
			if (aux < 1.0e-200 || nullNormalizer < 1.0e-200)
				tb.setNullProbability(j, 0);
			else
				tb.setNullProbability(j, aux / nullNormalizer);
		}
	}

	/** One EM M-step: re-estimate the translation table from current counts. */
	public void mStep() {
		updateTranslationProbabilities(_tb, _count);
	}

	/** Model 1 ignores positions and lengths: probability is just t(f|e). */
	public double calculateProbability(int sourceWord, int foreignWord,
			int sourcePos, int foreignPos, int sourceLen, int foreignLen) {
		return _tb.getProbability(sourceWord, foreignWord);
	}

	/** Null-word probability; positions and lengths are ignored by Model 1. */
	public double getNullProbability(int foreignWord, int foreignPos,
			int sourceLen, int foreignLen) {
		return _tb.getNullProbability(foreignWord);
	}

	/**
	 * Viterbi decoding: aligns each foreign word to the source word with the
	 * highest posterior, unless the null posterior is at least as large (ties
	 * with null leave the word unaligned). For Model 1 the posteriors
	 * factorize per foreign word, so this per-word argmax is exact.
	 */
	public Alignment viterbiAlignment(int sentenceNumber, byte sentenceSource, boolean projectPosteriors, ConstrainedProjectionStats stats) {

		int sourceLen = _corpus.getSourceSentenceLength(sentenceNumber,
				sentenceSource);
		int foreignLen = _corpus.getForeignSentenceLength(sentenceNumber,
				sentenceSource);
		Alignment a = new Alignment(sentenceNumber, sentenceSource, sourceLen,
				foreignLen);
		int[] source = _corpus
				.getSourceSentence(sentenceNumber, sentenceSource);
		int[] foreign = _corpus.getForeignSentence(sentenceNumber,
				sentenceSource);
		double[][] posteriors = new double[sourceLen + 1][foreignLen];
		double[][] probCache = makeProbCache(foreign, source);
		calculatePosteriors(source, foreign, posteriors, probCache);
		for (int j = 0; j < foreign.length; j++) {
			int pos = 0;
			double max = 0;
			for (int i = 0; i < source.length; i++) {
				double prob = posteriors[i][j];
				if (prob > max) {
					max = prob;
					pos = i;
				}
			}
			double nullProb = posteriors[sourceLen][j];
			if (max > nullProb) {
				a.add(pos, j);
			}
		}
		return a;
	}

	/**
	 * Posterior decoding: adds every link whose posterior exceeds
	 * {@code treshhold}, and stores the full posterior matrix on the
	 * alignment for later use.
	 */
	public Alignment posteriorDecodingAlignment(int sentenceNumber,
			byte sentenceSource, float treshhold, boolean projectPosteriors, ConstrainedProjectionStats stats) {
		int sourceLen = _corpus.getSourceSentenceLength(sentenceNumber,
				sentenceSource);
		int foreignLen = _corpus.getForeignSentenceLength(sentenceNumber,
				sentenceSource);
		Alignment a = new Alignment(sentenceNumber, sentenceSource, sourceLen,
				foreignLen);
		int[] source = _corpus
				.getSourceSentence(sentenceNumber, sentenceSource);
		int[] foreign = _corpus.getForeignSentence(sentenceNumber,
				sentenceSource);
		double[][] probCache = makeProbCache(foreign, source);
		double[][] posteriors = new double[sourceLen + 1][foreignLen];
		calculatePosteriors(source, foreign, posteriors, probCache);
		for (int j = 0; j < foreign.length; j++) {
			for (int i = 0; i < source.length; i++) {
				if (posteriors[i][j] > treshhold) {
					a.add(i, j);
				}
				a._posteriors[i][j] = posteriors[i][j];
			}
		}
		return a;
	}

	/**
	 * Trains Model 1 on a corpus and reports Viterbi and posterior-decoding
	 * alignment quality against the gold alignments.
	 * Args: corpusDescription size maxSentenceSize numberIterations
	 */
	public static void main(String[] args) throws IOException {
		String corpusDescription = args[0];
		int size = Integer.parseInt(args[1]); // e.g. 100k
		int maxSentenceSize = Integer.parseInt(args[2]); // e.g. 40
		int numberIterations = Integer.parseInt(args[3]); // e.g. 5
		System.out.println("Size " + size);
		System.out.println("Max Sentence size " + maxSentenceSize);
		System.out.println("Number of iterations " + numberIterations);

		BilingualCorpus corpus = BilingualCorpus.getCorpusFromFileDescription(
				corpusDescription, size, maxSentenceSize);
		M1 m1 = new M1(corpus);
		m1.train(numberIterations, false, "");

		AlignmentsSet viter = m1.viterbiAlignments(BilingualCorpus.TEST_CORPUS);
		System.out
				.println(AlignmentEvaluator.evaluate(viter, corpus.getGold()));
		// Tune the posterior-decoding threshold on the dev set, evaluate on test.
		float tresh = m1.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS, false);
		AlignmentsSet post = m1.posteriorAlignments(
				BilingualCorpus.TEST_CORPUS, tresh, false, false);
		System.out.println("Posterior Decoding "
				+ AlignmentEvaluator.evaluate(post, corpus.getGold()));
	}

}
