package geppetto.phraseHMM;


import geppetto.cat.alignments.Alignment;
import geppetto.cat.alignments.AlignmentEvaluator;
import geppetto.cat.alignments.AlignmentsSet;
import geppetto.cat.alignments.AlignmentEvaluator.Evaluation;
import geppetto.cat.common.StaticTools;
import geppetto.cat.constrains.ConstrainedProjectionStats;
import geppetto.cat.constrains.SentenceConstrainedProjectionStats;
import geppetto.cat.corpus.BilingualCorpus;
import geppetto.cat.models.AbstractModel;
import geppetto.cat.models.AbstractSparseTranslationTable;
import geppetto.cat.models.SparseCountTable;
import geppetto.cat.models.SparseTranslationTable;
import geppetto.cat.models.stats.EStepStats;
import geppetto.cat.models.stats.SentenceEStepStats;

import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;





public class IBMM1 extends AbstractModel{

	
	// Fractional counts accumulated during the E-step; released (set null) in finalizeTrain().
	protected SparseCountTable _count;
	// Translation table t(f|s); also holds the null-word distribution.
	public AbstractSparseTranslationTable _tb;
	// Additive smoothing added to every posterior when accumulating counts.
	public double _smoothing;
//	 Sentences we are currently working on
	public int[] _sourceSentenceIDS;
	public int[] _foreignSentenceIDS;
	public int _sentenceNumber;
	public byte _sentenceSource;
	// Lengths of the current source / foreign sentence.
	public int _sSize;
	public int _fSize;
	// Source length of the previously processed sentence (-1 at the start of a pass).
	public int _previousSSize;
//	Keeps the word posteriors. 
	//Position (foreing word) * index (source word); column _sSize is the null word.
	public Trellis _statePosteriors;
	//The likelihood of a sentence
	public double _scaledLikelihood=1;
	// Allocated in the constructor but not used by plain model 1 — presumably for scaling in subclasses; TODO confirm.
	public double[] _inverseLikelihoodScalors;
	public double _logLikelihood;
	// Cache for probabilities: t(f|s) for the current sentence, column _sSize = null-word prob.
	public Trellis _probCache;
	//public double probCacheNormalizer;
	/** No-argument constructor; used by {@link #loadModel(BilingualCorpus, String)}. */
	public IBMM1() {

	}

	
	/** Convenience constructor: allocates a fresh translation table for the corpus. */
	public IBMM1(BilingualCorpus corpus, double smoothing) {
		this(corpus,null,smoothing);
	}

	/**
	 * Builds an IBM model 1 over the given corpus.
	 *
	 * @param corpus    parallel training corpus
	 * @param tb        translation table to reuse, or null to allocate a fresh one
	 * @param smoothing additive smoothing applied to posteriors during counting
	 */
	public IBMM1(BilingualCorpus corpus, SparseTranslationTable tb,
			double smoothing) {
		System.out.println("Started the Initialization of " + getName());
		_corpus = corpus;
		_smoothing = smoothing;
		_sourceVocabSize = corpus.getSourceSize();
		_foreignVocabSize = corpus.getForeignSize();
		_nSentences = corpus.getNumberOfTrainingSentences();
		// Reuse the supplied table when given; otherwise start from scratch.
		_tb = (tb == null) ? new SparseTranslationTable(corpus) : tb;
		final int maxSourceLen = corpus.getMaxSourceLen();
		final int maxForeignLen = corpus.getMaxForeignLen();
		// One extra source column holds the null-word entry.
		_statePosteriors = new Trellis(maxForeignLen, maxSourceLen + 1, true);
		_probCache = new Trellis(maxForeignLen, maxSourceLen + 1);
		_inverseLikelihoodScalors = new double[maxForeignLen];
		System.out.println("Ended the Initialization of " + getName());
	}

	
	// LOAD AND SAVE CODE 
	
	/**
	 * Writes this model's configuration (smoothing and iteration count) to
	 * the given stream.
	 *
	 * @param file destination stream
	 */
	public void printStamp(PrintStream file){
		file.println("Smooth value: " + _smoothing);
		file.println("Iterations: " + _numberTrainingIterations);
	}
	
	/**
	 * Saves the model: generic metadata first (superclass), then the
	 * translation table itself.
	 *
	 * @param directory target directory
	 */
	public void saveModel(String directory) {
		super.saveModel(directory);
		SparseTranslationTable table = (SparseTranslationTable) _tb;
		table.saveTable(directory);
	}

	/**
	 * Restores a previously saved model 1 from disk. Exits the JVM when the
	 * saved corpus description does not match the given corpus.
	 *
	 * @param corpus    corpus the model was trained on
	 * @param directory directory holding the saved model
	 * @return the reconstructed, trained model
	 */
	public static IBMM1 loadModel(BilingualCorpus corpus, String directory) {
		// Guard: refuse to load a table built for a different corpus.
		if (!corpus.checkDescription(directory)) {
			System.out.println("Corpus is not the same");
			System.exit(1);
		}
		IBMM1 model = new IBMM1();
		model._corpus = corpus;
		model._tb = SparseTranslationTable.LoadTranslationTable(corpus,directory);
		model._trained = true;
		return model;
	}
	
	/** Allocates the fractional-count table before an EM run. */
	public void initializeTrain() {
		//Don't use smoothing on counts use smoothing on posteriors
		_count = new SparseCountTable(_corpus, 0);
	}
	
	/** Releases the count table and marks the model as trained. */
	public void finalizeTrain() {
		_count = null;
		_trained = true;
	}

	/** @return human-readable model name used in log output. */
	public String getName() {
		return "IBM M1";
	}
	
	/**
	 * Loads the sentence pair identified by (sentenceNumber, sentenceSource)
	 * into the per-sentence working fields.
	 *
	 * @param sentenceNumber index of the sentence pair within the corpus
	 * @param sentenceSource which sub-corpus (train/dev/test) the pair comes from
	 */
	public void initSentence(int sentenceNumber, byte sentenceSource){
		_sentenceNumber = sentenceNumber;
		_sentenceSource = sentenceSource;
		_sourceSentenceIDS = _corpus.getSourceSentence(sentenceNumber, sentenceSource);
		_foreignSentenceIDS = _corpus.getForeignSentence(sentenceNumber, sentenceSource);
		_sSize = _sourceSentenceIDS.length;
		_fSize = _foreignSentenceIDS.length;
	}
	
	/**
	 * E-step for the current sentence: builds the probability caches,
	 * computes the state posteriors and (in subclasses) projects them onto
	 * the constraint set.
	 *
	 * @return per-sentence statistics (log-likelihood plus projection stats)
	 */
	public SentenceEStepStats sentenceEStep(){
		makeCaches();
		makePosteriors(_sSize, _fSize);
		SentenceEStepStats stats = new SentenceEStepStats();
		stats.loglikelihood = _logLikelihood;
		// projectPosteriors() is a no-op stub here; subclasses override it.
		stats.pstats = projectPosteriors();
		return stats;
	}
	
	
	/** Factory for the per-iteration statistics object; subclasses may override. */
	public EStepStats createModelStats(){
		return new EStepStats();
	}
	
	
	/**
	 * Full E-step over the training corpus: runs the per-sentence E-step for
	 * every sentence, accumulating fractional counts and iteration statistics.
	 *
	 * @return aggregated E-step statistics for this iteration
	 */
	public EStepStats eStep() {
		initializeCounts();
		EStepStats stats = createModelStats();
		_previousSSize = -1;
		for (int sent = 0; sent < _nSentences; sent++) {
			initSentence(sent, BilingualCorpus.TRAIN_CORPUS);
			SentenceEStepStats sentenceStats = sentenceEStep();
			stats.add(sentenceStats, _sSize);
			_previousSSize = _sSize;
			updateFractionalCounts();
		}
		return stats;
	}

	/**
	 * Creates all the caches required for this particular model.
	 * For plain model 1 this is just the word-translation probability cache;
	 * subclasses may add more.
	 */
	public void makeCaches(){
		makeWordTranslationProbCache();
	}
	
	/**
	 * Fills _probCache with t(f|s) for every word pair of the current
	 * sentence, plus a trailing column (index _sSize) for the null source
	 * word. Entries are overwritten in place, so no clearing is needed
	 * between sentences.
	 */
	public void makeWordTranslationProbCache() {
		for (int fi = 0; fi < _fSize; fi++) {
			final int foreignWord = _foreignSentenceIDS[fi];
			for (int si = 0; si < _sSize; si++) {
				_probCache.setProb(fi, si,
						_tb.getProbability(_sourceSentenceIDS[si], foreignWord));
			}
			// Column _sSize caches the null-alignment probability.
			_probCache.setProb(fi, _sSize, _tb.getNullProbability(foreignWord));
		}
	}
	
	//No scaling for model1
	/** @return the (unscaled) sentence likelihood computed by the last E-step. */
	public double getRealLikelihood(){
		return _scaledLikelihood;
	}
	
	/**
	 * Debug helper: pretty-prints the cached t(f|s) matrix for the current
	 * sentence, with a per-row sum, per-column sums and a global sum.
	 * (The typo in the method name is kept for source compatibility.)
	 */
	void printWordTranslationProbChache() {
		System.out.println();
		final int fSize = _foreignSentenceIDS.length;
		System.out.println("foreing "
				+ java.util.Arrays.toString(_foreignSentenceIDS));
		final int sSize = _sourceSentenceIDS.length;
		System.out.println("source "
				+ java.util.Arrays.toString(_sourceSentenceIDS));
		System.out.println("Source" + sSize + "Foreign" + fSize);

		// Header row: source words, then the null column and the sum column.
		System.out.print("\t\t");
		for (int si = 0; si < sSize; si++) {
			System.out.print(_corpus.getSourceString(_sourceSentenceIDS[si])
					+ "\t\t");
		}
		System.out.print("Null\t");
		System.out.print("sum");
		System.out.println();
		double globalSum=0;
		double[] columnSum = new double[sSize+1];
		for (int fi = 0; fi < fSize; fi++) {
			System.out.print(_corpus.getForeignString(_foreignSentenceIDS[fi])
					+ "\t\t");
			double sum = 0;
			for (int si = 0; si < sSize; si++) {
				System.out.print(StaticTools.prettyPrint(_probCache.getProb(fi,
						si), "00.0E00", 8)
						+ "\t");
				sum+=_probCache.getProb(fi,si);
				columnSum[si]+=_probCache.getProb(fi,si);
			}
			// Null-word column (cached at source index sSize).
			System.out.print(StaticTools.prettyPrint(_probCache.getProb(fi,
					sSize), "00.0E00", 8)
					+ "\t");
			sum+=_probCache.getProb(fi,sSize);
			columnSum[sSize]+=_probCache.getProb(fi,sSize);
			System.out.print(StaticTools.prettyPrint(sum, "00.0E00", 8)+ " ");
			System.out.println();
			globalSum += sum;
		}
		// Footer row: column sums and the global sum.
		System.out.print("sum\t\t" );
		for (int si = 0; si < sSize+1; si++) {
			System.out.print(StaticTools.prettyPrint(columnSum[si], "00.0E00", 8)+ "\t");
		}
		System.out.print(StaticTools.prettyPrint(globalSum, "00.0E00", 8));
		System.out.println();
	}
	
	/** Resets all fractional counts to the smoothing value (0 for this model). */
	public void initializeCounts() {
		_count.initializeToSmoothingValue();
	}

	//Stubs for methods used in models that do posteriors projections
	public void clearProjectionStats(){}	
	public void printProjectionStats(){}
	public SentenceConstrainedProjectionStats projectPosteriors(){return null;}
	
	
	/** Adds the current sentence's posteriors into the count table. */
	public void updateFractionalCounts(){
		updateObservationCounts();
	}
	
	/**
	 * Accumulates the (smoothed) state posteriors of the current sentence
	 * into the fractional count table, including the null-word column.
	 */
	public void updateObservationCounts(){
		final double smooth = _smoothing;
		for (int fi = 0; fi < _fSize; fi++) {
			final int foreignWord = _foreignSentenceIDS[fi];
			for (int si = 0; si < _sSize; si++) {
				_count.addToCount(_sourceSentenceIDS[si], foreignWord,
						_statePosteriors.getProb(fi, si) + smooth);
			}
			_count.addToNullCount(foreignWord, _statePosteriors.getProb(fi, _sSize) + smooth);
		}
	}
	
	/**
	 * Computes the posteriors for the current sentence. The size arguments
	 * are unused here (fields _sSize/_fSize are read instead) but kept for
	 * subclass overrides.
	 */
	public void makePosteriors(int sSize, int fSize){
		makeStatePosteriors();
	}
	
	/**
	 * Normalizes each foreign word's cached translation probabilities into
	 * posteriors over source positions (last column = null word) and
	 * accumulates the sentence likelihood as the product of the per-word
	 * normalizers. NOTE(review): when a word's normalizer is exactly zero
	 * its row of _statePosteriors is left untouched — possibly stale from a
	 * previous sentence; this matches the original behavior.
	 */
	public void makeStatePosteriors(){
		double likelihood = 1;
		for (int fi = 0; fi < _fSize; fi++) {
			double normalizer = 0.0;
			for (int si = 0; si < _sSize; si++) {
				normalizer += _probCache.getProb(fi, si);
			}
			// Every sentence is assumed to contain an implicit null word.
			normalizer += _probCache.getProb(fi, _sSize);
			assert !Double.isNaN(normalizer) : getName() + " calclute posterior " + ": Sum is NaN";
			if (normalizer != 0) { // avoid dividing by zero
				likelihood *= normalizer;
				for (int si = 0; si < _sSize; si++) {
					_statePosteriors.setProb(fi, si, _probCache.getProb(fi, si) / normalizer);
				}
				_statePosteriors.setProb(fi, _sSize, _probCache.getProb(fi, _sSize) / normalizer);
			}
		}
		_scaledLikelihood = likelihood;
		// Underflows to -Infinity for very long sentences (likelihood == 0).
		_logLikelihood = Math.log(likelihood);
	}
	
	/** M-step: renormalizes the accumulated counts into the translation table. */
	public void mStep() {
		updateTranslationProbabilities(((SparseTranslationTable)_tb), _count);
	}


	/**
	 * M-step update of the translation table: divides each source word's
	 * non-zero fractional counts by their normalizer to obtain
	 * probabilities, and does the same for the null-word distribution.
	 * Source words whose normalizer is numerically zero keep a zero
	 * distribution and are reported on stdout.
	 *
	 * @param tb    translation table to overwrite
	 * @param count fractional counts accumulated during the E-step
	 */
	public void updateTranslationProbabilities(SparseTranslationTable tb,
			SparseCountTable count) {
		tb.clear();
		int numZeroNorm = 0;
		// Remember the first few offending source words for the report below.
		int[] zeroNormSents = new int[5];
		for (int si = 0; si < _sourceVocabSize; si++) {
			// Iterate over all non-zero counts, divide by their sum and add
			// the result to the translation table.
			int[] poss = count.getNotZeroCountsByWord(si);
			double lambdaE = count.getNormalizing(si);
			assert !Double.isNaN(lambdaE) : "lambdaE is NaN";
			if (lambdaE > 1.0e-200) {
				for (int pos = 0; pos < poss.length; pos++) {
					double prob = count.getCountByIndex(poss[pos]) / lambdaE;
					tb.setProbabilityByPos(poss[pos], prob);
				}
			} else {
				if (zeroNormSents.length > numZeroNorm)
					zeroNormSents[numZeroNorm] = si;
				numZeroNorm += 1;
			}
		}
		if (numZeroNorm > 0) {
			// BUGFIX: the original ran the count into the word "cases"
			// ("in 12cases"); a separating space was missing.
			System.out.print(getName() + " Normalizer close to zero in "
					+ numZeroNorm + " cases ");
			System.out.println(((100.0 * numZeroNorm) / _sourceVocabSize) + "%");
			System.out.println("First few are ");
			for (int i = 0; i < numZeroNorm && i < zeroNormSents.length; i++) {
				System.out.print(" " + zeroNormSents[i]);
			}
			System.out.println();
		}
		double nullNormalizer = count.getNormalizingNull();
		assert !Double.isNaN(nullNormalizer);
		for (int j = 0; j < _foreignVocabSize; j++) {
			double aux = count.getNullCounts(j);
			assert !Double.isNaN(aux) : "UpdateTranslationProbabilities: Null Counts is NaN";
			// Guard against a degenerate normalizer as well as zero counts.
			if (aux < 1.0e-200 || nullNormalizer < 1.0e-200)
				tb.setNullProbability(j, 0);
			else
				tb.setNullProbability(j, aux / nullNormalizer);
		}
	}
	
	
	/**
	 * Sanity check: verifies that every source word's translation
	 * distribution is non-negative and sums to one (words never seen, whose
	 * sum is exactly zero, are skipped), and that the null-word distribution
	 * sums to one. Exits the JVM on a hard violation.
	 */
	void sanityCheckTranslationProbs() {
		System.out.println("Performing sanity check for translation");
		for (int si = 0; si < _corpus.getSourceSize(); si++) {
			double sum = 0;
			for (int fi = 0; fi < _corpus.getForeignSize(); fi++) {
				double prob = _tb.getProbability(si, fi);
				if (prob < 0) {
					System.out.println("Translation probs negative prob");
					System.exit(-1);
				}
				sum += prob;
			}

			if (sum == 0.0) {
				// Word not seen in training; it has no distribution.
				continue;
			}
			// BUGFIX: the original asserted Math.abs(1 - sum) > 1.0E-8, i.e.
			// it fired exactly when the distribution WAS valid. The check
			// must pass when the deviation is within tolerance.
			assert Math.abs(1 - sum) <= 1.0E-8 : getName() + " translation probs " + ": Sum is not 1";
		}
		double sum = 0;
		for (int fi = 0; fi < _corpus.getForeignSize(); fi++) {
			sum += _tb.getNullProbability(fi);
		}
		if (Math.abs(1 - sum) > 1.0E-8) {
			System.out.println("Translation probs do not sum to one");
			System.exit(-1);
		}

		System.out.println("Passed sanity check for translation");
		System.out.println();
	}
	
	/**
	 * Probability that the foreign span [startForeignIndex, endForeignIndex]
	 * aligns entirely to the null word: the product of the per-word null
	 * translation probabilities.
	 */
	public double getNullPhrasePosterior(int sentenceNumber, byte sentenceSource, int[] foreingSentence, int[] sourceSentence, int startForeignIndex, int endForeignIndex) {
		double product = 1;
		for (int fi = startForeignIndex; fi <= endForeignIndex; fi++) {
			product *= _tb.getNullProbability(foreingSentence[fi]);
		}
		return product;
	}

	/**
	 * Model-1 probability of the foreign span aligning to the source span:
	 * for each foreign word the translation probabilities over the source
	 * span are summed, the per-word sums multiplied, and the result divided
	 * by |source span| ^ |foreign span| (the uniform alignment term).
	 */
	public double getPhrasePosterior(int sentenceNumber, byte sentenceSource, int[] foreingSentence, int[] sourceSentence, int startSourceIndex, int endSourceIndex, int startForeignIndex, int endForeignIndex) {
		double product = 1;
		for (int fi = startForeignIndex; fi <= endForeignIndex; fi++) {
			double wordSum = 0;
			for (int si = startSourceIndex; si <= endSourceIndex; si++) {
				wordSum += _tb.getProbability(sourceSentence[si], foreingSentence[fi]);
			}
			product *= wordSum;
		}
		final int sourceSpan = endSourceIndex - startSourceIndex + 1;
		final int foreignSpan = endForeignIndex - startForeignIndex + 1;
		return product / Math.pow(sourceSpan, foreignSpan);
	}
	
	/** Prepares caches and posteriors so the given sentence can be decoded. */
	public void initDecoding(int sentenceNumber, byte sentenceSource){
		initSentence(sentenceNumber, sentenceSource);
		_previousSSize=-1;
		makeCaches();
		makePosteriors(_sSize, _fSize);
	}
	
	/**
	 * Posterior decoding: links every (source, foreign) pair whose posterior
	 * exceeds the threshold, and records all posteriors on the alignment.
	 */
	public Alignment posteriorDecodingAlignment(int sentenceNumber, byte sentenceSource, float treshhold,boolean projectPosteriors, ConstrainedProjectionStats stats) {
		initDecoding(sentenceNumber, sentenceSource);
		if (projectPosteriors) {
			SentenceConstrainedProjectionStats sentenceStats = projectPosteriors();
			// The base-class stub returns null; only accumulate real stats.
			if (sentenceStats != null) {
				stats.add(sentenceStats);
			}
		}

		Alignment alignment = new Alignment(sentenceNumber, sentenceSource, _sSize,_fSize);
		for (int fi = 0; fi < _fSize; fi++) {
			for (int si = 0; si < _sSize; si++) {
				final double posterior = _statePosteriors.getProb(fi, si);
				if (posterior > treshhold) {
					alignment.add(si, fi);
				}
				alignment._posteriors[si][fi] = posterior;
			}
		}
		return alignment;
	}

	/**
	 * Hard decoding: links each foreign word to its most probable source
	 * position, unless the null word is at least as probable.
	 */
	public Alignment viterbiAlignment(int sentenceNumber, byte sentenceSource, boolean projectPosteriors, ConstrainedProjectionStats stats) {
		initDecoding(sentenceNumber, sentenceSource);
		if (projectPosteriors) {
			stats.add(projectPosteriors());
		}
		Alignment alignment = new Alignment(sentenceNumber, sentenceSource, _sSize,_fSize);
		for (int fi = 0; fi < _fSize; fi++) {
			int best = 0;
			double bestProb = 0;
			for (int si = 0; si < _sSize; si++) {
				final double prob = _statePosteriors.getProb(fi, si);
				if (prob > bestProb) {
					bestProb = prob;
					best = si;
				}
			}
			// Only align when a real source word strictly beats the null word.
			if (bestProb > _statePosteriors.getProb(fi, _sSize)) {
				alignment.add(best, fi);
			}
		}
		return alignment;
	}
	
	/**
	 * Command-line entry point: trains model 1 on a corpus and prints
	 * alignment evaluations. Arguments, in order: corpus description file,
	 * corpus size, max sentence length, number of EM iterations, smoothing,
	 * whether to evaluate after each iteration, and (if so) how many such
	 * iterations. The large commented block below holds past decoding
	 * experiments (bijective / symmetric M1) kept for reference.
	 *
	 * @throws IOException if the corpus cannot be read
	 */
	public static void main(String[] args) throws IOException {
		String corpusDescription = args[0];
		int size = Integer.parseInt(args[1]); // 100k
		int maxSentenceSize = Integer.parseInt(args[2]); // 40
		int numberIterations = Integer.parseInt(args[3]); // 5
		double smoothing = Double.parseDouble(args[4]);
		boolean trainWithResults = Boolean.parseBoolean(args[5]);
		int numberIterationsWithResults = Integer.parseInt(args[6]);
		// System.out.println("Corpus "+corpusName);
		System.out.println("Size " + size);
		System.out.println("Max Sentence size " + maxSentenceSize);
		System.out.println("Number of iterations " + numberIterations);
		System.out.println("Smooth " + smoothing);
		System.out.println("Train with results " + trainWithResults);
		System.out.println("Number of Iterations with results "
				+ numberIterationsWithResults);

		BilingualCorpus corpus = BilingualCorpus.getCorpusFromFileDescription(
				corpusDescription, size, maxSentenceSize);
		
		BilingualCorpus revCorpus = corpus.reverse();
		
		IBMM1 m1 = new IBMM1(corpus,smoothing);
	//	IBMM1 m1b = new IBMM1(revCorpus,smoothing);
		
		if (!trainWithResults) {
			m1.train(numberIterations,false,"");
//			m1b.train(numberIterations,false,"");
		} else {
			// Evaluate on the dev corpus after every iteration; the lists
			// hold viterbi/precision evaluations for all/rare/common words.
			ArrayList<Evaluation[]> evalsList = m1.trainWithResults(
					numberIterationsWithResults, BilingualCorpus.DEV_CORPUS,false,"");
			System.out.println("Viterbi all");
			Evaluation[] evals = evalsList.get(0);
			for (int i = 0; i < evals.length; i++) {
				System.out.println("Iter " + i + evals[i]);
			}
			System.out.println(" ----- ");
			System.out.println("Viterbi Rare");
			evals = evalsList.get(1);
			for (int i = 0; i < evals.length; i++) {
				System.out.println("Iter " + i + evals[i]);
			}
			System.out.println(" ----- ");
			System.out.println("Viterbi Common");
			evals = evalsList.get(2);
			for (int i = 0; i < evals.length; i++) {
				System.out.println("Iter " + i + evals[i]);
			}
			System.out.println(" ----- ");
			
			System.out.println("Precision all");
			 evals = evalsList.get(3);
			for (int i = 0; i < evals.length; i++) {
				System.out.println("Iter " + i + evals[i]);
			}
			System.out.println(" ----- ");
			System.out.println("Precision Rare");
			evals = evalsList.get(4);
			for (int i = 0; i < evals.length; i++) {
				System.out.println("Iter " + i + evals[i]);
			}
			System.out.println(" ----- ");
			System.out.println("Precision Common");
			evals = evalsList.get(5);
			for (int i = 0; i < evals.length; i++) {
				System.out.println("Iter " + i + evals[i]);
			}
			System.out.println(" ----- ");		
		}
		
		
		
		//To compare models
		AlignmentsSet set = m1.viterbiAlignments(BilingualCorpus.TEST_CORPUS);
		System.out.println(AlignmentEvaluator.evaluate(set, corpus.getGold()));
		
		System.out.println("Without decoding");	
//		float treshold = m1.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS,false);
//		AlignmentsSet sa2 = m1.posteriorAlignments(
//				BilingualCorpus.TEST_CORPUS, treshold,false,false);
//		Evaluation eval22 = AlignmentEvaluator.evaluate(sa2, corpus.getGold());
//		System.out.println("Forward Posterioir decoding no projection" + eval22);	
	
		
		
//		float tresholdb = m1b.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS,false);
//		AlignmentsSet sa2b = m1b.posteriorAlignments(
//				BilingualCorpus.TEST_CORPUS, tresholdb,false,false);
//		Evaluation eval22b = AlignmentEvaluator.evaluate(sa2b, revCorpus.getGold());
//		System.out.println("Backward  Posterioir decoding no projection" + eval22b);
//		
//		getCurves(corpus, BilingualCorpus.TEST_CORPUS, false, m1, "fnp");
//		getCurves(revCorpus, BilingualCorpus.TEST_CORPUS, false, m1b, "bnp");
//		
//		
//		System.out.println("With bijective decoding");
//		BijectiveM1 bm1 = new BijectiveM1(m1._corpus,(SparseTranslationTable)m1._tb,m1._smoothing,0.001,0,1000,1000);
//		BijectiveM1 bm1b = new BijectiveM1(m1b._corpus,(SparseTranslationTable)m1b._tb,m1._smoothing,0.001,0,1000,1000);
//		treshold = bm1.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS,true);
//		sa2 = bm1.posteriorAlignments(
//				BilingualCorpus.TEST_CORPUS, treshold,true,true);
//		eval22 = AlignmentEvaluator.evaluate(sa2, corpus.getGold());
//		System.out.println("Forward Posterioir decoding with projection" + eval22);
//	
//		tresholdb = bm1b.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS,true);
//		sa2b = bm1b.posteriorAlignments(
//				BilingualCorpus.TEST_CORPUS, tresholdb,true,true);
//		eval22b = AlignmentEvaluator.evaluate(sa2b, revCorpus.getGold());
//		System.out.println("Backward  Posterioir decoding with projection" + eval22b);
//	
//		
//		getCurves(corpus, BilingualCorpus.TEST_CORPUS, true,bm1, "fbp");
//		getCurves(revCorpus, BilingualCorpus.TEST_CORPUS, true, bm1b, "bbp");
//		
//		System.out.println("With symmetricDecoding decoding");
//		SymmetricM1 sm1 = new SymmetricM1(corpus,revCorpus,(SparseTranslationTable)m1._tb,(SparseTranslationTable)m1b._tb,m1._smoothing,0.001,0.001,1000,1000);
//		SymmetricM1 sm1b = sm1.getBackwardModel();
//		
//		treshold = sm1.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS,true);
//		sa2 = sm1.posteriorAlignments(
//				BilingualCorpus.TEST_CORPUS, treshold,true,true);
//		eval22 = AlignmentEvaluator.evaluate(sa2, corpus.getGold());
//		System.out.println("Forward Posterioir decoding with projection" + eval22);
//	
//		tresholdb = sm1b.tuneTreshholdAER(BilingualCorpus.DEV_CORPUS,true);
//		sa2b = sm1b.posteriorAlignments(
//				BilingualCorpus.TEST_CORPUS, tresholdb,true,true);
//		eval22b = AlignmentEvaluator.evaluate(sa2b, revCorpus.getGold());
//		System.out.println("Backward  Posterioir decoding with projection" + eval22b);
//		
//		getCurves(corpus, BilingualCorpus.TEST_CORPUS, true, sm1, "fsp");
//		getCurves(revCorpus, BilingualCorpus.TEST_CORPUS, true, sm1b, "bsp");
	}
	
	//TODO move to common place
	/**
	 * Prints a precision/recall curve for posterior decoding: decodes once
	 * with all posteriors recorded, then re-thresholds at 100 evenly spaced
	 * values, printing "grepString threshold P: x R: y" per step.
	 *
	 * @param corpus            corpus providing the gold alignments
	 * @param sentenceSource    which sub-corpus to decode
	 * @param projectPosteriors whether to project posteriors before decoding
	 * @param model             model to decode with
	 * @param grepString        tag prepended to each output line for grepping
	 */
	public static void getCurves(BilingualCorpus corpus, byte sentenceSource, boolean projectPosteriors, AbstractModel model, String grepString){
		int[] sentences = new int[corpus.getNumSentences(sentenceSource)];
		for (int i = 0; i < sentences.length; i++) {
			sentences[i] = i;
		}
		// Decode once at threshold 1 so every alignment carries posteriors.
		AlignmentsSet predicted = model.posteriorAlignments(sentences,
				sentenceSource, 1f , projectPosteriors,false);
		for(Alignment a : predicted._alignments){
			a.resetPoints();
		}
		int divisions = 100;
		for (int i = 1; i <= divisions; i++) {
			model.predictAux(predicted, 1f * i / divisions);
			float[] results = AlignmentEvaluator.calculateMeasures(predicted,
					corpus.getAlignments(sentences, sentenceSource));
			
			// Clear the points so the next threshold starts from scratch.
			for(Alignment a : predicted._alignments){
				a.resetPoints();
			}
			System.out.println(grepString + " " + 1f * i / divisions + " P: " + results[0] + " R: " + results[1]);
		}
	}
	
	/** No resources to release for plain model 1. */
	public void close() {}


	
}
