package geppetto.phraseTable;

import geppetto.cat.common.StaticTools;
import geppetto.cat.corpus.BilingualCorpus;
import geppetto.cat.models.AbstractSparseTranslationTable;
import geppetto.cat.models.M1;
import geppetto.phraseHMM.WordTrie;
import geppetto.phraseHMM.lexicalWeighting.AbstractLexicalWeightingCalculator;
import geppetto.phraseHMM.lexicalWeighting.MosesLexicalWeightingCalculator;
import geppetto.phraseHMM.lexicalWeighting.UniformLexicalWeightingCalculator;
import geppetto.phraseHMM.phraseExtraction.GeneralPhraseExtraction;
import geppetto.phraseHMM.phraseExtraction.HistogramGlobalPrunning;
import geppetto.phraseHMM.phraseExtraction.phraseAccepter.AlwaysAccept;
import geppetto.phraseProbability.PhraseProbabilityCalculator;
import geppetto.phraseProbability.smoothing.DiscountSmoothingProbabilityCalc;
import geppetto.phraseScorer.DirectionalModelPosterior;
import geppetto.phraseTable.phrase.Phrase;
import geppetto.phraseTable.phrase.feature.PenaltyFeature;
import geppetto.phraseTable.phrase.feature.ProbabilityFeature;
import geppetto.phraseTable.phrase.feature.global.AbstractGlobalFeatureCalc;
import geppetto.phraseTable.prunning.global.AbstractGlobalPhrasePrunner;
import gnu.trove.TDoubleProcedure;
import gnu.trove.TIntDoubleHashMap;
import gnu.trove.TIntIntHashMap;
import gnu.trove.TIntObjectHashMap;
import gnu.trove.TObjectProcedure;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;



/**
 * Phrase table mapping source phrases to target phrases, with counts,
 * features and listener hooks for feature calculation and pruning.
 *
 * @author javg
 *
 * TODO: too many overlapping methods for inserting phrases and counts; consolidate them.
 */
public class PhraseTable {
	
	// Parallel corpus the phrases were extracted from.
	public BilingualCorpus _corpus;
	
	// Phrase vocabularies (tries) for the source and target language.
	public WordTrie _sourcePhrasesVocab;
	public WordTrie _targetPhrasesVocab;
	/**
	 * Contains all existing phrases.
	 * It is an array indexed by source length and then target length; entry [s][t]
	 * holds phrases of source length s+1 and target length t+1 (insertPhrase
	 * decrements the 1-based lengths before indexing).
	 * For each length pair there is a hash table indexed by source phrase id, and
	 * each of those holds a hash table of phrases indexed by target (foreign) phrase id.
	 */
	public TIntObjectHashMap<TIntObjectHashMap<Phrase>> _phraseTable[][];
	// Inverse index: same Phrase objects keyed first by target phrase id, then by source phrase id.
	public TIntObjectHashMap<TIntObjectHashMap<Phrase>> _phraseTableInv[][];

	
	/**
	 * Sum of null-phrase counts, indexed by target length - 1.
	 */
	public double _nullSum[];
	
	// Phrases aligned to null, indexed by target length - 1 and then target phrase id.
	public TIntObjectHashMap<Phrase>[] _nullPhrases;

	public int _numberOfPhrases = 0;	// total distinct phrase pairs, including null phrases
	public int _maxSourcePhraseLen;
	public int _maxTargetPhraseLen;

	// Set by normalizeCounts(); histogram buckets live in _numPhrasesWithCounts[1..4].
	public boolean countNormalized = false;	
	public int[] _numPhrasesWithCounts = new int[]{0,0,0,0,0};
	
	// Indicates whether the phrase table is normalized or not (see normalizePhrases()).
	public boolean normalized = false;
	protected LinkedList<PhraseTableListenner> resourceListenners = new LinkedList<PhraseTableListenner>();
	protected LinkedList<AbstractGlobalFeatureCalc> featureListenners = new LinkedList<AbstractGlobalFeatureCalc>();
	protected LinkedList<AbstractGlobalPhrasePrunner> prunnerListenners = new LinkedList<AbstractGlobalPhrasePrunner>();
	
	/**
	 * Builds an empty phrase table for the given corpus and length limits,
	 * pre-allocating the per-length hash tables and null-phrase structures.
	 *
	 * @param corpus parallel corpus the phrases come from
	 * @param maxSourcePhraseLen maximum source phrase length in words
	 * @param maxTargetPhraseLen maximum target phrase length in words
	 * @param sourcePhrasesVocab trie with the source phrase vocabulary
	 * @param foreignPhrasesVocab trie with the target (foreign) phrase vocabulary
	 */
	public PhraseTable(BilingualCorpus corpus, int maxSourcePhraseLen, int maxTargetPhraseLen, WordTrie sourcePhrasesVocab, WordTrie foreignPhrasesVocab) {
		_corpus = corpus;
		_maxSourcePhraseLen = maxSourcePhraseLen;
		_maxTargetPhraseLen = maxTargetPhraseLen;
		_sourcePhrasesVocab = sourcePhrasesVocab;
		_targetPhrasesVocab = foreignPhrasesVocab;
		_phraseTable = new TIntObjectHashMap[maxSourcePhraseLen][maxTargetPhraseLen];
		_phraseTableInv = new TIntObjectHashMap[maxSourcePhraseLen][maxTargetPhraseLen];
		_nullPhrases = new TIntObjectHashMap[maxTargetPhraseLen];
		_nullSum = new double[maxTargetPhraseLen];
		for(int targetLen = 0; targetLen < maxTargetPhraseLen; targetLen++){
			for(int sourceLen = 0; sourceLen < maxSourcePhraseLen; sourceLen++){
				_phraseTable[sourceLen][targetLen] = new  TIntObjectHashMap<TIntObjectHashMap<Phrase>>();
				_phraseTableInv[sourceLen][targetLen] = new  TIntObjectHashMap<TIntObjectHashMap<Phrase>>();
			}
			_nullPhrases[targetLen] = new TIntObjectHashMap<Phrase>();
			_nullSum[targetLen] = 0;
		}
		
	}
	
	/**
	 * Compacts the backing trove hash tables to reclaim unused capacity.
	 * Should be called after bulk insertion is finished.
	 * Now also compacts the inverse index and the outer (per-source / per-target)
	 * maps, which the original version skipped.
	 */
	public void compact(){
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			_nullPhrases[targetLen].compact();
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrases = _phraseTable[sourceLen][targetLen];
				int keys[] = phrases.keys();
				for(int i = 0; i < keys.length; i++){
					phrases.get(keys[i]).compact();
				}
				phrases.compact();
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesInv = _phraseTableInv[sourceLen][targetLen];
				int invKeys[] = phrasesInv.keys();
				for(int i = 0; i < invKeys.length; i++){
					phrasesInv.get(invKeys[i]).compact();
				}
				phrasesInv.compact();
			}
		}
	}

	
	/**
	 * Serializes the table to directory + "phraseTablebin.gz" (gzip-compressed
	 * binary format readable by loadFromFile). Exits the JVM on I/O failure.
	 * Uses try-with-resources so the stream is closed even if writing fails
	 * (the original leaked the stream on exception).
	 *
	 * @param directory output directory; expected to end with a path separator
	 */
	public void saveTableZip(String directory){
		String outFilename = directory + "phraseTablebin.gz";
		try (DataOutputStream out = new DataOutputStream(new GZIPOutputStream(
				new FileOutputStream(outFilename)))) {
			saveTableAux(out);
		} catch (IOException e) {
			e.printStackTrace();
			System.out.println("Error saving Phrase Table");
			System.exit(1);
		}
		
	}
	
	
	
	/**
	 * Writes the table to the given stream: max source/target lengths, number of
	 * phrases, then every phrase pair followed by every null phrase (per target
	 * length), each via Phrase.toSaveFormat. Exits the JVM on I/O failure.
	 * Key arrays are hoisted out of the loops: trove's keys() allocates a fresh
	 * array on every call, so the original's keys()[i]-per-iteration pattern was
	 * accidentally quadratic.
	 *
	 * @param out stream to write to; caller is responsible for closing it
	 */
	public void saveTableAux(DataOutputStream out){
		try {
			System.out.println("------------Saving Phrase Table");
			out.writeInt(_maxSourcePhraseLen);
			out.writeInt(_maxTargetPhraseLen);
			out.writeInt(_numberOfPhrases);
			// Save all phrases; sums are not persisted (recomputed on load).
			for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
				for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
					TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesBySource = _phraseTable[sourceLen][targetLen];
					int[] sourceKeys = phrasesBySource.keys();
					for(int s = 0; s < sourceKeys.length; s++){
						TIntObjectHashMap<Phrase> phrases = phrasesBySource.get(sourceKeys[s]);
						int[] targetKeys = phrases.keys();
						for(int t = 0; t < targetKeys.length; t++){
							phrases.get(targetKeys[t]).toSaveFormat(out);
						}
					}
				}
				// Save all null phrases for this target length.
				int[] nullKeys = _nullPhrases[targetLen].keys();
				for(int n = 0; n < nullKeys.length; n++){
					_nullPhrases[targetLen].get(nullKeys[n]).toSaveFormat(out);
				}
			}
		} catch (IOException e) {
			e.printStackTrace();
			System.out.println("Error saving Phrase Table");
			System.exit(1);
		}
		System.out.println("------------Saving Phrase Table END");
	}

	
	/**
	 * Package-private no-arg constructor used by loadFromFile, which fills in
	 * all fields itself after reading the saved table header.
	 */
	PhraseTable(){
		
	}
	
	/**
	 * Loads a phrase table previously written by saveTableZip from
	 * directory + "phraseTablebin.gz". After reading, the table is compacted and
	 * normalized (normalizePhrases). Exits the JVM if the file is missing or
	 * unreadable.
	 *
	 * @param corpus parallel corpus the table refers to
	 * @param sourcePhrasesVocab source phrase vocabulary; must match the one used when saving
	 * @param targetPhrasesVocab target phrase vocabulary; must match the one used when saving
	 * @param directory directory containing the saved table; expected to end with a path separator
	 * @return the loaded, compacted and normalized phrase table
	 */
	public static PhraseTable loadFromFile(BilingualCorpus corpus, WordTrie sourcePhrasesVocab, WordTrie targetPhrasesVocab, String directory){
		PhraseTable pt = new PhraseTable();
		pt._corpus = corpus;
		pt._sourcePhrasesVocab = sourcePhrasesVocab;
		pt._targetPhrasesVocab = targetPhrasesVocab;
		
		System.out.println("------------Loading Table");
		String inFilename = directory + "phraseTablebin.gz";
		try {
			DataInputStream data_in = new DataInputStream(new GZIPInputStream(
					new FileInputStream(inFilename)));
			
			 
			pt._maxSourcePhraseLen = data_in.readInt();
			pt._maxTargetPhraseLen = data_in.readInt();
			// Allocate the same structures the normal constructor builds.
			pt._phraseTable = new TIntObjectHashMap[pt._maxSourcePhraseLen][pt._maxTargetPhraseLen];
			pt._phraseTableInv = new TIntObjectHashMap[pt._maxSourcePhraseLen][pt._maxTargetPhraseLen];
			pt._nullPhrases = new TIntObjectHashMap[pt._maxTargetPhraseLen];
			pt._nullSum = new double[pt._maxTargetPhraseLen];
			for(int targetLen = 0; targetLen < pt._maxTargetPhraseLen; targetLen++){
				for(int sourceLen = 0; sourceLen < pt._maxSourcePhraseLen; sourceLen++){
					pt._phraseTable[sourceLen][targetLen] = new  TIntObjectHashMap<TIntObjectHashMap<Phrase>>();
					pt._phraseTableInv[sourceLen][targetLen] = new  TIntObjectHashMap<TIntObjectHashMap<Phrase>>();
				}
				pt._nullPhrases[targetLen] = new TIntObjectHashMap<Phrase>();
				pt._nullSum[targetLen] = 0;
			}
			int numberPhrases = data_in.readInt();
			for(int i = 0; i < numberPhrases; i++){
				Phrase p = Phrase.fromSaveFormat(data_in);
				// Null phrases are marked with a source id of -1 in the save format.
				if(p._sourcePhraseID == -1){
					pt.insertNullPhrase(p);
				}else{
					pt.insertPhrase(p, sourcePhrasesVocab.getPhraseIndexesById(p._sourcePhraseID).length-1, targetPhrasesVocab.getPhraseIndexesById(p._targetPhraseID).length-1);
				}
			}
			data_in.close();
		} catch (FileNotFoundException e) {
			System.out.println("Translation Table does not exits");
			System.exit(1);
		} catch (IOException e) {
			System.out.println("Error reading TranslationTable" + e.toString());
			System.exit(1);
		}
		System.out.println("------------Loading Table END");
		pt.compact();
		pt.normalizePhrases();
		return pt;
	}
	
	
	/**
	 * Structural equality check against another phrase table. Note this is an
	 * overload, not an override of Object.equals (hashCode is unaffected).
	 * The original compared _nullSum with != and _nullPhrases with the array's
	 * own equals(), both of which are reference comparisons and made two
	 * distinct-but-identical tables compare unequal; both are now compared by
	 * content. A _maxTargetPhraseLen check was also added.
	 *
	 * @param pt table to compare with; may be null
	 * @return true if both tables hold the same limits, sums and phrase maps
	 */
	public boolean equals(PhraseTable pt){
		if(pt == null){
			return false;
		}
		if(pt._maxSourcePhraseLen != _maxSourcePhraseLen
				|| pt._maxTargetPhraseLen != _maxTargetPhraseLen){
			return false;
		}
		if(pt._numberOfPhrases != _numberOfPhrases){
			return false;
		}
		// Content comparison of the per-length null sums (the original used !=,
		// which compares array references).
		if(!Arrays.equals(_nullSum, pt._nullSum)){
			return false;
		}
		
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			// Element-wise comparison; equals() on the arrays themselves is reference equality.
			if(!_nullPhrases[targetLen].equals(pt._nullPhrases[targetLen])){
				return false;
			}
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
								
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrases = _phraseTable[sourceLen][targetLen];
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> otherPhrases = pt._phraseTable[sourceLen][targetLen];
				if(!phrases.equals(otherPhrases))return false;
			}
		}
		return true;
	}
	
	/**
	 * Bulk-inserts the phrases from a per-length array of phrase lists into this
	 * table via insertPhrase. Null phrases are not handled here.
	 * NOTE(review): insertPhrase decrements the length arguments internally, so
	 * the indices passed here are treated as 1-based lengths — confirm the
	 * caller's array layout matches.
	 *
	 * @param pt phrase lists indexed by source length and target length
	 */
	public void addPhrases(ArrayList<Phrase>[][] pt) {
		for (int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++) {
			for (int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++) {
				for (Phrase phrase : pt[sourceLen][targetLen]) {
					insertPhrase(phrase, sourceLen, targetLen);
				}
			}
		}
	}
	
	/**
	 * Rescales every phrase-pair count by the minimum observed count and fills
	 * the _numPhrasesWithCounts histogram (buckets 1..4 hold pairs whose
	 * rescaled count falls below 2, 3, 4 and 5 times the minimum respectively).
	 * Sets countNormalized when done.
	 */
	public void normalizeCounts() {
		System.out.println("normalizing counts");
		// First pass: find the smallest and largest counts in the table.
		double minCounts = 9999;
		double maxCounts = 0;
		for (int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++) {
			for (int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++) {
				for (int sourceKey : _phraseTable[sourceLen][targetLen].keys()) {
					TIntObjectHashMap<Phrase> byTarget = _phraseTable[sourceLen][targetLen].get(sourceKey);
					for (int targetKey : byTarget.keys()) {
						double c = byTarget.get(targetKey).getCounts();
						minCounts = Math.min(minCounts, c);
						maxCounts = Math.max(maxCounts, c);
					}
				}
			}
		}
		System.out.println("the min count was " + minCounts + " and was updated to " + minCounts / minCounts);
		System.out.println("the max count was " + maxCounts + " and was updated to " + maxCounts / minCounts);

		// Bucket upper bounds after rescaling (pre-scaling counts of 2, 3, 4 and 5).
		double min1 = 2 / minCounts;
		double min2 = 3 / minCounts;
		double min3 = 4 / minCounts;
		double min4 = 5 / minCounts;

		// Second pass: rescale each count and assign it to a histogram bucket.
		for (int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++) {
			for (int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++) {
				for (int sourceKey : _phraseTable[sourceLen][targetLen].keys()) {
					TIntObjectHashMap<Phrase> byTarget = _phraseTable[sourceLen][targetLen].get(sourceKey);
					for (int targetKey : byTarget.keys()) {
						Phrase phrase = byTarget.get(targetKey);
						phrase.setCounts(phrase.getCounts() / minCounts);
						double scaled = phrase.getCounts();
						if (scaled < min1) {
							_numPhrasesWithCounts[1]++;
						} else if (scaled < min2) {
							_numPhrasesWithCounts[2]++;
						} else if (scaled < min3) {
							_numPhrasesWithCounts[3]++;
						} else if (scaled < min4) {
							_numPhrasesWithCounts[4]++;
						}
					}
				}
			}
		}
		countNormalized = true;
	}

	/**
	 * Normalize all phrases so that p(f|e) sums to one. At the end leave counts
	 * at zero. Note that this should normalize p(f|e,sd,td) since this is what
	 * the model is expecting. Exits the JVM if called on an already-normalized table.
	 *
	 * TODO: improve the access patterns; this is very slow and called several times.
	 *
	 * NOTE(review): for null phrases, "prob" is added twice at slot 0 — once with
	 * counts/nullSum and once with 0; the second add looks like it either
	 * overwrites the first or duplicates the feature, depending on addFeature's
	 * semantics — confirm intended behavior. The regular-phrase prob features are
	 * commented out; only the penalty feature is added there.
	 */
	public void normalizePhrases() {
		if(normalized){
			System.out.println("Phrase table already normalized");
			System.exit(-1);
		}

		System.out.println("Normalizing phrases");
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			double nullSum = _nullSum[targetLen];
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int keysT[] = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						Phrase p = phrases.get(keys[j]);
						//p._features.addFeature("prob",new ProbabilityFeature(_probCalc.calculateProbablity(this, p, sourceLen, targetLen)));
						//p._features.addFeature("rprob",new ProbabilityFeature(_probCalc.calculateProbablityReverse(this, p, sourceLen, targetLen)));
						p._features.addFeature("pen",new PenaltyFeature(), 4);
					}
				}
			}
			int[] keys = _nullPhrases[targetLen].keys();
			for (int i = 0; i < keys.length; i++) {
				Phrase p = _nullPhrases[targetLen].get(keys[i]);
				p._features.addFeature("prob",new ProbabilityFeature(p.getCounts() / nullSum) ,0);
				p._features.addFeature("prob",new ProbabilityFeature(0), 0);
			}
			// Counts are considered consumed once probabilities are in place.
			_nullSum[targetLen] = 0;
		}
		normalized = true;
	}
	
	/**
	 * Debug function that checks whether the phrase table is properly normalized;
	 * exits the JVM when a probability mass deviates from 1 by more than 1e-2.
	 *
	 * NOTE(review): the probability accumulation lines are commented out, so
	 * prob always stays 0 and the check will call System.exit(-1) as soon as any
	 * phrase group (or any target length) exists. This function is effectively
	 * broken until Phrase exposes a probability getter again — do not call it.
	 */
	public void checkIfNormalized(){
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int keysT[] = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keysS = phrases.keys();
					if(keysS.length > 0){
						double prob = 0;
						for (int j = 0; j < keysS.length; j++) {
							Phrase p = phrases.get(keysS[j]);
							//prob += p.getProb();
						}
						if(Math.abs(1 - prob) > 1.0E-2){
							System.out.println("Prob not normalized has " + prob);
							System.exit(-1);
						}
					}
				}
			}
			double prob = 0;
			int[] keys = _nullPhrases[targetLen].keys();
			for (int i = 0; i < keys.length; i++) {
				Phrase p = _nullPhrases[targetLen].get(keys[i]);
				//prob += p.getProb();
			}
			if(Math.abs(1 - prob) > 1.0E-2){
				System.out.println("Prob of null phrases not normalized has " + prob);
				System.exit(-1);
			}
		}
	}
	/*
	public void checkIfUniform(PhraseProbabilityCalculator prob){
		for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
			for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
				for(int targetId : _targetPhraseSums[sourceLen][targetLen].keys()){
					String targetPhrase = Phrase.getTargetString(_corpus, _sourcePhrasesVocab, _targetPhrasesVocab, targetId);
					double sum = 0;
					LinkedList<Integer> searchedWords = new LinkedList<Integer>();
					for(int sourceL = 0; sourceL < _maxSourcePhraseLen; sourceL++){
						for(int targetL = 0; targetL < _maxTargetPhraseLen; targetL++){
							for(int sourceId : _sourcePhraseSums[sourceL][targetL].keys()){
								if(searchedWords.contains(sourceId)) continue;
								TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceL][targetL];
								if(phrasesT.containsKey(sourceId) && phrasesT.get(sourceId).contains(targetId)){
									sum += prob.calculateProbablityReverse(this, phrasesT.get(sourceId).get(targetId), sourceL, targetLen);
								}
								else{
									sum += prob.calculateProbablityReverse(this, new Phrase(sourceId, targetId, 0), sourceL, targetLen);
								}
								searchedWords.add(sourceId);
						}
					}
					}
					System.out.println(targetPhrase + " -> " +sum);
				}
			}
		}
	}*/
	
	/**
	 * Computes the fraction of total phrase-pair counts contributed by pairs
	 * whose count is strictly greater than the given limit.
	 *
	 * @param limit count threshold (strict)
	 * @return ratio in [0,1]; 0 for an empty table (the original returned NaN from 0/0)
	 */
	public double getPercentageOverCount(double limit){
		double total = 0;
		double count = 0;
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int keysT[] = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						double c = phrases.get(keys[j]).getCounts();
						if(c > limit){
							count += c;
						}
						total += c;
					}
				}
			}
		}
		// Guard the empty-table case instead of dividing 0 by 0.
		if(total == 0){
			return 0;
		}
		return count/total;
	}
	
	/**
	 * Return the prob of a null phrase. No backing off is being made
	 * @param target 
	 * @return
	 */
	/*public double getNullProb(int[] target) {
		int targetPhraseId = _targetPhrasesVocab.getPhraseId(target);
		if(targetPhraseId == -1 ) return 0;
		return _nullPhrases[target.length].get(targetPhraseId).getProb();
	}
	
	public double getNullProb(int targetPhraseId) {
		Phrase p = _nullPhrases[_targetPhrasesVocab.getPhraseIndexesById(targetPhraseId).length-1].get(targetPhraseId);
		if(p==null) return 0;
		return p.getProb();
	}*/

	/**
	 * Gets the prob of a source sentence. No backoff os used
	 * @param source
	 * @param target
	 * @return
	 */
	/*public double getPhraseProb(int[] source, int[] target) {
		int sourcePhraseId = _sourcePhrasesVocab.getPhraseId(source);
		int targetPhraseId = _targetPhrasesVocab.getPhraseId(target);
		return getPhraseProb(sourcePhraseId, targetPhraseId, source.length, target.length);
	}*/
	
	/**
	 * Looks up the phrase pair for the given source and target word-id sequences.
	 * NOTE(review): for unknown phrases the vocab appears to return -1 (see the
	 * -1 checks elsewhere in this file); a -1 id simply misses in the hash lookup
	 * and yields null — confirm WordTrie.getPhraseId's contract.
	 *
	 * @param source source phrase as word ids
	 * @param target target phrase as word ids
	 * @return the stored Phrase, or null if the pair is not in the table
	 */
	public Phrase getPhrase(int[] source, int[] target) {
		int sourcePhraseId = _sourcePhrasesVocab.getPhraseId(source);
		int targetPhraseId = _targetPhrasesVocab.getPhraseId(target);
		return getPhrase(sourcePhraseId, targetPhraseId, source.length, target.length);
	}

	/*
	public void addToCounts(Phrase p, double counts, int sourceLen) {
		// System.out.println("Adding to counts " + p._sourcePhraseID + " " +
		// p._targetPhraseID + " " + counts);
		p.addCounts(counts);		
		double prevCounts = _sourcePhraseSums[sourceLen].get(p._sourcePhraseID);
		_sourcePhraseSums[sourceLen].put(p._sourcePhraseID, prevCounts + counts);
	}
	*/

	/**
	 * Adds counts to the null phrase for the given target word-id sequence and
	 * updates the per-length null sum.
	 * NOTE(review): throws NullPointerException if this target phrase was never
	 * inserted as a null phrase — callers must guarantee prior insertion.
	 *
	 * @param target target phrase as word ids
	 * @param counts counts to add
	 */
	public void addToNullCounts(int[] target, double counts) {
		int targetPhraseId = _targetPhrasesVocab.getPhraseId(target);
		_nullPhrases[target.length-1].get(targetPhraseId).addCounts(counts);
		_nullSum[target.length-1] += counts;
	}

	/*
	public void addToNullCounts(Phrase p, double counts) {
		p.addCounts(counts);
		_nullSum += counts;
	}
	*/

	/**
	 * Looks up a phrase pair by its vocabulary ids and the (1-based) phrase sizes.
	 *
	 * @param sourcePhraseId source phrase id in the source vocabulary
	 * @param targetPhraseId target phrase id in the target vocabulary
	 * @param sourceSize source phrase length in words
	 * @param foreingSize target phrase length in words
	 * @return the stored Phrase, or null if the pair is not in the table
	 */
	public Phrase getPhrase(int sourcePhraseId, int targetPhraseId, int sourceSize, int foreingSize){
		TIntObjectHashMap<Phrase> bySource =
				_phraseTable[sourceSize - 1][foreingSize - 1].get(sourcePhraseId);
		if (bySource == null) {
			return null;
		}
		return bySource.get(targetPhraseId);
	}

	/*
	public Phrase getPhrase(int[] sourceSentenceIDS, int[] targetSentenceIDS,
			int sourceIndexStart, int sourceIndexEnd, int targetIndexStart,
			int targetIndexEnd) {
		int[] sourcePhrase = new int[sourceIndexEnd - sourceIndexStart + 1];
		int[] targetPhrase = new int[targetIndexEnd - targetIndexStart + 1];
		// Build phrases
		for (int i = sourceIndexStart; i <= sourceIndexEnd; i++) {
			try {
				sourcePhrase[i - sourceIndexStart] = sourceSentenceIDS[i];
			} catch (ArrayIndexOutOfBoundsException e) {
				System.out.println("source len " + sourceSentenceIDS.length);
				System.out.println("source index start " + sourceIndexStart);
				System.out.println("source duration " + sourceIndexStart);
				System.out.println("source index end " + sourceIndexEnd);

				throw e;
			}
		}
		for (int i = targetIndexStart; i <= targetIndexEnd; i++) {
			try {
				targetPhrase[i - targetIndexStart] = targetSentenceIDS[i];
			} catch (ArrayIndexOutOfBoundsException e) {
				System.out.println("target len " + targetSentenceIDS.length);
				System.out.println("target index start " + targetIndexStart);
				System.out.println("target index end " + targetIndexEnd);
				throw e;
			}
		}
		return getPhrase(sourcePhrase, targetPhrase);
	}
	*/
	/**
	 * Looks up the null phrase covering the target span
	 * [targetIndexStart, targetIndexEnd] (both inclusive).
	 *
	 * @param targetSentenceIDS target sentence as word ids
	 * @param targetIndexStart first index of the span (inclusive)
	 * @param targetIndexEnd last index of the span (inclusive)
	 * @return the stored null Phrase, or null if the target phrase is unknown
	 */
	public Phrase getNullPhrase(int[] targetSentenceIDS, int targetIndexStart,
			int targetIndexEnd) {

		int[] targetPhrase = new int[targetIndexEnd - targetIndexStart + 1];
		for (int i = targetIndexStart; i <= targetIndexEnd; i++) {
			try {
				targetPhrase[i - targetIndexStart] = targetSentenceIDS[i];
			} catch (ArrayIndexOutOfBoundsException e) {
				System.out.println("target len " + targetSentenceIDS.length);
				System.out.println("target index start " + targetIndexStart);
				System.out.println("target index end " + targetIndexEnd);
				throw e;
			}
		}
		int targetPhraseId = _targetPhrasesVocab.getPhraseId(targetPhrase);
		// _nullPhrases is indexed by length-1 (see insertNullPhrase and
		// addToNullCounts); the original indexed by length, which is off by one
		// and overflows the array for phrases of maximum length.
		return _nullPhrases[targetPhrase.length - 1].get(targetPhraseId);
	}

	/*
	public Phrase getPhrase(int[] source, int[] target) {
		int sourcePhraseId = _sourcePhrasesVocab.getPhraseId(source);
		int targetPhraseId = _targetPhrasesVocab.getPhraseId(target);
		if (sourcePhraseId == -1 || targetPhraseId == -1)
			return null;
		TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[source.length-1];
		if (phrasesT.contains(sourcePhraseId)) {
			TIntObjectHashMap<Phrase> phrases = phrasesT.get(sourcePhraseId);
			if (phrases.contains(targetPhraseId)) {
				Phrase p = phrases.get(targetPhraseId);
				return p;
			} else {
				return null;
			}
		} else {
			return null;
		}
	}
	*/

/*	public double getPhraseProb(int sourcePhraseId, int targetPhraseId, int sourceSize, int targetSize) {
		if (sourcePhraseId == -1 || targetPhraseId == -1)
			return 0;
		
		int sourceLen  = sourceSize -1;
		int targetLen  = targetSize -1;
		TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
		if (phrasesT.contains(sourcePhraseId)) {
			TIntObjectHashMap<Phrase> phrases = phrasesT.get(sourcePhraseId);
			if (phrases.contains(targetPhraseId)) {
				Phrase p = phrases.get(targetPhraseId);
				return p.getProb();
			} else {
				return 0;
			}
		} else {
			return 0;
		}
	}*/

	/**
	 * Inserts (or merges counts into) a null phrase identified by its target
	 * phrase id, and updates the null sum for that target length.
	 *
	 * @param foreignPhraseId target phrase id
	 * @param counts counts to record
	 * @param targetLen 0-based target length index into _nullPhrases
	 */
	public void insertNullPhrase(int foreignPhraseId, double counts, int targetLen) {
		Phrase existing = _nullPhrases[targetLen].get(foreignPhraseId);
		if (existing == null) {
			_nullPhrases[targetLen].put(foreignPhraseId, new Phrase(foreignPhraseId, counts));
			_numberOfPhrases++;
		} else {
			existing.addCounts(counts);
		}
		_nullSum[targetLen] += counts;

	}

	// TODO: the trie should expose a method returning the phrase size directly.
	/**
	 * Inserts a null phrase, merging its counts into an existing entry when one
	 * is already stored, and updates the null sum for the phrase's target length.
	 *
	 * @param p null phrase to insert (its target id determines the length bucket)
	 */
	public void insertNullPhrase(Phrase p) {
		int targetLen = _targetPhrasesVocab.getPhraseIndexesById(p._targetPhraseID).length-1;
		Phrase existing = _nullPhrases[targetLen].get(p._targetPhraseID);
		if (existing == null) {
			_nullPhrases[targetLen].put(p._targetPhraseID, p);
			_numberOfPhrases++;
		} else {
			existing.addCounts(p.getCounts());
		}
		_nullSum[targetLen] += p.getCounts();
	}
	
	/**
	 * Convenience overload: wraps the ids and counts in a Phrase and inserts it.
	 *
	 * @param sourcePhraseId source phrase id
	 * @param targetPhraseId target phrase id
	 * @param counts initial counts for the pair
	 * @param sourceLen source phrase length (1-based; insertPhrase converts to the array index)
	 * @param targetLen target phrase length (1-based)
	 */
	public void insertPhrase(int sourcePhraseId, int targetPhraseId,
			double counts, int sourceLen, int targetLen) {
		insertPhrase(new Phrase(sourcePhraseId,
						targetPhraseId, counts), sourceLen, targetLen);
	}

	
	/**
	 * Inserts a phrase pair into both the direct (source -> target) and inverse
	 * (target -> source) tables. If the pair already exists, its counts and
	 * reordering info are merged into the stored Phrase instead of inserting a
	 * duplicate. Registered listeners are notified of the inserted pair.
	 *
	 * @param p phrase pair to insert
	 * @param sourceLen source phrase length (1-based; decremented to index the table)
	 * @param targetLen target phrase length (1-based)
	 */
	public void insertPhrase(Phrase p, int sourceLen, int targetLen) {
		sourceLen--;
		targetLen--;
		TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
		TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesTInv = _phraseTableInv[sourceLen][targetLen];
		TIntObjectHashMap<Phrase> phrasesBySource = phrasesT.get(p._sourcePhraseID);
		TIntObjectHashMap<Phrase> phrasesByTarget = phrasesTInv.get(p._targetPhraseID);
		if(phrasesBySource != null){
			Phrase phrase = phrasesBySource.get(p._targetPhraseID);
			if(phrase != null){
				// Pair already present: merge counts and reordering statistics.
				phrase.addCounts(p.getCounts());
				phrase.get_reorderingInfo().merge(p.get_reorderingInfo());
			}else{
				phrasesBySource.put(p._targetPhraseID,p);
				_numberOfPhrases++;
			}
		}else{
			phrasesBySource = new TIntObjectHashMap<Phrase>();
			phrasesBySource.put(p._targetPhraseID,p);
			phrasesT.put(p._sourcePhraseID, phrasesBySource);
			_numberOfPhrases++;
		}
		if(phrasesByTarget != null){
			if(phrasesByTarget.get(p._sourcePhraseID) == null){
				phrasesByTarget.put(p._sourcePhraseID, p);
			}
			// else: already indexed; both tables share the same Phrase object,
			// so the count merge above is visible through the inverse index too.
		}else{
			phrasesByTarget = new TIntObjectHashMap<Phrase>();
			phrasesByTarget.put(p._sourcePhraseID, p);
			// BUG FIX: the freshly created inner map was never stored in the
			// inverse table, leaving _phraseTableInv empty for unseen target ids.
			phrasesTInv.put(p._targetPhraseID, phrasesByTarget);
		}
		notifyNewPhrasePair(p);
	}
	
	/**
	 * Runs every registered global feature calculator over every phrase pair,
	 * wrapping each computation with the before/after listener hooks, then
	 * notifies listeners that all phrases have been processed.
	 * Key arrays are hoisted: trove's keys() allocates a fresh array each call,
	 * and the original re-invoked it in every loop condition and index.
	 */
	public void calculateFeatures(){
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int[] keysT = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						Phrase p = phrases.get(keys[j]);
						beforeCalculateFeature(p);
						calculatePhraseFeatures(p);
						afterCalculateFeature(p);
					}
				}
			}
		}
		notifyAllPhrasesLoaded();
	}
	
	/**
	 * Asks the registered pruners about every phrase pair and flags the rejected
	 * ones (pruned phrases are only marked, never removed from the table), then
	 * notifies listeners that all phrases have been processed.
	 * Key arrays are hoisted out of the loops; the original re-invoked keys()
	 * (which allocates a new array) on every iteration.
	 */
	public void prunePhrasePairs(){
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int[] keysT = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						Phrase p = phrases.get(keys[j]);
						beforePruneCheck(p);
						if(shouldPrunePhrase(p)){
							afterPruneCheck(p, true);
							p.pruned = true;
						}
						else{
							afterPruneCheck(p, false);
						}
					}
				}
			}
		}
		notifyAllPhrasesLoaded();
	}

	/**
	 * Prints the entire phrase table: every phrase pair, then the null phrases
	 * and null sum per target length, and finally the total number of phrases.
	 * Key arrays are hoisted; the original called keys() (a fresh array each
	 * time) in every loop condition and index expression.
	 *
	 * @param out stream to print to
	 */
	public void print(PrintStream out) {
		out.println("Phrase Table");
		out.println("target phrase-id || source Phrase-source-id  : counts : prob");
		for(int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++){
			for(int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++){
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int[] keysT = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						out.println(phrases.get(keys[j]).stringRep(_corpus,
								_sourcePhrasesVocab, _targetPhrasesVocab));
					}
				}
			}
			int[] nullKeys = _nullPhrases[targetLen].keys();
			for (int i = 0; i < nullKeys.length; i++) {
				out.println(_nullPhrases[targetLen].get(nullKeys[i]).stringRep(
						_corpus, _sourcePhrasesVocab, _targetPhrasesVocab));
			}
			out.println("sum" + _nullSum[targetLen]);
		}
		out.println("Printed " + _numberOfPhrases + " phrases");
	}
	
	/**
	 * Prints every phrase pair and null phrase in its simple string
	 * representation. Key arrays are hoisted (keys() allocates per call) and the
	 * original's broken indentation is fixed.
	 *
	 * @param out stream to print to
	 */
	public void printSimple(PrintStream out) {
		for (int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++) {
			for (int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++) {
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int[] keysT = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						out.println(phrases.get(keys[j]).stringSimpleRep(_corpus,
								_sourcePhrasesVocab, _targetPhrasesVocab));
					}
				}
			}
			int[] nullKeys = _nullPhrases[targetLen].keys();
			for (int i = 0; i < nullKeys.length; i++) {
				out.println(_nullPhrases[targetLen].get(nullKeys[i]).stringSimpleRep(
						_corpus, _sourcePhrasesVocab, _targetPhrasesVocab));
			}
		}
	}
	
	/**
	 * Prints the table in Moses format with a default PrintSpecification.
	 *
	 * @param out stream to print to
	 */
	public void printMosesFormat(PrintStream out) {
		printMosesFormat(out, new PrintSpecification());
	}
	
	/**
	 * Prints all non-pruned phrase pairs in Moses phrase-table format, followed
	 * by the null phrases for each target length. The null-phrase key array is
	 * hoisted; the original re-invoked keys() (a fresh array) every iteration.
	 * NOTE(review): null phrases are printed with stringSimpleRep rather than
	 * stringMosesRep and are not filtered by the pruned flag — confirm intended.
	 *
	 * @param out stream to print to
	 * @param printSpec controls which fields are printed
	 */
	public void printMosesFormat(PrintStream out, PrintSpecification printSpec) {
		for (int targetLen = 0; targetLen < _maxTargetPhraseLen; targetLen++) {
			for (int sourceLen = 0; sourceLen < _maxSourcePhraseLen; sourceLen++) {
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = _phraseTable[sourceLen][targetLen];
				int[] keysT = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					for (int j = 0; j < keys.length; j++) {
						Phrase p = phrases.get(keys[j]);
						if(!p.isPruned()){
							out.println(p
								.stringMosesRep(_corpus, _sourcePhrasesVocab,
										_targetPhrasesVocab, printSpec));
						}
					}
				}
			}
			int[] nullKeys = _nullPhrases[targetLen].keys();
			for (int i = 0; i < nullKeys.length; i++) {
				out.println(_nullPhrases[targetLen].get(
						nullKeys[i]).stringSimpleRep(
						_corpus, _sourcePhrasesVocab, _targetPhrasesVocab));
			}
		}
	}
	
	/**
	 * Prints a blank line followed by the feature headers, taken from an
	 * arbitrary phrase in the table.
	 * NOTE(review): assumes at least one 1-to-1 phrase exists; if
	 * _phraseTable[0][0] is empty the keys()[0] access throws
	 * ArrayIndexOutOfBoundsException.
	 *
	 * @param printStream stream to print to
	 */
	public void printHeaders(PrintStream printStream) {
		printStream.println();
		TIntObjectHashMap<Phrase> phrases = _phraseTable[0][0].get(_phraseTable[0][0].keys()[0]); //hoping that there are at least 1-to-1 phrases
		Phrase somephrase = phrases.get(phrases.keys()[0]);
		printStream.println(somephrase._features.getHeaders());
	}

	
	/**
	 * Command-line driver: loads a bilingual corpus, builds the source and
	 * target phrase vocabularies, and trains an M1 model. The phrase-extraction
	 * and save/load round-trip test below is currently commented out.
	 *
	 * Expected args: corpusDescription size maxSentenceSize maxSourcePhraseSize
	 * maxTargetPhraseSize threshold
	 */
	public static void main(String[] args) throws IOException {
		String corpusDescription = args[0];
		int size = Integer.parseInt(args[1]); 
		int maxSentenceSize = Integer.parseInt(args[2]); 
		int maxSourcePhraseSize = Integer.parseInt(args[3]); 
		int maxTargetPhraseSize = Integer.parseInt(args[4]); 
		int threshold = Integer.parseInt(args[5]); 
		System.out.println("Corpus " + corpusDescription);
		System.out.println("Size " + size);
		System.out.println("Max Sentence size " + maxSentenceSize);
		System.out.println("Max Source Phrase Size " + maxSourcePhraseSize);
		System.out.println("Max Target Phrase Size " + maxTargetPhraseSize);
		System.out.println("Threshold " + threshold);
		
		
		System.out.println("Load bilingual corpus");
		BilingualCorpus corpus = BilingualCorpus.getCorpusFromFileDescription(corpusDescription, size, maxSentenceSize);
		System.out.println("Load Phrases vocab");
		WordTrie sourcePhrasesVocab = new WordTrie(maxSourcePhraseSize);
		sourcePhrasesVocab.addPhrasesAllSentences(corpus._trainSourceSentences);
		sourcePhrasesVocab.compactTrie();
		System.out.println("source vocab built with " + sourcePhrasesVocab._phraseCounter);
		WordTrie foreignPhrasesVocab = new WordTrie(maxTargetPhraseSize);
		foreignPhrasesVocab.addPhrasesAllSentences(corpus._trainForeignSentences);
		foreignPhrasesVocab.compactTrie();
		System.out.println("foreing vocab built with " + foreignPhrasesVocab._phraseCounter);
		System.out.println("Starting init of phrase table");
	
		// Train IBM Model 1 for 5 iterations (used by the commented-out extraction below).
		M1 model =  new M1(corpus);
		model.train(5,false,"");
		
		/*PhraseTable pt = GeneralPhraseExtraction.build(corpus, BilingualCorpus.TRAIN_CORPUS,sourcePhrasesVocab, foreignPhrasesVocab, maxSourcePhraseSize, maxTargetPhraseSize, 
				new GlobalProbabilityCalculator(), new DirectionalModelPosterior(model), new AlwaysAccept(), new HistogramGlobalPrunning(0), new UniformLexicalWeightingCalculator(), false);
		//pt.print(System.out);	
		
		String dir = "tmp/";
		pt.saveTableZip(dir);
		System.out.println("After saving");
		PhraseTable pt2 = PhraseTable.loadFromFile(corpus, sourcePhrasesVocab, foreignPhrasesVocab, dir);
		pt2.print(System.out);
	
		System.out.println("Tables are the smae " + pt.equals(pt2));*/
	}
	
	/**
	 * Registers a listener notified of phrase insertion, feature calculation
	 * and pruning events.
	 */
	public void addListenner(PhraseTableListenner resource){
		resourceListenners.add(resource);
	}
	
	/**
	 * Registers a global feature calculator applied by calculateFeatures().
	 */
	public void addListenner(AbstractGlobalFeatureCalc calc){
		featureListenners.add(calc);
	} 
	
	/**
	 * Registers a global pruner consulted by prunePhrasePairs().
	 */
	public void addListenner(AbstractGlobalPhrasePrunner calc){
		prunnerListenners.add(calc);
	} 

	/**
	 * Discards all registered feature calculators by replacing the list.
	 */
	public void removeAllFeatureListeners(){
		featureListenners = new LinkedList<AbstractGlobalFeatureCalc>();
	}	

	/** @return the live list of registered feature calculators (not a copy) */
	public LinkedList<AbstractGlobalFeatureCalc> getFeatureListenners() {
		return featureListenners;
	}
	
	/** @return the live list of registered pruners (not a copy) */
	public LinkedList<AbstractGlobalPhrasePrunner> getPrunnerListenners() {
		return prunnerListenners;
	}

	/** Notifies all resource listeners that a phrase pair was inserted. */
	protected void notifyNewPhrasePair(Phrase p){
		for(PhraseTableListenner calc : resourceListenners){
			calc.notifyNewPhrase(p);
		}
	}
	
	/** Notifies all resource listeners that a full pass over the table finished. */
	protected void notifyAllPhrasesLoaded(){
		for(PhraseTableListenner calc : resourceListenners){
			calc.loadPhrasesComplete(this);
		}
	}
	
	/** Hook fired before a phrase's features are calculated. */
	protected void beforeCalculateFeature(Phrase p){
		for(PhraseTableListenner calc : resourceListenners){
			calc.beforeCalculateFeature(p, this);
		}
	}
	
	/** Hook fired after a phrase's features are calculated. */
	protected void afterCalculateFeature(Phrase p){
		for(PhraseTableListenner calc : resourceListenners){
			calc.afterCalculateFeature(p, this);
		}
	}

	/** Hook fired before a phrase is submitted to the pruners. */
	private void beforePruneCheck(Phrase p) {
		for(PhraseTableListenner calc : resourceListenners){
			calc.beforePruneCheckPhrase(p, this);
		}
	}
	
	/** Hook fired after a prune decision, with the outcome. */
	private void afterPruneCheck(Phrase p, boolean wasPruned) {
		for(PhraseTableListenner calc : resourceListenners){
			calc.afterPruneCheckPhrase(p, this, wasPruned);
		}		
	}
	
	/**
	 * Applies each registered global feature calculator to the given phrase.
	 */
	protected void calculatePhraseFeatures(Phrase p){
		java.util.Iterator<AbstractGlobalFeatureCalc> it = featureListenners.iterator();
		while(it.hasNext()){
			it.next().calculateFeature(p, this);
		}
	}
	
	/**
	 * Asks the registered pruners about a phrase pair, stopping at the first
	 * positive vote.
	 *
	 * @return true if any pruner votes to prune the given phrase pair
	 */
	protected boolean shouldPrunePhrase(Phrase p){
		boolean prune = false;
		for(AbstractGlobalPhrasePrunner pruner : prunnerListenners){
			if(pruner.shouldPrunePhrase(p, this)){
				prune = true;
				break;
			}
		}
		return prune;
	}

}