package geppetto.phraseHMM.phraseExtraction;

import geppetto.cat.common.StaticTools;
import geppetto.cat.corpus.BilingualCorpus;
import geppetto.phraseHMM.WordTrie;
import geppetto.phraseTable.PhraseTable;
import geppetto.phraseTable.phrase.Phrase;
import gnu.trove.TIntArrayList;
import gnu.trove.TIntObjectHashMap;

import java.io.IOException;



/**
 * Extracts all phrases up to a given length from a bilingual corpus and keeps
 * only the phrases whose co-occurrence counts reach a minimum threshold
 * (histogram pruning).
 *
 * @author javg
 */
public class HistogramPrunning {

	// Scratch buffers shared across sentences so a fresh int[] is not
	// allocated for every candidate phrase span; index d holds a buffer for
	// phrases of length d+1. NOTE(review): static mutable state makes this
	// class non-thread-safe — confirm it is only driven single-threaded.
	static int[][] sourceTMPArray;
	static int[][] targetTMPArray;

	/**
	 * Builds a phrase table from the training corpus: counts every candidate
	 * source/target phrase pair per sentence, prunes pairs with counts below
	 * {@code minimumCount}, then compacts and normalizes the table.
	 *
	 * @param corpus              bilingual corpus whose training sentences are scanned
	 * @param sourcePhrasesVocab  trie of admissible source phrases
	 * @param foreignPhrasesVocab trie of admissible target (foreign) phrases
	 * @param maxSourceDuration   maximum source phrase length in words
	 * @param maxTargetDuration   maximum target phrase length in words
	 * @param minimumCount        phrase pairs seen fewer times than this are removed
	 * @return the pruned, compacted and normalized phrase table
	 */
	public static PhraseTable build(BilingualCorpus corpus, WordTrie sourcePhrasesVocab, WordTrie foreignPhrasesVocab, int maxSourceDuration ,
			int maxTargetDuration, int minimumCount) {

		// Pre-allocate one reusable buffer per phrase length to avoid
		// constant array creation inside processSentence().
		sourceTMPArray = new int[maxSourceDuration][];
		for (int i = 0; i < maxSourceDuration; i++) {
			sourceTMPArray[i] = new int[i + 1];
		}
		targetTMPArray = new int[maxTargetDuration][];
		for (int i = 0; i < maxTargetDuration; i++) {
			targetTMPArray[i] = new int[i + 1];
		}
		PhraseTable pt = new PhraseTable(corpus, maxSourceDuration, maxTargetDuration, sourcePhrasesVocab, foreignPhrasesVocab);
		init(pt, corpus, maxTargetDuration, maxSourceDuration, minimumCount);
		return pt;
	}

	/**
	 * Runs the full pipeline on {@code pt}: count phrase pairs over all
	 * training sentences, prune by histogram threshold, compact, and
	 * normalize. Timing (whole seconds) and memory usage of each stage are
	 * logged to stdout.
	 *
	 * @param pt                phrase table to populate in place
	 * @param corpus            bilingual corpus providing the training sentences
	 * @param maxTargetDuration maximum target phrase length in words
	 * @param maxSourceDuration maximum source phrase length in words
	 * @param minimumCount      histogram pruning threshold
	 */
	public static void init(PhraseTable pt, BilingualCorpus corpus,
			int maxTargetDuration, int maxSourceDuration, int minimumCount) {
		long initTime;
		long endTime;
		initTime = System.currentTimeMillis();
		System.gc();
		// Stage 1: count all co-occurring phrase pairs, sentence by sentence.
		for (int i = 0; i < corpus.getNumberOfTrainingSentences(); i++) {
			int[] foreingSentence = corpus.getForeignSentence(i,
					BilingualCorpus.TRAIN_CORPUS);
			int[] sourceSentence = corpus.getSourceSentence(i,
					BilingualCorpus.TRAIN_CORPUS);
			processSentence(pt, foreingSentence, sourceSentence,
					maxSourceDuration, maxTargetDuration);
		}
		endTime = System.currentTimeMillis();
		System.out.println("Spent in processing sentences time "
				+ (endTime - initTime) / 1000 + " mem " + StaticTools.getUsedMemory());
		System.out.println("Total number of distinct phrases "
				+ pt._numberOfPhrases);

		// Stage 2: drop phrase pairs below the count threshold.
		initTime = System.currentTimeMillis();
		removePhrasesByHistogram(pt, minimumCount);
		endTime = System.currentTimeMillis();
		System.out.println("Spent in removing phrases by threshold "
				+ (endTime - initTime) / 1000 + " mem " + StaticTools.getUsedMemory());
		System.out.println("Total number of distinct phrases after pruning "
				+ pt._numberOfPhrases);

		// Stage 3: compact the table's internal storage.
		initTime = System.currentTimeMillis();
		pt.compact();
		endTime = System.currentTimeMillis();
		System.out.println("Spent in compacting "
				+ (endTime - initTime) / 1000 + " mem " + StaticTools.getUsedMemory());
		System.out.println("Total number of distinct phrases after compacting "
				+ pt._numberOfPhrases);

		// Stage 4: normalize the surviving counts.
		initTime = System.currentTimeMillis();
		pt.normalizePhrases();
		endTime = System.currentTimeMillis();
		System.out.println("Spent in normalizing phrases "
				+ (endTime - initTime) / 1000 + " mem " + StaticTools.getUsedMemory());
	}

	/**
	 * Removes all phrase pairs whose co-occurrence counts are smaller than
	 * {@code minimumCouts}, and prunes under-threshold null-aligned target
	 * phrases the same way.
	 *
	 * @param pt           phrase table to prune in place
	 * @param minimumCouts minimum co-occurrence count a phrase must have to survive
	 *
	 * TODO: This is not the best approach. One should remove based on the
	 * total number of sentences. For instance assume two source phrases s1
	 * and s2: s1 has 1000 co-occurring foreign phrases, so "below five"
	 * makes sense; s2 has 10 co-occurring foreign phrases, so "below five"
	 * might not make sense.
	 */
	public static void removePhrasesByHistogram(PhraseTable pt, int minimumCouts) {
		for (int targetLen = 0; targetLen < pt._maxTargetPhraseLen; targetLen++) {
			for (int sourceLen = 0; sourceLen < pt._maxSourcePhraseLen; sourceLen++) {
				TIntObjectHashMap<TIntObjectHashMap<Phrase>> phrasesT = pt._phraseTable[sourceLen][targetLen];
				// keys() yields a snapshot array, so removing entries while
				// iterating over it is safe.
				int keysT[] = phrasesT.keys();
				for (int i = 0; i < keysT.length; i++) {
					TIntObjectHashMap<Phrase> phrases = phrasesT.get(keysT[i]);
					int[] keys = phrases.keys();
					if (keys.length == 0)
						continue;
					for (int j = 0; j < keys.length; j++) {
						Phrase p = phrases.get(keys[j]);
						if (p.getCounts() < minimumCouts) {
							phrases.remove(keys[j]);
							pt._numberOfPhrases--;
							//TODO pt._sourcePhraseSums[sourceLen][targetLen].put(p._sourcePhraseID,pt._sourcePhraseSums[sourceLen][targetLen].get(p._sourcePhraseID)-p.getCounts());
						}
					}
				}
			}
			// Prune null-aligned target phrases for this length.
			// NOTE(review): _numberOfPhrases is not decremented here, unlike
			// the pair-removal loop above — confirm whether null phrases are
			// included in that counter.
			int[] keys = pt._nullPhrases[targetLen].keys();
			for (int i = 0; i < keys.length; i++) {
				Phrase p = pt._nullPhrases[targetLen].get(keys[i]);
				if (p.getCounts() < minimumCouts) {
					pt._nullPhrases[targetLen].remove(keys[i]);
				}
			}
		}
	}

	/**
	 * Counts, for a single sentence pair, every co-occurrence of a known
	 * source phrase with a known target phrase, plus a null-source count for
	 * each target phrase occurrence. Phrases absent from the vocabulary
	 * tries (phrase id -1) are skipped.
	 *
	 * @param pt                phrase table receiving the counts
	 * @param foreingSentence   target-side sentence as word ids
	 * @param sourceSentence    source-side sentence as word ids
	 * @param maxSourceDuration maximum source phrase length in words
	 * @param maxTargetDuration maximum target phrase length in words
	 */
	public static void processSentence(PhraseTable pt, int[] foreingSentence,
			int[] sourceSentence, int maxSourceDuration, int maxTargetDuration) {

		// Phrase ids found in this sentence, bucketed by (phrase length - 1).
		TIntArrayList[] foreingPhrases = new TIntArrayList[maxTargetDuration];
		for (int i = 0; i < maxTargetDuration; i++) {
			foreingPhrases[i] = new TIntArrayList();
		}
		TIntArrayList[] sourcePhrases = new TIntArrayList[maxSourceDuration];
		for (int i = 0; i < maxSourceDuration; i++) {
			sourcePhrases[i] = new TIntArrayList();
		}
		// Collect every known target phrase starting at each position.
		for (int foreignIndex = 0; foreignIndex < foreingSentence.length; foreignIndex++) {
			for (int targetDuration = 1; targetDuration <= maxTargetDuration; targetDuration++) {
				if (foreignIndex + targetDuration > foreingSentence.length)
					break;
				// Reuse the shared scratch buffer instead of allocating.
				int[] foreing = targetTMPArray[targetDuration - 1];
				for (int f = 0; f < targetDuration; f++) {
					foreing[f] = foreingSentence[foreignIndex + f];
				}
				int targetPhraseId = pt._targetPhrasesVocab
						.getPhraseId(foreing);
				if (targetPhraseId == -1) continue; // not in the vocabulary
				foreingPhrases[targetDuration - 1].add(targetPhraseId);
			}
		}
		// Collect every known source phrase starting at each position.
		for (int sourceIndex = 0; sourceIndex < sourceSentence.length; sourceIndex++) {
			for (int sourceDuration = 1; sourceDuration <= maxSourceDuration; sourceDuration++) {
				if (sourceIndex + sourceDuration > sourceSentence.length)
					break;
				int[] source = sourceTMPArray[sourceDuration - 1];
				for (int s = 0; s < sourceDuration; s++) {
					source[s] = sourceSentence[sourceIndex + s];
				}
				int sourcePhraseId = pt._sourcePhrasesVocab.getPhraseId(source);
				if (sourcePhraseId == -1) continue; // not in the vocabulary
				sourcePhrases[sourceDuration - 1].add(sourcePhraseId);
			}
		}
		// Count each (source, target) pair once per co-occurrence, and one
		// null-source count for every target phrase occurrence.
		for (int targetLen = 0; targetLen < maxTargetDuration; targetLen++) {
			for (int i = 0; i < foreingPhrases[targetLen].size(); i++) {
				int foreingId = foreingPhrases[targetLen].get(i);
				for (int sourceLen = 0; sourceLen < maxSourceDuration; sourceLen++) {
					for (int j = 0; j < sourcePhrases[sourceLen].size(); j++) {
						int sourceId = sourcePhrases[sourceLen].get(j);
						pt.insertPhrase(sourceId, foreingId, 1, sourceLen, targetLen);
					}
				}
				pt.insertNullPhrase(foreingId, 1, targetLen);
			}
		}
	}

	/**
	 * Command-line entry point. Arguments, in order:
	 * corpusDescription, corpus size, max sentence size, max source phrase
	 * size, max target phrase size, pruning threshold. Builds the phrase
	 * table and prints it to stdout.
	 *
	 * @param args see above; all numeric arguments are parsed as ints
	 * @throws IOException if the corpus cannot be read
	 */
	public static void main(String[] args) throws IOException {
		String corpusDescription = args[0];
		int size = Integer.parseInt(args[1]);
		int maxSentenceSize = Integer.parseInt(args[2]);
		int maxSourcePhraseSize = Integer.parseInt(args[3]);
		int maxTargetPhraseSize = Integer.parseInt(args[4]);
		int threshold = Integer.parseInt(args[5]);
		System.out.println("Corpus " + corpusDescription);
		System.out.println("Size " + size);
		System.out.println("Max Sentence size " + maxSentenceSize);
		System.out.println("Max Source Phrase Size " + maxSourcePhraseSize);
		System.out.println("Max Target Phrase Size " + maxTargetPhraseSize);
		System.out.println("Threshold " + threshold);

		System.out.println("Load bilingual corpus");
		BilingualCorpus corpus = BilingualCorpus.getCorpusFromFileDescription(corpusDescription, size, maxSentenceSize);
		System.out.println("Load Phrases vocab");
		WordTrie sourcePhrasesVocab = new WordTrie(maxSourcePhraseSize);
		sourcePhrasesVocab.addPhrasesAllSentences(corpus._trainSourceSentences);
		sourcePhrasesVocab.compactTrie();
		System.out.println("source vocab built with " + sourcePhrasesVocab._phraseCounter);
		WordTrie foreignPhrasesVocab = new WordTrie(maxTargetPhraseSize);
		foreignPhrasesVocab.addPhrasesAllSentences(corpus._trainForeignSentences);
		foreignPhrasesVocab.compactTrie();
		System.out.println("foreing vocab built with " + foreignPhrasesVocab._phraseCounter);
		System.out.println("Starting init of phrase table");
		PhraseTable pt = HistogramPrunning.build(corpus, sourcePhrasesVocab, foreignPhrasesVocab, maxSourcePhraseSize, maxTargetPhraseSize, threshold);
		pt.print(System.out);
	}

}
