package langnstats.project.languagemodel.interpolation;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;

import langnstats.project.Global;
import langnstats.project.languagemodel.hmm.HMM;
import langnstats.project.languagemodel.ngram.Ngram;
import langnstats.project.languagemodel.srilm.NGramLanguageModel;
import langnstats.project.languagemodel.srilm.SriLMParameters.DiscountingMethod;
import langnstats.project.lib.LanguageModel;
import langnstats.project.lib.WordType;
import langnstats.project.lib.crossvalidation.CrossValidationTokenSet;
import langnstats.project.lib.crossvalidation.TrainTokens;

/**
 * Linearly interpolates several language models:
 *
 *   P(w_j | h_j) = sum_i lambda_i * P_i(w_j | h_j)
 *
 * The weights lambda_i are estimated with EM on held-out tokens
 * ({@link #interpolate(TrainTokens)}); training of the component models
 * themselves is delegated to them ({@link #train(TrainTokens)}).
 */
public class LinearInterpolation {
	// model_probs[i][j] = P_i(token j | its left context) on the held-out data.
	private double[][] model_probs;
	// Component language models being interpolated.
	private ArrayList<LanguageModel> lmodels;
	// Interpolation weights, one per model; kept summing to 1.
	private double[] lambda;
	// EM expected counts ("responsibilities") accumulated per model.
	private double[] fractions;
	// Perplexity of the current and previous EM iteration; only kept for the
	// alternative perplexity-based stopping rule, which is currently disabled.
	private double new_pp;
	private double old_pp;
	// Per-token log-likelihood (base 2) at which the EM loop stops.
	private double stopLL;
	
	private double sum_logprobs;
	private double[] prob_components;
	
	// Stored at construction; exposed via getter/setter but not read by the
	// EM loop itself (stopLL controls termination instead).
	private double stopRatio;
	
	/**
	 * @param stopRatio retained for API compatibility; EM termination is
	 *                  actually controlled by {@link #setStopLL(double)}.
	 */
	public LinearInterpolation(double stopRatio) {
		super();
		this.stopRatio = stopRatio;
		lmodels = new ArrayList<LanguageModel>();
	}
	
	/** Trains every component model on the same token stream. */
	public void train(TrainTokens traintokens) {
		for (int i = 0; i < lmodels.size(); i++) {
			lmodels.get(i).train(traintokens);
		}
	}
	
	/**
	 * Estimates the interpolation weights with EM on the given held-out tokens.
	 * Each iteration: the E-step (in {@link #eval}) computes each model's
	 * responsibility for each token; the M-step renormalizes those expected
	 * counts into new lambdas. Iterates until the per-token log-likelihood
	 * reaches {@code stopLL} (within 1e-8).
	 */
	public void interpolate(TrainTokens testTokens) {
		int nmodels = lmodels.size();
		WordType[] tokenArray = testTokens.getTokenArray();
		
		// Cache P_i(token_j | context) for every model i and position j.
		// BUG FIX: the original assigned the whole prediction vector to
		// model_probs[i] on every step (so only the last context survived,
		// and the per-token allocation was discarded) and stopped at
		// length-1, skipping the final token. We now store one probability
		// per observed token, exactly the way test() scores them.
		model_probs = new double[nmodels][tokenArray.length];
		for (int i = 0; i < nmodels; i++) {
			LanguageModel model = lmodels.get(i);
			WordType context = null; // null context for the first token
			for (int j = 0; j < tokenArray.length; j++) {
				double[] predicts = model.predict(context);
				context = tokenArray[j];
				model_probs[i][j] = predicts[context.getIndex()];
			}
		}
		
		lambda = new double[nmodels];
		fractions = new double[nmodels];
		prob_components = new double[nmodels];
		
		// Start from uniform weights.
		for (int i = 0; i < nmodels; i++) {
			lambda[i] = 1.0 / nmodels;
		}
		new_pp = 10e98; // sentinel so the first old_pp swap is harmless
		
		// EM loop. (A perplexity-delta stopping rule via old_pp/new_pp was
		// tried previously; the stopLL criterion is the one in force.)
		boolean first = true;
		double ll = 0.0;
		while (first || stopLL - ll > 0.00000001) {
			old_pp = new_pp;
			if (!first) {
				// M-step: lambda_i = expected count of model i / #tokens.
				// (Dead "total_nonfixed_*" accumulators from the original
				// were computed but never used; removed.)
				for (int i = 0; i < nmodels; i++) {
					lambda[i] = fractions[i] / tokenArray.length;
				}
			}
			ll = eval(nmodels, tokenArray);
			first = false;
			printLambda(nmodels);
		}
	}
	
	/** Dumps the current interpolation weights to stdout. */
	private void printLambda(int nmodels) {
		for (int i = 0; i < nmodels; i++) {
			System.out.println("lambda[" + i + "] = " + lambda[i]);			
		}
	}
	
	/**
	 * E-step plus likelihood evaluation: accumulates each model's
	 * responsibilities into {@code fractions} and returns the per-token
	 * log-likelihood (base 2) of the current mixture.
	 */
	private double eval(int nmodels, WordType[] tokenArray) {
		sum_logprobs = 0.0;
		for (int i = 0; i < nmodels; i++) {
			fractions[i] = 0.0;
		}
		
		for (int j = 0; j < tokenArray.length; j++) {
			double total_prob = 0.0;
			for (int i = 0; i < nmodels; i++) {
				// BUG FIX: index the cached probabilities by token POSITION j.
				// The original indexed by the token's vocabulary id, which
				// discards the n-gram context the probability was computed for.
				prob_components[i] = lambda[i] * model_probs[i][j];
				total_prob += prob_components[i];
			}
			for (int i = 0; i < nmodels; i++) {
				fractions[i] += prob_components[i] / total_prob;
			}
			sum_logprobs += Global.log(2, total_prob);
		}
		
		// BUG FIX: the log-likelihood is accumulated in base 2
		// (Global.log(2, ...)), so perplexity is 2^(-LL), not e^(-LL).
		new_pp = Math.pow(2, -sum_logprobs / tokenArray.length);
		System.out.println("Stop LL = " + stopLL + "\tLog-likelihood = " + sum_logprobs/tokenArray.length);
		return sum_logprobs / tokenArray.length;
	}
	
	/**
	 * Scores each component model separately on the test tokens and returns
	 * the smallest per-token log-likelihood seen (capped at 0).
	 * NOTE(review): Math.min over negative log-likelihoods picks the WORST
	 * model as the EM stop bar; if the best single model was intended, this
	 * should be Math.max — confirm with the original author. Behavior kept.
	 */
	private double test(TrainTokens testTokens) {
		int nmodels = lmodels.size();
		WordType[] tokenArray = testTokens.getTokenArray();
		double min_logprob = 0;
		
		for (int i = 0; i < nmodels; i++) {
			LanguageModel model = lmodels.get(i);
			WordType wordType = null;
			double sum_logprobs = 0.0;
			for (int j = 0; j < tokenArray.length; j++) {
				double[] predicts = model.predict(wordType);
				wordType = tokenArray[j];
				sum_logprobs += Global.log(2, predicts[wordType.getIndex()]);
			}
			System.out.println(model.getDescription() + ": " + sum_logprobs/tokenArray.length);
			min_logprob = Math.min(min_logprob, sum_logprobs / tokenArray.length);
		}
		
		return min_logprob;
	}
	
	/**
	 * Builds the two sample SRILM n-gram models used by main(): a Kneser-Ney
	 * trigram and a Good-Turing bigram, both with discounts fixed at 7.
	 * (Renamed from the misspelled "buidlNgram"; private, so no external
	 * callers are affected.)
	 */
	private static NGramLanguageModel[] buildNgram() {
		int[] cutoff3 = { 0, 1, 2 };
		int[] discount3 = { 7, 7, 7 };
		int[] cutoff2 = { 0, 1 };
		int[] discount2 = { 7, 7 };
		
		NGramLanguageModel[] res = new NGramLanguageModel[2];
		res[0] = new NGramLanguageModel(DiscountingMethod.Keyser_Ney, 3, cutoff3, discount3);
		res[1] = new NGramLanguageModel(DiscountingMethod.Good_Turing, 2, cutoff2, discount2);
		return res;
	}
	
	/**
	 * Demo driver: loads two pre-trained n-gram models from
	 * "data/sampleModels", scores each on a held-out split to set the EM stop
	 * bar, then runs the interpolation.
	 *
	 * @throws IOException if the token data or serialized models cannot be read
	 */
	public static void main(String[] args) throws IOException {
		Global.debugOut.println("Starting JLMTools...");
		
		Global.debugOut.println("Preparing training-evaluation set...");
		WordType[] trainATokens = WordType.parse(Global.getTrainA());
		WordType[][] tokens = CrossValidationTokenSet.divide(trainATokens, 10);
		
		WordType[] trainTokens = tokens[0]; // reserved for (re)training, see note below
		WordType[] evalTokens = tokens[1];
		
		Global.debugOut.println("Creating Language Models...");
		
		LinearInterpolation li = new LinearInterpolation(0.999);
		NGramLanguageModel[] models = buildNgram();
		NGramLanguageModel model1 = models[0];
		NGramLanguageModel model2 = models[1];
		
		Global.debugOut.println("Training Language Models...");
		// To (re)train and persist the models instead of loading them
		// (requires the SRILM executables to be installed):
		//   model1.train(new TrainTokens(trainTokens));
		//   model2.train(new TrainTokens(trainTokens));
		//   ObjectOutputStream modelSer = new ObjectOutputStream(new FileOutputStream("data/sampleModels"));
		//   modelSer.writeObject(models);
		
		// Load the pre-trained models.
		// BUG FIX: the stream is now closed (the original leaked it).
		ObjectInputStream modelDeSer = new ObjectInputStream(new FileInputStream("data/sampleModels"));
		try {
			models = (NGramLanguageModel[]) modelDeSer.readObject();
			model1 = models[0];
			model2 = models[1];
		} catch (ClassNotFoundException e) {
			// Fall back to the untrained models built above.
			e.printStackTrace();
		} finally {
			modelDeSer.close();
		}
		
		li.addLanguageModel(model1);
		li.addLanguageModel(model2);
		
		Global.debugOut.println("Testing Langugage Models...");
		
		double min_ll = li.test(new TrainTokens(evalTokens));
		li.setStopLL(min_ll);
		
		li.interpolate(new TrainTokens(evalTokens));
		
		Global.debugOut.println("Program ended normally!");
	}
	
	public double getStopRatio() {
		return stopRatio;
	}
	
	/** Sets the per-token log-likelihood at which {@link #interpolate} stops. */
	public void setStopLL(double ll) {
		this.stopLL = ll;
	}
	
	public void setStopRatio(double stopRatio) {
		this.stopRatio = stopRatio;
	}
	
	/** Adds a component model; call before train()/interpolate(). */
	public void addLanguageModel(LanguageModel model) {
		lmodels.add(model);
	}

}
