package langnstats.project;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import langnstats.project.ParserTools.StanfordParserHandel;
import langnstats.project.languagemodel.hmm.HMM;
import langnstats.project.languagemodel.loglinear.LogLinear;
import langnstats.project.languagemodel.ngram.Ngram;
import langnstats.project.languagemodel.srilm.NGramLanguageModel;
import langnstats.project.languagemodel.srilm.SriLMParameters.DiscountingMethod;
import langnstats.project.lib.LanguageModel;
import langnstats.project.lib.WordType;
import langnstats.project.lib.crossvalidation.CrossValidationResult;
import langnstats.project.lib.crossvalidation.CrossValidationTokenSet;
 
public class Global {
	/** Root folder containing the raw training data. */
	public static final File dataFolder = new File("data/");
	// Two encodings of the same trainA corpus: CRLF (Windows) vs LF (Unix) line endings.
	private static final File trainA_WindowsFile = new File(dataFolder, "trainA.txt");
	private static final File trainA_UnixFile = new File(dataFolder, "trainA.unix");
	
	/** Extension given to serialized language-model files. */
	public static final String modelFileExtension = "mdl";
	public static final File modelsFolder = new File("models/");
	
	// Errors are deliberately routed to stdout as well, so that debug and
	// error output interleave in order (see main(), which calls System.setErr).
	public static final PrintStream debugOut = System.out; 
	public static final PrintStream debugError = System.out;
	
	public static final File resultFolder = new File("result/");
	public static final File testFile = new File(resultFolder, "out.txt");
	
	/** Tolerance when verifying that a probability distribution sums to 1. */
	public static final double THRESHOLD = 0.0001;
	public static String newLine = System.getProperty("line.separator");
	
	public static StanfordParserHandel parser = new StanfordParserHandel();
	
	/**
	 * Returns the trainA corpus file appropriate for the current platform.
	 * <p>
	 * BUGFIX: this previously tested {@code os.arch} against {@code "x86"},
	 * which reports the CPU architecture, not the operating system (a 64-bit
	 * Windows JVM got the Unix file; a 32-bit Linux JVM got the Windows file).
	 * The OS determines the line-ending convention, so test {@code os.name}.
	 */
	public static File getTrainA(){
		if(System.getProperty("os.name", "").startsWith("Windows")){ return trainA_WindowsFile; }
		else{ return trainA_UnixFile; }
	}
	
	/**
	 * Serializes {@code lm} to {@code ofile} using Java object serialization.
	 *
	 * @param lm    model to persist
	 * @param ofile destination file (overwritten if it exists)
	 * @throws IOException if the file cannot be written
	 */
	public static void save(LanguageModel lm, File ofile) throws IOException{
		ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(ofile));
		try{
			oos.writeObject(lm);
		}finally{
			// FIX: previously the stream leaked if writeObject threw.
			oos.close(); // also closes the underlying FileOutputStream
		}
	}
	
	/**
	 * Deserializes a {@link LanguageModel} previously written by {@link #save}.
	 * <p>
	 * NOTE(review): Java native deserialization is unsafe on untrusted input;
	 * only load model files produced by this application.
	 *
	 * @throws IOException            if the file cannot be read
	 * @throws ClassNotFoundException if a serialized class is unavailable
	 */
	public static LanguageModel load(File infile) throws IOException, ClassNotFoundException{
		ObjectInputStream ois = new ObjectInputStream(new FileInputStream(infile));
		try{
			// FIX: the streams were previously never closed (resource leak).
			return (LanguageModel)ois.readObject();
		}finally{
			ois.close(); // also closes the underlying FileInputStream
		}
	}
	
	/** Logarithm of {@code d} in an arbitrary {@code base}. */
	public static double log(double base, double d){
		return Math.log(d)/Math.log(base);
	}	

	
	/**
	 * Computes the average base-2 log-likelihood of {@code evalTokens} under
	 * {@code model}, predicting each token from the previously observed one
	 * (the first prediction is made with a {@code null} history token).
	 *
	 * @param model      model under evaluation; its predictions must sum to 1
	 * @param evalTokens evaluation sequence; must be non-empty
	 * @return sum of per-token log2 probabilities divided by the token count
	 * @throws IllegalArgumentException if {@code evalTokens} is empty
	 */
	public static double getAverageLogLikelihood(LanguageModel model, WordType[] evalTokens){
		int tokenLength = evalTokens.length;
		if(tokenLength == 0){
			// FIX: previously divided by zero and silently returned NaN.
			throw new IllegalArgumentException("evalTokens must not be empty");
		}
		System.out.println("tokenLength = "+tokenLength);
		
		double sumLogLikelihood = 0;
		WordType wordType = null; // null history signals "start of sequence"
		for(int i=0; i<tokenLength; i++) {
			// Predict BEFORE advancing the history to the current token.
			double[] predictions = model.predict(wordType);
			checkPredictions(predictions);
			
			wordType = evalTokens[i];
			sumLogLikelihood += log(2,predictions[wordType.getIndex()]);
			if(Double.isNaN(sumLogLikelihood)){
				// NaN here means the model assigned probability <= 0 to a token.
				System.err.println("Error");
			}
		}
		System.out.println("AverageLogLikelihood=" + sumLogLikelihood/tokenLength);
		return sumLogLikelihood/tokenLength;
	}
	
	/**
	 * Validates that {@code predictions} is a probability distribution over
	 * the full WordType vocabulary: correct length, entries summing to 1
	 * within {@link #THRESHOLD}.
	 *
	 * @throws IllegalArgumentException if the length differs from the vocabulary size
	 * @throws RuntimeException         if the entries do not sum to (approximately) 1
	 */
	public static void checkPredictions(double[] predictions){
		if( predictions.length!=WordType.size() ){ 
			throw new IllegalArgumentException("Prediction length ("+predictions.length+") should be equal to WordType size("+WordType.size()+")");
		}
		
		double sum=0;
		for(double d : predictions){
			sum += d;			
		}
		if( Math.abs(sum-1)>THRESHOLD ){ 
			throw new RuntimeException("Sum of Predictions ("+sum+") not 1");
		}
	}
	
	
	/**
	 * Adds the SRILM n-gram configurations under evaluation to {@code lst}.
	 * All configurations use Good-Turing discounting; {@code cutoff[i]} and
	 * {@code discount[i]} apply to (i+1)-grams.
	 * <p>
	 * NOTE(review): consecutive models deliberately share the same discount
	 * array instance (as in the original code) — confirm NGramLanguageModel
	 * copies, or never mutates, its array arguments.
	 */
	private static void getAllNGramModels(List<LanguageModel> lst){
		// Trigram models: no unigram cutoff, light bigram/trigram cutoffs.
		int[] discount = new int[3];
		discount[0] = discount[1] = discount[2] = 7;
		
		int[] cutoff = new int[3];
		cutoff[0] = 0;
		cutoff[1] = 2;
		cutoff[2] = 3;
		lst.add(new NGramLanguageModel(DiscountingMethod.Good_Turing,3,cutoff,discount));
		
		cutoff = new int[3];
		cutoff[0] = 0;
		cutoff[1] = 2;
		cutoff[2] = 2;
		lst.add(new NGramLanguageModel(DiscountingMethod.Good_Turing,3,cutoff,discount));
		
		// 4-gram models.
		discount = new int[4];
		discount[0] = discount[1] = discount[2] = discount[3] = 7;
		
		cutoff = new int[4];
		cutoff[0] = 0;
		cutoff[1] = 4;
		cutoff[2] = 3;
		cutoff[3] = 2;
		lst.add(new NGramLanguageModel(DiscountingMethod.Good_Turing,4,cutoff,discount));
		
		cutoff = new int[4];
		cutoff[0] = 0;
		cutoff[1] = 3;
		cutoff[2] = 3;
		cutoff[3] = 3;
		lst.add(new NGramLanguageModel(DiscountingMethod.Good_Turing,4,cutoff,discount));
	}
	
	/**
	 * Builds the full list of language models to cross-validate: the SRILM
	 * n-gram configurations, optionally a 20-state HMM (enable by running
	 * with {@code -DUseHMM=T}), and a log-linear model.
	 */
	public static List<LanguageModel> getAllModels(){
		List<LanguageModel> models = new ArrayList<LanguageModel>();
		getAllNGramModels(models);
		if(System.getProperty("UseHMM","F").equals("T")){
			models.add(HMM.create(20));
		}
		models.add(new LogLinear());
		return models;
	}
	
	
	/**
	 * Entry point: parses the trainA corpus, runs 10-fold cross-validation
	 * over every configured language model, and reports the best one.
	 */
	public static void main(String[] args) throws IOException{
		// Route stderr through debugError (== stdout) so output interleaves in order.
		System.setErr(Global.debugError);
		
		Global.debugOut.println("Starting JLMTools...");
	
		Global.debugOut.println("Preparing training-evaluation set...");
		WordType[] trainATokens = WordType.parse(Global.getTrainA());
		CrossValidationTokenSet cvts = new CrossValidationTokenSet(trainATokens, 10);
		
		Global.debugOut.println("Creating Language Models...");
		List<LanguageModel> models = getAllModels();
		
		Global.debugOut.println("Starting Cross-validation("+cvts.getValidationSize()+")...");
		CrossValidationResult[] results = cvts.crossValidate(models);
		
		Global.debugOut.println("Sorting results...");
		Arrays.sort(results);
		// The last element after sorting is reported as the best model.
		Global.debugOut.print("Best Model: ");
		Global.debugOut.println("<"+results[results.length-1].getModelDescription()+">");
		
		Global.debugOut.println("Program ended normally!");
	}

	/**
	 * Writes one token tag per line to {@code f}.
	 * <p>
	 * NOTE(review): PrintWriter swallows IOExceptions raised during println;
	 * call sites that must detect write failures should check checkError().
	 *
	 * @throws FileNotFoundException if {@code f} cannot be opened for writing
	 */
	public static void exportTokens(WordType[] tokenArray, File f)
	throws FileNotFoundException {
		PrintWriter writer = new PrintWriter(f);
		try{
			for(WordType wordType : tokenArray){
				writer.println(wordType.getOriginalTag());
			}
		}finally{
			// FIX: ensure the writer is closed even if getOriginalTag throws.
			writer.close();
		}
	}
}
