//	Aviv Charikar	Yael Tzirulnikov	305343600	307955609

package modelTraining;

import java.io.*;
import java.util.*;
import java.util.Map.Entry;

/**
 * A unigram/bigram language model trained with Lidstone (add-lambda) smoothing.
 *
 * The development file is split into a training prefix of a given size and a
 * validation suffix. Unseen bigrams back off to the unigram probability of the
 * second word, scaled by a per-context normalization factor so that the bigram
 * distribution for each context still sums to 1.
 */
public class LidstoneModelTraining {
	/** Natural log of 2, cached for converting natural logs to log base 2. */
	private static final double LOG_2 = Math.log(2);
	
	/** Vocabulary size, used as the event-space size in Lidstone smoothing. */
	private final int vocSize;
	// word1 -> (word2 -> count + smoothed probability) for bigrams in the training set
	private final HashMap<String, HashMap<String, CountAndProbability>> bigramTrain;
	private int bigramTrainCount;
	// word1 -> backoff normalization factor alpha(word1) applied to unseen bigrams starting with word1
	private final HashMap<String, Double> backoffNorms;
	// word -> count + smoothed probability for unigrams in the training set
	private final HashMap<String, CountAndProbability> unigramTrain;
	private int unigramTrainCount;
	// Smoothed probability assigned to a word never seen in training (count 0)
	private double unseenProb;
	// word1 -> (word2 -> count) for bigrams in the validation set
	private final HashMap<String, HashMap<String, Integer>> bigramValid;
	private int bigramValidCount;
	// word -> count for unigrams in the validation set
	private final HashMap<String, Integer> unigramValid;
	private int unigramValidCount;
	private double unigramLambda;
	private double bigramLambda;
	
	/**
	 * Builds the model with both lambdas set to 0 (maximum-likelihood estimates).
	 *
	 * @param vocSize     vocabulary size used for smoothing
	 * @param devFilename path of the development file
	 * @param trainSize   number of leading words assigned to the training set
	 * @throws IOException if the development file cannot be read
	 */
	public LidstoneModelTraining(int vocSize, String devFilename, int trainSize) throws IOException {
		this(vocSize, devFilename, trainSize, 0, 0);
	}
	
	/**
	 * Reads the development file, splits it into training/validation sets and
	 * computes the Lidstone-smoothed probabilities for the given lambdas.
	 *
	 * @param vocSize       vocabulary size used for smoothing
	 * @param devFilename   path of the development file (sentence lines; empty lines and lines starting with "&lt;TRAIN" are skipped)
	 * @param trainSize     number of leading words assigned to the training set; the rest go to validation
	 * @param unigramLambda Lidstone lambda for unigram probabilities
	 * @param bigramLambda  Lidstone lambda for bigram probabilities
	 * @throws IOException if the development file cannot be read
	 */
	public LidstoneModelTraining(int vocSize, String devFilename, int trainSize, double unigramLambda, double bigramLambda) throws IOException {
		this.vocSize = vocSize;
		bigramTrain = new HashMap<String, HashMap<String, CountAndProbability>>();
		bigramTrainCount = 0;
		backoffNorms = new HashMap<String, Double>();
		unigramTrain = new HashMap<String, CountAndProbability>();
		unigramTrainCount = 0;
		bigramValid = new HashMap<String, HashMap<String, Integer>>();
		bigramValidCount = 0;
		unigramValid = new HashMap<String, Integer>();
		unigramValidCount = 0;
		readDevFile(devFilename, trainSize);
		setLambdas(unigramLambda, bigramLambda);
	}
	
	/**
	 * Reads the development file and accumulates unigram/bigram counts: the first
	 * trainSize words go to the training set, the remainder to the validation set.
	 * Bigram contexts are reset per line and per set, so no bigram crosses a
	 * sentence boundary or the train/validation split.
	 */
	private void readDevFile(String devFilename, int trainSize) throws IOException {
		// try-with-resources closes the reader even when readLine throws
		// (the previous version leaked the reader on exception).
		// NOTE(review): FileReader uses the platform default charset, as before - confirm the data is ASCII/compatible.
		try (BufferedReader reader = new BufferedReader(new FileReader(devFilename))) {
			String line;
			while ((line = reader.readLine()) != null) {
				// Handle only sentences; skip empty lines and <TRAIN...> headers
				if ((line.length() == 0) || (line.startsWith("<TRAIN")))
					continue;
				
				// Split the words in the sentence and add them to the counts
				String[] lineWords = line.split(" ");
				String lastTrainWord = null;
				String lastValidWord = null;
				for (String word : lineWords) {
					if (word.length() == 0)
						continue;
					// Add the first words to the training set and the last to the validation set
					if (unigramTrainCount < trainSize) {
						addTrainingWord(word, lastTrainWord);
						lastTrainWord = word;
					}
					else {
						addValidationWord(word, lastValidWord);
						lastValidWord = word;
					}
				}
			}
		}
	}
	
	// Counts a training unigram, and the bigram (lastTrainWord, word) when a previous word exists.
	private void addTrainingWord(String word, String lastTrainWord) {
		CountAndProbability countAndProbability = unigramTrain.get(word);
		if (countAndProbability == null)
			unigramTrain.put(word, new CountAndProbability(1, 0));
		else
			countAndProbability.count++;
		unigramTrainCount++;
		
		if (lastTrainWord != null) {
			HashMap<String, CountAndProbability> successors = bigramTrain.get(lastTrainWord);
			if (successors == null) {
				successors = new HashMap<String, CountAndProbability>();
				bigramTrain.put(lastTrainWord, successors);
			}
			countAndProbability = successors.get(word);
			if (countAndProbability == null)
				successors.put(word, new CountAndProbability(1, 0));
			else
				countAndProbability.count++;
			bigramTrainCount++;
		}
	}
	
	// Counts a validation unigram, and the bigram (lastValidWord, word) when a previous word exists.
	private void addValidationWord(String word, String lastValidWord) {
		Integer count = unigramValid.get(word);
		unigramValid.put(word, (count == null) ? 1 : count + 1);
		unigramValidCount++;
		
		if (lastValidWord != null) {
			HashMap<String, Integer> successors = bigramValid.get(lastValidWord);
			if (successors == null) {
				successors = new HashMap<String, Integer>();
				bigramValid.put(lastValidWord, successors);
			}
			count = successors.get(word);
			successors.put(word, (count == null) ? 1 : count + 1);
			bigramValidCount++;
		}
	}
	
	/** @return the lambda currently used for unigram smoothing */
	public double getUnigramLambda() {
		return unigramLambda;
	}
	
	/** @return the lambda currently used for bigram smoothing */
	public double getBigramLambda() {
		return bigramLambda;
	}
	
	/**
	 * Sets the smoothing lambdas and recomputes all smoothed probabilities,
	 * the unseen-word probability and the backoff normalization factors.
	 *
	 * @param unigramLambda Lidstone lambda for unigram probabilities
	 * @param bigramLambda  Lidstone lambda for bigram probabilities
	 */
	public void setLambdas(double unigramLambda, double bigramLambda) {
		backoffNorms.clear();
		this.unigramLambda = unigramLambda;
		this.bigramLambda = bigramLambda;
		
		// Lidstone-smoothed unigram probability: (c(w) + lambda) / (N + lambda * |V|)
		for (Entry<String, CountAndProbability> entry : unigramTrain.entrySet())
			entry.getValue().probability = (entry.getValue().count + unigramLambda) / (unigramTrainCount + unigramLambda * vocSize);
		
		// An unseen word has a count of 0
		unseenProb = unigramLambda / (unigramTrainCount + unigramLambda * vocSize);
		
		// Smooth the observed bigram probabilities and compute, for each context word1,
		// alpha(word1) = (1 - sum of seen bigram probs) / (1 - sum of seen successors' unigram probs)
		for (Entry<String, HashMap<String, CountAndProbability>> word1Entry : bigramTrain.entrySet()) {
			// c(word1) is invariant over the successors - compute it once per context
			int word1Count = getWordCount(word1Entry.getKey());
			double bigramBackoffSum = 0;
			double unigramBackoffSum = 0;
			for (Entry<String, CountAndProbability> word2Entry : word1Entry.getValue().entrySet()) {
				CountAndProbability countAndProbability = word2Entry.getValue();
				countAndProbability.probability = (countAndProbability.count + bigramLambda) / (word1Count + bigramLambda * vocSize);
				bigramBackoffSum += countAndProbability.probability;
				unigramBackoffSum += getWordProbability(word2Entry.getKey());
			}
			// NOTE(review): divides by 0 if the seen successors' unigram probabilities sum
			// to exactly 1 (every vocabulary word observed after word1) - assumed not to occur.
			backoffNorms.put(word1Entry.getKey(), (1 - bigramBackoffSum) / (1 - unigramBackoffSum));
		}
	}
	
	/** @return a read-only view of all distinct words seen in the training set */
	public Set<String> getAllTrainingWords() {
		// Unmodifiable view: callers must not mutate the model's internal map through its key set
		return Collections.unmodifiableSet(unigramTrain.keySet());
	}
	
	/** @return the total number of word tokens in the training set */
	public int getCountTraining() {
		return unigramTrainCount;
	}
	
	/**
	 * @return the training count of the word, or 0 if the word is unseen
	 */
	public int getWordCount(String word) {
		CountAndProbability countAndProbability = unigramTrain.get(word);
		if (countAndProbability == null)
			return 0;
		return countAndProbability.count;
	}
	
	/**
	 * @return the training count of the bigram (word1, word2), or 0 if unseen
	 */
	public int getBigramCount(String word1, String word2) {
		HashMap<String, CountAndProbability> map = bigramTrain.get(word1);
		if (map == null)
			return 0; // word1 is unseen
		CountAndProbability countAndProbability = map.get(word2);
		if (countAndProbability == null)
			return 0; // the bigram is unseen
		return countAndProbability.count;
	}
	
	/**
	 * @return the smoothed probability of the word, or the unseen-word probability if unseen
	 */
	public double getWordProbability(String word) {
		CountAndProbability countAndProbability = unigramTrain.get(word);
		if (countAndProbability == null)
			return unseenProb;
		return countAndProbability.probability;
	}
	
	/** @return the smoothed probability assigned to a word never seen in training */
	public double getUnseenWordProbability() {
		return unseenProb;
	}
	
	/**
	 * @return the smoothed probability P(word2 | word1); unseen bigrams back off
	 *         to word2's unigram probability, normalized per context
	 */
	public double getBigramProbability(String word1, String word2) {
		HashMap<String, CountAndProbability> map = bigramTrain.get(word1);
		if (map == null)
			return getWordProbability(word2); // word1 is unseen - backoff to the unigram probability of word2
		CountAndProbability countAndProbability = map.get(word2);
		if (countAndProbability == null)
			return backoffNorms.get(word1) * getWordProbability(word2); // unseen bigram - backoff scaled by word1's normalization factor
		return countAndProbability.probability;
	}
	
	/** @return the total number of word tokens in the validation set */
	public int getCountValidation() {
		return unigramValidCount;
	}
	
	/** @return the bigram perplexity of this model on the validation set */
	public double calcPerplexityOnValidationSet() {
		return calcPerplexity(bigramValid, unigramValidCount);
	}
	
	/**
	 * @param testSet the model holding the test-set bigram counts
	 * @return the bigram perplexity of this model on the given test set
	 */
	public double calcPerplexityOnTestSet(BasicModelTraining testSet) {
		return calcPerplexity(testSet.getBigrams(), testSet.getUnigramCount());
	}
	
	// Shared perplexity computation: 2 ^ (-(1/N) * sum over bigrams of count * log2 P(w2|w1)).
	private double calcPerplexity(Map<String, ? extends Map<String, Integer>> bigrams, int wordCount) {
		double logProbSum = 0;
		for (Entry<String, ? extends Map<String, Integer>> word1Entry : bigrams.entrySet()) {
			for (Entry<String, Integer> word2Entry : word1Entry.getValue().entrySet())
				logProbSum += word2Entry.getValue() * Math.log(getBigramProbability(word1Entry.getKey(), word2Entry.getKey())) / LOG_2;
		}
		return Math.pow(2, -logProbSum / wordCount);
	}
	
	// Mutable pair of a raw count and its smoothed probability.
	// Static nested class: it never uses the enclosing instance, so it should not
	// hold the hidden reference a non-static inner class carries.
	private static class CountAndProbability {
		public int count;
		public double probability;
		
		public CountAndProbability(int count, double probability) {
			this.count = count;
			this.probability = probability;
		}
	}
}
