package hmm;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Scanner;

/**
 * This class represents an Hidden Markov Model
 */
public class Hmm {

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

	/**
	 * Probability-smoothing strategies used when converting raw counts into
	 * probabilities, so that unseen events still receive a non-zero probability.
	 */
	public enum Smoothing {

		/** Add-one (Laplace) smoothing. */
		LAPLACE,
		/** Add-k smoothing where k depends on the corpus/type ratio. */
		TYPE2;

		/**
		 * Smooths a raw event count into a probability.
		 *
		 * @param count        number of times the event was observed
		 * @param corpusTotal  total number of observations for the conditioning event
		 * @param corpuraTotal number of distinct event types (vocabulary size)
		 * @return the smoothed probability estimate
		 * @throws IllegalStateException if a new enum constant is added without a
		 *         matching case (the original returned {@code (Double) null}, which
		 *         would fail with an opaque NullPointerException on auto-unboxing)
		 */
		public double smooth(double count, double corpusTotal, double corpuraTotal) {

			switch (this) {
				case LAPLACE:
					return (count + 1) / (corpusTotal + corpuraTotal);
				case TYPE2:
					double k = corpusTotal / corpuraTotal + 0.07;
					return (count + k) / (corpusTotal + k * corpuraTotal);
				default:
					throw new IllegalStateException("unknown smoothing method: " + this);
			}
		}
	}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

	// Sentences in the corpus files are separated by a "start start start"
	// marker; this pattern is used as the Scanner delimiter everywhere.
	private static final String SENTENCE_DELIMITER = "start(\\s)*start(\\s)*start(\\s)+";

	private final LinkedList<String> allUniqueTags; //A set of all the tags in the "world"
	private final LinkedList<String> allUniqueBaseWords; //A set of all the base words in the "world"
	private final Map<String,Integer> corpusTagsCount;      // tag -> count of outgoing transitions in the training corpus
	private final Map<String,Integer> corpusTagedWordsCount;// tag -> count of tagged words in the training corpus

	private final Map<String,Integer> tagsToIndex;  // tag -> row index in A/B and pi
	private final Map<String,Integer> wordsToIndex; // base word -> column index in B

	private int numTagsInCorpura = 0;
	private int numBaseWordsInCorpura = 0;
	private int numSentencesInCorpura = 0;
	private int numSentencesInCorpus = -1;

	private final String trainingCorpusPath;
	private final String[] corpura; //Paths for the rest of the corpura of this HMM

	private final Smoothing smoothingMethod;
	private double[] pi;   //The vector of the initial state probabilities
	private double[][] A;  //The state transition matrix
	private double[][] B;  //The output (emission) matrix

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

	/**
	 * Creates an HMM backed by the given corpus files.
	 * Call {@link #init()} afterwards to read the files and build the model.
	 *
	 * @param corpura paths to the corpora; the FIRST element is the training set
	 * @param smoothingMethod smoothing strategy for probability estimation
	 * @throws IOException declared for interface stability (no I/O happens here)
	 */
	public Hmm(String[] corpura, Smoothing smoothingMethod)
										throws IOException {

		this.corpura = corpura;
		this.smoothingMethod = smoothingMethod;
		this.trainingCorpusPath = corpura[0];

		// initializing the statistics collections
		allUniqueTags = new LinkedList<String>();
		allUniqueBaseWords = new LinkedList<String>();
		corpusTagsCount = new HashMap<String,Integer>();
		corpusTagedWordsCount = new HashMap<String,Integer>();
		tagsToIndex = new HashMap<String,Integer>();
		wordsToIndex = new HashMap<String,Integer>();
	}

	/**
	 * Used for testing only! Builds a model directly from precomputed data;
	 * the corpus-related fields are left null and {@link #init()} must not be
	 * called on an instance created this way.
	 *
	 * @param allUniqueTags      all tags, in index order
	 * @param allUniqueBaseWords all base words, in index order
	 * @param pi initial state probabilities
	 * @param a  state transition matrix
	 * @param b  output (emission) matrix
	 */
	public Hmm(	LinkedList<String> allUniqueTags,
				LinkedList<String> allUniqueBaseWords,
				double[] pi, double[][] a, double[][] b) {
		this.allUniqueTags = allUniqueTags;
		this.allUniqueBaseWords = allUniqueBaseWords;
		this.pi = pi;
		A = a;
		B = b;
		this.tagsToIndex = null;
		this.wordsToIndex = null;
		trainingCorpusPath = null;
		smoothingMethod = Smoothing.LAPLACE;
		corpura = null;
		corpusTagsCount = null;
		corpusTagedWordsCount = null;
	}

	/**
	 * Reads every corpus file, collects the tag/word statistics and computes
	 * the smoothed model parameters (pi, A and B).
	 *
	 * @throws IOException if any corpus file cannot be read
	 */
	public void init() throws IOException {

		// reset the statistics so init() can be called more than once
		numTagsInCorpura = 0;
		numSentencesInCorpura = 0;
		numBaseWordsInCorpura = 0;
		numSentencesInCorpus = -1;

		// calculating all of the statistics
		for (String fileName : corpura) {
			getStatistics(fileName);

			// snapshot the sentence count right after the training corpus
			// (corpura[0]) so it excludes the held-out corpora
			if (fileName.equals(trainingCorpusPath)) {
				numSentencesInCorpus = numSentencesInCorpura;
			}
		}
		calculateTagStatistics();

		// initialize matrices
		int numTags = allUniqueTags.size();
		A = new double[numTags][numTags];
		int numWords = allUniqueBaseWords.size();
		B = new double[numTags][numWords];
		pi = new double[numTags];

		calculateTransitionMatrix();
		calculateOutSymbolsMatrix();
		calculateInitialProbabilites();
	}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

	public int getNumOfTags() {
		return numTagsInCorpura;
	}

	public int getNumWords() {
		return numBaseWordsInCorpura;
	}

	public int getNumSentences() {
		return numSentencesInCorpura;
	}

	public int getNumSentencesInCorpus() {
		return numSentencesInCorpus;
	}

	@SuppressWarnings("unchecked") public LinkedList<String> getAllTags() {
		return (LinkedList<String>) allUniqueTags.clone();
	}

	@SuppressWarnings("unchecked") public LinkedList<String> getAllBaseWords() {
		return (LinkedList<String>) allUniqueBaseWords.clone();
	}

	public Map<String,Integer> getTagsToIndex() {
		return tagsToIndex;
	}

	public Map<String,Integer> getBaseWordsToIndex() {
		return wordsToIndex;
	}

	public double[][] getA() {
		return A;
	}

	public double[][] getB() {
		return B;
	}

	public double[] getPi() {
		return pi;
	}

	public Map<String,Integer> getCourpusTagCount() {
		return corpusTagsCount;
	}

	public Map<String,Integer> getCourpusTagedWordCount() {
		return corpusTagedWordsCount;
	}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

	/**
	 * Calculates the state transition matrix A: raw bigram tag counts from the
	 * training corpus, smoothed row by row with the configured method.
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void calculateTransitionMatrix() throws IOException {

		// calculating the tag bigram counts into A
		tagCount();

		int tagCount = allUniqueTags.size();
		// smoothing: row total is the outgoing-transition count of tag i,
		// type count is the number of distinct tags
		for (int i = 0; i < tagCount; ++i) {
			for (int j = 0; j < tagCount; ++j) {

				A[i][j] = smoothingMethod.smooth(	A[i][j],
													corpusTagsCount.get(allUniqueTags.get(i)),
													tagCount);
			}
		}
	}

	/**
	 * Calculates the output (emission) matrix B: raw tag/word counts from the
	 * training corpus, smoothed row by row with the configured method.
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void calculateOutSymbolsMatrix() throws IOException {

		// calculating the word tag counts into B
		wordCount();

		int wordCount = allUniqueBaseWords.size();
		int tagCount = allUniqueTags.size();
		// smoothing: row total is the tagged-word count of tag i,
		// type count is the number of distinct base words
		for (int i = 0; i < tagCount; ++i) {
			for (int j = 0; j < wordCount; ++j) {

				B[i][j] = smoothingMethod.smooth(	B[i][j],
													corpusTagedWordsCount.get(allUniqueTags.get(i)),
													wordCount);
			}
		}
	}

	/**
	 * Calculates the vector of the initial state probabilities: counts which
	 * tag opens each sentence, then smooths over the sentence total.
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void calculateInitialProbabilites() throws IOException {

		// calculating the sentence-initial tag counts into pi
		initStatistics();

		// calculating the smoothing
		for (int i = 0; i < pi.length; i++) {
			pi[i] = smoothingMethod.smooth(	pi[i],
											numSentencesInCorpus,
											allUniqueTags.size());
		}
	}

	/**
	 * Accumulates tag-bigram counts from the training corpus into A.
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void tagCount() throws IOException {

		Scanner sentences = openSentences(trainingCorpusPath);
		try {
			while (sentences.hasNext()) {
				String[] taggedWords = sentences.next().split("(\\s)*\\n");

				// each consecutive pair (i-1, i) is one observed transition
				for (int i = 1; i < taggedWords.length; ++i) {

					int prevPos = tagsToIndex.get(getTag(taggedWords[i - 1]));
					int curPos = tagsToIndex.get(getTag(taggedWords[i]));

					++A[prevPos][curPos];
				}
			}
		} finally {
			sentences.close(); // also closes the underlying reader
		}
	}

	/**
	 * Accumulates tag/word emission counts from the training corpus into B.
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void wordCount() throws IOException {

		Scanner sentences = openSentences(trainingCorpusPath);
		try {
			while (sentences.hasNext()) {
				String[] taggedWords = sentences.next().split("(\\s)*\\n");

				for (int i = 0; i < taggedWords.length; ++i) {

					String baseWord = getBaseWord(taggedWords[i]);
					String curTag = getTag(taggedWords[i]);
					if (!baseWord.equals("")) {

						int wordPos = wordsToIndex.get(baseWord);
						int tagPos = tagsToIndex.get(curTag);
						B[tagPos][wordPos]++;
					}
				}
			}
		} finally {
			sentences.close(); // also closes the underlying reader
		}
	}

	/**
	 * Counts the tag that opens each sentence of the training corpus into pi.
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void initStatistics() throws IOException {

		Scanner sentences = openSentences(trainingCorpusPath);
		try {
			while (sentences.hasNext()) {
				String[] taggedWords = sentences.next().split("(\\s)*\\n");

				int startingTag = tagsToIndex.get(getTag(taggedWords[0]));
				pi[startingTag]++;
			}
		} finally {
			// the original leaked both the Scanner and the reader here
			sentences.close();
		}
	}

	/**
	 * Computes per-tag totals over the training corpus: the number of outgoing
	 * transitions (every position except the sentence-final one) and the number
	 * of tagged words (every position).
	 *
	 * @throws IOException if the training corpus cannot be read
	 */
	private void calculateTagStatistics() throws IOException {

		// initializing the mapping
		for (String tag : allUniqueTags) {
			corpusTagsCount.put(tag, 0);
			corpusTagedWordsCount.put(tag, 0);
		}

		Scanner sentences = openSentences(trainingCorpusPath);
		try {
			while (sentences.hasNext()) {

				String[] taggedWords = sentences.next().split("(\\s)*\\n");

				for (int i = 0; i < taggedWords.length; ++i) {

					String tag = getTag(taggedWords[i]);

					// the last word of a sentence has no outgoing transition,
					// so it is excluded from the transition denominator
					if (i < (taggedWords.length - 1)) {
						corpusTagsCount.put(tag, corpusTagsCount.get(tag) + 1);
					}
					corpusTagedWordsCount.put(tag, corpusTagedWordsCount.get(tag) + 1);
				}
			}
		} finally {
			// the original leaked both the Scanner and the reader here
			sentences.close();
		}
	}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

	/**
	 * Opens a corpus file, skips its header and returns a Scanner that yields
	 * one sentence per token, using "start start start" as the delimiter.
	 * The caller MUST close the returned Scanner (which closes the reader too).
	 *
	 * @param path corpus file path
	 * @return a Scanner positioned at the first sentence
	 * @throws IOException if the file cannot be opened or read
	 */
	private Scanner openSentences(String path) throws IOException {
		BufferedReader in = new BufferedReader(new FileReader(path));
		// initializing the file to the starting position of reading
		startPosReading(in);
		Scanner sentences = new Scanner(in);
		sentences.useDelimiter(SENTENCE_DELIMITER);
		return sentences;
	}

	/**
	 * Extracts the tag from a tagged-word line. The line format is presumably
	 * "word base tag" (space separated) — the tag is the third token when
	 * present, otherwise the last one; lines with fewer than two tokens yield
	 * "unknown".
	 */
	private String getTag(String word) {

		String[] items = word.split("( )+");
		int index = (items.length >= 3) ? 3 : items.length;

		if (index > 1) {
			return items[index - 1];
		}
		return "unknown";
	}

	/** Registers the tag of one tagged-word line in the global statistics. */
	private void processTag(String word) {

		String elementToAdd = getTag(word);

		// "start" is a sentence delimiter, not a real tag
		if (!(elementToAdd.equals("start"))) {

			numTagsInCorpura++;

			if (!allUniqueTags.contains(elementToAdd)) {
				allUniqueTags.add(elementToAdd);
				tagsToIndex.put(elementToAdd, allUniqueTags.size() - 1);
			}
		}
	}

	/**
	 * Extracts the base word from a tagged-word line: the second-to-last token,
	 * or the line itself when it holds a single token.
	 */
	private String getBaseWord(String str) {
		String[] items = str.split(" +");

		int index = (items.length > 1) ? items.length - 2 : 0;
		return items[index];
	}

	/** Registers the base word of one tagged-word line in the global statistics. */
	private void processBaseWords(String str) {
		String baseWord = getBaseWord(str);

		// "start" is a sentence delimiter, not a real word
		if (!baseWord.equals("start")) {

			numBaseWordsInCorpura++;

			if (!(allUniqueBaseWords.contains(baseWord))) {
				allUniqueBaseWords.add(baseWord);
				wordsToIndex.put(baseWord, allUniqueBaseWords.size() - 1);
			}
		}
	}

	/**
	 * Scans one corpus file line by line, collecting the unique tags, the
	 * unique base words and the number of sentences.
	 *
	 * @param fileName corpus file path
	 * @throws IOException if the file cannot be read
	 */
	private void getStatistics(String fileName) throws IOException {

		BufferedReader in = new BufferedReader(new FileReader(fileName));
		try {
			// initializing the file to the starting position of reading
			startPosReading(in);

			// evaluating the rest of the file
			String str;
			while ((str = in.readLine()) != null) {
				processTag(str);
				processBaseWords(str);
				if (str.matches("start(\\s)*start(\\s)*start")) {
					numSentencesInCorpura++;
				}
			}
		} finally {
			in.close();
		}
	}

	/** Skips the three header lines at the top of every corpus file. */
	private void startPosReading(BufferedReader in) throws IOException {
		for (int i = 0; i < 3; ++i) {
			in.readLine();
		}
	}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
}
