package parser;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import smoothing.Smoothing;

/**
 * Class used for parsing the data set files:
 * generating train and test files, counting events and generating
 * observed vocabulary
 */
public class Parser {

	public static final long VOCAB_SIZE = 300000L; // vocabulary size
	public static final int ERROR_VAL = -1;	
	public static final String WORD_DELIM = " "; // delimiter between two words
	// Prefix marking a header line. Defaults to "<TRAIN" (the value hinted at in
	// the original declaration); callers may reassign it. A null value could never
	// have worked here (startsWith(null) throws NPE), so initializing is safe.
	public static String HEADER_START = "<TRAIN";

	/**
	 * Splits given development data set file to a train set and a validation set.
	 * First round(proportion*|develFile|) words are train, the rest are validation  
	 * @param develFileName name of development set file
	 * @param trainFileName name of generated train file
	 * @param validFileName name of generated validation file
	 * @param splitProp indicates the rate of division between train and validation
	 * @throws IOException if had errors opening files or writing to files
	 */
	public static void splitDataSet(String develFileName, String trainFileName,
			String validFileName, double splitProp) throws IOException {

		// calculate number of words to be in generated train file
		// (counted up front, before the development file is opened for copying)
		long develNumWords = numEvents(develFileName);
		long trainNumWords = Math.round(splitProp * develNumWords);

		// try-with-resources guarantees the development file is closed even if
		// copying throws (the original leaked the handle on error paths)
		try (BufferedReader develReader = new BufferedReader(new FileReader(new File(develFileName)))) {

			// copy first portion of words from development file to train file
			List<String> remainingWords;
			try (PrintWriter trainWriter = new PrintWriter(
					new BufferedWriter(new FileWriter(new File(trainFileName))))) {
				remainingWords = copyWords(develReader, trainNumWords, trainWriter);
			}

			// write the leftover words of the cut line, then the rest of the
			// development file, to the validation file
			try (PrintWriter validWriter = new PrintWriter(
					new BufferedWriter(new FileWriter(new File(validFileName))))) {

				// words from the line on which the train copy stopped
				writeWords(remainingWords, validWriter);

				// remaining word budget for the validation file
				long validNumWords = develNumWords - trainNumWords - remainingWords.size();
				copyWords(develReader, validNumWords, validWriter);
			}
		}
	}

	/**
	 * Copies first <code>numWordsToCopy</code> words in given source file to given
	 * destination file
	 * @param srcReader source file
	 * @param numWordsToCopy number of first words to be copied
	 * @param dstWriter destination file
	 * @return list of all words at last read line which exceed number of words to copy
	 * @throws IOException if had error reading of writing files
	 */
	private static List<String> copyWords(BufferedReader srcReader, long numWordsToCopy,
			PrintWriter dstWriter) throws IOException {

		// words of the last parsed line that exceeded the copy budget
		List<String> remainingWords = new LinkedList<String>();

		String currLine = srcReader.readLine();

		// copy words until the budget is exhausted or the file ends
		boolean keepReading = true;
		// long (not int, as originally) so the counter cannot overflow while
		// being compared against a long budget
		long wordCtr = 0;
		while (keepReading && (currLine != null)) {

			// skip header and empty lines — they carry no events
			if (isHeaderOrEmpty(currLine)) {
				currLine = srcReader.readLine();
				continue;
			}

			// copy the current line word by word
			String[] words = currLine.split(WORD_DELIM);
			for (int i = 0 ; i < words.length ; ++i) {

				if (wordCtr < numWordsToCopy) {
					// still within budget: copy current word to destination file
					dstWriter.print(words[i] + WORD_DELIM);
					++wordCtr;
				} else {
					// budget exhausted mid-line: collect the tail of this line
					// for the caller and stop
					keepReading = false;
					copySubArray(words, i, remainingWords);
					break;
				}
			}

			// end the line in the destination file (also emitted for the final,
			// partially-copied line, matching the original behavior)
			dstWriter.println("");

			currLine = srcReader.readLine();
		}

		return remainingWords;
	}

	/**
	 * Writes words in given list into given file.
	 * All words are separated with <code>WORD_DELIM</code>
	 * When done, ends the line
	 * @param remainingWords words to write
	 * @param writer file to update
	 */
	private static void writeWords(List<String> remainingWords,
			PrintWriter writer) {

		// write each given word followed by the delimiter
		for (String currWord : remainingWords) {
			writer.print(currWord + WORD_DELIM);
		}

		// end line
		writer.println();
	}

	/**
	 * Copies given array's elements, from start'th element to the last one,
	 * to given list
	 * @param srcArray source array
	 * @param start start index of sub-array to copy
	 * @param dstList destination list
	 */
	private static void copySubArray(String[] srcArray, int start, List<String> dstList) {

		// copy all entries from given index to the end
		for (int i = start ; i < srcArray.length ; ++i) {
			dstList.add(srcArray[i]);
		}
	}

	/**
	 * Counts instances of each word in given file.
	 * Returns the observed vocabulary- a map where 
	 * key is a word and value is its number of instances
	 * @param fileName name of file to process
	 * @return observed vocabulary of given file
	 * @throws IOException if had errors reading file
	 */
	public static Map<String, Integer> genObsVoc(String fileName) throws IOException {

		Map<String, Integer> obsVoc = new HashMap<String, Integer>();

		// try-with-resources closes the file even if reading throws
		try (BufferedReader reader = new BufferedReader(new FileReader(new File(fileName)))) {

			String currLine;
			while ((currLine = reader.readLine()) != null) {

				// skip header and empty lines
				if (isHeaderOrEmpty(currLine)) {
					continue;
				}

				// update word counts; single map lookup per word instead of
				// the original get-then-get-again
				for (String currWord : currLine.split(WORD_DELIM)) {
					Integer count = obsVoc.get(currWord);
					obsVoc.put(currWord, (count == null) ? 1 : count + 1);
				}
			}
		}

		return obsVoc;
	}

	/**
	 * Splits the words in given file to sets, where words in a each 
	 * set have the same number of instances in given file
	 * @param fileName name of file to process
	 * @return a map where key is number of instances and value is list of
	 * all words with this number of instances
	 * @throws IOException if had errors reading file
	 */
	public static Map<Integer, List<String>> wordsByInstNum(String fileName) throws IOException {

		Map<Integer, List<String>> instToWords = new HashMap<Integer, List<String>>();

		// count number of instances of each word in given file
		Map<String, Integer> obsVoc = genObsVoc(fileName);

		// invert the vocabulary: group words by their instance count
		// (single lookup per entry instead of containsKey + get)
		for (Map.Entry<String, Integer> currEntry : obsVoc.entrySet()) {

			List<String> words = instToWords.get(currEntry.getValue());
			if (words == null) {
				// first word with this number of instances
				words = new LinkedList<String>();
				instToWords.put(currEntry.getValue(), words);
			}
			words.add(currEntry.getKey());
		}

		return instToWords;
	}

	/**
	 * Counts number of events (words) in given file
	 * @param fileName name of file to process
	 * @return number of events (words) in given file
	 * @throws IOException if had errors reading file
	 */
	public static long numEvents(String fileName) throws IOException {

		long numEvents = 0;

		// try-with-resources closes the file even if reading throws
		try (BufferedReader reader = new BufferedReader(new FileReader(new File(fileName)))) {

			String currLine;
			while ((currLine = reader.readLine()) != null) {

				// skip header and empty lines — they contain no events
				if (isHeaderOrEmpty(currLine)) {
					continue;
				}

				// every delimiter-separated token on a data line is one event
				numEvents += currLine.split(WORD_DELIM).length;
			}
		}

		return numEvents;
	}

	/**
	 * Calculates the perplexity of given model on given validation file
	 * @param model smoothing model to check
	 * @param validFileName name of validation file
	 * @return perplexity of the model on the validation file
	 * (NaN if the file contains no events, as in the original 0/0 division)
	 * @throws IOException if had errors reading file
	 */
	public static double perplexity(Smoothing model, String validFileName)
			throws IOException {

		// sum of base-2 log-probabilities over all events
		double sumLogProb = 0.0;

		// number of events (words) in the validation file
		long numWords = 0;

		// loop-invariant conversion factor from natural log to log base 2,
		// hoisted out of the per-word loop
		final double ln2 = Math.log(2.0);

		// try-with-resources closes the file even if reading throws
		try (BufferedReader reader = new BufferedReader(new FileReader(validFileName))) {

			String currLine;
			while ((currLine = reader.readLine()) != null) {

				// skip header and empty lines
				if (isHeaderOrEmpty(currLine)) {
					continue;
				}

				// accumulate log2-MLE of each word according to given model
				for (String currWord : currLine.split(WORD_DELIM)) {
					sumLogProb += Math.log(model.mleModel(currWord)) / ln2;
					++numWords;
				}
			}
		}

		// average log-probability per event (NaN when numWords == 0,
		// preserving the original behavior)
		double normSum = sumLogProb / ((double) numWords);

		// perplexity = 2^(-average log2 probability)
		return Math.pow(2.0, -normSum);
	}

	/**
	 * Checks whether given line should be skipped during parsing:
	 * header lines (starting with <code>HEADER_START</code>) and blank lines
	 * carry no events. Shared by all line-scanning methods.
	 * @param line line to examine (non-null)
	 * @return true iff the line is a header line or is blank
	 */
	private static boolean isHeaderOrEmpty(String line) {
		// guard against a caller resetting HEADER_START to null
		return (HEADER_START != null && line.startsWith(HEADER_START))
				|| line.trim().isEmpty();
	}
}
