package parser;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import document.Document;

/**
 * Class used for parsing the data set files:
 * building the observed vocabulary, parsing the documents
 * and counting events (words)
 */
public class Parser {

	public static final String WORD_DELIM = " "; // delimiter between two words
	public static final String HEADER_DELIM = "\\s+"; // delimiter between two tokens in header line
	public static final String HEADER_START = "<TRAIN"; // indicates start of header line
	public static final String HEADER_END = ">"; // indicates end of header line
	private static final int TOPICS_START = 2; // start index of topic tokens in header line 
	public static int VOCAB_SIZE = 0; // vocabulary size (set after generating observed vocabulary)
	public static Map<String,Integer> obsVoc = new HashMap<String, Integer>(); // maps each observed (not rare) word to its index
	
	/**
	 * Counts instances of each word in given file and fills the static
	 * observed vocabulary ({@code obsVoc}): a map where key is a non-rare
	 * word and value is its index. Also sets {@code VOCAB_SIZE}.
	 * @param fileName name of file to process
	 * @param minInstances instances threshold for rare words (words with this
	 *        number of instances or fewer are discarded)
	 * @throws IOException if had errors reading file
	 */
	public static void genObsVoc(String fileName, int minInstances) throws IOException {
		
		// initialize counter of each different word
		Map<String, Integer> wordCounter = new HashMap<String, Integer>();

		// initialize observed vocabulary
		obsVoc.clear();
		
		// open file
		BufferedReader reader = new BufferedReader(new FileReader(new File(
				fileName)));

		// close the reader even if reading throws
		// (the original leaked the file handle on exception)
		try {
			// read file line by line
			String currLine = reader.readLine();

			while (currLine != null) {

				// skip header and empty lines; otherwise count the line's words
				if (!currLine.startsWith(HEADER_START) && !currLine.trim().isEmpty()) {
					for (String currWord : currLine.split(WORD_DELIM)) {
						// single lookup instead of the original get/get/put
						Integer count = wordCounter.get(currWord);
						wordCounter.put(currWord, count == null ? 1 : count + 1);
					}
				}

				// read next line
				currLine = reader.readLine();
			}
		} finally {
			// close file
			reader.close();
		}

		// remove rare words
		discardRare(wordCounter, minInstances);

		// set an index to each word
		setObsVoc(wordCounter.keySet());
		
		// set vocabulary size
		VOCAB_SIZE = wordCounter.size();
	}
	
	/**
	 * Removes rare words from the word count
	 * @param wordCounter maps each word to its number of instances in data set
	 * @param minInstances words with this number of instances and lower are considered rare
	 */
	private static void discardRare(Map<String, Integer> wordCounter, int minInstances) {
		
		// iterate with an explicit Iterator so removal during iteration is safe
		for (Iterator<Map.Entry<String, Integer>> it = wordCounter.entrySet()
				.iterator(); it.hasNext();) {
			
			// remove current word if it is rare
			Map.Entry<String, Integer> currEntry = it.next();
			if (currEntry.getValue() <= minInstances) {
				it.remove();
			}
		}
	}
	
	/**
	 * Assigns each observed word a unique index in {@code obsVoc}
	 * (the iteration order of the given set determines the indices)
	 * @param words non-rare words observed in data set
	 */
	private static void setObsVoc(Set<String> words) {
	
		// next free index
		int numWords = 0;
		
		// match each word to an index
		for (String currWord : words) {
			
			// match current word to its index
			obsVoc.put(currWord, numWords);
			
			// increment counter
			++numWords;
		}
	}
	
	/**
	 * Parses the documents out of given data set file.
	 * Each document is represented as a histogram of its (non-rare) words
	 * and the topics assigned to it by the corpus.
	 * Assumes {@code genObsVoc} was called first so the vocabulary is set.
	 * @param fileName data set file
	 * @return list of all documents in data set
	 * @throws IOException if had errors reading file
	 */
	public static List<Document> parseDocuments(String fileName) throws IOException {
		
		// initialize return value
		List<Document> documents = new LinkedList<Document>();
		
		// holds topics assigned by corpus for current document being read;
		// stays null until the first header line is seen
		ArrayList<String> realTopics = null;
		
		// open file
		BufferedReader reader = new BufferedReader(new FileReader(new File(
				fileName)));
		
		// close the reader even if parsing throws
		// (the original leaked the file handle on exception)
		try {
			// read file line by line
			String currLine = reader.readLine();

			// document counter (used as the document's index)
			int numDocs = 0;

			while (currLine != null) {

				// skip if empty line
				if (currLine.trim().isEmpty()) {
					currLine = reader.readLine();
					continue;
				}

				// header line: parse topics assigned by corpus
				if (currLine.startsWith(HEADER_START)) {
					realTopics = parseRealTopic(currLine);
					currLine = reader.readLine();
					continue;
				}

				// content line: create histogram of current document's words
				int[] wordsHistogram = createHistogram(currLine);

				// create current document
				documents.add(new Document(realTopics, wordsHistogram, numDocs));

				// increment document counter
				++numDocs;

				// read next line
				currLine = reader.readLine();
			}
		} finally {
			// close file
			reader.close();
		}
		
		// return parsed documents		
		return documents;
	}
	
	/**
	 * Extracts topics from given header line, as assigned
	 * by corpus
	 * @param headerLine header line to parse
	 * @return list of topics as assigned by corpus (may be empty if the
	 *         header contains no topic tokens)
	 */
	private static ArrayList<String> parseRealTopic(String headerLine) {
	
		// extract topic tokens
		String[] tokens = headerLine.trim().split(HEADER_DELIM);
		ArrayList<String> realTopics = new ArrayList<String>();
		for (int i = TOPICS_START ; i < tokens.length ; ++i) {
			realTopics.add(tokens[i].trim());
		}
		
		// remove header-end character from last topic token; guard against a
		// header with no topic tokens (original threw IndexOutOfBoundsException)
		if (!realTopics.isEmpty()) {
			realTopics.set(realTopics.size() - 1,
					tokens[tokens.length - 1].replace(HEADER_END, ""));
		}
		
		// return parsed topics
		return realTopics;
	}
	
	/**
	 * Creates a histogram of not rare words in given document 
	 * @param docContent content of document to process (no header)
	 * @return document's word histogram, indexed by vocabulary index
	 */
	private static int[] createHistogram(String docContent) {
		
		// histogram over the observed vocabulary
		int[] histogram = new int[VOCAB_SIZE];
		
		// read document word by word
		for (String currWord : docContent.trim().split(WORD_DELIM)) {
			
			// single lookup: null means the word is rare (not in vocabulary)
			Integer wordIndex = obsVoc.get(currWord);
			if (wordIndex == null) {
				continue;
			}
			
			// increment counter for current word
			histogram[wordIndex] += 1;
		}
		
		// return word histogram
		return histogram;
	}
	
	/**
	 * Counts number of events (words) in given file
	 * @param fileName name of file to process
	 * @return number of events (words) in given file
	 * @throws IOException if had errors reading file
	 */
	public static long numEvents(String fileName) throws IOException {
		
		// initialize return value
		long numEvents = 0;

		// open file
		BufferedReader reader = new BufferedReader(new FileReader(new File(
				fileName)));

		// close the reader even if reading throws
		// (the original leaked the file handle on exception)
		try {
			// read file line by line
			String currLine = reader.readLine();

			while (currLine != null) {

				// skip header and empty lines; otherwise count the line's words
				if (!currLine.startsWith(HEADER_START) && !currLine.trim().isEmpty()) {
					numEvents += currLine.split(WORD_DELIM).length;
				}

				// read next line
				currLine = reader.readLine();
			}
		} finally {
			// close file
			reader.close();
		}

		// return number of events
		return numEvents;
	}
	
	// TODO MOVE TO A SMOOTHING OBJECT
//	/**
//	 * Calculates the perplexity of given model on given validation file
//	 * @param model smoothing model to check
//	 * @param validFileName name of validation file
//	 * @return perplexity of the model on the validation file
//	 * @throws IOException if had errors reading file
//	 */
//	public static double perplexity(Smoothing model, String validFileName)
//			throws IOException {
//
//		// initialize sum of log-probabilities
//		double sumLogProb = 0.0;
//
//		// initialize word count of validation file
//		long numWords = 0;
//
//		// open file
//		BufferedReader reader = new BufferedReader(
//				new FileReader(validFileName));
//
//		// read file line by line
//		String currLine = reader.readLine();
//		while (currLine != null) {
//
//			// skip if empty or header line
//			if (currLine.startsWith(Parser.HEADER_START)
//					|| currLine.trim().isEmpty()) {
//
//				// read next line
//				currLine = reader.readLine();
//				continue;
//			}
//
//			// read each word
//			String[] words = currLine.split(WORD_DELIM);
//			for (String currWord : words) {
//
//				// sum log-MLE for current word according to given model
//				sumLogProb += Math.log(model.mleModel(currWord))
//						/ Math.log(2.0);
//
//				// increment word count
//				numWords += 1;
//			}
//
//			// read next line
//			currLine = reader.readLine();
//		}
//
//		// close file
//		reader.close();
//
//		// normalize sum of log-probabilities
//		double normSum = sumLogProb / ((double) numWords);
//
//		// return perplexity
//		return Math.pow(2.0, -normSum);
//	}
}
