package processing;

import java.io.BufferedReader;
import java.io.Closeable;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.List;

import util.DocumentTermFrequency;
import util.Index;
import util.PathsAndConstants;

/**
 * Builds an inverted index over a corpus of XML documents.
 *
 * <p>{@link #generateIndex()} walks the corpus rooted at
 * {@code PathsAndConstants.testPath}, tokenizes every {@code .xml} file
 * (stripping tags and punctuation, stemming, and dropping stop words), folds
 * the per-document term frequencies into an {@link Index}, and serializes the
 * result to {@code PathsAndConstants.indexPath}.
 */
public class IndexGenerator {

	// Inverted index under construction; term -> postings list.
	private Index index;

	// Words excluded from the index, loaded once from the stop-word file.
	private List<String> stopWords;

	/**
	 * Loads the stop-word list (one word per line) from
	 * {@code PathsAndConstants.stopWordsPath}. On I/O failure the error is
	 * reported and the list may be left partially filled; indexing then
	 * proceeds with whatever stop words were read.
	 */
	public IndexGenerator() {
		this.stopWords = new ArrayList<String>();

		BufferedReader bReader = null;
		try {
			bReader = new BufferedReader(new FileReader(
					PathsAndConstants.stopWordsPath));

			String currentLine;
			while ((currentLine = bReader.readLine()) != null) {
				stopWords.add(currentLine);
			}
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Close even when readLine throws; the original leaked the
			// reader on the exception path.
			closeQuietly(bReader);
		}
	}

	/**
	 * Builds the in-memory index from every {@code .xml} document under
	 * {@code PathsAndConstants.testPath} and serializes it to
	 * {@code PathsAndConstants.indexPath}.
	 */
	public void generateIndex() {

		this.index = new Index();

		File corpusRoot = new File(PathsAndConstants.testPath);

		// Depth-first walk: each .xml document is tokenized and folded
		// into the index.
		visitDocuments(corpusRoot);

		ObjectOutputStream out = null;
		try {
			out = new ObjectOutputStream(new FileOutputStream(
					PathsAndConstants.indexPath));
			System.out.println("Storing Index...");
			out.writeObject(this.index);
			out.flush();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			closeQuietly(out);
		}
	}

	/**
	 * Recursively visits {@code node}: directories are descended into,
	 * {@code .xml} files are processed, anything else is ignored.
	 */
	private void visitDocuments(File node) {
		if (node.isDirectory()) {
			// listFiles() returns null on I/O error or permission denial;
			// the original dereferenced it unconditionally (NPE).
			File[] children = node.listFiles();
			if (children != null) {
				for (int i = 0; i < children.length; i++) {
					visitDocuments(children[i]);
				}
			}
		} else if (node.getName().endsWith(".xml")) {
			System.out.println("Processing " + node.getName());
			processDocument(node);
		}
	}

	/**
	 * Tokenizes one document and adds its term frequencies to the index.
	 *
	 * <p>Each line is stripped of XML tags, split on spaces, and every token
	 * is de-punctuated and stemmed. Tokens that are empty or stop words are
	 * skipped; the rest are counted into a term-frequency table which is then
	 * merged into the index under the document's (separator-stripped) path.
	 */
	private void processDocument(File fileIn) {
		String docPath = fileIn.getPath().replace(
				"." + PathsAndConstants.FILE_SEPARATOR, "");

		Hashtable<String, Integer> termFrequency =
				new Hashtable<String, Integer>();

		BufferedReader bReader = null;
		try {
			bReader = new BufferedReader(new FileReader(fileIn));

			String currentLine;
			while ((currentLine = bReader.readLine()) != null) {

				currentLine = removeXmlTags(currentLine);
				if (currentLine.equals("")) {
					continue;
				}

				String[] words = currentLine.split(" ");
				for (int i = 0; i < words.length; i++) {

					// Normalize the token: strip punctuation, then stem.
					String currentWord = Stemmer.normalize(
							removePunctuation(words[i]));

					// Count only non-empty, non-stop-word tokens.
					if (!currentWord.equals("")
							&& !stopWords.contains(currentWord)) {
						Integer frequency = termFrequency.get(currentWord);
						termFrequency.put(currentWord,
								frequency == null ? 1 : frequency + 1);
					}
				}
			}
			addToIndex(docPath, termFrequency);
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			// Close even on failure; the original leaked the reader when an
			// exception was thrown before the in-try close().
			closeQuietly(bReader);
		}
	}

	/**
	 * Merges one document's term-frequency table into the inverted index:
	 * for every term, a {@link DocumentTermFrequency} entry is appended to
	 * that term's postings list (created on first sight of the term).
	 */
	private void addToIndex(String docPath,
			Hashtable<String, Integer> termFrequency) {

		Enumeration<String> keys = termFrequency.keys();
		while (keys.hasMoreElements()) {

			String term = keys.nextElement();
			DocumentTermFrequency docTermFreq = new DocumentTermFrequency(
					docPath, termFrequency.get(term));

			List<DocumentTermFrequency> postings =
					this.index.getHashMap().get(term);
			if (postings == null) {
				postings = new ArrayList<DocumentTermFrequency>();
				this.index.getHashMap().put(term, postings);
			}
			// The list is already referenced by the map, so appending in
			// place suffices — no need to re-put it (as the original did).
			postings.add(docTermFreq);
		}
	}

	/** Strips every character that is not a letter or digit from {@code str}. */
	private String removePunctuation(String str) {
		String result = str;
		result = result.replaceAll("[^A-Za-z0-9]", "");
		return result;
	}

	/**
	 * Removes all XML tags from {@code xmlText} and trims the result; any
	 * remaining newlines are replaced with spaces.
	 */
	private String removeXmlTags(String xmlText) {
		String str = xmlText.replaceAll("\\<.*?\\>", "").trim();
		str = str.replaceAll("\n", " ");
		return str;
	}

	/** Closes {@code c} if non-null, reporting (not propagating) any error. */
	private static void closeQuietly(Closeable c) {
		if (c != null) {
			try {
				c.close();
			} catch (IOException ex) {
				ex.printStackTrace();
			}
		}
	}

	public Index getIndex() {
		return index;
	}

	public void setIndex(Index index) {
		this.index = index;
	}

}
