import java.io.BufferedWriter;
import java.io.FileWriter;
import java.util.Map;

import Classes.Path;
import PreProcessData.*;

/**
 * !!! YOU CANNOT CHANGE ANYTHING IN THIS CLASS !!! 
 * This is for INFSCI 2140 in 2020
 *
 */
public class HW1Main {

	/**
	 * Entry point: pre-processes the 'trectext' collection and then the
	 * 'trecweb' collection, printing the wall-clock running time of each.
	 *
	 * @param args unused
	 * @throws Exception propagated from {@link #PreProcess(String)}
	 */
	public static void main(String[] args) throws Exception {
		// Main entrance
		HW1Main hm1 = new HW1Main();

		/* --------------------------------------------------- */
		/* ---------- Process 'trectext' collection ---------- */
		/* --------------------------------------------------- */
		long startTime = System.currentTimeMillis(); // Start time of running code
		hm1.PreProcess("trectext");                  // 1.96min - 503473 files
		long endTime = System.currentTimeMillis();   // End time of running code
		System.out.println("Text corpus running time: " + (endTime - startTime) / 60000.0 + " min");

		/* --------------------------------------------------- */
		/* ---------- Process 'trecweb' collection ----------- */
		/* --------------------------------------------------- */
		startTime = System.currentTimeMillis(); // Start time of running code
		hm1.PreProcess("trecweb");              // 1.39min - 198361 files
		endTime = System.currentTimeMillis();   // End time of running code
		System.out.println("Web corpus running time: " + (endTime - startTime) / 60000.0 + " min");
	}

	/**
	 * Process the collection identified by {@code dataType}: read it document
	 * by document, tokenize each document, lowercase every token, drop
	 * stopwords, stem the survivors, and write one output line per document
	 * (preceded by its docno) to {@code Path.ResultHM1 + dataType}.
	 *
	 * @param dataType either "trectext" or "trecweb"; any other value is
	 *                 treated as "trecweb" (falls into the else branch)
	 * @throws Exception propagated from collection reading or file writing
	 */
	public void PreProcess(String dataType) throws Exception {
		// 1. Loading the collection file and initiate the DocumentCollection class
		DocumentCollection corpus;
		if (dataType.equals("trectext")) {
			corpus = new TrectextCollection(); // Get 'trectext' collection
		} else {
			corpus = new TrecwebCollection(); // Get 'trecweb' collection
		}

		// 2. Loading stopword list and initiate the StopWordRemover and WordNormalizer class
		StopWordRemover stopwordRemoverObj = new StopWordRemover();
		WordNormalizer normalizerObj = new WordNormalizer();

		// 3. Initiate the BufferedWriter to output result.
		// try-with-resources guarantees the writer is flushed and closed even
		// if processing throws; the original leaked the FileWriter on error.
		try (BufferedWriter wr = new BufferedWriter(new FileWriter(Path.ResultHM1 + dataType))) {

			// 4. Initiate a doc object, which can hold its number and content
			Map<String, Object> doc;

			// 5. Process the corpus, document by document, iteratively
			int count = 0;
			while ((doc = corpus.nextDocument()) != null) {
				// 5.1 Load document number of the document (the map's single key)
				String docno = doc.keySet().iterator().next();

				// 5.2 Load document content
				char[] content = (char[]) doc.get(docno);

				// 5.3 Write docno into the result file
				wr.append(docno + "\n");

				// 5.4 Initiate the WordTokenizer class
				WordTokenizer tokenizer = new WordTokenizer(content);

				// 5.5 Initiate a word object, which can hold a word
				char[] word;

				// 5.6 Process the document word by word iteratively
				while ((word = tokenizer.nextWord()) != null) {
					// Each word is transformed into lowercase
					word = normalizerObj.lowercase(word);

					// Filter out stopwords; only non-stopwords reach the result file.
					// NOTE(review): assumes stem(...) returns a String — if it
					// returns char[], the '+' would emit the array's toString();
					// confirm against WordNormalizer.
					if (!stopwordRemoverObj.isStopword(word)) {
						wr.append(normalizerObj.stem(word) + " ");
					}
				}

				wr.append("\n"); // Finish processing one document
				count++;

				if (count % 10000 == 0) {
					System.out.println("Finish " + count + " docs");
				}
			}

			System.out.println("Total document count:  " + count);
		}
	}
}