package no.ntnu.idi.deid;

import java.io.File;
import java.io.IOException;
import org.annolab.tt4j.TreeTaggerException;
import no.ntnu.idi.deid.config.FilePaths;
import no.ntnu.idi.deid.decision.SensitivityFinalizer;
import no.ntnu.idi.deid.documentunits.Document;
import no.ntnu.idi.deid.patternmatching.search.Search;
import no.ntnu.idi.deid.patternmatching.search.dictionary.lucene.IndexBuilder;
import no.ntnu.idi.deid.postprocessor.DocumentPostProcessor;
import no.ntnu.idi.deid.preprocessor.Documents;
import no.ntnu.idi.deid.statistical.tfidf.DocumentFrequencyMain;


public class Main {

	/**
	 * Entry point for the de-identification pipeline. Builds the term token
	 * index, preprocesses the input documents, runs sentence/token/final
	 * pattern-matching searches on each document, finalizes sensitivity
	 * decisions, and post-processes the results.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {
		// Build the token index from the term dictionary before any searching.
		IndexBuilder.buildIndex(FilePaths.getTermDictionaryDirectory(),
				FilePaths.getTermIndexDirectory());

		Documents documents;
		try {
			System.out.println("Starting documentPreProcessor..");
			documents = new Documents();
			System.out.println("Finished preprocessing "
					+ documents.getDocuments().size() + " documents");
		} catch (IOException | TreeTaggerException e) {
			// Preprocessing failed: the rest of the pipeline cannot run
			// without documents, so abort with a non-zero (failure) exit code.
			// (Previously a TreeTaggerException fell through with documents
			// still null, causing an NPE in the search loop below.)
			e.printStackTrace();
			System.exit(1);
			return; // unreachable, but makes definite assignment explicit
		}

		System.out.println("Token search starting..");
		for (Document doc : documents) {
			Search search = new Search(doc);
			search.searchAllSentences();
			search.searchAllTokens();
			search.finalSearch();
		}
		SensitivityFinalizer.runFinalizer(documents);

		System.out.println("Token search finished");
		DocumentPostProcessor.processDocuments(documents);
	}

}
