package recherche;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;

import tools.FrenchStemmer;
import tools.FrenchTokenizer;
import tools.Normalizer;
import view.SearchEngine;

/**
 * Facade for the search engine application: builds the on-disk indexes
 * (stemmed or tokenized) of a corpus directory and runs queries against them.
 *
 * NOTE(review): DIRNAME is a static field mutated through an instance setter,
 * so every instance shares the same corpus directory.
 */
public class LancementAppli {

	/** Root directory of the corpus to index; set via {@link #setDirName(String)}. */
	private static String DIRNAME;

	/** Path of the stop-words file handed to the normalizers. */
	public static String STOP_WORDS = "./stopWords";

	public LancementAppli() {
		// Nothing to initialize; configuration happens through setDirName().
	}

	/**
	 * Sets the corpus root directory used by the indexation methods.
	 *
	 * @param dirname path of the corpus "texte" directory
	 */
	public void setDirName(String dirname) {
		DIRNAME = dirname;
	}

	/**
	 * Builds the stemmed index of the corpus and serializes it (plus the URL
	 * map) to "indexStemmer.ser".
	 *
	 * @throws IOException          if reading the corpus or writing the index fails
	 * @throws InterruptedException if the underlying parsing is interrupted
	 */
	public void indexationStemmer() throws IOException, InterruptedException {
		buildIndex(new FrenchStemmer(STOP_WORDS), true, "indexStemmer.ser");
	}

	/**
	 * Builds the tokenized (non-stemmed) index of the corpus and serializes it
	 * (plus the URL map) to "indexTokenizer.ser".
	 *
	 * @throws IOException          if reading the corpus or writing the index fails
	 * @throws InterruptedException if the underlying parsing is interrupted
	 */
	public void indexationTokenizer() throws IOException, InterruptedException {
		buildIndex(new FrenchTokenizer(STOP_WORDS), false, "indexTokenizer.ser");
	}

	/**
	 * Shared indexation pipeline (previously duplicated in both public
	 * indexation methods): parse the corpus under DIRNAME, extract the words
	 * with the given normalizer, then serialize the resulting
	 * document-frequency map and the URL map.
	 *
	 * @param normalizer stemmer or tokenizer used to normalize the words
	 * @param useStemmer true to fill the stemmer dfs map, false for the tokenizer one
	 * @param indexFile  file name the dfs map is serialized to
	 * @throws IOException          if reading the corpus or writing the index fails
	 * @throws InterruptedException if the underlying parsing is interrupted
	 */
	private void buildIndex(Normalizer normalizer, boolean useStemmer, String indexFile)
			throws IOException, InterruptedException {
		Indexation indexation = new Indexation();

		// Fill "mapFichiers" by walking the corpus directory recursively.
		indexation.parserListeFichierRecurs(DIRNAME);

		// Pre-size the url/title map to the number of parsed documents.
		indexation.setUrlTitle(new HashMap<Integer, ArrayList<String>>(indexation.getMapFichiers().size()));

		// Extract the words from the parsed file data (stop words and URL
		// handling enabled), then serialize the matching dfs map.
		if (useStemmer) {
			indexation.triMapFichiers(normalizer, indexation.getDfsStemmer(), true, true);
			indexation.serializeMap(indexation.getDfsStemmer(), indexFile);
		} else {
			indexation.triMapFichiers(normalizer, indexation.getDfsTokenizer(), true, true);
			indexation.serializeMap(indexation.getDfsTokenizer(), indexFile);
		}
		indexation.serializeURL();

		indexation.deserialization = true;
	}

	/**
	 * Runs a query against a previously serialized index, deserializing it
	 * first if it has not been loaded yet.
	 *
	 * NOTE(review): "deserialization" is read on a freshly created Indexation;
	 * this only skips the reload if that flag is static — confirm in Indexation.
	 *
	 * @param req      the query terms — unused here, presumably consumed by the
	 *                 Modele beforehand; TODO confirm against callers
	 * @param mod      ranking model that scores the documents
	 * @param norm     normalizer matching the index being queried
	 * @param dfs      document-frequency map to fill and query
	 * @param filename serialized index to load ("indexStemmer.ser" or "indexTokenizer.ser")
	 * @return the result map produced by the model (document URLs/titles)
	 */
	public HashMap<String, String> recherche(ArrayList<String> req, Modele mod, Normalizer norm,
			HashMap<String, HashMap<Integer, Integer>> dfs, String filename) {
		Indexation indexation = new Indexation();

		if (!indexation.deserialization) {
			indexation.deserializeMap(dfs, filename);
			indexation.deserializeURL();
			indexation.deserialization = true;
		}

		try {
			mod.addRequeteMap(indexation.getUrlTitle(), norm, dfs);
		} catch (IOException e) {
			// Best-effort: original behavior is to log and continue; note that
			// the model then executes on a possibly incomplete request map.
			e.printStackTrace();
		}
		return mod.execute(dfs, indexation.getUrlTitle());
	}

	/**
	 * Launches the graphical search engine.
	 *
	 * @param args ignored
	 * @throws IOException          kept for signature compatibility
	 * @throws InterruptedException kept for signature compatibility
	 */
	public static void main(String[] args) throws IOException, InterruptedException {
		// Constructing the SearchEngine presumably builds and shows the UI —
		// the instance itself is never referenced afterwards.
		new SearchEngine();
	}
}
