package tools;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Vector;

import tools.FrenchTokenizerAutomaton.Signal;

/**
 * French-text tokenizer driven by a {@link FrenchTokenizerAutomaton} transducer.
 * Optionally filters tokens against a stop-word list loaded from a file
 * (one stop word per line).
 */
public class FrenchTokenizer implements Normalizer{

	/** Automaton that emits {@link Signal}s as characters are fed to it. */
	private FrenchTokenizerAutomaton transducer;
	/** Stop words to drop when removal is requested; never null. */
	private ArrayList<String> stopWords;

	/** Creates a tokenizer with an empty stop-word list. */
	public FrenchTokenizer() {
		this.transducer = new FrenchTokenizerAutomaton();
		// Initialize to an empty list so tokenize(text, true) cannot hit a
		// NullPointerException in removeAll().
		this.stopWords = new ArrayList<String>();
	}

	/**
	 * Creates a tokenizer whose stop words are read from a text file,
	 * one word per line.
	 *
	 * @param stopWordFileName path of the stop-word file
	 * @throws IOException if the file cannot be opened or read
	 */
	public FrenchTokenizer(String stopWordFileName) throws IOException{
		this.transducer = new FrenchTokenizerAutomaton();
		this.stopWords = new ArrayList<String>();
		// try-with-resources guarantees the reader is closed even when
		// readLine() throws (the original leaked it on error).
		// NOTE(review): charset is left as the platform default to preserve
		// existing behavior; consider StandardCharsets.UTF_8 once the
		// stop-word file encoding is confirmed.
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(stopWordFileName)))) {
			String line;
			while ((line = br.readLine()) != null) {
				this.stopWords.add(line);
			}
		}
	}

	/**
	 * Normalizes a document supplied as a list of lines. Line 0 is expected to
	 * contain {@code URL=...} and line 1 {@code TITLE=...}; the title plus all
	 * remaining lines form the text that is lower-cased and tokenized.
	 *
	 * @param fileContent     document lines; must have at least two entries
	 * @param URL_title       output list; receives the URL then the title when
	 *                        {@code doUrl} is true
	 * @param removeStopWords whether to filter tokens against the stop-word list
	 * @param doUrl           whether to record the URL and title into {@code URL_title}
	 * @return the token list produced by {@link #tokenize(String, boolean)}
	 * @throws IOException declared for interface compatibility; not thrown here
	 */
	public ArrayList<String> normalize_ok(ArrayList<String> fileContent, ArrayList<String> URL_title, boolean removeStopWords, boolean doUrl) throws IOException {
		// Extract the URL. split() returns a single-element array when the
		// "URL=" marker is absent, so guard with < 2 to avoid an
		// ArrayIndexOutOfBoundsException on url[1] (the original had no guard).
		String[] url = fileContent.get(0).split("URL=");
		if (url.length < 2) {
			url = new String[] { " ", " " };
		}
		// Extract the title. The original tested length==0, which split()
		// never returns for these inputs; the meaningful failure mode is
		// length==1 (no "TITLE=" marker), so test < 2.
		String[] title = fileContent.get(1).split("TITLE=");
		if (title.length < 2) {
			title = new String[] { " ", " " };
		}
		if (doUrl) {
			// Record URL and title for the caller.
			URL_title.add(url[1]);
			URL_title.add(title[1]);
		}
		// Index the title along with the body. StringBuilder avoids the
		// O(n^2) cost of += concatenation over large documents.
		StringBuilder text = new StringBuilder(title[1]);
		for (int i = 2; i < fileContent.size(); i++) {
			text.append(fileContent.get(i)).append(' ');
		}
		return this.tokenize(text.toString().toLowerCase(), removeStopWords);
	}

	/**
	 * Normalizes a query supplied as a list of lines: joins them with spaces,
	 * lower-cases the result, and tokenizes it.
	 *
	 * @param contenuRequete  query lines
	 * @param removeStopWords whether to filter tokens against the stop-word list
	 * @return the token list
	 * @throws IOException declared for interface compatibility; not thrown here
	 */
	public ArrayList<String> normalize_req(ArrayList<String> contenuRequete, boolean removeStopWords) throws IOException {
		StringBuilder text = new StringBuilder();
		for (int i = 0; i < contenuRequete.size(); i++) {
			text.append(contenuRequete.get(i)).append(' ');
		}
		return this.tokenize(text.toString().toLowerCase(), removeStopWords);
	}

	/** Tokenizes {@code text} without stop-word removal. */
	public ArrayList<String> tokenize(String text){
		return this.tokenize(text, false);
	}

	/**
	 * Drives the automaton over the stream of chars, cutting tokens at the
	 * boundaries the automaton signals.
	 *
	 * @param text            text to tokenize
	 * @param removeStopWords whether to drop tokens present in the stop-word list
	 * @return the token list, in order of appearance
	 */
	public ArrayList<String> tokenize(String text , boolean removeStopWords) {
		char[] textContent = text.toCharArray();
		ArrayList<String> tokens = new ArrayList<String>();
		// Initialize the execution: begin == -1 means "no token in progress".
		int begin = -1;
		transducer.reset();
		String word;
		// Run over the chars. NOTE(review): the end/switch cases assume the
		// automaton always emitted start_word first (begin != -1) — presumed
		// guaranteed by FrenchTokenizerAutomaton; confirm against its states.
		for(int i=0 ; i<textContent.length ; i++) {
			Signal s = transducer.feedChar( textContent[i] );
			switch(s) {
			case start_word:
				begin = i;
				break;
			case end_word:
				// Token ends just before the current char.
				word = text.substring(begin, i);
				this.addToken(tokens, word);
				begin = -1;
				break;
			case end_word_prev:
				// Token ended one char earlier (e.g. a trailing separator).
				word = text.substring(begin, i-1);
				this.addToken(tokens, word);
				break;
			case switch_word:
				// Close the current token and start a new one at this char.
				word = text.substring(begin, i);
				this.addToken(tokens, word);
				begin = i;
				break;
			case switch_word_prev:
				// Close the token one char back and start a new one here.
				word = text.substring(begin, i-1);
				this.addToken(tokens, word);
				begin = i;
				break;
			case cancel_word:
				// Abandon the token in progress.
				begin = -1;
				break;
			}
		}
		// Flush the last token if one is still open at end of input.
		if (begin != -1) {
			word = text.substring(begin, text.length());
			this.addToken(tokens, word);
		}
		if (removeStopWords) {
			tokens.removeAll(this.stopWords);
		}
		return tokens;
	}

	/** Appends {@code token} to {@code list} and returns the list (fluent helper). */
	private ArrayList<String> addToken(ArrayList<String> list, String token) {
		list.add(token);
		return list;
	}

}
