package tools;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Serializable;
import java.util.ArrayList;

import tools.FrenchTokenizerAutomaton.Signal;

/**
 * Tokenizer for French text driven by a finite-state automaton
 * ({@link FrenchTokenizerAutomaton}), with optional stop-word filtering.
 * Stop words are loaded from a file, one word per line.
 */
public class FrenchTokenizer implements Normalizer, Serializable {

	private static final long serialVersionUID = 1L;

	/** Automaton fed one character at a time; emits word-boundary signals. */
	private FrenchTokenizerAutomaton transducer;

	/** Stop words to filter out; null when built with the no-arg constructor. */
	private ArrayList<String> stopWords;

	/** Creates a tokenizer that performs no stop-word filtering. */
	public FrenchTokenizer() {
		this.transducer = new FrenchTokenizerAutomaton();
	}

	/**
	 * Creates a tokenizer whose stop-word list is loaded from a text file,
	 * one stop word per line, decoded with the platform default charset.
	 *
	 * @param stopWordFileName path of the stop-word file
	 * @throws IOException if the file cannot be opened or read
	 */
	public FrenchTokenizer(String stopWordFileName) throws IOException {
		this.transducer = new FrenchTokenizerAutomaton();
		this.stopWords = new ArrayList<String>();
		BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(stopWordFileName)));
		try {
			String line;
			while ((line = br.readLine()) != null) {
				this.stopWords.add(line);
			}
		} finally {
			// close even when readLine throws, so the stream is never leaked
			br.close();
		}
	}

	/**
	 * Reads a file with the platform default charset, lower-cases its content
	 * and tokenizes it. No stop-word filtering is applied.
	 *
	 * @param fileName path of the text file
	 * @return the tokens in reading order
	 * @throws IOException if the file cannot be read
	 */
	public ArrayList<String> normalize(String fileName) throws IOException {
		return this.tokenize(readFile(fileName, null).toLowerCase());
	}

	/**
	 * Drives the automaton over the character stream and cuts tokens out of
	 * the input according to the signals it emits.
	 *
	 * @param text the text to split
	 * @return the tokens in reading order
	 */
	public ArrayList<String> tokenize(String text) {
		char[] textContent = text.toCharArray();
		ArrayList<String> tokens = new ArrayList<String>();
		// begin == -1 means "no token currently open"
		int begin = -1;
		transducer.reset();
		String word;
		for (int i = 0; i < textContent.length; i++) {
			Signal s = transducer.feedChar(textContent[i]);
			switch (s) {
			case start_word:
				begin = i;
				break;
			case end_word:
				// token ends just before the current char
				word = text.substring(begin, i);
				this.addToken(tokens, word);
				begin = -1;
				break;
			case end_word_prev:
				// token ended one char earlier.
				// NOTE(review): begin is not reset to -1 here, unlike end_word;
				// presumably the automaton always follows with a signal that
				// reassigns or cancels it — confirm against the automaton.
				word = text.substring(begin, i - 1);
				this.addToken(tokens, word);
				break;
			case switch_word:
				// close the current token and immediately open a new one
				word = text.substring(begin, i);
				this.addToken(tokens, word);
				begin = i;
				break;
			case switch_word_prev:
				// same as switch_word, but the old token ended one char earlier
				word = text.substring(begin, i - 1);
				this.addToken(tokens, word);
				begin = i;
				break;
			case cancel_word:
				// discard the token currently being built
				begin = -1;
				break;
			}
		}
		// flush the token left open at end of input, if any
		if (begin != -1) {
			word = text.substring(begin, text.length());
			this.addToken(tokens, word);
		}

		return tokens;
	}

	/** Appends a token to the list and returns the list. */
	private ArrayList<String> addToken(ArrayList<String> list, String token) {
		list.add(token);
		return list;
	}

	/**
	 * Manual smoke test: tokenizes a sample file with stop-word removal and
	 * prints each token. Paths are machine-specific.
	 */
	public static void main(String[] args) {
		String stopWordFileName = "C:/Users/Nath/workspace/EIT_2/frenchST.txt";
		String fileName = "C:/Users/Nath/workspace/EIT_2/lemonde/texte.95-1.txt";
		String encoding = "ISO-8859-1";
		try {
			FrenchTokenizer ft = new FrenchTokenizer(stopWordFileName);
			ArrayList<String> resultat = ft.normalize(fileName, encoding, true);
			for (String word : resultat) {
				System.out.println(word);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Reads a file with the given charset, lower-cases its content, tokenizes
	 * it and optionally filters stop words.
	 *
	 * @param fileName path of the text file
	 * @param encoding charset name used to decode the file
	 * @param removeStopWords whether to drop the loaded stop words
	 * @return the (possibly filtered) tokens in reading order
	 * @throws IOException if the file cannot be read
	 */
	@Override
	public ArrayList<String> normalize(String fileName, String encoding,
			boolean removeStopWords) throws IOException {
		ArrayList<String> words = this.tokenize(readFile(fileName, encoding).toLowerCase());
		return removeStopWords ? this.removeStopWords(words) : words;
	}

	/**
	 * Lower-cases a text, tokenizes it and optionally filters stop words.
	 *
	 * @param text the raw text
	 * @param removeStopWords whether to drop the loaded stop words
	 * @return the (possibly filtered) tokens in reading order
	 */
	@Override
	public ArrayList<String> normalizeText(String text, boolean removeStopWords) {
		ArrayList<String> words = this.tokenize(text.toLowerCase());
		return removeStopWords ? this.removeStopWords(words) : words;
	}

	/**
	 * Reads a whole file into one string, joining lines with single spaces
	 * (a space acts as a token separator for the automaton).
	 *
	 * @param fileName path of the file
	 * @param encoding charset name, or null for the platform default
	 * @return the file content with line breaks replaced by spaces
	 * @throws IOException if the file cannot be read
	 */
	private String readFile(String fileName, String encoding) throws IOException {
		// StringBuilder avoids the O(n^2) cost of += concatenation in a loop
		StringBuilder text = new StringBuilder();
		InputStream ips = new FileInputStream(fileName);
		InputStreamReader ipsr = (encoding == null)
				? new InputStreamReader(ips)
				: new InputStreamReader(ips, encoding);
		BufferedReader br = new BufferedReader(ipsr);
		try {
			String line;
			while ((line = br.readLine()) != null) {
				text.append(line).append(' ');
			}
		} finally {
			// close even when readLine throws, so the stream is never leaked
			br.close();
		}
		return text.toString();
	}

	/**
	 * Returns the words that are not in the stop-word list.
	 *
	 * @param words tokens to filter
	 * @return a new list without stop words, or the input list itself when no
	 *         stop-word list was loaded (no-arg constructor)
	 */
	private ArrayList<String> removeStopWords(ArrayList<String> words) {
		if (this.stopWords == null) {
			// no stop-word file was given: nothing to filter
			return words;
		}
		ArrayList<String> result = new ArrayList<String>();
		for (String word : words) {
			// contains() stops at the first match, unlike a full inner scan
			if (!this.stopWords.contains(word)) {
				result.add(word);
			}
		}
		return result;
	}

}
