package treetagger;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;

import treetagger.FrenchTokenizerAutomaton.Signal;

public class FrenchTokenizer implements Normalizer{
	/** Character encoding of the input files (Latin-1, suitable for accented French text). */
	public static final Charset ENCODING = StandardCharsets.ISO_8859_1;
	/** Finite-state transducer that emits a tokenization {@link Signal} for each char fed to it. */
	private FrenchTokenizerAutomaton transducer;
	
	public FrenchTokenizer() {
		this.transducer = new FrenchTokenizerAutomaton();
	}
	
	/**
	 * Reads the whole text file, flattens newlines to spaces, lowercases the
	 * content and tokenizes it.
	 *
	 * @param fileName path of the text file to read (expected in {@link #ENCODING})
	 * @return the list of tokens, or {@code null} if the file could not be read
	 */
	@Override
	public ArrayList<String> normalize(String fileName){
		// StringBuilder avoids the O(n^2) cost of repeated String concatenation
		// over a potentially large file.
		StringBuilder text = new StringBuilder();
		// try-with-resources guarantees the reader is closed even when an
		// IOException occurs mid-read (the original leaked the stream there).
		try (BufferedReader br = new BufferedReader(
				new InputStreamReader(new FileInputStream(fileName), ENCODING))) {
			String line;
			while ((line = br.readLine()) != null) {
				// Line breaks are replaced by single spaces.
				text.append(line).append(' ');
			}
		}
		catch (IOException e) {
			System.out.println(e.getLocalizedMessage());
			// Kept for backward compatibility: existing callers test for null.
			return null;
		}
		return this.tokenize(text.toString().toLowerCase());
	}
	
	
	/**
	 * Drives the automaton over the stream of chars and collects tokens.
	 * The automaton reports, for each char, whether a word starts, ends
	 * (possibly one char earlier), switches directly to a new word, or is
	 * cancelled; this method translates those signals into substrings.
	 *
	 * @param text the (already lowercased) text to tokenize
	 * @return the list of tokens in order of appearance
	 */
	public ArrayList<String> tokenize(String text) {
		char[] textContent = text.toCharArray();
		ArrayList<String> tokens = new ArrayList<String>();
		// Initialize the execution: begin == -1 means "no word in progress".
		int begin = -1;
		transducer.reset();
		String word;
		// Run over the chars
		for(int i=0 ; i<textContent.length ; i++) {
			Signal s = transducer.feedChar( textContent[i] );
			switch(s) {
			case start_word:
				begin = i;
				break;
			case end_word:
				// Word spans [begin, i): the current char is not part of it.
				word = text.substring(begin, i);
				this.addToken(tokens, word);
				begin = -1;
				break;
			case end_word_prev:
				// Word ended one char before the current position.
				word = text.substring(begin, i-1);
				this.addToken(tokens, word);
				// NOTE(review): begin is deliberately NOT reset here, unlike in
				// end_word — presumably a later signal restarts the word; confirm
				// against FrenchTokenizerAutomaton's semantics.
				break;
			case switch_word:
				// Close the current word and start a new one at the current char.
				word = text.substring(begin, i);
				this.addToken(tokens, word);
				begin = i;
				break;
			case switch_word_prev:
				// Close the current word one char early; new word starts here.
				word = text.substring(begin, i-1);
				this.addToken(tokens, word);
				begin = i;
				break;
			case cancel_word:
				// Discard the word in progress.
				begin = -1;
				break;
			}
		}
		// Add the last word if one was still in progress at end of input.
		if (begin != -1) {
			word = text.substring(begin, text.length());
			this.addToken(tokens, word);
		}
		
		return tokens;
	}
	
	/**
	 * Appends a token to the list and returns the list (for chaining).
	 */
	private ArrayList<String> addToken(ArrayList<String> list, String token) {
		list.add(token);			
		return list;
	}
	

	/**
	 * Demo entry point: tokenizes the file given as first argument
	 * (an empty path, which fails gracefully, when no argument is given)
	 * and prints the resulting token list.
	 */
	public static void main(String[] args) {
//		String test = "Ceci est un test de tokenisation. Avec des abats-jours, des aujourd'hui et des jusqu'a, s'il veut bien l'autre sera la.";	
		String fileName = args.length > 0 ? args[0] : "";
		System.out.println((new FrenchTokenizer()).normalize(fileName));
	}

}
