package inabit.compiler.lexical;

import inabit.compiler.lexical.automaton.Automata;
import inabit.compiler.lexical.automaton.AutomataRunner;
import inabit.compiler.lexical.automaton.transitions.InclusiveTransition;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;


/**
 * Rule-compiler lexical analyzer: builds a recognizer automaton for the
 * language's token classes and yields {@link Token}s from a rule file.
 *
 * @author Rafael Rocha
 */
public abstract class LexicalAnalyzer {
	
	// current 1-based position in the loaded text, updated by nextToken()
	private int currentRow;
	private int currentCol;
	// 0-based index of the next character to feed to the automaton
	private int currentIndex;
	private final Automata automata;
	private final Map<String, TokenClass> reservedWords;
	private String fileString;
	
	/**Builds the lexer: creates the recognizer automaton with the given initial
	 * state, registers the univoque symbols and delegates the remaining
	 * construction to {@link #setup(Automata)}.
	 * @param initialStateLabel label of the automaton's initial state*/
	public LexicalAnalyzer(String initialStateLabel) {
		fileString = "";
		reservedWords = new HashMap<String, TokenClass>();
		automata = new Automata();
		automata.addState(initialStateLabel);
		automata.setInitialState(initialStateLabel);
		addUnivoqueSymbols(automata, getTokenClasses());
		setup(automata);
	}
	
	/**Registers a reserved word; reserved words take precedence over the
	 * automaton-state classification in {@link #nextToken()}.*/
	protected void addReservedWord(String word, TokenClass token){
		this.reservedWords.put(word, token);
	}
	
	/**Returns the token classes of the language to be recognized.*/
	protected abstract List<? extends TokenClass> getTokenClasses();
	
	/**Creates the automaton recognizer for the tokens of the language*/
	protected abstract void setup(Automata automata);
	
	/**Adds the univoque (single-spelling) symbols to the generator automaton,
	 * building a chain of states from the initial state for each symbol whose
	 * spelling is not already recognized.
	 * @param automaton the automaton under construction
	 * @param tkClasses all token classes of the language*/
	protected void addUnivoqueSymbols(Automata automaton, List<? extends TokenClass> tkClasses) {
		String initialState = automaton.getInitialState();
		
		for(TokenClass tkClass : tkClasses) {
			if(!tkClass.isReservedWord() && tkClass.isUnivoque()) {
				
				// a fresh runner probes how far the automaton already recognizes this spelling
				AutomataRunner runner = new AutomataRunner(automaton);
				runner.init();
				
				String tkString = tkClass.getStateLabel();
				String destiny = tkClass.getStateLabel();
				boolean newWord = false;
				// NOTE(review): `state` stays at the initial state even when the
				// runner fails mid-word, so chains always branch from the start —
				// confirm this is intended for multi-character symbols.
				String state = initialState;
				
				for(int i = 0; i < tkString.length(); i++) {
					// once a transition is missing, the rest of the spelling is new
					if(!runner.transition(tkString.charAt(i))) {
						newWord = true;
					}
					if(newWord){
						if(i == tkString.length()-1){
							// last character: target state is final and named after the token
							automaton.addState(destiny);
							automaton.setFinalState(destiny);
							automaton.addTransition(new InclusiveTransition(state, destiny, tkString.charAt(i)));
						}else {
							// intermediate character: synthesize a uniquely-named link state
							String newState = destiny + "_" + tkString.charAt(i);
							automaton.addState(newState);
							automaton.addTransition(new InclusiveTransition(state, newState, tkString.charAt(i)));
							state = newState;
						}	
					}
				}
			}
		}
	}
	
	/**Resets the row/column position and the file index to the beginning.*/
	public void reset() {
		currentRow = 1;
		currentCol = 1;
		currentIndex = 0;
	}
	
	/**Loads the rule file and sets the lexer ready to produce tokens.
	 * Replaces any previously loaded content (consistent with
	 * {@link #loadFromText(String)}).
	 * @param file the rule file to read
	 * @throws IOException if the file cannot be read*/
	public void loadFromFile(File file) throws IOException {
		StringBuilder content = new StringBuilder();
		// try-with-resources guarantees the reader is closed even on IOException
		try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
			String line;
			while ((line = reader.readLine()) != null) {
				content.append(line).append(System.lineSeparator());
			}
		}
		// '\1' is the end-of-file sentinel that nextToken() recognizes
		content.append('\1');
		fileString = content.toString();
		reset();
	}
	
	/**Loads the text to tokenize directly.
	 * NOTE(review): unlike loadFromFile, no '\1' EOF sentinel is appended here,
	 * so end-of-input detection in nextToken() differs — confirm callers append
	 * it themselves.*/
	public void loadFromText(String text){
		this.fileString = text;
		reset();
	}
	
	/**Gets the next token and updates the position in the file.
	 * @return the recognized token, or the EOF token at end of input
	 * @throws Exception if the current word cannot be mapped to a token class*/
	public Token nextToken() throws Exception{
		AutomataRunner runner = new AutomataRunner(automata);
		runner.init();
		int line = currentRow;
		int col = currentCol;
		
		// execute the automaton runner until it halts or the input is exhausted
		while(currentIndex < fileString.length() && runner.transition(fileString.charAt(currentIndex))) {
			
			currentCol++;
			char currentChar = fileString.charAt(currentIndex++);
			// keep line/column positions in sync for tokens and error messages
			if(Character.isWhitespace(currentChar)) {
				col++;
				if(currentChar == '\n') {
					currentRow++;
					line = currentRow;
					col = 1;
					currentCol = 1;
				}
			}
		}
		
		String state = runner.getCurrentState();
		String word = runner.getRecognizedWord();
		
		// reserved words take precedence over the state-based classification
		TokenClass tkClass = reservedWords.containsKey(word)
				? reservedWords.get(word)
				: getTokenClass(automata, state);
		
		// if the token class was recognized, return the token
		if(tkClass != null) {
			Token tk = new Token(tkClass, line, col);
			if(!tk.getTokenClass().isUnivoque()) {
				setTokenValue(tk, word);
			}
			return tk;
		}
		
		// no class recognized: consume the halting character (on the throw path
		// too, matching the original behavior) before deciding EOF vs error
		int haltIndex = currentIndex++;
		if(haltIndex == fileString.length()-1) {
			// halted on the '\1' sentinel appended by loadFromFile: end of input
			return new Token(eof(), line, col);
		}
		throw new Exception("Lexical Error: could not recognize word \"" + word + "\" at [" + line + ", " + col+"]");
	}
	
	/**Sets the value of the token if it isn't univoque.*/
	public abstract void setTokenValue(Token tk, String recognizedWord);
	
	/**Returns the token class used for the end-of-file token.*/
	public abstract TokenClass eof();
	
	/**Maps a halting automaton state to its token class, or null if none.*/
	protected abstract TokenClass getTokenClass(Automata automata, String state);

	/**Returns true if the file hasn't finished yet.*/
	public boolean hasTokens() {
		return currentIndex < fileString.length();
	}
	
}
