package br.usp.poli.compilador2008.lexical;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.lang.reflect.InvocationTargetException;



/*
 * Lexical Analyzer (Analisador Lexico):
 * 
 * Receives an input file (which becomes part of the object), and then creates a LexAutomata,
 * which is used to read this input file character by character and produce Tokens.
 * 
 * Tokens are defined in the Token class, which is also part of this package.
 * 
 * The lexical analyzer also has an element called TokenSet, which is basically a set of Tokens
 * together with the functions needed to handle them.
 * 
 * There is another important class made available to the rest of the code via the lexical
 * analyzer: the SymbolTable. This class is responsible for generating one entry for each number
 * or identifier read and classified by the LexAutomata.
 * 
 */


public class Lexical {	
	private File source;				// Source file being analyzed
	private TokenSet _tokenSet;			// Set of tokens produced by the analysis
	private int currentToken;			// Cursor into the token set (0-based index)
	private LexAutomata _lexAutomata;	// Automaton that classifies the current word
	private SymbolTable _symbolTable;	// Table of all symbols found during the lexical analysis

	// Constructors

	/**
	 * Convenience constructor: builds the analyzer from a file path.
	 * @param path path to the source file
	 */
	public Lexical(String path){		
		this(new File(path));
	}

	/**
	 * Builds the analyzer for the given source file and initializes the
	 * token set, automaton, symbol table and token cursor.
	 * @param file source file to analyze
	 */
	public Lexical(File file){
		this.source = new File(file.getAbsolutePath());
		this._tokenSet = new TokenSet();
		this._lexAutomata = new LexAutomata();
		this._symbolTable = new SymbolTable();
		resetTokenCounter();
	}

	/**
	 * Reads the source file line by line and populates the TokenSet
	 * and SymbolTable with the values found in the source code. This
	 * implementation is not so smart since the size of the source
	 * code is limited by the size supported by tokenSet, symbolTable
	 * and the lineReader, which are all stored in memory.
	 * 
	 * @throws IOException if the source file cannot be read
	 * @throws InvocationTargetException propagated from TokenSet.addToken
	 */
	public void populateTokenSet() throws IOException, InvocationTargetException{

		int line;
		int column;
		int columnBeginning;

		LineReader lr = new LineReader(source);		// reads the source file line by line	

		// Initial position of the word marker
		line = 0;
		column = 0;
		columnBeginning = 0;

		// Read the first line
		String tmpLine = lr.readNext();

		// Evaluate the content of each line
		while(tmpLine != null){

			// Flags that a word is being read, i.e. at least one valid
			// symbol (not ' ', '\t' or end of line) has been consumed.
			boolean busy = false;
			System.out.println(tmpLine);			

			// Work on the line as a char array to ease the evaluation
			char[] array = tmpLine.toCharArray();

			for (column = 0; column < array.length; column++){
				// Whitespace ends the current word (if any)
				if(array[column] == ' ' || array[column] == '\t'){
					// If a word was being read, classify it first
					if(busy){
						evalLastState(line, columnBeginning);
						busy = false;
					}
					// The next word can only start after this column,
					// and the automaton restarts from its initial state.
					columnBeginning = column + 1;
					this._lexAutomata.reset();
				}
				// Any other symbol is fed to the automaton
				else{
					_lexAutomata.nxtValue(array[column]);
					busy = true;
				}
			}
			
			// At the end of the line, classify a word still in progress
			if(busy) {
				evalLastState(line, columnBeginning);
				this._lexAutomata.reset();
			}
			
			// Move on to the next line
			line = line + 1;
			tmpLine = lr.readNext();
		}

		// At the end of the file, append the EOF token
		Token EOF = new Token(TokenType.EOF, null, String.valueOf(line), String.valueOf(column));
		this._tokenSet.addToken(EOF);
		System.out.println("*****************************");
		System.out.println("Lexical analysis completed!!");
		System.out.println("*****************************");
		System.out.println('\n');
	}

	/**
	 * Turns the automaton's final state for the word that just ended into a
	 * token. "Start" means no word was actually consumed, so nothing is added.
	 * IDs are interned in the symbol table and the token stores their index;
	 * NUMs store their numeric value; every other state becomes a plain token.
	 * @param linha 0-based line where the word started
	 * @param coluna 0-based column where the word started
	 */
	private void evalLastState(int linha, int coluna){
		String state = _lexAutomata.getState();
		if(state.compareTo("Start") != 0) {

			// ID: intern the lexeme in the symbol table, token carries its index
			if(state.compareTo("ID") == 0){
				int pos = this._symbolTable.addSymbol(_lexAutomata.getWord());
				try {
					this._tokenSet.addToken(new Token(state, String.valueOf(pos), String.valueOf(linha), String.valueOf(coluna)));
				} catch (InvocationTargetException e) {
					e.printStackTrace();
				}
			}

			// NUM: token carries the parsed integer value
			else if (state.compareTo("NUM") == 0){
				int num = Integer.parseInt(_lexAutomata.getWord());
				try {
					this._tokenSet.addToken(new Token(state, String.valueOf(num), String.valueOf(linha), String.valueOf(coluna)));
				} catch (InvocationTargetException e) {
					e.printStackTrace();
				}
			}
			// Any other state: token without an attached value
			else{
				try {
					this._tokenSet.addToken(new Token(state, null, String.valueOf(linha), String.valueOf(coluna)));
				} catch (InvocationTargetException e) {
					e.printStackTrace();
				}
			}
		}
	}


	/** Rewinds the token cursor to the first token. */
	public void resetTokenCounter(){
		this.currentToken = 0;
	}

	/** @return the set of tokens produced by {@link #populateTokenSet()} */
	public TokenSet getTokenSet(){
		return this._tokenSet;
	}

	/**
	 * Advances the cursor and reads the next token, acting as an iterator.
	 * Returns null if there is no next value.
	 * 
	 * @return Token the next token, or null at the end of the set
	 */
	public Token getNextToken(){
		Token retValue = null;
		// getToken is 0-based (see saveTokenSetToFile), so the last valid
		// index is getlength() - 1. The previous check (< getlength())
		// allowed the cursor to move to getlength() and read one past
		// the end of the set.
		if(currentToken < _tokenSet.getlength() - 1){
			this.currentToken = currentToken + 1;		
			retValue = this._tokenSet.getToken(currentToken);
		}
		return retValue;
	}

	/**
	 * Returns the token under the cursor without advancing it.
	 * @return Token the current token
	 */
	public Token getCurrentToken(){
		Token retValue = this._tokenSet.getToken(currentToken);
		return retValue;
	}


	/**
	 * Given a target file, saves the current token set in TXT format in a
	 * sibling file named by {@link #renameForLex(File)}. Each token is written
	 * on its own line and echoed to standard output.
	 * 
	 * @param dest target file whose name seeds the output file name
	 */
	public void saveTokenSetToFile(File dest){
		File save = new File(renameForLex(dest));
		PrintStream ps = null;
		try{
			ps = new PrintStream(new FileOutputStream(save));

			for(int i = 0; i < this._tokenSet.getlength(); i++){
				ps.println(this._tokenSet.getToken(i).toString());
				System.out.println(this._tokenSet.getToken(i).toString() + '\n');
			}
			ps.flush();
		} catch (Exception e) {
			// Do not swallow the failure silently: report where the save failed.
			System.err.println("Could not save token set to " + save.getAbsolutePath());
			e.printStackTrace();
		} finally {
			// PrintStream.close() also closes the underlying FileOutputStream.
			if (ps != null) {
				ps.close();
			}
		}
	}

	/**
	 * Builds the output file name by replacing the input file's extension
	 * with a "_Lexico.txt" suffix. Unlike the previous version, which always
	 * chopped the last 4 characters (correct only for 3-letter extensions),
	 * this strips from the last '.' in the file name, and leaves names
	 * without an extension intact.
	 * 
	 * @param in input file
	 * @return the absolute path of the "_Lexico.txt" output file
	 */
	public String renameForLex(File in){
		String path = in.getAbsolutePath();
		int dot = path.lastIndexOf('.');
		int sep = path.lastIndexOf(File.separatorChar);
		// Only treat the '.' as an extension separator if it belongs to the
		// file name itself, not to a parent directory.
		String base = (dot > sep) ? path.substring(0, dot) : path;
		return base.concat("_Lexico.txt");
	}

}
