/*
 *  Copyright 2010 Wesley Gooch, Ryan Nix, Cody Wenzel.
 *  Montana State University - Spring Semester 2010
 *  Computer Science 450 - Compilers
 *  Project: Micro-Pascal Compiler
 */
package msu.compiler.scanner;

import msu.compiler.scanner.fsas.StateMachineFactory;

/**
 * Scans a Micro-Pascal character stream and produces tokens.
 *
 * <p>Each call to {@link #scanNextToken()} reads one character from the
 * underlying {@link Tokenizer} and either returns immediately (on EOF or a
 * read error) or dispatches to the appropriate finite-state automaton /
 * single-character token rule.
 *
 * @author Wesley Gooch
 */
public class Scanner {

    /** Source of input characters and holder of in-progress token state. */
    private final Tokenizer tokenizer;

    /**
     * Creates a scanner that reads from the given tokenizer.
     *
     * @param tokenizer the tokenizer supplying characters; must not be null
     * @throws NullPointerException if {@code tokenizer} is null
     */
    public Scanner(Tokenizer tokenizer) {
        // Fail fast here rather than with a harder-to-trace NPE on first scan.
        if (tokenizer == null) {
            throw new NullPointerException("tokenizer must not be null");
        }
        this.tokenizer = tokenizer;
    }

    /**
     * Scans and returns the next token in the tokenizer.
     *
     * @return the next token, or the tokenizer's current token (e.g. an
     *         EOF/error token) when no further characters are available
     */
    public Token scanNextToken() {
        int currentChar = tokenizer.readNextChar();

        if (currentChar <= 0 || tokenizer.isAtEOF()) {
            // Either the EOF was reached or some error so don't bother
            // dispatching... just return the token.
            return tokenizer.captureToken();
        } else {
            return dispatch((char) currentChar);
        }
    }

    /**
     * Decides who is responsible for determining the next token. Based on the
     * given char, it builds and executes the appropriate FSA (by name, via
     * {@link StateMachineFactory#executeFSA}); simple one-character symbols
     * set the token type directly. A character matching no rule yields
     * {@code Token.MP_ERROR} — it does not belong to the language.
     *
     * @param ch the character to dispatch against
     * @return the token generated from the resulting FSA, or an error token
     */
    private Token dispatch(char ch) {
        StateMachineFactory stateMachine = new StateMachineFactory(tokenizer);

        if (Tokenizer.isLetter(ch) || ch == '_') {
            stateMachine.executeFSA("IdentifierFSA");
            // Check for reserved words only after an identifier token.
            tokenizer.checkReservedWords();
        } else if (Tokenizer.isDigit(ch)) {
            stateMachine.executeFSA("DigitFSA");
        } else { // We are dealing with a symbol.
            switch (ch) {
                case '.':
                    tokenizer.setCurrentTokenType(Token.MP_PERIOD);
                    break;
                case ',':
                    tokenizer.setCurrentTokenType(Token.MP_COMMA);
                    break;
                case ';':
                    tokenizer.setCurrentTokenType(Token.MP_SCOLON);
                    break;
                case '(':
                    tokenizer.setCurrentTokenType(Token.MP_LPAREN);
                    break;
                case ')':
                    tokenizer.setCurrentTokenType(Token.MP_RPAREN);
                    break;
                case '=':
                    tokenizer.setCurrentTokenType(Token.MP_EQUAL);
                    break;
                case '+':
                    tokenizer.setCurrentTokenType(Token.MP_PLUS);
                    break;
                case '-':
                    tokenizer.setCurrentTokenType(Token.MP_MINUS);
                    break;
                case '*':
                    tokenizer.setCurrentTokenType(Token.MP_TIMES);
                    break;
                // Multi-character symbols (>=, <=, <>, :=) and string
                // literals need their own FSAs to look ahead.
                case '>':
                    stateMachine.executeFSA("GreaterThanFSA");
                    break;
                case '<':
                    stateMachine.executeFSA("LessThanFSA");
                    break;
                case ':':
                    stateMachine.executeFSA("ColonFSA");
                    break;
                case '\'':
                    stateMachine.executeFSA("StringLiteralFSA");
                    break;
                default:
                    tokenizer.setCurrentTokenType(Token.MP_ERROR);
                    break;
            }
        }
        return tokenizer.captureToken();
    }
}
