/**
 * \file Lexer.cpp
 *
 * Asteriskos Compiler 1
 * Written 2010-2011, by Anthony Berlin and Alexander Carlqvist
 *
 * This file is part of ICC.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "Lexer.h"
#include "ValueTable.h"
#include "Warning.h"

#include <charutil/charutil.h>

#include <fstream>

///
/// Constructor for 'Lexer'.
///
/// Stores the translation unit and shared error list, builds the keyword
/// table, and resets all per-run lexing state.
///
Lexer::Lexer(TranslationUnit* transUnit, list<Error*>* errorList)
{
    m_transUnit = transUnit;
    m_errorList = errorList;
    m_tokenStream = new (GC) list<Token*>;

    init_keywords();

    // No brackets seen yet; all depth counters start out balanced.
    m_braceDepth = 0;
    m_parenDepth = 0;
    m_squareDepth = 0;

    m_lastToken = NULL;

    // Clear the four-character lookahead window.
    m_peek[0] = m_peek[1] = m_peek[2] = m_peek[3] = 0;

    m_pos = 0;
    m_state = (State)0;
}

///
/// Initialize the keyword hashtable.
///
void Lexer::init_keywords()
{
    // Allocate memory for the RID Symbol Table.
    sValueTable.RIDTable = new SymbolTable;

    // Initialize the keywords.
    add_keyword("import", RID_IMPORT, false);

    add_keyword("void", RID_VOID, false);
    add_keyword("int", RID_INT, false);
    add_keyword("bool", RID_BOOL, false);
    add_keyword("string", RID_STRING, false);

    add_keyword("static", RID_STATIC, false);
    add_keyword("const", RID_CONST, true);
    add_keyword("extern", RID_EXTERN, false);

    add_keyword("signed", RID_SIGNED, true);
    add_keyword("unsigned", RID_UNSIGNED, true);

    add_keyword("if", RID_IF, false);
    add_keyword("else", RID_ELSE, false);
    add_keyword("while", RID_WHILE, false);

    add_keyword("return", RID_RETURN, false);
    add_keyword("break", RID_BREAK, false);
    add_keyword("continue", RID_CONTINUE, false);

    add_keyword("put", RID_PUT, false);
    add_keyword("get", RID_GET, false);
    add_keyword("asm", RID_ASM, false);
}

///
/// Add a keyword into the keyword hashtable.
///
/// 'name' is the keyword's spelling, 'type' its token id, and 'isSpecifier'
/// whether the keyword may appear as a declaration specifier.
///
void Lexer::add_keyword(string name, TokenId type, bool isSpecifier)
{
    // Keywords are represented as pre-built tokens that the identifier
    // state can copy its type and flags from.
    Token* keywordToken = new Token((SourceLine*)0, type, name);
    keywordToken->isKeyword = true;
    keywordToken->isSpecifier = isSpecifier;

    m_keywords[name] = keywordToken;

    // Also register the name as a Reserved IDentifier in the global RID
    // symbol table, which prevents the user from declaring anything with
    // this name in the language.
    Symbol* ridSymbol = new Symbol;
    ridSymbol->m_token = keywordToken;

    sValueTable.RIDTable->push(ridSymbol);
}

///
/// Perform lexical analysis on the translation-unit associated with this lexer, and create a token
/// stream for the parser to use.
///
void Lexer::analyse()
{
    // 'curLine' must survive the loop: the final line is reused for the
    // bracket-mismatch report and the end-of-file token below.
    SourceLine* curLine = NULL;

    // Tokenize the translation unit one line at a time.
    foreach (curLine, *m_transUnit->get_lines())
    {
        // get_next_token() pre-increments, so start one before column 0.
        m_pos = -1;

        for (;;)
        {
            Token* token = get_next_token(curLine);

            // A null token, or a token still marked TOK_NONE, means this
            // line has been exhausted.
            if (token == NULL_TOKEN || token->m_type == TOK_NONE)
                break;

            m_lastToken = token;
            m_tokenStream->push_back(token);
        }
    }

    // Report an error if there are any bracket mismatches.
    report_bracket_mismatches(curLine);

    // Add the end-of-file token to the token stream.
    m_tokenStream->push_back(new Token(curLine, TOK_EOF, "<eof>"));
}

///
/// Report an error if there are any bracket mismatches.
///
void Lexer::report_bracket_mismatches(SourceLine* curLine)
{
    if (m_braceDepth > 0)
        m_errorList->push_back(new Error(
            ERR_UNTERMINATED_BRACKET,
            curLine,
            new string("{")));
    else if (m_braceDepth < 0)
        m_errorList->push_back(new Error(
            ERR_OPENING_BRACKET_MISSING,
            curLine,
            new string("{}")));

    if (m_squareDepth > 0)
        m_errorList->push_back(new Error(
            ERR_UNTERMINATED_BRACKET,
            curLine,
            new string("[")));
    else if (m_squareDepth < 0)
        m_errorList->push_back(new Error(
            ERR_OPENING_BRACKET_MISSING,
            curLine,
            new string("[]")));

    if (m_parenDepth > 0)
        m_errorList->push_back(new Error(
            ERR_UNTERMINATED_BRACKET,
            curLine,
            new string("(")));
    else if (m_parenDepth < 0)
        m_errorList->push_back(new Error(
            ERR_OPENING_BRACKET_MISSING,
            curLine,
            new string("()")));
}

///
/// Find the next token in the translation unit, and return that token.
///
/// Drives the lexer state machine over 'curLine' starting at 'm_pos' until
/// a token is completed or the line runs out. Returns NULL_TOKEN when
/// 'curLine' is null; otherwise returns a token whose type is TOK_NONE if
/// no further token could be formed on this line.
///
Token* Lexer::get_next_token(SourceLine* curLine)
{
    if (curLine == NULL)
        return NULL_TOKEN;

    Token* token = new Token(curLine, TOK_NONE, "");
    int lineSize = curLine->m_data.length();

    m_state = STATE_START;

    for (;;)
    {
        // Advance to the next character; stop at the end of the line.
        ++m_pos;

        if (m_pos >= lineSize)
            break;

        // Refresh the four-character lookahead window.
        peek(curLine);

        // Set once the current token has been fully formed.
        bool tokenComplete = false;

        // Dispatch on the state of the lexical analysis.
        switch (m_state)
        {
            case STATE_START:
            {
                short unsigned int action = lex_state_start(curLine, token);

                // 1 = keep consuming in the newly selected state.
                if (action == 1)
                    continue;

                // 0 = nothing consumed (whitespace); anything else means
                // the token is complete.
                if (action != 0)
                    tokenComplete = true;

                break;
            }
            case STATE_INT_LITERAL:
                tokenComplete = lex_state_int_literal(curLine, token);
                break;
            case STATE_STRING_LITERAL:
                tokenComplete = lex_state_string_literal(curLine, token);
                break;
            case STATE_IDENT:
                tokenComplete = lex_state_ident(curLine, token);
                break;
            default:
                // Unknown state: anything but whitespace is an error.
                if (!charutil::is_whitespace(m_peek[0]))
                    m_errorList->push_back(new Error(
                        ERR_INVALID_TOKEN,
                        token->m_line,
                        new string(token->m_lexeme)));
        }

        if (tokenComplete)
            break;
    }

    return token;
}

///
/// Get the next four characters from the current position. We always look ahead three additional
/// characters (even when there aren't any characters left on the current line).
///
/// Positions past the end of the line are padded with NUL (0) characters,
/// so the state handlers can test 'm_peek[1..3]' unconditionally.
///
void Lexer::peek(SourceLine* curLine)
{
    // std::string::operator[] never throws -- indexing past the end is
    // undefined behaviour, so the old try/catch around it could never fire
    // and out-of-range reads were UB. Bounds-check explicitly instead.
    const int lineSize = (int)curLine->m_data.length();

    for (int i = m_pos; i < m_pos + 4; ++i)
    {
        if (i >= 0 && i < lineSize)
            m_peek[i - m_pos] = curLine->m_data[i];
        else
            m_peek[i - m_pos] = 0;
    }
}

///
/// State: Start forming a token.
///
/// Classifies the character in 'm_peek[0]' and either switches the lexer
/// into a literal/identifier state, or completes a punctuator token on the
/// spot.
///
/// Returns 0 when nothing was consumed (whitespace), 1 when the caller
/// should keep feeding characters to the newly selected state, and 2 when
/// the token is complete.
///
short unsigned int Lexer::lex_state_start(SourceLine* curLine, Token* nextToken)
{
    // Integer literal.
    if (charutil::is_digit(m_peek[0]))
    {
        nextToken->m_type = TOK_INTEGER;
        m_state = STATE_INT_LITERAL;
        nextToken->poke(m_peek[0]);

        // In case it's a single digit (e.g. a lone '0').
        if (!charutil::is_digit(m_peek[1]))
            return 2;

        return 1;
    }

    // String literal. The opening quote itself is not part of the lexeme.
    if (m_peek[0] == '\"')
    {
        nextToken->isLiteral = true;
        nextToken->m_type = TOK_STRING;
        m_state = STATE_STRING_LITERAL;
        return 1;
    }

    // Identifier (keywords and boolean literals are resolved later by
    // lex_state_ident, once the whole identifier has been read).
    if (charutil::is_letter(m_peek[0]) || m_peek[0] == '_')
    {
        nextToken->isIdentifier = true;
        nextToken->m_type = TOK_IDENT;
        m_state = STATE_IDENT;
        lex_state_ident(curLine, nextToken);
        return 1;
    }

    // Punctuator.
    //
    // NOTE(review): is_punctuator_char() also accepts '[' and ']', but the
    // switch below has no case for them, so they fall through to the
    // invalid-token report at the bottom and 'm_squareDepth' is never
    // updated -- confirm whether square brackets are meant to be lexed.
    if (is_punctuator_char(m_peek[0]))
    {
        nextToken->isPunctuator = true;

        switch (m_peek[0])
        {
            case '+':
                nextToken->m_type = TOK_PLUS;
                nextToken->poke(m_peek[0]);
                return 2;
            case '-':
                nextToken->m_type = TOK_MINUS;
                nextToken->poke(m_peek[0]);
                return 2;
            case '*':
                nextToken->m_type = TOK_ASTERISK;
                nextToken->poke(m_peek[0]);
                return 2;
            case '/':
                nextToken->m_type = TOK_SLASH;
                nextToken->poke(m_peek[0]);
                return 2;
            case '&':
                nextToken->poke(m_peek[0]);

                if (m_peek[1] == '&')
                {
                    nextToken->m_type = TOK_AND;
                    nextToken->poke(m_peek[1]);
                    ++m_pos;
                }
                else
                {
                    // A lone '&' is not a valid token in this language.
                    // (Previously this case fell through into the '|' and
                    // '=' cases, producing a garbage TOK_ASSIGN token with
                    // lexeme "&&&".) Report it like any other invalid
                    // token; the token type stays TOK_NONE.
                    m_errorList->push_back(new Error(
                        ERR_INVALID_TOKEN,
                        curLine,
                        new string(nextToken->m_lexeme)));
                }

                return 2;
            case '|':
                nextToken->poke(m_peek[0]);

                if (m_peek[1] == '|')
                {
                    nextToken->m_type = TOK_OR;
                    nextToken->poke(m_peek[1]);
                    ++m_pos;
                }
                else
                {
                    // A lone '|' is likewise invalid; previously this fell
                    // through into the '=' case and was mislexed as
                    // TOK_ASSIGN.
                    m_errorList->push_back(new Error(
                        ERR_INVALID_TOKEN,
                        curLine,
                        new string(nextToken->m_lexeme)));
                }

                return 2;
            case '=':
                switch (m_peek[1])
                {
                    case '=':
                        nextToken->m_type = TOK_EQUAL;
                        nextToken->poke(m_peek[1]);
                        ++m_pos;
                        return 2;
                    default:
                        nextToken->m_type = TOK_ASSIGN;
                        nextToken->poke(m_peek[0]);
                        return 2;
                }
            case '>':
                switch (m_peek[1])
                {
                    case '=':
                        nextToken->m_type = TOK_GEQ;
                        nextToken->poke(m_peek[1]);
                        ++m_pos;
                        return 2;
                    default:
                        nextToken->m_type = TOK_GREATER;
                        nextToken->poke(m_peek[0]);
                        return 2;
                }
            case '<':
                switch (m_peek[1])
                {
                    case '=':
                        nextToken->m_type = TOK_LEQ;
                        nextToken->poke(m_peek[1]);
                        ++m_pos;
                        return 2;
                    default:
                        nextToken->m_type = TOK_LESS;
                        nextToken->poke(m_peek[0]);
                        return 2;
                }
            case '!':
                switch (m_peek[1])
                {
                    case '=':
                        nextToken->m_type = TOK_NOT_EQUAL;
                        nextToken->poke(m_peek[1]);
                        ++m_pos;
                        return 2;
                    default:
                        nextToken->m_type = TOK_NOT;
                        nextToken->poke(m_peek[0]);
                        return 2;
                }
            case ',':
                nextToken->m_type = TOK_COMMA;
                nextToken->poke(m_peek[0]);
                return 2;
            case '.':
                nextToken->m_type = TOK_DOT;
                nextToken->poke(m_peek[0]);
                return 2;
            case ';':
                nextToken->m_type = TOK_SEMICOLON;
                nextToken->poke(m_peek[0]);
                return 2;
            case '(':
                nextToken->m_type = TOK_OPEN_PAREN;
                nextToken->poke(m_peek[0]);

                ++m_parenDepth;

                return 2;
            case ')':
                nextToken->m_type = TOK_CLOSE_PAREN;
                nextToken->poke(m_peek[0]);

                --m_parenDepth;

                // A negative depth means there was no matching opener;
                // report it and clamp back to zero so one stray ')' does
                // not trigger a second error at end of analysis.
                if (m_parenDepth < 0)
                {
                    m_errorList->push_back(new Error(
                        ERR_OPENING_BRACKET_MISSING,
                        curLine,
                        new string("()")));
                    ++m_parenDepth;
                }

                return 2;
            case '{':
                nextToken->m_type = TOK_OPEN_BRACE;
                nextToken->poke(m_peek[0]);

                ++m_braceDepth;

                return 2;
            case '}':
                nextToken->m_type = TOK_CLOSE_BRACE;
                nextToken->poke(m_peek[0]);

                --m_braceDepth;

                // Same clamping as for ')' above.
                if (m_braceDepth < 0)
                {
                    m_errorList->push_back(new Error(
                        ERR_OPENING_BRACKET_MISSING,
                        curLine,
                        new string("{}")));
                    ++m_braceDepth;
                }

                return 2;
        }
    }

    // Invalid token (any non-whitespace character not handled above).
    if (!charutil::is_whitespace(m_peek[0]))
    {
        nextToken->poke(m_peek[0]);
        m_errorList->push_back(new Error(
            ERR_INVALID_TOKEN,
            curLine,
            new string(nextToken->m_lexeme)));
        return 2;
    }

    return 0;
}

///
/// State: Integer literal.
///
/// Returns true when the literal is complete (the current character is not
/// a digit), false while more digits should be consumed.
///
bool Lexer::lex_state_int_literal(SourceLine* curLine, Token* nextToken)
{
    if (!charutil::is_digit(m_peek[0]))
    {
        // Non-digit: back up so this character is re-examined as the start
        // of the next token, and report the literal as finished.
        --m_pos;
        return true;
    }

    // Still inside the number: append the digit and keep going.
    nextToken->poke(m_peek[0]);
    return false;
}

///
/// State: String literal.
///
/// Returns true when the closing quote is reached; false while the literal
/// body is still being consumed.
///
bool Lexer::lex_state_string_literal(SourceLine* curLine, Token* nextToken)
{
    // A (bare) double quote terminates the literal.
    if (m_peek[0] == '"')
        return true;

    if (m_peek[0] != '\\')
    {
        // Ordinary character: copy it into the lexeme verbatim.
        nextToken->poke(m_peek[0]);
        return false;
    }

    // Escape sequence. These are the characters recognised after a
    // backslash; note find() never matches a NUL lookahead at end of line.
    static const string recognisedEscapes = "\\nNrR\"'0aAfFtTvV";

    if (recognisedEscapes.find(m_peek[1]) != string::npos)
    {
        // Keep both the backslash and the escape character, and skip past
        // the escape character.
        nextToken->poke(m_peek[0]);
        nextToken->poke(m_peek[1]);
        ++m_pos;
    }
    else
        // Unknown escape: warn and drop the backslash from the lexeme.
        m_errorList->push_back(new Warning(WARNING_UNKNOWN_ESCAPE_SEQUENCE,
                                           curLine,
                                           NULL));

    return false;
}

///
/// State: Identifier.
///
/// Returns true when the identifier is complete (and its final token type
/// has been resolved), false while more characters should be consumed.
///
bool Lexer::lex_state_ident(SourceLine* curLine, Token* nextToken)
{
    // Append the character if it can be part of an identifier; otherwise
    // back up one position and refresh the lookahead window so the
    // termination test below examines the correct characters.
    if (charutil::is_letter_or_digit(m_peek[0]) || m_peek[0] == '_')
        nextToken->poke(m_peek[0]);
    else
    {
        --m_pos;
        peek(curLine);
    }

    // The identifier ends when the next character cannot extend it, or
    // when this is the last character on the line.
    if ((!charutil::is_letter_or_digit(m_peek[1]) && m_peek[1] != '_')
        || m_pos >= (signed int)(curLine->m_data.length()-1))
    {
        // Reserved word? Copy the type and specifier flags from the
        // prototype token stored in the keyword table.
        if (m_keywords.count(nextToken->m_lexeme))
        {
            Token* keyword = m_keywords[nextToken->m_lexeme];

            if (keyword->isSpecifier)
                nextToken->isSpecifier = true;

            nextToken->isKeyword = true;
            nextToken->m_type = keyword->m_type;
        }
        // Boolean literals are recognised here rather than through the
        // keyword table, and are marked as literals instead of keywords.
        else if (nextToken->m_lexeme == "true" ||
                 nextToken->m_lexeme == "false")
        {
            nextToken->isLiteral = true;
            nextToken->m_type = TOK_BOOLEAN;
        }
        // NOTE(review): "declared" is special-cased here instead of being
        // registered via init_keywords(), so unlike the real keywords it
        // is never pushed onto the RID table -- confirm whether users may
        // legally declare an identifier named "declared".
        else if (nextToken->m_lexeme == "declared")
        {
            nextToken->isKeyword = true;
            nextToken->m_type = RID_DECLARED;
        }

        return true;
    }

    return false;
}

///
/// Return true if 'character' is a valid punctuator character.
///
inline bool Lexer::is_punctuator_char(char character) const
{
    // Grouped roughly by role: arithmetic, comparison/logic, separators,
    // and the three bracket families.
    switch (character)
    {
        case '+': case '-': case '*': case '/':
        case '=': case '&': case '|':
        case '<': case '>': case '!':
        case '.': case ',': case ';':
        case '(': case ')':
        case '[': case ']':
        case '{': case '}':
            return true;
        default:
            return false;
    }
}

///
/// Dump the token stream to disc.
///
/// The dump is written next to the translation unit itself, one token per
/// line, in stream order. 'appFileName' is only used to prefix the error
/// message on failure.
///
void Lexer::dump_to_disc(string& appFileName) const
{
    const string dumpPath = m_transUnit->get_path() + ".token_stream_dump";

    std::ofstream dumpFile(dumpPath.c_str());

    if (!dumpFile || !dumpFile.is_open())
    {
        cout << appFileName << ": Failed to write to disc.\n";
        return;
    }

    foreach (Token* token, *m_tokenStream)
        dumpFile << token->to_string() << "\n";

    dumpFile.close();
}
