/*
 * Copyright (C) 2015 soud
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <string.h>

#include "erupt.h"
#include "lexer.h"

/* file-local helpers; see definitions below for contracts */
static lexer_t *create_lexer(char *target_file, char *source);
static char peek(lexer_t *lexer);
static char eat(lexer_t *lexer);
static void emit_token(lexer_t *lexer, tokentype type);
static bool read_symbol(lexer_t *lexer);
static tokentype get_keyword(lexer_t *lexer);
static bool is_number(char c);
static bool is_ident(char c);
static bool is_whitespace(char c);
static void read_chars(lexer_t *lexer, char id);
static void read_number(lexer_t *lexer);
static void read_ident(lexer_t *lexer);
static void read_comment(lexer_t *lexer);

static const keyword_t symbols[] = {
        {"+"   , TOKEN_PLUS},
        {"-"   , TOKEN_MIN},
        {"*"   , TOKEN_STAR},
        {"/"   , TOKEN_SLASH},
        {"**"  , TOKEN_STAR_STAR},
        {"%"   , TOKEN_MOD},
        {"&"   , TOKEN_B_AND},
        {"|"   , TOKEN_B_OR},
        {"~"   , TOKEN_B_NOT},
        {"^"   , TOKEN_B_XOR},
        {"!"   , TOKEN_BANG},
        {"<"   , TOKEN_LT},
        {">"   , TOKEN_GT},
        {"<<"  , TOKEN_L_SHIFT},
        {">>"  , TOKEN_R_SHIFT},
        {"="   , TOKEN_EQ},

        {"+="  , TOKEN_PLUS_EQ},
        {"-="  , TOKEN_MIN_EQ},
        {"-="  , TOKEN_MIN_EQ},
        {"->"  , TOKEN_R_ARROW},
        {"*="  , TOKEN_STAR_EQ},
        {"%="  , TOKEN_MOD_EQ},
        {"&="  , TOKEN_B_AND_EQ},
        {"|="  , TOKEN_B_OR_EQ},
        {"~="  , TOKEN_B_NOT_EQ},
        {"^="  , TOKEN_B_XOR_EQ},
        {"/="  , TOKEN_SLASH_EQ},
        {"!="  , TOKEN_NOT_EQ},
        {"!="  , TOKEN_NOT_EQ},
        {"<="  , TOKEN_LT_EQ},
        {">="  , TOKEN_GT_EQ},
        {"=="  , TOKEN_EQ_EQ},

        {"&&"  , TOKEN_AND},
        {"||"  , TOKEN_OR},
        {"::"  , TOKEN_NS_SEP},
        {":"   , TOKEN_COLON},
        {"("   , TOKEN_L_PAREN},
        {")"   , TOKEN_R_PAREN},
        {"["   , TOKEN_L_BRACKET},
        {"]"   , TOKEN_R_BRACKET},
        {"{"   , TOKEN_L_BRACE},
        {"}"   , TOKEN_R_BRACE},
        {"."   , TOKEN_DOT},
        {","   , TOKEN_COMMA},
        {";"   , TOKEN_SEMI_COLON},
};

/*
 * Reserved words.  Looked up by get_keyword() with an exact strcmp after a
 * full identifier has been scanned, so order is irrelevant here.
 */
static const keyword_t keywords[] = {
        {"fun"    , TOKEN_FUNCTION},
        {"True"   , TOKEN_TRUE},
        {"False"  , TOKEN_FALSE},
        {"mut"    , TOKEN_MUTABLE},
        {"use"    , TOKEN_IMPORT},
        {"return" , TOKEN_RETURN},
        {"for"    , TOKEN_FOR},
        {"while"  , TOKEN_WHILE},
        {"break"  , TOKEN_BREAK},
        {"cont"   , TOKEN_CONTINUE},
        {"if"     , TOKEN_IF},
        {"unless" , TOKEN_UNLESS},
        {"else"   , TOKEN_ELSE},
};

/*
 * Tokenize `source` into a linked list of tokens owned by the returned
 * lexer (head in ->ts, last node in ->tail, terminated by TOKEN_EOF).
 * `target_file` is stored on the lexer.  Caller releases everything with
 * destroy_lexer().  Fatal lexical errors abort via file_fatal_error().
 *
 * The order of checks in the loop is significant: comments first (so "(*"
 * is not read as "(" "*" symbols), then multi/single-char symbols, then
 * everything else by first character.
 */
lexer_t *lex(char *target_file, char *source)
{
        char c;
        lexer_t *lexer = create_lexer(target_file, source);

        verbose_printf("starting lexical analysis\n");

        /* scan until the NUL terminator of the source buffer */
        while (peek(lexer) != '\0') {
                /* every token's value spans [start, pos) of the source */
                lexer->start = lexer->pos;

                /* check for comment block */
                if (strncmp(lexer->source + lexer->pos, "(*", 2) == 0) {
                        read_comment(lexer);
                        continue;
                }

                /* check for symbols */
                if (read_symbol(lexer))
                        continue;

                c = eat(lexer);

                /* special cases like strings, numbers, identifiers and whitespace */
                switch (c) {
                case '"':
                        read_chars(lexer, '"');
                        break;
                case '\'':
                        read_chars(lexer, '\'');
                        break;
                case '\n':
                        /* newlines emit no token; they only advance line_n */
                        ++lexer->line_n;
                        break;
                case ' ':
                case '\t':
                case '\r':
                        /* swallow the whole whitespace run in one iteration */
                        while (is_whitespace(peek(lexer)))
                                eat(lexer);
                        break;
                default:
                        if (is_ident(c)) {
                                read_ident(lexer);
                        } else if (is_number(c)) {
                                read_number(lexer);
                        } else {
                                emit_token(lexer, TOKEN_ERROR);
                                file_fatal_error(lexer->line_n, "erroneous token '%c'\n", c);
                        }
                }
        }

        /* cap the stream with a zero-width EOF token */
        lexer->start = lexer->pos;
        emit_token(lexer, TOKEN_EOF);

        verbose_printf("lexical analysis done\n");

        return lexer;
}

static lexer_t *create_lexer(char *target_file, char *source)
{
        lexer_t *lexer = smalloc(sizeof(lexer_t));

        lexer->target_file = target_file;
        lexer->source      = source;
        lexer->line_n      = 1;
        lexer->start       = 0;
        lexer->pos         = 0;
        lexer->ts          = NULL;
        lexer->tail        = NULL;

        return lexer;
}

/* Current character without consuming it; '\0' once the source is exhausted. */
static char peek(lexer_t *lexer)
{
        char current = lexer->source[lexer->pos];

        return current;
}

/* Consume and return the current character, advancing the read position. */
static char eat(lexer_t *lexer)
{
        /* grab the character before moving, avoiding fencepost errors */
        char consumed = lexer->source[lexer->pos];

        lexer->pos += 1;

        return consumed;
}

/*
 * Create a token of `type` covering source[start, pos) and append it to the
 * lexer's token list.  token_value produces the token's text; new_token
 * takes ownership of it.
 */
static void emit_token(lexer_t *lexer, tokentype type)
{
        char *value    = token_value(lexer->source, lexer->start, lexer->pos);
        token_t *token = new_token(type, value, lexer->start, lexer->pos, lexer->line_n);

        /* link into the doubly-linked list: the original always left prev
         * NULL, which made backward traversal from any node impossible */
        token->prev = lexer->tail;
        token->next = NULL;

        if (lexer->tail == NULL)
                lexer->ts = token;
        else
                lexer->tail->next = token;

        lexer->tail = token;
}

/*
 * True for characters that may appear in a numeric literal: ASCII digits
 * plus '.', so read_number() can consume fractional literals like "3.14".
 */
static bool is_number(char c)
{
        if (c == '.')
                return true;

        return '0' <= c && c <= '9';
}

/* True for identifier characters: ASCII letters and underscore. */
static bool is_ident(char c)
{
        if (c == '_')
                return true;

        return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z');
}

/*
 * True for intra-line whitespace.  '\n' is deliberately excluded: the main
 * loop in lex() handles it separately so line numbers stay accurate.
 */
static bool is_whitespace(char c)
{
        switch (c) {
        case ' ':
        case '\t':
        case '\r':
                return true;
        default:
                return false;
        }
}

/*
 * Scan a string or char literal whose opening quote `id` has already been
 * consumed; emits TOKEN_STRING or TOKEN_CHAR.
 *
 * Fix: the original compared peek() against EOF, which peek() can never
 * return (end of the source buffer is '\0'), so an unterminated literal ran
 * off the end of the buffer.  Also count embedded newlines so diagnostics
 * after a multi-line string report the right line.
 */
static void read_chars(lexer_t *lexer, char id)
{
        bool is_string = id == '"';
        char c;

        while ((c = eat(lexer)) != id) {
                /* '\0' terminates the source; the literal never closed */
                if (c == '\0')
                        file_fatal_error(lexer->line_n, "unexpected EOF while scanning string\n");

                if (c == '\n')
                        ++lexer->line_n;

                /* allow escaped quotation chars in strings and chars */
                if (c == '\\' && peek(lexer) != '\0')
                        eat(lexer);
        }

        if (is_string)
                emit_token(lexer, TOKEN_STRING);
        else
                emit_token(lexer, TOKEN_CHAR);
}

/*
 * Try to scan an operator/punctuation symbol at the current position.
 * Returns true (and emits the token) on success, false if no symbol
 * matches.
 *
 * Fix: the original kept the LAST table entry that matched, which silently
 * depended on table order and broke for "::" (listed before ":", so "::"
 * lexed as two colons).  Use maximal munch instead: always take the
 * longest matching symbol, independent of table order.
 */
static bool read_symbol(lexer_t *lexer)
{
        size_t n_symbols = sizeof(symbols) / sizeof(symbols[0]);
        const keyword_t *match = NULL;
        size_t match_len = 0;

        for (size_t i = 0; i < n_symbols; ++i) {
                size_t len = strlen(symbols[i].name);

                if (len > match_len &&
                    strncmp(lexer->source + lexer->pos, symbols[i].name, len) == 0) {
                        match = &symbols[i];
                        match_len = len;
                }
        }

        if (!match)
                return false;

        /* consume exactly the matched characters, then emit */
        for (size_t j = 0; j < match_len; ++j)
                eat(lexer);

        emit_token(lexer, match->type);

        return true;
}

/*
 * Classify the identifier spanning source[start, pos): return its keyword
 * token type, or TOKEN_IDENT if it is not reserved.
 *
 * Fix: token_value hands back a NUL-terminated copy of the slice (it must
 * allocate to do so — TODO confirm against token_value's definition), and
 * the original never released it, leaking one string per identifier.
 */
static tokentype get_keyword(lexer_t *lexer)
{
        char *keyword     = token_value(lexer->source, lexer->start, lexer->pos);
        size_t n_keywords = sizeof(keywords) / sizeof(keywords[0]);
        tokentype type    = TOKEN_IDENT;

        for (size_t i = 0; i < n_keywords; ++i) {
                if (strcmp(keyword, keywords[i].name) == 0) {
                        type = keywords[i].type;
                        break;
                }
        }

        free(keyword);

        return type;
}

/*
 * Scan the rest of a numeric literal (first digit already consumed) and
 * emit TOKEN_NUMBER.
 *
 * NOTE(review): is_number() accepts '.', so "1.2.3" also lexes as a single
 * number token here — rejecting malformed literals is left to the parser.
 */
static void read_number(lexer_t *lexer)
{
        for (;;) {
                if (!is_number(peek(lexer)))
                        break;

                eat(lexer);
        }

        emit_token(lexer, TOKEN_NUMBER);
}

/*
 * Scan the rest of an identifier (first character already consumed) and
 * emit either a keyword token or TOKEN_IDENT via get_keyword().
 *
 * Fix: the original continued on is_number(), which also accepts '.', so
 * "foo.bar" was glued into ONE identifier and TOKEN_DOT could never follow
 * an identifier.  Continue only on identifier chars and bare digits.
 */
static void read_ident(lexer_t *lexer)
{
        char c;

        while (c = peek(lexer), is_ident(c) || ('0' <= c && c <= '9'))
                eat(lexer);

        emit_token(lexer, get_keyword(lexer));
}

/*
 * Skip a "(* ... *)" comment starting at the current position; nothing is
 * tokenized.  Nested comments are not supported.
 *
 * Fixes: (1) peek() can never return EOF — the source ends with '\0' — so
 * an unterminated comment used to read past the buffer; compare against
 * '\0'.  (2) Consume the "(*" opener up front so its '*' cannot pair with
 * a following ')' (i.e. "(*)" is unterminated, not an empty comment).
 */
static void read_comment(lexer_t *lexer)
{
        size_t start_line = lexer->line_n;

        /* skip the "(*" opener verified by the caller */
        eat(lexer);
        eat(lexer);

        while (1) {
                if (peek(lexer) == '\0')
                        file_fatal_error(lexer->line_n, "unterminated comment\n");

                if (peek(lexer) == '\n')
                        ++lexer->line_n;

                if (eat(lexer) == '*' && peek(lexer) == ')') {
                        eat(lexer);
                        verbose_printf("skipped comment on line %zu\n",
                                       start_line);
                        break;
                }
        }

        /* don't tokenize comments */
}

/*
 * Release a lexer and every token it produced.  Safe to call with NULL.
 *
 * Fix: emit_token() guarantees lexer->tail is always a node of the list
 * headed by lexer->ts, so destroy_tokens(lexer->ts) already frees it —
 * the original's extra free(lexer->tail) was a double free.
 */
void destroy_lexer(lexer_t *lexer)
{
        if (!lexer)
                return;

        if (lexer->ts)
                destroy_tokens(lexer->ts);

        free(lexer);
        verbose_printf("destroyed lexer\n");
}
