#ifndef LEXER_HPP
#define LEXER_HPP

#include <cstddef>
#include "linear_algo.hpp"
#include "free_glyph.hpp"
#include <cassert>
#include <cctype>
#include <cstring>
#include "common.hpp"

// HACK: rename of Token Kind
// HACK: rename of Token Kind
// Category of a lexed token. Token_End is deliberately the zero value so a
// value-initialized Token reports "end of content".
enum class Token_Type
{
    Token_End = 0,      // cursor reached the end of the content
    Token_Invalid,      // single character that matched no other rule
    Token_Preproc,      // '#' directive, consumed through the end of the line
    Token_Symbol,       // identifier: [A-Za-z_][A-Za-z0-9_]*
    Token_Open_Paren,   // "("
    Token_Close_Paren,  // ")"
    Token_Open_Curly,   // "{"
    Token_Close_Curly,  // "}"
    Token_Semicolon,    // ";"
    Token_Keyword,      // symbol that matched the keywords[] table
    Token_Comment,      // "//" line comment, consumed through the end of the line
    Token_String,       // double-quoted string literal (no escape handling yet)
};

// Human-readable name of a token type, for diagnostics.
// NOTE(review): the implementation later in this file is spelled
// token_type_name — confirm a definition for TokenTypeName exists as well,
// otherwise callers of this declared API will fail to link.
const char *TokenTypeName(Token_Type tk_t);

// One lexed token. `text` points into the lexer's content buffer (not owned,
// NOT NUL-terminated — always pair it with `text_len`).
struct Token
{
    Token_Type token_type;
    const char *text;     // start of the token inside Lexer::content
    size_t text_len;      // token length in bytes
    Vec2f position;       // on-screen position at the start of the token
};

// Lexer state over a borrowed content buffer. Besides the cursor it tracks
// line/column-style rendering info (line, bol, x) so each token can be tagged
// with its on-screen position.
struct Lexer
{
    Free_Glyph_Atlas *atlas;  // used only to advance `x`; may be NULL
    const char *content;      // borrowed, must outlive the lexer
    size_t content_len;
    size_t cursor;            // byte offset of the next unread character
    size_t line;              // 0-based line number of the cursor
    size_t bol;               // byte offset of the beginning of the current line
    float x;                  // horizontal pen position on the current line
};
// --------------------------------------- expose apis
// Construct a lexer over `content` (borrowed; must outlive the lexer).
Lexer LexerNew(Free_Glyph_Atlas *atlas,
               const char *content, size_t content_len);

// Produce the next token; returns a Token_End token once content is exhausted.
Token LexerNext(Lexer *lex);

// NOTE: private section
#define LEXER_IMPL // impl this file apis, ignore this macro should be ok
// -------------------------------------------------- apis impl
// Table entry mapping a fixed piece of text to its token type.
struct Literal_Token
{
    const char *text;  // NUL-terminated literal to match at the cursor

    // HACK: rename of kind
    Token_Type tk_type;
};

// Punctuation recognized verbatim by LexerNext.
// `static const`: without it this header-defined array has external linkage,
// which violates the One Definition Rule as soon as the header is included
// from more than one translation unit.
static const Literal_Token literal_tokens[] = {
    {"(", Token_Type::Token_Open_Paren},
    {")", Token_Type::Token_Close_Paren},
    {"{", Token_Type::Token_Open_Curly},
    {"}", Token_Type::Token_Close_Curly},
    {";", Token_Type::Token_Semicolon},
};

#define literal_tokens_count (sizeof(literal_tokens) / sizeof(literal_tokens[0]))

// C and C++ keywords highlighted as Token_Keyword (checked by LexerNext after
// a symbol is scanned).
// `static` plus a fully-const element type (`const char *const`): the original
// array of mutable pointers had external linkage, violating the One Definition
// Rule when this header is included from several translation units.
static const char *const keywords[] = {
    "auto",
    "break",
    "case",
    "char",
    "const",
    "continue",
    "default",
    "do",
    "double",
    "else",
    "enum",
    "extern",
    "float",
    "for",
    "goto",
    "if",
    "int",
    "long",
    "register",
    "return",
    "short",
    "signed",
    "sizeof",
    "static",
    "struct",
    "switch",
    "typedef",
    "union",
    "unsigned",
    "void",
    "volatile",
    "while",
    "alignas",
    "alignof",
    "and",
    "and_eq",
    "asm",
    "atomic_cancel",
    "atomic_commit",
    "atomic_noexcept",
    "bitand",
    "bitor",
    "bool",
    "catch",
    "char16_t",
    "char32_t",
    "char8_t",
    "class",
    "co_await",
    "co_return",
    "co_yield",
    "compl",
    "concept",
    "const_cast",
    "consteval",
    "constexpr",
    "constinit",
    "decltype",
    "delete",
    "dynamic_cast",
    "explicit",
    "export",
    "false",
    "friend",
    "inline",
    "mutable",
    "namespace",
    "new",
    "noexcept",
    "not",
    "not_eq",
    "nullptr",
    "operator",
    "or",
    "or_eq",
    "private",
    "protected",
    "public",
    "reflexpr",
    "reinterpret_cast",
    "requires",
    "static_assert",
    "static_cast",
    "synchronized",
    "template",
    "this",
    "thread_local",
    "throw",
    "true",
    "try",
    "typeid",
    "typename",
    "using",
    "virtual",
    "wchar_t",
    "xor",
    "xor_eq",
};

#define keywords_count (sizeof(keywords) / sizeof(keywords[0]))

const char *token_type_name(Token_Type tk_type)
{
    switch (tk_type)
    {
    case Token_Type::Token_End:
        return "end of content";
    case Token_Type::Token_Invalid:
        return "invalid token";
    case Token_Type::Token_Preproc:
        return "preprocessor directive";
    case Token_Type::Token_Symbol:
        return "symbol";
    case Token_Type::Token_Open_Paren:
        return "open paren";
    case Token_Type::Token_Close_Paren:
        return "close paren";
    case Token_Type::Token_Open_Curly:
        return "open curly";
    case Token_Type::Token_Close_Curly:
        return "close curly";
    case Token_Type::Token_Semicolon:
        return "semicolon";
    case Token_Type::Token_Keyword:
        return "keyword";
    default:
        UNREACHABLE("token type name");
    }

    return NULL;
}

// Create a lexer over `content` (borrowed; must outlive the lexer and is not
// copied). `atlas` may be NULL — it is only consulted to advance the pen `x`.
// `inline` added: this is a definition in a header, so without it the One
// Definition Rule is violated across translation units.
inline Lexer LexerNew(Free_Glyph_Atlas *atlas, const char *content, size_t content_len)
{
    Lexer lexer = {}; // value-init: cursor/line/bol/x all start at zero
    lexer.atlas = atlas;
    lexer.content = content;
    lexer.content_len = content_len;

    return lexer;
}

// True when the unconsumed content begins with `prefix`.
// An empty prefix always matches; a prefix longer than the remaining input
// never does. `inline` added (header definition — ODR), and the byte loop is
// replaced by the memcmp idiom; the bound check is written as a remaining-
// length comparison, which cannot overflow.
inline bool lexer_starts_with(Lexer *lexer, const char *prefix)
{
    size_t prefix_len = strlen(prefix);
    if (prefix_len == 0)
    {
        return true;
    }

    // Equivalent to the original `cursor + prefix_len - 1 >= content_len`
    // (valid here because prefix_len > 0), but overflow-safe.
    if (prefix_len > lexer->content_len - lexer->cursor)
    {
        return false;
    }

    return memcmp(lexer->content + lexer->cursor, prefix, prefix_len) == 0;
}

// Consume `len` characters, keeping line/bol/x bookkeeping up to date.
// A newline resets the pen x and advances the line counter; any other
// character advances x by its glyph advance (when an atlas is present).
// `inline` added: header definition — avoids ODR violations.
inline void lexer_chop_char(Lexer *lexer, size_t len)
{
    for (size_t i = 0; i < len; ++i)
    {
        // todos: get rid of this assert by checking the length of the chopped
        // prefix upfront
        assert(lexer->cursor < lexer->content_len);

        char x = lexer->content[lexer->cursor];

        lexer->cursor += 1;

        if (x == '\n')
        {
            lexer->line += 1;
            lexer->bol = lexer->cursor;
            lexer->x = 0;
        }
        else
        {
            if (lexer->atlas)
            {
                // NOTE(review): for a negative (non-ASCII) char this converts
                // modularly to a huge size_t and is caught by the capacity
                // check below — confirm that is the intended fallback.
                size_t glyph_index = x;
                // todos: support for glyphs outside of ASCII range
                if (glyph_index >= GLYPH_METRICS_CAPACITY)
                {
                    glyph_index = '?';
                }
                Glyph_Metric metric = lexer->atlas->metrics[glyph_index];
                lexer->x += metric.ax;
            }
        }
    }
}

// Skip whitespace at the cursor (via lexer_chop_char, so positions stay
// correct). The cast to unsigned char fixes undefined behavior: isspace()
// must not be given a negative plain-char value (any non-ASCII byte on
// platforms where char is signed). `inline` added (header definition — ODR).
inline void lexer_trim_left(Lexer *lexer)
{
    while (lexer->cursor < lexer->content_len &&
           isspace((unsigned char)lexer->content[lexer->cursor]))
    {
        lexer_chop_char(lexer, 1);
    }
}

// True for a character that may begin an identifier: [A-Za-z_].
// The unsigned char cast fixes UB: isalpha() must not receive a negative
// plain-char value. `inline` added (header definition — ODR).
inline bool is_symbol_start(char x)
{
    return isalpha((unsigned char)x) || x == '_';
}

// True for a character that may continue an identifier: [A-Za-z0-9_].
// The unsigned char cast fixes UB: isalnum() must not receive a negative
// plain-char value. `inline` added (header definition — ODR).
inline bool is_symbol(char x)
{
    return isalnum((unsigned char)x) || x == '_';
}

Token LexerNext(Lexer *lexer)
{
    Token token{}; // default init in cpp code
    token.text = &lexer->content[lexer->cursor];

    token.position.x = lexer->x;
    token.position.y = -(float)lexer->line * FREE_GLYPH_FONT_SIZE;

    if (lexer->cursor >= lexer->content_len)
        return token;

    // ----------------------------------- handle "
    if (lexer->content[lexer->cursor] == '"')
    {
        // todos: Token String should also handle escapse sequences
        // HACK: rename attri here
        token.token_type = Token_Type::Token_String;
        lexer_chop_char(lexer, 1);
        while (lexer->cursor < lexer->content_len &&
               lexer->content[lexer->cursor] != '"' &&
               lexer->content[lexer->cursor] != '\n')
        {
            lexer_chop_char(lexer, 1);
        }

        if (lexer->cursor < lexer->content_len)
        {
            lexer_chop_char(lexer, 1);
        }

        token.text_len = &lexer->content[lexer->cursor] - token.text;

        return token;
    }
    // -------------------------- handle #
    if (lexer->content[lexer->cursor] == '#')
    {
        // todos: preproc should also handle newlines
        token.token_type = Token_Type::Token_Preproc;
        while (lexer->cursor < lexer->content_len &&
               lexer->content[lexer->cursor] != '\n')
        {
            lexer_chop_char(lexer, 1);
        }

        if (lexer->cursor < lexer->content_len)
        {
            lexer_chop_char(lexer, 1);
        }

        token.text_len = &lexer->content[lexer->cursor] - token.text;

        return token;
    }
    // -------------------------- handle //
    if (lexer_starts_with(lexer, "//"))
    {
        token.token_type = Token_Type::Token_Comment;

        while (lexer->cursor < lexer->content_len &&
               lexer->content[lexer->cursor] != '\n')
        {
            lexer_chop_char(lexer, 1);
        }

        if (lexer->cursor < lexer->content_len)
        {
            lexer_chop_char(lexer, 1);
        }

        token.text_len = &lexer->content[lexer->cursor] - token.text;

        return token;
    }

    for (size_t i = 0; i < literal_tokens_count; ++i)
    {
        if (lexer_starts_with(lexer, literal_tokens[i].text))
        {
            // NOTE: this code assums that there is no newlines in literal_tokens[i].text
            size_t text_len = strlen(literal_tokens[i].text);

            token.token_type = literal_tokens[i].tk_type;
            token.text_len = text_len;

            lexer_chop_char(lexer, text_len);

            return token;
        }
    }

    if (is_symbol_start(lexer->content[lexer->cursor]))
    {
        token.token_type = Token_Type::Token_Symbol;

        while (lexer->cursor < lexer->content_len &&
               is_symbol(lexer->content[lexer->cursor]))
        {
            lexer_chop_char(lexer, 1);
            token.text_len += 1;
        }

        for (size_t i = 0; i < keywords_count; ++i)
        {
            size_t keyword_len = strlen(keywords[i]);

            if (keyword_len == token.text_len &&
                memcmp(keywords[i],
                       token.text, keyword_len) == 0)
            {
                token.token_type = Token_Type::Token_Keyword;
                break;
            }
        }

        return token;
    }

    lexer_chop_char(lexer, 1);
    token.token_type = Token_Type::Token_Invalid;
    token.text_len = 1;
    return token;
}
#endif // LEXER_HPP