#include "Lexer.h"
#include <iostream>
#include <regex>

namespace LEXER
{
    void Lexer::advance()
    {
        // Consume exactly one character from the front of `source`, keeping
        // the line/column counters in sync.
        if (this->source.empty())
        {
            // Guard: substr(1) on an empty string throws std::out_of_range.
            return;
        }
        if (this->source[0] == '\n')
        {
            // Newline: move to the next line. The increment below then makes
            // currentColumn 1, i.e. pointing at the first character of the
            // new line.
            currentLine++;
            currentColumn = 0;
        }
        currentColumn++;
        // Drop the consumed character from the remaining source text.
        this->source = this->source.substr(1);
    }

    void Lexer::skipWhitespace()
    {
        // Consume the leading run of blank characters (space, tab, CR, LF).
        // Each character goes through advance() so the line/column counters
        // stay correct.
        const std::string::size_type firstNonBlank =
            this->source.find_first_not_of(" \t\r\n");
        const std::string::size_type blankCount =
            (firstNonBlank == std::string::npos) ? this->source.size()
                                                 : firstNonBlank;
        for (std::string::size_type i = 0; i < blankCount; ++i)
        {
            advance();
        }
    }

    void Lexer::skipComment()
    {
        // 检查是否是单行注释
        if (this->source.substr(0, 2) == "//")
        {
            // 跳过 "//"
            advance(); // 跳过第一个 '/'
            advance(); // 跳过第二个 '/'

            // 跳过注释内容，直到遇到换行符或源代码结束
            while (!this->source.empty() && this->source[0] != '\n')
            {
                advance();
            }

            // 如果还未到达源代码末尾，且当前字符是换行符，跳过换行符
            if (!this->source.empty() && this->source[0] == '\n')
            {
                advance();
                currentLine++;
                currentColumn = 1;
            }
        }
    }

    bool Lexer::is_indent(std::string token)
    {
        // True iff `token` is a valid identifier: [a-zA-Z_][a-zA-Z0-9_]*.
        // Hand-rolled character checks replace the old implementation, which
        // built (compiled) a std::regex on every call — std::regex is very
        // slow and this function runs once per token. Semantics unchanged.
        if (token.empty())
        {
            return false;
        }
        const auto isIdentStart = [](char c) {
            return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_';
        };
        const auto isIdentRest = [](char c) {
            return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
                   (c >= '0' && c <= '9') || c == '_';
        };
        if (!isIdentStart(token[0]))
        {
            return false;
        }
        for (std::string::size_type i = 1; i < token.size(); ++i)
        {
            if (!isIdentRest(token[i]))
            {
                return false;
            }
        }
        return true;
    }

    bool Lexer::is_number(std::string token)
    {
        // True iff `token` is one or more decimal digits ([0-9]+).
        // The old per-call std::regex "[0-9][0-9]*$" is replaced by a plain
        // scan; under std::regex_match the pattern was anchored at both ends
        // anyway, so this is exactly equivalent — and much cheaper.
        if (token.empty())
        {
            return false;
        }
        for (char c : token)
        {
            if (c < '0' || c > '9')
            {
                return false;
            }
        }
        return true;
    }

    // Classify a lexeme string into its TokenType.
    //
    // Dispatch is on the FIRST character only: keyword candidates are then
    // compared in full, but the operator cases classify any token that merely
    // STARTS with that character (e.g. any token beginning with '+' yields
    // PLUS, '<' alone yields LSS). This relies on the tokenizer regex only
    // ever producing well-formed operator lexemes — NOTE(review): confirm
    // against tokenRegex, which is declared outside this file.
    TokenType Lexer::get_token_type(std::string token)
    {
        if (token.empty())
        {
            return TokenType::UNKNOWN;
        }

        // Leading digit => NUMBER. The remaining characters are NOT validated
        // here (e.g. "9ab" would pass); assumed impossible given tokenRegex —
        // TODO confirm.
        if (isdigit(token[0]))
        {
            return TokenType::NUMBER;
        }

        switch (token[0])
        {
        case 'c': // keywords "const", "call"
            if (token == "const")
                return TokenType::CONSTSYM;
            if (token == "call")
                return TokenType::CALLSYM;
            break;
        case 'v': // keyword "var"
            if (token == "var")
                return TokenType::VARSYM;
            break;
        case 'p': // keyword "procedure"
            if (token == "procedure")
                return TokenType::PROCSYM;
            break;
        case 'b': // keyword "begin"
            if (token == "begin")
                return TokenType::BEGINSYM;
            break;
        case 'e': // keywords "end", "else"
            if (token == "end")
                return TokenType::ENDSYM;
            else if (token == "else")
                return TokenType::ElSESYM; // enum spelling as declared in header
            break;
        case 'i': // keyword "if"
            if (token == "if")
                return TokenType::IFSYM;
            break;
        case 't': // keyword "then"
            if (token == "then")
                return TokenType::THENSYM;
            break;
        case 'w': // keywords "while", "write"
            if (token == "while")
                return TokenType::WHILESYM;
            if (token == "write")
                return TokenType::WRITESYM;
            break;
        case 'r': // keyword "read"
            if (token == "read")
                return TokenType::READSYM;
            break;
        case 'd': // keyword "do"
            if (token == "do")
                return TokenType::DOSYM;
            break;
        case ':': // ":=" is assignment; a lone ":" falls through to UNKNOWN below
            if (token == ":=")
                return TokenType::BECOMES;
            break;
        // Single-character operators/punctuation: classified by first char only.
        case '+':
            return TokenType::PLUS;
            break;
        case '-':
            return TokenType::MINUS;
            break;
        case '*':
            return TokenType::TIMES;
            break;
        case '/':
            return TokenType::SLASH;
            break;
        case '=':
            return TokenType::EQL;
            break;
        case '[':
            return TokenType::LBRACKET;
            break;
        case ']':
            return TokenType::RBRACKET;
            break;
        case '!': // only "!=" is valid; a lone "!" falls through to UNKNOWN below
            if (token == "!=")
                return TokenType::NEQ;
            break;
        case '<': // "<=" => LEQ, anything else starting with '<' => LSS
            if (token == "<=")
                return TokenType::LEQ;
            return TokenType::LSS;
            break;
        case '>': // ">=" => GEQ, anything else starting with '>' => GTR
            if (token == ">=")
                return TokenType::GEQ;
            return TokenType::GTR;
            break;
        case '(':
            return TokenType::LPAREN;
            break;

        case ')':
            return TokenType::RPAREN;
            break;
        case ',':
            return TokenType::COMMA;
            break;
        case ';':
            return TokenType::SEMICOLON;
            break;
        case '.':
            return TokenType::PERIOD;
            break;
        case '{':
            return TokenType::LBRACE;
            break;
        case '}':
            return TokenType::RBRACE;
            break;
        default:
            break;
        }

        // Not a keyword/operator: fall back to identifier, then number, then
        // UNKNOWN (reached e.g. for a lone ":" or "!").
        if (is_indent(token))
        {
            return TokenType::IDENT;
        }

        if (is_number(token))
        {
            return TokenType::NUMBER;
        }
        return TokenType::UNKNOWN;
    }

    // Produce the next token from the remaining source.
    //
    // Whitespace and "//" comments are skipped first (whitespace may follow a
    // comment, hence the second skipWhitespace call). Returns PROGRAM_END when
    // the input is exhausted, and an UNKNOWN token for unrecognizable input.
    Token Lexer::getNextToken()
    {
        skipWhitespace();
        skipComment();
        skipWhitespace();
        Token token;
        std::smatch match;

        if (std::regex_search(this->source, match, tokenRegex))
        {
            if (match.position(1) != 0)
            {
                // Garbage precedes the first recognizable token: report and
                // abort, as before. (The old unreachable `return` after
                // exit(1) has been removed.)
                std::cerr << "无法识别的词法单元: " << this->source << std::endl;
                exit(1);
            }
            std::string tokenStr = match[1];
            TokenType type = get_token_type(tokenStr);
            token = Token{type, tokenStr, currentLine, currentColumn};
            // Drop the matched text and move the column past it.
            // NOTE(review): assumes a token never spans a newline — confirm
            // against tokenRegex.
            this->source = match.suffix().str();
            this->currentColumn += tokenStr.length();
        }
        else
        {
            if (this->source.length() == 0)
            {
                // End of input.
                return Token{TokenType::PROGRAM_END, "", currentLine, currentColumn};
            }
            // Nothing matches at all. Report it, then consume one character so
            // callers looping on getNextToken() are guaranteed to make
            // progress — the old code returned without consuming anything,
            // which made lex()'s while-loop spin forever on bad input.
            std::cerr << "无法识别的词法单元: " << this->source << std::endl;
            Token bad{TokenType::UNKNOWN, this->source, currentLine, currentColumn};
            advance();
            return bad;
        }
        return token;
    }

    // Tokenize the whole input and return the accumulated token list.
    //
    // The list begins with a NUL sentinel. NOTE(review): a PROGRAM_END token
    // is appended only when the input ends in trailing whitespace/comments
    // (getNextToken then returns it inside the loop); inputs ending exactly at
    // a token get no PROGRAM_END. Verify what the parser expects before
    // changing this.
    std::vector<Token> Lexer::lex(std::string source)
    {
        this->source = source;
        currentChar = source.empty() ? '\0' : source[0];
        currentLine = 1;
        currentColumn = 1;
        tokens.push_back(Token{TokenType::NUL, "", currentLine, currentColumn});
        while (this->source.length() > 0)
        {
            // getNextToken() performs its own whitespace/comment skipping, so
            // the extra skip calls the old loop made were redundant.
            const std::string::size_type before = this->source.length();
            Token token = getNextToken();
            tokens.push_back(token);
            if (this->source.length() == before)
            {
                // Safety net: getNextToken() consumed nothing (unrecognizable
                // input) — stop instead of looping forever.
                break;
            }
        }

        return tokens;
    }
}