"""
Author: runyu.ma

lexer.py
"""

import re
from collections import namedtuple

# A lexical token: type tag, (possibly converted) value, and its
# 1-based source position (line, column).
Token = namedtuple('Token', 'type value line column')

class LexerError(Exception):
    """Raised when the lexer meets input that no token pattern accepts."""

class Lexer:
    """Regex-based tokenizer with a dynamically extensible keyword set.

    Keywords are registered at runtime via add_keyword(); an identifier
    that matches a registered keyword is emitted with the keyword's
    upper-cased name as its token type instead of 'ID'.
    """

    def __init__(self):
        # Keywords currently recognized (extend via add_keyword()).
        self.keywords = set()

        # Token specification, highest priority first: when several
        # patterns could match at the same position, the earlier
        # alternative in the compiled regex wins.
        self.token_specification = [
            ('NUMBER',      r'\d+(\.\d*)?'),        # integer or float literal
            ('STRING',      r'"([^"\\]|\\.)*"'),    # double-quoted string
            ('SSTRING',     r's"([^"\\]|\\.)*"'),   # s-prefixed string literal
            ('PSTRING',     r'p"([^"\\]|\\.)*"'),   # p-prefixed string literal
            ('ID',          r'[A-Za-z_{][\w{}]*'),  # identifier (or keyword); '{'/'}' allowed by design
            ('OP',          r'==|!=|<=|>=|>>|<<|\+|-|\*|/|%|=|<|>|&|\||\$|!'),
            ('LPAREN',      r'\('),
            ('RPAREN',      r'\)'),
            ('CAST',        r'@'),
            ('COMMA',       r','),
            ('COLON',       r':'),
            ('SEMICOLON',   r';'),
            ('NEWLINE',     r'\n'),                 # line break
            ('SKIP',        r'[ \t]+'),             # inline whitespace
            ('MISMATCH',    r'.'),                  # anything else: lex error
        ]

        # Compile the specification into one big alternation regex.
        self.token_regex = self._build_regex()

    def _build_regex(self):
        """Compile the token specification into a single named-group regex."""
        return re.compile('|'.join(
            f'(?P<{name}>{pattern})'
            for name, pattern in self.token_specification
        ))

    def add_keyword(self, keyword: str):
        """Register a keyword dynamically, e.g. lexer.add_keyword('if').

        After registration, tokenize() emits that word with type
        keyword.upper() instead of 'ID'.
        """
        self.keywords.add(keyword)

    def tokenize(self, code: str):
        """Split the input string into a list of Token namedtuples.

        NEWLINE and SKIP matches are consumed silently (no token is
        produced); they only drive line/column bookkeeping.

        Raises:
            LexerError: on any character no token pattern accepts.
        """
        tokens = []
        line_num = 1
        line_start = 0   # offset of the first character of the current line
        for mo in self.token_regex.finditer(code):
            kind = mo.lastgroup
            raw = mo.group()
            value = raw
            column = mo.start() - line_start + 1

            if kind == 'NUMBER':
                # Convert the lexeme to int or float.
                value = float(raw) if '.' in raw else int(raw)

            elif kind == 'ID':
                # A registered keyword becomes its own token type
                # (the keyword upper-cased).
                if raw in self.keywords:
                    kind = raw.upper()

            elif kind == 'STRING':
                # Strip the surrounding quotes and process escapes.
                value = self._unescape_string(raw)

            elif kind in ('SSTRING', 'PSTRING'):
                # Drop the one-character prefix ('s'/'p'); the helper
                # then strips the quotes and processes escapes.
                value = self._unescape_string(raw[1:])

            elif kind == 'NEWLINE':
                line_num += 1
                line_start = mo.end()
                continue

            elif kind == 'SKIP':
                continue

            elif kind == 'MISMATCH':
                raise LexerError(f'Unexpected token {value!r} at line {line_num} col {column}')

            tokens.append(Token(kind, value, line_num, column))

            # String literals may legally contain raw newlines (the
            # pattern [^"\\] matches '\n'); keep the line bookkeeping
            # correct for the tokens that follow such a literal.
            embedded_newlines = raw.count('\n')
            if embedded_newlines:
                line_num += embedded_newlines
                line_start = mo.start() + raw.rfind('\n') + 1

        return tokens

    def _unescape_string(self, s: str) -> str:
        """Strip the surrounding quotes of *s* and process backslash escapes.

        The latin-1/backslashreplace round-trip keeps non-ASCII characters
        intact: unicode_escape decodes bytes as Latin-1, so encoding to
        UTF-8 first (as the previous implementation did) would mangle any
        non-ASCII text in the literal.
        """
        return s[1:-1].encode('latin-1', 'backslashreplace').decode('unicode_escape')


def build_default_lexer():
    """Build a Lexer preloaded with the language's default keywords."""
    lexer = Lexer()
    # NOTE: 'func', 'if' and 'else' are deliberately NOT keywords here;
    # they lex as plain IDs.
    for kw in ('begin', 'end', 'return', 'ptr'):
        lexer.add_keyword(kw)
    return lexer
