#
# mydbms/pysql_compiler/lexer.py
#
import re
from collections import namedtuple

# A single lexical token: its type tag, raw lexeme, and 1-based source position.
Token = namedtuple('Token', 'type value line col')


class Lexer:
    """SQL lexical analyzer.

    Splits a raw SQL string into a stream of ``Token`` namedtuples
    (type, value, line, col) via a single combined regular expression
    with one named group per token kind.
    """

    def __init__(self, sql_string: str):
        """Prepare the lexer for *sql_string*.

        Args:
            sql_string: The SQL text to tokenize.
        """
        self.sql = sql_string
        # Reserved words. An ID whose upper-cased form appears here is
        # re-tagged so the keyword name itself becomes the token type
        # (e.g. Token('SELECT', 'SELECT', ...)).
        # NOTE: the original set listed 'KEY' twice; duplicate removed.
        self.keywords = {
            'SELECT', 'FROM', 'WHERE', 'INSERT', 'INTO', 'VALUES',
            'UPDATE', 'SET', 'DELETE', 'CREATE', 'TABLE', 'INDEX', 'ON', 'DISTINCT',
            'ORDER', 'BY', 'ASC', 'DESC', 'GROUP', 'HAVING', 'AS', 'COUNT', 'TRUNCATE', 'IN',
            'JOIN', 'INNER', 'LEFT', 'RIGHT',
            'PRIMARY', 'KEY', 'AUTO_INCREMENT', 'NOT', 'NULL', 'CHECK', 'UNIQUE', 'DROP',
            'ALTER', 'ADD', 'COLUMN',
            'AND', 'OR', 'LIKE', 'IS',
            'SUM', 'AVG', 'MAX', 'MIN',
            'SHOW', 'TABLES',
            'EXPLAIN',
            'FOREIGN', 'REFERENCES',
            'CONSTRAINT',
            'TRUE', 'FALSE',
        }

        # Token specifications. Order matters: alternatives are tried
        # left-to-right, so COMMENT ('--') must precede OP ('-'), and
        # multi-character operators must precede their one-char prefixes.
        self.token_specs = [
            ('COMMENT', r'--.*'),                # line comment to end of line
            ('NUMBER', r'\d+(\.\d*)?'),          # integer or decimal literal
            ('STRING', r"'(?:''|[^'])*'"),       # single-quoted; '' escapes a quote
            ('ID', r'[A-Za-z_][A-Za-z0-9_]*'),   # identifier (may be a keyword)
            ('DOT', r'\.'),                      # qualified names: table.column
            ('STAR', r'\*'),                     # kept separate from OP so the
                                                 # parser can tell wildcard from multiply
            ('OP', r'<=|>=|<>|!=|=|<|>|\+|-|/|\|\|'),
            ('COMMA', r','),
            ('LPAREN', r'\('),
            ('RPAREN', r'\)'),
            ('SEMICOLON', r';'),
            ('NEWLINE', r'\n'),                  # tracked only for line/col bookkeeping
            ('SKIP', r'[ \t]+'),                 # inline whitespace, discarded
            ('MISMATCH', r'.'),                  # anything else is a lexical error
        ]
        self.tokenizer_regex = re.compile(
            '|'.join('(?P<%s>%s)' % pair for pair in self.token_specs))

    def tokenize(self):
        """Yield ``Token`` namedtuples for the SQL string, in order.

        Comments and whitespace are dropped; newlines advance the line
        counter. Keyword lexemes are upper-cased so the parser can match
        them case-insensitively.

        Yields:
            Token: (type, value, line, col), both line and col 1-based.

        Raises:
            RuntimeError: on the first character no rule matches.
        """
        line_num = 1
        line_start = 0  # offset of the current line's first character
        for mo in self.tokenizer_regex.finditer(self.sql):
            kind = mo.lastgroup
            value = mo.group()
            column = mo.start() - line_start + 1

            if kind == 'NEWLINE':
                line_start = mo.end()
                line_num += 1
                continue
            elif kind == 'ID' and value.upper() in self.keywords:
                # Promote the identifier: the keyword becomes the token type.
                kind = value.upper()

            if kind in ('COMMENT', 'SKIP'):
                continue
            elif kind == 'MISMATCH':
                raise RuntimeError(f'Unexpected character: {value!r} at line {line_num} column {column}')

            # Normalize the lexeme of keyword tokens ('select' -> 'SELECT').
            if kind in self.keywords:
                value = value.upper()

            yield Token(kind, value, line_num, column)