import re
from enum import Enum
from typing import List, Tuple, Optional

class TokenType(Enum):
    """
    Enumeration of SQL token kinds.

    Design note: keywords, identifiers, constants, operators and delimiters
    are grouped by category so the parser can dispatch on them and new kinds
    are easy to add.  Each member's string value doubles as the kind code
    emitted by Token.to_list().
    """
    # Keywords
    SELECT = "SELECT"
    FROM = "FROM"
    WHERE = "WHERE"
    CREATE = "CREATE"
    TABLE = "TABLE"
    INSERT = "INSERT"
    INTO = "INTO"
    VALUES = "VALUES"
    DELETE = "DELETE"
    INT = "INT"
    VARCHAR = "VARCHAR"
    USE = "USE"
    DATABASE = "DATABASE"
    # Identifiers
    IDENTIFIER = "IDENTIFIER"
    # Constants
    NUMBER = "NUMBER"
    STRING = "STRING"
    # Operators
    EQUALS = "EQUALS"
    GREATER_THAN = "GREATER_THAN"
    LESS_THAN = "LESS_THAN"
    GREATER_EQUALS = "GREATER_EQUALS"
    LESS_EQUALS = "LESS_EQUALS"
    NOT_EQUALS = "NOT_EQUALS"
    # Delimiters
    COMMA = "COMMA"
    SEMICOLON = "SEMICOLON"
    LEFT_PAREN = "LEFT_PAREN"
    RIGHT_PAREN = "RIGHT_PAREN"
    STAR = "STAR"  # added to support the SELECT * syntax
    # Other
    EOF = "EOF"

class Token:
    """
    A single lexical unit: its kind, raw text, and source position.

    Serialized form (see to_list): [kind code, lexeme, line, column].
    """

    def __init__(self, token_type: TokenType, lexeme: str, line: int, column: int):
        self.token_type = token_type  # TokenType member classifying this token
        self.lexeme = lexeme          # raw text exactly as it appeared in the input
        self.line = line              # 1-based line of the token's first character
        self.column = column          # 1-based column of the token's first character

    def __repr__(self):
        return f"Token({self.token_type}, '{self.lexeme}', line={self.line}, column={self.column})"

    # A token prints identically whether str()'d or repr()'d.
    __str__ = __repr__

    def to_list(self):
        """Return the standard output form: [kind code, lexeme, line, column]."""
        return [self.token_type.value, self.lexeme, self.line, self.column]

class LexerError(Exception):
    """
    Raised when the lexer meets invalid input; carries the error position
    so problems can be pinpointed during debugging.

    Standard error form (see to_dict): [error type, position, reason].
    """

    def __init__(self, message, line, column, error_type="LEXICAL_ERROR"):
        # The exception text embeds the position for plain str() consumers.
        super().__init__(f"{message} at line {line}, column {column}")
        self.message = message
        self.error_type = error_type
        self.line = line
        self.column = column

    def to_dict(self):
        """Return the error in the standard reporting shape."""
        position = f"line {self.line}, column {self.column}"
        return {
            "error_type": self.error_type,
            "position": position,
            "reason": self.message,
        }

class Lexer:
    """
    SQL lexical analyzer.

    Splits a SQL source string into a list of Token objects for a parser to
    consume.  Design decisions:
      - Supports keywords, identifiers, numbers, single-quoted strings,
        comparison operators (=, >, <, >=, <=, !=, <>), delimiters, the
        star (*), and single-line ``--`` comments.
      - Tracks 1-based line/column for every token so errors can be
        pinpointed and IDEs can highlight precisely.
      - Single forward scan; the hot path uses direct character tests
        instead of regular expressions.
    """

    def __init__(self, sql_string: str):
        self.sql_string = sql_string
        self.tokens: List[Token] = []
        self.current_pos = 0   # index of the next unread character
        self.line = 1          # 1-based line of the next unread character
        self.column = 1        # 1-based column of the next unread character
        # Keyword lookup table (uppercased lexeme -> token type); extend
        # here when new keywords are added to TokenType.
        self.keywords = {
            'SELECT': TokenType.SELECT,
            'FROM': TokenType.FROM,
            'WHERE': TokenType.WHERE,
            'CREATE': TokenType.CREATE,
            'TABLE': TokenType.TABLE,
            'INSERT': TokenType.INSERT,
            'INTO': TokenType.INTO,
            'VALUES': TokenType.VALUES,
            'DELETE': TokenType.DELETE,
            'INT': TokenType.INT,
            'VARCHAR': TokenType.VARCHAR,
            'USE': TokenType.USE,
            'DATABASE': TokenType.DATABASE
        }

    def _advance(self, count: int = 1) -> None:
        """Consume *count* characters that are known not to be newlines."""
        self.current_pos += count
        self.column += count

    def tokenize(self) -> List[Token]:
        """
        Main entry point: split the SQL string into a Token list.

        The input is scanned once, and an EOF token is always appended so
        the parser has a definite end marker.

        Raises:
            LexerError: on a character that cannot start any token, on a
                lone '!', or on an unterminated string literal.
        """
        while self.current_pos < len(self.sql_string):
            current_char = self.sql_string[self.current_pos]

            # Skip whitespace while keeping position bookkeeping accurate.
            if current_char.isspace():
                if current_char == '\n':
                    self.line += 1
                    self.column = 1
                else:
                    self.column += 1
                self.current_pos += 1
                continue

            # Single-line "--" comment: discard up to (not including) the
            # newline; the whitespace branch above then consumes the
            # newline and resets the column.
            if current_char == '-' and self.current_pos + 1 < len(self.sql_string) and self.sql_string[self.current_pos + 1] == '-':
                while self.current_pos < len(self.sql_string) and self.sql_string[self.current_pos] != '\n':
                    self._advance()
                continue

            # Identifiers and keywords.
            if current_char.isalpha() or current_char == '_':
                self.tokens.append(self._process_identifier())
                continue

            # Numeric constants.
            if current_char.isdigit():
                self.tokens.append(self._process_number())
                continue

            # String constants.
            if current_char == "'":
                self.tokens.append(self._process_string())
                continue

            # Comparison operators.
            if current_char in ('=', '>', '<', '!'):
                self.tokens.append(self._process_operator())
                continue

            # Delimiters and the star (SELECT *).
            if current_char in (',', ';', '(', ')', '*'):
                self.tokens.append(self._process_delimiter_or_star())
                continue

            # Anything else cannot start a token: fail fast with position.
            raise LexerError(f"Unknown character: {current_char}", self.line, self.column)

        # EOF sentinel so syntax analysis knows where the stream ends.
        self.tokens.append(Token(TokenType.EOF, "", self.line, self.column))
        return self.tokens

    def _process_identifier(self) -> Token:
        """
        Scan an identifier or keyword.

        Consumes letters, digits and underscores; the keyword check is
        case-insensitive, but the token keeps the original spelling.
        """
        start_pos = self.current_pos
        start_line = self.line
        start_column = self.column

        while (self.current_pos < len(self.sql_string) and
               (self.sql_string[self.current_pos].isalnum() or
                self.sql_string[self.current_pos] == '_')):
            self._advance()

        lexeme = self.sql_string[start_pos:self.current_pos]
        token_type = self.keywords.get(lexeme.upper(), TokenType.IDENTIFIER)
        return Token(token_type, lexeme, start_line, start_column)

    def _process_number(self) -> Token:
        """
        Scan a numeric constant (integers and simple decimals only).

        Scientific notation and signs are deliberately unsupported; direct
        character tests keep the hot path regex-free.
        """
        start_pos = self.current_pos
        start_line = self.line
        start_column = self.column

        has_dot = False
        while self.current_pos < len(self.sql_string):
            current_char = self.sql_string[self.current_pos]
            if current_char.isdigit():
                self._advance()
            elif current_char == '.' and not has_dot:
                # Accept at most one decimal point.
                has_dot = True
                self._advance()
            else:
                break

        lexeme = self.sql_string[start_pos:self.current_pos]
        return Token(TokenType.NUMBER, lexeme, start_line, start_column)

    def _process_string(self) -> Token:
        """
        Scan a single-quoted string constant.

        A backslash escapes the next character, so an escaped quote does
        not terminate the literal; escape sequences are kept verbatim in
        the lexeme, which excludes the surrounding quotes.  Newlines are
        allowed inside the literal and tracked for positions.

        Raises:
            LexerError: if the closing quote is never found (reported at
                the opening quote, not at end of input).
        """
        start_line = self.line
        start_column = self.column

        # Skip the opening quote.
        self._advance()

        start_pos = self.current_pos
        escaped = False

        while self.current_pos < len(self.sql_string):
            current_char = self.sql_string[self.current_pos]

            if current_char == "'" and not escaped:
                break  # unescaped closing quote
            elif current_char == '\\' and not escaped:
                escaped = True
            else:
                escaped = False

            if current_char == '\n':
                self.line += 1
                self.column = 1
            else:
                self.column += 1

            self.current_pos += 1

        # The loop only breaks on an unescaped quote, so reaching the end
        # of input means the literal was never closed.
        if self.current_pos >= len(self.sql_string):
            raise LexerError("Unclosed string literal", start_line, start_column)

        lexeme = self.sql_string[start_pos:self.current_pos]

        # Skip the closing quote.
        self._advance()

        return Token(TokenType.STRING, lexeme, start_line, start_column)

    def _process_operator(self) -> Token:
        """
        Scan a comparison operator, preferring the longest match so that
        >=, <=, != and <> are never split into two tokens.

        Raises:
            LexerError: for a lone '!' (only '!=' is a valid operator).
        """
        start_line = self.line
        start_column = self.column

        current_char = self.sql_string[self.current_pos]
        next_char = (self.sql_string[self.current_pos + 1]
                     if self.current_pos + 1 < len(self.sql_string) else None)

        if current_char == '=':
            self._advance()
            return Token(TokenType.EQUALS, "=", start_line, start_column)
        if current_char == '>':
            if next_char == '=':
                self._advance(2)
                return Token(TokenType.GREATER_EQUALS, ">=", start_line, start_column)
            self._advance()
            return Token(TokenType.GREATER_THAN, ">", start_line, start_column)
        if current_char == '<':
            if next_char == '=':
                self._advance(2)
                return Token(TokenType.LESS_EQUALS, "<=", start_line, start_column)
            if next_char == '>':
                # Standard SQL inequality operator, equivalent to '!='.
                self._advance(2)
                return Token(TokenType.NOT_EQUALS, "<>", start_line, start_column)
            self._advance()
            return Token(TokenType.LESS_THAN, "<", start_line, start_column)
        if current_char == '!':
            if next_char == '=':
                self._advance(2)
                return Token(TokenType.NOT_EQUALS, "!=", start_line, start_column)
            raise LexerError("Unexpected character after '!'", start_line, start_column)
        # Unreachable via tokenize(), which only dispatches '=', '>', '<'
        # and '!' here, but raise explicitly so this method can never
        # silently return None.
        raise LexerError(f"Unknown operator: {current_char}", start_line, start_column)

    def _process_delimiter_or_star(self) -> Token:
        """
        Scan a single-character delimiter (comma, semicolon, parenthesis)
        or the star (*).
        """
        delimiter_types = {
            ',': TokenType.COMMA,
            ';': TokenType.SEMICOLON,
            '(': TokenType.LEFT_PAREN,
            ')': TokenType.RIGHT_PAREN,
            '*': TokenType.STAR,
        }
        current_char = self.sql_string[self.current_pos]
        start_line = self.line
        start_column = self.column

        self._advance()
        return Token(delimiter_types[current_char], current_char, start_line, start_column)