"""
SQL词法分析器 (Lexical Analyzer)
使用有限自动机(FA)原理实现SQL语句的词法分析
"""

import re
from enum import Enum
from typing import List, Tuple, Optional


class TokenType(Enum):
    """Kinds of lexical tokens produced by the SQL lexer.

    Each member's value is either the canonical lexeme (keywords,
    operators, delimiters) or a symbolic category name (literals, EOF).
    """
    # Keywords
    SELECT = "SELECT"
    FROM = "FROM"
    WHERE = "WHERE"
    INSERT = "INSERT"
    INTO = "INTO"
    VALUES = "VALUES"
    UPDATE = "UPDATE"
    SET = "SET"
    DELETE = "DELETE"
    CREATE = "CREATE"
    TABLE = "TABLE"
    DROP = "DROP"
    ALTER = "ALTER"
    ADD = "ADD"
    COLUMN = "COLUMN"
    DATABASE = "DATABASE"
    USE = "USE"
    SHOW = "SHOW"
    TABLES = "TABLES"
    DESCRIBE = "DESCRIBE"
    BEGIN = "BEGIN"
    COMMIT = "COMMIT"
    ROLLBACK = "ROLLBACK"
    TRANSACTION = "TRANSACTION"
    PRIMARY = "PRIMARY"
    KEY = "KEY"

    # Data types
    INT = "INT"
    VARCHAR = "VARCHAR"
    FLOAT = "FLOAT"
    BOOLEAN = "BOOLEAN"
    DATE = "DATE"
    TEXT = "TEXT"

    # Operators
    EQUALS = "="
    NOT_EQUALS = "!="
    GREATER = ">"
    LESS = "<"
    GREATER_EQUALS = ">="
    LESS_EQUALS = "<="
    AND = "AND"
    OR = "OR"
    NOT = "NOT"
    LIKE = "LIKE"
    IN = "IN"
    BETWEEN = "BETWEEN"
    IS = "IS"
    NULL = "NULL"

    # Delimiters
    COMMA = ","
    SEMICOLON = ";"
    LPAREN = "("
    RPAREN = ")"
    DOT = "."
    STAR = "*"

    # Literals
    IDENTIFIER = "IDENTIFIER"
    STRING_LITERAL = "STRING_LITERAL"
    NUMBER_LITERAL = "NUMBER_LITERAL"

    # End-of-input marker
    EOF = "EOF"


class Token:
    """A single lexical token.

    Attributes:
        type: the token's TokenType category.
        value: the lexeme text (escape sequences already decoded for
            string literals; empty string for EOF).
        line: 1-based line where the token starts.
        column: 1-based column where the token starts (for string
            literals this is the opening quote).
    """

    def __init__(self, token_type: TokenType, value: str, line: int, column: int):
        self.type = token_type
        self.value = value
        self.line = line
        self.column = column

    def __str__(self) -> str:
        return f"Token({self.type.value}, '{self.value}', line={self.line}, col={self.column})"

    def __repr__(self) -> str:
        return self.__str__()


class Lexer:
    """SQL lexical analyzer.

    Scans an SQL string left to right one character at a time (a simple
    hand-rolled finite automaton) and produces a list of Token objects,
    always terminated by a single EOF token.

    Usage:
        tokens = Lexer("SELECT * FROM t;").tokenize()
    """

    # Keyword lookup table: uppercase lexeme -> token type.
    # Anything not found here lexes as a plain IDENTIFIER.
    KEYWORDS = {
        'SELECT': TokenType.SELECT,
        'FROM': TokenType.FROM,
        'WHERE': TokenType.WHERE,
        'INSERT': TokenType.INSERT,
        'INTO': TokenType.INTO,
        'VALUES': TokenType.VALUES,
        'UPDATE': TokenType.UPDATE,
        'SET': TokenType.SET,
        'DELETE': TokenType.DELETE,
        'CREATE': TokenType.CREATE,
        'TABLE': TokenType.TABLE,
        'DROP': TokenType.DROP,
        'ALTER': TokenType.ALTER,
        'ADD': TokenType.ADD,
        'COLUMN': TokenType.COLUMN,
        'DATABASE': TokenType.DATABASE,
        'USE': TokenType.USE,
        'SHOW': TokenType.SHOW,
        'TABLES': TokenType.TABLES,
        'DESCRIBE': TokenType.DESCRIBE,
        'BEGIN': TokenType.BEGIN,
        'COMMIT': TokenType.COMMIT,
        'ROLLBACK': TokenType.ROLLBACK,
        'TRANSACTION': TokenType.TRANSACTION,
        'PRIMARY': TokenType.PRIMARY,
        'KEY': TokenType.KEY,
        'INT': TokenType.INT,
        'VARCHAR': TokenType.VARCHAR,
        'FLOAT': TokenType.FLOAT,
        'BOOLEAN': TokenType.BOOLEAN,
        'DATE': TokenType.DATE,
        'TEXT': TokenType.TEXT,
        'AND': TokenType.AND,
        'OR': TokenType.OR,
        'NOT': TokenType.NOT,
        'LIKE': TokenType.LIKE,
        'IN': TokenType.IN,
        'BETWEEN': TokenType.BETWEEN,
        'IS': TokenType.IS,
        'NULL': TokenType.NULL,
    }

    # Backslash escape sequences recognized inside string literals.
    ESCAPE_SEQUENCES = {
        'n': '\n',
        't': '\t',
        'r': '\r',
        '"': '"',
        "'": "'",
        '\\': '\\',
    }

    # Two-character operators; must be matched before single-character ones.
    TWO_CHAR_TOKENS = {
        '!=': TokenType.NOT_EQUALS,
        '>=': TokenType.GREATER_EQUALS,
        '<=': TokenType.LESS_EQUALS,
    }

    # Single-character operators and delimiters.
    SINGLE_CHAR_TOKENS = {
        '=': TokenType.EQUALS,
        '>': TokenType.GREATER,
        '<': TokenType.LESS,
        ',': TokenType.COMMA,
        ';': TokenType.SEMICOLON,
        '(': TokenType.LPAREN,
        ')': TokenType.RPAREN,
        '.': TokenType.DOT,
        '*': TokenType.STAR,
    }

    def __init__(self, sql: str):
        self.sql = sql
        self.tokens: List[Token] = []
        self.position = 0   # index of current_char within sql
        self.line = 1       # 1-based line of current_char
        self.column = 1     # 1-based column of current_char
        self.current_char: Optional[str] = sql[0] if sql else None

    def advance(self) -> None:
        """Consume the current character and move to the next one.

        BUG FIX: the original implementation read ``sql[position]``
        *before* incrementing ``position``, while ``__init__`` had already
        loaded ``sql[0]``.  The first ``advance()`` therefore re-read the
        first character, duplicating it in the first token (e.g.
        ``Lexer("SELECT")`` produced the identifier ``"SSELECT"``).
        Line/column bookkeeping now also lives here, so newlines inside
        string literals are counted too, and the character following a
        newline correctly lands on column 1.
        """
        if self.current_char == '\n':
            self.line += 1
            self.column = 0  # the increment below makes it column 1
        self.position += 1
        self.column += 1
        self.current_char = self.sql[self.position] if self.position < len(self.sql) else None

    def peek(self) -> Optional[str]:
        """Return the character after current_char without consuming it.

        BUG FIX: previously returned ``sql[position]``, which at the start
        of the input was the *current* character, not the next one.
        """
        peek_pos = self.position + 1
        if peek_pos >= len(self.sql):
            return None
        return self.sql[peek_pos]

    def skip_whitespace(self) -> None:
        """Skip whitespace; advance() maintains the line/column counters."""
        while self.current_char is not None and self.current_char.isspace():
            self.advance()

    def read_identifier(self) -> str:
        """Read an identifier/keyword: letters, digits and underscores."""
        chars: List[str] = []
        while self.current_char is not None and (self.current_char.isalnum() or self.current_char == '_'):
            chars.append(self.current_char)
            self.advance()
        return ''.join(chars)

    def read_string_literal(self) -> str:
        """Read a quoted string literal, decoding backslash escapes.

        Supports \\n, \\t, \\r, \\", \\' and \\\\; an unrecognized escape
        keeps the escaped character verbatim (previously both characters
        were silently dropped).  An unterminated string simply ends at
        end of input.  Returns the decoded value without the quotes.
        """
        quote = self.current_char  # remember which quote style opened it
        self.advance()  # skip the opening quote
        chars: List[str] = []
        while self.current_char is not None and self.current_char != quote:
            if self.current_char == '\\':
                self.advance()
                if self.current_char is not None:
                    chars.append(self.ESCAPE_SEQUENCES.get(self.current_char, self.current_char))
                    self.advance()
            else:
                chars.append(self.current_char)
                self.advance()
        if self.current_char == quote:
            self.advance()  # skip the closing quote
        return ''.join(chars)

    def read_number_literal(self) -> str:
        """Read an integer or decimal literal.

        At most one decimal point is consumed (previously ``1.2.3``
        lexed as a single number); a second '.' is left for the caller,
        where it becomes a DOT token.
        """
        chars: List[str] = []
        seen_dot = False
        while self.current_char is not None:
            if self.current_char.isdigit():
                chars.append(self.current_char)
            elif self.current_char == '.' and not seen_dot:
                seen_dot = True
                chars.append(self.current_char)
            else:
                break
            self.advance()
        return ''.join(chars)

    def tokenize(self) -> List[Token]:
        """Scan the whole input and return the token list (EOF-terminated).

        Token line/column are recorded *before* the lexeme is consumed,
        so they always point at the token's first character (the old code
        derived them from the end position, which was wrong for string
        literals containing escape sequences).
        Unknown characters are skipped silently, preserving the original
        lenient behavior; a stricter lexer would raise a syntax error.
        """
        while self.current_char is not None:
            if self.current_char.isspace():
                self.skip_whitespace()
                continue

            start_line, start_column = self.line, self.column

            # Identifier or keyword
            if self.current_char.isalpha() or self.current_char == '_':
                word = self.read_identifier()
                token_type = self.KEYWORDS.get(word.upper(), TokenType.IDENTIFIER)
                self.tokens.append(Token(token_type, word, start_line, start_column))
                continue

            # String literal
            if self.current_char in ('"', "'"):
                text = self.read_string_literal()
                self.tokens.append(Token(TokenType.STRING_LITERAL, text, start_line, start_column))
                continue

            # Number literal
            if self.current_char.isdigit():
                number = self.read_number_literal()
                self.tokens.append(Token(TokenType.NUMBER_LITERAL, number, start_line, start_column))
                continue

            # Two-character operators (!=, >=, <=)
            next_char = self.peek()
            if next_char is not None and self.current_char + next_char in self.TWO_CHAR_TOKENS:
                lexeme = self.current_char + next_char
                self.tokens.append(Token(self.TWO_CHAR_TOKENS[lexeme], lexeme, start_line, start_column))
                self.advance()
                self.advance()
                continue

            # Single-character operators and delimiters
            token_type = self.SINGLE_CHAR_TOKENS.get(self.current_char)
            if token_type is not None:
                self.tokens.append(Token(token_type, self.current_char, start_line, start_column))
                self.advance()
                continue

            # Unknown character: skip it (original lenient behavior)
            self.advance()

        # Terminating EOF marker
        self.tokens.append(Token(TokenType.EOF, '', self.line, self.column))
        return self.tokens


def test_lexer():
    """Smoke test: tokenize sample DDL/DML statements and print each token."""
    sql = """
    CREATE TABLE users (
        id INT,
        name VARCHAR(50),
        age INT,
        email VARCHAR(100)
    );

    INSERT INTO users VALUES (1, 'Alice', 25, 'alice@example.com');
    SELECT * FROM users WHERE age > 20;
    """

    tokens = Lexer(sql).tokenize()

    print("词法分析结果:")
    visible = [tok for tok in tokens if tok.type != TokenType.EOF]
    for tok in visible:
        print(f"  {tok}")


# Entry point: run the lexer demo when this module is executed directly.
if __name__ == "__main__":
    test_lexer()
