"""
SQL词法分析器
使用PLY库实现SQL语句的词法分析
"""

import ply.lex as lex
import re

class SQLLexer:
    """SQL lexical analyzer built on PLY.

    Token rules follow PLY's conventions: string attributes ``t_NAME`` for
    simple tokens and methods ``t_NAME`` for tokens needing an action.
    NOTE: the docstring of each ``t_*`` method IS the regular expression PLY
    compiles — those strings must not be edited casually.
    """

    def __init__(self):
        """Build the underlying PLY lexer from this instance's t_* rules.

        ``tokens`` and the rules are class attributes, so they are already
        visible to ``lex.lex(module=self)`` at construction time.
        """
        self.lexer = lex.lex(module=self)

    # Token names PLY will emit.
    tokens = (
        # Core SQL keywords
        'SELECT', 'FROM', 'WHERE', 'INSERT', 'INTO', 'VALUES', 'UPDATE', 'SET',
        'DELETE', 'CREATE', 'TABLE', 'DROP', 'INT', 'VARCHAR', 'FLOAT', 'BOOLEAN',
        'AND', 'OR', 'NOT', 'NULL', 'PRIMARY', 'KEY', 'INDEX',

        # Permission/administration keywords
        'LOGIN', 'LOGOUT', 'USER', 'PASSWORD', 'ROLE', 'GRANT', 'REVOKE', 'TO', 'ON', 'FOR',
        'PERMISSIONS', 'OWNER', 'SHOW', 'GRANTS', 'USERS', 'CHANGE',

        # Operators and punctuation
        'EQ', 'NE', 'LT', 'LE', 'GT', 'GE', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE',
        'ASSIGN', 'COMMA', 'SEMICOLON', 'LPAREN', 'RPAREN', 'DOT',

        # Literals
        'NUMBER', 'STRING', 'IDENTIFIER'
    )

    # Case-insensitive keyword lookup (see t_IDENTIFIER): lowercase lexeme
    # -> token type.
    keywords = {
        'select': 'SELECT',
        'from': 'FROM',
        'where': 'WHERE',
        'insert': 'INSERT',
        'into': 'INTO',
        'values': 'VALUES',
        'update': 'UPDATE',
        'set': 'SET',
        'delete': 'DELETE',
        'create': 'CREATE',
        'table': 'TABLE',
        'drop': 'DROP',
        'int': 'INT',
        'varchar': 'VARCHAR',
        'float': 'FLOAT',
        'boolean': 'BOOLEAN',
        'and': 'AND',
        'or': 'OR',
        'not': 'NOT',
        'null': 'NULL',
        'primary': 'PRIMARY',
        'key': 'KEY',
        'index': 'INDEX',

        # Permission/administration keywords
        'login': 'LOGIN',
        'logout': 'LOGOUT',
        'user': 'USER',
        'password': 'PASSWORD',
        'role': 'ROLE',
        'grant': 'GRANT',
        'revoke': 'REVOKE',
        'to': 'TO',
        'on': 'ON',
        'for': 'FOR',
        'permissions': 'PERMISSIONS',
        'owner': 'OWNER',
        'show': 'SHOW',
        'grants': 'GRANTS',
        'users': 'USERS',
        'change': 'CHANGE'
    }

    # Simple operator tokens. PLY sorts string rules by decreasing regex
    # length, so multi-character operators ('<=', '>=', ':=', '!=|<>') are
    # tried before their single-character prefixes.
    t_EQ = r'='
    t_NE = r'!=|<>'  # accept both SQL spellings of "not equal"
    t_LT = r'<'
    t_LE = r'<='
    t_GT = r'>'
    t_GE = r'>='
    t_PLUS = r'\+'
    t_MINUS = r'-'
    t_TIMES = r'\*'
    t_DIVIDE = r'/'
    t_ASSIGN = r':='
    t_COMMA = r','
    t_SEMICOLON = r';'
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_DOT = r'\.'

    # Numeric literal: converted to int, or float when a decimal point is
    # present.
    def t_NUMBER(self, t):
        r'\d+(\.\d+)?'
        if '.' in t.value:
            t.value = float(t.value)
        else:
            t.value = int(t.value)
        return t

    # Single-quoted string literal. Only the surrounding quotes are stripped;
    # backslash escapes are kept verbatim in the token value.
    def t_STRING(self, t):
        r"'([^'\\]|\\.)*'"
        t.value = t.value[1:-1]  # strip the quotes
        return t

    # Identifier, re-typed as a keyword token when the lowercased lexeme is
    # in the keyword table (keywords are case-insensitive).
    def t_IDENTIFIER(self, t):
        r'[a-zA-Z_][a-zA-Z0-9_]*'
        t.type = self.keywords.get(t.value.lower(), 'IDENTIFIER')
        return t

    # Line comment ('--' to end of line); produces no token. Function rules
    # are matched before string rules, so '--' is never split into two MINUS.
    def t_COMMENT(self, t):
        r'--.*'
        pass  # discard comments

    # Track line numbers so lexer.lineno is meaningful in diagnostics.
    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    # Ignored whitespace (newlines are handled by t_newline above).
    t_ignore = ' \t\r'

    # Error handling: report the offending character and skip it, so lexing
    # continues on a best-effort basis.
    def t_error(self, t):
        print(f"词法错误: 非法字符 '{t.value[0]}' 在位置 {t.lexpos}")
        t.lexer.skip(1)

    def tokenize(self, text):
        """Run the lexer over *text* and return the full list of tokens.

        Resets the line counter so repeated calls on the same instance
        report correct line numbers.
        """
        self.lexer.lineno = 1
        self.lexer.input(text)
        tokens = []
        while True:
            tok = self.lexer.token()
            if not tok:
                break
            tokens.append(tok)
        return tokens

    def test(self, text):
        """Tokenize *text*, print each token's type/value, and return the list."""
        print(f"输入: {text}")
        print("Token列表:")
        tokens = self.tokenize(text)
        for token in tokens:
            print(f"  {token.type}: {token.value}")
        return tokens

# Demo entry point: run a handful of statements through the lexer.
if __name__ == "__main__":
    sql_lexer = SQLLexer()

    # Representative statements covering SELECT/CREATE/INSERT/UPDATE/DELETE.
    samples = (
        "SELECT * FROM users WHERE age > 20",
        "CREATE TABLE users (id INT, name VARCHAR(50))",
        "INSERT INTO users VALUES (1, 'Alice')",
        "UPDATE users SET age = 25 WHERE id = 1",
        "DELETE FROM users WHERE age < 18",
    )

    for sql in samples:
        print()
        print("=" * 50)
        sql_lexer.test(sql)
