import re
from dataclasses import dataclass
from typing import List, Optional, Generator

@dataclass
class Token:
    """A single lexical token with its position in the source text."""
    type: str    # token category, e.g. 'KEYWORD', 'NUMBER', 'INDENT'
    value: str   # matched source text ('' for synthetic INDENT/DEDENT/EOF)
    line: int    # 1-based source line
    column: int  # 1-based source column

class Lexer:
    """Indentation-aware lexer for a Chinese-keyword language.

    The source is split into lines; changes in leading whitespace emit
    Python-style INDENT/DEDENT tokens, and each line body is matched
    against an ordered list of regular expressions.  Identifiers that
    coincide with a reserved word are re-tagged as KEYWORD.  A trailing
    EOF token is always appended.

    Raises SyntaxError on inconsistent dedents or illegal characters.
    """

    # Reserved words: matched first as IDENTIFIER, then promoted to KEYWORD.
    KEYWORDS = {
        '设', '型', '与', '或', '非', '真', '假', '空',
        '若', '则', '否则', '契合', '以', '类型', '两仪',
        '数', '徽数', '分', '字', '词', '列', '元', '集', '映',
        '周行', '周遍', '遍', '道法', '变体', '记录',
        '篇', '章', '引用', '示人', '借', '异步', '等待',
        '角色', '接收', '发送', '回复', '类型类', '实现',
        '约束', '管道', '部分', '之', '汇编块', '指向',
        '原始指针', '不安全', '中断处理', '启', '返回'
    }

    # Patterns are tried in order, so every multi-character operator must
    # precede any pattern matching its prefix ('==' before '=', '<=' before
    # '<', '->' before '-').  NOTE: the previous ordering listed ASSIGN
    # before EQ, which split '==' into two ASSIGN tokens; fixed here.
    TOKEN_TYPES = [
        ('COMMENT', r'#.*'),
        ('STRING', r'"[^"]*"'),     # no escape sequences supported
        ('NUMBER', r'\d+(\.\d+)?'),
        ('ARROW', r'->'),
        ('FAT_ARROW', r'=>'),
        ('PIPE', r'\|>'),
        ('EQ', r'=='),
        ('NE', r'!='),
        ('LE', r'<='),
        ('GE', r'>='),
        ('LPAREN', r'\('),
        ('RPAREN', r'\)'),
        ('LBRACKET', r'\['),
        ('RBRACKET', r'\]'),
        ('LBRACE', r'\{'),
        ('RBRACE', r'\}'),
        ('COMMA', r','),
        ('COLON', r':'),
        ('ASSIGN', r'='),
        ('LT', r'<'),
        ('GT', r'>'),
        ('PLUS', r'\+'),
        ('MINUS', r'-'),
        ('MUL', r'\*'),
        ('DIV', r'/'),
        ('MOD', r'%'),
        ('DOT', r'\.'),
        # Broad negated character class so arbitrary Unicode (e.g. CJK)
        # identifiers are accepted without enumerating letter ranges.
        ('IDENTIFIER', r'[^\s\(\)\[\]\{\},:+\-*/%<>=!|"\'.]+'),
    ]

    # Compile each pattern exactly once at class-definition time instead of
    # recompiling inside the matching loop for every token of every line.
    _COMPILED = [(name, re.compile(pattern)) for name, pattern in TOKEN_TYPES]

    def __init__(self, source_code: str):
        """Store the source; no lexing happens until tokenize() is called."""
        self.source_code = source_code
        self.tokens: List[Token] = []
        self.indent_stack: List[int] = [0]  # open indentation levels; 0 = top level
        self.current_line = 1
        self.current_column = 1

    def tokenize(self) -> List[Token]:
        """Lex the whole source and return the token list (ends with EOF).

        Raises:
            SyntaxError: on a dedent that matches no enclosing indentation
                level, or on a character no pattern matches.
        """
        # Reset mutable state so tokenize() is safe to call more than once;
        # previously a second call appended duplicate tokens.
        self.tokens = []
        self.indent_stack = [0]
        self.current_line = 1

        lines = self.source_code.splitlines()
        for line_num, line in enumerate(lines, 1):
            self.current_line = line_num
            self.current_column = 1

            stripped_line = line.lstrip()
            if not stripped_line or stripped_line.startswith('#'):
                continue  # skip blank lines and whole-line comments

            # Indentation is counted in characters, so tabs and spaces are
            # treated alike -- mixing them is the caller's responsibility.
            indent_level = len(line) - len(stripped_line)
            if indent_level > self.indent_stack[-1]:
                self.indent_stack.append(indent_level)
                self.tokens.append(Token('INDENT', '', line_num, 1))
            elif indent_level < self.indent_stack[-1]:
                # Pop until we return to a previously seen level; a level
                # that was never opened is an inconsistent dedent.
                while indent_level < self.indent_stack[-1]:
                    self.indent_stack.pop()
                    self.tokens.append(Token('DEDENT', '', line_num, 1))
                if indent_level != self.indent_stack[-1]:
                    raise SyntaxError(f"不一致的缩进在行 {line_num}")

            self._tokenize_line(stripped_line, indent_level + 1)

        # Close any indentation levels still open at end of file.
        while len(self.indent_stack) > 1:
            self.indent_stack.pop()
            self.tokens.append(Token('DEDENT', '', self.current_line, 1))

        self.tokens.append(Token('EOF', '', self.current_line, 1))
        return self.tokens

    def _tokenize_line(self, line: str, start_col: int):
        """Tokenize one whitespace-stripped line.

        start_col is the 1-based column of the line's first character, so
        reported token columns account for the stripped indentation.
        """
        pos = 0
        while pos < len(line):
            # Skip intra-line whitespace between tokens.
            if line[pos].isspace():
                pos += 1
                continue

            for token_type, regex in self._COMPILED:
                match = regex.match(line, pos)
                if match:
                    break
            else:
                raise SyntaxError(f"非法字符 '{line[pos]}' 在行 {self.current_line}, 列 {start_col + pos}")

            if token_type == 'COMMENT':
                return  # rest of the line is a comment; discard it

            value = match.group(0)
            if token_type == 'IDENTIFIER' and value in self.KEYWORDS:
                token_type = 'KEYWORD'

            self.tokens.append(Token(token_type, value, self.current_line, start_col + pos))
            pos = match.end()
