from collections import OrderedDict
import re

# Single-character operator/delimiter symbols recognized by the manual scanner.
SYMBOL_CHARS = set('+-*/%=<>&|!^~;:,(){}[]')
# Every character the manual scanner accepts; anything outside this set is
# reported as an illegal character sequence by Lex.scan.
# NOTE(review): '.' is absent, so a float written with a leading dot (e.g. ".5")
# hits the invalid-character path before the numeric branch — confirm intended.
ALL_VALID_CHARS = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_" + ''.join(SYMBOL_CHARS) + "'\" \t\n")

class Token:
    """A single lexical token with its source position.

    Attributes:
        type: token category, e.g. 'ID', 'KEYWORD', 'NUM', 'STRING', 'SYM'.
        value: the lexeme text (quotes already stripped for strings).
        line_num: 1-based line where the token starts.
        col_num: 1-based column where the token starts.
    """

    def __init__(self, type_, value, line_num, col_num):
        self.type, self.value = type_, value
        self.line_num, self.col_num = line_num, col_num

    def __repr__(self):
        parts = (self.type, self.value, self.line_num, self.col_num)
        return "Token({}, {}, {}, {})".format(*parts)

class Lex:
    """Lexical analyzer for a small C-like language.

    Two independent scanning strategies are provided:

    * ``scan``      -- manual character-by-character scanner.
    * ``auto_scan`` -- regex-table scanner driven by ``token_patterns``.

    Both populate ``self.tokens`` (a list of Token) and ``self.errors``
    (human-readable strings; the message text is in Chinese to match the
    existing UI and is preserved verbatim).
    """

    def __init__(self):
        self.source_code = ""
        self.reset_state()
        # Keyword -> numeric code table; insertion order is the code order.
        self.keywords = OrderedDict([
            ('char', 1), ('int', 2), ('short', 3), ('long', 4),
            ('float', 5), ('double', 6), ('signed', 7), ('unsigned', 8),
            ('struct', 9), ('union', 10), ('enum', 11), ('void', 12),
            ('sizeof', 13), ('typedef', 14), ('const', 15), ('volatile', 16),
            ('auto', 17), ('register', 18), ('static', 19), ('extern', 20),
            ('if', 21), ('else', 22), ('switch', 23), ('default', 24),
            ('case', 25), ('while', 26), ('do', 27), ('for', 28),
            ('break', 29), ('continue', 30), ('return', 31), ('goto', 32)
        ])
        # Ordered alternation used by auto_scan(); earlier entries win ties.
        self.token_patterns = [
            ('COMMENT', r'/\*[\s\S]*?\*/|//.*?$'),
            ('STRING', r'"[^"\\]*(\\.[^"\\]*)*"|\'[^\'\\]*(\\.[^\'\\]*)*\''),
            ('ID', r'[A-Za-z_][A-Za-z0-9_]*'),
            ('NUM', r'0[xX][0-9a-fA-F]+|0[0-7]+|[0-9]*\.[0-9]+([eE][+-]?[0-9]+)?|[0-9]+'),
            ('OP', r'\+\+|--|==|!=|<=|>=|&&|\|\||\+=|-=|\*=|/=|%=|[+\-*/%=<>&|!^~]'),
            ('DELIM', r'[;:,(){}\[\]]'),
            ('SKIP', r'[ \t\n]+'),
            ('MISMATCH', r'.'),
        ]

    def reset_state(self):
        """Reset cursor, position tracking, and accumulated output."""
        self.position = 0
        self.line_num = 1
        self.col_num = 1
        self.errors = []
        self.tokens = []

    def load_source(self, code):
        """Install *code* as the text to scan and reset all scanner state."""
        self.source_code = code
        self.reset_state()

    def _error(self, message, err_str=None):
        """Record an error at the current position.

        Returns an ('ERROR', ...) tuple for backward compatibility with
        older callers; the scanners themselves ignore the return value.
        """
        if err_str:
            self.errors.append(f"第{self.line_num}行第{self.col_num}列：{message}，原词='{err_str}'")
        else:
            self.errors.append(f"第{self.line_num}行第{self.col_num}列：{message}")
        return ('ERROR', err_str or message, self.line_num, self.col_num)

    def get_char(self):
        """Consume and return the next character, or None at end of input.

        Maintains 1-based line/column counters as a side effect.
        """
        if self.position >= len(self.source_code):
            return None
        ch = self.source_code[self.position]
        self.position += 1
        if ch == '\n':
            self.line_num += 1
            self.col_num = 1
        else:
            self.col_num += 1
        return ch

    def peek_char(self, n=1):
        """Return the *n*-th upcoming character (1-based) without consuming."""
        pos = self.position + n - 1
        return self.source_code[pos] if pos < len(self.source_code) else None

    def _skip_whitespace(self):
        """Consume any run of whitespace at the cursor."""
        while self.peek_char() and self.peek_char().isspace():
            self.get_char()

    def _skip_comment(self):
        """Consume one comment if the cursor is at '//' or '/*'.

        Block comments nest ('/* /* */ */'); an unterminated block comment
        is reported as an error. Returns True if a comment was consumed.
        """
        if self.peek_char() != '/':
            return False
        if self.peek_char(2) == '/':
            while self.peek_char() and self.peek_char() != '\n':
                self.get_char()
            return True
        if self.peek_char(2) == '*':
            self.get_char(); self.get_char(); depth = 1
            while depth > 0:
                if self.peek_char() is None:
                    self._error("未终止的多行注释")
                    return True
                if self.peek_char() == '*' and self.peek_char(2) == '/':
                    self.get_char(); self.get_char(); depth -= 1
                elif self.peek_char() == '/' and self.peek_char(2) == '*':
                    self.get_char(); self.get_char(); depth += 1
                else:
                    self.get_char()
            return True
        return False

    def scan(self):
        """Tokenize the loaded source with the manual scanner.

        Returns ``self.tokens``; errors accumulate in ``self.errors``.
        """
        scan_dispatch = {
            '"': self.scan_string_literal,
            "'": self.scan_string_literal,
        }

        while self.position < len(self.source_code):
            self._skip_whitespace()
            if self._skip_comment():
                continue
            ch = self.peek_char()
            if not ch:
                break
            # A '.' immediately followed by a digit begins a float literal
            # (e.g. ".5").  It must bypass the invalid-character check below,
            # because '.' is not a member of ALL_VALID_CHARS.
            starts_float = ch == '.' and (self.peek_char(2) or '').isdigit()
            if ch not in ALL_VALID_CHARS and not starts_float:
                # Greedily gather the whole illegal run so one error covers it.
                buf = [self.get_char()]
                while True:
                    nxt = self.peek_char()
                    if not nxt or nxt.isspace() or nxt in SYMBOL_CHARS or nxt == '/':
                        break
                    buf.append(self.get_char())
                err_str = ''.join(buf)
                self._error("非法字符序列", err_str)
                continue
            if ch in scan_dispatch:
                token = scan_dispatch[ch]()
            elif ch.isdigit() or starts_float:
                token = self.scan_number()
            elif ch.isalpha() or ch == '_':
                token = self.scan_identifier()
            else:
                token = self.scan_symbol()
            if token:  # scanners return None after reporting an error
                self.tokens.append(token)
        return self.tokens

    def scan_identifier(self):
        """Scan an identifier or keyword starting at the cursor."""
        start_col = self.col_num
        buf = []
        while self.peek_char() and (self.peek_char().isalnum() or self.peek_char() == '_'):
            buf.append(self.get_char())
        word = ''.join(buf)
        token_type = 'KEYWORD' if word in self.keywords else 'ID'
        return Token(token_type, word, self.line_num, start_col)

    def scan_number(self):
        """Scan a numeric literal and dispatch to the matching validator.

        Collects characters up to the next whitespace/symbol/'/' boundary,
        then classifies the lexeme as hex, octal, float, or decimal.
        """
        start_col = self.col_num
        buf = [self.get_char()]
        while True:
            nxt = self.peek_char()
            if not nxt or nxt.isspace() or nxt in SYMBOL_CHARS or nxt == '/':
                break
            buf.append(self.get_char())
        num_str = ''.join(buf)
        if num_str.startswith(('0x', '0X')):
            return self.scan_hex(num_str, start_col)
        elif num_str.startswith('0') and len(num_str) > 1 and num_str[1].isdigit():
            return self.scan_oct(num_str, start_col)
        elif '.' in num_str or 'e' in num_str or 'E' in num_str:
            return self.scan_float(num_str, start_col)
        else:
            return self.scan_decimal(num_str, start_col)

    def scan_hex(self, num_str, start_col):
        """Validate a hexadecimal literal ('0x...'); None on error."""
        digits = num_str[2:]
        if not digits:
            # A bare "0x" has no digits and must be rejected, not tokenized.
            self._error("非法十六进制数", num_str)
            return None
        for c in digits:
            if c not in '0123456789abcdefABCDEF':
                self._error("非法十六进制数", num_str)
                return None
        return Token('NUM', num_str, self.line_num, start_col)

    def scan_oct(self, num_str, start_col):
        """Validate an octal literal ('0' + digits); None on error."""
        for c in num_str[1:]:
            if c not in '01234567':
                self._error("非法八进制数", num_str)
                return None
        return Token('NUM', num_str, self.line_num, start_col)

    def scan_decimal(self, num_str, start_col):
        """Validate a plain decimal literal; None on error.

        A leading zero is reported as a diagnostic but (deliberately) still
        yields a token when the remaining characters are all digits.
        """
        if num_str.startswith('0') and len(num_str) > 1:
            self._error("多余前导零", num_str)
        if not num_str.isdigit():
            self._error("非法数字格式", num_str)
            return None
        return Token('NUM', num_str, self.line_num, start_col)

    def scan_float(self, num_str, start_col):
        """Validate a float literal (optional exponent); None on error."""
        float_pattern = re.compile(r'^[0-9]*\.?[0-9]+([eE][+-]?[0-9]+)?$')
        if not float_pattern.fullmatch(num_str):
            self._error("非法浮点格式", num_str)
            return None
        return Token('NUM', num_str, self.line_num, start_col)

    def scan_string_literal(self):
        """Scan a single- or double-quoted string on one line.

        Escaped characters are appended without their backslash; a newline
        or EOF before the closing quote is an error (returns None).  The
        token's value excludes the surrounding quotes.
        """
        quote = self.get_char()
        buf, esc = [], False
        # col_num already advanced past the quote, so subtract one.
        start_line, start_col = self.line_num, self.col_num - 1
        while True:
            ch = self.get_char()
            if ch is None or ch == '\n':
                self._error("未终止的字符串", quote + ''.join(buf))
                return None
            if esc:
                buf.append(ch); esc = False
            elif ch == '\\':
                esc = True
            elif ch == quote:
                break
            else:
                buf.append(ch)
        return Token('STRING', ''.join(buf), start_line, start_col)

    def scan_symbol(self):
        """Scan an operator or delimiter, preferring two-character operators."""
        two_char = self.peek_char() + (self.peek_char(2) or '')
        multi_ops = {'++', '--', '==', '!=', '<=', '>=', '&&', '||', '+=', '-=', '*=', '/=', '%='}
        if two_char in multi_ops:
            c1, c2 = self.get_char(), self.get_char()
            return Token('SYM', c1 + c2, self.line_num, self.col_num - 2)
        single_char = self.get_char()
        return Token('SYM', single_char, self.line_num, self.col_num - 1)

    def get_keyword_code(self, keyword):
        """Return the numeric code for *keyword*, or None if not a keyword."""
        return self.keywords.get(keyword)

    def get_errors(self):
        """Return the accumulated error messages."""
        return self.errors

    def auto_scan(self):
        """Tokenize ``self.source_code`` with the regex table.

        Fixes over the previous version: the match position is advanced
        *before* any branch can skip the rest of the loop body (the old
        code never advanced ``pos`` for COMMENT/SKIP/MISMATCH lexemes and
        therefore looped forever on the first whitespace character), and
        line/column bookkeeping is applied to every lexeme so tokens after
        a multi-line string keep correct positions.
        """
        self.tokens = []
        self.errors = []
        tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in self.token_patterns)
        get_token = re.compile(tok_regex, re.MULTILINE).match

        line_num = 1
        line_start = 0
        pos = 0

        while True:
            mo = get_token(self.source_code, pos)
            if not mo:
                break

            kind = mo.lastgroup
            value = mo.group()
            col = mo.start() - line_start + 1
            pos = mo.end()  # always advance, even for skipped/error lexemes

            if kind not in {'COMMENT', 'SKIP'}:
                token = None
                if kind == 'ID':
                    token_type = 'KEYWORD' if value in self.keywords else 'ID'
                    token = Token(token_type, value, line_num, col)
                elif kind == 'NUM':
                    # NOTE(review): unlike scan(), numeric digits are not
                    # re-validated here (hex/octal digits, leading zeros).
                    token = Token('NUM', value, line_num, col)
                elif kind == 'STRING':
                    token = Token('STRING', value[1:-1], line_num, col)  # strip quotes
                elif kind in {'OP', 'DELIM'}:
                    # NOTE(review): scan() labels delimiters 'SYM' while this
                    # path uses 'DELIM' — preserved as-is for compatibility.
                    token = Token('SYM' if kind == 'OP' else 'DELIM', value, line_num, col)
                elif kind == 'MISMATCH':
                    self.errors.append(f"第{line_num}行第{col}列：非法字符 '{value}'")
                if token:
                    self.tokens.append(token)

            # Line bookkeeping for ANY lexeme that spans newlines.
            if '\n' in value:
                line_num += value.count('\n')
                line_start = mo.start() + value.rindex('\n') + 1

        return self.tokens

def run_lexer(code, use_auto=False):
    """Unified entry point: lex *code* and return (tokens, errors).

    Args:
        code: source text to tokenize.
        use_auto: when True, use the regex-table scanner (``auto_scan``)
            instead of the manual character scanner (``scan``).

    Returns:
        A tuple of (list of Token, list of error strings).
    """
    lexer = Lex()
    lexer.load_source(code)
    scanner = lexer.auto_scan if use_auto else lexer.scan
    return scanner(), lexer.get_errors()

if __name__ == "__main__":
    # Smoke test: lex a tiny arithmetic-expression program and dump results.
    test_code = '''//算术表达式
main() {
       int x, y, z;
       x = 9;
       y = 3;
       z = x - y * y - x % 3;
       write(x);
       write(y);
       write(z);
       }
    '''
    demo = Lex()
    demo.load_source(test_code)
    print("Tokens:")
    for token in demo.scan():
        print(token)  # Token.__repr__ shows type, value, and position
    print("Errors:")
    for message in demo.get_errors():
        print(message)