class Token :
    def __init__(self , type , value , line_num = None ):
        self.type = type
        self.value = value
        self.lineno = ( type,  value , ( line_num or 0)) # 和 JavaScript的 || 一样, 返回第一个为真的值

    def __str__(self):
       # 实现可读的字符串输出
       return f'Token(type={self.type}, value={self.value}, lineno={self.lineno})'

from  enum import Enum
class ScanState(Enum):
    Initial = 0,  # 初始状态
    Comment  =1,
    IdentifierWord  =2 ,
    String  =3  ,
    Number  =4 ,


class WendLexer:
    keywords = {'true': 'BOOLEAN', 'false': 'BOOLEAN', 'print': 'PRINT', 'println': 'PRINT', 'int': 'TYPE',
                'bool': 'TYPE', 'var': 'VAR', 'fun': 'FUN', 'if': 'IF', 'else': 'ELSE', 'while': 'WHILE',
                'return': 'RETURN'}
    double_char = {'==': 'COMP', '<=': 'COMP', '>=': 'COMP', '!=': 'COMP', '&&': 'AND', '||': 'OR'}
    single_char = {'=': 'ASSIGN', '<': 'COMP', '>': 'COMP', '!': 'NOT', '+': 'PLUS', '-': 'MINUS', '/': 'DIVIDE',
                   '*': 'TIMES', '%': 'MOD', '(': 'LPAREN', ')': 'RPAREN', '{': 'BEGIN', '}': 'END', ';': 'SEMICOLON',
                   ':': 'COLON', ',': 'COMMA'}
    tokens = {'ID', 'STRING', 'INTEGER'} | {v for k, v in keywords.items() | double_char.items() | single_char.items()}
    # ID 变量 ,  STRING 字符串, INTEGER 整数
    def tokenize(self, text   ):
        line_num , index, state , accumulate_token =0, 0, ScanState.Initial,  ''
        while index < len(text):
            current_char = text[index  + 0] if index <( len(text) - 0 ) else ' '
            next_char= text[index + 1] if index  <  (len(text) - 1 ) else ' '
            if state ==ScanState.Initial:  # 初始状态
                if current_char=='/' and next_char=='/':  # 注释
                    state = ScanState.Comment  # 进入注释状态
                elif current_char.isalpha() or current_char == '_':  # 标识符
                    state = ScanState.IdentifierWord   # 进入标识符状态
                    accumulate_token += current_char
                elif current_char.isdigit():  # 数字
                    state = ScanState.Number  # 进入数字状态
                    accumulate_token += current_char
                elif current_char == '"':  # 字符串
                    state = ScanState.String  # 进入字符串状态
                    accumulate_token += current_char
                elif current_char in self.single_char:  # 单字符符号
                    token = Token(self.single_char[current_char], current_char, line_num) # 创建Token对象
                    yield token  # 输出Token对象
                elif  current_char+ next_char in self.double_char:  # 双字符符号
                    token = Token(self.double_char[current_char+ next_char], current_char+ next_char, line_num) # 创建Token对象
                    yield token  # 输出Token对象
                    index += 1  # 跳过下一个字符
                elif current_char  not in ['\r', '\t', ' ', '\n']:  # ignore whitespace
                    raise Exception(f'Lexical error: illegal character \'{current_char}\' at line {line_num}')
            elif state ==ScanState.IdentifierWord:  # 标识符状态
                if current_char.isdigit() or current_char == '_'  or current_char.isalpha() :  # 标识符
                    accumulate_token += current_char
                else :
                    if accumulate_token in self.keywords:  # 关键字
                        yield Token(self.keywords[accumulate_token], accumulate_token, line_num)  # 创建Token对象
                    else:  # 标识符
                        yield Token('ID', accumulate_token, line_num)  # 创建Token对象
                        state = ScanState.Initial  # 退出标识符状态
                        index -=1
                        accumulate_token = ''  # 清空累积字符
            elif state ==ScanState.Number:  # 数字状态
                if current_char.isdigit():  # 数字
                    accumulate_token += current_char
                else :
                    yield Token('INTEGER', accumulate_token, line_num)  # 创建Token对象
                    state = ScanState.Initial  # 退出数字状态
                    accumulate_token = ''  # 清空累积字符
                    index -1 ; # 退回一个字符 , 因为数字后面可能还有符号
            elif state ==ScanState.String:  # 字符串状态
                if current_char !=  '"':  # 字符串结束
                    accumulate_token += current_char
                else :
                    yield Token('STRING', accumulate_token, line_num)  # 创建Token对象
                    state = ScanState.Initial  # 退出字符串状态
                    accumulate_token = ''  # 清空累积字符

            if  current_char == '\n':  # 换行
                line_num += 1  # 行号加1
                if state == ScanState.Comment :
                    state = ScanState.Initial  # 退出注释状态
                    accumulate_token = ''  # 清空累积字符

            index +=1 ;
            # 判断是否结束
            if index >= len(text):
                state= ScanState.Initial  # 退出注释状态
                break

        if state != ScanState.Initial :
            raise Exception('Lexical error: unexpected EOF')

