# Lexical analyzer (tokenizer)
import sys
import enum
from collections import namedtuple


class Token:
    """A single lexical token: a TokenType member plus the matched source text."""

    def __init__(self, kind, value):
        self.kind = kind    # a TokenType member, e.g. TokenType.EOF
        self.value = value  # the raw source text this token was scanned from

    @staticmethod
    def check_keyword(text):
        """Return the keyword TokenType whose name equals *text*, else None.

        Keywords are exactly the TokenType members whose values lie in
        [100, 200) — see the value ranges declared on TokenType.
        """
        for ty in TokenType:
            # Chained comparison keeps the keyword-range test readable.
            if ty.name == text and 100 <= ty.value < 200:
                return ty
        return None

    def __repr__(self):
        return f'Token(kind={self.kind!r}, value={self.value!r})'


class TokenType(enum.Enum):
    """Every kind of token the lexer can emit.

    The numeric values encode the category, and Token.check_keyword relies
    on the keyword range:
        < 100    structural tokens and literals
        100-199  keywords
        200+     operators
    """
    EOF = -1  # end of input ('\0' sentinel in the Lexer)
    NEWLINE = 0
    NUMBER = 1
    ID = 2  # identifier
    STRING = 3

    # Keywords (values 100-199; Token.check_keyword depends on this range)
    IF = 101
    THEN = 102
    ELSE = 103
    ENDIF = 104
    WHILE = 105
    REPEAT = 106
    ENDWHILE = 107
    PRINT = 108
    LABEL = 109
    GOTO = 110
    LET = 111
    INPUT = 112

    # Operators (values 200+)
    PLUS = 201
    MINUS = 202
    ASTERISK = 203  # *
    SLASH = 204  # /
    ASSIGN = 205  # =
    EQ = 206  # ==
    NE = 207  # !=
    LT = 208  # <
    GT = 209  # >
    GTEQ = 210  # >=
    LTEQ = 211  # <=


class Lexer:
    """Scans a source string and produces Token objects one at a time.

    Use get_token() for single tokens, or iterate the Lexer to receive every
    token up to (but not including) EOF. Any lexing error terminates the
    process via abort().
    """

    def __init__(self, src):
        self.src = src       # the source text being scanned
        self.cur_char = ''   # character at cur_pos
        self.cur_pos = -1    # index of cur_char within src
        self.next_char()     # advance onto the first character

    def next_char(self):
        """Advance to the next character; '\\0' marks end of input (EOF)."""
        self.cur_pos += 1
        if self.cur_pos >= len(self.src):
            self.cur_char = '\0'  # EOF sentinel
        else:
            self.cur_char = self.src[self.cur_pos]

    def peek(self):
        """Return the next character without consuming it ('\\0' at EOF)."""
        if self.cur_pos + 1 >= len(self.src):
            return '\0'  # EOF
        return self.src[self.cur_pos + 1]

    def skip_whitespace(self):
        """Skip spaces, tabs and carriage returns.

        Newlines are NOT skipped — they are significant and become
        NEWLINE tokens. (The original tuple listed '\\t' twice.)
        """
        while self.cur_char in (' ', '\t', '\r'):
            self.next_char()

    def skip_comment(self):
        """Skip a '#' comment up to, but not including, the newline."""
        if self.cur_char == '#':
            # Bug fix: also stop at '\0', otherwise a comment on the last
            # line (no trailing newline) spun forever.
            while self.cur_char not in ('\n', '\0'):
                self.next_char()

    def get_token(self):
        """Scan and return the next Token; aborts the process on bad input."""
        # Skip insignificant whitespace, then a possible single-line comment.
        self.skip_whitespace()
        self.skip_comment()

        token = None

        # Arithmetic operators.
        if self.cur_char == '+':
            token = Token(TokenType.PLUS, self.cur_char)
        elif self.cur_char == '-':
            token = Token(TokenType.MINUS, self.cur_char)
        elif self.cur_char == '*':
            token = Token(TokenType.ASTERISK, self.cur_char)
        elif self.cur_char == '/':
            token = Token(TokenType.SLASH, self.cur_char)

        elif self.cur_char == '\n':
            token = Token(TokenType.NEWLINE, self.cur_char)

        # Assignment and comparison operators (one or two characters).
        elif self.cur_char == '=':
            if self.peek() == '=':  # ==
                last_char = self.cur_char
                self.next_char()
                token = Token(TokenType.EQ, last_char + self.cur_char)
            else:  # =
                token = Token(TokenType.ASSIGN, self.cur_char)
        elif self.cur_char == '>':
            if self.peek() == '=':  # >=
                last_char = self.cur_char
                self.next_char()
                token = Token(TokenType.GTEQ, last_char + self.cur_char)
            else:  # >
                token = Token(TokenType.GT, self.cur_char)
        elif self.cur_char == '<':
            if self.peek() == '=':  # <=
                last_char = self.cur_char
                self.next_char()
                token = Token(TokenType.LTEQ, last_char + self.cur_char)
            else:  # <
                token = Token(TokenType.LT, self.cur_char)

        # '!' is only legal as part of '!='.
        elif self.cur_char == '!':
            if self.peek() == '=':
                last_char = self.cur_char
                self.next_char()
                token = Token(TokenType.NE, last_char + self.cur_char)
            else:
                self.abort('Expected !=, got !' + self.peek())

        # String literal: everything between a pair of double quotes.
        elif self.cur_char == '\"':
            self.next_char()  # step past the opening quote
            start = self.cur_pos

            while self.cur_char != '\"':
                # Disallow control characters and '%' inside strings.
                # Bug fix: '\0' is included so an unterminated string at
                # EOF aborts instead of looping forever.
                if self.cur_char in ('\r', '\n', '\t', '\\', '%', '\0'):
                    self.abort("Illegal character in string.")
                self.next_char()

            text = self.src[start:self.cur_pos]  # excludes both quotes
            token = Token(TokenType.STRING, text)

        # Number: digits, optionally followed by '.' and at least one digit.
        # (38 and 2.32 are legal; .9 and 1. are not.)
        elif self.cur_char.isdigit():
            start = self.cur_pos
            while self.peek().isdigit():
                self.next_char()

            if self.peek() == '.':  # decimal point
                self.next_char()  # cur_pos is now on the '.'

                if not self.peek().isdigit():  # nothing after the point
                    self.abort('Illegal character in number.')

                while self.peek().isdigit():
                    self.next_char()

            text = self.src[start:self.cur_pos + 1]  # cur_pos = last digit
            token = Token(TokenType.NUMBER, text)

        # Identifier or keyword: letter/underscore first, then letters,
        # digits or underscores (bug fix: the original rejected '_' after
        # the first character despite allowing it as the first).
        elif self.cur_char.isalpha() or self.cur_char == '_':
            start = self.cur_pos
            while self.peek().isalnum() or self.peek() == '_':
                self.next_char()
            text = self.src[start:self.cur_pos + 1]
            keyword = Token.check_keyword(text)
            if keyword is None:  # plain identifier
                token = Token(TokenType.ID, text)
            else:
                token = Token(keyword, text)

        elif self.cur_char == '\0':
            token = Token(TokenType.EOF, '')
        else:
            # Bug fix: report the offending character itself (the original
            # printed peek(), one character past the problem).
            self.abort('Invalid token: ' + self.cur_char)

        self.next_char()
        return token

    def abort(self, message):
        """Print a lexing error and terminate the process."""
        sys.exit('Lexing error. ' + message)

    # Generator-based iteration over all tokens before EOF.
    def __iter__(self):
        token = self.get_token()
        while token.kind != TokenType.EOF:
            yield token
            # Bug fix: the original called the module-global `lexer` here,
            # which broke iteration for any other Lexer instance.
            token = self.get_token()


if __name__ == '__main__':
    # Demo 1: walk the raw characters of a tiny program one at a time.
    lexer = Lexer('LET a = 200')
    while lexer.peek() != '\0':
        print(lexer.cur_char)
        lexer.next_char()
    print('-' * 30)

    # Demo 2: tokenize a line exercising operators, a string, identifiers,
    # keywords, numbers and a trailing comment.
    sample = '+- *          /=== >= <= > < != "hello world" num WHILE IF IFWE 32 32.23 # text chars\n'
    lexer = Lexer(sample)
    for token in lexer:
        print(token)
