# -*- coding: utf-8 -*-

"""有限自动机，构建的词法分析器能解析如下语句
age >= 45
int age = 40
2+3*5
"""

from enum import Enum

from simple_enum import TokenType

class DfaState(Enum):
    """States of the finite automaton that splits input into tokens.

    The DFA states do not map one-to-one onto token types. Some token
    types are split across several states — e.g. the integer literal '0'
    gets its own ZeroLiteral state — while tokens that behave the same
    way character-by-character (operators, bounds) are merged into a
    single state. When the Token object is finally produced, its
    concrete TokenType is recovered by looking its text up in the
    TOKENTYPES table: two hash lookups traded for one comparison,
    avoiding extra if-branches.

    [Initial] ---- ischar ---> [Id] ---- iskeyword ---> [KeyWord]
    [Initial] ---- 0 ---> [ZeroLiteral]
    [Initial] ---- is9alpha ---> [IntLiteral]
    [Initial] ---- +-*/ ---> [Operation]
    [Initial] ---- ' ---> [StringLiteral]
    [Initial] ---- >< ---> [ComparisionT] ---- = ---> [ComparisionE]
    [Initial] ---- = ---> [Assignment] ---- = ---> [ComparisionE]
    [Initial] ---- ;() ---> [Bound]
    """
    Initial = 11    # initial state

    IntLiteral = 1    # integer literal state    [1-9][0-9]*
    ZeroLiteral = 2   # zero literal    0
    StringLiteral = 3    # string literal state    '.*'

    Operation = 4    # arithmetic operator state
    # Plus = 3    # plus sign    +
    # Minus = 4    # minus sign    -
    # Star = 5    # asterisk    *
    # Slash = 6    # slash    /

    ComparisionT = 5    # comparison operator state T (first char seen)
    # GT = 7    # greater than    >
    # LT = 8    # less than    <

    ComparisionE = 6    # comparison operator state E (trailing '=' seen)
    # EQ = 9    # equal    ==
    # GE = 10    # greater or equal    >=
    # LE = 11    # less or equal    <=

    Bound = 7    # bound/punctuation state
    # Semicolon = 12    # semicolon    ;
    # LeftParen = 13    # left parenthesis    (
    # RightParen = 14    # right parenthesis    )

    KeyWord = 8    # keyword state
    # Int = 15    # keyword    int
    # String = 16    # keyword    string
    # Bool = 17    # keyword    bool

    Id = 9    # identifier state
    # Identifier = 18    # identifier    [a-zA-Z][a-zA-Z0-9_]*

    Assignment  = 10    # assignment state
    # Assignment = 19    # assignment    =

    # If = 20    # keyword    if
    # Else = 21    # keyword    else
    # While = 22    # keyword    while
    # For = 23    # keyword    for
    # Break = 24    # keyword    break
    # Continue = 25    # keyword    continue
    # BTrue = 26    # keyword    true
    # BFalse = 27    # keyword    false


# Character-class tables: hash sets give O(1) membership tests when the
# automaton classifies a single character.
CHARS = {chr(c) for c in range(ord('A'), ord('Z') + 1)}
CHARS |= {chr(c) for c in range(ord('a'), ord('z') + 1)}
DIGITALS = set('0123456789')
ZERO = '0'
DIGITALS9 = DIGITALS - {ZERO}           # leading digit of a non-zero literal
OPERATIONS = set('+-*/')
COMPARISONS = set('><')
BOUNDS = set(';()')
ASSIGNMENT = '='
QUOTE = "'"
UNDERLINE = '_'
KEYWORDS = {'int', 'string', 'bool', 'if', 'else', 'while', 'for',
            'break', 'continue', 'true', 'false', 'and', 'or'}
# Characters that may legally terminate an integer literal.
END = set(' \t\r\n);')

# Lookup table: token text -> concrete TokenType. The DFA lumps tokens
# with identical per-character behaviour into one state; once a token's
# text is complete, its concrete type is recovered here by hash lookup.
TOKENTYPES = {
    # single-character arithmetic operators
    '+': TokenType.Plus,
    '-': TokenType.Minus,
    '*': TokenType.Star,
    '/': TokenType.Slash,

    # single-character comparison operators
    '>': TokenType.GT,
    '<': TokenType.LT,

    # two-character comparison operators
    '==': TokenType.EQ,
    '>=': TokenType.GE,
    '<=': TokenType.LE,

    # bounds / punctuation
    ';': TokenType.Semicolon,
    '(': TokenType.LeftParen,
    ')': TokenType.RightParen,

    # keywords (an identifier whose text appears in KEYWORDS is re-typed here)
    'int': TokenType.Int,
    'string': TokenType.String,
    'bool': TokenType.Bool,
    'if': TokenType.If,
    'else': TokenType.Else,
    'while': TokenType.While,
    'for': TokenType.For,
    'break': TokenType.Break,
    'continue': TokenType.Continue,
    'true': TokenType.BTrue,
    'false': TokenType.BFalse,
    'and': TokenType.And,
    'or': TokenType.Or
}


class Token():
    """A lexical token: a type tag plus the text accumulated for it."""

    def __init__(self, token_type):
        # Type tag; may be refined after construction (e.g. an Identifier
        # whose text turns out to be a keyword, or '>' extended to '>=').
        self.token_type = token_type
        # Characters collected so far.
        self.text = ''

    def append(self, ch):
        """Accumulate one more character of this token's text."""
        self.text = self.text + ch


class IllegalEndingError(Exception):
    """Raised when a token is terminated by a character the DFA does not accept.

    No body needed: the original __init__ merely forwarded *args to
    Exception.__init__, which is exactly what the inherited one does.
    """


class FiniteAuto():
    """Finite automaton driving the lexer.

    Single-character classification uses hash-set membership (CHARS,
    DIGITALS, ...) instead of chains of range comparisons, and state
    dispatch goes through the auto_handles table rather than if/elif.

    NOTE: character comparisons below use '==', not 'is' — the original
    relied on CPython's interning of one-character strings, which is an
    implementation detail and not a language guarantee.
    """

    def __init__(self) -> None:
        self.tokens = None               # output list, installed by prepare_container()
        self.state = DfaState.Initial    # current DFA state
        self.token = None                # Token being accumulated, or None

    def prepare_container(self, tokens):
        """Install the list that completed tokens are appended to."""
        self.tokens = tokens

    def handle_Initial(self, ch):
        """Open a new token from its first character.

        Characters that open no token (whitespace, the end-of-input
        sentinel '') are silently skipped.
        """
        if ch in CHARS:
            self.state = DfaState.Id
            self.token = Token(TokenType.Identifier)
            self.token.append(ch)
        elif ch in DIGITALS9:
            self.state = DfaState.IntLiteral
            self.token = Token(TokenType.IntLiteral)
            self.token.append(ch)
        elif ch == ZERO:
            self.state = DfaState.ZeroLiteral
            self.token = Token(TokenType.IntLiteral)
            self.token.append(ch)
        elif ch == QUOTE:
            self.state = DfaState.StringLiteral
            self.token = Token(TokenType.StringLiteral)
            self.token.append(ch)
        elif ch in OPERATIONS:
            self.state = DfaState.Operation
            self.token = Token(TOKENTYPES[ch])
            self.token.append(ch)
        elif ch in COMPARISONS:
            self.state = DfaState.ComparisionT
            self.token = Token(TOKENTYPES[ch])
            self.token.append(ch)
        elif ch == ASSIGNMENT:
            self.state = DfaState.Assignment
            self.token = Token(TokenType.Assignment)
            self.token.append(ch)
        elif ch in BOUNDS:
            self.state = DfaState.Bound
            self.token = Token(TOKENTYPES[ch])
            self.token.append(ch)

    def handle_Id(self, ch):
        """Grow an identifier; on a terminator, re-type it as a keyword if its text is one."""
        if ch in CHARS or ch in DIGITALS or ch == UNDERLINE:
            self.token.append(ch)
        else:
            if self.token.text in KEYWORDS:
                self.state = DfaState.KeyWord
                self.token.token_type = TOKENTYPES[self.token.text]
            self.reset(ch)

    def handle_KeyWord(self, ch):
        """A keyword is already complete; finish it and re-examine ch."""
        self.reset(ch)

    def handle_IntLiteral(self, ch):
        """Grow a non-zero integer literal; only a known terminator may end it."""
        if ch in DIGITALS:
            self.token.append(ch)
        # BUG FIX: also accept the end-of-input sentinel '' as a terminator,
        # so input ending in a digit (no trailing newline) does not raise.
        elif not ch or ch in END or ch in OPERATIONS or ch in COMPARISONS or ch == ASSIGNMENT:
            self.reset(ch)
        else:
            raise IllegalEndingError('Illegal ending in int literal')

    def handle_ZeroLiteral(self, ch):
        """'0' stands alone: no digit or letter may follow it."""
        # BUG FIX: accept the end-of-input sentinel '' here too (see above).
        if not ch or ch in END or ch in OPERATIONS or ch in COMPARISONS or ch == ASSIGNMENT:
            self.reset(ch)
        else:
            raise IllegalEndingError('Illegal ending in zero literal')

    def handle_StringLiteral(self, ch):
        """Accumulate everything up to and including the closing quote.

        NOTE(review): an unterminated string literal at end of input is
        silently dropped — confirm whether that should raise instead.
        """
        if self.token.text.count(QUOTE) == 2:   # both quotes seen: literal complete
            self.reset(ch)
        else:
            self.token.append(ch)

    def handle_Operation(self, ch):
        """Operators are a single character; finish and re-examine ch."""
        self.reset(ch)

    def handle_Assignment(self, ch):
        """'=' followed by another '=' becomes the equality operator '=='."""
        if ch == ASSIGNMENT:
            self.state = DfaState.ComparisionE
            self.token.token_type = TokenType.EQ
            self.token.append(ch)
        else:
            self.reset(ch)

    def handle_ComparisionT(self, ch):
        """'>' or '<' followed by '=' becomes '>=' or '<='."""
        if ch == ASSIGNMENT:
            self.state = DfaState.ComparisionE
            self.token.append(ch)
            # BUG FIX: look the type up from the full text ('>=' -> GE,
            # '<=' -> LE); the original hard-coded TokenType.EQ here.
            self.token.token_type = TOKENTYPES[self.token.text]
        else:
            self.reset(ch)

    def handle_ComparisionE(self, ch):
        """A two-character comparison is complete; finish and re-examine ch."""
        self.reset(ch)

    def handle_Bound(self, ch):
        """Bounds are a single character; finish and re-examine ch."""
        self.reset(ch)

    # State -> unbound handler; dict dispatch replaces a long if/elif chain.
    auto_handles = {
        DfaState.Initial: handle_Initial,
        DfaState.Id: handle_Id,
        DfaState.KeyWord: handle_KeyWord,
        DfaState.IntLiteral: handle_IntLiteral,
        DfaState.ZeroLiteral: handle_ZeroLiteral,
        DfaState.StringLiteral: handle_StringLiteral,
        DfaState.Operation: handle_Operation,
        DfaState.Assignment: handle_Assignment,
        DfaState.ComparisionT: handle_ComparisionT,
        DfaState.ComparisionE: handle_ComparisionE,
        DfaState.Bound: handle_Bound,
    }

    def recognize(self, ch):
        """Feed one character: transition from the current state, consuming ch."""
        self.auto_handles[self.state](self, ch)

    def reset(self, ch):
        """Finish the current token, return to Initial, then re-examine ch."""
        self.tokens.append(self.token)
        self.state = DfaState.Initial
        self.token = None
        self.recognize(ch)

    def tokenize(self, chrs):
        """Run the whole character stream, then flush with the '' sentinel."""
        for ch in chrs:
            self.recognize(ch)
        self.recognize('')

    def dump_tokens(self):
        """Return the token container installed by prepare_container()."""
        return self.tokens


if __name__ == '__main__':
    finite_auto = FiniteAuto()
    # Tokenize demo.play line by line, printing each line's tokens.
    # (File has Chinese comments in examples; read it explicitly as UTF-8.)
    with open('demo.play', mode='r', encoding='utf-8') as fp:
        for line in fp:
            # Fresh container per line so each line's tokens print once.
            finite_auto.prepare_container([])
            finite_auto.tokenize(line)
            for token in finite_auto.dump_tokens():
                print('%s    %s' % (token.token_type, token.text))
            print('-----------')
    # BUG FIX: the original re-dumped the last line's tokens after the
    # loop, printing them a second time; that duplicate dump is removed,
    # along with a redundant prepare_container([]) before the loop.

