import copy
import P0080_FIRSTVT_LASTVT as P0080
import P0090_构造算符优先关系表 as P0090


# Token categories used as the "type" field of token dicts.
# NOTE(review): this tuple is not referenced in the visible code (the type
# strings are written out literally below) — presumably kept for documentation.
TOKEN_TYPE = ("TOKEN_TYPE_NUMBER", "TOKEN_TYPE_OPERATOR")
# Terminal symbols recognized by the tokenizer as operators.
# NOTE(review): 'i' presumably stands for the identifier terminal of the
# operator-precedence grammar (see P0080/P0090) — TODO confirm.
LIST_OPERATOR = ("+", "-", "*", "^", "(", ")", "i")

def Tokenize(expression):
    """Split an expression string into a list of tokens.

    Args:
        expression (str): the expression to scan.

    Returns:
        list: one dict per token, each with two keys:
            - word (str): the lexeme text.
            - type (str): "TOKEN_TYPE_NUMBER" for numeric literals,
              "TOKEN_TYPE_OPERATOR" for symbols listed in LIST_OPERATOR.
    """
    tokens = []
    pos = 0
    length = len(expression)
    while pos < length:
        ch = expression[pos]
        if ch.isdigit() or ch == '.':
            # Numeric literal: consume a maximal run of digits/dots
            # and slice it out in one piece.
            start = pos
            while pos < length and (expression[pos].isdigit() or expression[pos] == '.'):
                pos += 1
            tokens.append({"word": expression[start:pos], "type": "TOKEN_TYPE_NUMBER"})
        elif ch in LIST_OPERATOR:
            # Single-character operator token.
            tokens.append({"word": ch, "type": "TOKEN_TYPE_OPERATOR"})
            pos += 1
        else:
            # Anything else (e.g. whitespace) is silently skipped.
            pos += 1
    return tokens

def SyntaxAnalyzer(tokens):
    """Check whether a token stream forms a syntactically valid expression.

    Enforces operand/operator alternation, requires the expression to start
    with a number or "(", to end with a number or ")", and requires
    parentheses to be balanced.

    Fixes over the previous version: a leading operator (e.g. "+6"), a
    trailing operator (e.g. "6+"), unbalanced parentheses (e.g. "(6"),
    "(" directly after a number (e.g. "5(3)"), and a number directly
    after ")" were all incorrectly accepted.

    Args:
        tokens (list): token dicts with "type" and "word" keys, as
            produced by Tokenize().

    Returns:
        bool: True if the tokens form a valid expression, False otherwise
            (an empty token list is considered invalid).
    """
    if not tokens:
        return False
    # Words allowed right after an operand (a number or a closing paren).
    AFTER_OPERAND = ("+", "-", "*", "^", ")")
    # Tokens allowed at the start, after "(", or after a binary operator.
    AFTER_OPERATOR = ("TOKEN_TYPE_NUMBER", "(")
    expected = AFTER_OPERATOR
    depth = 0  # current "(" nesting depth
    last = None
    for token in tokens:
        # A token is acceptable when either its category or its exact
        # word appears in the currently-expected set.
        if token["type"] not in expected and token["word"] not in expected:
            return False
        if token["type"] == "TOKEN_TYPE_NUMBER":
            expected = AFTER_OPERAND
        elif token["word"] == "(":
            depth += 1
            expected = AFTER_OPERATOR
        elif token["word"] == ")":
            depth -= 1
            if depth < 0:  # more ")" than "(" seen so far
                return False
            expected = AFTER_OPERAND
        else:  # binary operator: an operand or "(" must follow
            expected = AFTER_OPERATOR
        last = token
    # Every "(" must be closed, and the expression must end on an operand.
    if depth != 0:
        return False
    return last["type"] == "TOKEN_TYPE_NUMBER" or last["word"] == ")"

if __name__ == '__main__':
    # Demo: tokenize and syntax-check three sample expressions
    # (the first one is deliberately malformed: "*+").
    expressions = [
        "6+ 5-9 *+8+3*2+9 ^2",
        "6+ 5-9 *8+3*2+9 ^2",
        "6+ 5-9 *(8+3)*2+9 ^2",
    ]
    for idx, expr in enumerate(expressions, start=1):
        print("第%d个表达式:" % idx)
        print(expr)
        token_list = Tokenize(expr)
        for tok in token_list:
            print(tok["word"] + ", " + tok["type"])
        rst = SyntaxAnalyzer(token_list)
        print(f"{expr}语法分析结果:{rst}")
