# Token type tags attached to each token dict produced by Tokenize().
TOKEN_TYPE = ("TOKEN_TYPE_NUMBER", "TOKEN_TYPE_OPERATOR")
# Named aliases so the tokenizer uses the constants instead of repeating
# the literal strings (the tuple was previously defined but never used).
TOKEN_TYPE_NUMBER, TOKEN_TYPE_OPERATOR = TOKEN_TYPE
# Single-character arithmetic operators recognized by the tokenizer.
LIST_OPERATOR = ("+", "-", "*", "/")

def Tokenize(expression):
    """
    Split an arithmetic expression string into a list of tokens.

    Args:
        expression (str): Expression to tokenize, e.g. "11.6+2*3".

    Returns:
        list: One dict per token, each with two keys:
            - "word" (str): the token text.
            - "type" (str): "TOKEN_TYPE_NUMBER" for a run of digits/dots,
              or "TOKEN_TYPE_OPERATOR" for one of "+", "-", "*", "/".

    Notes:
        Any other character (e.g. whitespace) is silently skipped.
        Number syntax is not validated, so a run such as "1..2" is
        emitted as a single number token.
    """
    tokens = []
    i = 0
    n = len(expression)
    while i < n:
        ch = expression[i]
        if ch.isdigit() or ch == '.':
            # Consume a maximal run of digits/dots as one number token.
            start = i
            while i < n and (expression[i].isdigit() or expression[i] == '.'):
                i += 1
            tokens.append({"word": expression[start:i], "type": TOKEN_TYPE_NUMBER})
        elif ch in LIST_OPERATOR:
            tokens.append({"word": ch, "type": TOKEN_TYPE_OPERATOR})
            i += 1
        else:
            # Ignore any unrecognized character.
            i += 1
    return tokens

if __name__ == "__main__":
    # Smoke tests: tokenize each expression and print its tokens.
    # Bug fix: the original "test 2" assigned a new expression but
    # reused the tokens from test 1 instead of re-tokenizing.
    test_expressions = [
        "1+2",
        "11+2+89+2223",
        "11.6+2.77777+89*2223",
        "+11-2*89/2223*/",
    ]
    for idx, expression in enumerate(test_expressions, start=1):
        print(f"test {idx}")
        for token in Tokenize(expression):
            print(token)