import P20020_识别字符串中的Word和Token_增加错误处理 as P20020
# Syntax check
def SyntaxCheck(tokens): 
    """Scan a token stream and report simple syntax errors.

    Rules enforced:
      * two operators may not appear back to back,
      * two numbers may not appear back to back,
      * the final token may not be an operator.
    Each violation is printed and counted; a summary line is printed at
    the end.

    Args:
        tokens: iterable of dicts with "type" and "word" keys
                (as produced by P20020.Tokenize).

    Returns:
        The number of syntax errors found (0 means the check passed).
    """
    errorNum = 0
    prev_type = ""
    prev_word = ""
    for tok in tokens:
        cur_type = tok["type"]
        cur_word = tok["word"]
        # Two identical adjacent token types are illegal for both
        # operators and numbers (the two original checks were mutually
        # exclusive, so one merged test is equivalent).
        if prev_type == cur_type and cur_type in (
            "TOKEN_TYPE_OPERATOR",
            "TOKEN_TYPE_NUMBER",
        ):
            errorNum += 1
            print("error between: %s %s" % (prev_word, cur_word))
        prev_word = cur_word
        prev_type = cur_type
    # The last token of a well-formed expression cannot be an operator.
    if prev_type == "TOKEN_TYPE_OPERATOR":
        errorNum += 1
        print("error in last token: " + prev_type)
    if errorNum == 0:
        print("There is no error. Syntax check passed successfully.")
    else:
        print("Found %d errors." % errorNum)
    return errorNum

# Test
if __name__ == "__main__":
    # Self-test: run the checker over two sample expressions, one that
    # should pass and one that ends in a trailing operator.
    samples = [
        ("语法检测 test 1", "-11+2 + 255.33"),
        ("语法检测 test 2", "11+2 + -255.33-"),
    ]
    for label, expression in samples:
        print(label)
        SyntaxCheck(P20020.Tokenize(expression))