# -*- coding: utf-8 -*-

"""
<simple_calculator.py>理解了递归下降算法，但对于二元表达式，也产生了左递归与结合性的问题
...
语法解析器的核心就是写出语法的文法，不同的文法可以构造出不同的语法解析器。
< additive ::= additive + multiplicative > 原始写法，有左递归问题
< additive ::= multiplicative + additive > 改进写法，解决左递归问题，引出右结合问题
< additive ::= multiplicative (+ multiplicative)* > 可以解决左递归问题，同时灵活处置 节点的父子关系，解决结合性问题

优先级：乘法运算优先级高于加减运算，所以在运算文法中，将乘法运算作为加法运算的子规则，解析成ast就是子节点
结合性：同样优先级的运算，左优先还是右优先？，左结合，1+2+3 ::= (1+2)+3，1+2的结果将作为子节点参与后边的 +3 运算
"""

from simple_enum import TokenType, ASTNodeType
from simple_lexer import Token
from simple_parser import SimpleASTNode, TokenReader, IllegalGrammerError

class SimpleParser:
    """Recursive-descent parser for expressions built from binary operators.

    Supported operators: assignment (=), boolean (and | or), comparison
    (> | < | == | >= | <=), additive (+ | -), multiplicative (* | /),
    and grouping with parentheses.

    1) Naive nested grammar — precedence is encoded by making each
       higher-precedence rule a sub-rule of the lower-precedence one:

       expression ::= assignmentExpression Semicolon | boolExpression Semicolon
       assignmentExpression ::= Identifier Assignment boolExpression
       boolExpression ::= comparision | boolExpression And comparision | boolExpression Or comparision
       comparision ::= additiveEx | additiveEx GT additiveEx | additiveEx LT additiveEx | additiveEx EQ additiveEx | additiveEx GE additiveEx | additiveEx LE additiveEx
       additiveEx ::= multiplicativeEX | additiveEx Plus multiplicativeEX | additiveEx Minus multiplicativeEX
       multiplicativeEX ::= primary | multiplicativeEX Star primary | multiplicativeEX Slash primary
       primary ::= Identifier | IntLiteral | LeftParen boolExpression RightParen

    2) The boolean, additive and multiplicative rules above are LEFT-recursive:
       a top-down parser that calls the rule on itself as its first step would
       recurse forever (stack overflow).

    3) Rewriting them as right recursion, e.g.
       additiveEx ::= multiplicativeEX | multiplicativeEX Plus additiveEx
       removes the left recursion, but each earlier operand then becomes the
       parent of everything that follows, i.e. the operators turn
       RIGHT-associative.

    The final grammar replaces the recursion with a loop.  Inside a loop the
    code is free to attach nodes left-to-right, which keeps the operators
    left-associative (1+2+3 parses as (1+2)+3):

       boolExpression ::= comparision ((And | Or) comparision)*
       comparision ::= additiveEx ((GT | LT | EQ | GE | LE) additiveEx)?
       additiveEx ::= multiplicativeEX ((Plus | Minus) multiplicativeEX)*
       multiplicativeEX ::= primary ((Star | Slash) primary)*
    """

    def __init__(self, tokens) -> None:
        # A trailing semicolon acts as a sentinel so every parse loop
        # eventually meets a non-operator token and terminates.
        tokens.append(Token(TokenType.Semicolon))
        self.tokenReader = TokenReader(tokens)

    def parse_expressionStmt(self):
        """Parse `(assignmentExpression | boolExpression) Semicolon` statements
        until only the sentinel semicolon remains; return the root AST node.

        Raises IllegalGrammerError when the remaining tokens match neither
        alternative.
        """
        node = SimpleASTNode(ASTNodeType.ExpressionStmt, 'root')
        while True:
            pos = self.tokenReader.get_position()

            child1 = self.parse_assignmentExpression()
            if child1 and self.tokenReader.peek().token_type is TokenType.Semicolon:
                self.tokenReader.read()
                node.add_child(child1)
                continue
            self.tokenReader.set_position(pos)

            child2 = self.parse_boolExpression()
            if child2 and self.tokenReader.peek().token_type is TokenType.Semicolon:
                self.tokenReader.read()
                node.add_child(child2)
                continue
            self.tokenReader.set_position(pos)

            if self.tokenReader.peek().token_type is TokenType.Semicolon:
                break  # only the sentinel remains: all input consumed
            raise IllegalGrammerError('out of power of this parser')

        return node

    def parse_assignmentExpression(self):
        """assignmentExpression ::= Identifier Assignment boolExpression

        Returns the AST node, or None (with the reader position restored)
        when the lookahead does not start an assignment.  Raises
        IllegalGrammerError when `Identifier =` is not followed by a valid
        expression.
        """
        pos = self.tokenReader.get_position()

        token_id = self.tokenReader.read()
        if token_id.token_type is TokenType.Identifier:
            token_assign = self.tokenReader.read()

            if token_assign.token_type is TokenType.Assignment:
                child = self.parse_boolExpression()
                if not child:
                    raise IllegalGrammerError('illegal assignment operation grammer')
                node = SimpleASTNode(ASTNodeType.AssignmentExpression, text=token_assign.text)
                node.add_child(SimpleASTNode(ASTNodeType.Identifier, text=token_id.text))
                node.add_child(child)
                return node

        self.tokenReader.set_position(pos)

    def _parse_left_assoc(self, parse_operand, op_types, node_type, error_msg):
        """Parse `operand (op operand)*` and build a LEFT-leaning AST.

        Shared engine for the boolean, additive and multiplicative rules:
        each iteration folds the subtree built so far under a fresh operator
        node as its LEFT child, which is exactly what makes the operators
        left-associative.

        Returns the subtree root (the bare operand when no operator follows),
        or None with the reader position restored when even the first operand
        fails to parse.  Raises IllegalGrammerError(error_msg) when an
        operator is not followed by a valid operand.
        """
        pos = self.tokenReader.get_position()

        left = parse_operand()
        if left:
            while True:
                token_op = self.tokenReader.read()
                if token_op.token_type in op_types:
                    right = parse_operand()
                    if not right:
                        raise IllegalGrammerError(error_msg)
                    node = SimpleASTNode(node_type, token_op.text)
                    node.add_child(left)
                    node.add_child(right)
                    left = node  # fold: previous result becomes the left child
                else:
                    # Not one of our operators: hand the token back and stop.
                    self.tokenReader.unread()
                    break
            return left

        self.tokenReader.set_position(pos)

    def parse_boolExpression(self):
        """boolExpression ::= comparision ((And | Or) comparision)*"""
        return self._parse_left_assoc(
            self.parse_comparision,
            (TokenType.And, TokenType.Or),
            ASTNodeType.BoolExpression,
            'illegal bool operation grammer',
        )

    def parse_comparision(self):
        """comparision ::= additiveEx ((GT | LT | EQ | GE | LE) additiveEx)?

        At most one comparison operator is consumed (comparisons do not
        chain), so this is a `?` rule rather than a `*` loop and does not use
        _parse_left_assoc.  Returns the AST node, or None with the reader
        position restored when no additive expression starts here.
        """
        pos = self.tokenReader.get_position()

        child_addiL = self.parse_additiveEx()
        if child_addiL:
            token_comp = self.tokenReader.read()
            if token_comp.token_type in (TokenType.GT, TokenType.LT, TokenType.EQ, TokenType.GE, TokenType.LE):
                child_addiR = self.parse_additiveEx()
                if not child_addiR:
                    raise IllegalGrammerError('illegal comparision operation grammer')
                node = SimpleASTNode(ASTNodeType.Comparision, text=token_comp.text)
                node.add_child(child_addiL)
                node.add_child(child_addiR)
                child_addiL = node
            else:
                self.tokenReader.unread()

            return child_addiL

        self.tokenReader.set_position(pos)

    def parse_additiveEx(self):
        """additiveEx ::= multiplicativeEX ((Plus | Minus) multiplicativeEX)*"""
        return self._parse_left_assoc(
            self.parse_multiplicativeEX,
            (TokenType.Plus, TokenType.Minus),
            ASTNodeType.AdditiveExpression,
            'illegal additive operation grammer',
        )

    def parse_multiplicativeEX(self):
        """multiplicativeEX ::= primary ((Star | Slash) primary)*"""
        return self._parse_left_assoc(
            self.parse_primary,
            (TokenType.Star, TokenType.Slash),
            ASTNodeType.MultiplicativeExpression,
            'illegal multiplicative operation grammer',
        )

    def parse_primary(self):
        """primary ::= Identifier | IntLiteral | LeftParen boolExpression RightParen

        Returns the AST node, or None with the reader position restored when
        the lookahead starts none of the alternatives.  Raises
        IllegalGrammerError on an unclosed or empty parenthesized expression.
        """
        pos = self.tokenReader.get_position()
        token_prim = self.tokenReader.read()

        if token_prim.token_type is TokenType.Identifier:
            return SimpleASTNode(ASTNodeType.Identifier, token_prim.text)

        if token_prim.token_type is TokenType.IntLiteral:
            return SimpleASTNode(ASTNodeType.IntLiteral, token_prim.text)

        if token_prim.token_type is TokenType.LeftParen:
            node = SimpleASTNode(ASTNodeType.Paren, text='()')
            child = self.parse_boolExpression()
            if not child:
                raise IllegalGrammerError('illegal expression grammer in paren')
            token_rightParen = self.tokenReader.read()
            if token_rightParen.token_type is not TokenType.RightParen:
                raise IllegalGrammerError('short of right paren')
            node.add_child(child)
            return node

        self.tokenReader.set_position(pos)


if __name__ == '__main__':
    from simple_lexer import FiniteAuto

    # Tokenize the demo script line by line.
    finiteAuto = FiniteAuto()
    finiteAuto.prepare_container([])
    # encoding made explicit: the default text encoding is platform-dependent.
    with open('demo.play', 'r', encoding='utf-8') as fp:
        for stmt in fp:  # iterate lines directly instead of a readline() sentinel loop
            finiteAuto.tokenize(stmt)

    tokens = finiteAuto.dump_tokens()
    for token in tokens:
        print('%s %s' % (token.token_type, token.text))

    # Parse the token stream and pretty-print the resulting AST.
    parser = SimpleParser(tokens)
    ast = parser.parse_expressionStmt()
    ast.show_ast_pretty()