# -*- coding: utf-8 -*-

"""
<simple_calculator.py>理解了递归下降算法，但对于二元表达式，也产生了左递归与结合性的问题
<simple_binary.py>解决了结合性问题，同时更深入地处理了表达式的优先级，规范语法规则的文法，EBNF（扩展巴科斯-瑙尔范式，Extended Backus-Naur Form）
...
在前面的基础上，本语法解析器是手动实现一门脚本的终章。
1）支持解析变量声明，赋值，及表达式运算；
2）支持变量；
3）支持REPL(read-eval-print-loop)，输入-执行-打印循环过程，这是如python，javascript等脚本语言的典型特征。

变量支持等作用域应该在执行阶段才有，语法解析阶段不应该有
"""

from simple_lexer import Token
from simple_enum import ASTNodeType, TokenType
from simple_lexer import FiniteAuto


class SimpleASTNode():
    """One node of the AST; the root node doubles as the whole tree.

    A multi-way tree: each parent keeps its children in a plain list, so no
    separate tree class is needed — this AST never has to maintain a cursor
    pointing at an arbitrary node.

    The node supports pre-order iteration over itself and its descendants,
    plus indented pretty-printing; applying either to the root covers the
    entire tree.
    """
    def __init__(self, ast_node_type, text) -> None:
        self.ast_node_type = ast_node_type  # ASTNodeType member classifying this node
        self.text = text                    # source lexeme / operator text for the node
        self.parent = None                  # set by add_child; None for the root
        self.children = []                  # ordered child nodes

    def add_child(self, node):
        """Append *node* as the last child and wire up its parent link."""
        node.parent = self
        self.children.append(node)

    def __iter__(self):
        """Yield this node, then every descendant, in pre-order (depth-first).

        Each child is itself iterable, so delegation via ``yield from``
        recurses naturally; an empty ``children`` list simply ends the loop.
        """
        yield self
        for child in self.children:
            yield from child

    def show_ast_pretty(self):
        """Print the subtree rooted here, indented four spaces per level."""
        unit = '    '

        def walk(node, indent):
            print(f'{indent}{node.ast_node_type} <{node.text}>')
            for child in node.children:
                walk(child, indent + unit)

        walk(self, '')


class TokenReader():
    """A cursor over a token list, built for lookahead and backtracking.

    Keeps an explicit position so callers can peek without consuming, step
    back one token, or save/restore an arbitrary checkpoint.
    """
    def __init__(self, tokens) -> None:
        self.tokens = tokens
        self.pos = 0                        # index of the next token to hand out
        self.pos_end = len(self.tokens) - 1  # index of the last valid token

    def peek(self):
        """Return the next token without consuming it, or None when exhausted."""
        if self.pos > self.pos_end:
            return None
        return self.tokens[self.pos]

    def read(self):
        """Consume and return the next token, or None when exhausted."""
        if self.pos > self.pos_end:
            return None
        current = self.tokens[self.pos]
        self.pos += 1
        return current

    def unread(self):
        """Move the cursor back one token; a no-op at the very start."""
        if self.pos > 0:
            self.pos -= 1

    def get_position(self):
        """Return the current cursor offset (a backtracking checkpoint)."""
        return self.pos

    def set_position(self, offset):
        """Jump the cursor to *offset* (restore a saved checkpoint)."""
        self.pos = offset


class IllegalGrammerError(Exception):
    """Raised when the token stream violates the grammar mid-production."""


class SimpleParser():
    """Recursive-descent parser building a SimpleASTNode tree.

    Grammar (EBNF):
    program: expressionStmt+
    expressionStmt: (intDeclaration | assignmentExpression | boolExpression) Semicolon
    intDeclaration ::= Int Identifier (Assignment boolExpression)?
    assignmentExpression ::= Identifier Assignment boolExpression
    boolExpression ::= comparision ((And | Or) comparision)*
    comparision ::= additiveEx ((GT | LT | EQ | GE | LE) additiveEx)?
    additiveEx ::= multiplicativeEX ((Plus | Minus) multiplicativeEX)*
    multiplicativeEX ::= primary ((Star | Slash) primary)*
    primary ::= Identifier | IntLiteral | LeftParen boolExpression RightParen

    Each parse_* method returns a SimpleASTNode on success, or None after
    restoring the token cursor when its production does not match.
    """
    def __init__(self) -> None:
        self.tokenReader = None  # TokenReader over the current statement's tokens
        self.variables = {}      # parse-time variable table: name -> initializer text or None
    
    def load_tokens(self, tokens):
        # Append a sentinel Semicolon token so the parse loop always has a
        # terminator to stop on, simplifying the control flow.
        tokens.append(Token(token_type=TokenType.Semicolon))
        self.tokenReader = TokenReader(tokens)
        self.variables = {}
    
    def parse_program(self):
        """program: expressionStmt+ — parse statements under a Program root.

        Stops once the next token is a Semicolon (in particular the sentinel
        appended by load_tokens), or when a statement fails to parse.
        """
        root = SimpleASTNode(ASTNodeType.Program, text='root')
        while True:
            if self.tokenReader.peek().token_type is TokenType.Semicolon:
                break
            node = self.parse_expressionStmt()
            if node:
                root.add_child(node)
            else:
                print('Illegal Grammer')
                break
        return root

    def parse_expressionStmt(self):
        """expressionStmt: (intDeclaration | assignmentExpression | boolExpression) Semicolon

        Tries each alternative in order, backtracking the token cursor after
        every failed attempt.  A variable enters the parse-time variable
        table only after its whole declaration/assignment statement parses
        successfully — loosely mirroring how JavaScript needs the ``var``
        keyword and, in strict mode, refuses assignment to undeclared names.
        """
        # Backtracking checkpoint.
        pos = self.tokenReader.get_position()

        node = self.parse_intDeclaration()
        if node and self.tokenReader.read().token_type is TokenType.Semicolon:
            
            # Tree shape: IntDeclaration -> Identifier [-> AssignmentExpression -> expr]
            node_idf = node.children[0]
            name = node_idf.text
            val = None
            node_assign = node_idf.children
            if node_assign:
                # NOTE: records the initializer expression root's text, not an
                # evaluated value — evaluation happens at the execution stage.
                val = node_assign[0].children[0].text
            
            self.variables[name] = val

            return node
        # Backtrack.
        self.tokenReader.set_position(pos)

        node = self.parse_assignmentExpression()
        if node and self.tokenReader.read().token_type is TokenType.Semicolon:
            
            name = node.children[0].text
            val = node.children[1].text
            self.variables[name] = val

            return node
        # Backtrack.
        self.tokenReader.set_position(pos)

        node = self.parse_boolExpression()
        if node and self.tokenReader.read().token_type is TokenType.Semicolon:
            return node
        # Backtrack.
        self.tokenReader.set_position(pos)

    def parse_intDeclaration(self):
        """intDeclaration ::= Int Identifier (Assignment boolExpression)?

        Returns an IntDeclaration node whose first child is the Identifier;
        an optional initializer hangs off the Identifier as an
        AssignmentExpression child.
        """
        pos = self.tokenReader.get_position()
        
        token_int = self.tokenReader.read()
        if token_int.token_type is TokenType.Int:
            
            token_identifier = self.tokenReader.read()
            if token_identifier.token_type is TokenType.Identifier:
                node = SimpleASTNode(ASTNodeType.IntDeclaration, token_int.text)
                node_id = SimpleASTNode(ASTNodeType.Identifier, token_identifier.text)
                node.add_child(node_id)

                token_assign = self.tokenReader.read()
                if token_assign.token_type is TokenType.Assignment:

                    child_exp = self.parse_boolExpression()
                    if child_exp:
                        child_assign = SimpleASTNode(ASTNodeType.AssignmentExpression, text=token_assign.text)
                        node_id.add_child(child_assign)
                        child_assign.add_child(child_exp)
                        return node
                    else:
                        # '=' seen but no valid right-hand expression follows.
                        raise IllegalGrammerError('illegal assignment operation grammer')
                else:
                    # Declaration without initializer: put the token back.
                    self.tokenReader.unread()
                    return node

        self.tokenReader.set_position(pos)

    def parse_assignmentExpression(self):
        """assignmentExpression ::= Identifier Assignment boolExpression

        Returns an AssignmentExpression node with children
        [Identifier, expression].
        """
        pos = self.tokenReader.get_position()

        token_id = self.tokenReader.read()
        if token_id.token_type is TokenType.Identifier:
            token_assign = self.tokenReader.read()

            if token_assign.token_type is TokenType.Assignment:
                child = self.parse_boolExpression()
                if child:
                    node = SimpleASTNode(ASTNodeType.AssignmentExpression, text=token_assign.text)
                    node.add_child(SimpleASTNode(ASTNodeType.Identifier, text=token_id.text))
                    node.add_child(child)
                    return node
                else:
                    # '=' seen but no valid right-hand expression follows.
                    raise IllegalGrammerError('illegal assignment operation grammer')

        self.tokenReader.set_position(pos)

    def parse_boolExpression(self):
        """boolExpression ::= comparision ((And | Or) comparision)*

        Left-associative: each new operator node absorbs the accumulated
        left subtree as its first child.
        """
        pos = self.tokenReader.get_position()

        child_compL = self.parse_comparision()
        if child_compL:
            while True:
                token_bool = self.tokenReader.read()
                if token_bool.token_type in (TokenType.And, TokenType.Or):
                    child_compR = self.parse_comparision()
                    if child_compR:
                        node = SimpleASTNode(ASTNodeType.BoolExpression, token_bool.text)
                        node.add_child(child_compL)
                        node.add_child(child_compR)
                        child_compL = node
                    else:
                        raise IllegalGrammerError('illegal bool operation grammer')
                else:
                    # Not an And/Or: put the token back and stop folding.
                    self.tokenReader.unread()
                    break

            return child_compL

        self.tokenReader.set_position(pos)

    def parse_comparision(self):
        """comparision ::= additiveEx ((GT | LT | EQ | GE | LE) additiveEx)?

        At most one comparison operator (the grammar uses '?', not '*'),
        so no folding loop is needed here.
        """
        pos = self.tokenReader.get_position()

        child_addiL = self.parse_additiveEx()
        if child_addiL:
            token_comp = self.tokenReader.read()
            if token_comp.token_type in (TokenType.GT, TokenType.LT, TokenType.EQ, TokenType.GE, TokenType.LE):
                child_addiR = self.parse_additiveEx()
                if child_addiR:
                    node = SimpleASTNode(ASTNodeType.Comparision, text=token_comp.text)
                    node.add_child(child_addiL)
                    node.add_child(child_addiR)
                    child_addiL = node
                else:
                    raise IllegalGrammerError('illegal comparision operation grammer')
            else:
                # Not a comparison operator: put the token back.
                self.tokenReader.unread()
            
            return child_addiL

        self.tokenReader.set_position(pos)

    def parse_additiveEx(self):
        """additiveEx ::= multiplicativeEX ((Plus | Minus) multiplicativeEX)*

        Left-associative fold, same scheme as parse_boolExpression.
        """
        pos = self.tokenReader.get_position()
        
        child_multiL = self.parse_multiplicativeEX()
        if child_multiL:
            while True:
                token_addi = self.tokenReader.read()
                if token_addi.token_type in (TokenType.Plus, TokenType.Minus):
                    child_multiR = self.parse_multiplicativeEX()
                    if child_multiR:
                        node = SimpleASTNode(ASTNodeType.AdditiveExpression, token_addi.text)
                        node.add_child(child_multiL)
                        node.add_child(child_multiR)
                        child_multiL = node
                    else:
                        raise IllegalGrammerError('illegal additive operation grammer')
                else:
                    # Not Plus/Minus: put the token back and stop folding.
                    self.tokenReader.unread()
                    break
            return child_multiL
        
        self.tokenReader.set_position(pos)

    def parse_multiplicativeEX(self):
        """multiplicativeEX ::= primary ((Star | Slash) primary)*

        Left-associative fold, same scheme as parse_additiveEx.
        """
        pos = self.tokenReader.get_position()
        
        child_primL = self.parse_primary()
        if child_primL:
            while True:
                token_multi = self.tokenReader.read()
                if token_multi.token_type in (TokenType.Star, TokenType.Slash):
                    child_primR = self.parse_primary()
                    if child_primR:
                        node = SimpleASTNode(ASTNodeType.MultiplicativeExpression, token_multi.text)
                        node.add_child(child_primL)
                        node.add_child(child_primR)
                        child_primL = node
                    else:
                        raise IllegalGrammerError('illegal multiplicative operation grammer')
                else:
                    # Not Star/Slash: put the token back and stop folding.
                    self.tokenReader.unread()
                    break
            return child_primL
        
        self.tokenReader.set_position(pos)

    def parse_primary(self):
        """primary ::= Identifier | IntLiteral | LeftParen boolExpression RightParen

        A parenthesized expression becomes a Paren node wrapping the inner
        expression, preserving the explicit grouping in the tree.
        """
        pos = self.tokenReader.get_position()
        token_prim = self.tokenReader.read()
        if token_prim.token_type is TokenType.Identifier:
            return SimpleASTNode(ASTNodeType.Identifier, token_prim.text)

        if token_prim.token_type is TokenType.IntLiteral:
            return SimpleASTNode(ASTNodeType.IntLiteral, token_prim.text)

        if token_prim.token_type is TokenType.LeftParen:
            node = SimpleASTNode(ASTNodeType.Paren, text='()')
            child = self.parse_boolExpression()
            if child:
                token_rightParen = self.tokenReader.read()
                if token_rightParen.token_type is TokenType.RightParen:
                    node.add_child(child)
                    return node
                else:
                    raise IllegalGrammerError('short of right paren')
            else:
                raise IllegalGrammerError('illegal expression grammer in paren')

        self.tokenReader.set_position(pos)

    def dump_variables(self):
        """Return the parse-time variable table built by parse_expressionStmt."""
        return self.variables


class SimpleREPL():
    """Interactive read-eval-print loop over the toy script language.

    Runtime variable state lives here, at the execution stage; the parser's
    own parse-time bookkeeping table is not consulted.
    """
    def __init__(self) -> None:
        self.variables = {}              # runtime variable table: name -> value
        self.finiteAuto = FiniteAuto()   # lexer (finite automaton)
        self.parser = SimpleParser()

    def repl(self):
        """Read one statement per line, evaluate it, print the results.

        Type ``exit();`` to quit.  A grammar error aborts only the current
        line: it is reported and the loop keeps running, instead of the
        raised IllegalGrammerError propagating and killing the session.
        """
        while True:
            stmt = input()
            if stmt == 'exit();':
                break

            try:
                # Lex: reset the token container, then tokenize this line.
                self.finiteAuto.prepare_container([])
                self.finiteAuto.tokenize(stmt)
                tokens = self.finiteAuto.dump_tokens()

                # Parse into an AST rooted at a Program node.
                self.parser.load_tokens(tokens)
                ast = self.parser.parse_program()

                ast.show_ast_pretty()

                # Program evaluation yields one result per statement.
                result = self.eval(ast)
            except IllegalGrammerError as err:
                # Report the bad line and keep the REPL alive.
                print('Illegal Grammer: %s' % err)
                continue

            for r in result:
                print(r)

    def eval(self, ast):
        """Recursively evaluate *ast* and return its value.

        Returns a list of per-statement results for a Program node, a
        {name: value} dict for declarations/assignments, an int or bool for
        expressions, and None for any unrecognized node type.
        """
        if ast.ast_node_type is ASTNodeType.IntLiteral:
            return int(ast.text)

        if ast.ast_node_type is ASTNodeType.Identifier:
            # A KeyError here means the variable was never declared/assigned.
            return self.variables[ast.text]

        if ast.ast_node_type is ASTNodeType.Program:
            results = []
            for child in ast.children:
                results.append(self.eval(child))
            return results

        if ast.ast_node_type is ASTNodeType.IntDeclaration:
            # Tree shape: IntDeclaration -> Identifier [-> AssignmentExpression -> expr]
            node_int = ast
            node_idf = node_int.children[0]
            variable = {node_idf.text: None}
            if node_idf.children:
                node_assign = node_idf.children[0]
                node_expr = node_assign.children[0]
                variable[node_idf.text] = self.eval(node_expr)
            self.variables.update(variable)
            return variable

        if ast.ast_node_type is ASTNodeType.AssignmentExpression:
            node_assign = ast
            node_idf = node_assign.children[0]
            node_expr = node_assign.children[1]
            variable = {node_idf.text: self.eval(node_expr)}
            self.variables.update(variable)
            return variable

        if ast.ast_node_type is ASTNodeType.BoolExpression:
            # Operator spelling comes from the lexer's token text.
            bool_ = ast.text
            node_left, node_right = ast.children[0], ast.children[1]
            left, right = self.eval(node_left), self.eval(node_right)
            if bool_ == 'and':
                return left and right
            elif bool_ == 'or':
                return left or right

        if ast.ast_node_type is ASTNodeType.Comparision:
            compa_ = ast.text
            node_left, node_right = ast.children[0], ast.children[1]
            left, right = self.eval(node_left), self.eval(node_right)
            if compa_ == '>':
                return left > right
            elif compa_ == '<':
                return left < right
            elif compa_ == '==':
                return left == right
            elif compa_ == '>=':
                return left >= right
            elif compa_ == '<=':
                return left <= right

        if ast.ast_node_type is ASTNodeType.AdditiveExpression:
            addi_ = ast.text
            node_left, node_right = ast.children[0], ast.children[1]
            left, right = self.eval(node_left), self.eval(node_right)
            if addi_ == '+':
                return left + right
            elif addi_ == '-':
                return left - right

        if ast.ast_node_type is ASTNodeType.MultiplicativeExpression:
            multi_ = ast.text
            node_left, node_right = ast.children[0], ast.children[1]
            left, right = self.eval(node_left), self.eval(node_right)
            if multi_ == '*':
                return left * right
            elif multi_ == '/':
                # NOTE(review): true division yields a float even for ints —
                # confirm '//' isn't intended for this int-only language.
                return left / right

        if ast.ast_node_type is ASTNodeType.Paren:
            # Parentheses only group; evaluate the single wrapped child.
            return self.eval(ast.children[0])


if __name__ == '__main__':
    # Start the interactive read-eval-print loop; type exit(); to quit.
    repl = SimpleREPL()
    repl.repl()
