from Parser import Node, D, R, Token, EMPTY, L, lit, AST
import Parser
import re

# Names of every terminal symbol the tokenizer works with.  The order is
# preserved exactly as written, since the list is splatted positionally
# into Parser.syms.define below.
token_types = [
    'TOKEN', 'COMMENT',
    'LPAREN', 'RPAREN', 'LCURLY', 'COLON', 'NEXTLINE',
    'RCURLY', 'LBRACK', 'RBRACK', 'EQUAL', 'COMMA', 'LANCE',
    'SIMPLE_VAR', 'SPECIAL_VAR', 'TRUE', 'FALSE',
    'ENDL', 'CODEBLOCK', 'STYLEBLOCK', 'ENDBLOCK',
    'INTEGER_LITERAL', 'SYMBOL_LITERAL', 'SPACE', 'ERROR',
]
@Parser.syms.define(*token_types)
def Tokenizer(filename):
    """Tokenize the file named *filename*; return a flat list of Tokens.

    Indentation-sensitive: colon-plus-newline opens a code block, a deeper
    indent in mid-statement opens a "style" block, and dedents emit
    ENDBLOCK tokens.  The decorator presumably injects one symbol name per
    entry of ``token_types`` (TOKEN, COLON, ENDL, ...) into this function's
    scope; those names are used below with no local definition -- TODO
    confirm against Parser.syms.define.
    """

    class ParseErr(Exception): 
        # Parse error carrying the message plus a line number derived from
        # the current position of the (later-assigned) tokenizer closure vars.
        def __init__(self, msg):
            self.args = (msg, Parser.get_line_number(input_stream, tokenizer.length))

    # Output list of Tokens, appended to by emit_token() below.
    token_list = []

    class stack(object):
        # Class used purely as a mutable namespace shared by all handlers.
        line = []        # bracket symbols opened on the current logical line
        indent = [ 0 ]   # stack of active indentation widths, in spaces
        total = []       # open-construct stack: '(' '{' '[' plus 'c'/'s' blocks
        styles_end = 0   # count of style blocks whose closer ( ) ] } ) is still due

    D(COLON, ':' )
    # Group 1 of ENDL captures the following line's leading spaces (its indent).
    D(ENDL, '\s*\n( *)' )
    
    def ind_val(Nd):
        """return how many spaces the line is indented"""
        return len(Nd.value)

    def colon_indent(Nd):
        # ':' followed by a newline opens a code block; the next line must be
        # indented strictly deeper than the current level.
        stack.line = []
        # Keep only the ENDL sub-node's captured indent string as this
        # node's own value, and drop the children.
        Nd.value = Nd.nodes[1].value
        Nd.nodes = []
        if ind_val(Nd) > stack.indent[-1]:
            stack.indent.append(ind_val(Nd))
            stack.total.append('c')  # 'c' marks an open code block
        else:
            raise ParseErr('Illegal indentation')  
    D(CODEBLOCK, [COLON, ENDL],
        _handler = colon_indent
    )
    
     
    def match_pairs(*args):
        """Pop the innermost open construct; raise unless it is one of *args*.

        Returns the popped symbol.  The error message names the closer the
        popped construct actually expected.
        """
        pairs = { '(':')', '[':']', '{':'}', 'c':'Dedentation', 's':'Dedentation' }
        last = stack.total.pop()
        if last not in args:
            raise ParseErr('Expecting '+ pairs[last])
        return last
        
    def indent_hdlr(Nd):
        """Handle a plain newline: emit statement-end / block-end markers.

        Returns a STYLEBLOCK token when indentation increases mid-statement,
        otherwise an END token whose value lists what got closed ('newl' for
        the statement, 'c'/'s' for each dedented block).
        """
        d_list = []
        #check if statement is over
        open_statement = 0
        for sym in ['(', '{', '[']:
            open_statement += stack.total.count(sym)
        if not open_statement:
            #statement is over
            d_list = ['newl']

        if ind_val(Nd) > stack.indent[-1]:
            # A deeper indent is only legal mid-statement (something already
            # open on this line); it starts a style block ('s').
            if stack.line:
               stack.total.append('s')
               stack.line=[]
               stack.indent.append(ind_val(Nd))
               return Token('STYLEBLOCK')
            else:
                raise ParseErr('Indentation Not Allowed')
        elif ind_val(Nd) < stack.indent[-1]:
            try:
                stack.line=[]
                # Close every block deeper than the new indent; .index raises
                # ValueError if this exact indent level was never opened.
                dedents = len(stack.indent)-stack.indent.index(ind_val(Nd))-1
                for cnt in range(dedents):
                    stack.indent.pop()
                    d_list.append(match_pairs('s','c'))
            except ValueError:
                raise ParseErr('Indentations do not match')
        return Token('END', d_list)

    D(NEXTLINE, ENDL,
        _handler = indent_hdlr
    )

    def make_hdlr(symbol):
        """Build a matched pair of open/close handlers for bracket *symbol*."""
        def on(Nd):
            stack.total.append(symbol)
            stack.line.append(symbol)
        def off(Nd):
            # Verify the closer matches the innermost open construct.
            match_pairs(symbol)
            if stack.line:
                stack.line.pop()
        return {'on':on, 'off':off}

    paren_hdlr = make_hdlr( '(' )         
    curly_hdlr = make_hdlr( '{' )         
    brack_hdlr = make_hdlr( '[' )         

    D(LPAREN, r'\(', 
        _handler = paren_hdlr['on']
    )

    D(RPAREN, r'\)',
        _handler = paren_hdlr['off']
    )

    D(LCURLY, r'\{', 
        _handler = curly_hdlr['on'] 
    )

    D(RCURLY, r'\}',
        _handler = curly_hdlr['off']
    )

    D(LBRACK, r'\[', 
        _handler = brack_hdlr['on'] 
    )

    D(RBRACK, r'\]',
        _handler = brack_hdlr['off']
    )

    D(EQUAL, '=')

    D(COMMA, ',')
    
    # '->' arrow, used to bind a symbolic parameter to its value.
    D(LANCE, '->')

    # Lowercase-initial identifiers vs. uppercase-initial "special" ones.
    D(SIMPLE_VAR, '[a-z]\w*')

    D(SPECIAL_VAR, '[A-Z]\w*')

    D(INTEGER_LITERAL, '\d+')

    # Backtick-prefixed symbol, e.g. `name
    D(SYMBOL_LITERAL, '`([a-z]\w*)')

    D(SPACE, ' +')

    TRUE = Node('TRUE', 'true', ignore_case=True)

    FALSE = Node('FALSE', 'false', ignore_case=True)

    def nothing_found(Nd):
        # Catch-all single-character match: anything unrecognized is an error.
        raise ParseErr('Nothing Found')

    D(ERROR, '.', _handler = nothing_found)

    # Single- or double-quoted string with backslash escapes.
    QUOTE=Node('QUOTE', r"('(\\'|[^'])*')" + '|' + r'("(\\"|[^"])*")',  
        has_value=True,
        ignore_groups=True,
    )
    def get_quote(aNode):
        # Collapse backslash-escape pairs to the escaped char, then strip the
        # surrounding quotes.
        aNode.value = ''.join(re.split(r'\\(.)', aNode.value))[1:-1]
    QUOTE._handler = get_quote    

    D(COMMENT, r'#.*?\n' )

    def emit_token(aToken):
        """Append the matched token to token_list, expanding END markers.

        SPACE/COMMENT/NEXTLINE never reach the output.  While style blocks
        are being closed (stack.styles_end > 0), only closing brackets are
        accepted as the next token.
        """
        token =  aToken.nodes[0]
        if token.type in ['SPACE', 'COMMENT', 'NEXTLINE']:
           return
        if stack.styles_end:
            if token.type in ['RPAREN', 'RCURLY', 'RBRACK']:
                stack.styles_end -= 1
            else:
                raise ParseErr('First non whitespace character on this line must be a ) or ] or }')    
    
        end_map = {'newl':'ENDSTATEMENT', 'c': 'ENDBLOCK', 's': 'ENDBLOCK'}
        if token.type == 'END':
           # END carries a list of close markers; translate each into a token.
           for type in token.value:
               new_token = Token(end_map[type])
               print new_token  # NOTE(review): debug print left in (Python 2)
               token_list.append(new_token)
           # Each closed style block obliges the next token(s) to be closers.
           stack.styles_end = token.value.count('s')    
        else:
            print token  # NOTE(review): debug print left in (Python 2)
            token_list.append(token)

    # Top-level alternation over every token kind, ordered by priority.
    # NOTE(review): SYMBOL_LITERAL appears twice -- looks like a copy/paste
    # typo; kept as-is since duplication is presumably harmless. TODO confirm.
    D(TOKEN, 
        CODEBLOCK, NEXTLINE, COLON, QUOTE, COMMENT, LPAREN, RPAREN, LCURLY, 
        TRUE, FALSE,  
        RCURLY, LBRACK, RBRACK, EQUAL, COMMA, SIMPLE_VAR, SPECIAL_VAR, LANCE, 
        INTEGER_LITERAL, SYMBOL_LITERAL, SYMBOL_LITERAL,
        SPACE, ERROR,
        _handler = emit_token 
    )

    tokenizer = TOKEN()
    # NOTE(review): the file handle is never closed explicitly; a
    # with-statement would be safer.
    input_stream = open(filename).read()
    tokenizer.parse(input_stream)
    
    # Close any blocks still open when the input runs out.
    for cnt in range(len(stack.indent)-1):
        token_list.append(Token('ENDBLOCK'))

    return token_list


@Parser.syms.define('Expression', 'ArgList', 'Var', 'Statement', 'Value',
    'ParList', 'FnDcl', 'NewScope', 'ParItem', 'SymPar', 'ValPar')
@Parser.syms.define(sym_type=Parser.Token, *(token_types+['ENDSTATEMENT','QUOTE']))
def construct_AST(tokens):
    """Parse the Token list produced by Tokenizer into a list of Statement ASTs.

    The two decorators presumably inject the nonterminal names (Expression,
    Statement, ...) and the Token-typed terminal names into this function's
    scope -- TODO confirm against Parser.syms.define.  Grammar notation,
    inferred from usage: D(Name, alt1, alt2, ...) defines an ordered choice,
    a list is a sequence, and X() / L(...)() is repetition -- verify against
    the Parser module.
    """
    
    Parser.reset_names()
    
    # A literal value: string, integer, symbol, or boolean.
    D(Value, QUOTE, INTEGER_LITERAL, SYMBOL_LITERAL, TRUE, FALSE)

    # Statement alternatives: definition with parameter list and block body,
    # bare expression, assignment, or a call-like "var expr* ;" form.
    D(Statement, 
        [Var, LPAREN, ParList, RPAREN, NewScope], 
        [Expression, ENDSTATEMENT],
        [SIMPLE_VAR, EQUAL, Expression, ENDSTATEMENT],
        [Var, Expression(), ENDSTATEMENT]
    )

    # Comma-separated parameter list; may be empty.
    D(ParList, 
        [ParItem, L(COMMA, ParItem)()],
        EMPTY,
    )

    # Either a "symbol -> value" pair or a bare value parameter.
    D(ParItem, 
        [SymPar, LANCE, ValPar],
        ValPar,
    )

    D(SymPar, SIMPLE_VAR, SYMBOL_LITERAL)

    D(ValPar, SIMPLE_VAR, Value)

    # An indented block: CODEBLOCK opener, statements, ENDBLOCK closer.
    D(NewScope, [CODEBLOCK, Statement(), ENDBLOCK] )

    # A call with arguments, a bare variable, or a literal.
    D(Expression, 
        [Var, LPAREN, ArgList, RPAREN],
        Var,
        Value,
    )

    # Comma-separated argument list; may be empty.
    D(ArgList, 
        [Expression, L(COMMA, Expression)()],
        EMPTY,
    )

    D(Var, SIMPLE_VAR, SPECIAL_VAR)

    Statements = []
#    print Expression.parse(tokens)
    while tokens:
        # Statement.parse returns the parsed node and how many tokens it
        # consumed; advance past them and repeat until input is exhausted.
        exp, token_num = Statement.parse(tokens)
        print exp  # NOTE(review): debug print left in (Python 2)
        tokens = tokens[token_num:]
        Statements += [exp]

    return Statements