from collections import deque
from collections import defaultdict

import re

# Token definitions, as a mapping of token name -> regex matched at the
# start of the remaining input.  Patterns are tried in this order, so
# earlier entries take precedence on overlapping matches (e.g. COMMENT
# before anything that could match '#').
# NOTE(review): ':' appears inside the CHARLIST class, so a key written
# as "key:" (no space before the colon) tokenizes with the colon glued
# onto the CHARLIST token — confirm whether keys must be separated from
# the colon by whitespace.
TOKENS = {"STRING" : r"\"((\\\")|.)*\"",
           "CHARLIST" : r"[A-Za-z0-9\\/:\?_,.*+^-]+",
           "LEFT-BRACKET" : r"\{",
           "RIGHT-BRACKET" : r"\}",
           "NEWLINE" : r"\n",
           "COLON" : r":",
           "WHITESPACE" : r"[\t ]", # does not include newline
           "COMMENT" : r"#[^\n]*" # from # sign to end of line, not including newline character
           }

def tokenizer(flname):
    """
    Open and read the file given by the filename as a list of tokens.

    Generator yielding 2-tuples (token_name, matched_text), produced by
    repeatedly matching the TOKENS patterns against the start of the
    remaining input.

    Raises an Exception when no pattern matches the remaining input.
    """
    # Read the whole file up front; tokens are yielded lazily.
    with open(flname) as fl:
        s = fl.read()
    while len(s) != 0:
        matched = False
        # .items() preserves insertion order on Python 3.7+, making
        # token precedence deterministic (py2 iteritems() iterated in
        # arbitrary hash order).
        for name, regex in TOKENS.items():
            match = re.match(regex, s)
            if match is not None:
                matched = True
                matched_string = match.group(0)
                # Consume the matched prefix and emit the token.
                s = s[len(matched_string):]
                yield (name, matched_string)
                break
        if not matched:
            # Show at most 100 characters of the offending input.
            if len(s) > 100:
                s = s[:100]
            msg = "Invalid input in file \"" + flname + \
                "\" starting with content \"" + s + "\""
            raise Exception(msg)

def strip_comments_whitespace(tokens):
    """
    Yield only the tokens that are significant to the parser.

    COMMENT, WHITESPACE, and NEWLINE tokens carry no meaning for the
    grammar, so they are filtered out; every other token passes through
    unchanged as a (type, value) 2-tuple.
    """
    insignificant = ("COMMENT", "WHITESPACE", "NEWLINE")
    return ((kind, value) for kind, value in tokens
            if kind not in insignificant)

# Configuration file grammar is defined by a context-free grammar
# <top-level> ::= <pairlist>
# <pairlist> ::= ^ | <pair> <pairlist>
# <pair> ::= <key> COLON <value>
# <key> ::= <string_or_charlist>
# <value> ::= <string_or_charlist> | <dict>
# <dict> ::= LEFT-BRACKET <pairlist> RIGHT-BRACKET
# <string_or_charlist> ::= STRING | CHARLIST
#
# Terminals are: COLON, LEFT-BRACKET, RIGHT-BRACKET, STRING, CHARLIST
# Null is ^

# Recursive-descent parser implementation of CFG follows

def string_or_charlist_rule(tokens):
    """
    Consume and return the value of a STRING or CHARLIST token.

    `tokens` is a deque of (type, value) 2-tuples; the leftmost token
    is consumed on success.  Raises Exception if the next token is
    neither STRING nor CHARLIST.
    """
    token_type, token_value = tokens[0]
    if token_type not in ["STRING", "CHARLIST"]:
        e_msg = "Found invalid token (\"" + token_type + "\", \"" + \
            token_value + "\") when expecting STRING or CHARLIST."
        # Python 3 call syntax (original used the py2 raise statement).
        raise Exception(e_msg)
    tokens.popleft()
    return token_value


def pair_rule(tokens):
    """
    Parse one `<key> COLON <value>` pair from the token deque.

    The value is either a STRING/CHARLIST token or a nested dictionary
    in squiggly braces.  Returns a (key, value) 2-tuple.  Raises
    Exception on any unexpected token.
    """
    key = string_or_charlist_rule(tokens)

    # The separating colon is consumed but not stored.
    token_type, token_value = tokens[0]
    if token_type != "COLON":
        e_msg = "Found invalid token (\"" + token_type + "\", \"" + \
            token_value + "\") when expecting COLON."
        raise Exception(e_msg)
    tokens.popleft()

    # Peek at the next token to decide which <value> production applies.
    token_type, token_value = tokens[0]
    if token_type in ["STRING", "CHARLIST"]:
        value = string_or_charlist_rule(tokens)
    elif token_type == "LEFT-BRACKET":
        value = dict_rule(tokens)
    else:
        e_msg = "Found invalid token (\"" + token_type + "\", \"" + \
            token_value + "\") when expecting STRING, CHARLIST, or dictionary."
        raise Exception(e_msg)

    return (key, value)

def pairlist_rule(tokens):
    """
    Parse a (possibly empty) sequence of key/value pairs.

    Pairs are accumulated while the next token could start another pair
    (STRING or CHARLIST); parsing stops at end of input or at any other
    token (e.g. RIGHT-BRACKET, which the caller consumes).  Returns a
    list of (key, value) 2-tuples.
    """
    pairs = []
    # Check for emptiness BEFORE peeking: the original indexed
    # tokens[0] first and raised IndexError on an empty token stream
    # (e.g. an empty or comment-only config file).
    while len(tokens) != 0:
        token_type, token_value = tokens[0]
        if token_type not in ["STRING", "CHARLIST"]:
            break
        key, value = pair_rule(tokens)
        pairs.append((key, value))
    return pairs


def top_level_rule(tokens):
    """
    Entry point of the recursive-descent parser.

    The top level of a config file is simply a bare pair list, so this
    delegates directly to pairlist_rule.
    """
    result = pairlist_rule(tokens)
    return result

def dict_rule(tokens):
    """
    Parse a brace-delimited dictionary:
    LEFT-BRACKET <pairlist> RIGHT-BRACKET.

    Consumes both braces and returns the enclosed pair list.  Raises
    Exception if either brace is missing.
    """
    token_type, token_value = tokens[0]
    if token_type != "LEFT-BRACKET":
        e_msg = "Found invalid token (\"" + token_type + "\", \"" + \
            token_value + "\") when expecting open squiggly brace."
        raise Exception(e_msg)
    tokens.popleft()

    my_dict = pairlist_rule(tokens)

    token_type, token_value = tokens[0]
    if token_type != "RIGHT-BRACKET":
        # Fixed: the original message said "open squiggly brace" here,
        # but this branch is expecting the CLOSING brace.
        e_msg = "Found invalid token (\"" + token_type + "\", \"" + \
            token_value + "\") when expecting close squiggly brace."
        raise Exception(e_msg)
    tokens.popleft()

    return my_dict

def parse_file(flname):
    """
    Tokenize and parse the named config file.

    Returns the chosen Abstract Syntax Tree: a list of (key, value)
    2-tuples, where each value is either a token string or a nested
    list of pairs.
    """
    significant = strip_comments_whitespace(tokenizer(flname))
    # Materialize into a deque so the parser can peek ahead with [0]
    # and consume with popleft().
    return top_level_rule(deque(significant))
