# Significantly based on Lundh's (2008) article;
# see http://effbot.org/zone/simple-top-down-parsing.htm.

import sys
import re, random
import pyphon, fsa

# Cache of Token_base subclasses created by the Token() factory, keyed by
# token id (e.g. '.', '|', '*symbol*').
Token_table = {}

# When True, operator callbacks (.op) are skipped during parsing, so only the
# parse tree is built and no FSA values are computed (set by parseRegex).
parseOnly = False

#=============================================================================
# Regular expressions are expressed as sequences of Tokens, and Tokens form
# the nodes of the parse tree derived from such sequences. Tokens provide 
# Pratt-style led ("left denotation") and nud ("null denotation") interface.

class Token_base(object):
    """Base class for all regex tokens / parse-tree nodes.

    Subclasses are generated by the Token() factory.  Each instance doubles
    as a parse-tree node: .first/.second hold operand subtrees, .value holds
    the computed FSA (when parseOnly is off), and .op is the optional
    callback that computes .value.  nud/led follow Pratt's scheme: nud
    ("null denotation") parses the token in prefix position, led ("left
    denotation") extends an already-parsed left operand.
    """
    value = None
    op = None

    def __repr__(self):
        if self.first and self.second:
            return "(%s %s %s)" % (self.id, self.first, self.second)
        elif self.first:
            return "(%s %s)" % (self.id, self.first)
        elif self.value:
            # BUG FIX: previously showed self.first, which is always falsy
            # on this branch; show the token's computed value instead.
            return "<%r>" % self.value
        else:
            return "<%s>" % (self.id,)

    def nud(self):
        # Default: this token may not start an expression.
        raise SyntaxError('Syntax Error: %r must not begin an expression' % self.id)

    def led(self, left):
        # Default: this token may not appear in infix/postfix position.
        raise SyntaxError('Unknown or ill-placed operator: %r' % self.id)

#=============================================================================
# Factory function and cache for creating new subclasses of Token_base

def Token(id, bp=0, op=None):
    """Return (creating and caching if necessary) the Token subclass for
    *id*.

    On a cache hit, the class's binding power is raised to max(bp, old).
    NOTE: the *op* argument is accepted but never stored here; callers
    (infix/prefix and the module-level declarations) assign .op directly.
    """
    existing = Token_table.get(id)
    if existing is not None:
        existing.lbp = max(bp, existing.lbp)
        return existing

    class tok(Token_base):
        pass
    tok.__name__ = "Token-" + id # for debugging
    tok.id = id
    tok.value = None
    tok.fsa = None
    tok.op = None
    tok.lbp = bp
    tok.first = tok.second = None
    Token_table[id] = tok
    return tok

def copyToken(t):
    """Return a new instance of t's token class carrying over t's node
    state (value, operands, binding power, and op callback)."""
    clone = Token_table.get(t.id)()
    for attr in ('value', 'first', 'second', 'lbp', 'op'):
        setattr(clone, attr, getattr(t, attr))
    return clone

#=============================================================================
# Helper functions for creating common sorts of Token_base subclasses.

def infix(id, bp, op=None):
    """Declare a left-associative binary operator.

    id: the operator's surface form; bp: its binding power (also used as
    the right binding power for the right operand, hence left
    associativity); op: optional zero-argument callable, bound to the
    token class, that computes self.value from the parsed operands.
    Returns the Token subclass.
    """
    def led(self, left):
        self.first = left
        self.second = expression(bp)
        if not parseOnly and self.op:
            self.value = self.op()
        return self

    cls = Token(id, bp, op)
    cls.led = led
    if op:
        cls.op = op
    return cls

def prefix(id, bp, op=None):
    """Declare a unary prefix operator; its operand is parsed with right
    binding power *bp*.  op, when given, computes self.value from the
    parsed operand.  Returns the Token subclass."""
    def nud(self):
        self.first = expression(bp)
        if not parseOnly and self.op:
            self.value = self.op()
        return self

    cls = Token(id, bp, op)
    cls.nud = nud
    if op:
        cls.op = op
    return cls

def groupers(openingId, closingId, op=None):
    """Declare a matched bracket pair.

    The opener parses a full inner expression and then requires
    closingId to follow.  With an *op*, the group computes op() into
    .value and is kept as a node; without one the group is transparent
    and the inner expression is returned directly.
    """
    def nud(self):
        inner = expression()
        advance(closingId)
        self.first = inner
        if parseOnly or not self.op:
            return inner
        self.value = self.op()
        return self

    opener = Token(openingId)
    opener.nud = nud
    if op:
        opener.op = op
    Token(closingId) # ensure the closer exists so advance() recognizes it

def advance(id=None):
    """Step to the next token, first verifying (when *id* is given) that
    the current token has that id.

    Raises SyntaxError on a mismatch.  Reads/advances the module globals
    'token' and 'next'.
    """
    global token
    if id and id != token.id:
        raise SyntaxError('Expected %r' % id)
    token = next()

def postfix(id, bp, op=None):
    """Declare a unary postfix operator (e.g. Kleene star/plus).

    NOTE: unlike infix/prefix, the *op* argument is not bound to the
    token class here; the module-level declarations assign .op to the
    returned class separately.
    """
    def led(self, left):
        self.first = left
        if not parseOnly and self.op:
            self.value = self.op()
        return self

    cls = Token(id, bp, op)
    cls.led = led
    return cls

#=============================================================================
# Declaration/creation of the Tokens that define the regular expression
# language

# A '*symbol*' token is a leaf node: its .first is a pyphon.Symbol (or None /
# fsa.AnySymbol; see tokenize) and it evaluates to itself.
Token("*symbol*") # an individual symbol (feature-bundle; pyphon.Symbol)
Token("*symbol*").nud = lambda self: self

infix(".", 20) # concatenation of acceptors
Token(".").op = lambda self: self.first.value + self.second.value 

infix("|", 10) # disjunction of acceptors
Token("|").op = lambda self: self.first.value | self.second.value

postfix("*", 30) # kleene closure
Token("*").op = lambda self: self.first.value.kleeneClosure()

postfix("+", 30) # nonzero kleene closure
Token("+").op = lambda self: self.first.value.kleeneClosure(positive=True)

groupers("(", ")") # parens

# optionality: <X> is X unioned with a single any->None transition
# NOTE(review): presumably the any->None transducer stands for the "skip"
# alternative -- confirm against fsa.makeSymbolTransducer's semantics.
groupers("<", ">", 
        lambda self: self.first.value | fsa.makeSymbolTransducer(fsa.AnySymbol,
            None))

# Feature specifications: [..] evaluates to an any->bundle transducer built
# from the Symbol accumulated in .first
groupers("[", "]", lambda self: fsa.makeSymbolTransducer(fsa.AnySymbol,
                                                         self.first.value))
# '+feat' / '-feat' inside a bundle build a one-feature Symbol with value 1
# (pos) or 0 (neg).  (The second prefix() call for each is a cache hit
# returning the same class, so it effectively just attaches .op.)
prefix("pos", 100)
prefix("pos", 100).op = lambda self: pyphon.Symbol(dict([(self.first.value,1)]))
prefix("neg", 100)
prefix("neg", 100).op = lambda self: pyphon.Symbol(dict([(self.first.value,0)]))
# '*feature*' leaves carry a bare feature name (a string) in .first/.value
Token("*feature*")
Token("*feature*").nud = lambda self: self
# ',' joins feature specifications within a bundle into one Symbol
# NOTE(review): relies on pyphon.Symbol.update returning self (unlike
# dict.update) -- confirm.
infix(",", 90)
Token(",").op = lambda self: \
    pyphon.Symbol().update(self.first.value).update(self.second.value)

Token("*end*") # appended by the tokenizer to signal end of regular expression

# Transduction/mapping
# Transduction/mapping
def mapOp(self):
    """Evaluate the ':' operator: build an input->output transducer.

    Both operands must be '*symbol*' leaves or '[' feature bundles; the
    underlying pyphon.Symbol comes from .first (symbol leaf) or
    .first.value (bundle).  Raises TypeError otherwise.
    """
    symbolCls = Token("*symbol*")
    bundleCls = Token("[")
    if not isinstance(self.first, (symbolCls, bundleCls)):
        raise TypeError("Left operand of ':' must be a symbol, got %r "
                        "instead." % self.first)
    if not isinstance(self.second, (symbolCls, bundleCls)):
        raise TypeError("Right operand of ':' must be a symbol, got %r "
                        "instead." % self.second)

    # symbol instances keep their underlying pyphon.Symbol as their "first"
    # attribute; bundles keep it one level down, in first.value
    if isinstance(self.first, symbolCls):
        inputSym = self.first.first
    else:
        inputSym = self.first.first.value
    if isinstance(self.second, symbolCls):
        outputSym = self.second.first
    else:
        outputSym = self.second.first.value
    return fsa.makeSymbolTransducer(inputSym, outputSym)
infix(":", 999, mapOp)

# Special symbols - the tokenizer handles creating instances of these.
# Keys must be non-alphanumeric (a word character would be read as a form).
specialSymbols = { \
    # '-' denotes the None symbol (stringifySymbol renders None as '-')
    '-': None,
    # '@' denotes the wildcard (stringifySymbol renders AnySymbol as '@')
    '@': fsa.AnySymbol
}

#=============================================================================
# The actual parser implementation, divided into two main steps: (1)
# tokenization and (2) recursive top-down expression evaluation.

# Tokenization

# Each match yields either a run of word characters (a form or a feature
# name) or a single non-space character (an operator); whitespace between
# tokens is skipped.
tokenPattern = re.compile(r"\s*(?:(\w+)|(.))", re.UNICODE)
def tokenize(regexp, baseFeatures=None, diacriticFeatures=None):
    """Generate Token instances for the regular-expression string *regexp*.

    baseFeatures/diacriticFeatures are passed through to pyphon.Form when
    a run of word characters is interpreted as a form (symbol sequence).
    Concatenation is implicit in the surface syntax, so '.' tokens are
    inserted here wherever two concatenands are adjacent.  A '*end*'
    token is always yielded last.  Raises SyntaxError on mismatched
    brackets or unknown operators.
    """
    inBundle = False   # currently inside a [...] feature bundle?
    noConcat = True    # True when an implicit '.' must NOT be inserted
    escaped = False    # previous character was a backslash
    for formOrFeature, operator in tokenPattern.findall(regexp):
        #print 'form =', formOrFeature, 'operator =', operator
        if (formOrFeature or (escaped and operator)) and not inBundle:
            if escaped and operator:
                # a backslash-escaped operator character is read as a form
                escaped = False
                formOrFeature = operator
            # so it's a form
            for symbol in pyphon.Form(formOrFeature, baseFeatures,
                    diacriticFeatures):
                #print 'symbol =', symbol
                t = Token_table["*symbol*"]()
                # symbol tokens keep their pyphon.Symbol in .first and an
                # any->symbol transducer in .value
                t.first = symbol
                t.value = fsa.makeSymbolTransducer(fsa.AnySymbol, symbol)
                if not noConcat:
                    yield Token_table['.']()
                noConcat = False
                yield t
        elif formOrFeature and inBundle:
            # so it's a feature name
            token = Token_table["*feature*"]
            t = token()
            t.first = formOrFeature
            t.value = formOrFeature
            yield t
        elif (operator in ('+', '-')) and inBundle:
            # inside a bundle, +/- are feature-polarity prefixes rather
            # than Kleene-plus or the '-' special symbol
            t = Token_table.get(operator=='+' and 'pos' or 'neg')()
            yield t
        #elif inBundle:
        #    SyntaxError('Uninterpretable in feature specification: %r' % \
        #            operator)
        elif operator in ('[', ']'):
            if (operator == '[' and inBundle) or \
               (operator == ']' and not inBundle):
                   raise SyntaxError('Mismatched %r (no nesting allowed)' % operator)

            if operator == '[':
                inBundle = True
                # a bundle after a concatenand needs an implicit '.'
                if not noConcat:
                    yield Token_table['.']()
            elif operator == ']':
                inBundle = False
                noConcat = False
            yield Token_table.get(operator)()
        elif operator == '\\':
            # remember the escape; the next character is taken literally
            escaped = True
        elif operator in specialSymbols:
            # '-' / '@': synthesize a symbol token for None / AnySymbol
            t = Token("*symbol*")()
            t.first = specialSymbols[operator]
            t.value = fsa.makeSymbolTransducer(fsa.AnySymbol,
                    specialSymbols[operator])
            if not noConcat:
                yield Token_table['.']()
            noConcat = False
            yield t
        else:
            token = Token_table.get(operator)
            if not token:
                raise SyntaxError("Unknown or ill-placed operator: %s" % operator)

            t = token()
            # an opening grouper after a concatenand needs an implicit '.'
            if t.id in ('(', '<') and not noConcat:
                yield Token_table['.']()
            # after these operators the next token starts a fresh operand,
            # so no implicit concatenation should precede it
            if t.id in ('(', '|', ':', '<'):
                noConcat = True
            else:
                noConcat = False

            yield t

    endToken = Token_table["*end*"]
    yield endToken()

# Expression evaluation
# This is the heart of the parser.
def expression(rbp=0):
    """Parse and return one expression whose operators all bind more
    tightly than *rbp* (the caller's right binding power).

    The classic Pratt loop: the current token's nud starts the
    expression, then any stronger-binding operator extends it via led.
    Reads and advances the module globals 'token' and 'next'.
    """
    global token
    current = token
    token = next()
    left = current.nud()
    while token.lbp > rbp:
        current = token
        token = next()
        left = current.led(left)
    return left

###########################################################################
# The main public interface function

def parseRegex(regex, inventory, verbose=False, baseFeatures=None,
        diacriticFeatures=None, justParse=False, returnParse=False):
    """Parse *regex* and (unless justParse) build its transducer.

    regex:       the regular-expression string.
    inventory:   symbols handed to expandFeatures to expand feature-bundle
                 arcs; unused when justParse is true.
    verbose:     print the regex and its parse tree.
    baseFeatures/diacriticFeatures: forwarded to tokenize().
    justParse:   return the bare parse tree; no FSA values are computed.
    returnParse: return the parse tree (with .value set) instead of the
                 transducer.
    Raises SyntaxError if the expression ends prematurely or is malformed.

    NOTE: uses module-level globals (token, next, parseOnly), so this is
    not re-entrant.  Python 2 only (print statements, generator .next).
    """
    global token, next, parseOnly
    parseOnly = justParse
    # 'next' becomes the bound .next method of the token generator;
    # expression() and advance() call it to pull tokens.
    next = tokenize(regex, baseFeatures=baseFeatures,
            diacriticFeatures=diacriticFeatures).next
    token = next()
    if verbose:
        print 'Regular expression: %r' % regex

    try:
        result = expression()
    except StopIteration:
        # the tokenizer ran dry mid-expression
        raise SyntaxError('Unexpected end of regular expression.')

    if verbose:
        print 'Parse: %r' % result

    if parseOnly:
        return result

    if result.value:
        expandFeatures(result.value, inventory)

    #result.value.dotToFile('Regex-%s.dot' % regex)
    if returnParse:
        return result
    return result.value

def expandFeatures(transducer, inventory):
    """Replace feature-bundle arc labels with concrete symbol labels.

    For each arc label whose input or output is a feature bundle,
    enumerate the inventory symbols that unify with it and emit one
    fsa.Label per surviving (input, output) pair.  Labels that are pure
    wildcards (any->any) or pure epsilons (None->None) pass through
    untouched.  Mutates transducer.arcs in place.
    """
    passthrough = (fsa.AnySymbol, None)
    for src in transducer.arcs:
        for dest in transducer.arcs[src]:
            expanded = []
            for label in transducer.arcs[src][dest]:
                inp, out = label.input, label.output
                if (inp == out == fsa.AnySymbol) or (inp is None and out is None):
                    expanded.append(label)
                    continue

                # a wildcard/epsilon side stays fixed; a concrete side is
                # matched against the whole inventory
                inCandidates = [inp] if inp in passthrough else inventory
                outCandidates = [out] if out in passthrough else inventory

                for segI in inCandidates:
                    for segO in outCandidates:
                        if segI is None:
                            keep = segO.unifies(out)
                        elif segO is None:
                            keep = segI.unifies(inp)
                        else:
                            keep = segI.unifies(inp) and segO.unifies(out)
                        if keep:
                            expanded.append(fsa.Label(segI, segO, label.weight))
            transducer.arcs[src][dest] = expanded

# Useful for testing
def randomRegex(inventory):
    """Return a random regular expression string over *inventory*.

    Useful for testing.  Branch probabilities: 0.5 single symbol,
    0.1 star, 0.1 plus, 0.15 disjunction, 0.15 concatenation.
    """
    r = random.random()
    if r > 0.5:
        # symbol
        return str(random.choice(inventory))
    elif r > 0.4:
        # star
        return "(%s)*" % randomRegex(inventory)
    elif r > 0.3:
        # plus
        return "(%s)+" % randomRegex(inventory)
    elif r > 0.15:
        # disjunction
        return "(%s|%s)" % (randomRegex(inventory), randomRegex(inventory))
    else:
        # concatenation
        # BUG FIX: this was 'elif r > 0.', which silently returned None
        # when random.random() yielded exactly 0.0
        return "(%s%s)" % (randomRegex(inventory), randomRegex(inventory))


def regexIsStarFree(regex):
    """Return True when *regex* contains no Kleene-star operator."""
    return regex.find('*') == -1

def regexIsFinalStarFree(regex):
    """Return True iff the rightmost concatenand of *regex* (through
    every disjunction branch) is a plain symbol rather than a starred or
    plussed subexpression."""
    def __recurse(root):
        # find the right most concatenand
        node = root
        while node.id == '.':
            node = node.second

        if node.id == '*symbol*':
            return True
        elif node.id in ('*', '+'):
            return False
        else:
            # disjunction (or other binary node): both sides must qualify
            return __recurse(node.first) and __recurse(node.second)

    # BUG FIX: parseRegex takes the inventory as its second positional
    # argument; it was omitted here, so every call raised TypeError.
    # Under justParse the inventory is never consulted, so None is safe.
    parse = parseRegex(regex, None, justParse=True)
    return __recurse(parse)

def disjunctionStringExtension(parse):
    """
    Given a parse tree for regex containing only concatenations and
    disjunctions, yield each Form that it matches.
    """
    kind = parse.id

    if kind == '*symbol*':
        # leaf: a single symbol
        yield stringifySymbol(parse.first)

    elif kind == ':':
        # mapping: render as input:output
        yield '%s:%s' % (stringifySymbol(parse.first.first),
                         stringifySymbol(parse.second.first))

    elif kind == '.':
        # concatenation: cross product of both sides' extensions
        for head in disjunctionStringExtension(parse.first):
            for tail in disjunctionStringExtension(parse.second):
                yield head + tail

    elif kind == '|':
        # disjunction: union of both sides' extensions
        for branch in (parse.first, parse.second):
            for ext in disjunctionStringExtension(branch):
                yield ext

    else:
        raise ValueError('I pity you.')

def needsEscape(string):
    """Return True when *string* cannot appear bare in a regex.

    Only strings consisting entirely of word characters survive the
    tokenizer as a single form (see tokenPattern); anything else must be
    backslash-escaped by stringifySymbol.
    """
    # \Z anchors the match at end-of-string (re.fullmatch is Python 3
    # only).  BUG FIX: the unanchored re.match(r'\w+', ...) accepted any
    # string with a word-character prefix, e.g. 'a*'.
    if re.match(r'\w+\Z', string):
        return False
    return True

def stringifySymbol(symb):
    """Render a symbol in regex surface syntax.

    None becomes '-' and fsa.AnySymbol becomes '@' (the inverses of the
    specialSymbols table); any other symbol is str()'d and
    backslash-escaped when it contains non-word characters.
    """
    if symb is None:
        return '-'
    if symb == fsa.AnySymbol:
        return '@'
    text = str(symb)
    if needsEscape(text):
        return '\\' + text
    return text

def parseTreeToRegex(parse):
    """
    Given a parse tree, returns a string with that parse.
    """
    nodeId = parse.id
    if nodeId == '*symbol*':
        return stringifySymbol(parse.first)
    if nodeId == '*feature*':
        return str(parse.first)

    # binary nodes: format applied to (first, second)
    binaryFormats = {
        '.': '%s%s',
        ':': '%s:%s',
        '|': '(%s | %s)',
        ',': '%s, %s',
    }
    if nodeId in binaryFormats:
        return binaryFormats[nodeId] % (parseTreeToRegex(parse.first),
                parseTreeToRegex(parse.second))

    # unary nodes: format applied to first only
    unaryFormats = {
        '*': '(%s)*',
        '+': '(%s)+',
        'pos': '+%s',
        'neg': '-%s',
        '[': '[%s]',
    }
    if nodeId in unaryFormats:
        return unaryFormats[nodeId] % parseTreeToRegex(parse.first)

    raise ValueError('wtf')



def rightmostConcatenand(parse):
    """
    Returns the subtree of the given parse tree that corresponds to the
    rightmost concatenand that is not dominated by a disjunction or star.
    """
    node = parse
    # walk down the right spine of the concatenation chain
    while node.id == '.':
        node = node.second
    return node

