from pyphon import *
from fsa import *
import regex
import RNF
import random, sys, os, string, re, math
from copy import deepcopy
from pprint import pprint
import progressbar

import harmonic
#try:
#    import harmonic
#except:
#    print "Warning: Unable to import module \"harmonic\". This probably means\n"\
#          "you don't have pulp or GLPK installed properly. Until this is\n"\
#          "remedied, you won't be able to use any of the Harmonic Grammar\n"\
#          "functions in pyphon.pycbg. Attempting to do so will probably\n"\
#          "crash with some sort of NameError.\n"

#============================================================================

def frozendict(d):
    """
    Return a hashable, immutable stand-in for the dict *d*: a tuple of its
    (key, value) pairs sorted by key. Equal dicts map to equal tuples, so
    the result is usable as a dict key or set element.
    """
    # d.items() (rather than Python-2-only iteritems()) keeps this working
    # under both Python 2 and Python 3.
    return tuple(sorted(d.items()))

#============================================================================

class InconsistentERCSetError(ValueError):
    """
    Raised when a set of elementary ranking conditions admits no consistent
    ranking. The offending ERC set, if known, is kept on ``self.eSet``.
    """
    def __init__(self, eSet=None):
        super(InconsistentERCSetError, self).__init__()
        # The inconsistent ERC set that triggered the error (may be None).
        self.eSet = eSet

#============================================================================

# Pick the best available factorial implementation.
try:
    factorial = math.factorial  # available since Python 2.6
except AttributeError:
    try:
        import scipy
        # NOTE: modern SciPy moved this to scipy.special.factorial, so the
        # attribute access itself may raise AttributeError -- catch it too
        # so we still fall through to the pure-Python version.
        factorial = scipy.factorial
    except (ImportError, AttributeError):
        # Pure-Python fallback. functools.reduce exists on both Python 2
        # and 3 (the bare builtin 'reduce' was removed in Python 3).
        from functools import reduce
        factorial = lambda n: reduce(lambda a, b: a * (b + 1), range(n), 1)

#============================================================================

class Constraint(Transducer):
    """
    A weighted finite-state transducer representing an OT constraint. The
    machine accepts sigma-star over its inventory, and each arc carries a
    frozendict weight mapping constraint name -> violation count, so the
    total weight along a path tallies the violations that path incurs.
    """
    # Conventional end-of-string marker symbol.
    stopSymbol = '#'

    def __init__(self, name, inventory, transducerOrRegex=None,
            baseFeatures=None, diacriticFeatures=None, filter=False,
            copying=False, strict=False):
        """
        Creates a weighted Transducer whose weights can be used to count the
        number of maximal substrings of any string that would be accepted by
        self. The given inventory unioned with transducer.symbols() is taken as
        sigma, and the returned machine accepts sigma-star, but with arcs
        weighted 1 if their traversal implies that a maximal substring matching
        the transducer has just ended. See also Transducer.matchEmitter().
        """
        super(Constraint, self).__init__()
        # Filters get an empty name, so their arcs never contribute a named
        # violation count.
        if filter:
            self.name = ''
        else:
            self.name = name

        if not inventory:
            raise ValueError('Sorry, bro, you need to specify an inventory.')

        # Normalize the inventory to Symbol objects; epsilon (None) and the
        # wildcard AnySymbol are never inventory members.
        inventory = set([Symbol(i) for i in inventory or []]) - set([None,
            AnySymbol])
        self.inventory = inventory

        # A string argument is compiled as a regular expression over the
        # inventory; anything else is taken to be a ready-made transducer.
        if type(transducerOrRegex) is str:
            transducer = regex.parseRegex(transducerOrRegex,
                    inventory,
                    baseFeatures=baseFeatures,
                    diacriticFeatures=diacriticFeatures)
        else:
            transducer = transducerOrRegex

        #transducer.dotToFile("Transducer-%s.dot" % name)
        #inventory = ((transducer and transducer.symbols() or set()) | \
        #        set(inventory or [])) - set([None, AnySymbol])

        self.filter = filter

        # With no pattern machine there is nothing further to build.
        if not transducer:
            return

        # Don't do any left/right constraintification if it's a filter
        if copying or filter:
            # Copy the given machine's structure wholesale.
            self.arcs, self.starts, self.finals = deepcopy((transducer.arcs,
                transducer.starts, transducer.finals))

            if copying:
                return

            # expand @s (no (epsilon,epsilon))
            self.expandAnySymbol(inventory | set([None]), (None,None))
            if not strict:
                self.addDeletionSelfLoops(inventory | set([None]))
            #newFinal = nextNode()
            #for fin in self.finals:
            #    self[fin,newFinal] = Label(Epsilon, weight=frozendict({name:0}))
            #self.finals.append(newFinal)
            # Give every unweighted arc an explicit zero-violation weight.
            for src, dst, label in self:
                if label.weight is None:
                    label.weight = frozendict({self.name:0})
            return

        # expand @s (no (epsilon,epsilon))
        transducer.expandAnySymbol(inventory | set([None]), (None,None))
        # -666 is a sentinel output symbol marking deletion arcs; it is
        # rewritten to epsilon (None) or removed in the cleanup pass below.
        transducer.addDeletionSelfLoops(inventory | set([None]),
                deletionSymbol=-666)

        #transducer.dotToFile('%s_expdel.dot' % self.name)

        # Subset construction: each state of self corresponds to a set of
        # states of the pattern transducer.
        stateSets = {}
        startState = nextNode()
        stateSets[startState] = set([transducer.starts[0]])
        stateSetsToDo = [startState]
        self.starts = [startState]
        while stateSetsToDo:
            node = stateSetsToDo.pop()
            for input in inventory | set([None]):
                for output in inventory | set([None, -666]):
                    if input is output is None: continue
                    if input is None and output == -666: continue
                    if output in (None, -666):
                        # Deletion (-666) and true-epsilon outputs are
                        # tracked together.
                        dests = transducer.destsFrom(stateSets[node], Label(input,
                        -666)) | transducer.destsFrom(stateSets[node], Label(input,
                        None))
                        #print "%s -> %s: %r" % (input, output, dests)
                    else:
                        dests = transducer.destsFrom(stateSets[node], Label(input,
                            output))
                    # A new pattern match may begin at any point, so the
                    # pattern's start state is always in the destination set.
                    dests = dests | set([transducer.starts[0]])

                    ## OLD WAY
                    ## remove final states from considered destinations... and if
                    ## there were any, that's a violation
                    #destsNoFinals = set([d for d in dests if d not in
                    #        transducer.finals])
                    ##print 'dests = ', dests, 'destsNoFinals = ', destsNoFinals
                    #violating = (dests != destsNoFinals)
                    ##print 'violating = ', violating

                    #if destsNoFinals in stateSets.values():
                    #    for k,v in stateSets.iteritems():
                    #        if v == destsNoFinals:
                    #            destState = k
                    #            break
                    #else:
                    #    destState = nextNode()
                    #    stateSets[destState] = destsNoFinals
                    #    stateSetsToDo.append(destState)

                    # NEW WAY: greedy parse
                    # Every reachable pattern-final state counts as one
                    # completed match, i.e. one violation on this arc.
                    violations = 0
                    for d in dests:
                        if d in transducer.finals:
                            violations += 1

                    # Reuse an existing state for an already-seen state set,
                    # otherwise allocate one and queue it for expansion.
                    if dests in stateSets.values():
                        for k,v in stateSets.iteritems():
                            if v == dests:
                                destState = k
                                break
                    else:
                        destState = nextNode()
                        stateSets[destState] = dests
                        stateSetsToDo.append(destState)

                    if output == -666:
                        # Deletion arcs never incur violations themselves.
                        self[node, destState] = Label(input, output,
                            frozendict({self.name:0}))
                    else:
                        self[node, destState] = Label(input, output,
                            frozendict({self.name:int(violations)}))

        # add violations
        #for node in self.nodes():
        #    if any([s in transducer.finals for s in stateSets[node]]):
        #        # violation
        #        arcs = self[node]
        #        for dest, labels in arcs.iteritems():
        #            for l in labels:
        #                l.weight = frozendict({name:1})
        # Clean up the -666 deletion sentinel: drop a deletion arc whenever an
        # equivalent input:epsilon arc already exists between the same nodes,
        # otherwise rewrite its output to epsilon.
        toDelete = []
        for srcNode, destNode, label in self:
            if label.output == -666:
                for srcNode2, destNode2, label2 in self:
                    if srcNode is srcNode2 and destNode is destNode2 and\
                            label2.input == label.input and label2.output is None:
                        toDelete.append((srcNode, destNode, label))
                        break
                if (srcNode, destNode, label) not in toDelete:
                    label.output = None
        for s,d,l in toDelete:
            del self[s,d,l]


        # The machine accepts sigma-star, so every state is final.
        self.finals = self.nodes()

    def __repr__(self):
        return "Constraint(%r)" % self.name

    def __str__(self):
        return self.name

    def addWeights(self, w1, w2):
        """
        Pointwise sum of two violation dicts; missing entries count as 0.
        """
        w = {}
        for k in set(w1.keys()) | set(w2.keys()):
            w[k] = w1.get(k, 0) + w2.get(k, 0)
        return w

    def minWeights(self, w1, w2):
        """
        Pointwise minimum of two violation dicts; missing entries count as 0.
        """
        w = {}
        for k in set(w1.keys()) | set(w2.keys()):
            w[k] = min(w1.get(k, 0), w2.get(k, 0))
        return w

    def violationWeight(self, n):
        """
        Weight recording n violations of this constraint.
        """
        return frozendict({self.name:n})

    def __setitem__(self, fromNodeOrPair, dictOrLabel):
        """
        Like Transducer.__setitem__, but additionally accepts an
        (input, output, violationCount) triple, converting the bare int
        count into a proper violation weight first.
        """
        if type(fromNodeOrPair) is tuple and len(fromNodeOrPair)==2 and \
                type(dictOrLabel) is tuple and len(dictOrLabel)==3 and \
                type(dictOrLabel[2]) is int:
            dictOrLabel = (dictOrLabel[0], dictOrLabel[1], 
                           self.violationWeight(dictOrLabel[2]))
        return super(Constraint, self).__setitem__(fromNodeOrPair, dictOrLabel)

    def intersection(self, otherCon, weightsFn=None):
        """
        Product construction: returns a new Constraint accepting the
        intersection of self and otherCon. Arc weights are combined with
        weightsFn (default: pointwise addition via addWeights).
        """
        inventory = self.inventory | otherCon.inventory
        result = Constraint('%s&%s' % (self.name, otherCon.name),
                inventory)
        weightsFn = weightsFn or self.addWeights
        #print 'Constraint intersection (weightsFn=%r)' % weightsFn
        nextStates = [(s1, s2) for s1 in self.starts for s2 in otherCon.starts]
        stateNames = dict([(ns, nextNode()) for ns in nextStates])

        while nextStates:
            #print 'nextStates = %s' % nextStates
            s1, s2 = nextStates.pop(0)
            #print 's1 =', s1, 's2 =', s2
            # A product state is final/start iff both components are.
            if s1 in self.finals and s2 in otherCon.finals:
                result.finals.append(stateNames[(s1,s2)])
            if s1 in self.starts and s2 in otherCon.starts:
                result.starts.append(stateNames[(s1,s2)])

            # Pair up arcs from both machines with identical input:output
            # mappings.
            selfLabels = self.labelsFrom([s1])
            otherLabels = otherCon.labelsFrom([s2])
            interLabels = []
            for sl in selfLabels:
                for ol in otherLabels:
                    if sl.input == ol.input and sl.output == ol.output:
                        interLabels.append((sl,ol))
            #print 'interLabels = %s' % interLabels
            for sl,ol in interLabels:
                selfDests = self.destsFrom([s1], sl, ignoreWeights=False)
                otherDests = otherCon.destsFrom([s2], ol, ignoreWeights=False)
                for (d1, d2) in [(d1,d2) for d1 in selfDests for d2 in \
                        otherDests]:

                    #print 'd1 =', d1, 'd2 =', d2
                    if (d1,d2) not in stateNames:
                        stateNames[(d1,d2)] = nextNode()
                        nextStates.append((d1,d2))

                    src = stateNames[(s1,s2)]
                    dst = stateNames[(d1,d2)]
                    assert(sl.input==ol.input and sl.output==ol.output)
                    #print 'adding: %s -> %s %r' % (src, dst,
                    #        (sl.input,sl.output))
                    weight = frozendict(weightsFn(dict(sl.weight),
                                dict(ol.weight)))
                    #print 'sl =', repr(sl), 'ol =', repr(ol)
                    #print '%s -> %s %s:%s/%s' % (src, dst, sl.input, sl.output,
                    #        weight)
                    result[src, dst] = Label(sl.input, sl.output, weight)

        #if Transducer.minimizeOperations:
        #    print 'min'
        #    result = Constraint(result.name, result.inventory, result.minimized(),
        #            copying=True)

        #print '%d states in result (type=%r)' % (len(result.nodes()),
        #        type(result))

        # remove empty-string constraints
        for src, dest, label in result:
            label.weight = tuple([(con,viol) for con,viol in label.weight if con])

        return result.trim()

    def onInput(self, form):
        """
        Compose this constraint with the input string *form*: builds an
        acyclic transducer whose states are 'position-stateid' pairs, so
        its paths enumerate candidate outputs for that input along with
        their violation weights.
        """
        nu = Transducer() # Constraint?
        end = len(form)
        nu.starts = [Node(str(0)+'-'+str(X.id)) for X in self.starts]
        nu.finals = [Node(str(end)+'-'+str(X.id)) for X in self.finals]
        for (ix,symbol) in enumerate(form):
            #sys.stdout.write('.'); sys.stdout.flush()
            for N1,N2,L in self: 
                if L.input == symbol:
                    # Consuming an input symbol advances the position.
                    nu[str(ix)+'-'+str(N1.id),str(ix+1)+'-'+str(N2.id)] = L
                if L.input == None:
                    # Epsilon-input arcs keep the position fixed.
                    nu[str(ix)+'-'+str(N1.id),str(ix)+'-'+str(N2.id)] = L
        #sys.stdout.write('\n'); sys.stdout.flush()
        # Epsilon-input arcs also remain available past the final symbol.
        for N1,N2,L in self: 
            sys.stdout.write('.'); sys.stdout.flush()
            if L.input == None: nu[str(end)+'-'+str(N1.id),str(end)+'-'+str(N2.id)] = L
        #sys.stdout.write('\ntrimming...\n'); sys.stdout.flush()
        return nu.trim()

    def collapseStartStates(self):
        """
        Add a new start state with epsilon,epsilon arcs to each old start state,
        and mark the old start states as non-start.
        """
        newStart = nextNode()
        for s in self.starts:
            label = Label(None, None, frozendict({self.name:0}))
            self[newStart, s] = label
        del self.starts[:]
        self.starts.append(newStart)

    def stringifyLabel(self, label):
        """
        Human-readable rendering of an arc label for DOT output:
        'input:output' plus the nonzero violations, using the epsilon and
        empty-set glyphs where appropriate.
        """
        eps = u'\u03b5'
        null = u'\u00d8'
        weightDict = dict(label.weight)
        weightStr = [(v>1 and '%d*'%v or '')+k for k,v in weightDict.items() if v]
        if len(weightStr):
            weightStr = '{' + ', '.join(weightStr) + '}'
        else:
            weightStr = null
        return "%s:%s\\n%s" % (label.input or eps, label.output or eps,
                weightStr)
        

        
class LeftRightContextConstraint(Constraint):
    """
    A constraint of the form *target / left __ right: counts occurrences of
    the (star-free) target regex when flanked by the given left and right
    context regexes.
    """
    def __init__(self, name, target, inventory, left=None, right=None, 
            baseFeatures=None, diacriticFeatures=None, minimized=True):
        """
        Creates a weighted Transducer whose weights can be used to count the
        number of maximal substrings of any string that would be accepted by
        self. The given inventory unioned with transducer.symbols() is taken as
        sigma, and the returned machine accepts sigma-star, but with arcs
        weighted 1 if their traversal implies that a maximal substring matching
        the transducer has just ended. See also Transducer.matchEmitter().
        """
        #super(Constraint, self).__init__()
        Transducer.__init__(self)
        self.name = name
        if not inventory:
            raise ValueError('Sorry, bro, you have to specify an inventory.')
        self.inventory = set([Symbol(i) for i in inventory]) - set([None,
            AnySymbol])

        # target must consist only of concatenation and disjunction, i.e., no
        # stars
        if not regex.regexIsStarFree(target):
            raise ValueError('Target regex %r for constraint %r is not star '\
                    'free!' % (target, name))

        if '|' in target:
            # disjunctive target
            # lay some pipes
            parse = regex.parseRegex(target, self.inventory, baseFeatures=baseFeatures,
                    diacriticFeatures=diacriticFeatures, returnParse=1)
            # TODO: optimize things by only doing this for the rightmost
            # concatenand
            ixn = None
            # Build one sub-constraint per disjunct of the target and
            # intersect them all together.
            # NOTE(review): 'l' below appears to be unused -- confirm before
            # removing.
            l = list(regex.disjunctionStringExtension(parse))
            for form in regex.disjunctionStringExtension(parse):
                subcon = LeftRightContextConstraint(name, str(form),
                        self.inventory, left, right, baseFeatures,
                        diacriticFeatures, minimized=False)
                if not ixn:
                    ixn = subcon
                else:
                    ixn = ixn & subcon
            # NOTE(review): when minimized is False, 'ixn' is never copied
            # into self in this branch -- confirm this is intended.
            if minimized:
                self.overwrite(ixn.minimized())
        else:
            self.__constructFromStringTarget(target, left, right, inventory,
                    baseFeatures, diacriticFeatures)
            if minimized:
                self.overwrite(self.minimized())


    def __constructFromStringTarget(self, target, left=None, right=None,
            inventory=None, baseFeatures=None, diacriticFeatures=None):
        """
        Build the machine for a single (non-disjunctive, star-free) target
        string with optional left/right context regexes.
        """
        # chop everything before the last symbol off of the target and append it
        # to the left context
        # We assume that target at this point is *just a string* - no
        # disjunctions or stars.
        left = left and left.strip() or ''
        right = right and right.strip() or ''
        p = regex.parseRegex(target, self.inventory, baseFeatures=baseFeatures,
                diacriticFeatures=diacriticFeatures, returnParse=1)
        rmc = regex.rightmostConcatenand(p)
        pstr = regex.parseTreeToRegex(p)
        rmcstr = regex.parseTreeToRegex(rmc)
        #print 'pstr =', pstr
        #print 'rmcstr =', rmcstr
        idx = pstr.rindex(rmcstr)
        #print 'idx =', idx

        # Everything before the rightmost concatenand migrates into the left
        # context; the target shrinks to just that last concatenand.
        left += pstr[:idx]
        target = pstr[idx:]

        #print 'left =', left
        #print 'target =', target
        #print 'right =', right

        # build transducers from the target, left context, and right context
        transTarget = regex.parseRegex(target, self.inventory, baseFeatures=baseFeatures,
                diacriticFeatures=diacriticFeatures)
        transLeft = left and regex.parseRegex(left, self.inventory, baseFeatures=baseFeatures,
                diacriticFeatures=diacriticFeatures)
        transRight = right and regex.parseRegex(right, self.inventory, baseFeatures=baseFeatures,
                diacriticFeatures=diacriticFeatures)

        # convert everything in the inventory to a Symbol, if it's not already
        if inventory:
            inventory = set([Symbol(i) for i in inventory])
        # make sure epsilon (None) and the AnySymbol are not in the inventory
        inventory = set(inventory or []) - set([None, AnySymbol])
        self.inventory = inventory

        # make recursive pattern counters for the left and right 
        leftpc = None
        rightpc = None
        if left:
            leftpc = self.leftRecursivePatternCounter(transLeft, transTarget,
                    inventory, False)
            #leftpc.dotToFile('%s_leftpc.dot' % self.name)

        if right:
            # The right-context counter is built on reversed machines.
            rightpc = self.leftRecursivePatternCounter(transRight, transTarget,
                    inventory, True)
            #rightpc.dotToFile('%s_rightpc.dot' % self.name)

        if not (left or right):
            # No context at all: count bare occurrences of the target by
            # pairing it with a trivial one-state context machine.
            emptyCon = Constraint('empty', self.inventory)
            node = nextNode()
            emptyCon.starts.append(node)
            emptyCon.finals.append(node)
            pc = self.leftRecursivePatternCounter(emptyCon, transTarget,
                    inventory, False)
            #pc.dotToFile('%s_pc.dot' % self.name)
            self.overwrite(pc)
        elif left and right:
            # Both contexts: combine the two counters with a pointwise
            # minimum, so a violation is counted only when both agree.
            intersection = Constraint('leftpc', inventory, leftpc, 
                    copying=True).intersection(Constraint('rightpc', inventory,
                        rightpc, copying=True),
                        weightsFn=self.minWeights)#.minimized()
            #intersection.dotToFile('%s_intersection.dot' % self.name)
            self.overwrite(intersection)
        elif left:
            self.overwrite(leftpc)
        elif right:
            self.overwrite(rightpc)

    def leftRecursivePatternCounter(self, context, target, inventory,
            reverse):
        """
        Build a deterministic weighted machine counting occurrences of
        *target* immediately preceded by *context*. When reverse is True,
        both machines are reversed first and the result is reversed back,
        yielding a following-context counter instead.
        """
        # expand @s (no (epsilon,epsilon))
        context.expandAnySymbol(inventory | set([None]), exceptInOut=(None,None))
        target.expandAnySymbol(inventory | set([None]), exceptInOut=(None,None))

        if reverse:
            context = context.reversed()
            target = target.reversed()

        # put violation weights on arcs entering final states
        # has to happen before deletion self loops are added
        for src, dest, label in target:
            if dest in target.finals:
                label.weight = frozendict({self.name:1})
            else:
                label.weight = frozendict({self.name:0})

        # zero violations in the context
        for src, dest, label in context:
            label.weight = frozendict({self.name:0})

        # concatenate
        # NOTE(review): auto-minimization is switched off around the
        # concatenation -- presumably to keep the weighted arcs intact;
        # confirm before changing.
        Transducer.minimizeOperations = False
        transducer = (context + target).withoutEpsilons()
        Transducer.minimizeOperations = True

        #if reverse:
        #    transducer.dotToFile('%s_rightconcat.dot' % self.name)
        #else:
        #    transducer.dotToFile('%s_leftconcat.dot' % self.name)

        # deletion self loops
        transducer.addDeletionSelfLoops(inventory,#why?: | set([None]),
                weight=frozendict({self.name:0}))

        # start state self loops
        # Every non-(epsilon,epsilon) mapping loops on the start state with
        # zero violations, so the machine accepts arbitrary prefixes.
        for start in transducer.starts:
            for s1 in inventory | set([None]):
                for s2 in inventory | set([None]):
                    if s1 is s2 is None:
                        continue
                    label = Label(s1, s2, frozendict({self.name:0}))
                    if (start, start, label) in transducer: 
                        continue
                    transducer[start, start] = label

            # make start state also final
            if start not in transducer.finals:
                transducer.finals.append(start)

        # disjunctive determinization
        def addWeights(ns, destStateSet, label, mac):
            # Sum the weights of every ns -> destStateSet arc in machine
            # 'mac' that carries this label's input:output mapping.
            weight = {}
            for src in ns:
                for dest in destStateSet:
                    if (src, dest) not in mac:
                        continue
                    for sdlabel in mac[src,dest]:
                        if sdlabel.input == label.input and \
                                sdlabel.output == label.output:
                            for c,v in sdlabel.weight:
                                if c not in weight:
                                    weight[c] = 0
                                weight[c] += v
            return frozendict(weight)

        M = transducer.determinized(weightFn=addWeights)

        # faithfulness filter for debugging:
        #toDel = []
        #for src, dest, label in M:
        #    if label.input != label.output:
        #        toDel.append((src, dest, label))
        #for td in toDel:
        #    del M[td]

        if reverse:
            return M.reversed()#.minimized()
        return M#.minimized()

    def __repr__(self):
        return 'LeftRightContextConstraint(%r)' % self.name


#============================================================================

def constraintFromDot(fileObj, name, inventory, baseFeatureMap,
        diacriticFeatureMap):
    """
    Constructs a constraints by parsing a Graphviz DOT file.
    TODO: Use the pydot module to properly parse this stuff. This is just a
    quick and dirty prototype.
    """
    nodeMap = {} # node name -> node object
    def getNode(nodeName):
        # Memoized mapping from DOT node names to Node objects.
        try:
            return nodeMap[nodeName]
        except KeyError:
            n = nextNode()
            nodeMap[nodeName] = n
            return n

    def zeroViolns():
        # Default weight: zero violations of this constraint.
        return frozendict({name:0})

    # Matches one "[count *] ConstraintName [,]" item of a violation string.
    violnReg = re.compile(r'(?:(\d+)\s*\*\s*)?(\w+)(?:\s*,)?')
    def parseViolns(violnStr):
        # Parse a '{...}' violation annotation into a frozendict weight.
        if (not violnStr.startswith('{')) or (not violnStr.endswith('}')):
            raise ValueError('Violation strings must be enclosed in curly '+
                    'braces')
        violns = {name:0}
        for num, constr in violnReg.findall(violnStr):
            if num.strip():
                num = int(num)
            else:
                num = 1  # a bare constraint name counts as one violation
            if constr not in violns:
                violns[constr] = 0
            violns[constr] += num
        return frozendict(violns)

    def parseMapping(mappingStr):
        # Parse 'input:output', where '-' denotes epsilon on either side.
        segs = mappingStr.split(':')
        # TODO: this won't handle \: symbols
        if len(segs) != 2:
            raise ValueError('Ill-formed mapping: %s' % mappingStr)

        if segs[0].strip() == '-':
            input = None
        else:
            # NOTE(review): only 'from pyphon import *' is visible at the top
            # of this file, which does not bind the module name 'pyphon' --
            # confirm that something else imports it, or this line raises
            # NameError at runtime.
            input = pyphon.Symbol(segs[0], baseFeatureMap, diacriticFeatureMap)

        if segs[1].strip() == '-':
            output = None
        else:
            output = pyphon.Symbol(segs[1], baseFeatureMap, diacriticFeatureMap)

        if input and input not in inventory:
            raise ValueError('Symbol not in inventory: %s' % input)
        if output and output not in inventory:
            raise ValueError('Symbol not in inventory: %s' % output)
        return input, output
        

    # Arc lines look like: "src" -> "dst" [ label="..." ];
    arcReg = re.compile(
        r'"(?P<src>\w+)"\s*->\s*"(?P<dst>\w+)"\s*\[\s*label="(?P<label>.*)"\s*\]\s*;'
    )
    # Start states are drawn filled; final states get double peripheries.
    startReg = re.compile(r'"(?P<node>\w+)"\s*\[\s*style\s*=\s*filled\s*]\s*;')
    finalReg = re.compile(r'"(?P<node>\w+)"\s*\[\s*peripheries\s*=\s*2\s*]\s*;')

    constr = Constraint(name, inventory)
    starts = []
    finals = []
    lineNo = 0
    for row in fileObj:
        lineNo += 1
        # Force ignoring commented lines
        if row.strip().startswith('//'):
            continue

        # Arc?
        arcMatch = arcReg.search(row)
        if arcMatch:
            src = getNode(arcMatch.group('src'))   
            dst = getNode(arcMatch.group('dst'))   
            labelStr = arcMatch.group('label')
            if r'\n' not in labelStr:
                # No violation annotation: the arc is violation-free.
                mappingStr = labelStr.strip()
                violns = zeroViolns()
            else:
                # Label is 'mapping\nviolations' (a literal backslash-n in
                # the DOT source).
                labelLines = labelStr.split(r'\n')
                if len(labelLines) != 2:
                    raise ValueError('Ill-formed arc label in %s (line %d): %s' %
                            (fileObj.name, lineNo, labelStr))
                mappingStr = labelLines[0].strip()
                violnStr = labelLines[1].strip()
                violns = parseViolns(violnStr)

            input, output = parseMapping(mappingStr)
            #print '%s -> %s, %s:%s / %s' % (src, dst, input, output, violns)
            constr[src, dst] = Label(input, output, violns)
            continue

        # Start state?
        startMatch = startReg.search(row)
        if startMatch:
            starts.append(getNode(startMatch.group('node')))
            continue

        # Final state?
        finalMatch = finalReg.search(row)
        if finalMatch:
            finals.append(getNode(finalMatch.group('node')))
            continue

    constr.setStarts(starts)
    constr.setFinals(finals)
    return constr


#============================================================================

def makeEval(name, inventory, constraints):
    """
    Returns a new Constraint object which is the intersection of all of the
    given constraints. This intersection constitutes "Eval" for the given
    constraint set, as in Riggle 2004.

    NOTE: 'name' and 'inventory' are currently unused; they are kept for
    interface compatibility with existing callers.

    Raises ValueError if constraints is empty, or if any intermediate
    intersection ends up with zero states.
    """
    if not constraints:
        raise ValueError('makeEval() needs at least one constraint.')
    # Fold the constraints together left-to-right. ('result' rather than
    # 'eval' so the builtin is not shadowed.)
    result = constraints[0]
    for constr in constraints[1:]:
        result = result.intersection(constr)
        if not len(result):
            raise ValueError('EVAL has zero states after intersecting with '\
                    'constraint %s. Check your constraint definitions.' % constr)
    return result

#============================================================================

def impliedERC(winnerViolns, loserViolns):
    """
    Build the ERC implied by comparing a winner's violation profile against
    a loser's: each constraint is marked 'w' if the winner has fewer
    violations, 'l' if the loser does, and 'e' on a tie.

    Both arguments are violation maps (dicts or frozendict tuples mapping
    constraint -> violation count).
    """
    winnerViolns = dict(winnerViolns)
    loserViolns = dict(loserViolns)
    conditions = {}
    # Consider every constraint mentioned by either profile; a constraint
    # missing from one side counts as zero violations there. (The previous
    # version iterated only the winner's keys, raising KeyError on
    # winner-only constraints and silently ignoring loser-only ones.)
    for con in set(winnerViolns) | set(loserViolns):
        wV = winnerViolns.get(con, 0)
        lV = loserViolns.get(con, 0)
        if wV < lV:
            conditions[con] = 'w'
        elif lV < wV:
            conditions[con] = 'l'
        else:
            conditions[con] = 'e'
    return ERC(conditions)

#============================================================================

def impliedERCSet(winnerViolns, losersViolns):
    """
    Give me a violation map and a sequence of violation maps.

    Returns the minimized ERCSet implied by comparing the winner's profile
    against each loser's profile in turn.
    """
    ercs = ERCSet()
    for loserViolns in losersViolns:
        ercs.add(impliedERC(winnerViolns, loserViolns))
    return ercs.minimized()

#============================================================================

def permutations(L):
    """
    Generator of the permutations of the given list. Will destroy L.
    """
    if len(L) <= 1:
        yield tuple(L)
        return
    # Peel off the first element, permute the remainder, then splice the
    # peeled element back into every possible slot of each permutation.
    head = (L.pop(0),)
    for tail in permutations(L):
        for cut in range(len(tail) + 1):
            yield tail[:cut] + head + tail[cut:]

#============================================================================

class ERC:
    """
    An Elementary Ranking Condition over a constraint set: each constraint
    is assigned to W (prefers the winner), L (prefers the loser), or E (no
    preference).

    Arguments are constraints.
    """
    def __init__(self, winnersOrConditions=None, losers=None, equals=None):
        """
        Either pass a dict mapping each constraint to 'w', 'l', or 'e', or
        pass up to three iterables of constraints (winners, losers, equals).

        Raises ValueError if the three sets are not disjoint.
        """
        if type(winnersOrConditions) is dict:
            conditions = winnersOrConditions
            self.w = frozenset([c for c in conditions.keys()
                    if conditions[c] == 'w'])
            self.l = frozenset([c for c in conditions.keys()
                    if conditions[c] == 'l'])
            self.e = frozenset([c for c in conditions.keys()
                    if conditions[c] == 'e'])
        else:
            self.w = frozenset(winnersOrConditions or [])
            self.l = frozenset(losers or [])
            self.e = frozenset(equals or [])
        if (self.w & self.l) or (self.w & self.e) or (self.l & self.e):
            raise ValueError('Non-disjoint elementary ranking conditions.')

    def __str__(self):
        return "W{%s} L{%s} E{%s}" % (", ".join([str(w) for w in self.w]),
                                      ", ".join([str(l) for l in self.l]),
                                      ", ".join([str(e) for e in self.e]))

    def __repr__(self):
        return "ERC([%s], [%s], [%s])" % (", ".join([repr(w) for w in self.w]),
                                          ", ".join([repr(l) for l in self.l]),
                                          ", ".join([repr(e) for e in self.e]))

    def __eq__(self, other):
        return self.w == other.w and self.l == other.l and self.e == other.e

    def __ne__(self, other):
        return self.w != other.w or self.l != other.l or self.e != other.e

    def __hash__(self):
        return hash((self.w, self.l, self.e))

    def __len__(self):
        """
        The number of constraints the ERC is over.
        """
        return len(self.w) + len(self.l) + len(self.e)

    def nontrivial(self):
        """
        True iff contains at least one W and one L, False otherwise.
        """
        return len(self.w) > 0 and len(self.l) > 0

    def consistent(self):
        """
        Internal consistency: True iff contains at least one W.
        """
        return len(self.w) > 0

    def constraints(self):
        """
        Returns a set of the constraints this ERC is over.
        """
        return set(self.w | self.l | self.e)

    def entails(self, erc):
        """entails(erc) -> bool

        Returns whether this ERC entails the given ERC.
        """
        if self.constraints() != erc.constraints():
            raise ValueError('Entailment only makes sense between ERCs of the '
                             'same constraint set!')
        return self.l >= erc.l and self.w <= erc.w

    def fusion(self, erc):
        """
        Returns the fusion of this ERC with the given one: L dominates W
        pointwise (W = (w1|w2) - (l1|l2), L = l1|l2).
        """
        if self.constraints() != erc.constraints():
            # Debugging aid: show the mismatched constraint sets.
            # (print() calls work identically on Python 2 and 3 here.)
            print(self.constraints())
            print('vs')
            print(erc.constraints())
            raise ValueError('Fusion only makes sense for ERCs of the '
                             'same constraint set!')
        W, L = (self.w | erc.w), (self.l | erc.l)
        E = frozenset([c for c in self.constraints() if c not in W and c not in L])
        return ERC(W - L, L, E)

    def disjunction(self, erc):
        """
        Returns the ERC that includes self or erc.
        """
        if self.constraints() != erc.constraints():
            print(self.constraints())
            print('vs')
            print(erc.constraints())
            raise ValueError('Disjunction only makes sense for ERCs of the '
                             'same constraint set!')
        W, L = (self.w | erc.w), (self.l & erc.l)
        E = frozenset([c for c in self.constraints() if c not in W and c not in L])
        return ERC(W, L, E)

    def __or__(self, erc):
        return self.disjunction(erc)

    def negation(self):
        """negation() -> ERC

        Returns the negation of this ERC (W and L swapped).
        """
        return ERC(self.l, self.w, self.e)

    def toDNF(self):
        """Returns a list (disjunction) of lists (conjunctions)
        of simple dominance statements."""
        return [[(W, L) for L in self.l] for W in self.w]

    def stringERC(self, constraintOrder=None):
        """Returns a string representation of self as 'wellwelw' or some such
        with a canonical ordering of constraints alphanumerically by name.
        Optionally, constraintOrder may be given as a list specifying the
        desired order of constraints.

        Raises ValueError if constraintOrder mentions a constraint this ERC
        does not cover.
        """
        constraintOrder = constraintOrder or sorted(self.constraints())
        string = ''
        for c in constraintOrder:
            if c in self.w:
                string += 'w'
            elif c in self.l:
                string += 'l'
            elif c in self.e:
                string += 'e'
            else:
                # Was "raise 'wtf!'" -- string exceptions are illegal in
                # every modern Python; raise a real exception instead.
                raise ValueError('constraint %r is not part of this ERC' % (c,))
        return string

#============================================================================

def generateNonTrivialERCs(con, partialERC=None):
    """
    Depth-first generator over all non-trivial ERCs on the constraint list
    con, assigning each constraint to W, then L, then E in turn.

    partialERC carries the partial assignment built up during recursion.
    """
    # Resolve the default before the base case: previously the default ERC
    # was only created in the recursive branch, so calling this with an
    # empty con and partialERC=None crashed with AttributeError.
    partialERC = partialERC or ERC()
    if (not con) and partialERC.nontrivial():
        yield partialERC
    elif con:
        c = con[0]
        # Extend the partial ERC three ways, preserving the original w/l/e
        # enumeration order.
        for bucket in ('w', 'l', 'e'):
            if bucket == 'w':
                nextERC = ERC(partialERC.w | set([c]), partialERC.l,
                        partialERC.e)
            elif bucket == 'l':
                nextERC = ERC(partialERC.w, partialERC.l | set([c]),
                        partialERC.e)
            else:
                nextERC = ERC(partialERC.w, partialERC.l,
                        partialERC.e | set([c]))
            for e in generateNonTrivialERCs(con[1:], nextERC):
                yield e

#============================================================================

def randomERCSet(k, n, con=None):
    """
    Return an ERCSet of n random non-trivial ERCs over con (default: the
    integer constraints 0..k-1). Draws are retried until the ERC has at
    least one w and one l, i.e., is non-trivial.
    """
    con = con or range(k)
    result = ERCSet()
    while len(result) < n:
        buckets = {'w': set(), 'l': set(), 'e': set()}
        for c in con:
            buckets[random.choice(['l', 'w', 'e'])].add(c)
        if buckets['w'] and buckets['l']:
            result.add(ERC(buckets['w'], buckets['l'], buckets['e']))
    return result

#============================================================================

def randomConsistentERCSet(k, n, con=None):
    """Keep drawing random ERC sets until a consistent one turns up."""
    while True:
        candidate = randomERCSet(k, n, con=con)
        if candidate.consistent():
            return candidate

#============================================================================

def strataERCs(strata, con):
    """
    Returns an ERCSet describing the given stratified hierarchy over con;
    strata should be a list of strata, e.g., [[con1, con2, ...], ...].

    Each constraint gets one ERC in which it wins: stratum-mates and
    already-processed (higher) constraints are 'e', everything strictly
    below is 'l'. ERCs with no losers are skipped; the result is minimized.
    """
    eSet = ERCSet()
    seen = set()
    for stratum in strata:
        for winner in stratum:
            seen.add(winner)
            conditions = {}
            for cj in con:
                if cj == winner:
                    conditions[cj] = 'w'
                elif cj not in stratum and cj not in seen:
                    conditions[cj] = 'l'
                else:
                    conditions[cj] = 'e'
            if 'l' in conditions.values():
                eSet.add(ERC(conditions))
    return eSet.minimized()

#============================================================================

class ERCSet(set):
    """
    A set of ERCs (Elementary Ranking Conditions) over one shared constraint
    set, supporting consistency testing, minimization, ranking-volume
    computation, and Hasse-diagram rendering via graphviz.

    Inherits from the builtin set, so the ordinary set API applies.
    NOTE(review): builtin binary set operators (|, &, -) on a set subclass
    return a plain set in CPython, not an ERCSet -- confirm before chaining
    ERCSet methods off their results (see conditionalRVol below).
    """

    def __str__(self):
        return "{%s}" % (', '.join([str(e) for e in self]),)

    def constraints(self):
        # All members are assumed to share one constraint set, so the first
        # member's constraints() speaks for the whole set. Implicitly
        # returns None when the set is empty.
        for erc in self:
            return erc.constraints()

    def fusion(self):
        # Fold all members together with pairwise ERC fusion.
        # NOTE(review): reduce() over an empty ERCSet raises TypeError;
        # callers are expected to guard against emptiness.
        return reduce(lambda x, y: x.fusion(y), self)

    def isConsistent(self):
        """isConsistent() -> bool

        Consistency test by repeated fusion: if the fusion of the remaining
        ERCs ever has an empty W set, the set is inconsistent; otherwise
        drop every ERC whose W intersects the fusion's W and repeat until
        nothing remains.
        """
        if not len(self):
            return True # Empty erc sets are consistent
        while self:
            sf = self.fusion()
            if not sf.w: return False
            else: self = ERCSet([x for x in self if not sf.w & x.w])
        return True

    def consistent(self):
        # Convenience alias for isConsistent().
        return self.isConsistent()

    def stringERCs(self, constraintOrder=None):
        """
        Returns self as a list of string-ERCs.
        """
        return [e.stringERC(constraintOrder=constraintOrder) for e in self]

    def strata(self):
        """
        strata(constraints) -> [[stratum0], [stratum1], ..., [unviolated]]

        Peels off strata top-down: each pass takes the current fusion's W
        set as the next stratum and discards the ERCs it satisfies; raises
        InconsistentERCSetError if a pass yields an empty W.
        NOTE(review): constraints() returns None for an empty ERCSet, so
        len(constraints) below would raise TypeError in that case --
        confirm intended behavior on empty sets.
        """
        constraints = self.constraints()
        if not len(constraints):
            return [[], []]
        if not len(self):
            return [constraints]

        working, H, winners = self, [], set([])
        while working:
            W = working.fusion().w
            if not len(W): raise InconsistentERCSetError
            else:
                working = ERCSet([x for x in working if not len(W & x.w)])
                winners |= W
                H += [list(W)]

        # Constraints that only ever lose form a stratum below all winners.
        meFusion = self.fusion()
        bottom = [c for c in meFusion.l if c not in winners]

        if len(bottom):
            H += [bottom]

        # Add unviolated constraints to bottom stratum
        unviolated = [c for c in constraints if c not in meFusion.w and c not\
                in meFusion.l]

        return H+[unviolated]

    def hasse(self,name='graph',format='png',small=1):
        '''Should only be called on consistent ERC-sets.'''
        arcsList = self.hasseArcs()
        self.hasse_grapher(arcsList,name,format,small)

    def hasseArcs(self):
        """ Returns a disjunction of hasse diagrams in the form of
            a list of lists (C1,C2) pairs for the arcs in the
            diagrams."""
        # Cartesian product over the DNF disjuncts of every member ERC:
        # each element of rlists is one way of picking a conjunction of
        # (winner, loser) pairs from each ERC.
        rlists = [[]]
        disjoined = [e.toDNF() for e in self]
        while disjoined:
            next = []
            dislis = disjoined.pop()
            for r in rlists:
                for disjerc in dislis:
                    next.append(r+disjerc)
            rlists,next = next,[]

        # Keep only the choices that are consistent when read as a set of
        # simple pairwise-dominance ERCs.
        Z = set([frozenset(x) for x in rlists if ERCSet([ERC((a,),(b,),[c for c in self.constraints() if c != a and c != b]) \
            for (a,b) in x]).isConsistent()])

        # Discard proper supersets: only minimal arc sets survive.
        good = [z for z in Z]
        for x in Z:
            for y in Z:
                if x > y:
                    good.remove(x)
                    break

        return [self.trans_red(rks) for rks in good]

    def trans_red(self,arcs):
        '''Removes duplicates and arcs that are entailed by transitivity (assumes DAGs).'''
        pdic = {} # precedence dictionary for arcs

        # construct precedence dict from arclist
        for (a,b) in arcs: 
            try: pdic[a].add(b) 
            except KeyError: pdic[a] = set([b])  

        # remove redundant precedences (i.e., entailed by transitivity)
        for x in pdic: 
            for y in list(pdic[x]): 
                try: pdic[x].difference_update(pdic[y]) 
                except KeyError: pass
        return [(a,b) for a in pdic.keys() for b in pdic[a]]

    def tabrep(self,cList=None):
       '''Simple tabular ERCs for fixed width fonts. cList = column order.'''
       # all-e columns will be omitted unless the constraints are given in cList.
       # NOTE(review): the cList parameter is currently ignored (overwritten
       # on the next line), and StringIO does not appear among this module's
       # visible imports, so this looks like it would raise NameError at
       # runtime unless imported elsewhere -- confirm.
       #if not cList: cList = sorted(list(self.nonEcon()))
       cList = self.constraints()
       output = StringIO.StringIO()
       output.write('  '.join(cList) + '\n')
       for e in self:
           for c in cList:
               space = len(c)
               if c in e.w: output.write('W'+' '*(space+1))
               elif c in e.l: output.write('L'+' '*(space+1))
               else: output.write('e'+' '*(space+1))
           output.write('\n')

       return output.getvalue()

    def hasse_grapher(self,alists,name='graph',format='png',small=1):
        ''' Generates hasse diagrams from pairs in alists.

        Writes graphviz source to <name>.dott -- one cluster per disjunct,
        with invisible top/bottom anchor nodes per cluster to steer the
        layout -- then shells out to dot to render <name>.<format>.
        '''
        filename = ''.join([name,'.dott'])
        file,nodeset,groupId = open(filename,'w+'), set([]), 0
        file.write('digraph most_recent_machine     {\n\
        graph [nodesep=0.25,ranksep=0,size="')
        if small: file.write('4,5.25')
        else: file.write('8,10.5')
        file.write('",margin="0.25"];\nnode [shape=none,fontsize=12];\nedge [arrowhead=none,minlen=4,len=4,weight=9,style="setlinewidth(2)"];\n')
        if len(alists) > 21:
            file.write(''.join(['"Too many disjuncts: ',`len(alists)`,'";\n']))
        elif len(alists) < 1:
            file.write(''.join(['"Too few disjuncts: ',`len(alists)`,'";\n']))
        else:
            for id in range(len(alists)):
                file.write(''.join(['subgraph cluster_',`id`,'{\n']))
                # Tset/Bset: the disjunct's sources (never a target) and
                # sinks (never a source).
                Tset,Bset = set(),set()
                for (q1,q2) in alists[id]:
                    Tset.add(q1)
                    Bset.add(q2)
                for (q1,q2) in alists[id]:
                    Tset.discard(q2)
                    Bset.discard(q1)
                n2 = ''.join(['"top',`id`,'"'])
                file.write(''.join([n2,'[shape=point,style=invis];\n']))
                for T in Tset:
                    n3 = ''.join(['"',T,`id`,'"'])
                    file.write(''.join([n2,'->',n3,'[style=invis,weight=0,minlen=1,len=1];\n']))
                for arc in alists[id]:
                    a0 = ''.join(['"',arc[0],`id`,'"'])
                    a1 = ''.join(['"',arc[1],`id`,'"'])
                    file.write(''.join([a0,'->',a1,' [weight=2];\n']))
                    nodeset.add((arc[0],id))
                    nodeset.add((arc[1],id))
                n2 = ''.join(['"bot',`id`,'"'])
                file.write(''.join([n2,'[shape=point,style=invis];\n']))
                for B in Bset:
                    n1 = ''.join(['"',B,`id`,'"'])
                    file.write(''.join([n1,'->',n2,'[style=invis,minlen=1,len=1,weight=0];\n']))
                file.write('} \n \n')
            # Chain clusters with invisible arcs into columns of columnLen.
            columnLen = 3
            for x in range(len(alists)):
                if x%columnLen:
                    A = ''.join(['"bot',`x-1`,'"'])
                    B = ''.join(['"top',`x`,'"'])
                    file.write(''.join([A,'->',B,'[style=invis,minlen=1];\n']))
            file.write('\n')
            for (node,id) in nodeset:
                file.write(''.join(['"',node,`id`,'" [label=\"',node,'\"];\n']))
        file.write('graph [label="Hasse Disjunction",fontsize=12,labelloc=t];\n')
        file.write('     }')
        file.close()
        outname = ''.join([name,'.',format])
        toflag = ''.join(['-T',format])
        # Run dot on that shit
        cmd = ' '.join(["dot", toflag, '"%s"' % (filename,), '-o', '"%s"' %
            (outname,)])
        print cmd
        os.system(cmd)

    def flipOne(self,erc):
        # A copy of self with erc replaced by its negation.
        return ERCSet((self-set([erc]))|set([erc.negation()]))

    def isMinimal(self):
        """isMinimal() -> bool

        Returns True if every replacement of a single ERC in the set yields
        a consistent ERCSet.
        """
        for erc in self:
            if not self.flipOne(erc).isConsistent(): return False
        return True

    def __bruteMin(self, newERCs, oldERCs=None):
        # TODO: FIXME
        # NOTE(review): looks like dead code -- getERCs/remERC/addERC are
        # not defined on ERCSet in the visible source, so this would raise
        # AttributeError if called; minimized() no longer uses it.
        oldERCs = oldERCs or ERCSet()
        # remove anything in newERCs entailed by anything in newERCs or oldERCs
        for x in newERCs.getERCs():
            for y in newERCs.getERCs():
                if x!=y and y.entails(x) and x in newERCs:
                    newERCs.remERC(x)
            for z in oldERCs:
                if z.entails(x) and x in newERCs:
                    newERCs.remERC(x)

        # remove anything in oldERCs entailed by anything in newERCs
        for x in oldERCs.getERCs():
            for y in newERCs:
                if y.entails(x) and x in oldERCs:
                    oldERCs.remERC(x)

        if not len(newERCs):
            for e in oldERCs:
                if not e.hasLiteral('l'):
                    raise InconsistentERCSetError
            return oldERCs

        nextOldERCs = newERCs.union(oldERCs)

        nextNewERCs = ERCSet()
        considered = []
        for x in newERCs:
            for y in oldERCs:
                if x!=y and (x,y) not in considered and (y,x) not in considered:
                    considered.append((x,y))
                    nextNewERCs.addERC(x.fusion(y))

        return self.__bruteMin(nextNewERCs, nextOldERCs)

    def __powerMin(self):
        # Unimplemented placeholder.
        pass

    def __eMin(self):
        # Currently kind of a hack. Convert self into a list of erc strings,
        # and just pass them off to Jason's implementation of eMin in 
        # str_ercs.py
        # NOTE(review): neither str_ercs nor self.__contents is defined in
        # this module's visible code, so this path looks broken if ever
        # called -- confirm; minimized() uses reduce() instead.
        if not len(self):
            return ERCSet()

        try:
            return ERCSet([ERC(dict(zip(sorted(self.__contents[0].getConstraints()),
                str_erc))) for str_erc in str_ercs.emin([str(e) for e in self])])
        except ValueError:
            raise InconsistentERCSetError

    def minimized(self):
        """minimized() -> ERCSet

        The entailment-reduced version of self; raises
        InconsistentERCSetError when self is inconsistent.
        """
        #Brute force
        #return self.__bruteMin(self)

        # eMin
        #return self.__eMin()
        if not self.isConsistent():
            raise InconsistentERCSetError(self)
        return self.reduce()

    def minimizeInPlace(self):
        # Replace our own contents with those of minimized().
        min = self.minimized()
        self.clear()
        for e in min:
            self.add(e)

    def fuseAllPairs(self):
        """fuseAllPairs() -> ERCSet

        s.fuseAllPairs() returns the ERCSet of the fusions of all pairs of
        ERCs in s.
        """
        result = ERCSet()
        for e in self:
            for f in self:
                result.add(e.fusion(f))
        return result

    def reduce(self):
        """reduce() -> ERCSet

        The reduction of an ERCSet E is { e | e in E and there is no e' in E
        such that e' entails e }.
        """
        # Worklist scan with a sentinel index of -1: e is kept only when the
        # backwards scan reaches the sentinel without meeting an ERC that
        # entails e; anything e itself entails is dropped along the way.
        working, Result = list(self), []
        while working:
            e = working.pop()
            for idx in range(len(working)-1,-2,-1):
                if idx < 0: Result.append(e)
                elif e.entails(working[idx]): working.pop(idx)
                elif working[idx].entails(e): break

        res = ERCSet(Result)
        return res

    def disjunction(self, ercSet):
        # Pairwise ERC disjunction of the two sets, keeping only results
        # that still have a loser (an L-less ERC imposes no ranking
        # condition), then entailment-reducing.
        R = ERCSet()
        for e1 in self:
            for e2 in ercSet:
                d = e1 | e2
                # XXX: Is this right? (probably)
                if d.l:
                    R.add(d)

        return R.reduce()

    def rVolNew(self):
        """Ranking volume (number of constraint orders consistent with
        self), computed directly on ERC objects; rVol() below is the
        string-based equivalent. Appears to implement the recursive
        r-volume computation -- TODO confirm against the literature."""
        if not len(self):
            raise ValueError('ranking volume of an empty erc set is currently '\
                    'undefined (just use k! if you know it')

        if not self.consistent():
            return 0

        def __rVol(ignoreRows, ignoreColumns):
            # Recursive volume of the sub-tableau excluding the given rows
            # (ERCs) and columns (constraints).
            con = None
            for e in self:
                con = e.constraints()
                break
            k = len(con) - len(ignoreColumns)
            rows = [e for e in self if e not in ignoreRows]
            cols = [c for c in con if c not in ignoreColumns]

            volume = 0.

            if len(rows) > 1:
                for c in cols:
                    # Classify column c: any l kills it; otherwise an e
                    # triggers recursion; an all-w column contributes
                    # (k-1)! orders outright.
                    colHasL = False
                    colHasE = False
                    for e in rows:
                        if c in e.l:
                            colHasL = True
                            break
                        if c in e.e:
                            colHasE = True

                    if colHasL: pass
                    elif colHasE:
                        recurse = __rVol(
                                ignoreRows+[e for e in rows if c in e.w],
                                ignoreColumns+[c])
                        volume += recurse
                    else: volume += factorial(k-1)
            else:
                # Single-row base case: the fraction of the k! orders whose
                # highest polar (w-or-l) constraint is a w.
                e = rows.pop()
                w = [c for c in e.w if c not in ignoreColumns]
                polar = float(len(w) +\
                              len([c for c in e.l if c not in ignoreColumns]))
                kfac = factorial(k)
                if polar:  volume = (len(w)/polar) * kfac
                else: volume = float(kfac)  #<-- return k! for the all-e ERC  

            return volume

        return __rVol([], [])

    def rVol(self): #short version
        """Ranking volume computed on stringERC representations; same
        recursion as rVolNew.__rVol. NOTE(review): relies on the Python 2
        string.count function and xrange."""
        if not self.consistent():
            return 0.

        strercs = self.stringERCs()

        def __rVol(ercs):
            ercLen, volume, steps = len(ercs[0]), 0, 1
            if len(ercs) > 1:
                for i in xrange(ercLen):
                    # Same column classification as in rVolNew above.
                    colHasL = False
                    colHasE = False
                    for e in ercs:
                        if e[i] == 'l':
                            colHasL = True
                            break
                        if e[i] == 'e':
                            colHasE = True

                    if colHasL: pass
                    elif colHasE:
                        recurse = __rVol([e[:i]+e[i+1:] for e in ercs if e[i]!='w'])
                        volume += recurse
                    else: volume += factorial(ercLen-1)
            else:
                polar = float(string.count(ercs[0],'w') + string.count(ercs[0],'l'))
                kfac = factorial(ercLen)
                if polar:  volume = (string.count(ercs[0],'w')/polar) * kfac
                else: volume = kfac  #<-- return k! for the all-e ERC  
            return volume

        return __rVol(strercs)

    def conditionalRVol(self, erc, selfRVol=None):
        """
        Fraction: (self | erc).rVol() / self.rVol()

        NOTE(review): self | ERCSet([erc]) invokes the builtin set union,
        which in CPython returns a plain set rather than an ERCSet, so the
        .rVol() call here looks like it would raise AttributeError --
        confirm, and consider ERCSet(self | ERCSet([erc])) if so.
        """
        selfRVol = selfRVol or self.rVol()
        return (self | ERCSet([erc])).rVol() / selfRVol

#============================================================================

def randomCosts(n,basis=['a','b','c','d','e','f']):
    '''Random set of n cost dicts for testing and debugging.

    Each cost maps every basis constraint to a random violation count in
    0..9, frozen into a hashable frozendict.'''
    costs = []
    for _ in range(n):
        draw = dict([(b, random.randint(0, 9)) for b in basis])
        costs.append(frozendict(draw))
    return costs

#============================================================================

def contenderCostsOT(weightList):
    '''Returns the contender costs in weightList.

    weightList is a collection of cost vectors, each a tuple of
    (constraintName, violations) pairs over one shared basis. The
    elimination loop below appears to discard rows that are harmonically
    bounded (never optimal under any constraint ranking) -- TODO confirm
    against the OT literature. Raises ValueError on an empty weightList.
    '''
    #print weightList
    if not weightList:
        raise ValueError('contenderCostsOT: empty weightList')
    basis = [x[0] for x in list(weightList)[0]] 
    # Work on bare, deduplicated violation tuples; names are zipped back on
    # at the end.
    E = list(set([tuple([x[1] for x in w]) for w in weightList]))
    # Row i survives only if some column where it is minimal separates it
    # from every other row. The outer loop runs backwards so deleting row i
    # leaves the lower indices held in Rowz valid.
    for i in range(len(E)-1,-1,-1):             #Backwards row indices
        Rowz = [e for e in range(len(E)) if e != i]
        while Rowz:
            kill = True
            for (col,val) in enumerate(E[i]):
                if Rowz == []: break            # in case all Rowz are gone
                if val <= min([E[row][col] for row in Rowz]):
                    for row in Rowz:
                        if E[row][col] > val:
                            kill = False         # another round
                            Rowz.remove(row)     # delete row marker
            if kill: Rowz, E = [], E[:i]+E[i+1:] # i is bounded, kill it
    return [tuple(zip(basis,x)) for x in E]

#============================================================================

def violationVectors(weightList, constraintOrder=None):
    """
    Strip the constraint names off a list of costs, returning one bare
    violation tuple per cost. Columns follow constraintOrder when given,
    otherwise the sorted names of the first cost; constraints missing from
    a cost are silently skipped.
    """
    order = constraintOrder
    if not order:
        for w in weightList:
            order = sorted([c for c, v in w])
            break

    vectors = []
    for w in weightList:
        vec = []
        for constr in order:
            for c, v in w:
                if c == constr:
                    vec.append(v)
                    break
        vectors.append(tuple(vec))
    return vectors

#============================================================================

def contenderCostsHG(weightList):
    """
    Returns the contender costs in weightList under Harmonic Grammar,
    delegating the real work to harmonic.contendersIn.
    """
    sortedConstrs = None
    for weighting in weightList:
        sortedConstrs = sorted([c for c, v in weighting])
        break

    # Mangle the weightList into bare violation vectors so
    # harmonic.contendersIn can use it...
    violnVecs = violationVectors(weightList, sortedConstrs)

    # ...then zip the constraint names back onto its answer.
    hContenders = harmonic.contendersIn(violnVecs, oldWays=False)
    return [tuple(zip(sortedConstrs, viols)) for viols in hContenders]


#============================================================================

def getZeroWeight(constraint):
    """
    The all-zeroes weight vector for constraint: the first arc label's
    weight with every violation count replaced by 0. Implicitly returns
    None when constraint yields no arcs.
    """
    for arc in constraint:
        fromNode, toNode, label = arc
        return tuple([(c, 0) for (c, v) in label.weight])

def adjMatrix(constraint, symbol, progressCallback=None):
    """
    Build the sparse adjacency matrix of constraint restricted to arcs
    whose input is symbol.

    Returns a dict-of-dicts R where R[n][m] is a list of
    (weightVector, set-of-output-Forms) fragments for the arcs n -> m.
    When symbol is None the epsilon matrix is returned instead: a diagonal
    of zero-weight, empty-Form entries.

    progressCallback, if given, is called once per (n, m) node pair.
    """
    zero = getZeroWeight(constraint)
    nodes = constraint.nodes()
    R = {}

    for n in nodes:
        for m in nodes:
            if symbol is None and n == m:
                # Epsilon self-loop: zero cost, empty output form.
                # setdefault replaces the original's duplicated
                # try/append/except-KeyError/create/append dance.
                R.setdefault(n, {}).setdefault(m, []).append(
                        (zero, set([Form('')])))
            elif n in constraint.arcs and m in constraint.arcs[n]:
                for label in constraint[n][m]:
                    if label.input != symbol:
                        continue
                    R.setdefault(n, {}).setdefault(m, []).append(
                            (label.weight, set([Form([label.output])])))
            if progressCallback:
                progressCallback()

    return R

#============================================================================

def mergeFragments(frag1, frag2):
    """
    The otimes op on two (violationVector, formSet) fragments: violation
    vectors are summed constraint-wise, and the form sets are
    cross-concatenated.

    Rewritten from the Python-2-only tuple-parameter signature
    ``def mergeFragments((v1, f1), (v2, f2))`` (removed by PEP 3113)
    without changing the positional calling convention.
    """
    violation1, forms1 = frag1
    violation2, forms2 = frag2
    mViol = tuple([(c1, v1+v2) for (c1,v1), (c2,v2) in zip(violation1,
        violation2)])
    mForms = set([f1+f2 for f1 in forms1 for f2 in forms2])
    return (mViol, mForms)

def mergeCells(cell1, cell2):
    """
    This is the oplus op for matrix multiplication: every fragment of cell1
    merged with every fragment of cell2.
    """
    return [mergeFragments(frag1, frag2)
            for frag1 in cell1 for frag2 in cell2]

def collapseTies(cell):
    """
    Merge fragments that share a violation vector, unioning their form
    sets. Fragments whose violation and forms are both empty/None are
    dropped (they are placeholders, not real parses).
    """
    byViolation = {}
    for violation, forms in cell:
        if not violation and not forms:
            continue
        byViolation.setdefault(violation, set()).update(forms)

    return byViolation.items()

def floydWarshall(am, contenderCosts, progressCallback=None):
    """
    All-pairs closure of the adjacency matrix am in the contender semiring:
    for nodes x, z, extend am[x][z] with paths through an intermediate node
    y (oplus = mergeCells, collapse = collapseTies), then prune fragments
    whose violation vectors are not contenders according to contenderCosts.

    Mutates and returns am. progressCallback, if given, is called once per
    (x, y, z) triple.

    NOTE(review): the intermediate node y is the *middle* loop here, while
    textbook Floyd-Warshall iterates the intermediate node outermost --
    confirm this ordering suffices for the epsilon-closure use in
    makeEvalMatrices. Also NOTE: py2-only dict.iterkeys().
    """
    for x in am.iterkeys():
        for y in am.iterkeys():
            for z in am.iterkeys():
                xyz = None
                if z in am[x]:
                    # Existing x->z fragments compete with the new x->y->z
                    # paths.
                    if y in am[x] and z in am[y]:
                        xyz = collapseTies(mergeCells(am[x][y], am[y][z]) +\
                                am[x][z])
                elif y in am[x] and z in am[y]:
                    xyz = collapseTies(mergeCells(am[x][y], am[y][z]))

                if xyz:
                    # Keep only fragments whose costs are still contenders.
                    goodCosts = contenderCosts(set([i[0] for i in xyz]))
                    am[x][z] = [(v,forms) for (v,forms) in xyz if v in goodCosts]
                if progressCallback:
                    progressCallback()

    return am

def isDiagonal(adjMatrix):
    """
    True iff the matrix has no non-empty off-diagonal cells, i.e., it only
    relates each node to itself.

    (Iterates the dicts directly instead of the py2-only iterkeys(); the
    parameter name shadows the module-level adjMatrix() function but is
    kept for call-compatibility.)
    """
    for x in adjMatrix:
        for y in adjMatrix:
            if (x != y) and (y in adjMatrix[x]) and adjMatrix[x][y]:
                return False
    return True

def innerProduct(row, col, contenderCosts, oplus=mergeCells):
    """
    Semiring inner product of a matrix row and column: oplus each aligned
    pair of cells, then fold the results together, collapsing ties and
    pruning non-contender costs at every step.
    """
    cells = [oplus(a, b) for a, b in zip(row, col)]
    if not cells:
        return []
    acc = cells[0]
    for cell in cells[1:]:
        merged = collapseTies(acc + cell)
        if merged:
            keep = contenderCosts(set([frag[0] for frag in merged]))
            acc = [(v, forms) for (v, forms) in merged if v in keep]

    return acc

def innerProductSparse(row, col, contenderCosts, oplus=mergeCells):
    # Unimplemented placeholder for a sparse variant of innerProduct in
    # which row and col are dicts rather than aligned lists; currently
    # returns None and is not called anywhere in the visible portion of
    # this file.
    # row and col are now dicts
    pass

def matrixMultiply(m1, m2, allNodes, contenderCosts, oplus=mergeCells,
        progressCallback=None):
    """
    Multiply the sparse matrices m1 and m2 (dicts-of-dicts indexed by
    allNodes) in the contender semiring, computing each output cell with
    innerProduct.

    Rows entirely absent from m1 are skipped (their output row is omitted
    and the progress callback is advanced by a whole row's worth).
    """
    R = {}
    for rk in allNodes:
        if rk not in m1:
            if progressCallback:
                progressCallback(len(allNodes))
            continue
        R[rk] = {}
        rowCells = [m1[rk].get(x, []) for x in allNodes]
        for ck in allNodes:
            colCells = [m2[x][ck] if x in m2 and ck in m2[x] else []
                    for x in allNodes]
            R[rk][ck] = innerProduct(rowCells, colCells, contenderCosts,
                    oplus=oplus)
            if progressCallback:
                progressCallback()

    return R

#============================================================================

def contendersOT(eval, input):
    """OT contenders for input under eval (see contendersGeneric)."""
    return contendersGeneric(eval, input, contenderCostsOT)

def contendersHG(eval, input):
    """Harmonic Grammar contenders for input under eval (see
    contendersGeneric)."""
    return contendersGeneric(eval, input, contenderCostsHG)

def makeEvalMatrices(eval, contenderCostsFn, progress=False):
    """
    Precompute the per-symbol transition matrices used by contendersGeneric
    for an eval transducer with exactly one start state:

      em[Symbol('')]  the epsilon matrix (closed with floydWarshall unless
                      it is already diagonal)
      em['start']     just the start state's row of the epsilon matrix
      em[seg]         for each input segment actually appearing in eval,
                      the segment's adjacency matrix composed with the
                      epsilon matrix

    When progress is true, a progressbar tracks the matrix construction.
    """
    assert(len(eval.starts)==1)
    print 'Building epsilon matrix'

    if progress:
        numNodes = len(eval.nodes())
        pbar = progressbar.ProgressBar(numNodes**2 + numNodes**3).start()
    else:
        pbar = None

    def updatePbar(amount=1):
        # Closes over pbar -- including the rebinding inside the segment
        # loop below, since closures see the current binding.
        if pbar:
            pbar.update(pbar.currval + amount)

    allNodes = sorted(eval.nodes())

    epsilonAdjMat = adjMatrix(eval, None, progress and updatePbar)
    if isDiagonal(epsilonAdjMat):
        # No real epsilon arcs: the matrix is already its own closure.
        em = { Symbol(''): epsilonAdjMat }
    else:
        em = { Symbol(''): floydWarshall(epsilonAdjMat, contenderCostsFn,
            progress and updatePbar) }
    if pbar:
        pbar.finish()

    em['start'] = {eval.starts[0]: em[Symbol('')][eval.starts[0]]}

    # figure out which inventory segments are ever actually possible inputs in
    # eval
    inputSegs = set()
    for src, dst, label in eval:
        if label.input:
            inputSegs.add(label.input)

    for seg in inputSegs:
        print 'Building matrix for segment %s' % seg
        if progress:
            numNodes = len(eval.nodes())
            pbar = progressbar.ProgressBar(numNodes**2 + numNodes**2).start()

        am = adjMatrix(eval, seg, progress and updatePbar)
        # Fold the epsilon closure into the segment matrix so later
        # multiplications never need to track epsilon moves themselves.
        em[seg] = matrixMultiply(am, em[Symbol('')], allNodes, contenderCostsFn,
                progressCallback=(progress and updatePbar))

        if pbar:
            pbar.finish()
    return em

def contendersGeneric(eval, input, contenderCostsFn, evalMatrices=None):
    """
    Contenders ((violationVector, outputForms) fragments) for input under
    eval, with optimality decided by contenderCostsFn. evalMatrices may be
    supplied to reuse previously built per-segment matrices; otherwise they
    are rebuilt from eval.
    """
    evalMatrices = evalMatrices or makeEvalMatrices(eval, contenderCostsFn)
    allNodes = sorted(eval.nodes())

    # Fold the per-segment matrices over the input, left to right, starting
    # from the start-state row.
    product = matrixMultiply(evalMatrices['start'], evalMatrices[input[0]],
            allNodes, contenderCostsFn)
    for seg in input[1:]:
        product = matrixMultiply(product, evalMatrices[seg], allNodes,
                contenderCostsFn)

    start = eval.starts[0]
    finals = eval.finals
    if len(finals) == 1:
        return product[start][finals[0]]

    # Several final states: pool their parses -- empty cells contribute a
    # harmless (None, None) placeholder, which collapseTies drops -- and
    # prune the pooled fragments one last time.
    pooled = product[start][finals[0]] or [(None, None)]
    for final in finals[1:]:
        pooled = collapseTies(pooled +
                (product[start][final] or [(None, None)]))

    keep = contenderCostsFn(set([frag[0] for frag in pooled]))
    return [(v, forms) for (v, forms) in pooled if v in keep]

def contendersGenericOld(eval, input, contenderCostsFn, diagOnly=True):
    '''Returns contenders with costs for input under eval.

    Superseded chart-parsing implementation of contendersGeneric; kept for
    reference. NOTE(review): relies on Python 2 semantics throughout --
    dict.items() returning a list is what makes the delete-while-iterating
    steps below safe, and nodes.sort() is given a py2 cmp function.
    '''
    M = eval.onInput(input)
    M.dotToFile('test.dot')
    # Recover the constraint basis from any one arc label's weight.
    Basis = None
    for n1,dests in M.arcs.iteritems():
        for n2, labels in dests.iteritems():
            Basis = [c for c,viol in labels[0].weight]
            break
        break
    
    # Step 1: ######### initialize table of node pairs in M  ########## 
    # chartDic[(x,y)] maps a cost vector to the set of output Forms that
    # achieve it going from x to y; the diagonal starts with a zero-cost
    # empty Form.
    chartDic = dict([((x,y),{}) for x in M.nodes() for y in M.nodes()])
    for ((x,y),v) in chartDic.items(): 
        if x == y: chartDic[(x,y)] = {tuple([(b,0) for b in Basis]) : set([Form()])}
        elif (x,y) in M:
            costs = set([L.weight for L in M[x,y]])
            cCosts = contenderCostsFn(costs)
            if not cCosts:
                print 'Empty contender set from %s to %s!\n%r' % (x, y, cCosts)
            for cCost in cCosts:
                oStrings = set([Form([L.output]) for L in M[x,y] if L.weight == cCost])
                chartDic[(x,y)][cCost] = oStrings

    # Step 2: ######### comptue optimal costs for node pairs ########## 
    # Combine (x,y) with (y,z) entries for each intermediate y, summing
    # costs and concatenating forms, then prune non-contenders per cell.
    # With diagOnly, only start rows and adjacent node ids are combined.
    nodes = M.nodes()
    nodes.sort(lambda a,b: cmp(int(a.id.split('-')[0]),
        int(b.id.split('-')[0])))
    for y in nodes:
        for ((x,z),v) in chartDic.items(): 
            n1,n2 = int(x.id.split('-')[0]), int(z.id.split('-')[0])
            ny = int(y.id.split('-')[0])
            if (not diagOnly) or (x in M.starts or n2-n1 in (0,1)): # same Id or n2=n1+1
                if chartDic[(x,y)] and chartDic[(y,z)]: # neither is empty
                    for (V1,S1) in chartDic[(x,y)].items():
                        for (V2,S2) in chartDic[(y,z)].items():
                            nuCst = tuple([(cn1,v1+v2) for ((cn1,v1),(cn2,v2)) in zip(V1,V2)])
                            kats = set([a+b for a in S1 for b in S2])
                            try: chartDic[(x,z)][nuCst] |= kats
                            except KeyError: chartDic[(x,z)][nuCst] = kats
                    cCosts = contenderCostsFn(chartDic[(x,z)].keys())
                    if not cCosts:
                        print 'Empty contender set from %s to %s!\n%r' % (x, y, cCosts)
                    for (V,S) in chartDic[(x,z)].items():
                        if V not in cCosts: del chartDic[(x,z)][V]

    # Step 3: ######### return contender costs across finals ########## 
    # Pool the chart cells from the start state to every final state and
    # prune one last time.
    contenders = {}
    for N1 in M.starts:
        for N2 in M.finals:
            for (V,S) in chartDic[(N1,N2)].iteritems():
                try: contenders[V] |= frozenset(S)
                except KeyError: contenders[V] = frozenset(S)
        cCosts = contenderCostsFn(contenders.keys())
        for (V,S) in contenders.items():
            if V not in cCosts: del contenders[V]
    return contenders

#============================================================================

def contenderERCs(eval, inputs):
    # TODO: rename this contenderDictOT
    """
    For each input, computes its contenders under eval and, for every
    contender, the minimal ERC set under which it beats all rival contenders.

    Returns a dict mapping each input to a list of
    (output, violations, minimized ERCSet) triples.

    NOTE: the `eval` parameter name shadows the builtin; it is kept for
    interface compatibility with existing callers.
    """
    ercDict = {}
    # `inp` rather than `input`: the original shadowed the builtin
    for inp in inputs:
        print('Input: %s' % inp)
        ercDict[inp] = []
        print('\tContenders...')
        contenders = contendersOT(eval, inp)
        print('\tERCs...')
        for violns, output in contenders.items():
            outputESet = ERCSet()
            # Pairwise: add the ERC implied by preferring this candidate's
            # violation profile over each rival's.
            for otherViolns, otherOutput in contenders.items():
                if violns == otherViolns:
                    continue
                outputESet.add(impliedERC(violns, otherViolns))
            ercDict[inp].append((output, violns, outputESet.minimized()))
    return ercDict

#============================================================================

def contenderDictHG(eval, inputs):
    """
    For each input, computes its Harmonic Grammar contenders under eval.

    Returns a dict mapping each input to a list of
    (output, violations, weighting-condition) triples; the weighting-condition
    slot is currently always None (see XXX below).
    """
    cDict = {}
    # `inp` rather than `input`: the original shadowed the builtin
    for inp in inputs:
        print('Input: %s' % inp)
        cDict[inp] = []
        print('\tContenders...')
        contenders = contendersHG(eval, inp)
        for violns, output in contenders.items():
            # XXX at some point we might want weighting-conditions to go where
            # None is here
            cDict[inp].append((output, violns, None))
    return cDict

#============================================================================

def rankingToERCSet(ranking):
    """
    Converts an ordered list of constraint names into the equivalent ERCSet:
    each constraint dominates every other constraint named in the ranking
    (first constraint is highest ranked).
    """
    eSet = ERCSet()
    for con in ranking:
        lowerRanked = [rival for rival in ranking if rival != con]
        eSet.add(ERC([con], lowerRanked))
    return eSet

#============================================================================

def ioMappingsUnderERCs(eval, inputs, ercSet, ercDict=None):
    """
    Given an eval and an ERC set, returns a dictionary from the given inputs to
    ((output1, ..., outputN), (violnVec1, ..., violnVecN)) tuples consistent
    with that ERC set. If ercDict is not given, the contenders algorithm is run
    to compute it. eval can be None if ercDict is supplied.
    """
    ercDict = ercDict or contenderERCs(eval, inputs)
    mappings = {}
    # `inp` rather than `input`: the original shadowed the builtin
    for inp in inputs:
        outputs = tuple()
        violnVecs = tuple()
        # `outs` rather than `os`: the original shadowed the imported os module
        for (outs, v, e) in ercDict[inp]:
            # consistency depends only on the candidate's ERC set, not on the
            # individual output form, so check once per candidate entry
            if not ERCSet(e | ercSet).consistent():
                continue
            for o in outs:
                if outputs:
                    print('WARNING: multiple output violn '
                          'vectors for input %r' % inp)
                outputs += (o,)
                violnVecs += (v,)
        mappings[inp] = (outputs, violnVecs)

    return mappings

#============================================================================

def surfaceExtension(ioMappings, join=True, sep='/', unique=False):
    """
    Takes a dictionary of the form returned by ioMappingsUnderERCs and returns
    a list of just the output forms. If an input has multiple possible outputs
    and join is True, then they will be concatenated together with the given
    separating string between them; if join is False, each output will appear
    separately. If unique is True, then there will be no duplicates in the
    returned list. The returned list of outputs is always sorted in order of
    increasing length.
    """
    outputs = []
    # only the output tuples matter here; the violation vectors are ignored
    for outs, vs in ioMappings.values():
        if join:
            outputs.append(sep.join([str(o) for o in outs]))
        else:
            outputs.extend([str(o) for o in outs])
    if unique:
        # a set is the idiomatic de-duplicator (order is restored by the sort)
        outputs = list(set(outputs))
    # sort by length; key=len replaces the deprecated cmp-style comparator
    return sorted(outputs, key=len)

#============================================================================

def typologyOTExtension(eval, inputs, typology, ercDict=None):
    """
    For every language in the typology, computes the surface forms it
    generates for the given inputs. Returns {language name: [output, ...]}.
    """
    ercDict = ercDict or contenderERCs(eval, inputs)
    surface = {}
    for langName, langERCs in typology.items():
        langMappings = ioMappingsUnderERCs(eval, inputs, langERCs, ercDict)
        surface[langName] = surfaceExtension(langMappings)
    return surface

#============================================================================

def typologyOTMappings(typology, inputs, eval=None, ercDict=None):
    """
    Takes a typology as returned by typologyOT and returns dictionary mapping
    from language names to input-output/violation dictionaries.
    """
    return dict([(lang, ioMappingsUnderERCs(eval, inputs, ercSet, ercDict))
                 for lang, ercSet in typology.items()])

#============================================================================

def typologyOT(eval, inputs, ercDict=None, restriction=None, progress=False):
    if eval:
        print 'Generating Typology of %s' % eval
    ercDict = ercDict or contenderERCs(eval, inputs)

    if progress:
        pbar = progressbar.ProgressBar(sum([len(ercDict[K]) for K in inputs])).start()
    else:
        pbar = None

    print 'Finding consistent cross product...'
    ans = [(restriction or set(), [])]
    count = len(ercDict)
    inputs = inputs or ercDict.keys()
    inputs = sorted(inputs, lambda x,y: cmp(str(x),str(y)))
    for K in inputs:
        #print count, len(ans)
        count -= 1
        # The one-line version with list comprehensions:
        #ans = [(ERCSet(x|e).minimized(), io+[(K,o,tuple(v),e)]) for x,io in ans \
        #       for (o,v,e) in ercDict[K] if ERCSet(x|e).consistent()]

        # The unpacked version:
        newAns = []
        for o,v,e in ercDict[K]:
            for x,io in ans:
                if ERCSet(x|e).consistent():
                    newAns.append((ERCSet(x|e).minimized(),
                        io+[(K,o,tuple(v),e)]))
            if pbar:
                pbar.update(pbar.currval + 1)
        ans = newAns

    if pbar:
        pbar.finish()
        
    #print 'minimizing'
    #minans = []
    #for eset in ans:
    #    m = frozenset(RNF.simplemin(list(eset)))
    #    sys.stderr.write('.'); sys.stderr.flush()
    #    minans += [m]
    #ans = minans
    print 'There are', len(ans), 'languages.'
    R,span = {},len(str(len(ans)))
    for (n,E) in enumerate(ans):
        sp = len(str(n+1))
        pad = '0'*(span-sp)
        name = 'L'+pad+str(n+1)
        R[name] = E
    return R     

#============================================================================

def typologyHG(eval, inputs, contenderDict=None, verbose=False,
        restriction=None, progress=False):
    """
    Computes the factorial typology of eval over the given inputs under
    Harmonic Grammar weighting, via harmonic.typologyWeighted. Returns a dict
    mapping generated language names ('L1', 'L02', ...) to (prob, forms)
    pairs, where forms is a list of
    (input, output, violations, weighting-condition) tuples.

    contenderDict may be supplied to skip recomputing contenders (it is
    filtered down to the requested inputs); restriction, if given, is a list
    of (coefs, intercept, ineq) linear conditions on the weights.
    """
    if eval:
        print 'Generating Typology of %r' % eval
    cDict = contenderDict or contenderDictHG(eval, inputs)

    # restrict cDict to only include the requested inputs
    cDict = dict([(k,v) for k,v in cDict.iteritems() if k in inputs])
    #print cDict

    # convert contenders' violation maps to violation vectors
    cDictVec = {}
    for i, cands in cDict.iteritems():
        newCands = []
        for o,v,wc in cands:
            newCands.append((o, violationVectors([v])[0], wc))
        cDictVec[i] = newCands

    hRestriction = []
    if restriction:
        # convert each restriction's coefficient map to vector form as well,
        # so it lines up with the vectorized candidate violations
        for coefs, intercept, ineq in restriction:
            hRestriction.append((violationVectors([coefs.items()])[0], intercept, ineq))

    print 'Finding consistent cross product...'
    t = harmonic.typologyWeighted(cDictVec, verbose=verbose,
            restriction=hRestriction, progress=progress)
    print 'There are', len(t), 'languages.'

    span = len(str(len(t)))
    rt = {}
    # NOTE(review): `name` as unpacked from t is unused; it is overwritten
    # below with a generated 'LNN' name
    for n, (name, (lg, prob)) in enumerate(t.iteritems()):
        forms = []
        for i,(o,vVec) in lg.iteritems():
            # recover the original violation map and weighting-condition by
            # matching output + violation vector against the contender list
            wc = None
            v = None
            for candO, candV, candWC in cDict[i]:
                if candO == o and vVec == violationVectors([candV])[0]:
                    v = candV
                    wc = candWC
                    break
            assert(v)
            forms.append((i,o,tuple(v),wc))

        # zero-pad language numbers so names sort lexicographically
        sp = len(str(n+1))
        pad = '0'*(span-sp)
        name = 'L'+pad+str(n+1)
        rt[name] = (prob, forms)
    return rt

#============================================================================

class TOrder(object):
    """
    Typological order over a typology as returned by typologyOT: groups each
    (input, output, violations) mapping by the exact set of languages that
    contain it, and can render the resulting superset lattice of language
    sets as a Graphviz dot file.
    """
    def __init__(self, typology):
        # self.langsWith: (input, output, violations) -> set of language names
        # containing that mapping
        self.langsWith = {}
        self.typology = typology
        for name, (ercs, forms) in typology.iteritems():
            for i, o, v, e in forms:
                mapping = (i,o,v)
                if mapping not in self.langsWith:
                    self.langsWith[mapping] = set()
                self.langsWith[mapping].add(name)

        self.mappingsIn = {} # frozenset([lang, ...]) -> frozenset([mapping, ...])
        # the distinct language sets that occur as some mapping's extension
        self.langSets = set([frozenset(langs) for mapping, langs in
            self.langsWith.iteritems()])
        for langSet in self.langSets:
            # collect every mapping whose containing-language set is exactly
            # this langSet
            mappings = set()
            for mapping, langs in self.langsWith.iteritems():
                if frozenset(langs) == langSet:
                    mappings.add(mapping)
            self.mappingsIn[langSet] = set(mappings)

    def toDot(self, dotFile):
        """
        Writes the superset lattice of language sets to dotFile in Graphviz
        dot format, with transitivity-entailed arcs removed. Leaf nodes (sets
        with no outgoing arcs) are drawn bold; the rest dotted/filled.
        """
        # one arc per strict superset relation between language sets
        allArcs = []
        for langSet1 in self.langSets:
            for langSet2 in self.langSets:
                if langSet1 == langSet2: continue
                if langSet1.issuperset(langSet2):
                    allArcs.append((langSet1, langSet2))

        pdic = {} # precedence dictionary for arcs
    
        # construct precedence dict from arclist
        for (a,b) in allArcs: 
            try: pdic[a].add(b) 
            except KeyError: pdic[a] = set([b])  
    
        # remove redundant precedences (i.e., entailed by transitivity)
        for x in pdic: 
            for y in list(pdic[x]): 
                try: pdic[x].difference_update(pdic[y]) 
                except KeyError: pass
    
        tOrder = [(a,b) for a in pdic.keys() for b in pdic[a]]

        # leaves: language sets that never appear as an arc source
        leaves = [ls for ls in self.langSets if ls not in [a for (a,b) in\
            tOrder]]
    
        f = open(dotFile, 'w')
        f.write('digraph torder {\n\trankdir=LR;ranksep=1.2\n\tnode [shape=box,fillcolor="lightgray"];\n')
    
        # assign each language set a small integer to use as its dot node name
        nameOf = {}
        nameN = 0
        for (srcSet, destSet) in tOrder:
            if srcSet not in nameOf:
                nameOf[srcSet] = nameN
                nameN += 1
            if destSet not in nameOf:
                nameOf[destSet] = nameN
                nameN += 1
    
            f.write("\t%s -> %s;\n" % (nameOf[srcSet], nameOf[destSet]))
    
        for langSet, name in nameOf.iteritems():
            mappings = self.mappingsIn[langSet]
            # label each node with its mappings and how many languages share it
            label = '\\n'.join(["/%s/ -> [%s] (%s)" % (i, o, ','.join([str(v[1])
                for v in vs])) for (i,o,vs) in mappings])
            label += '\\n%d language%s.' % (len(langSet), len(langSet)>1 and\
                    's' or '')
            if langSet in leaves:
                style = "bold"
            else:
                style = "dotted,filled"
            f.write('\t%s [label="%s", style="%s"];\n' % (name, label, style))
    
        f.write('}\n')
        f.close()


#============================================================================

def parameterVolumeOT(ercSet, restriction=None):
    """
    Returns the ranking volume (rVol) of ercSet combined with the optional
    restriction ERC set (an empty ERCSet when no restriction is given).
    """
    combined = ercSet | (restriction or ERCSet())
    return combined.rVol()

#============================================================================

def parameterVolumesOT(typology, restriction=None, progress=True):
    """
    Computes parameterVolumeOT for every language in the typology, optionally
    displaying a progress bar. Returns {language name: volume}.
    """
    pbar = progressbar.ProgressBar(len(typology)).start() if progress else None
    volumes = {}
    for langName, (lgERCs, forms) in typology.items():
        volumes[langName] = parameterVolumeOT(lgERCs, restriction)
        if pbar is not None:
            pbar.update(pbar.currval + 1)
    if pbar is not None:
        pbar.finish()
    return volumes

#============================================================================

def parameterVolumesHG(typology, restriction=None, progress=True):
    """
    Computes harmonic.problemVolume for every language in the typology,
    optionally displaying a progress bar. Returns {language name: volume}.
    Raises ValueError if any language's volume is zero/falsy. The restriction
    argument is accepted for interface parity with parameterVolumesOT but is
    not used here.
    """
    pbar = progressbar.ProgressBar(len(typology)).start() if progress else None

    volumes = {}
    for langName, (prob, forms) in typology.items():
        langVolume = harmonic.problemVolume(prob)
        if not langVolume:
            raise ValueError('Got zero volume for lang %s in typology.' % langName)
        volumes[langName] = langVolume
        if pbar is not None:
            pbar.update(pbar.currval + 1)

    if pbar is not None:
        pbar.finish()

    return volumes


#============================================================================

def typologicalOrder(typologyMappings, prune=False):
    """
    T-order calculator. typologyMappings should be a dictionary of the kind
    returned by typologyOTMappings or typologyHG. The object returned is a
    dictionary of the form {(inputX, outViolnX): [(inputY, outViolnY), ...]}
    such that the languages containing (inputX, outViolnX) are a subset of the
    languages containing (inputY, outViolnY). By default, every true entailment
    of this kind is included. Pass prune=True to exclude entailments that are
    implied by transitivity (i.e., the result will be a tree (or several trees)
    of entailments).

    Raises NotImplementedError when prune=True (pruning not implemented yet).
    """
    # map each (input, outVioln) mapping to the set of languages containing it
    containingLangs = {}
    # `inp` rather than `input`: the original shadowed the builtin
    for name, lang in typologyMappings.items():
        for inp, outVioln in lang.items():
            if (inp, outVioln) not in containingLangs:
                containingLangs[(inp, outVioln)] = \
                        set(langsWith(typologyMappings, inp, outVioln))

    # unpruned order: X entails Y iff X's languages are a subset of Y's
    tOrder = {}
    for inOutX in containingLangs:
        tOrder[inOutX] = []
        for inOutY in containingLangs:
            if inOutX == inOutY:
                continue
            if containingLangs[inOutX].issubset(containingLangs[inOutY]):
                tOrder[inOutX].append(inOutY)

    # TODO: optionally prune
    if prune:
        # BUG FIX: the original raised NotImplemented, which is a sentinel
        # value (not an exception type) and itself caused a TypeError.
        raise NotImplementedError('T-Order pruning not implemented yet.')

    return tOrder

#============================================================================

def langsWith(typologyMappings, input, outVioln):
    """
    typologyMappings should be a dictionary of the kind returned by
    typologyOTMappings or typologyHG. Returns a list of the language names in
    the typology such that those languages contain the given
    input-output/violation mapping. outVioln should be a tuple of the form
    ((out1, ..., outN), (violnVec1, ..., violnVecN)).

    NOTE: the `input` parameter name shadows the builtin; it is kept for
    interface compatibility with existing callers.
    """
    # explicit membership test (rather than .get) so a None outVioln never
    # matches a missing key
    return [name for name, lang in typologyMappings.items()
            if input in lang and lang[input] == outVioln]

#============================================================================

def informativity(ercSet, tableaux):
    # TODO: unimplemented stub — presumably intended to measure how
    # informative ercSet is relative to the given tableaux (verify against
    # the author's notes). Currently does nothing and returns None.
    pass
