#!/usr/bin/env python

import pyphon, csv, sys, pickle, os
from pprint import pprint
from optparse import OptionParser

# Command-line usage/help text; printed by printUsage() and also handed to
# OptionParser as the --help header in the __main__ driver below.
usage = \
"""pyphon_generate.py tableaux outputfile <opt> [options]

Read a PyPhon tableaux file with an optional grammar file, and generate optima
or typologies. 

<opt> must be supplied as one of "OT", "HG"; it indicates whether optimal
candidates will be selected according to the rules of OT or HG. If the
-v/--volumes flag is present, it indicates that the parameter volume of each
generated language should be included in the output."""

def printUsage():
    # Emit the module-level usage text to stdout.
    print(usage)

def parseTableauxDie(file, lineno, msg):
    """Report a fatal tableaux-file parse error and abort the program."""
    message = "Failed to read tableaux file %r (tableau no. %d): %s" % (
            file, lineno, msg)
    print(message)
    raise SystemExit

def parseGrammarsDie(file, lineno, msg):
    """Report a fatal grammar-file parse error and abort the program."""
    message = "Failed to read grammar description file %r (line %d): %s" % (
            file, lineno, msg)
    print(message)
    raise SystemExit

def openWithExtension(basename, extension, mode):
    """Open a file named with or without the given extension.

    Candidates are tried in order: basename itself if it already ends in
    '.extension', then basename + '.extension', then bare basename.  The
    first one that opens is returned.

    Raises ValueError if none of the candidate paths can be opened."""
    suffix = '.' + extension
    candidates = []
    if basename.endswith(suffix):
        candidates.append(basename)
    candidates.append(basename + suffix)
    candidates.append(basename)

    for path in candidates:
        try:
            return open(path, mode)
        except IOError:
            continue

    raise ValueError("Unable to open %r or %r for mode %r" %
            (basename+suffix, basename, mode))

def parseTableaux(tf):
    """Read a PyPhon tableaux CSV file; return (contenderDict, constraints).

    The file holds one or more tableaux separated by blank rows.  Each
    tableau's first row is a header whose columns 4+ name the constraints;
    every tableau must share the same constraint set.  Data rows look like
    [input, output(s), winner-flag, violation counts...].

    contenderDict maps each input string to a list of
    (outputs, violations, ercs) triples; constraints is the list of
    constraint names taken from the first header row.
    """
    # NOTE(review): 'rU' universal-newline mode is Python-2 era (removed in
    # Python 3.11) -- confirm the target interpreter before porting.
    f = openWithExtension(tf, 'csv', 'rU')
    r = csv.reader(f)

    # Split rows into tableaux at blank rows; a blank row itself becomes the
    # (initially empty) list that collects the next tableau's rows.
    tableaux = [[]]
    for row in r:
        if not row:
            tableaux.append(row)
        else:
            tableaux[-1].append(row)

    # The constraint names come from columns 4+ of the very first header row.
    constraints = tableaux[0][0][3:]
    contenderDict = {}

    for (tn, t) in enumerate(tableaux):
        if not t:
            continue
        # Pop this tableau's header and verify its constraint columns match.
        if t.pop(0)[3:] != constraints:
            parseTableauxDie(tf, tn+1, "All tableaux must have the same "\
                    "constraint set!")
        input = t[0][0]
        # Violation vectors for every candidate row in this tableau.
        allVs = [zip(constraints, [int(v) for v in trow[3:]]) for trow in t]
        #winnerSpecified = bool(len(''.join([trow[2].strip() for trow in t])))
        contenderDict[input] = []
        for (trown, trow) in enumerate(t):
            outputs = trow[1].strip()
            winner = trow[2].strip()
            violns = zip(constraints, [int(v.strip()) for v in trow[3:]])
            # ERCs implied by preferring this candidate over all the others;
            # None marks a candidate whose ERC set is inconsistent.
            try:
                ercs = pyphon.pycbg.impliedERCSet(violns,
                    allVs[:trown]+allVs[trown+1:])
            except pyphon.pycbg.InconsistentERCSetError:
                ercs = None

            if winner == '1':
                # An explicitly marked winner replaces all other contenders
                # for this input, and the rest of the tableau is skipped.
                contenderDict[input] = [(outputs, violns, ercs)]
                break
            elif winner != '-1':
                # '-1' marks an explicit loser; anything else is a contender.
                contenderDict[input].append((outputs, violns, ercs))

    return contenderDict, constraints

def parseGrammars(grammarfile, allConstraints):
    """Parse a grammar-description CSV file into a dict of grammars.

    The file is a sequence of sections.  A section starts with a row
    [COMMAND, name], where COMMAND is one of RANKING, WEIGHTS, STRATA,
    POSET or ERCS; the rows that follow (until the next command row) are
    that section's data.  Empty rows and rows whose first field starts
    with '#' are skipped.

    Returns a dict mapping each grammar name to either:
      * a pyphon.pycbg.ERCSet (RANKING / STRATA / POSET / ERCS), or
      * for WEIGHTS, a list of linear conditions on constraint weights,
        each a (coefficients-by-constraint, intercept, eqtype) triple
        with eqtype -1/0/1 for <=, =, >=.
    """
    reader = csv.reader(openWithExtension(grammarfile, 'csv', 'rU'),
            skipinitialspace=True)

    lineno = 0                  # row counter; used only for error messages
    curCommand = None
    curName = None
    grammarStates = {}          # name -> (command, accumulated raw state)

    possibleCommands = ['RANKING', 'WEIGHTS', 'STRATA', 'POSET', 'ERCS']

    state = None

    for row in reader:
        lineno += 1
        if (not row) or row[0].startswith('#'):
            continue

        if (not curCommand) or row[0].strip() in possibleCommands:
            if curCommand:
                # finish up the last command
                grammarStates[curName] = (curCommand, state)

            curCommand = row[0].strip()
            curName = row[1].strip()
            state = None
            continue

        # continue handling the current command
        if curCommand == 'RANKING':
            # A total ranking is a single row of constraint names.
            if state:
                # BUG FIX: the two string halves used to concatenate into
                # "must bespecified"; a space was missing.
                parseGrammarsDie(grammarfile+'.csv', lineno, 'Ranking "%s" must be '\
                        'specified on one line.' % curName)
            state = [field.strip() for field in row]

        # STRATA, POSET and ERCS all accumulate one cleaned row per line.
        if curCommand in ('STRATA', 'POSET', 'ERCS'):
            if state == None:
                state = []
            state += [[field.strip() for field in row]]

        if curCommand == 'WEIGHTS':
            # Each weights condition is one (in)equality string per row.
            if state == None:
                state = []
            state += [row[0].strip()]

    # BUG FIX: the final section was previously dropped because sections
    # were only flushed when a *following* command row appeared.
    if curCommand:
        grammarStates[curName] = (curCommand, state)

    grammars = {}
    for name, (kind, state) in grammarStates.items():
        if kind == 'RANKING':
            # One ERC per constraint: it dominates everything ranked after
            # it; everything ranked before it is marked 'e'.
            eset = pyphon.pycbg.ERCSet()
            for (i, constr) in enumerate(state):
                eset.add(pyphon.pycbg.ERC([constr], state[i+1:], state[:i]))
            grammars[name] = eset

        if kind == 'STRATA':
            # Every constraint in a stratum dominates all constraints in
            # later strata; co-stratal and earlier constraints are 'e'.
            eset = pyphon.pycbg.ERCSet()
            for (i, strat) in enumerate(state[:-1]):
                L_for_strat = sum(state[i+1:], [])
                E_for_strat = sum(state[:i+1], [])
                for constr in strat:
                    E = [c for c in E_for_strat if c != constr]
                    ERC = pyphon.pycbg.ERC([constr], L_for_strat, E)
                    eset.add(ERC)
            grammars[name] = eset

        if kind == 'POSET':
            # Each row [c1, c2] asserts the single domination c1 >> c2.
            eset = pyphon.pycbg.ERCSet()
            for [c1, c2] in state:
                E = [c for c in allConstraints if (c != c1) and (c != c2)]
                ERC = pyphon.pycbg.ERC([c1], [c2], E)
                eset.add(ERC)
            grammars[name] = eset

        if kind == 'ERCS':
            # Each row is [W, L, E]: space-separated constraint name lists.
            eset = pyphon.pycbg.ERCSet()
            for [W, L, E] in state:
                ERC = pyphon.pycbg.ERC(W.split(' '), L.split(' '), E.split(' '))
                eset.add(ERC)
            grammars[name] = eset

        if kind == 'WEIGHTS':
            # Parse linear (in)equalities over constraint weights, e.g.
            # "2*C1 + C2 <= C3 + 1".  All terms are moved to the left-hand
            # side; the result is (coefficient dict, intercept, eqtype).
            conditions = []
            for line in state:
                L, R = line.split('=')
                if L[-1] == '<':
                    eqtype = -1
                    L = L[:-1].strip()
                elif L[-1] == '>':
                    eqtype = 1
                    L = L[:-1].strip()
                else:
                    eqtype = 0
                R = R.strip()
                dic = dict([(c, 0) for c in allConstraints])
                isept = 0
                # Split each side on '+', then each chunk on '-': the first
                # item of every group is positive, later items negative.
                signs = [[x.split('-') for x in L.split('+')],
                         [x.split('-') for x in R.split('+')]]

                for sIx, side in enumerate(signs):
                    # Left-hand-side terms count positively, right-hand
                    # side terms negatively (everything moves to the left).
                    if sIx == 0:
                        D = 1
                    else:
                        D = -1
                    for group in side:
                        for idx, item in enumerate(group):
                            G = item.split('*')
                            if len(G) > 1:
                                mult, con = G
                            else:
                                mult, con = '', G[0]
                            mult = mult.strip()
                            con = con.strip()
                            if not mult:
                                if con in allConstraints:
                                    # BUG FIX: was 'mult = 1*D', which made
                                    # a bare right-hand constraint come out
                                    # as +1 (D applied twice) instead of -1
                                    # -- inconsistent with the explicit
                                    # '1*C' spelling.  D is applied once,
                                    # below, via 'M * D'.
                                    mult = 1
                                else:
                                    # A bare number is a constant term,
                                    # folded into the intercept.
                                    try:
                                        delta = int(con)
                                    except ValueError:
                                        parseGrammarsDie(grammarfile+'.csv', 0, 'Bad constraint: '+con)
                                    if idx > 0:
                                        delta *= -1
                                    if sIx == 0:
                                        delta *= -1
                                    isept += delta
                                    continue

                            M = int(mult)
                            if idx > 0:
                                M *= -1
                            dic[con] += M * D
                if eqtype == 0:
                    # Equality is approximated by two inequalities with a
                    # +/- 0.01 slack band around the intercept.
                    conditions.append((dic, isept+0.01, -1))
                    conditions.append((dic, isept-0.01, 1))
                else:
                    conditions += [(dic, isept, eqtype)]
            grammars[name] = conditions

    return grammars

def autoCreateTableaux(tableauxfile):
    """Try to (re)generate a missing tableaux file via pyphon_maketableaux.

    The model file, input file and optimization type are inferred from the
    tableaux filename, which is assumed to follow the maketableaux naming
    scheme 'model-input-opt.csv'.  Returns True on success, False if the
    name cannot be parsed, the generator cannot be imported, or the output
    file still does not exist afterwards.
    """
    print('Attempting to auto-generate...')
    # attempt to infer from tableaux filename: modelfile, inputfile, opt.
    # assume filename of the sort created by maketableaux: model-input-opt.csv
    fname = os.path.split(tableauxfile)[1]
    root = os.path.splitext(fname)[0]
    toks = root.split('-')
    if len(toks) != 3:
        print('Auto-generation failed. Unable to extract modelfile, '
              'inputfile, opt from %s.' % tableauxfile)
        return False

    modelFile = toks[0]
    inputFile = toks[1]+'.csv'
    opt = toks[2]
    print('Auto-generation: inferring: modelFile = %r, inputFile = %r, opt = %r'
          % (modelFile, inputFile, opt))

    try:
        import pyphon_maketableaux
    except ImportError:
        # The generator may live next to this script but off sys.path;
        # add our own directory and retry once.
        myPath = os.path.split(__file__)[0]
        if myPath not in sys.path:
            sys.path.append(myPath)
        try:
            import pyphon_maketableaux
        except ImportError:
            # BUG FIX: message used to name 'pyphon_makemodel', but the
            # module that failed to import is pyphon_maketableaux.
            print('Auto-generation failed. Unable to import pyphon_maketableaux.')
            return False

    pyphon_maketableaux.main(modelFile, inputFile, opt, outputFile=tableauxfile)

    # maketableaux does not report errors via return value, so success is
    # judged by whether the requested file now exists.
    if os.path.exists(tableauxfile):
        print('Auto-generation successful.')
        return True
    else:
        print('Auto-generation failed.')
        return False

def main(tableauxfile, outputfile, opt, grammarfile=None, whichGrammar=None,
        volumes=False):
    """Generate and write a factorial typology from a tableaux file.

    tableauxfile -- CSV tableaux file; auto-generated if missing
    outputfile   -- basename for the output CSV and .dot files
    opt          -- 'OT' or 'HG' (case-insensitive)
    grammarfile  -- optional CSV of grammar definitions (see parseGrammars)
    whichGrammar -- name of a grammar in grammarfile used to restrict the
                    typology; if omitted and more than one grammar is
                    defined, the user is prompted interactively
    volumes      -- if True, also compute each language's parameter volume
    """
    opt = opt.lower()
    if opt.lower() not in ('ot', 'hg'):
        print "opt must be one of \"OT\" or \"HG\" (case insensitive)."
        return

    # Make sure the tableaux file exists, attempting auto-generation from
    # its filename if it does not.
    if not os.path.exists(tableauxfile):
        print 'Tableaux file %s does not exist.' % (tableauxfile,)
        if not autoCreateTableaux(tableauxfile):
            print 'Aborting pyphon_generate: tableaux file %s not found '\
                  'and unable to auto-generate.' % tableauxfile
            raise SystemExit

    contenderDict, constraints = parseTableaux(tableauxfile)
    restriction = None

    # Resolve the optional grammar restriction: explicit name, the only
    # grammar defined, or an interactive choice.
    if grammarfile:
        grammars = parseGrammars(grammarfile, constraints)
        if whichGrammar:
            try:
                restriction = grammars[whichGrammar]
            except KeyError:
                print 'Unknown grammar "%s" in %s!' % (whichGrammar,
                        grammarfile+'.csv')
                raise SystemExit
        elif len(grammars) == 1:
            restriction = grammars.values()[0]
        else:
            print 'The following grammars are defined in %s. Enter the name of\n'\
                  'the grammar to use.' % (grammarfile+'.csv',)
            print '\n'.join(grammars.keys())
            choice = raw_input('Choice: ')
            try:
                restriction = grammars[choice]
            except KeyError:
                print 'Unknown grammar "%s" in %s!' % (choice,
                        grammarfile+'.csv')
                raise SystemExit

    # Compute the typology with the selected optimization scheme.
    if opt == 'ot':
        typology = pyphon.pycbg.typologyOT(None, contenderDict.keys(),
                ercDict=contenderDict, restriction=restriction, progress=True)
    elif opt == 'hg':
        typology = pyphon.pycbg.typologyHG(None, contenderDict.keys(),
                contenderDict=contenderDict, verbose=False, progress=True,
                restriction=restriction)

    # output a T-Order diagram
    outputBase = outputfile.replace('.csv', '')
    tOrder = pyphon.pycbg.TOrder(typology)
    tOrder.toDot(outputBase+'-tOrder.dot')
    print 'Wrote T-Order diagram to %s.' % (outputBase+'-tOrder.dot',)

    # parameter volumes
    if volumes:
        print 'Computing parameter volumes...'
        if opt == 'ot':
            volmap = pyphon.pycbg.parameterVolumesOT(typology,
                    restriction=restriction)
        elif opt == 'hg':
            volmap = pyphon.pycbg.parameterVolumesHG(typology,
                    restriction=restriction)
        else:
            print 'Cannot compute parameter volumes for %r.' % opt
            volmap = None
    else:
        volmap = None

    if volmap:
        # Volumes of all languages should cover the whole parameter space.
        print 'Sanity check: total parameter volume of all languages: %f' %\
            sum(volmap.values())

    # output the typology file
    f = openWithExtension(outputfile, 'csv', 'w')
    w = csv.writer(f)

    tlist = sorted(typology.iteritems())
    for name, lg in tlist:
        # Each language is (ERC set, forms); forms are 4-tuples of
        # (input, outputs, violations, ercs).
        lgERCs, forms = lg
        surfaceOs = set()
        for i,outputs,v,e in forms:
            # Multiple tied outputs are separated by ' | '.
            for o in outputs.split(' | '):
                surfaceOs.add("[%s]"%o)
        w.writerow(['Language ID:', name])
        w.writerow(['Surface forms:', '{'+', '.join(surfaceOs)+'}'])
        if lgERCs and opt == 'ot':
            w.writerow(['ERC Set:',
                '{'+\
                ', '.join(lgERCs.stringERCs(constraintOrder=constraints))+'}'])
        # Per-scheme header: OT adds an ERCs column and a ranking volume,
        # HG a weighting volume.
        if opt == 'ot': 
            if volmap:
                vol = volmap[name]
                w.writerow(['Ranking Volume:',
                    '%d/%d! = %f' % (vol, len(constraints),
                    float(vol)/pyphon.pycbg.factorial(len(constraints)))])
            w.writerow(['Input', 'Output'] + constraints + ['ERCs'])
        elif opt == 'hg':
            if volmap:
                vol = volmap[name]
                w.writerow(['Weighting Volume:',
                    '%f' % (vol,)])
            w.writerow(['Input', 'Output'] + constraints)
        for (input, output, violns, ercs) in forms:
            row = ['/%s/'%input, ' | '.join(['[%s]'%o for o in\
                output.split(' | ')])] + [v for c,v in violns]
            if ercs and opt == 'ot':
                row += ['{'+\
                        ', '.join(ercs.stringERCs(constraintOrder=constraints))+'}']
            w.writerow(row)
        w.writerow([])

    print 'Wrote typology to %s (volumes=%s).' % (f.name, volumes)

    #print 'Looking for duplicates...'
    #for (j, (name1, lg1)) in enumerate(tlist):
    #    lgERCs1, forms1 = lg1
    #    surfaceOs1 = set()
    #    iov1 = set()
    #    for i,os,v,e in forms1:
    #        iov1.add((i,os,v))
    #        for o in os.split(' | '):
    #            surfaceOs1.add("[%s]"%o)
    #    print name1
    #    print '\tSame surface sets:',
    #    for name2, lg2 in tlist[j+1:]:
    #        lgERCs2, forms2 = lg2
    #        surfaceOs2 = set()
    #        for i,os,v,e in forms2:
    #            for o in os.split(' | '):
    #                surfaceOs2.add("[%s]"%o)
    #        if surfaceOs1 == surfaceOs2:
    #            print name2,
    #    print
    #    print '\tSame violations:',
    #    for name2, lg2 in tlist[j+1:]:
    #        lgERCs2, forms2 = lg2
    #        iov2 = set()
    #        for i,os,v,e in forms2:
    #            iov2.add((i,os,v))
    #        if iov1 == iov2:
    #            print name2,
    #    print


if __name__ == "__main__":
    # Command-line front end: parse flags with optparse, then dispatch
    # to main() with exactly three positional arguments.
    parser = OptionParser(usage=usage)
    parser.add_option('-V', '--version', action='store_true', dest='version',
            default=False,
            help='Print pyPhon version and quit.')
    parser.add_option('-v', '--volumes', action='store_true', dest='volumes',
            default=False,
            help='Compute the parameter volume of each generated language and'\
                 ' include it in the output [default = false].')
    parser.add_option('-f', '--grammarfile', dest='grammarfile',
            metavar='GRAMMAR_FILE',
            help='An optional CSV file containing grammar definitions; one of '\
            'the defined grammars may be chosen to restrict the generated '\
            'typology.')
    parser.add_option('-g', '--grammar', dest='grammar',
            metavar='GRAMMAR_NAME',
            help='An optional name of one of the grammars defined in '\
            'GRAMMAR_FILE, if the latter has been specified. Restricts the '\
            'generated typology to include only those languages that are '\
            'consistent with the specified grammar. If GRAMMAR_FILE is given '\
            'but GRAMMAR_NAME is not, an interactive prompt will be given to '\
            'select the grammar. Either way, the chosen grammar must be '\
            'consistent with the optimization type (OT or HG).')

    p_options, args = parser.parse_args()

    if p_options.version:
        print 'pyPhon version %s.' % pyphon.__version__
        raise SystemExit

    # -g without -f is an error; anything other than exactly three
    # positional arguments just prints the help text.
    if p_options.grammar and not p_options.grammarfile:
        print 'Error: GRAMMAR_NAME specified, but no GRAMMAR_FILE given.\n'
        parser.print_help()
    elif len(args) < 3:
        parser.print_help()
    elif len(args) < 4:
        main(args[0], args[1], args[2], p_options.grammarfile,
                p_options.grammar, p_options.volumes)
    else:
        parser.print_help()


# To generate graphs of typological implications (T-orders):
# Associate each (i,o,v) in a tableau with the set of languages it occurs in.
# These are the nodes; for each pair of nodes A B  add an arc A->B if the
# languages of A are a superset of those of B. Any pair that have arcs in both
# directions can be collapsed.
