from __future__ import division
import networkx as nx, numpy as np, tempfile
from string import ascii_letters,digits
from collections import namedtuple,defaultdict
import operator,functools,re,os
from math import exp
from utils import *

# Module-level working state for the current parse; parse_expr() rebinds it.
# NOTE(review): the trailing comment says pairs (char, bounds), but ntparse
# reads `.matches` and subset_bounds reads `.bounds` from each entry -- the
# elements presumably are symbol objects with those attributes; confirm
# against the real caller.
SYMBOL_LIST = [] #list of pairs (char, bounds) where bounds = (xmin, xmax, ymin, ymax)

def parse_score(op,bounds0,*bounds):
    """Score contributed by applying rule `op` alone, given the parent box
    bounds0 and the child boxes.  Feature values in {0,1} are mapped to
    {-1,+1} before the dot product with the rule's weights."""
    raw = features(op, bounds0, *bounds)
    signed = 2 * np.array(raw, dtype=np.float32) - 1
    return np.dot(signed, WEIGHTS[op]) - LOGZ[op]

def features(op,bounds0,*bounds):
    "Evaluate every feature function registered for rule `op` on the given boxes."
    return [f(bounds0, *bounds) for f in FEATURES[op]]
        

def test_parse():
    """Smoke tests for parse_expr on hand-built symbol lists.

    Each symbol is a pair (char, (xmin, xmax, ymin, ymax)).

    Fixed: parse_expr returns a single TeX string (see its return
    statement), so the old triple unpacking ``tex_str,score,inds = ...``
    would raise ValueError; unpack a single value instead.
    NOTE(review): ntparse reads `.matches`/`.bounds` attributes from
    symbols, which these bare tuples lack -- presumably this test predates
    that interface; confirm before re-enabling it.
    """
    easiest = [("x", (0, 1, 0, 1))]
    easier = [("x", (0, 1, 0, 1)),
              ("2", (1, 1.1, .9, 1))]
    easy = [("x", (0, 1, 0, 1)),
            ("2", (1, 1.1, .9, 1)),
            ("+", (1.3, 2.3, .3, .7)),
            ("3", (2.5, 3.4, 0, 1))]
    medium = [("x", (0, 1, 0, 1)),
              ("2", (1, 1.1, .9, 1)),
              ("+", (1.3, 2.3, .3, .7)),
              ("3", (2.5, 3.4, 0, 1)),
              ("-", (-.1, 4, -.1, 0)),
              ("z", (1, 2, -1.3, -.3))]

    assert parse_expr(easiest) == "x"
    assert parse_expr(easier) == "x^2"
    assert parse_expr(easy) == "x^2+3"
    assert parse_expr(medium) == r"\frac{x^2+3}{z}"
    

def parse_expr(symbol_list):
    """Top-level parse entry point.

    Rebinds the SYMBOL_LIST global, parses all symbol indices as an "Expr",
    and returns the LaTeX rendering of the best parse tree.  The prints are
    debug traces of the tree, its features, and the final TeX.

    Fixed: tree2tex was called twice (once to print, once to return);
    compute it once.  Also use the function form of print, which behaves
    identically for a single argument under Python 2 and 3.

    NOTE(review): ntparse/subset_bounds are memoized but read SYMBOL_LIST;
    presumably their caches must be cleared between calls with different
    symbol lists -- confirm against utils.memoized.
    """
    global SYMBOL_LIST
    SYMBOL_LIST = symbol_list
    result = ntparse("Expr", tuple(range(len(symbol_list)))).tree
    tex = tree2tex(result)
    print(result)
    print(get_features(result))
    print(tex)
    return tex

def tree2tex(tree):
    """Recursively render a parse tree as a LaTeX string."""
    if is_atom(tree):
        return tree.label
    rendered_children = [tree2tex(sub) for sub in tree.subtrees]
    return tex_combine(tree.label, *rendered_children)
    
    
SYMB_SCORE = 0  # NOTE(review): not referenced anywhere in this file

def terminal_replacements(nonterminal):
    """Terminal characters `nonterminal` may expand to directly.
    A literal symbol simply stands for itself."""
    if is_symb(nonterminal):
        return [nonterminal]
    return TERMINAL_REPLACEMENTS[nonterminal]

# Sentinel handed to safemax as the default: an impossible character with a
# -HUGE log-probability, returned when no terminal match exists.
no_match  = Match("NM",-HUGE)

@memoized
def ntparse(nonterminal,symbol_ind_list):
    """Return the best Parse of the symbols indexed by symbol_ind_list with
    `nonterminal` as the root of the parse tree.

    NOTE(review): memoized on (nonterminal, symbol_ind_list) but reads the
    SYMBOL_LIST global; cached entries presumably go stale if parse_expr is
    called again with different symbols -- confirm the cache is cleared
    between parses (see utils.memoized).
    """
    bounds = subset_bounds(symbol_ind_list)
    # Fallback result when nothing applies: a "?" leaf scored -HUGE.
    best_op, best_tree, best_score = None, Tree("?",bounds,None), -HUGE

    if len(symbol_ind_list) == 1:
        # Base case: a single symbol.  Pick the highest-log-prob character
        # match that is a legal terminal replacement for this nonterminal.
        char,logp = safemax(((match.char,match.logp) for match in SYMBOL_LIST[symbol_ind_list[0]].matches
                             if match.char in terminal_replacements(nonterminal)),
                            key = second,
                            default = no_match)
        cprint("purple",char,logp)  # debug trace of the chosen terminal
        return Parse(Tree(char,bounds,None),logp,bounds)
    # Recursive case: try every rule for this nonterminal over every
    # partition of the indices; keep the highest cumulative score.
    for rule in apply_rule(nonterminal):
        for parse_list in listparse(rule.rightlist,symbol_ind_list):
            # Total score = children's scores + this rule's own layout score.
            cum_score = (sum(parse.score for parse in parse_list) +
                         parse_score(
                             rule.op,bounds,
                             *(parse.bounds for parse in parse_list)))

            if cum_score > best_score:
                best_op = rule.op  # NOTE(review): assigned but never read
                best_score = cum_score
                best_tree = Tree(rule.op,bounds,[parse.tree for parse in parse_list])

    return Parse(best_tree,best_score,bounds)

def listparse(rightlist,symbol_ind_list):
    """Yield lists of parses, one per nonterminal in rightlist, whose index
    sets partition symbol_ind_list.  All real parsing work is delegated to
    ntparse; the search short-circuits a split as soon as the first
    nonterminal fails to parse its candidate subset."""
    if len(symbol_ind_list) < len(rightlist):
        return  # not enough symbols to cover every RHS item
    if len(rightlist) == 1:
        yield [ntparse(rightlist[0], symbol_ind_list)]
        return
    head = rightlist[0]
    # A terminal-only head needs exactly one symbol; otherwise try prefixes.
    splitter = singletons_and_comps if is_sing(head) else subsets_and_comps
    for chunk, rest in splitter(symbol_ind_list):
        head_parse = ntparse(head, chunk)
        if head_parse.score <= -BIG:
            continue  # hopeless head parse: skip this split entirely
        for tail_parses in listparse(rightlist[1:], rest):
            yield [head_parse] + tail_parses

@memoized
def is_sing(string):
    """True when `string` has no expansion rules, i.e. it can only be a
    single terminal symbol."""
    return len(apply_rule(string)) == 0
                
def tex_combine(op,*subexprs):
    "Combine already-rendered subexpressions using op's LaTeX template."
    combine = TEX_OPS[op]
    return combine(*subexprs)

def bracket(string):
    """Wrap multi-character TeX in braces; a single character needs none."""
    if len(string) == 1:
        return string
    return "{%s}" % string

def is_symb(string):
    """True for terminal symbols: anything that is not a grammar nonterminal."""
    return not (string in NONTERMINALS)

def str2feat(math_expr,n_rhs):
    """Compile a feature string into a function of n_rhs+1 bounding boxes.

    Argument i of the resulting lambda is the 4-tuple
    (xmin_i, xmax_i, ymin_i, ymax_i); heightN tokens in math_expr are
    rewritten to (ymaxN-yminN) first.

    Fixed: xrange -> range (identical when iterated; removes the
    Python-2-only name from this function).

    SECURITY NOTE: eval() is acceptable here only because math_expr comes
    from the hard-coded feature strings in this module -- never feed this
    function untrusted input."""
    args_str = ",".join(["(xmin%i,xmax%i,ymin%i,ymax%i)" % (i, i, i, i)
                         for i in range(n_rhs + 1)])
    return eval("lambda %s: %s" % (args_str, height_replace(math_expr)))
    
def height_replace(math_expr):
    """Rewrite every heightN token as (ymaxN-yminN) so feature strings can
    refer to bounding-box heights."""
    def expand(mo):
        digit = mo.group(1)
        return "(ymax%s-ymin%s)" % (digit, digit)
    return re.sub(r"height(\d)", expand, math_expr)

def makefeats(li,n_rhs):
    "Compile a list of feature strings into feature functions of n_rhs+1 boxes."
    return [str2feat(expr, n_rhs) for expr in li]



def line2rule(string):
    """Parse one grammar line "Lhs -> op Rhs1 Rhs2 ..." into a Rule."""
    tokens = tuple(string.split())
    # tokens[1] is the literal "->" separator and is skipped.
    return Rule(tokens[0], tokens[2], tokens[3:])


@memoized
def apply_rule(nonterminal):
    """All grammar rules whose left-hand side is `nonterminal`."""
    return [r for r in RULES if r.left == nonterminal]

@memoized
def subset_bounds(symbol_ind_list):
    """Bounding box covering the symbols named by symbol_ind_list.

    NOTE(review): memoized but reads the SYMBOL_LIST global; cached results
    presumably go stale if parse_expr is called with a new symbol list --
    confirm that utils.memoized caches are cleared between parses."""
    boxes = [SYMBOL_LIST[ind].bounds for ind in symbol_ind_list]
    return bounding_box(boxes)


# Slower. Write sum in any order.
#@memoized
#def subsets(tup):
    #"subsequences of an integer tuple whose elements are consecutive."
    #return tuple(
        #reduce(operator.__add__,
               #([tup[start:(end+1)]
                 #for end in xrange(start,len(tup))
                 #if start-end == tup[start]-tup[end]]
                #for start in xrange(0,len(tup)))))

# Fast. Requires summand first                
@memoized
def subsets(tup):
    """All nonempty prefixes of tup (the fast scheme noted above: the
    summand must appear first).  The empty tuple maps to ((),)."""
    if not tup:
        return ((),)
    return tuple(tup[:end] for end in irange(1, len(tup)))


# Slowest: all subsets                     
#@memoized
#def subsets(tup):
#    "All subsets of el"
#    if tup == (): return ((),)
#    else:
#        later = subsets(tup[1:])
#        return later + tuple(tup[0:1]+ss for ss in later)

#@verbose
@memoized
def subsets_and_comps(tup):
    """Pairs (subset, complement) over the proper nonempty subsets of tup
    produced by subsets()."""
    pairs = []
    for ss in subsets(tup):
        if increasing(0, len(ss), len(tup)):
            comp = tuple(el for el in tup if el not in ss)
            pairs.append((ss, comp))
    return pairs

@memoized
def singletons_and_comps(tup):
    """Pairs ((el,), rest) splitting off each element of tup in turn."""
    out = []
    for ind in range(len(tup)):
        out.append(((tup[ind],), tup[:ind] + tup[ind+1:]))
    return out

def bounding_box(bounds_tup):
    """Smallest box (xmin, xmax, ymin, ymax) enclosing every box in bounds_tup."""
    arr = np.asarray(bounds_tup)
    xmin = arr[:, 0].min()
    xmax = arr[:, 1].max()
    ymin = arr[:, 2].min()
    ymax = arr[:, 3].max()
    return (xmin, xmax, ymin, ymax)




############################
### data ###################
############################

# Grammar: each line reads "Lhs -> op Rhs1 Rhs2 ...", where `op` names the
# entry used in FEATURES and TEX_OPS, and the Rhs tokens are nonterminals
# or literal symbols.  Box-index convention throughout the feature strings:
# 0 = the parent's bounding box, 1..n = the RHS boxes in order.
PRETTY_GRAMMAR = [
    "Expr -> right Expr Expr",
    "Expr -> right2 ( Expr )",
    "Expr -> sup ExprWithScript Expr",
    "Expr -> sub ExprWithScript Expr",
    "Expr -> supsub ExprWithScript Expr Expr",
    "Expr -> frac - Expr Expr",
    "Expr -> comb Expr Binop Expr",
    "Expr -> sum1 Bigop Expr",
    "Expr -> sum2 Bigop Expr Expr",
    "Expr -> sum3 Bigop Expr Expr Expr",
    "ExprWithScript -> right2 ( Expr )",
    "Expr -> box \\sqrt Expr"]

    #               sigma summand sub sup  (sum3 RHS order; TEX_OPS["sum3"] takes sub then sup)

#PRETTY_GRAMMAR = [
    #"Expr -> right Expr Expr",
    #"Expr -> sub Expr Expr",
    #"Expr -> sup Expr Expr",
    #"Expr -> frac - Expr Expr",
    #"Expr -> comb Expr Binop Expr",
    #"Expr -> sum1 \\sum Expr",
    #"Expr -> sum2 \\sum Expr Expr",
    #"Expr -> sum3 \\sum Expr Expr Expr"]

#OP_PROBS = ...    

# NOTE(review): under Python 2 `map` returns a list; wrap in list() if
# porting to Python 3 (apply_rule filters RULES repeatedly).
RULES =  map(line2rule,PRETTY_GRAMMAR)

NONTERMINALS = ["Expr","Binop","ExprWithScript","Bigop"]

# Terminal alphabets.
CHARS = list(ascii_letters) + list(digits) + [r"\infty"]
BINOPS = ["+","-","="]
BIGOPS = ["\\sum","\\prod","\\int"]

# Which terminal characters each nonterminal may expand to directly.
TERMINAL_REPLACEMENTS = dict(
    Expr = set(CHARS),
    ExprWithScript = set(CHARS),
    Binop = set(BINOPS),
    Bigop = set(BIGOPS))

# Geometric feature strings, later compiled by makefeats/str2feat.
# Naming scheme: L/R/T/B = left/right/top/bottom edge, C = horizontal
# center; digits are box indices (0 = parent, 1..n = RHS boxes).  E.g.
# LL1 = "left edge of parent close to left edge of box 1"; R1L2 = "right
# edge of box 1 close to left edge of box 2"; heightN expands to
# (ymaxN-yminN) via height_replace.
LL1 = ".3>abs(xmin0 - xmin1)/height0"
LL1a = "xmin0 == xmin1"
RR1a = "xmax0 == xmax1"
BB1 = ".5>abs(ymin0 - ymin1)/height0"
TT1 = ".5>abs(ymax0 - ymax1)/height0"
R1L2 = "1>abs(xmax1 - xmin2)/height0"
TT2 = ".5>abs(ymax0 - ymax2)/height0"
BB2 = ".5>abs(ymin0 - ymin2)/height0"
RR2 = ".1>abs(xmax0 - xmax2)/height0"
Bigger12a = "1.7<height1/height2"
Bigger12b = "2.5<height1/height2"
T1B2 = ".5>abs(ymax1-ymin2)/height0"
B1T3 = ".5>abs(ymin1-ymax3)/height0"
BB3 = ".5>abs(ymin0 - ymin3)/height0"
RR1 = ".2>abs(xmax0 - xmax1)/height0"
# NOTE(review): RR2 is redefined here with a looser threshold (.1 -> .2);
# the earlier binding above is dead.  Confirm which threshold was intended.
RR2 = ".2>abs(xmax0 - xmax2)/height0"
LL2 = ".2>abs(xmin0 - xmin2)/height0"
RR3 = ".2>abs(xmax0 - xmax3)/height0"
R1L3 = "1>abs(xmax1 - xmin3)/height0"
R2L3 = ".5>abs(xmax2 - xmin3)/height0"
RR3 = ".2>abs(xmax0 - xmax3)/height0"  # NOTE(review): duplicate of RR3 above (identical value)
TT3 = ".5>abs(ymax0 - ymax3)/height0"
Bigger13 = "2<height1/height3"
Bigger14 = "2<height1/height4"
T1B4 = ".5>abs(ymax1-ymin4)/height0"
S12 = "abs(height1-height2)/height0 < .5"
S13 = "abs(height1-height3)/height0 < .5"
C1C2 = "abs(xmin1+xmax1 - xmin2 - xmax2)/(2*height0)<.2"
C1C3 = "abs(xmin1+xmax1 - xmin3 - xmax3)/(2*height0)<.2"
R12 = "xmax1 - xmin2 > 0"
R23 = "xmax2 - xmin3 > 0"

# Feature sets per rule operator; list order fixes each feature's index
# into that rule's weight vector.
RightFeats = [LL1,BB1,TT1,R1L2,TT2,BB2,RR2,S12,R12]
SupFeats =   [LL1,BB1,TT1,R1L2,TT2,Bigger12a,Bigger12b,RR2]
SubFeats = [LL1,BB1,TT1,R1L2,Bigger12a,Bigger12b,BB2,RR2]
CombFeats = [LL1,R1L2,R2L3,RR3,TT1,TT3,S13,R12,R23]
Sum1Feats = [LL1,BB1,TT1,R1L2,BB2,RR2,R12]
Sum2Feats = [LL1,BB1,TT1,R1L2,BB2,RR2,Bigger13,B1T3,C1C2,R12]
Sum3Feats = [LL1,BB1,TT1,R1L2,BB2,RR2,Bigger13,Bigger14,B1T3,T1B4,C1C2,C1C3,R12]
SupSubFeats = [LL1,BB1,TT1,R1L2,TT2,Bigger12a,Bigger12b,RR2,R1L3,Bigger13,BB3,RR3]
Right2Feats = [LL1,BB1,TT1,R1L2,TT2,BB2,R2L3,BB3,TT3,RR3,R12,R23]
FracFeats = [T1B2,B1T3,TT2,BB3,LL1a,RR1a,LL1,RR1,C1C2,C1C3]
BoxFeats = [LL1,LL2,TT1,TT2,RR1,RR2,BB1,BB2]

# op -> list of compiled feature functions.  The integer is the number of
# RHS boxes for that rule; each compiled lambda takes n_rhs+1 box
# 4-tuples, box 0 being the parent's bounds.
FEATURES = dict(
    sup = makefeats(SupFeats,2),
    sub = makefeats(SubFeats,2),
    supsub = makefeats(SupSubFeats,3),    
    right = makefeats(RightFeats,2),
    right2 = makefeats(Right2Feats,3),
    frac = makefeats(FracFeats,3),
    comb = makefeats(CombFeats,3),
    sum1 = makefeats(Sum1Feats,2),
    sum2 = makefeats(Sum2Feats,3),
    sum3 = makefeats(Sum3Feats,4),
    box = makefeats(BoxFeats,2))

FAVG_GUESS = .8  # prior guess at the average feature firing rate
W_DEFAULT = np.arctanh(FAVG_GUESS)  # weight implied by that guess (not referenced in this view)
def noisy_const_like(li,c):
    """Float32 array with one entry per element of li, each approximately c
    plus uniform noise in [0, .01) to break ties."""
    size = len(li)
    jitter = np.float32(np.random.random(size)) * .01
    return jitter + c * np.ones(size, dtype=np.float32)

#WEIGHTS["frac"][-2:] = 4.

#WEIGHTS = dict(
    #sup = np.array([1,1,1,1,1,1,1],dtype=np.float32),
    #sub = np.array([1,1,1,1,1,1,1],dtype=np.float32),
    #right = np.array([1,1,1,1,1,1,1],dtype=np.float32),
    #frac = np.array([1,1,1,1,1,1],dtype=np.float32),
    #comb = np.array([1,1,1,1,1,1],dtype=np.float32),
    #sum1 = np.array([1,1,1,1,1,1],dtype=np.float32),
    #sum2 = np.array([1,1,1,1,1,1,1,1],dtype=np.float32),
    #sum3 = np.array([1,1,1,1,1,1,1,1,1,1],dtype=np.float32))

WEIGHTS_FILE = "WEIGHTS.pickle"
LOGZ_FILE = "LOGZ.pickle"
# Best-effort load of trained parameters.  If it fails, WEIGHTS/LOGZ stay
# undefined and parse_score will raise NameError; run refresh_eqn_examples()
# to regenerate the pickles.
try:
    WEIGHTS = load(WEIGHTS_FILE)
    LOGZ = load(LOGZ_FILE)
except Exception as e:
    # Fixed: report the actual error instead of always claiming the files
    # were not found (the old message hid corrupt-pickle failures too).
    print("weights and logz files could not be loaded: %s" % e)


# op -> function combining already-rendered TeX subexpressions.  Argument
# order mirrors the grammar RHS.  Note `frac` receives (and discards) the
# horizontal-line glyph, sum1/box receive the big-operator glyph first,
# and sum2/sum3 receive the summand before the sub/sup scripts.
TEX_OPS = dict(
    right = lambda x,y: x+y,
    right2 = lambda x,y,z: x+y+z,
    sub = lambda x,y: "%s_%s"%(x,bracket(y)),
    sup = lambda x,y: "%s^%s"%(x,bracket(y)),
    supsub = lambda x,y,z: "%s^%s_%s"%(x,bracket(y),bracket(z)),    
    frac = lambda hline,x,y: r"\frac{%s}{%s}"%(x,y),
    comb = lambda x,y,z: x+y+z,
    sum1 = lambda sigma,x: "%s %s"%(sigma,x),
    sum2 = lambda sigma,summand,sub:"%s_%s %s"%(sigma,bracket(sub),summand),
    sum3 = lambda sigma,summand,sub,sup:"%s_%s^%s %s"%(sigma,bracket(sub),bracket(sup),summand),
    box = lambda sqrt,x: "%s %s"%(sqrt,bracket(x)))

##################
#### training ####
##################

def get_features(tree):
    """Collect (op, feature-values) pairs for every internal node of the
    parse tree, children before their parent."""
    if is_atom(tree):
        return []
    child_feats = mappend(get_features, tree.subtrees)
    own = (tree.label,
           features(tree.label, tree.bounds,
                    *(sub.bounds for sub in tree.subtrees)))
    return child_feats + [own]

def is_atom(tree):
    """True for leaf nodes: a leaf stores None in place of a subtree list."""
    no_children = tree.subtrees is None
    return no_children

N_FAKE_OBS = 1.  # pseudo-count of fake observations used for smoothing
def refresh_eqn_examples():
    """Re-estimate feature WEIGHTS and LOGZ from the saved example trees in
    john-eqns/ and dump them to WEIGHTS_FILE / LOGZ_FILE.

    Each feature's firing rate is smoothed with N_FAKE_OBS fake
    observations at the prior mean FAVG_GUESS, then mapped through arctanh
    to a weight; LOGZ stores each op's log partition sum(log(2*cosh(w))).

    Fixed: the local previously named `features` shadowed the module-level
    features() function; renamed to `observations`."""
    observations = mappend(get_features, map(load, listdir_full("john-eqns")))
    op2sum = mapvals(FEATURES, lambda arr: noisy_const_like(arr, FAVG_GUESS))
    op2count = mapvals(FEATURES, lambda _: N_FAKE_OBS)
    for op, arr in observations:
        op2sum[op] += arr
        op2count[op] += 1
    weights = dict((op, np.arctanh(op2sum[op]/op2count[op])) for op in op2count)
    dump(weights, WEIGHTS_FILE)
    dump(mapvals(weights, lambda wt_vec: np.log((2*np.cosh(wt_vec))).sum()), LOGZ_FILE)
        
    
    
        

if __name__ == '__main__':
    # Manual entry points, both disabled by default.
    #test_parse()
    #load_eqn_examples()  # NOTE(review): no such function here; probably refresh_eqn_examples()
    pass

