import random
import process_efw
import feedforward
import math
import backprop
import helpers
from helpers import merge_moms

# Grammar used by train() to sample training words.
gram = process_efw.load_grammar()

# Parameters of the Gaussian noise source used by the helpers below.
mu = 0.0
dev = 0.1


# PEP 8 (E731): named functions instead of lambdas bound to names.
# Behavior is identical: mu/dev are still looked up at call time.
def rrand():
    """Return one sample from N(mu, dev)."""
    return random.gauss(mu, dev)


def randev():
    """Return a non-negative Gaussian sample, |N(mu, dev)|."""
    return math.fabs(rrand())


def negrandev():
    """Return a non-positive Gaussian sample, -|N(mu, dev)|."""
    return -randev()


# Default step increment used when building counter/uncounter weights.
STEPSIZE = .03

# Historical randomized initialization, kept for reference:
# counter = [[[randev(), 1.0 + randev(), .25 + randev()]]]
def setup_counter(stepsize = STEPSIZE):
    """Build fresh encoder ("counter") weights.

    Layout is [layer][neuron][weight]: bias 0.0, recurrent weight 1.0,
    and the marker input weighted by stepsize.  A new nested list is
    built on every call so instances never share state.
    """
    neuron = [0.0, 1.0, stepsize]
    return [[neuron]]


counter = setup_counter()       # encoder weights used by train()/apply()
enmom = [[[0.0, 0.0, 0.0]]]     # encoder momentum accumulator
# Historical randomized initialization, kept for reference:
# uncounter = [[[-.25 + negrandev(), negrandev()]]]
def setup_uncounter(stepsize = STEPSIZE):
    """Build fresh decoder ("uncounter") weights.

    Layout is [layer][neuron][weight]: a -stepsize bias plus a unit
    recurrent weight (the decoder takes no marker input).
    """
    neuron = [-stepsize, 1.0]
    return [[neuron]]


uncounter = setup_uncounter()   # decoder weights used by train()/apply()
demom = [[[0.0, 0.0]]]          # decoder momentum accumulator

# Current curriculum depth: the maximum word size train() will use.
depth = 2


def augment_preacts(preacts, prev_layer = None):
    """Prepend each step's predecessor activations to its preactivation row.

    Mutates preacts in place: preacts[p] becomes [predecessor] + preacts[p],
    where the predecessor of step p+1 is the last entry of the (already
    augmented) row for step p.  Returns None.

    preacts    -- list of per-step preactivation rows (list of lists).
    prev_layer -- seed predecessor for step 0; defaults to a fresh empty
                  list per call (avoids the mutable-default pitfall).
    """
    if prev_layer is None:
        prev_layer = []

    # enumerate instead of xrange(len(...)): idiomatic, and works on both
    # Python 2 and 3; the index is still needed for in-place reassignment.
    for idx, row in enumerate(preacts):
        preacts[idx] = [prev_layer] + row
        prev_layer = preacts[idx][-1]


def apply(d, l, stepsize = STEPSIZE, verbose = True):
    """d is word-size, l is index."""
    counter = setup_counter(stepsize)
    uncounter = setup_uncounter(stepsize)

    encodes = [[0.0]] + [None for c in xrange(d)]
    for c in xrange(d):
        (encodes[c+1],enpreacts,enpostacts) =  feedforward.propagate(\
                                encodes[c] + [0.0 if c > l else 1.0],\
                                counter)

    decodes = [encodes[-1]]
    failed = False
    for u in xrange(d):
        (decode, depreacts,depostacts) =  feedforward.propagate(\
                                decodes[-1], uncounter)
        decodes.append(decode)
        if decodes[-1][0] < 0.0 and u < d - l:
            failed = True

    if verbose:
        print "encodes: " + str(encodes)
        print "decodes: " + str(decodes)
        if failed:
            print "Failed to decode correctly."

    err = 0.0
    for dec in decodes[:l+1]:
        err += max(0.0, -dec[0])
    for dec in decodes[l+1:]:
        err += max(0.0, dec[0])
    return err

#        print
#        print [en[-1] for en in enpostacts]
#        print [de[-1] for de in depostacts]



def train(n, verbose = True, incthresh = 20, learnrate = 0.002):
    """Train the counter (encoder) and uncounter (decoder) nets on n words.

    n         -- number of training iterations.
    verbose   -- print per-iteration diagnostics when True.
    incthresh -- after more than this many consecutive zero-error rounds,
                 the global curriculum depth is incremented.
    learnrate -- learning rate handed to backprop.backprop_deltas.

    Side effects: rebinds the module-level momentum buffers enmom/demom,
    may increment the module-level depth, and the final merge_moms calls
    update the module-level counter/uncounter weights in place.
    """
    global enmom, demom, depth
    times = 0  # consecutive zero-error rounds; drives the curriculum
    for i in xrange(n):
        # Curriculum step: once stable for more than incthresh rounds,
        # train on longer words.
        if times > incthresh:
            depth += 1
            times = 0

        word = gram.generateWord()
        d = min(depth, len(word))  # effective sequence length this round
        l = random.randint(0, d)   # marker index: steps <= l get a 1.0 input
        if verbose:
            print "---------------------------------"
            print word + " | depth: " +str(depth) + "; d: " + str(d) + "; l: " + str(l)

        # Encoding pass: thread each output back in as the next input,
        # with an extra input of 1.0 up to step l and 0.0 afterwards.
        enpreacts = [None for p in xrange(d)]
        enpostacts = [None for p in xrange(d)]
        encodes = [[0.0]] + [None for c in xrange(d)]
        for c in xrange(d):
            (encodes[c+1],enpreacts[c],enpostacts[c]) =  feedforward.propagate(\
                                    encodes[c] + [0.0 if c > l else 1.0],\
                                    counter)

        if verbose:
            print "encodes: " + str(encodes)

        # Decoding pass: unroll from the final encoding until either d
        # steps are done or the output first goes negative.
        depreacts = [None for p in xrange(d)]
        depostacts = [None for p in xrange(d)]
        decodes = [encodes[-1]]
        for u in xrange(d):
            (decode, depreacts[u],depostacts[u]) =  feedforward.propagate(\
                                    decodes[-1], uncounter)
            decodes.append(decode)
            if decodes[-1][0] < 0.0:
                # Early stop: drop the unused trailing slots so the
                # backward loops below match the decoded prefix.
                depreacts = depreacts[:u+1]
                depostacts = depostacts[:u+1]
                break

        if verbose:
            print "decodes: " + str(decodes)


        # Final-decode error: a full-length decode counts as success
        # (error 0.0) when its last value is non-positive; an early stop
        # is pushed back above zero by a small random margin.
        if len(decodes) == d+1:
            error = min(-decodes[-1][0], 0.0)
        else:
            error = math.fabs(randev()) - decodes[-1][0]

        if error == 0.0:
            times += 1
        else:
            times = 0

        if verbose:
            print "error: " + str(error) + " times: " + str(times)
        deltas =  backprop.errors2deltas([error], depreacts[-1][-1])
        # Prepend predecessor activations so backprop can reach the
        # recurrent input of every step.
        augment_preacts(depreacts, enpreacts[-1][-1])
        augment_preacts(enpreacts)

        # NOTE(review): the comprehension variable "d" leaks in Python 2
        # and clobbers the word-size d above; currently harmless since d
        # is not read again this iteration, but worth renaming.
        demoms = [None for d in decodes[1:]]

        # Backprop through the decoder, last step first, folding in a
        # per-step "decrease by STEPSIZE/sqrt(u)" target error.
        for u in xrange(len(decodes)-1, 0, -1):
            incerr = decodes[u-1][0] - STEPSIZE/math.sqrt(u) - decodes[u][0]
            incd = backprop.errors2deltas([incerr], depreacts[u-1][-1])
            if times < 2:
                deltas = incd
            elif incerr <= 0.0:
                deltas[0] += incd[0]
            (demoms[u-1], deltas) = backprop.backprop_deltas(uncounter,\
                           deltas, depreacts[u-1], depostacts[u-1], demom,\
                           learnrate, adapt_now = False)

        enmoms = [None for e in encodes[1:]]

        # Backprop through the encoder, last step first; steps up to the
        # marker index l get an extra "increase" target.
        for u in xrange(len(encodes)-1, 0, -1):
            incerr = max(0.0,encodes[u-1][0]) - encodes[u][0]
            if u <= l:
                incerr += STEPSIZE/math.sqrt(u)
            incd = backprop.errors2deltas([incerr], enpreacts[u-1][-1])
            if times < 2:
                deltas = incd
            elif incerr >= 0.0:
                deltas[0] += incd[0]
            (enmoms[u-1], deltas) = backprop.backprop_deltas(counter,\
                           deltas, enpreacts[u-1], enpostacts[u-1], enmom,\
                           learnrate, adapt_now = False)
            # Carry only the recurrent-input delta back to the previous step.
            deltas = [deltas[0]]

        # this does the actual learning-step, the adaption
        demom = merge_moms(demoms)
        enmom = merge_moms(enmoms)
        merge_moms([demom], uncounter)
        merge_moms([enmom], counter)

#train(4)


def plotcounter(drange = 50):
    for d in xrange(drange):
        for l in xrange(d):
            err = apply(d,l, verbose = False)
            print str(d) + " " + str(l) + " " + str(err)

plotcounter()
