import feedforward
import target_adapt
import math

def xrl(l):
    # Convenience shorthand: the index range of a sequence.
    # (Was a lambda assignment; PEP 8 / flake8 E731 prefer a def for a
    # named callable -- same behavior, better tracebacks.)
    return xrange(len(l))

# NOTE(review): the string below originally appeared as a bare string
# statement *after* code, so it never served as the module docstring; it is
# preserved here as a comment instead of a no-op statement.
# "Train an encoder/decoder pair to find its own representation using
# target_adapt."

def train(n, grammar, encoder, decoder, en_momentum = None, de_momentum = None,\
          directions = None, learnrate = 0.05, momrate = 0.2, verbose = False):

    if directions is None:
        dim = len(grammar.alphabet)
        directions = [[-1.0 for i in xrange(dim)] for j in xrange(dim+1)]
        for i in xrange(1,dim+1):
            directions[i][i-1] = 1.0
    if en_momentum is None:
        en_momentum = [[[[target_adapt.NEUTRAL for i in inputs]\
             for inputs in neuron] for neuron in layer] for layer in encoder]
    if de_momentum is None:
        de_momentum = [[[[target_adapt.NEUTRAL for i in inputs]\
             for inputs in neuron] for neuron in layer] for layer in decoder]

    for s in xrange(n):
#        string = grammar.generate()
#        dirstring = [directions[grammar.alphabet.index(a) + 1] for a in string]
#        sequence = [directions[0]] + dirstring
        numseq = grammar.generate()
        if verbose:
            print "numseq: " + str(numseq)

        sequence = [directions[0]] + [directions[num] for num in numseq]
        overall_error = 1
        error = 0
        en_wdtargs = [en_momentum]
        de_wdtargs = [de_momentum]

        for i in xrange(1, len(sequence)):

            input_c = sequence[0]
            for j in xrange(i-1):
                (input_c, en_preact, en_postact) = \
                    feedforward.propagate(input_c + sequence[j], encoder)

            target = input_c + sequence[i]
            (coding, en_act, en_nin, en_anin) = feedforward.t_propagate(target, encoder)
            (decoding, de_act, de_nin, de_anin) = feedforward.t_propagate(coding, decoder)

            if verbose:
                print "i" + str(i) + ": " + str(target)
                print "e" + str(i) + ": " + str(coding)
                print "d" + str(i) + ": " + str(decoding)


            (de_nin_dtargs, de_sdtargs, de_allv) =  target_adapt.backprop_target(target,\
                                                         decoder, de_act, de_nin, de_anin)

            (en_nin_dtargs, en_sdtargs, en_allv) =  target_adapt.bp_diff_layer(\
                                                            de_nin_dtargs[0], encoder,\
                                                            en_act,de_allv,en_nin,en_anin)

            (en_wdtarg,new_enin,new_eact) = target_adapt.weight_dtargs(encoder, en_sdtargs,\
                                                                       en_act)
            (de_wdtarg,new_dnin,new_dact) = target_adapt.weight_dtargs(decoder, de_sdtargs,\
                                                                       de_act, new_eact[-1])

            en_wdtargs.append(en_wdtarg)
            de_wdtargs.append(de_wdtarg)

            l = len(target)
            for t in xrange(l):
                err = target[t] - decoding[t]
                error += math.fabs(err)

        en_momentum = target_adapt.avg_adapt(encoder, en_wdtargs, learnrate)
        de_momentum = target_adapt.avg_adapt(decoder, de_wdtargs, learnrate)




        overall_error = 0.75*overall_error + 0.25*error/len(sequence)/len(encoder[-1])

    return overall_error
