import feedforward
import backprop
import math
import copy
import helpers

"""Train an encoder/decoder pair to find its own representation."""

WORDLEN = None

def setup_big_nets(input_dim, en_dim, en_hidden, de_hidden):
    """Build the encoder/decoder nets plus matching zero momentum nets.

    The encoder maps (code + symbol) -> code; the decoder maps the code
    back to (code + symbol), so its output width mirrors the encoder's
    input width.  Returns (encoder, decoder, en_mom, de_mom).
    """
    encoder_shape = [input_dim + en_dim] + en_hidden + [en_dim]
    decoder_shape = [en_dim] + de_hidden + [input_dim + en_dim]

    # Momentum nets share the weight nets' topology but start at zero.
    zero = lambda: 0.0

    return (feedforward.generate_net(encoder_shape, feedforward.default_vgen),
            feedforward.generate_net(decoder_shape, feedforward.default_vgen),
            feedforward.generate_net(encoder_shape, zero),
            feedforward.generate_net(decoder_shape, zero))

def setup_nets(dim, hidden):
    """Build encoder/decoder nets for symbol width `dim`.

    The decoder's hidden layers mirror the encoder's.  Also initialises
    the module-level DEFAULT_ENCODING (the all-zero start code) as a
    side effect.  Returns (encoder, decoder, en_mom, de_mom).
    """
    # hidden[::-1] is a reversed *copy*, so the caller's list is untouched
    # (replaces the old copy.copy() + in-place reverse()).
    rev_hidden = hidden[::-1]
    code_size = dim + WORDLEN
    global DEFAULT_ENCODING
    DEFAULT_ENCODING = [0.0] * code_size
    return setup_big_nets(dim, code_size, hidden, rev_hidden)


def apply_nets(encoder, decoder, directions, grammar=None, numseq=None):
    """Encode a direction sequence step by step, then decode it back.

    If `numseq` is not given it is drawn from `grammar`.  Returns
    (numseq, sequence, outseq) where outseq holds the reconstructed
    symbols in original (forward) order.
    """
    if numseq is None:
        numseq = grammar.generate()

    sequence = [directions[num] for num in numseq]

    # Encoding pass: fold each symbol into the running code vector.
    coding = DEFAULT_ENCODING
    for symbol in sequence:
        (coding, en_preact, en_postact) = feedforward.propagate(
            coding + symbol, encoder, math.tanh)

    # Decoding pass: each step splits the decoder output into the
    # previous code (first `split` entries) and one symbol (the rest).
    # The decoder unrolls last-in-first-out, hence the final reverse.
    outseq = []
    for _ in sequence:
        (decoding, de_preact, de_postact) = feedforward.propagate(
            coding, decoder, math.tanh)
        split = len(coding)
        coding = decoding[:split]
        outseq.append(decoding[split:])

    outseq.reverse()
    return (numseq, sequence, outseq)

def eval_nets(encoder, decoder, directions, grammar=None, numseq=None):
    """Run apply_nets and score the reconstruction.

    Returns (wrong, instri, outstri, error, maxerr): the number of
    mismatched characters, the input and reconstructed strings, the
    mean absolute component error, and the largest single error.
    """
    (numseq, sequence, outseq) = apply_nets(encoder, decoder, directions,
                                            grammar, numseq)
    error = 0.0
    maxerr = 0.0
    in_chars = []
    out_chars = []
    for (target, output, num) in zip(sequence, outseq, numseq):
        for (t, o) in zip(target, output):
            cerr = math.fabs(t - o)
            # Normalise per component so `error` ends up as a mean.
            error += cerr / (len(target) * len(sequence))
            if cerr > maxerr:
                maxerr = cerr
        in_chars.append(grammar.alphabet[num])
        # The reconstructed symbol is read off as the strongest component.
        out_chars.append(grammar.alphabet[helpers.maxindex(output)])

    instri = "".join(in_chars)
    outstri = "".join(out_chars)
    wrong = sum(1 for (i, o) in zip(instri, outstri) if i != o)

    return (wrong, instri, outstri, error, maxerr)


def trainstep(grammar, encoder, decoder, en_mom, de_mom, directions,\
              learnrate = 0.05, momrate = 0.2, adapt_now = False):
    """One training pass over a single grammar-generated sequence.

    At every step the encoder folds the next symbol into the running
    code, the decoder immediately tries to reconstruct the encoder's
    input, and both nets are backpropagated on the reconstruction
    error.  Per-step momentum nets are collected and merged at the end.
    Returns (seq_overall_error, max_error, en_mom, de_mom).
    """
    max_error = 0.0

    numseq = grammar.generate()

    sequence = [directions[num] for num in numseq]
    seq_overall_error = 0.0
    # One momentum snapshot per sequence step; merged after the loop.
    en_moms = [None for i in xrange(len(sequence)) ]
    de_moms = [None for i in xrange(len(sequence)) ]
    coding = DEFAULT_ENCODING

    for i in xrange(len(sequence)):
        # Encoder input is the previous code concatenated with the symbol.
        input_en = coding + sequence[i]
        (coding,en_preact,en_postact) = feedforward.propagate(input_en,\
                                                encoder,math.tanh)


        (decoding, de_preact, de_postact) = feedforward.propagate(coding, decoder,\
                                                            math.tanh)

        # The decoder's target is the encoder's own input (autoencoding).
        target = input_en
        targ_error = []
        l = len(target)
        error = 0.0
        for t in xrange(l):
            err = target[t] - decoding[t]
            if math.fabs(err) > max_error:
                max_error = math.fabs(err)
            targ_error.append(err)
            error += math.fabs(err) / l

        seq_overall_error += error / len(sequence)

        # Backprop the decoder first; its input-layer deltas are then fed
        # back through the encoder.  The decoder's activation list is
        # prefixed with the encoder's output layer (its actual input).
        (de_moms[i], deltas) = backprop.backprop(decoder, targ_error, de_preact,\
                                             [en_postact[-1]] + de_postact,\
                                             de_mom, learnrate, momrate,\
                                             adapt_now = adapt_now)

        (en_moms[i], deltas) = backprop.backprop_deltas(encoder, deltas,\
                                        en_preact, en_postact, en_mom,
                                        learnrate, momrate,\
                                        adapt_now = adapt_now)
    # Merge per-step momenta into the momentum nets handed back to the
    # caller (NOTE: rebinds the locals only; callers must use the return
    # values, not the objects they passed in).
    de_mom = helpers.merge_moms(de_moms)
    en_mom = helpers.merge_moms(en_moms)

    return (seq_overall_error, max_error, en_mom, de_mom)


def train(n, grammar, encoder, decoder, en_mom, de_mom, directions,\
          learnrate = 0.05, momrate = 0.2, verbose = False):
    max_error = 0.0
    overall_error = 0.0

    en_moms = [None for s in xrange(n)]
    de_moms = [None for s in xrange(n)]
    for sn in xrange(n):
#        string = grammar.generate()
#        dirstring = [directions[grammar.alphabet.index(a) + 1] for a in string]
#        sequence = [directions[0]] + dirstring
        numseq = grammar.generate()
#        if verbose:
#            print "numseq: " + str(numseq)

        sequence = [directions[num] for num in numseq]
        seq_overall_error = 0.0
        en_moms1 = [None for i in xrange(len(sequence)) ]
        de_moms1 = [None for i in xrange(len(sequence)) ]
        coding = DEFAULT_ENCODING

        for i in xrange(len(sequence)):
            input_en = coding + sequence[i]
            (coding,en_preact,en_postact) = feedforward.propagate(input_en,\
                                                    encoder,math.tanh)


            (decoding, de_preact, de_postact) = feedforward.propagate(coding, decoder,\
                                                                math.tanh)

            target = input_en
#            if verbose:
#                print de_postact
#                print
#                print en_postact
#                print "------------------------"
 #               print "i" + str(i) + ": " + str(target)
 #               print "e" + str(i) + ": " + str(coding)
  #              print "d" + str(i) + ": " + str(decoding)

            targ_error = []
            l = len(target)
            error = 0.0
            for t in xrange(l):
                err = target[t] - decoding[t]
                if math.fabs(err) > max_error:
                    max_error = math.fabs(err)
                targ_error.append(err)
                error += math.fabs(err) / l

            seq_overall_error += error / len(sequence)

            (de_mom, deltas) = backprop.backprop(decoder, targ_error, de_preact,\
                                                 [en_postact[-1]] + de_postact,\
                                                 de_mom, learnrate, momrate)#,\
 #                                                adapt_now = False)



            (en_mom, deltas) = backprop.backprop_deltas(encoder, deltas,\
                                            en_preact, en_postact, en_mom,
                                            learnrate, momrate)#,\
#                                            adapt_now = False)


#        de_moms[sn] = helpers.merge_moms(de_moms1)
 #       en_moms[sn] = helpers.merge_moms(en_moms1)

        overall_error += seq_overall_error / n

#    de_mom = helpers.merge_moms(de_moms)
#    en_mom = helpers.merge_moms(en_moms)
#    helpers.merge_moms([de_mom], decoder)
#    helpers.merge_moms([en_mom], encoder)

    if verbose:
        print "-------------------"
        print de_mom
        print en_mom
        print
        print decoder
        print encoder



    return (overall_error, max_error, en_mom, de_mom)
