import feedforward
import backprop
import cantor
import net
import math
import copy
import eval_coding
#import charcoding
from window_predictor import maxindex
import helpers

#alphabet = charcoding.alphabet


OFFSET = 0.5
DEFAULT = 0.5
LEARNRATE = 0.07
MOMRATE = .4
ACTIVATION = math.tanh
DACTIVATION = backprop.dtanh
"""Train a network to mimic the type of cantor coding detailed in cantor.py."""


def apply(encoder, decoder, sequence, alphabet, directions):
    """Encode *sequence* one letter at a time through the encoder net, then
    unroll the decoder net from the final encoding to reconstruct the string.

    NOTE(review): relies on the module-level ``can_coding`` closure that
    train() installs -- calling this before train() raises NameError.
    Letters are prepended to the result, so decoding order is reversed in
    the returned string (matches the reverse cantor coding).
    """
    # Base point: the coding of the empty sequence.
    encoding = can_coding([], directions)[0]
    for s in sequence:
        # Current state concatenated with the direction vector of letter s.
        en_input = encoding + directions[alphabet.index(s)]
        encoding = feedforward.propagate(en_input,encoder, ACTIVATION)[0]

    outsequence = ""
    for s in sequence:
        decoding = feedforward.propagate(encoding, decoder, ACTIVATION)[0]
        # First len(encoding) values are the previous state, the remainder is
        # the letter code.  len(encoding) is re-evaluated after the
        # assignment; the split stays consistent as long as the decoder
        # emits 2*dim outputs (presumed -- see setup_nets' delayers).
        encoding = decoding[:len(encoding)]
        output = decoding[len(encoding):]
        # Doubled direction vectors mirror the 2*d decoder targets in train().
        outletter = eval_coding.pick_letter(alphabet, [[2*d for d in dir] for dir in directions], output)
        outsequence = outletter + outsequence

    return outsequence



def setup_nets(hidden, directions = None, alphabet = None):
    """Initiates encoder and decoder nets with correct input & output dimensions.

    hidden     -- list of hidden-layer sizes for the encoder; the decoder
                  mirrors them in reverse order.
    directions -- optional precomputed direction vectors; generated from
                  *alphabet* via cantor.setup_directions when omitted.
    alphabet   -- only consulted when *directions* is None.

    Returns (encoder, decoder, directions, en_mom, de_mom) where en_mom and
    de_mom are zero-initialised momentum nets of matching shape.
    """
    if directions is None:
        (directions, dim) = cantor.setup_directions(alphabet, OFFSET, DEFAULT)
    else:
        dim = len(directions[0])

    # Non-mutating reverse: leaves the caller's `hidden` list untouched.
    rev_hidden = hidden[::-1]

    # Encoder maps (state + letter) -> state; decoder maps state -> (state + letter).
    enlayers = [dim * 2] + hidden + [dim]
    delayers = [dim] + rev_hidden + [dim * 2]
    encoder = feedforward.generate_net(enlayers)
    decoder = feedforward.generate_net(delayers)
    en_mom = feedforward.generate_net(enlayers, lambda : 0.0)
    de_mom = feedforward.generate_net(delayers, lambda : 0.0)

    return (encoder, decoder, directions, en_mom, de_mom)


#def evaluate(net, input, target):
    #(out, activ) = feedforward.propagate(input, net, math.tanh)
    #deltas = [target[i] - out[i] for i in xrange(len(target))]
    #result = 0
    #for d in deltas:
        #result += math.fabs(d)

    #return result

def adapt_net(network, mom, input, target, prevnet_postact = None,\
              learnrate = LEARNRATE, momrate = MOMRATE,\
              activation = ACTIVATION, dactivation = DACTIVATION):
    """Run one forward/backward pass of *network* on (input, target).

    prevnet_postact -- post-activations of an upstream net, prepended to this
                       net's post-activation list so backprop can propagate
                       deltas across the net boundary; defaults to no
                       upstream net.

    Returns (mean_abs_error, deltas, errors, max_abs_error, mom).  Weights
    are NOT applied here (adapt_now = False); the returned mom carries the
    accumulated updates.
    """
    # None sentinel instead of a mutable default argument: even though the
    # list is only read here, a shared default is a latent pitfall.
    if prevnet_postact is None:
        prevnet_postact = []

    (out, pre_activ, post_activ) = feedforward.propagate(input, network, activation)
    post_activ = [prevnet_postact] + post_activ
    errors = [target[i] - out[i] for i in xrange(len(target))]

    # Single pass accumulating both the total and the maximum of |error|.
    tot_error = 0.0
    max_error = 0.0
    for e in errors:
        e = math.fabs(e)
        tot_error += e
        if e > max_error:
            max_error = e

    (mom, deltas) = backprop.backprop(network, errors, pre_activ, post_activ,\
                                      mom, learnrate, momrate, dactivation, adapt_now = False)

    return (tot_error/len(errors), deltas,errors, max_error, mom)


def rev_intarg(sequence, directions, index):
    """Build (encoder_in, encoder_target, decoder_in, decoder_target) for the
    reversed coding: the decoder trains on the mirror of the encoder pair,
    so the encoder target doubles as the decoder input and a copy of the
    encoder input serves as the decoder target."""
    prefix_code = can_coding(sequence[:index], directions)[0]
    extended_code = can_coding(sequence[:index + 1], directions)[0]
    encoder_in = list(prefix_code)
    return (encoder_in, extended_code, extended_code, copy.copy(encoder_in))


def dir_intarg(sequence, directions, index):
    """Build (encoder_in, encoder_target, decoder_in, decoder_target) for the
    direct (non-reversed) coding: encoder pairs come from growing prefixes
    of *sequence*, decoder pairs from its shrinking suffixes."""
    def code_of(subseq):
        # Shorthand for the first component of the cantor coding.
        return can_coding(subseq, directions)[0]

    en_in = list(code_of(sequence[:index]))
    en_targ = code_of(sequence[:index + 1])
    de_in = code_of(sequence[index:])
    de_targ = code_of(sequence[index + 1:])
    return (en_in, en_targ, de_in, de_targ)


def train(sequence, encoder, decoder, directions, grammar, en_mom = None,\
          de_mom = None, learnrate = 0.05, momrate = 0.1, reverse = True,\
          activation = ACTIVATION, dactivation = DACTIVATION):
    """Trains encoder and decoder for one pass over *sequence*.

    NOTE(review): depends on a module-level ``alphabet`` that is never
    defined here (the charcoding import that provided it is commented out);
    raises NameError unless a caller injects it -- confirm where it is set.
    The *grammar* parameter is currently unused but kept for interface
    compatibility.

    Returns (encoder, decoder, enval, deval, en_mom, de_mom, en_max_error,
    de_max_error): the nets, the mean per-step errors, the merged momentum
    nets and the largest single-unit errors observed.
    """

    # Install the coding closure used by the intarg helpers and by apply().
    global can_coding
    can_coding = lambda sequence, directions: cantor.cantor_coding(sequence,\
                    alphabet, directions, reverse = reverse, dim = len(directions[0]))
    if reverse:
        intarg = rev_intarg
    else:
        intarg = dir_intarg

    # Zero-initialised momentum buffers when the caller supplied none.
    if en_mom is None:
        enlayers = [len(layer) for layer in encoder]
        en_mom = feedforward.generate_net(enlayers, lambda: 0.0)
    if de_mom is None:
        # NOTE(review): the decoder branch prepends the input dimension while
        # the encoder branch does not -- asymmetry kept as found, verify.
        delayers = [len(decoder[0][0])] + [len(layer) for layer in decoder]
        de_mom = feedforward.generate_net(delayers, lambda: 0.0)

    enval = 0.0
    deval = 0.0
    en_max_error = 0.0
    de_max_error = 0.0
    en_moms = [None for i in xrange(len(sequence))]
    de_moms = [None for i in xrange(len(sequence))]

    for i in xrange(len(sequence)):
        (en_in, en_targ, de_in, de_targ) = intarg(sequence, directions, i)
        # Encoder input gets the current letter's direction vector appended...
        en_in.extend(directions[alphabet.index(sequence[i])])
        # ...and the decoder target a doubled copy of it (matches the 2*d
        # scaling used when picking letters in apply()).
        ctarg = [2*d for d in directions[alphabet.index(sequence[i])]]
        de_targ.extend(ctarg)

        (enval1,cdeltas,errors,en_max_error1, en_moms[i]) = adapt_net(encoder,en_mom,en_in, en_targ)
        enval += enval1

        (deval1,cdeltas,errors,de_max_error1, de_moms[i]) = adapt_net(decoder,de_mom,de_in,de_targ)
        deval += deval1

        if en_max_error1 > en_max_error:
            en_max_error = en_max_error1

        if de_max_error1 > de_max_error:
            de_max_error = de_max_error1

    # Average errors over the sequence (guard against empty input).
    if len(sequence) > 0:
        enval *= 1.0/len(sequence)
        deval *= 1.0/len(sequence)

    # Combine the per-step momentum snapshots into single momentum nets.
    en_mom = helpers.merge_moms(en_moms)
    de_mom = helpers.merge_moms(de_moms)

    return (encoder, decoder, enval, deval, en_mom, de_mom, en_max_error, de_max_error)



#def team_train(n, grammar):
    # todo: generate hidden by slightly variating something like dim^2, search around

    # set up a basic population (reuse some code)
    # have it sorted by
#    entry = coding_entry(train(setup_nets(grammar, hidden)))
#    (enval, deval) = evaluate(encoder, decoder, grammar)
#    entry.value = enval + deval
