from copy import deepcopy
from math import fabs as abs
from math import tanh, log
from random import gammavariate
#from IPython.Debugger import Tracer ; debug_here = Tracer()
#from IPython.Shell import IPShellEmbed

# Parameters of the multiplicative jitter drawn in bp_syn_target:
# gammavariate(GAMMA_PARAM1, GAMMA_PARAM2) has mean alpha*beta ~= 1.006,
# i.e. the randomizer is centred just above 1.0 (presumably intentional
# -- confirm against the derivation of the training rule).
GAMMA_PARAM1 = 64
GAMMA_PARAM2 = 0.015725
NEUTRAL = 0.0     # additive identity, used to initialise vote accumulators
MULNEUTRAL = 1.0  # multiplicative identity, used as the constant bias activity
# Epsilon keeping values strictly inside the open interval (-1, 1), where
# tanh_inv is defined; SMALL_FACTOR shrinks an out-of-range value back in.
SMALL_NUMBER = 0.0000001
SMALL_FACTOR = 1.0 - SMALL_NUMBER
WARNED = False    # module-level flag: warn_range() prints at most once

def warn_range():
    global WARNED
    if not WARNED:
        WARNED = True
        print "WARNING: You have not adapted your (presumably) target values correctly.\nThis can decrease performance, since errors must be generated and caught.\nPlease only use values within range of the activation function!"

def tanh_inv(x):
    """Inverse hyperbolic tangent: 0.5 * ln((1+x)/(1-x)).

    Only defined for x in the open interval (-1, 1).  Out-of-range or
    boundary input triggers a one-time warning (see warn_range) and is
    clamped to just inside the interval before retrying.
    """
    try:
        return 0.5 * log((1.0+x)/(1.0-x))
    except (OverflowError, ValueError, ZeroDivisionError):
        # TODO: add verbosity param
        warn_range()
        # Fix: the old retry (x *= SMALL_FACTOR) still failed whenever
        # |x| >= 1/SMALL_FACTOR; clamping works for any finite x.
        x = max(min(x, SMALL_FACTOR), -SMALL_FACTOR)
        return 0.5 * log((1.0+x)/(1.0-x))

# Open interval of the tanh activation's range.
UPPER_BOUND = 1.0
LOWER_BOUND = -1.0

# Closest usable targets strictly inside the interval; out-of-range values
# are snapped to these by ensure_bounds().
JUSTBELOW = UPPER_BOUND - SMALL_NUMBER
JUSTABOVE = LOWER_BOUND + SMALL_NUMBER

def ensure_bounds(x):
    """Return x, snapped strictly inside (LOWER_BOUND, UPPER_BOUND).

    Values at or beyond a bound are replaced by the nearest interior
    value (JUSTBELOW / JUSTABOVE); in-range values pass through unchanged.
    """
    bounded = x
    if x >= UPPER_BOUND:
        bounded = JUSTBELOW
    elif x <= LOWER_BOUND:
        bounded = JUSTABOVE
    return bounded

def xrl(sequence):
    """Shorthand: iterate over the indices of *sequence*."""
    return xrange(len(sequence))
# TODO: don't forget bias!! ( in adjustment-stage!!!)

def cast_vote(activity, net_in, syn_targ_diff, syn_allv, weights, inv_transfer = tanh_inv):
    """Turn per-synapse targets into per-neuron net-in votes for the
    pre-synaptic layer.

    Each synapse from pre-neuron i to post-neuron n proposes a net-in
    change for i; proposals are accumulated separately as upvotes
    (positive activity-target difference) and downvotes, each weighted by
    its alleviation value.

    activity      -- activities of the pre-synaptic layer
    net_in        -- net inputs of the pre-synaptic layer
    syn_targ_diff -- per-post-neuron lists of synaptic target differences
                     (index 0 is the bias entry, skipped here)
    syn_allv      -- matching alleviation values
    weights       -- weight rows; weights[n][i+1] connects pre-neuron i to
                     post-neuron n (index 0 is the bias weight)
    inv_transfer  -- inverse of the activation function

    Returns (upvotes, downvotes, up_allv, down_allv), one entry per
    pre-synaptic neuron.
    """
    upvotes = [NEUTRAL for v in xrl(activity)]
    downvotes = [NEUTRAL for v in xrl(activity)]
    up_allv = [NEUTRAL for v in xrl(activity)]
    down_allv = [NEUTRAL for v in xrl(activity)]

    for n in xrl(syn_targ_diff):
        for i in xrl(activity):
            try:
                # i+1 to skip bias-value
                targ_diff = syn_targ_diff[n][i+1] / weights[n][i+1]
                targ_activ = activity[i] + targ_diff
                bounded_targ_activ = ensure_bounds(targ_activ)
                # Scale the alleviation down by how much the target had to
                # be clamped.  NOTE(review): targ_activ == 0 also raises
                # ZeroDivisionError here, silently skipping the vote --
                # confirm that is intended.
                allv = syn_allv[n][i+1] * bounded_targ_activ / targ_activ

                net_in_diff = inv_transfer(bounded_targ_activ) - net_in[i]
                # A zero targ_diff counts as a downvote.
                if targ_diff > 0:
                    upvotes[i] += net_in_diff * allv
                    up_allv[i] += allv
                else:
                    downvotes[i] += net_in_diff * allv
                    down_allv[i] += allv

            except ZeroDivisionError:
                # if weight is zero, only the weight can be adapted
                pass
                #pre_targ_diff = 0

    return (upvotes, downvotes, up_allv, down_allv)


def vote_target_layer(upvotes, downvotes, up_allv, down_allv):
    """Resolve the up/down votes for one layer.

    Per neuron, the side with the larger total alleviation wins (ties go
    to the down side); its vote sum is normalised by that alleviation.
    If the winning side's alleviation is zero, the target defaults to 0.0
    with zero alleviation.

    Returns (target_diff, allv), one entry per neuron.
    """
    target_diff = []
    allv = []
    for n in range(len(upvotes)):
        up_wins = up_allv[n] > down_allv[n]
        votes = upvotes[n] if up_wins else downvotes[n]
        weight = up_allv[n] if up_wins else down_allv[n]
        try:
            target_diff.append(votes / weight)
            allv.append(weight)
        except ZeroDivisionError:
            target_diff.append(0.0)
            allv.append(0)

    return (target_diff, allv)

def bp_syn_target(target_diff, alleviation, weights, pre_activity, abs_net_in):
    '''Backpropagates net-in target for a specific layer.

    Splits each post-neuron's net-in target difference (and alleviation)
    over its incoming synapses, proportionally to |pre_activity * weight|
    with a gamma-distributed jitter (mean ~1.0, see GAMMA_PARAM1/2).

    target_diff  -- desired net-in change, one value per post-neuron
    alleviation  -- alleviation value per post-neuron
    weights      -- weight rows; index 0 of each row is the bias weight
    pre_activity -- pre-synaptic activities (bias entry is prepended below)
    abs_net_in   -- per-post-neuron normaliser (presumably the summed
                    absolute input contributions from the forward pass --
                    TODO confirm against the feedforward module)

    Returns (syn_targ_diff, syn_allv), nested lists parallel to *weights*.
    '''

    # Prepend the constant bias activity (1.0).  This rebinds the local
    # name only; the caller's list is not modified.
    pre_activity = [MULNEUTRAL] + pre_activity
    syn_targ_diff = []
    syn_allv = []

    for n in xrl(target_diff):
        syn_targ_diff.append([])
        syn_allv.append([])


        try:
            backfactor = target_diff[n] / abs_net_in[n]
            allv_factor = alleviation[n] / abs_net_in[n]

            for i in xrl(weights[n]):
                randomizer = gammavariate(GAMMA_PARAM1, GAMMA_PARAM2)
                # NB: 'abs' is math.fabs here (see imports), not the builtin.
                act_factor = randomizer * abs(pre_activity[i] * weights[n][i])
                syn_targ_diff[n].append(act_factor * backfactor)
                syn_allv[n].append(act_factor * allv_factor)



        except ZeroDivisionError:
            # abs_net_in[n] was zero (both divisions above precede any
            # appends, so no partial rows are left behind).
            # TODO: clear up
            # should happen very seldomly
            # divide evenly among inputs
            backfactor = target_diff[n] / len(weights[n])
            allv_factor = alleviation[n] / len(weights[n])
            for i in xrl(weights[n]):
                syn_targ_diff[n].append(backfactor)
                syn_allv[n].append(allv_factor)

    return (syn_targ_diff, syn_allv)


def bp_diff_layer(target_diff, network, activity, alleviation, net_in,\
                  abs_net_in, inv_transfer = tanh_inv):
    """Backpropagate a net-in target difference from the output layer
    down through every layer of the network.

    target_diff  -- per-output-neuron net-in target differences
    network      -- list of weight matrices, one per layer
    activity     -- per-layer activities (activity[0] is the input layer)
    alleviation  -- per-output-neuron alleviation values
    net_in       -- per-layer net inputs (input layer not included)
    abs_net_in   -- per-layer absolute net-in normalisers
    inv_transfer -- inverse of the activation function

    Returns (net_in_dtargets, synaptic_dtargets, alleviation), where the
    final alleviation is the one voted for the input layer.
    """

    # Align net_in with activity: prepend a pseudo net-in for the input
    # layer, obtained by inverting the input activities.
    net_in = [[inv_transfer(a) for a in activity[0]]] + net_in

    # NOTE(review): net_in was just lengthened by one entry, so this list
    # gets len(net_in)+1 slots plus the trailing target_diff, but the loop
    # below only fills indices 0..len(network)-1 -- one interior slot stays
    # None.  Looks off by one; confirm intent before relying on it.
    net_in_dtargets = [None for x in xrange(len(net_in))] + [target_diff]
    synaptic_dtargets = [None for layer in xrl(network)]
    synaptic_alleviations = [None for layer in xrl(network)]

    for layer in xrange(len(network)-1, -1, -1): # walk backwards, stop at 0
        (syn_targ_diff, syn_allv) = bp_syn_target(target_diff, alleviation,
                                                  network[layer], activity[layer],
                                                  abs_net_in[layer])

        cast = cast_vote(activity[layer], net_in[layer], syn_targ_diff,\
                         syn_allv, network[layer], inv_transfer)

        # The voted result becomes the target for the next (lower) layer.
        (target_diff, alleviation) = vote_target_layer(*cast)
        #possibly extract up/downvotes from cast_vote!?
        # or track alleviation??

        synaptic_dtargets[layer] = syn_targ_diff
        synaptic_alleviations[layer] = syn_allv
        net_in_dtargets[layer] = target_diff

    # NOTE(review): synaptic_alleviations is collected but never returned.
    return (net_in_dtargets, synaptic_dtargets, alleviation)

# todo: split into backprop_target & bp_diff_layer

def backprop_target(output_target, network, activity, net_in, abs_net_in, inv_transfer = tanh_inv):
    """Backpropagates a given output target (not a difference).

    Converts the absolute output target into a net-in target difference at
    the output neurons (plus per-neuron alleviation values) and delegates
    to bp_diff_layer.
    """

    out_activity = activity[-1]

    # Alleviation: how far each output activity is from its target.
    alleviation = [abs(target - out_activity[i])
                   for i, target in enumerate(output_target)]

    # Target difference of net-in at the output neurons.
    target_diff = [inv_transfer(target) - inv_transfer(out_activity[i])
                   for i, target in enumerate(output_target)]

    return bp_diff_layer(target_diff, network, activity, alleviation,\
                         net_in, abs_net_in, inv_transfer)


def avg_syn_dtargs(*syn_dtargs):
    """Element-wise average of several synaptic-target structures.

    Each argument is a nested list [layer][neuron][synapse] of numbers;
    all arguments must share the same shape.  Returns a new structure of
    that shape holding the arithmetic mean over the arguments.
    """
    count = len(syn_dtargs)
    result = []
    for layer in range(len(syn_dtargs[0])):
        ltargs = []
        for n in range(len(syn_dtargs[0][layer])):
            ntargs = []

            for i in range(len(syn_dtargs[0][layer][n])):
                # Bug fix: the old code indexed syn_dtargs[0][l][n][i] --
                # 'l' was undefined (NameError) and the summand ignored the
                # iterated structure 's', so only the first argument would
                # ever have contributed.  Start from 0.0 so the division
                # stays float under Python 2 as well.
                val = 0.0
                for s in syn_dtargs:
                    val += s[layer][n][i]
                ntargs.append(val / count)

            ltargs.append(ntargs)

        result.append(ltargs)

    return result

def avg_adapt(network, w_dtargs, learnrate = 0.1, momrate = .7):
    """Average several weight-delta samples and apply them to *network*.

    Each entry of w_dtargs is a nested structure [layer][neuron][synapse]
    of weight deltas.  The per-weight mean, scaled by learnrate, is added
    to *network* in place; the mean scaled by momrate is collected and
    returned as the new momentum structure.
    """
    sample_count = len(w_dtargs)
    momentum = []
    for l in range(len(w_dtargs[0])):
        lmom = []
        for n in range(len(w_dtargs[0][l])):
            nmom = []
            for i in range(len(w_dtargs[0][l][n])):
                total = 0.0
                for sample in w_dtargs:
                    total += sample[l][n][i]
                mean = total / sample_count

                network[l][n][i] += learnrate * mean
                nmom.append(mean * momrate)

            lmom.append(nmom)
        momentum.append(lmom)

    return momentum




# TODO: test on XOR!!!!

def weight_dtargs(network, syn_dtargs, old_activity, new_inact = None, transfer = tanh):
    """Turn synaptic targets into weight-change targets while
    re-propagating a (possibly new) input through the network.

    network      -- list of weight matrices; index 0 of each weight row
                    is the bias weight
    syn_dtargs   -- synaptic target differences, shaped like *network*
    old_activity -- per-layer activities from the original forward pass
                    (old_activity[0] is the input layer)
    new_inact    -- replacement input activities; defaults to the old input
    transfer     -- activation function used for the re-propagation

    Returns (w_dtargs, new_net_in, new_activity).
    """
    if new_inact is None:
        new_inact = old_activity[0]

    # new_activity[layer] is fully filled just before layer+1 is processed,
    # so the None placeholders are never read below.
    new_activity = [new_inact] + [[None for n in xrl(old_activity[layer])] for layer in xrange(1, len(old_activity))]
    w_dtargs = [[[None for i in xrl(network[layer][n])] for n in xrl(network[layer])] for layer in xrl(network)]
    new_net_in = [[NEUTRAL for n in xrl(network[layer])] for layer in xrl(network)]

    for layer in xrl(network):
        for n in xrl(network[layer]):
            # Prepend the constant bias activity (1.0).
            new_pre_act = [1.0] + new_activity[layer]
            old_pre_act = [1.0] + old_activity[layer]

            for i in xrl(network[layer][n]):
                # Weight delta so the new pre-activity reproduces the old
                # contribution plus the synaptic target.
                # NOTE(review): unguarded division -- raises
                # ZeroDivisionError if new_pre_act[i] is exactly 0;
                # confirm activities can never be 0 here.
                w_dtargs[layer][n][i] = (syn_dtargs[layer][n][i] + old_pre_act[i])/new_pre_act[i]\
                                        - network[layer][n][i]
                # todo: check of the whole net-in layer thing is correctly indexed
                new_net_in[layer][n] += (w_dtargs[layer][n][i] + network[layer][n][i]) *\
                                         new_pre_act[i]

            new_activity[layer+1][n] = transfer(new_net_in[layer][n])

    return (w_dtargs, new_net_in, new_activity)


#def train_xor(n = 1, learnrate = 0.01, net = net):

    #momentum = feedforward.generate_net(layers, lambda: 0.0)
    #for i in xrange(n):
        #weight_dtargs = [momentum]
        #for x in xorlist:
            #(out, activity, net_in, abs_net_in) = feedforward.t_propagate(x[0], net)
            #syn_dtargs = target_adapt.backprop_target(x[1], net, activity,\
                                                             #net_in, abs_net_in)[1]
            #weight_dtargs.append(target_adapt.weight_dtargs(net, syn_dtargs, activity)[0])

        #momentum = target_adapt.avg_adapt(net, weight_dtargs, learnrate)

    #return net
