import math
import copy

# todo: place net-specific stuff in file feedforward

# Switch between real- and complex-valued arithmetic.  When COMPLEX is
# true, c() takes the complex conjugate; otherwise it is the identity.
COMPLEX = False

if COMPLEX:
    def c(x):
        """Return the complex conjugate of x."""
        return x.conjugate()
else:
    def c(x):
        """Identity -- the net runs on real values."""
        return x

def conj_dot_prod(v1, v2):
    """Return sum_i c(v1[i]) * c(v2[i]).

    NOTE(review): c() is applied to BOTH factors; a Hermitian inner
    product would conjugate only one of them -- confirm this is intended.
    Raises IndexError when v2 is shorter than v1 (length is driven by v1).
    """
    acc = complex(0, 0) if COMPLEX else 0
    for idx, lhs in enumerate(v1):
        acc += c(lhs) * c(v2[idx])
    return acc



SMALL_NUMBER = 0.000000000001   # epsilon used to avoid division by zero in the gradient code below

# deals with feed-forward neural nets that can have product and
# standard additive neurons on every layer.

# NN format [ [<layer1> [w_11.... w_nm] [w...] ]  [<layer2> num, [..][..]..] ]

# TODO: make sure that weights from the (untransferred) input layer are processed correctly
# don't forget the bias

def backprop_layer(weights, number_of_additive, deltas, activity, learnrate_a, learnrate_m, momentum, momrate):
    """Update one layer's weights in place and return the new momentum terms.

    weights            -- one weight vector per neuron; the first
                          `number_of_additive` neurons are additive, the
                          rest are multiplicative (product) neurons.
    deltas             -- backpropagated error term per neuron of this layer.
    activity           -- [presynaptic, postsynaptic] activity of the layer.
    learnrate_a/learnrate_m -- learning rates for additive / multiplicative weights.
    momentum, momrate  -- previous momentum terms and momentum coefficient.

    Returns the new momentum structure (same shape as `momentum`);
    `weights` is modified in place.
    """

    activ_pre = [1] + activity[0]     # prepend constant 1 to handle the bias
    activ_post = activity[1]

    new_momentum = copy.deepcopy(momentum)

    for i in range(len(activ_pre)):
        # additive neurons: plain delta-rule step with momentum
        for j in range(number_of_additive):
            new_momentum[j][i] = learnrate_a * c(deltas[j]) * c(activ_pre[i])\
                                 + momrate * momentum[j][i]
            weights[j][i] += new_momentum[j][i]

        # multiplicative neurons: weights[j][0] is the constant factor of
        # the product, weights[j][i] for i >= 1 is the exponent of input i-1
        # (see propagate: w[0] * pow_all(pre_activity_m, w[1:]))
        if i == 0:  # BUGFIX: was `i == 1` -- the constant factor sits at
                    # index 0; the bias entry activ_pre[0] == 1 acts as a
                    # constant input here, not as a weight
            for j in range(number_of_additive, len(weights)):
                # d(out_j)/d(w_j0) = out_j / w_j0
                try:
                    grad = c(activ_post[j] / weights[j][i]) ## TODO: is conjugate
                                                               ## applied correctly??
                except ZeroDivisionError:
                    # weight is exactly zero: fall back to the epsilon
                    grad = c(activ_post[j]) / c(SMALL_NUMBER)

                new_momentum[j][i] = learnrate_m * c(deltas[j]) * grad\
                                 + momrate * momentum[j][i]
                weights[j][i] += new_momentum[j][i]

        else:
            for j in range(number_of_additive, len(weights)):
                # d(out_j)/d(w_ji) = log(x_{i-1}) * out_j
                new_momentum[j][i] = learnrate_m * c(deltas[j]) * \
                                     c(math.log(activ_pre[i]) * activ_post[j])\
                                     + momrate * momentum[j][i]
                weights[j][i] += new_momentum[j][i]

    return new_momentum




def def_a_range(num_of_adtv, layer):
    """Indices of the additive neurons of `layer` (they come first)."""
    # `range` instead of Python-2-only `xrange`; identical iteration semantics
    return range(num_of_adtv[layer])

def def_m_range(network, num_of_adtv, layer):
    """Indices of the multiplicative neurons of `layer` (after the additive ones)."""
    # `range` instead of Python-2-only `xrange`; identical iteration semantics
    return range(num_of_adtv[layer], len(network[layer]))

# todo: introduce bias also for multiplicatives

def backprop(network, error, activity, num_of_adtv, learnrate_a,
             learnrate_m, derv_transfer_a, derv_transfer_m, momentum,
             momrate, num_of_input_adtv=None):
    """Backpropagate `error` through `network`, updating weights in place.

    derv_transfer_a / derv_transfer_m -- derivatives of the transfer
    functions of the additive / multiplicative neurons (mirrors
    transfer_a / transfer_m in propagate()).

    Returns (new_momentum, deltas) where `deltas` holds the error terms
    of the lowest processed layer.
    """

    if num_of_input_adtv is None:
        num_of_input_adtv = len(network[0][0])

    # neuron counts with the input layer prepended; the lambdas take a
    # layer index relative to `network` and shift by one internally
    a_range = lambda layer: def_a_range([num_of_input_adtv] + num_of_adtv, layer + 1)
    m_range = lambda layer: def_m_range(network, [num_of_input_adtv] + num_of_adtv, layer + 1)

    # initialize output-layer deltas
    # NOTE(review): a_range(-1)/m_range(-1) resolve to index 0 of the
    # *prepended* list, i.e. the input layer, not the output layer --
    # confirm this is intended
    deltas = []
    for e in a_range(-1):
        deltas.append(error[e] * derv_transfer_a(activity[-1][0][e]))

    for e in m_range(-1):
        # BUGFIX: multiplicative outputs were transferred with transfer_m,
        # so their derivative is derv_transfer_m (was derv_transfer_a;
        # derv_transfer_m was never used anywhere)
        deltas.append(error[e] * derv_transfer_m(activity[-1][0][e]))

    new_momentum = copy.deepcopy(momentum)
    for layer in range(len(network) - 1, 0, -1):
        new_momentum[layer] = backprop_layer(network[layer],
                              num_of_adtv[layer], deltas,
                              activity[layer], learnrate_a, learnrate_m,
                              momentum[layer], momrate)

        # calculate deltas for the next layer down

        nu_deltas = []

        # additive neurons of the layer below
        for n in a_range(layer - 1):
            # NOTE(review): for additive neurons of `layer` the weight of
            # input n sits at index n+1 (bias at 0) -- see the TODO about
            # the bias at the top of the file
            w = [network[layer][j][n] for j in range(len(network[layer]))]

            derv = derv_transfer_a(activity[layer - 1][1][n])
            nu_deltas.append(derv * (conj_dot_prod(deltas, w)))

        # multiplicative neurons of the layer below
        for n in m_range(layer - 1):
            w = []
            # NOTE(review): starting at j = 1 leaves `w` one entry shorter
            # than `deltas`, so conj_dot_prod will raise IndexError when
            # this path runs -- confirm the intended alignment
            for j in range(1, len(network[layer])):
                try:
                    w_j = c(network[layer][j][n]) * c(activity[layer][1][j]
                            / activity[layer][0][j])
                except ZeroDivisionError:
                    w_j = c(network[layer][j][n]) * c(activity[layer][1][j]
                            / (activity[layer][0][j] + SMALL_NUMBER))

                w.append(w_j)
            # BUGFIX: neuron n of layer-1 is multiplicative, hence its
            # transfer derivative is derv_transfer_m (was derv_transfer_a)
            derv = derv_transfer_m(activity[layer - 1][1][n])
            nu_deltas.append(c(derv) * (conj_dot_prod(deltas, w)))

        deltas = nu_deltas

    return (new_momentum, deltas)



def pow_all(bases, exps):
    """Return the product of bases[i] ** exps[i] (1 for empty input).

    BUGFIX: the original had no return statement, so it always returned
    None and made the multiplicative-neuron path in propagate() crash.
    """
    result = 1
    for base, exp in zip(bases, exps):
        result *= base ** exp
    return result

# TODO: obsolete encoding of activity

def propagate(n_input, network, transfer_a, transfer_m, num_of_adtv):
    """Forward pass through the net; returns (output, activity).

    n_input     -- list of input values.
    network     -- per layer, one weight vector per neuron; the first
                   num_of_adtv[layer] neurons are additive, the rest
                   multiplicative.
    transfer_a / transfer_m -- transfer functions for additive /
                   multiplicative neurons.

    activity[layer] == [presynaptic, postsynaptic] per layer; the final
    entry holds only the transferred network output.
    """

    a_range = lambda layer: def_a_range(num_of_adtv, layer)
    m_range = lambda layer: def_m_range(network, num_of_adtv, layer)

    # activity is encoded pre- & postsynaptic
    activity = [[n_input]]

    for layer in range(len(network)):

        number_of_additive = num_of_adtv[layer]
        pre_activity_a = [1] + activity[layer][0]   # constant 1 feeds the bias weight w[0]
        pre_activity_m = activity[layer][0]         # multiplicative neurons take no bias input

        activity[layer].append([])
        # additive neurons: weighted sum including the bias weight
        for w in network[layer][:number_of_additive]:
            activity[layer][1].append(conj_dot_prod(w, pre_activity_a))

        # multiplicative neurons: constant factor w[0] times the product
        # of the inputs raised to the exponents w[1:]
        for w in network[layer][number_of_additive:]:
            activity[layer][1].append(w[0] * pow_all(pre_activity_m, w[1:]))

        next_activity = []
        # apply the transfer functions to feed the next layer
        for a in a_range(layer):
            next_activity.append(transfer_a(activity[layer][1][a]))
        for m in m_range(layer):
            next_activity.append(transfer_m(activity[layer][1][m]))

        activity.append([next_activity])

    output = activity[-1][0]

    return (output, activity)
