from linear_algebra import big_product, lin_combine, dot_product

# NN format [ [<layer1> number_of_additive, [w_11.... w_nm] [w...] ]  [<layer2> num, [..][..]..] ]

# TODO: make sure that weights from the (untransferred) input layer are processed correctly
# don't forget the bias

def backprop_layer(weights, number_of_additive, deltas, activity, learning_rate_add,\
                   learning_rate_mul, transfer, transfer_deriv):
    """Update one layer's weights in place and return deltas for the layer below.

    Parameters:
        weights            -- weight rows, one per neuron; rows
                              [0:number_of_additive] belong to additive
                              neurons, the remaining rows to multiplicative ones
        number_of_additive -- count of additive neurons in this layer
        deltas             -- error deltas of this layer's neurons
        activity           -- activity record; activity[0] holds the
                              presynaptic (input) values, activity[2] the
                              postsynaptic values (see NN format note at the
                              top of the file)
        learning_rate_add  -- learning rate for the additive neurons
        learning_rate_mul  -- learning rate for the multiplicative neurons
        transfer           -- transfer function (unused here, kept for a
                              uniform layer interface)
        transfer_deriv     -- derivative of the transfer function

    Returns the back-propagated deltas, one per presynaptic unit, with the
    leading bias entry dropped (the bias has no upstream neuron).
    """
    activ_pre = [1] + activity[0]     # prepend constant 1 to drive the bias weight
    activ_post = activity[2]
    nu_deltas = []

    for i in range(len(activ_pre)):
        # additive neurons: standard delta rule
        for j in range(number_of_additive):
            weights[j][i] += learning_rate_add * deltas[j] * activ_pre[i]

        # multiplicative neurons
        # NOTE(review): divides by activ_pre[i]; an exactly-zero presynaptic
        # activity raises ZeroDivisionError -- confirm inputs are never 0
        for j in range(number_of_additive, len(weights)):
            weights[j][i] += learning_rate_mul * deltas[j] * activ_post[j] / activ_pre[i]

        # delta for presynaptic unit i: derivative-weighted sum of the
        # downstream deltas through the (freshly updated) weights
        # NOTE(review): textbook backprop uses the pre-update weights here;
        # the original loop order updates first, so that order is kept
        nu_deltas.append(transfer_deriv(activ_pre[i]) *
                         sum(weights[j][i] * deltas[j] for j in range(len(weights))))

    # drop the bias slot -- there is no upstream neuron to receive it
    return nu_deltas[1:]




def backprop(error, network, activity, learning_rate_add,\
             learning_rate_mul, transfer, transfer_deriv):
    """Back-propagate `error` through `network`, updating weights in place.

    Walks the layers from the output back towards the input, handing each
    layer a two-slice of the activity record (this layer's entries followed
    by the previous layer's) so backprop_layer can see both pre- and
    postsynaptic values.

    Returns the delta propagated past the last processed layer.
    """
    for layer in range(len(network) - 1, 0, -1):
        # network[layer][0] is number_of_additive; the weight rows follow it,
        # hence the [1:] slice
        # NOTE(review): layer 0 is never processed here -- see the TODO at
        # the top of the file about weights from the input layer
        error = backprop_layer(network[layer][1:], network[layer][0],\
                               activity[layer] + activity[layer - 1], error,\
                               learning_rate_add, learning_rate_mul,\
                               transfer, transfer_deriv)

    return error


def propagate2layer(weights, activity):
    """Propagate `activity` through a single layer of `weights`.

    Stub implementation: the net-input computation has not been written yet,
    so an empty list is returned regardless of the arguments.
    """
    return []


def propagate(n_input, network, transfer):
    """Forward-propagate `n_input` through `network`.

    The activity record is encoded pre & postsynaptic: each processed
    activity[layer] ends up as [presynaptic_inputs, net_inputs], and the
    transferred net inputs become the next layer's presynaptic inputs.

    Returns (output, activity) where output is the final layer's presynaptic
    list.
    """
    activity = [[n_input]]

    # NOTE(review): the last entry of network is not propagated through here
    # (the range stops at len(network) - 1) -- confirm that is intended
    for layer in range(len(network) - 1):

        number_of_additive = network[layer][0]
        pre_activity = [1] + activity[layer][0]   # constant 1 drives the bias weight

        # net inputs of this layer are collected in activity[layer][1];
        # the sublist must exist before the loops below append to it
        activity[layer].append([])

        # additive neurons: plain weighted sum; weight rows start at index 1
        # because network[layer][0] holds number_of_additive
        for j in range(1, number_of_additive + 1):
            activity[layer][1].append(
                dot_product(network[layer][j], pre_activity))

        # multiplicative neurons: product of the individually weighted
        # inputs, taken from the rows after the additive ones
        for j in range(number_of_additive + 1, len(network[layer])):
            net_in = big_product(lin_combine(network[layer][j], pre_activity))
            activity[layer][1].append(net_in)

        # transfer the activations; they become the next layer's presynaptic
        # input list (nested so activity[layer + 1][0] reads correctly)
        activity.append([[transfer(net_in) for net_in in activity[layer][1]]])

    output = activity[-1][0]

    return (output, activity)
