import math
import random

# This module implements feed-forward neural networks built from
# standard additive neurons on every layer.

# Network format: a list with one entry per non-input layer; each entry
# is a list of per-neuron weight vectors [w_1 ... w_m], where index 0
# is the weight applied to the prepended bias unit.

# Default weight generator: zero-mean Gaussian with sigma = 0.5.
default_vgen = lambda: random.gauss(0.0, 0.5)

# When True, values are treated as complex numbers and c() conjugates
# its argument; when False, c() is the identity and everything is real.
COMPLEX = False

# Constant activity of the bias unit prepended to every layer's input.
BIAS_VALUE = 1.0

if COMPLEX:
    # Complex mode: c conjugates its argument.
    c = lambda x: x.conjugate()
else:
    # Real mode: c is the identity.
    c = lambda x: x

def sigmoid(a, D = 1.0):
    """Logistic sigmoid 1 / (1 + exp(-D * a)), safe against overflow.

    a -- input activation.
    D -- steepness factor (assumed positive).

    Saturates to exactly 0.0 / 1.0 once the scaled input D * a leaves
    the range where math.exp can be evaluated (|x| > 709 overflows a
    C double).
    """
    # Guard on the scaled argument, not on `a` alone: the old check
    # compared `a` against +/-709 and could still overflow math.exp
    # for large D (e.g. sigmoid(400, 2.0) computed exp(800)).
    x = D * a
    if x < -709.0:
        return 0.0
    if x > 709.0:
        return 1.0
    return 1.0 / (1.0 + math.exp(-x))


def conj_dot_prod(v1, v2):
    """Return sum_i c(v1[i]) * c(v2[i]).

    With COMPLEX enabled, c() conjugates both factors; otherwise c()
    is the identity and this is a plain real dot product. Iterates
    over the length of v1 (v2 must be at least as long).
    """
    total = complex(0, 0) if COMPLEX else 0.0
    for idx, lhs in enumerate(v1):
        total += c(lhs) * c(v2[idx])
    return total

def abs_dot_prod(v1, v2):
    """Return sum_i |c(v1[i]) * c(v2[i])| (element-wise absolute values).

    Same traversal as conj_dot_prod, but each product contributes its
    absolute value, so the result is a non-negative float. Iterates
    over the length of v1 (v2 must be at least as long).
    """
    if COMPLEX:
        result = complex(0, 0)
    else:
        # 0.0 (not int 0) for consistency with conj_dot_prod, so an
        # empty v1 yields a float as well.
        result = 0.0
    for i in range(len(v1)):
        result += math.fabs(c(v1[i]) * c(v2[i]))
    return result


def generate_net(layers, generate_weight = None):
    """Create a fully-connected feed-forward weight structure.

    layers -- sequence of layer sizes, e.g. [n_in, n_hidden, n_out].
    generate_weight -- zero-argument callable producing one weight;
        defaults to default_vgen (Gaussian, mean 0.0, sigma 0.5).

    Returns a list with one entry per non-input layer; each entry is a
    list of per-neuron weight vectors of length 1 + previous layer size
    (the extra leading weight multiplies the bias input).
    """
    if generate_weight is None:
        # Late-bound default: resolves default_vgen at call time.
        generate_weight = default_vgen
    result = []
    # range (not xrange) keeps this working on both Python 2 and 3,
    # consistent with conj_dot_prod / abs_dot_prod above.
    for l in range(1, len(layers)):
        result.append([[generate_weight() for j in range(1 + layers[l - 1])]
                       for i in range(layers[l])])

    return result


def propagate(n_input, network, transfer = math.tanh):
    """Forward-propagate n_input through network.

    n_input -- list of input activations (no bias entry; BIAS_VALUE is
        prepended here for every layer).
    network -- list of layers, each a list of per-neuron weight vectors
        (see generate_net); w[0] is the bias weight.
    transfer -- activation function applied to each summed net input.

    Returns (output, pre_acts, post_acts): pre_acts[k] is the activity
    entering layer k (pre_acts[0] is n_input itself), post_acts[k]
    holds the raw summed net inputs of layer k, and output is the
    transferred activity of the final layer.
    """
    # activity is encoded pre & postsynaptic
    pre_acts = [n_input]
    post_acts = []

    # range (not xrange) keeps this working on both Python 2 and 3,
    # consistent with conj_dot_prod / abs_dot_prod above.
    for layer in range(len(network)):

        # Bias unit prepended so w[0] acts as the neuron's bias weight.
        pre_activity = [BIAS_VALUE] + pre_acts[layer]

        post_acts.append([])
        # additive neurons: net input is a (conjugated) dot product
        for w in network[layer]:
            post_acts[layer].append(conj_dot_prod(w, pre_activity))

        next_activity = []
        # transfer activations to next synapse
        for a in range(len(network[layer])):
            next_activity.append(transfer(post_acts[layer][a]))

        pre_acts.append(next_activity)

    output = pre_acts[-1]

    return (output, pre_acts, post_acts)


def t_propagate(n_input, network, transfer = math.tanh):
    """Forward pass that also records absolute net inputs per layer.

    Same traversal as propagate, but additionally accumulates
    abs_dot_prod for every neuron (presumably for a sensitivity /
    training statistic -- the caller decides).

    n_input -- list of input activations (no bias entry; BIAS_VALUE is
        prepended here for every layer).
    network -- list of layers of per-neuron weight vectors.
    transfer -- activation function applied to each summed net input.

    Returns (output, o_activity, net_in, abs_net_in): o_activity[k] is
    the activity entering layer k, net_in[k] the raw net inputs of
    layer k, abs_net_in[k] the element-wise-absolute net inputs, and
    output the transferred activity of the final layer.
    """
    o_activity = [n_input]
    net_in = []
    abs_net_in = []

    # range (not xrange) keeps this working on both Python 2 and 3,
    # consistent with conj_dot_prod / abs_dot_prod above.
    for layer in range(len(network)):
        # Bias unit prepended so w[0] acts as the neuron's bias weight.
        o_a = [BIAS_VALUE] + o_activity[-1]
        net_in.append([])
        abs_net_in.append([])

        for w in network[layer]:
            net_in[-1].append(conj_dot_prod(w, o_a))
            abs_net_in[-1].append(abs_dot_prod(w, o_a))

        next_o_activity = []
        for a in range(len(network[layer])):
            next_o_activity.append(transfer(net_in[-1][a]))

        o_activity.append(next_o_activity)

    output = o_activity[-1]

    return (output, o_activity, net_in, abs_net_in)


