import math
import copy
from feedforward import COMPLEX, c, sigmoid
import feedforward
import helpers
#import net
#from feedforward import conj_dot_prod as dot_prod

def dsigmoid(x, D = 1.0):
    """Derivative of the logistic sigmoid: s * (1 - s), with s = sigmoid(x, D).

    D is the steepness parameter forwarded to feedforward.sigmoid.
    Computes sigmoid(x, D) once instead of twice (the original called it
    twice with identical arguments).
    """
    s = sigmoid(x, D)
    return s * (1.0 - s)


def sqrt_scale(x):
    """Return the average of x and its sign-preserving square root.

    Zero (or any falsy x) is returned unchanged, which also avoids a
    division by zero when computing the sign.
    """
    if not x:
        return x
    signed_root = math.copysign(math.sqrt(math.fabs(x)), x)
    return (signed_root + x) / 2.0

def sign(x):
    """Signum of x: -1.0, 0.0 or +1.0.

    Previously raised ZeroDivisionError for x == 0 (0 / fabs(0)); now the
    conventional signum value 0.0 is returned instead, which is backward
    compatible for all non-zero inputs.
    """
    if x == 0:
        return 0.0
    return x / math.fabs(x)


def dot_prod(v1, v2):
    """Plain dot product of two equal-length sequences.

    Iterates over v1; raises IndexError if v2 is shorter, just like
    positional indexing would.
    """
    total = 0.0
    for idx, a in enumerate(v1):
        total += a * v2[idx]
    return total

#def dot_prod(v1, v2):
    #result = 0.0
    #for i in range(len(v1)):
        #if v1[i] == 0.0 or v2[i] == 0.0:
            #result += 0.0
        #else:
            #v1i = math.fabs(v1[i])
            #v2i = math.fabs(v2[i])
            #s = math.log(v1i) + math.log(v2i)
            #s = math.exp(s) * sign(v1[i]) * sign(v2[i])
    ##        if str(s) == 'nan' or str(s) == 'inf' or str(s) == '-inf':
    ##            raise "the error was handleable"
            #result += s
    #return result

def apply_momentum(momentum, weights):
    """Add each momentum term onto the matching weight, in place.

    Both arguments are nested layer -> neuron -> weight structures of
    identical shape; weights is mutated, nothing is returned.
    """
    for layer_mom, layer_w in zip(momentum, weights):
        for neuron_mom, neuron_w in zip(layer_mom, layer_w):
            for idx, delta in enumerate(neuron_mom):
                neuron_w[idx] += delta

def dtanh(x):
    """Derivative of tanh: 1 - tanh(x)^2.

    Replaces the lambda-assigned-to-a-name form (PEP 8 E731) and computes
    math.tanh(x) once instead of twice. The module attribute `dtanh` and
    its behavior are unchanged.
    """
    t = math.tanh(x)
    return 1.0 - t * t
def backprop_layer(weights, deltas, pre_activ, learnrate, momentum, momrate, adapt_now = True):
    """Compute new momentum terms for one layer and (optionally) apply them.

    weights    -- layer weight matrix, weights[j][i]; column 0 is the bias weight
    deltas     -- error deltas for this layer's neurons
    pre_activ  -- incoming (pre-synaptic) activations, WITHOUT the bias entry
    learnrate  -- learning-rate factor for the gradient term
    momentum   -- previous momentum, same shape as weights
    momrate    -- decay factor for the previous momentum
    adapt_now  -- when True, weights are updated in place immediately

    Returns the new momentum structure (deep copy, original untouched).
    Raises ValueError if a momentum entry becomes NaN.
    """
    pre_activ = [feedforward.BIAS_VALUE] + pre_activ     # prepend bias input

    new_momentum = copy.deepcopy(momentum)

    for i in range(len(pre_activ)):
        for j in range(len(weights)):
            # c() conjugates/coerces for the COMPLEX-valued network variant
            new_momentum[j][i] = learnrate * c(deltas[j]) * c(pre_activ[i])\
                                 + momrate * momentum[j][i]
            # BUG FIX: `raise "new_momentum nan"` is a string exception, which
            # itself raises TypeError on Python >= 2.6; raise a real exception.
            if str(new_momentum[j][i]) == 'nan':
                raise ValueError("new_momentum nan")
            if adapt_now:
                weights[j][i] += new_momentum[j][i]

    return new_momentum


def calc_nu_deltas(deltas, network, layer, post_activ, derv_transfer, scale):
    """Backpropagate deltas one layer down.

    For each neuron n feeding into network[-layer], weight the incoming
    deltas by that neuron's outgoing weights (column n+1; column 0 is the
    bias weight and is skipped), multiply by the transfer-function
    derivative at its activation, and pass the result through `scale`.
    """
    layer_weights = network[-layer]
    activations = post_activ[-layer - 1]

    nu_deltas = []
    for n, act in enumerate(activations):
        weighted = 0.0
        for j in range(len(deltas)):
            weighted += deltas[j] * layer_weights[j][n + 1]
        nu_deltas.append(scale(derv_transfer(act) * weighted))
    return nu_deltas


def backprop_batch_deltas(network, deltass, pre_activs, post_activs, momentum,\
             learnrate = 0.08, momrate = 0.24,\
             derv_transfer = dtanh, scale = lambda x: x):
    """Batch variant of backprop_deltas: accumulate momentum over all samples.

    deltass     -- output-layer deltas (as summed over the batch by
                   backprop_batch)
    pre_activs  -- per-sample pre-synaptic activation stacks
    post_activs -- per-sample post-synaptic activation stacks

    Returns (new_momentum, deltas) like backprop_deltas, with momentum
    summed over the batch.
    """
    # list() so the sample pairs can be re-iterated once per layer
    # (zip() is a one-shot iterator on Python 3).
    zippy = list(zip(pre_activs, post_activs))
    new_momentum = [None for l in momentum]

    # BUG FIX: the loop body referenced an undefined name `deltas`
    # (NameError on the first layer); start from the caller-supplied
    # output-layer deltas.
    deltas = deltass

    for layer in range(1, len(network) + 1):
        new_momentum[-layer] = [[0.0 for i in n] for n in network[-layer]]
        # NOTE(review): sized by the weight count of neuron 0 (incl. bias),
        # while calc_nu_deltas yields one entry per lower-layer neuron —
        # confirm these shapes agree in the callers.
        new_deltas = [0.0 for i in network[-layer][0]]

        for (pre_activ, post_activ) in zippy:
            dlmom = backprop_layer(network[-layer], deltas,\
                              pre_activ[-layer-1], learnrate,
                              momentum[-layer], momrate)
            # accumulate this sample's momentum contribution
            for (n, dln) in zip(new_momentum[-layer], dlmom):
                for i in range(len(n)):
                    n[i] += dln[i]

            # accumulate deltas for the next layer down
            if layer < len(post_activ):
                new_deltas1 = calc_nu_deltas(deltas, network, layer,\
                                             post_activ, derv_transfer, scale)
                for (i, n1) in enumerate(new_deltas1):
                    new_deltas[i] += n1
        deltas = new_deltas

    return (new_momentum, deltas)



def backprop_deltas(network, deltas, pre_activ, post_activ, momentum,\
             learnrate = 0.08, momrate = 0.24,\
             derv_transfer = dtanh, scale = lambda x: x, adapt_now = True):

    """Backpropagates with given deltas for the output layer.

       If post_activ also contains pre-axonic/post-synaptic activation of
       the input layer (or any prefix-part of it), deltas are computed for
       this input layer and returned (backpropagation into input-space /
       recurrency). It is possible to backpropagate only into a first part
       of the input-space by providing only a prefix of post-synaptic
       activation.

       Returns (new_momentum, deltas)."""

    depth = len(network)
    new_momentum = [None] * len(momentum)

    for layer in range(1, depth + 1):
        # adapt this layer's weights and record its new momentum
        new_momentum[-layer] = backprop_layer(
            network[-layer], deltas, pre_activ[-layer - 1],
            learnrate, momentum[-layer], momrate, adapt_now)

        # deltas for the next layer down (possibly into input space)
        if layer < len(post_activ):
            deltas = calc_nu_deltas(deltas, network, layer, post_activ,
                                    derv_transfer, scale)

    return (new_momentum, deltas)


def sqrt_backprop_deltas(network, deltas, pre_activ, post_activ, momentum,\
             learnrate = 0.08, momrate = 0.24, derv_transfer = dtanh, adapt_now = True):
    """Convenience wrapper: backprop_deltas using sqrt_scale as the delta scaler."""
    return backprop_deltas(
        network, deltas, pre_activ, post_activ, momentum,
        learnrate, momrate, derv_transfer, sqrt_scale,
        adapt_now = adapt_now)



def errors2deltas(errors, net_in, derv_transfer = dtanh):
    """Turn output-layer errors into deltas: error * derv_transfer(net input).

    Iterates over `errors`; raises IndexError if net_in is shorter, as the
    positional lookup demands.
    """
    return [err * derv_transfer(net_in[i]) for i, err in enumerate(errors)]


def backprop(network, error, pre_activ, post_activ, momentum, learnrate = 0.05,\
             momrate = 0.1, derv_transfer = dtanh, adapt_now = True):
    """Full backprop pass: derive output-layer deltas from `error`, then
    delegate to backprop_deltas. Returns (new_momentum, deltas)."""
    output_deltas = errors2deltas(error, pre_activ[-1], derv_transfer)
    return backprop_deltas(network, output_deltas, pre_activ, post_activ,
                           momentum, learnrate, momrate, derv_transfer,
                           adapt_now = adapt_now)


def backprop_batch(network, errorss, pre_activs, post_activs, momentum,\
                    learnrate = 0.08, momrate = 0.24,
                    derv_transfer = dtanh, scale = lambda x:x):
    """Batch backprop: per-sample output deltas are summed, then passed to
    backprop_batch_deltas.

    errorss    -- per-sample output-error vectors (must be non-empty)
    pre_activs -- per-sample pre-synaptic activation stacks

    BUG FIX: the forwarding call previously hard-coded learnrate=0.08,
    momrate=0.24, derv_transfer=dtanh and scale=identity, silently ignoring
    the arguments this function received; it now forwards them.
    """
    deltass = [errors2deltas(errors, pre_activs[i][-1], derv_transfer)
               for (i, errors) in enumerate(errorss)]

    # sum the per-sample deltas into one output-layer delta vector
    deltas = [0.0] * len(deltass[0])
    for sample_deltas in deltass:
        for (i, d) in enumerate(sample_deltas):
            deltas[i] += d

    return backprop_batch_deltas(network, deltas, pre_activs, post_activs, momentum,\
                    learnrate = learnrate, momrate = momrate,
                    derv_transfer = derv_transfer, scale = scale)


def sqrt_backprop(network, error, pre_activ, post_activ, momentum, learnrate = 0.05,\
             momrate = 0.1, derv_transfer = dtanh, adapt_now = True):
    """Like backprop, but with sqrt_scale applied to the propagated deltas
    (via sqrt_backprop_deltas). Returns (new_momentum, deltas)."""
    out_deltas = errors2deltas(error, pre_activ[-1], derv_transfer)
    return sqrt_backprop_deltas(network, out_deltas, pre_activ, post_activ,
                                momentum, learnrate, momrate, derv_transfer,
                                adapt_now)


def backprop_inspace(inlayer, n_input, deltas, in_mom = None,\
                     learnrate = 0.1, momrate = 0.24, scale = sqrt_scale):
    """Backpropagate deltas into the input space of `inlayer`.

    inlayer -- first-layer weight matrix; column 0 is the bias weight and
               is skipped, column n+1 belongs to input-neuron n
    n_input -- input activations
    deltas  -- deltas of the layer above
    in_mom  -- previous input-space momentum, or None for zeros

    Returns the new input-space momentum vector.

    BUG FIX: in_mom defaults to None but was indexed unconditionally,
    raising TypeError whenever it was omitted; it now defaults to a zero
    vector, which is backward compatible for all explicit in_mom values.
    """
    if in_mom is None:
        in_mom = [0.0] * len(n_input)

    new_momentum = [None] * len(n_input)
    for n in range(len(n_input)):
        # outgoing weights for input-neuron n (skip bias weight at index 0)
        w = [inlayer[j][n+1] for j in range(len(inlayer))]

        n_delta = scale(dot_prod(deltas, w))

        new_momentum[n] = learnrate * n_delta * n_input[n] + momrate * in_mom[n]

    return new_momentum
