import linear_algebra
from array import array
from math import exp, fabs
import copy
import random

# NOTE: the one-word docstring is load-bearing: VectorNet.__str__ serializes
# transfer.__doc__ and parseNet() eval()s it back into the function object --
# keep it exactly equal to the function name.
def tanh(a):
    """tanh"""
    # Hyperbolic tangent via the logistic identity 2/(1+exp(-2a)) - 1.
    # math.exp overflows IEEE doubles for arguments above ~709.78, i.e.
    # exp(-2.0 * a) raises OverflowError for a < ~-354.9; clamp to the
    # limit value -1.0 instead of crashing. (For large positive a,
    # exp(-2a) merely underflows to 0.0, which is harmless.)
    if a < -354.8:
        return -1.0
    return 2.0 / (1.0 + exp(-2.0 * a)) - 1.0


# exp() overflows IEEE doubles for arguments beyond ~709.78; exponents past
# this bound make the sigmoid numerically indistinguishable from its 0/1 limits.
SIGMOID_PRECISION_LOWER_BOUND = -709.7

# NOTE: the one-word docstring is load-bearing: VectorNet.__str__ serializes
# transfer.__doc__ and parseNet() eval()s it back into the function object --
# keep it exactly equal to the function name.
def sigmoid(a, D = .8):
    """sigmoid"""
    # Logistic sigmoid 1/(1 + exp(-D*a)); D is the steepness of the curve.
    # The original hard-coded +-1400 cut-offs let exp() overflow for
    # 887 < |a| < 1400 at the default D = .8. Guard on the actual
    # exponent D*a against the precision bound instead.
    x = D * a
    if x < SIGMOID_PRECISION_LOWER_BOUND:
        return 0.0
    if x > -SIGMOID_PRECISION_LOWER_BOUND:
        return 1.0
    if a < 0.0:
        # exp(-x) == exp(D * fabs(a)) for negative a (original branch kept,
        # avoids dividing by a huge exp result)
        return 1.0 / (1.0 + exp(-x))
    else:
        return 1.0 / (1.0 + 1.0 / exp(x))

# Default transfer (activation) function used by VectorNet when none is given.
defaultTransfer = sigmoid
# Project helper; presumably sums a sequence of numbers (randomGen uses it to
# count neurons) -- TODO confirm against linear_algebra.
add_all = linear_algebra.add_all

# the two mixture means: the "ring" sits around +1 and -1
gaussianGenMeans = (1, -1)
# standard deviation of each Gaussian mode
gaussianGenDeriv = 1
from random import gauss
def gaussianGen():
    """Delivers a normally distributed ring around 0.

    Picks one of the two means (+1 / -1) at random, then draws one Gaussian
    sample with deviation gaussianGenDeriv around it.
    """
    mean = gaussianGenMeans[random.randint(0, 1)]
    return gauss(mean, gaussianGenDeriv)

# Alternative generator, kept for reference: uniform values instead of the
# Gaussian ring.
#from random import uniform
##lower_bound = -1
##upper_bound = 1
##valueGen = lambda: uniform(lower_bound, upper_bound)
valueGen = gaussianGen

def randomGen(layers, gen=None):
    """ Randomly generates weights in a sphere around 0.

    layers: sequence of layer sizes, input layer first.
    gen:    optional zero-argument callable producing one value; defaults to
            the module-level valueGen hook.
    Returns an array('d') holding one bias per non-input neuron followed by
    one weight per connection between consecutive layers."""
    if gen is None:
        gen = valueGen
    result = array('d')

    # one bias per neuron of every layer after the input layer
    # (builtin sum() replaces the add_all alias from linear_algebra)
    for n in range(sum(layers[1:])):
        result.append(gen())

    # one weight per connection between consecutive layers.
    # Bug fix: the original looped `for l in range(len(layers[1:]))`,
    # i.e. l = 0..n-2, so via negative indexing it counted
    # layers[0]*layers[-1] connections and skipped the last layer's.
    for l in range(1, len(layers)):
        for w in range(layers[l]):
            for i in range(layers[l - 1]):
                result.append(gen())

    return result

# default weight generator used by VectorNet.__init__
defaultGen = randomGen

class VectorNet(linear_algebra.Vector):
    """Defines the parameters (bias-values/weights) of a network as components of a vector.

    Inherits from linear_algebra.Vector, hence all methods that work on vectors work here as well.
    If weights are given, the format is [2ndlayer 1stneuron bias, 2ndlayer 1stneuron weights....]"""

    def __new__(cls, layers, *args, **keyw):
        # Swallow the extra constructor arguments so they never reach the
        # base sequence type's __new__.
        return linear_algebra.Vector.__new__(cls)

    def __init__(self, layers, transfer = defaultTransfer, weights = None, generate=defaultGen):
        """layers:   sequence of layer sizes, input layer first.
        transfer: activation function applied to every neuron's net input.
        weights:  flat parameter sequence; when omitted (or empty, since the
                  test is `not weights`), generate(layers) supplies them.
        generate: weight-generator callable, defaults to randomGen."""
        linear_algebra.Vector.__init__(self)
        if not weights:
            weights = generate(layers)

        # the vector's components are the parameters themselves
        self.extend(weights)

        self.transfer = transfer

        # Todo: check layers is not []
        self.layers = tuple(layers)

    def __copy__(self):
        # self is passed as the weights sequence: the copy gets the same
        # values but its own storage.
        return VectorNet(self.layers, self.transfer, self)


    def __str__(self):
        """Outputs a string representation of a net."""
        result = str(type(self))
        # extract type name from e.g. "<class '__main__.VectorNet'>"
        result = result.split()[1].replace(">", "").replace("'","")

        # the transfer function's docstring doubles as its serialized name
        result += "\n" + self.transfer.__doc__ + "\n"

        result += str(self.layers) + "\n"

        # NOTE(review): this assumes the first layers[0] components belong to
        # the input layer, while propagate() consumes the vector from index 0
        # for layer 1 -- confirm which layout is canonical.
        result += "layer 0\n"
        for b0 in xrange(self.layers[0]):
            result += "\t" + str(b0) + ":" + str(self[b0])

        # every neuron of layer l owns layers[l-1] + 1 consecutive
        # components: its bias plus one weight per incoming connection
        n_index = 0
        next_n_index = self.layers[0]
        for l in xrange(1,len(self.layers)):
            result += "\nlayer " + str(l) + ":"
            for n in xrange(self.layers[l]):
                n_index = next_n_index
                next_n_index += self.layers[l-1] + 1 # including bias
                result += "\n\t" + str(n) + ":"
                result +=  "\t".join([str(w) for w in self[n_index: next_n_index]])

        return result

    def apply(self, n_input):
        """Applies (propagates) n_input to the net.

        Returns the activations of the output layer as a list slice."""
        # TODO: make this safe against cases in which input is not size of input layer
        # Work on a mutable copy; propagate() appends each layer's
        # activations to it in place.
        activity = list(n_input)

        # Bug fix: the original passed a throwaway `list(activity)` into
        # propagate and then sliced the untouched input copy, so apply()
        # returned part of the *input* instead of the network's output
        # (the commented-out return below shows activity was expected to
        # accumulate all activations).
        propagate(self, self.layers, activity, self.transfer)

#        return (activity[0 - self.layers[-1]: ], activity)
        return activity[0 - self.layers[-1]: ]




from linear_algebra import dot_product
from linear_algebra import Vector

def propagate(vector, layers, activity, transfer):
    """Recursively push activation through the network, layer by layer.

    vector:   flat parameter vector; per neuron [bias, incoming weights...]
    layers:   remaining layer sizes; layers[0] is the size of the layer whose
              activations sit at the tail of `activity`
    activity: list of activations so far; new ones are appended in place
    transfer: activation function applied to every net input
    """
    if len(layers) <= 1:
        return  # output layer reached, nothing left to feed forward

    # prepend a constant 1.0 so the bias weight takes part in the dot product
    inputs = [1.0] + activity[0 - layers[0]:]
    width = len(inputs)

    start = 0
    for neuron in range(layers[1]):
        stop = start + width
        # net input = bias + weighted sum of this neuron's inputs
        # (our dot_product doesn't check the length of its second argument)
        net_input = dot_product(vector[start:stop], inputs)
        activity.append(transfer(net_input))
        start = stop

    # remaining layers see only the weights that are still unconsumed
    propagate(vector[start:], layers[1:], activity, transfer)


def parseNet(lines):
    """Parse a net from the text lines produced by VectorNet.__str__().

    lines[0]: class name, lines[1]: transfer-function name, lines[2]: layer
    size tuple; the remaining lines are the per-layer weight listings.
    Returns an instance of the parsed class.
    Raises ValueError when a layer/neuron label does not match the expected
    format (replaces the old `print "aaaaargh..."` todo-exceptions).

    NOTE(security): eval() on the input executes arbitrary code -- only feed
    this data from trusted files.
    """
    cls = eval(lines[0])
    trans = eval(lines[1])
    layers = eval(lines[2])
    line_index = 3

    weights = []
    for layer in range(len(layers)):
        label = lines[line_index].split()
        # __str__ writes "layer 0" for the input layer but "layer 1:" (with a
        # trailing colon) for the others, hence the strip -- the original
        # eval crashed on the colon.
        if label[0] != "layer" or eval(label[1].strip(':')) != layer:
            raise ValueError("bad layer label in line %d: %r"
                             % (line_index, lines[line_index]))

        line_index += 1
        if layer == 0:
            # input layer: all "index:value" pairs on one line
            weights.extend([eval(w.split(':')[1]) for w in lines[line_index].split()])
            line_index += 1  # bug fix: step past the bias line (was missing)

        else:
            for neuron in range(layers[layer]):
                line = lines[line_index].split(':')
                # strip() tolerates the leading tab __str__ emits
                if eval(line[0].strip()) != neuron:
                    raise ValueError("bad neuron label in line %d: %r"
                                     % (line_index, lines[line_index]))

                weights.extend([eval(w) for w in line[1].split()])
                line_index += 1

    # bug fix: the return sat inside the layer loop, so parsing always
    # stopped after the first (input) layer
    return cls(layers, trans, weights)

