import backprop
import math
import random

# XOR truth table in bipolar (+/-1) encoding: each entry is [inputs, target].
xorlist = [ [[-1.0,-1.0],[-1.0]], [[-1.0,1.0],[1.0]], [[1.0,-1.0],[1.0]], [[1.0,1.0],[-1.0]] ]

# Network topology: 2 inputs, one hidden layer of 2 units, 1 output.
layers = [2,2,1]

# Initial weights drawn from a small Gaussian.  Pass `layers` instead of a
# second hard-coded [2,2,1] literal so the topology is defined in one place.
net = backprop.generate_net(layers, lambda: random.gauss(0, 0.5))
out = []

def calc_err(out = out, net = net):
    """Return the summed absolute output error of *net* over the XOR set.

    Each training pair is propagated through the network and the absolute
    difference between the target and the network's single output is
    accumulated.  (`out` is rebound locally on every iteration, so the
    default argument has no effect on the result.)
    """
    total = 0
    for sample in xorlist:
        inputs, target = sample[0], sample[1]
        out, activ = backprop.propagate(inputs, net, math.tanh, math.tanh, layers[1:])
        total += math.fabs(target[0] - out[0])
    return total

def train_xor(n, learnrate = 0.05, momrate = 0.03, net = net):
    """Train *net* on the XOR truth table for *n* epochs; return the net.

    Parameters:
        n         -- number of passes over the full XOR training set
        learnrate -- learning rate passed (twice) to backprop.backprop
        momrate   -- momentum coefficient
        net       -- the network to train (defaults to the module-level net,
                     which is mutated in place by backprop.backprop)
    """
    # Momentum terms start at zero, with the same shape as the network.
    momentum = backprop.generate_net(layers, lambda: 0.0)
    for i in range(n):  # range(), not Python-2-only xrange(): iterates the
                        # same on Python 2 and 3.
        for x in xorlist:
            (out, activ) = backprop.propagate(x[0], net, math.tanh, math.tanh, layers[1:])
            # `deltas` is returned but unused here; only the updated
            # momentum carries over to the next step.
            (momentum, deltas) = backprop.backprop(net, [x[1][0] - out[0]], activ,
                                         layers[1:], learnrate, learnrate,
                                         lambda x: 1.0 - math.tanh(x) * math.tanh(x),
                                         lambda x: 1.0 / (math.cosh(x) * math.cosh(x)),
                                         momentum, momrate)

    return net

# Hyperbolic-function reference:
#   hyperbolic sine    sinh(x) = (exp(x) - exp(-x)) / 2
#   hyperbolic cosine  cosh(x) = (exp(x) + exp(-x)) / 2
#   hyperbolic tangent tanh(x) = sinh(x) / cosh(x)

def tanh(x):
    """tanh built from exponentials; equivalent to math.tanh."""
    return (math.exp(x) - math.exp(-x)) / (math.exp(x) + math.exp(-x))

def cosh(x):
    """cosh built from exponentials; equivalent to math.cosh."""
    return (math.exp(x) + math.exp(-x)) / 2.0

def sech(z):
    """Hyperbolic secant: 1 / cosh(z)."""
    return 1.0 / cosh(z)

def dtanh(z):
    """Derivative of tanh: sech(z)^2 = 4 / (exp(2z) + exp(-2z) + 2).

    The earlier scratch versions of this derivative were either not valid
    Python or omitted the "+ 2" term in the denominator, which made them
    wrong everywhere except in the limit |z| -> infinity.
    """
    return 4.0 / (math.exp(z * 2.0) + math.exp(z * -2.0) + 2.0)
