'''
Created on 21/02/2011

@author: Vinicius

Implementacao de perceptrons multiplas camadas com algoritmo de retropropagacao
Restricoes: Unica camada intermediaria
            Nao considera o momentum para retardo do gradiente
'''

from math import exp
from funcoes import erroMedioQuadratico
from numpy.random import randn


def execute(input, hiddenNeurons, outputNeurons, desiredOutput, alfa, rate, errorThreshold,maxEpochs):
    """Train a single-hidden-layer MLP with plain backpropagation (no momentum).

    Parameters:
        input          -- training samples; each sample is a sequence of features
        hiddenNeurons  -- number of neurons in the hidden layer
        outputNeurons  -- number of neurons in the output layer
        desiredOutput  -- target per sample, indexed as d[k] (assumes one scalar
                          target per sample -- TODO confirm for outputNeurons > 1)
        alfa           -- slope of the logistic activation function
        rate           -- learning rate
        errorThreshold -- convergence threshold on the change of the mean error
        maxEpochs      -- hard cap on the number of training epochs

    Returns (w0, w1, o, epoca, computedError): trained weight matrices, the
    network outputs per sample, the epoch count, and the final mean error.
    """
    global w0, w1, x, d, a, n, y, layers, o, E
    a = alfa
    d = desiredOutput
    n = rate
    layers = [hiddenNeurons, outputNeurons]
    # Weight matrices carry one extra column for the bias weight.
    w0 = randn(hiddenNeurons, len(input[0]) + 1)
    w1 = randn(outputNeurons, hiddenNeurons + 1)
    o = randn(outputNeurons, len(input))   # network output per (neuron, sample)
    y = [None] * (hiddenNeurons + 1)       # hidden activations + bias unit
    e = [None] * outputNeurons             # per-neuron error of the current sample
    E = [None] * len(input)                # per-sample instantaneous error
    epoca = 0
    fakeInput = []
    currentError = 0.0
    
    # Bias: append the constant input 1.0 to every sample.
    for k in range(len(input)):
        z = []
        for t in input[k]:
            z.append(t)
        z.append(1.0)
        fakeInput.append(z)
    y[hiddenNeurons] = 1  # constant bias unit of the hidden layer
    
    while True:
        
        for k in range(len(fakeInput)):
            x = fakeInput[k]
            sumError = 0.0
            
            # Forward propagation through the hidden (l == 0) and output (l == 1) layers.
            for l in range(len(layers)):
                for j in range(layers[l]):
                    v_j = v(j, l)
                    fi_j = fi(v_j)
                    if l == 0:
                        y[j] = fi_j
                    if l == 1:
                        o[j][k] = fi_j
                        e[j] = d[k] - o[j][k]
                        sumError += pow(e[j], 2)
                        
            E[k] = 0.5 * sumError
            
            # Backpropagation: output layer first, then the hidden layer.
            for l in reversed(range(len(layers))):
                for j in range(layers[l]):
                    d_j = delta(j, l, k)
                    if l == 0:
                        for i in range(len(x)):
                            w0[j][i] += n * d_j * x[i]
                    else:
                        # len(y) == hiddenNeurons + 1, so this loop already updates
                        # the bias weight w1[j][hiddenNeurons]. (The original code
                        # updated it a second time afterwards, doubling the bias
                        # gradient step.)
                        for i in range(len(y)):
                            w1[j][i] += n * d_j * y[i]
           
        # Stopping criterion: converged (mean error no longer changing by more
        # than errorThreshold) or epoch budget exhausted. The original compared
        # the signed improvement with ">", which stopped training right after
        # the first large improvement instead of at convergence.
        computedError = mse()     
        if (abs(currentError - computedError) < errorThreshold or epoca == maxEpochs):
            break
        epoca += 1
        currentError = computedError
        
    return (w0, w1, o, epoca, computedError)

#Potencial de ativacao
def v(j, l):
    res = 0
    if l == 0:
        for i in range(len(x)):
            res += w0[j][i] * x[i]
    else:
        for i in range(layers[0]):
            res += w1[j][i] * y[i]
        res += w1[j][layers[0]] * y[layers[0]]  
    return res

#Funcao de ativacao
def fi(vj):
    res = 1.0 / (1.0 + exp(-a * vj))
    return res
           
#Gradiente descendente   
def delta(j, l, n):
    if l == 1: 
        oj = o[j][n]
        res = a * (d[n] - oj) * oj * (1.0 - oj)
    else:
        sum = 0
        yj = fi(v(j, l))
        for k in range(layers[1]):
            sum += delta(k , 1, n) * w1[k][j]
        sum += delta(k, 1, n) * w1[k][layers[0]]
        res = a * yj * (1.0 - yj) * sum
    return res

#Erro medio da rede
def mse():
    res = 0 
    N = len(E)   
    for n in range(N):
        res += E[n]
    res /= N
    return res