function [answer age errors Wvecmin minTotalError] = backpropagation(A, WvecInput, activationFunc, hiddenLayerNeuronsParam, etha2, trainSet, testSet, expectedOuts, expectedTestOuts)
    %BACKPROPAGATION  Online (per-pattern) backpropagation training for a
    %multilayer perceptron, with a momentum term and an adaptive learning rate.
    %
    %Inputs:
    %  A                       - number of training epochs to run
    %  WvecInput               - initial weights: cell array, one matrix per layer
    %  activationFunc          - activation name, 'tanh' or 'exp' (see g / gDer)
    %  hiddenLayerNeuronsParam - vector with the neuron count of each layer
    %  etha2                   - initial learning rate (stored in global etha)
    %  trainSet                - training patterns, one row per pattern
    %  testSet                 - NOTE(review): accepted but never used here
    %  expectedOuts            - expected output for each training pattern
    %  expectedTestOuts        - NOTE(review): accepted but never used here
    %Outputs:
    %  answer        - weights after the last epoch
    %  age           - number of epochs executed
    %  errors        - training error recorded per epoch
    %  Wvecmin       - weights recorded at the lowest seen training error
    %  minTotalError - that lowest training error
    global etha;
    global epsilon;
    global beta;
    global hiddenLayerNeurons;
    global AdaptativeEthaParameterA;
    global AdaptativeEthaParameterB;
    global alpha;
    
    %Additive-increase / multiplicative-decrease constants for the adaptive eta
    %(consumed by adaptativeEtha below).
    AdaptativeEthaParameterA = 0.01;
    AdaptativeEthaParameterB = 0.05;
    
    hiddenLayerNeurons = hiddenLayerNeuronsParam;
    %Hidden layers + output layer
    layersNumber = cols(hiddenLayerNeurons)-1;
    %NOTE(review): epsilon is assigned here but never read in this function.
    epsilon= 0.01;
    etha= etha2;
    alpha=0.9;
    beta=1;
    age = 0;

    %NOTE(review): Wvecmin starts as a scalar sentinel; it is replaced by a
    %weight cell array the first time the minimum-error branch below runs.
    Wvecmin = 100;
    minTotalError = 100;
    totalError = 1;
    totalErrorAnt = 1;
    %NOTE(review): the *Test* variables below are initialized but never used.
    totalTestErrorAnt = 1;
    minTotalTestError = 100000;
    decrease = 0;
    isFirstTime=0;

    %data
    [rowsT colsT] = size(trainSet);
    [rowsO colsO] = size(expectedOuts);
    %weight matrices
    Wvec = WvecInput;
    %Previous weight updates (all zeros initially), used as the momentum term.
    momentum = generateDeltasWeightVector(trainSet, expectedOuts);

    'uno'  %debug trace: echoes 'uno' to the console
    
    %run for A epochs (the original comment said "while the error exceeds
    %epsilon", but epsilon is never actually checked in this loop)
    while age <= A
        %Present the training patterns in a new random order each epoch.
        randIndex = randperm(rowsT);
	
        %For every training pattern
        for k=1:rowsT
            outputs{1} = trainSet(randIndex(k), :);
            %forward pass through every layer
            for i=1:layersNumber
                %membrane potential (pre-activation)
                h{i} = Wvec{i} * outputs{i}';
                h{i} = h{i}';
                outputs{i+1} = g(activationFunc, h{i});
                %append the constant -1 bias input for the next layer
                outputs{i+1} = horzcat(outputs{i+1}, -1);
            end
            %The algorithm treats the input as one more layer.
            %Delta of the output layer.
            %NOTE(review): only the first output neuron is read — this assumes
            %a single-output network; confirm against callers.
            auxOutputs = outputs{layersNumber+1}(1);    
                
            deltas{layersNumber} = firstDelta(activationFunc, h{layersNumber}, auxOutputs, expectedOuts(randIndex(k)));
            i=layersNumber-1;
            %Deltas of the remaining layers, propagated backwards
            while i>=1
                %drop the bias column before backpropagating through W
                Waux = Wvec{i+1}(:,1:end-1);
                deltas{i} = otherDelta(activationFunc, h{i}, Waux, deltas{i+1});
                i=i-1;
            end
            %Weight update (online: applied after every single pattern)
            for i=1:layersNumber
               [Wvec{i} momentum{i}] = updateW(Wvec{i}, deltas{i}, outputs{i}, momentum{i}, isFirstTime); 
            end
	    isFirstTime=1;
            %Store the expected output to compute the squared error afterwards
            expectedOutputs(k) = expectedOuts(randIndex(k));
            %Store the value obtained at the output
            obtainedOutputs(k) = outputs{layersNumber+1}(1);
            
        end

        %compute the error of the test patterns
        %NOTE(review): the test-set error is never actually computed.
        
        %keep the minimum Wvec and the minimum error
        %NOTE(review): at this point totalError still holds the PREVIOUS
        %epoch's error (it is recomputed just below), so this minimum
        %tracking pairs current weights with a one-epoch-old error.
        if(totalError < minTotalError)
            Wvecmin = Wvec;
	        minTotalError = totalError;
        end    
            
        %compute the training-set error
        totalError = error(obtainedOutputs, expectedOutputs);
            
        %adaptive eta: raise it after every 5 consecutive improvements,
        %cut it as soon as the error grows
        if (totalError - totalErrorAnt < 0)
            decrease = decrease + 1;
            if (mod(decrease,5) == 0)
                etha = adaptativeEtha('increaseEtha');
            end
	    %alpha = 0.9;
        elseif(totalError - totalErrorAnt > 0)
            decrease = 0;
            etha = adaptativeEtha('decreaseEtha');
	    %alpha = 0;
        else
            etha = adaptativeEtha('doNothing');
        end         
        totalErrorAnt = totalError  %no semicolon: echoes the error each epoch
            
        age = age + 1;
        errors(age)=totalError;
        obtainedOutputs;
        expectedOutputs;
    end
    answer=Wvec;
end

%Weight update with an optional momentum term.
%  W           - current weight matrix of one layer
%  delta       - backpropagated delta row vector for that layer
%  output      - output row vector of the previous layer (incl. bias)
%  momentum    - previous update step for this layer
%  isFirstTime - 0 on the very first update (no momentum applied yet)
%Returns the updated weights and the step to reuse as next momentum.
%Note: momentumResp is the plain eta*delta'*output step, without the
%momentum contribution itself.
function [answer momentumResp]=updateW(W, delta, output, momentum, isFirstTime)
    global etha;
    global alpha;
    %Plain gradient step scaled by the learning rate.
    step = (etha .* delta') * output;
    momentumResp = step;
    if isFirstTime == 0
        %First update ever: there is no meaningful previous step.
        answer = W + step;
    else
        %Blend in the previous step, weighted by alpha.
        answer = W + step + alpha*momentum;
    end
end

%Returns the next learning rate according to the requested policy:
%  'increaseEtha' - additive increase by AdaptativeEthaParameterA
%  'decreaseEtha' - multiplicative decrease by AdaptativeEthaParameterB
%  'doNothing'    - keep the current eta
%Only reads the globals; the caller is responsible for storing the result.
function answer = adaptativeEtha(func)
    global etha;
    global AdaptativeEthaParameterA;
    global AdaptativeEthaParameterB;
    if strcmp(func, 'increaseEtha')
        answer = etha + AdaptativeEthaParameterA;
    elseif strcmp(func, 'decreaseEtha')
        answer = etha - AdaptativeEthaParameterB*etha;
    elseif strcmp(func, 'doNothing')
        answer = etha;
    end
end

%keep the +0.1 (original note)
%when the error is large, arctanh is used (original note)
%Delta for the output layer: g'(h) scaled by the output error.
function answer = firstDelta(activationFunc, h, output, expectedOutput)
    outputError = expectedOutput - output;
    answer = gDer(activationFunc, h) * outputError;
end

%Delta for a hidden layer: g'(h) times the deltas of the next layer
%propagated back through that layer's weights (bias column already removed
%by the caller).
function answer = otherDelta(activationFunc, h, W, delta)
    backpropagated = (W' * delta')';
    answer = gDer(activationFunc, h) .* backpropagated;
end

%Half the sum of squared differences between obtained and expected outputs.
%NOTE: this local function shadows the built-in error() within this file.
function totalError = error(outputs, expectedOutputs)
    diffs = outputs - expectedOutputs;
    totalError = 0.5 * sum(diffs .^ 2);
end

%Builds a cell array of zero matrices, one per weight layer, sized to match
%the network's weights. Used to initialize the momentum (previous-update)
%storage for updateW.
function answer=generateDeltasWeightVector(trainSet, expectedOuts)
    global hiddenLayerNeurons;
    hiddenCount = cols(hiddenLayerNeurons);
    %Layer sizes: input layer (without its bias column), then every entry
    %of hiddenLayerNeurons, then the output layer.
    layerSizes(1) = cols(trainSet(1,:))-1;
    for idx=1:hiddenCount
        layerSizes(idx+1) = hiddenLayerNeurons(idx);
    end
    layerSizes(hiddenCount + 2) = cols(expectedOuts(1));
    %One zero matrix per pair of consecutive layers; the +1 column holds
    %the bias weight.
    for idx=1:numel(layerSizes)-1
        answer{idx} = zeros(layerSizes(idx+1), layerSizes(idx)+1);
    end
end

%Activation function, applied element-wise.
%  'tanh' -> tanh(beta*x)
%  'exp'  -> logistic sigmoid 1/(1+exp(-2*beta*x))
%beta is the global gain parameter set by backpropagation.
%Fix: the output variable was named 'ans', which shadows the special
%automatic variable; renamed to 'answer' to match the rest of the file.
%NOTE(review): an unknown activationFunc leaves the output unassigned,
%as before; no otherwise-branch was added because this file shadows the
%built-in error() with a local function.
function answer=g(activationFunc, x)
    global beta;
    switch(activationFunc)
        case 'tanh'
            answer = tanh(beta*x);
        case 'exp'
            answer = 1./(1+exp(-2*beta*x));
    end
end

%Derivative of the activation function g, applied element-wise.
%  'tanh': beta*(1 - tanh(beta*x).^2)
%  'exp' : 2*beta*s.*(1-s) with s = g('exp', x)
%Fix: the output variable was named 'ans', which shadows the special
%automatic variable; renamed to 'answer' to match the rest of the file.
function answer=gDer(activationFunc, x)
    global beta;
    switch(activationFunc)
        case 'tanh'
            answer = beta*(1-g(activationFunc, x).^2);
        case 'exp'
            answer = 2 * beta * g(activationFunc, x) .* (1-g(activationFunc,x));
    end
end