
function [out gradW1 gradb1 gradW2 gradb2] = MLP(x, t, W1, b1, W2, b2)
%MLP Forward pass of a two-layer network with a gated transfer function,
% and (optionally) the gradient of the per-point logistic loss
% Ei = log(1 + exp(-t.*a2)) averaged over the batch.
%
% Notations:	Np: Number of points
%				Nd: Number of features (dimension)
%				h1: Hidden layer's size
%
% INPUT:
% x				-- Features vectors (Nd x Np)
% t				-- Labels in {-1,+1} (1 x Np)
% W1			-- First layer's weight vectors (2*h1 x Nd);
%				   rows are interleaved [odd; even] pairs feeding g
% b1			-- First layer's bias vector (2*h1 x 1)
% W2			-- Second layer's weight vector (1 x h1)
% b2			-- Second layer's bias (double)
%
% OUTPUT:
% out			-- output of the binary classifier (1 x Np)
% gradW1		-- gradient of Ei w.r.t. W1 (2*h1 x Nd)
% gradb1		-- gradient of Ei w.r.t. b1 (2*h1 x 1)
% gradW2		-- gradient of Ei w.r.t. W2 (1 x h1)
% gradb2		-- gradient of Ei w.r.t. b2 (double)
%
% USAGE:
% [out gradW1 gradb1 gradW2 gradb2] = MLP(x, t, W1, b1, W2, b2)

%% Transfer function: g(a1, a2) = a1 * sigmoid(a2), and its two partials.
% The sigmoid-based form of dgy is algebraically identical to
% a_1 .* exp(-a_2) .* (1+exp(-a_2)).^(-2) but avoids the Inf*0 = NaN
% that the naive form produces when a_2 is a large negative number.
sig = @(a)1./(1 + exp(-a));
g   = @(a_1, a_2)a_1 .* sig(a_2);
dgx = @(a_1, a_2)sig(a_2);                            % dg/da_1
dgy = @(a_1, a_2)a_1 .* sig(a_2) .* (1 - sig(a_2));   % dg/da_2

%% Retrieve the dimension and the number of perceptrons in the first layer
%% (divided by two).  h1 is kept as a double: an int16 cast would cap the
%% layer size at 32767 and force integer/double mixing in reshape/zeros.
Nd = size(x, 1);
Np = size(x, 2);
h1 = size(W1, 1)/2;

%% Forward pass
a1 = W1 * x + b1*ones(1,Np); % 2*h1 x Np
a1_ = reshape(a1,2,h1,Np); % 2 x h1 x Np      [odd;even]
a1_ = permute(a1_, [2, 3, 1]); % h1 x Np x 2
z1 = g(a1_(:,:,1), a1_(:,:,2)); % h1 x Np
a2 = W2 * z1 + b2*ones(1,Np); % 1 x Np

% Raw activation is returned; threshold with sign() at the caller if a
% hard {-1,+1} decision is needed.
out = a2;

%% Backward pass of Ei = log(1 + exp(-t.*a2)), averaged over the Np points.
% Gradients are only computed when the caller asks for them.
if nargout > 1
	% dEi/da2 = -t.*exp(-t.*a2)./(1+exp(-t.*a2)) = -t./(1+exp(t.*a2))
	r2 = -t./(1+exp(t.*a2)); % 1 x Np
	gradW2 = r2 * z1'/Np; % 1 x h1
	gradb2 = sum(r2)/Np; % 1 x 1

	% Back-propagate through g: one residual per partial derivative,
	% then re-interleave [odd; even] to match the layout of W1's rows.
	r1 = zeros(h1,Np,2); % h1 x Np x 2
	r1(:,:,1) = (W2' * r2) .* dgx(a1_(:,:,1), a1_(:,:,2)); % h1 x Np   odd
	r1(:,:,2) = (W2' * r2) .* dgy(a1_(:,:,1), a1_(:,:,2)); % h1 x Np    even
	r1 = permute(r1, [3 1 2]); % 2 x h1 x Np
	r1 = reshape(r1, 2*h1, Np); % 2*h1 x Np

	gradW1 = r1 * x'/Np; % 2*h1 x Nd
	gradb1 = sum(r1,2)/Np; % 2*h1 x 1
end

end

% New comment
