
function [W1 b1 W2 b2 listE_train listE_val] = trainMLP(Xtrain, Ytrain, Xval, Yval, h1, eta, es_delay, plot_result, rseed)
%TRAINMLP Trains a two-layer MLP by momentum SGD with early stopping.
%
% Runs stochastic gradient descent with momentum over the training set,
% evaluates the log-error on both sets after each epoch, and returns the
% parameters that achieved the lowest validation error.
%
% Notations:	Np_train: Number of points (training set)
%				Np_val: Number of points (validation set)
%				Nd: Number of features (dimension)
%
% INPUT:
% Xtrain		-- Training set (Np_train x Nd)
% Ytrain		-- Training point labels (Np_train x 1)
% Xval			-- Validation set (Np_val x Nd)
% Yval			-- Validation point labels (Np_val x 1)
% h1			-- (optional) Hidden-layer size parameter; the hidden layer
%					holds 2*h1 units (default 10)
% eta			-- (optional) learning rate (default 0.01)
% es_delay		-- (optional) Early stopping delay (training stops if the
%					validation minimum does not improve during this many
%					epochs) (default 10)
% plot_result	-- (optional) prints per-epoch errors and plots the error
%					curves (default 0)
% rseed			-- (optional) random seed; when omitted a fixed seed (2128)
%					is used so runs stay reproducible
%
% OUTPUT:
% W1			-- Final W1 (2*h1 x Nd)
% b1			-- Final b1 (2*h1 x 1)
% W2			-- Final W2 (1 x h1)
% b2			-- Final b2 (double)
% listE_train	-- Training-set log-error per epoch (1 x Nepochs)
% listE_val		-- Validation-set log-error per epoch (1 x Nepochs)
%
% USAGE:
% [W1 b1 W2 b2 listE_train listE_val] = trainMLP(Xtrain, Ytrain, Xval, Yval, h1, eta, es_delay, plot_result, rseed)

%% Default arguments
if nargin < 5, h1 = 10; end
if nargin < 6, eta = 0.01; end
if nargin < 7, es_delay = 10; end
if nargin < 8, plot_result = 0; end

%% Random seed
% BUGFIX: the fixed seed used to be applied unconditionally AFTER rng(rseed),
% which silently discarded any user-supplied seed. Apply the fixed default
% only when no seed is given.
if nargin >= 9
	rng(rseed);
else
	rng(2128);
end

%% Training and validation sets
% Transpose so that each column is one sample (Nd x Np).
x = Xtrain';
t = Ytrain';
x2 = Xval';
t2 = Yval';
Nf = size(x,1);			% number of features
Ntrain = size(x,2);		% number of training samples

%% Initialize MLP
% Small Gaussian initialization around zero.
% NOTE(review): W2 is 1 x h1 while the hidden layer has 2*h1 units; this may
% be intentional inside MLP() (e.g. paired units) -- confirm against MLP.m.
W1 = normrnd(0, 0.1, 2*h1, Nf);
b1 = normrnd(0, 0.1, 2*h1, 1);
W2 = normrnd(0, 0.1, 1, h1);
b2 = normrnd(0, 0.1);
% Momentum terms, initialized to zero with matching shapes.
deltaW1 = W1*0;
deltaW2 = W2*0;
deltab1 = b1*0;
deltab2 = b2*0;

%% Store minimum values
% Best-so-far parameters on the validation set (returned at the end).
W1_min = W1;
b1_min = b1;
W2_min = W2;
b2_min = b2;
E_min = Inf;
last_change_min = 0;	% epochs since the validation minimum last improved

%% Convergence
eta_ = @(k)eta;			% learning-rate schedule (constant here)
mu = 0.5;				% momentum coefficient
k = 1;					% global update counter (fed to eta_)

%% Main loop
stop = false;
count = 0;				% epoch counter
Niter = 10000000;		% hard cap on the number of epochs
listE_train = [];		% per-epoch training log-error
listE_val = [];			% per-epoch validation log-error
while ~stop

	% Random ordering of the training set (one pass = one epoch)
	order = randperm(Ntrain);

	% FIRST PHASE: one SGD-with-momentum update per training sample
	for i=order

		% Compute gradient on this single sample
		[out gradW1 gradb1 gradW2 gradb2] = MLP(x(:,i), t(i), W1, b1, W2, b2);
		deltaW1 = -eta_(k)*(1-mu)*gradW1 + mu*deltaW1;
		deltaW2 = -eta_(k)*(1-mu)*gradW2 + mu*deltaW2;
		deltab1 = -eta_(k)*(1-mu)*gradb1 + mu*deltab1;
		deltab2 = -eta_(k)*(1-mu)*gradb2 + mu*deltab2;

		% Update parameters
		W1 = W1 + deltaW1;
		W2 = W2 + deltaW2;
		b1 = b1 + deltab1;
		b2 = b2 + deltab2;
		k = k+1;
	end

	% SECOND PHASE: Compute Elog (training points and validation set)

	% Training set
	a = MLP(x, t, W1, b1, W2, b2);
	E_train = logErr(a, t);
	listE_train = [listE_train E_train];

	% Validation points
	a = MLP(x2, t2, W1, b1, W2, b2);
	E_val = logErr(a, t2);
	listE_val = [listE_val E_val];
	if plot_result, fprintf('Elog (train, val) = %f, %f\n', E_train, E_val); end

	% Track the parameters with the lowest validation error so far
	if (E_val < E_min)
		W1_min = W1;
		b1_min = b1;
		W2_min = W2;
		b2_min = b2;
		last_change_min = 0;
		E_min = E_val;
	else
		last_change_min = last_change_min+1;
	end

	% Stopping criterion: epoch cap reached, or no validation improvement
	% for es_delay consecutive epochs (early stopping).
	count = count+1;
	if (count > Niter || last_change_min >= es_delay)
		stop = true;
	end
end

%% Plot
if plot_result
	plotErr(listE_train, listE_val, '');
end

% Return the early-stopping optimum, not the last iterate.
W1 = W1_min;
b1 = b1_min;
W2 = W2_min;
b2 = b2_min;

end