% XOR problem solved by a two-layer MLP trained with backpropagation.
% Relies on external functions mlp2_train (training) and mlp2_run (forward pass).
close all;
clear all;
clc;

% Training set: all four XOR input patterns (one per row) and their targets.
x = [0 0; 0 1; 1 0; 1 1];
d = [0;1;1;0];

h      = 2;     % number of hidden-layer neurons
eta    = 0.1;   % learning rate
alpha  = 0.001; % momentum coefficient
epochs = 4000;  % number of training epochs

% Activation function and its derivative.
% LeCun's recommended scaled hyperbolic tangent: phi(v) = 1.7159*tanh(2v/3).
a = 1.7159;
b = 2/3;
% BUG FIX: the original used atanh(), which is inconsistent with dphi below
% (dphi is the derivative of a*tanh(b*v), not a*atanh(b*v)) and produces
% complex values for |b*v| > 1 — hence the real() guard that was bolted on.
% With tanh the pair is mathematically consistent and real() is unnecessary.
phi  = @(v) a*tanh(b*v);
dphi = @(v) a*b*(1 - tanh(b*v).^2);

% Train the network: w12/w23 are the layer weight matrices,
% E_avg is the average error per epoch.
[w12,w23,E_avg] = mlp2_train(x,d,h,eta,alpha,epochs,phi,dphi);

% Present each training example to the trained network and record its response.
n = size(x,1);
y = zeros(n, size(d,2));  % preallocate: one output row per training pattern
for i = 1:n
    y(i,:) = mlp2_run(x(i,:),w12,w23,phi);
end

disp('Training examples:');
disp(x);

disp('Desired outputs for each example:')
disp(d);

disp('Inputs for trained network:');
disp(x);

disp('Outputs of trained network:');
disp(y);

% Learning curve: average error versus epoch.
figure;
plot(1:epochs,E_avg);
grid on;
xlabel('Epochs');
ylabel('<E>');