%[w12,w23,E_avg] = mlp2_train(x,d,h,eta,alpha,epochs,phi,dphi) trains a 2-layer 
%neural network.
%
%Outputs:
%w12 - synaptic weights matrix for layer 1.
%w23 - synaptic weights matrix for layer 2.
%E_avg - average squared error energy vector (contains records for all 
%epochs).
%
%Inputs:
%x - input matrix (size [(number of examples) x (input size)]).
%d - desired output matrix (size [(number of examples) x (output size)]).
%h - hidden layer size.
%eta - learning rate.
%alpha - momentum constant.
%epochs - number of epochs.
%phi - activation function.
%dphi - activation function derivative.

function [w12,w23,E_avg] = mlp2_train(x,d,h,eta,alpha,epochs,phi,dphi)
    %Trains a 2-layer (one hidden layer) perceptron with online (stochastic)
    %backpropagation and momentum. One randomly chosen example is presented
    %per epoch. Weights are initialized uniformly in [0,1).
    ne = size(x,1); %number of input examples
    m = [size(x,2),h,size(d,2)]; %[input size, hidden layer size, output size]
    w12 = rand(m(2),m(1)); %synaptic weights matrix for layer 1
    w23 = rand(m(3),m(2)); %synaptic weights matrix for layer 2
    dw12_old = zeros(m(2),m(1)); %previous weight correction (momentum term), layer 1
    dw23_old = zeros(m(3),m(2)); %previous weight correction (momentum term), layer 2
    E = zeros(1,epochs); %squared error energy preallocation
    E_avg = zeros(1,epochs); %average squared error energy preallocation

    for epoch = 1:epochs
        i = randi(ne); %index of the randomly selected training example
        xi = x(i,:); %selected input example (1 x m(1)); inputs x,d stay untouched
        di = d(i,:); %corresponding desired output (1 x m(3))

        %Forward pass
        v1 = (w12*xi.').'; %induced local field of layer 1 (1 x m(2))
        y1 = phi(v1); %output of layer 1
        v2 = (w23*y1.').'; %induced local field of layer 2 (1 x m(3))
        y2 = phi(v2); %output of layer 2 (network output)
        e = di - y2; %error signal

        %Backward pass
        delta2 = e.*dphi(v2); %local gradients for the output layer
        %Hidden-layer deltas must be backpropagated through the PRE-update
        %output weights, so delta1 is computed BEFORE w23 is corrected
        %(the original code updated w23 first, which deviates from the
        %standard backpropagation gradient).
        delta1 = dphi(v1).*(delta2*w23); %local gradients for the hidden layer (1 x m(2))

        %updating layer 2: dw23(k,j) = alpha*old + eta*delta2(k)*y1(j)
        dw23 = alpha*dw23_old + eta*(delta2.'*y1); %weight correction matrix for layer 2
        w23 = w23 + dw23; %correcting
        dw23_old = dw23; %memorizing correction matrix for next epoch

        %updating layer 1: dw12(j,i) = alpha*old + eta*delta1(j)*xi(i)
        dw12 = alpha*dw12_old + eta*(delta1.'*xi); %weight correction matrix for layer 1
        w12 = w12 + dw12; %correcting
        dw12_old = dw12; %memorizing correction matrix for next epoch

        %Error estimation
        E(epoch) = 1/2*sum(e.^2); %instantaneous squared error energy
        E_avg(epoch) = sum(E(1:epoch))/epoch; %running average over epochs so far
    end
end