\documentclass[10pt]{article}
\usepackage[margin=1in]{geometry}
\usepackage[utf8]{inputenc}
\usepackage{amsmath}
\usepackage{framed, color}
\usepackage{url}
\definecolor{shadecolor}{rgb}{1,1,1}
\usepackage{graphicx}
%\usepackage{subcaption}
\usepackage{listings}
\usepackage{natbib}
\lstset{ %
language=R,                % choose the language of the code
basicstyle=\footnotesize,       % the size of the fonts that are used for the code
numbers=left,                   % where to put the line-numbers
numberstyle=\footnotesize,      % the size of the fonts that are used for the line-numbers
stepnumber=1,                   % the step between two line-numbers. If it is 1 each line will be numbered
numbersep=7pt,                  % how far the line-numbers are from the code
backgroundcolor=\color{white},  % choose the background color. You must add \usepackage{color}
showspaces=false,               % show spaces adding particular underscores
showstringspaces=false,         % underline spaces within strings
showtabs=false,                 % show tabs within strings adding particular underscores
frame=single,           % adds a frame around the code
tabsize=2,          % sets default tabsize to 2 spaces
captionpos=b,           % sets the caption-position to bottom
breaklines=true,        % sets automatic line breaking
breakatwhitespace=false,    % sets if automatic breaks should only happen at whitespace
escapeinside={(*@}{@*)},          % if you want to add a comment within your code
xleftmargin=.5in,
xrightmargin=.25in
}

\title{Week 3: Perceptron and Multi-layer Perceptron}
\author{Phong Le, Willem Zuidema}

\begin{document}
\lstset{language=R}
\renewcommand{\lstlistingname}{Code}

\maketitle
Last week we studied two famous biological neuron models, the 
FitzHugh--Nagumo model and the Izhikevich model. This week, we will first 
explore another model, which, though less biological, is computationally 
very practical and widely used in \textit{artificial intelligence}, 
namely the \textit{perceptron}. 
Then, we will see how to combine many neurons to build complex neural 
networks called multi-layer perceptrons. 

\paragraph{Required Library} We'll use the library RSNNS\footnote{\url{
http://cran.r-project.org/web/packages/RSNNS/index.html}}, which is installed 
by typing \texttt{install.packages("RSNNS")}.


\paragraph{Required R Code} At \url{http://www.illc.uva.nl/LaCo/clas/fncm13/assignments/computerlab-week3/} 
you can find the R-files you need for this exercise.


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Classification and Regression}
\label{section classification and regression}
Many tasks in artificial intelligence can be cast as 
\textit{supervised learning}, in which, given a \textit{training dataset}
$\{(x_1,y_1),(x_2,y_2),\dots,(x_n,y_n)\}$, we need to find a function
$y=f(x)$ such that the function is applicable to unknown patterns $x$.
Based on the nature of $y$, we can classify those tasks into two classes:

\paragraph{Classification} is to assign a predefined label 
to an unknown pattern. For instance, given a picture of an animal, 
we need to identify if that picture is of a cat, or a dog, or a mouse, 
etc. If there are two categories (or two classes), the problem is 
called binary classification; otherwise, it is multi-class classification.
For the binary classification problem, there is a special case, where 
patterns of the two classes are perfectly separated by a hyper-plane (see
Figure~\ref{fig linear sep}). We call the phenomenon 
\textit{linear separability}, and the hyper-plane \textit{decision 
boundary}.
\begin{figure}[h!]
    \centering
    \includegraphics[width=0.4\textwidth]{linear_sep.png}
    \caption{Example of linear separability.}
    \label{fig linear sep}
\end{figure}


\paragraph{Regression} differs from classification in that what we
need to assign to an unknown pattern is a \textit{real number}, not 
a label. For instance, given the height and age of a person, can we 
infer his/her weight?

\section{Perceptron}
\label{section perceptron}

A perceptron is a simplified neuron receiving inputs as a vector of real
numbers, and outputting a real number (see Figure~\ref{fig perceptron}). 
Mathematically, a perceptron is represented by the following equation
\begin{equation*}
    y = f(w_1x_1 + w_2x_2 + \dots + w_nx_n + b) = f(\mathbf{w}^T \mathbf{x} + b)
\end{equation*}
where $w_1,...,w_n$ are weights, $b$ is a bias, $x_1,...,x_n$ are inputs, 
$y$ is an output, and $f$ is an activation function. In this section, we will 
use the threshold binary function (see Figure~\ref{fig threshold bin})
\begin{equation*}
    f(z) = \begin{cases}
                1 & \text{if } z > 0 \\
                0 & \text{otherwise}
           \end{cases}
\end{equation*}

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.3\textwidth]{perceptron.png}
    \caption{Perceptron (from 
    \protect \url{http://en.wikipedia.org/wiki/File:Perceptron.svg}).}
    \label{fig perceptron}
\end{figure}

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.5\textwidth]{thresbin.png}
    \caption{Threshold binary activation function.}
    \label{fig threshold bin}
\end{figure}

\subsection{Prediction} 
A new pattern $\mathbf{x}$ will be assigned the label 
$\hat{y} = f(\mathbf{w}^T\mathbf{x} + b)$. The mean square error (MSE) and 
classification accuracy on a sample 
$\{(\mathbf{x}_1,y_1),...,(\mathbf{x}_m,y_m)\}$ are respectively defined as
\begin{equation*}
    \text{error} = \frac{1}{m} \sum_{i=1}^m (y_i - \hat{y}_i)^2
    \;\;\;
    \text{accuracy} = \frac{1}{m} \sum_{i=1}^m I_{y_i}(\hat{y}_i)
\end{equation*}
where $I_u(v)$ is an indicator function, which returns 1 if $u=v$ and 0 
otherwise.

\subsection{Training} 
Traditionally, a perceptron is trained in an 
online-learning manner %(see Code~\ref{code per train})% 
with the delta rule: 
we randomly pick an example $(\mathbf{x},y)$ and update the weights and bias as follows
\begin{align}
    \mathbf{w}_{new} &\leftarrow \mathbf{w}_{old} + \eta (y - \hat{y}_{old})\mathbf{x} \\
    b_{new} &\leftarrow b_{old} + \eta (y-\hat{y}_{old})
    \label{equation update w}
\end{align}
where $\eta$ is a learning rate ($0<\eta<1$), $\hat{y}_{old}$ is the prediction based 
on the old weights and bias. 
%\begin{lstlisting}[caption=Perceptron Online training algorithm, label=code per train]
%for (i in 1:maxit) {
%    randomly pick an example (x,y)
%    update the weights and bias by using Equation (*@\ref{equation update w}@*).
%}
%\end{lstlisting}
Intuitively, we only update the weights and bias if our 
prediction $\hat{y}_{old}$ 
is different from the true label $y$, and the amount of update is (negatively, 
in the case $y=0$) proportional to $\mathbf{x}$. 

To see why it could work, let us apply the 
updated weights and bias to that example
\begin{align*}
    \mathbf{w}_{new}^T \mathbf{x} + b_{new} &=(\mathbf{w}_{old} + \eta (y - \hat{y}_{old})\mathbf{x})^T \mathbf{x} + b_{old} + \eta (y-\hat{y}_{old}) \\
    &= \mathbf{w}_{old}^T \mathbf{x} + b_{old} + \eta(y-\hat{y}_{old})(\|\mathbf{x}\|^2 + 1)
\end{align*}
Here, let's assume that $\hat{y}_{old}=0$; then $\mathbf{w}_{old}^T \mathbf{x} + b_{old} \le 0$.
If $y=0$, nothing happens. Otherwise, $\eta(y-\hat{y}_{old})(\|\mathbf{x}\|^2 + 1) > 0$, and 
therefore $\mathbf{w}_{new}^T \mathbf{x} + b_{new} > \mathbf{w}_{old}^T \mathbf{x} + b_{old}$. 
In other words, the update pulls the decision boundary in a direction 
that makes the prediction on the example `less' incorrect 
(see Figure~\ref{fig update w}).
\citet{novikoff1962convergence} proves that if the training dataset is linearly separable,
the algorithm is guaranteed to find a hyperplane that correctly separates the training dataset 
(i.e., $\text{error} = 0$) in a finite number of update steps. Otherwise, 
the training won't converge.

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.4\textwidth]{update_w.png}
    \caption{Example of weight update. The chosen example is enclosed in 
    the green box. The update pulls the plane in a direction that makes 
    the prediction on the example `less' incorrect.}
    \label{fig update w}
\end{figure}

%The file `perceptron.R' provides you with the \texttt{perceptron}
%function for training a perceptron
%\begin{lstlisting}
%# online-learning perceptron
%# input:
%#   x: a n*d matrix in which each row is a datum point
%#   y: a n*1 matrix in which each row is the target of x[i,]
%#   maxit: the number of iterations for training
%#   learn.rate: the learning rate for training
%#   animation: TRUE for plotting examples and hyperplan (ONLY when d == 2)
%#   ani.step: the graph is shown each ani.step
%#   ani.delay: the delay time (sec) between two plots
%# output: list(weights, bias, errors)
%perceptron <- function(x, y, maxit = 100, learn.rate = 0.1,
%                    animation = FALSE, ani.step = 10, ani.delay = 0.5)
%\end{lstlisting}
%where \texttt{learn.rate} is $\eta$ in Equation~\ref{equation update w}.

\begin{framed}
Exercise \ref{section perceptron}.1: 
We have set up an experiment for the logic operation OR in the 
file `perceptron\_logic\_opt.R'. You should see 
\begin{verbatim}
x <- t(matrix(c(
                0,0,
                0,1,
                1,0,
                1,1),
        2,4))

# OR #
y <- matrix(c(
                0,
                1,
                1,
                1),
        4, 1)
\end{verbatim}
which are mathematically written 
\begin{align*}
    \mathbf{x}_1 = (0,0)&\;\;\; y_1 = 0 \\
    \mathbf{x}_2 = (0,1)&\;\;\; y_2 = 1 \\
    \mathbf{x}_3 = (1,0)&\;\;\; y_3 = 1 \\
    \mathbf{x}_4 = (1,1)&\;\;\; y_4 = 1
\end{align*}
and parameters 
\begin{verbatim}
maxit = 25			# number of iterations
learn.rate = 0.1	# learning rate
stepbystep = TRUE	# the program will output intermediate results on R console
animation = TRUE
\end{verbatim}
Now, typing \texttt{source("perceptron\_logic\_opt.R")} to execute the file. 
Firstly, you will see the below in your R console
\begin{verbatim}
------ init -----
weights  0 0
bias  0
learning rate:  0.1

pick x[ 1 ] =  0 0
\end{verbatim}
then compute yourself
\begin{itemize}
	\item true target $y$: ..............
	\item prediction $\hat{y}$: ..............
	\item weights after update: ..............
	\item bias after update: ..............
\end{itemize}
Press Enter to see if your computation is correct or not. 
At the same time, a plot will appear to inform you which 
example (black circle) is being taken, and what the current 
decision boundary looks like. Repeat this until the program finishes. 
\end{framed}

\begin{framed}
Exercise \ref{section perceptron}.2: Repeat the exercise 
\ref{section perceptron}.1 for the XOR operation. Before that, 
you need to open the file `perceptron\_logic\_opt.R' to change \texttt{y} 
such that the dataset expresses the XOR operation.
What could you conclude about the convergence of the training? 
Explain why.
	
\end{framed}


\begin{framed}
Exercise \ref{section perceptron}.3: In this exercise, you will build a perceptron
for a `breast cancer' classification task. A dataset is provided in the file 
`breast-cancer.dat.shuf' which contains 683 rows (i.e., cases) with the following
format 
\begin{lstlisting}
label   feature_1   feature_2   ...     feature_10
\end{lstlisting}

You are required to play with the file `perceptron\_breast\_cancer.R'.
\begin{itemize}
	\item Open the file and set values for \texttt{maxit} (say, 100) 
	and \texttt{learn.rate} (say, 0.1).

	\item Execute the file (\texttt{source("perceptron\_breast\_cancer.R")}).
    
    \item Report what you get.
    
    \item Train the perceptron and report what you get again. 
    Do you get the same results? Why or why not?
    
    \item Look at the graph showing the error at each iteration, explain why there are 
    many peaks.
\end{itemize}
\end{framed}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Multi-layer Perceptron}
\label{section mlp}

The perceptron model presented above is very limited: it is theoretically applicable 
only to linearly separable data. In order to handle non-linearly separable data, 
the perceptron is extended to a more complex structure, namely the multi-layer perceptron
(MLP).
An MLP is a neural network in which neuron layers are stacked such  
that the output of a neuron in a layer is only allowed to be an input to neurons in 
the layer above (see Figure~\ref{fig mlp}).
\begin{figure}[h!]
    \centering
    \includegraphics[width=0.6\textwidth]{multil_perceptron.png}
    \caption{A 3-layer perceptron with 2 hidden layers (from 
    \protect \url{http://www.statistics4u.com/fundstat_eng/cc_ann_bp_function.html}.)}
    \label{fig mlp}
\end{figure}

It turns out that, if the activation functions of those neurons are non-linear, 
such as the sigmoid function (or logistic function) (see Figure~\ref{fig sigmoid})
\begin{equation*}
    sigm(z) = \frac{1}{1 + e^{-z}}
\end{equation*}
an MLP is capable of capturing highly non-linear structure in data: \citet{hornik1989multilayer} 
proves that we can approximate any continuous function to an arbitrarily small 
error by using complex-enough MLPs.
\begin{figure}[h!]
    \centering
    \includegraphics[width=0.5\textwidth]{sigmoid.png}
    \caption{Sigmoid activation function.}
    \label{fig sigmoid}
\end{figure}

Let's denote by $w_{li,(l+1)j}$ the weight of the link from the $i$-th neuron in the $l$-th layer to
the $j$-th neuron in the $(l+1)$-th layer; then we can formalize 
the $i$-th neuron in the $l$-th layer by the equation 
\begin{equation}
    y_{li} = f_{li}(z_{li}) \;;\; z_{li} = \sum_{j=1}^{n_{l-1}} w_{(l-1)j,li}y_{(l-1)j} + b_{li}
    \label{equation yli}
\end{equation}
where $y_{li}, f_{li}, b_{li}$ are respectively its output, activation function,
and bias; $n_l$ is the number of neurons in the $l$-th layer. (Note that 
for convenience, we denote $y_{0i} \equiv x_i$.) 
Informally speaking, a neuron is activated by the sum of  
weighted outputs of the neurons in the lower layer.

\subsection{Prediction} 
Prediction is very fast thanks to the \textit{feedforward} algorithm
(%see Code~\ref{code feedforward}, %
Figure~\ref{fig ff bp}-left). 
The algorithm says that, given $\mathbf{x}$, 
we firstly compute the outputs of the neurons in the first layer; then we 
compute the outputs of the neurons in the second layer; and so on until 
we reach the top layer.
%\begin{lstlisting}[caption=Feedforward algorithm,label=code feedforward]
%for (l in 1:L) {
%    for (i in 1:n[l]) {
%        compute (*@$y_{li}$@*) by using Equation (*@\ref{equation yli}@*)
%    }
%}
%\end{lstlisting}

\begin{figure}[h!]
    \centering
    \includegraphics[width=1\textwidth]{feedforward_backprop.png}
    \caption{Feedforward (left) and backpropagation (right).}
    \label{fig ff bp}
\end{figure}

\subsection{Training} 
Training an MLP is to minimize an objective function w.r.t. its 
parameters (i.e., weights and biases) which is 
related to the task that the MLP is used for. For instance, for the
binary classification task, the following objective function is widely used
\begin{equation*}
    E(\theta) = \frac{1}{n} \sum_{(\mathbf{x},y) \in D} \big( y - \hat{y} \big)^2
\end{equation*}
where $D$ is a training dataset, $\hat{y}$ is the output of the MLP given an
input $\mathbf{x}$, and $\theta$ is its set of weights and biases. In order to minimize
the objective function $E(\theta)$, we will use the \textit{gradient descent}
method (see Figure~\ref{fig gradient descent}) which says that an amount 
of update for a parameter is negatively proportional to the gradient at its 
current value.
\begin{figure}[h!]
    \centering
    \includegraphics[width=0.7\textwidth]{gradient_descent.png}
    \caption{Illustration for the gradient descent method. The blue line
    is the tangent at the current value of the parameter $w$. 
    If we update $w$ by subtracting an amount proportional to the gradient 
    at that point, the value of $E$ will be pushed along the arrow and hence 
    decrease. However, this method only guarantees to converge to a local 
    minimum.}
    \label{fig gradient descent}
\end{figure}

The center point of the gradient descent method is to compute the gradient
$\frac{\partial E}{\partial w} \;\;\; \text{for all } w \in \theta$
which is easily done with the chain rule:
\begin{equation}
    \frac{\partial E}{\partial z_{Li}} = 
            \frac{\partial E}{\partial y_{Li}} 
            \frac{\partial y_{Li}}{\partial z_{Li}}
    \label{equation dE/dzLi}
\end{equation}
\begin{equation}
    \frac{\partial E}{\partial z_{li}} = 
    \sum_{j} \frac{\partial E}{\partial z_{(l+1)j}} 
            \frac{\partial z_{(l+1)j}}{\partial z_{li}} =
    \sum_{j} \frac{\partial E}{\partial z_{(l+1)j}} 
            w_{li,(l+1)j}\frac{\partial y_{li}}{\partial z_{li}}
    \label{equation dE/dzli}
\end{equation}
\begin{equation}
    \frac{\partial E}{\partial w_{li,(l+1)j}} = 
            \frac{\partial E}{\partial z_{(l+1)j}} 
            \frac{\partial z_{(l+1)j}}{\partial w_{li,(l+1)j}} =
            \frac{\partial E}{\partial z_{(l+1)j}} y_{li}
    \label{equation dE/dw}
\end{equation}
That's the main idea of the \textit{back-propagation} algorithm (see 
Figure~\ref{fig ff bp}-right).
%The \textit{back-propagation} algorithm in Code~\ref{code bp} (see 
%Figure~\ref{fig ff bp}-right) is for doing that.
%\begin{lstlisting}[caption=Backpropagation algorithm, label=code bp]
%for (i in 1:n[L]) {
%    compute (*@$\partial E / \partial z_{Li}$@*) by using Equation (*@\ref{equation dE/dzLi}@*)
%}
%for (l in L-1:1) {
%    for (i in 1:n[l]) {
%        compute (*@$\partial E / \partial z_{li}$@*) by using Equation (*@\ref{equation dE/dzli}@*)
%        for (j in 1:n[l+1] {
%            compute (*@$\partial E / \partial w_{li,(l+1)j}$@*) by using Equation (*@\ref{equation dE/dw}@*)
%        }
%    }
%}
%\end{lstlisting}

%Wrapping up, the algorithm for training a MLP is as follows
%\begin{lstlisting}[caption=Algorithm to train MLPs, label=code train mlp]
%# T: number of iterations
%# n: batch size
%# (*@$\eta$@*): learning rate
%randomly initial (*@$\theta$@*)
%for (t in 1:T) {
%    draw a set n-example (*@$D$@*) from the training dataset
%    (*@$\Delta \theta  \leftarrow 0$@*)
%    for each (*@$(x,y) \in D$@*) {
%        compute (*@$y_{li} \; \text{for all } l,i$@*) by using the feedforward algorithm
%        compute (*@$\partial E / \partial \theta$@*) by using the backpropagation algorithm
%        (*@$\Delta \theta \leftarrow \Delta \theta + \partial E / \partial \theta$@*)
%    }
%    (*@$\Delta \theta \leftarrow \Delta \theta / n$@*)
%    (*@$\theta \leftarrow \theta - \eta \Delta \theta$@*)
%}
%\end{lstlisting}

\begin{framed}
Exercise \ref{section mlp}.1(!): You may notice that none of the formulas above
mentions how to compute $\partial E / \partial b_{li}$ for all $l,i$.
That's your task.
\end{framed}

\begin{framed}
Exercise \ref{section mlp}.2(!): Compute $y_{li}$ by using the feedforward algorithm,
and the gradients w.r.t. $w_{li,(l+1)j}, b_{li}$ by using the backpropagation algorithm, for all $l,i,j$,
for the 2-layer MLP shown in Figure~\ref{fig mlp exercise}, in which 
all biases are 0.5 and all the neurons have the sigmoid activation function, 
with $\mathbf{x} = (-0.5, 0.2), y = 1$.
\end{framed}

The RSNNS library provides us with a function named \texttt{mlp} for training 
a MLP. Carefully read the description for that function in the 
library document
(\url{http://cran.r-project.org/web/packages/RSNNS/RSNNS.pdf})
because you will use it in the next exercise. Note that, its arguments 
\texttt{x,y} are similar to the ones of the \texttt{perceptron} function
above. If \texttt{learnFunc="Std\_Backpropagation"} (which is the default), then 
\texttt{learnFuncParams} plays the role of \texttt{learn.rate}.
The default values for the other parameters are almost perfect for us, 
so you need not change them.

\begin{figure}[h!]
    \centering
    \includegraphics[width=0.3\textwidth]{mlp.png}
    \caption{MLP for Exercise \ref{section mlp}.2. All the biases are 0.5. 
    All the neurons have the sigmoid activation function.}
    \label{fig mlp exercise}
\end{figure}

\begin{framed}
Exercise \ref{section mlp}.3: Use the file `mlp\_logic\_opt.R'
for Exercise \ref{section perceptron}.1 and 2 (OR and XOR)
(you don't need to compute by yourself anything).
\begin{itemize}
	\item Open the file and set values for \texttt{x} and \texttt{y} (training dataset).

	\item Set values for \texttt{maxit} (say, 100) and \texttt{learn.rate} (say, 0.1),
	and \texttt{size} (say, \texttt{c(5)}, i.e. one hidden layer with 5 neurons).

	\item Execute the file (\texttt{source("mlp\_logic\_opt.R")}).
\end{itemize}
Find a value for \texttt{maxit} such that the accuracy is 1.
What is \textit{weighted SSE}?

\end{framed}

\begin{framed}
Exercise \ref{section mlp}.4: Use the file `mlp\_breast\_cancer.R'
for Exercise \ref{section perceptron}.3
\begin{itemize}
	\item Open the file and set values for \texttt{maxit} (say, 100) and \texttt{learn.rate} (say, 0.1),
	and \texttt{size} (say, \texttt{c(5)}, i.e. one hidden layer with 5 neurons).

	\item Execute the file (\texttt{source("mlp\_breast\_cancer.R")}).
\end{itemize}

\end{framed}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Submission}
You have to submit a file named `your\_name.pdf' for those exercises 
requiring explanations and math solutions. Note that exercises marked 
with (!) do not need to be submitted. The deadline is 15:00 Monday 18 Nov.
If you have any question, contact Phong Le (p.le@uva.nl).

\bibliographystyle{apalike}
\bibliography{ref}
\end{document}
