\documentclass[11pt,a4paper,oneside]{article}
\usepackage{fullpage}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage[pdftex]{graphicx}
\usepackage{subfigure}


\begin{document}
\title{Autonomous Agents \\ Assignment 2: Single Agent Learning}
\author{Robbert Iepsma 6139108 \and Chiel Kooistra 5743028  \and Sebastian Dr\"oppelman 5783453 \and Boudewijn Bod\'ewes 6049028}
\date{deadline 21/9/2012/23:59}
\maketitle

\section{Introduction}
In this research we have explored and compared several reinforcement learning techniques and examined their respective performances for different parameter settings. First we explore Q-learning: in section \ref{Q param} we evaluate different values for learning rate $\alpha$ and discount factor $\gamma$, while in section \ref{Q as} we try different values of $\varepsilon$-greedy's $\varepsilon$ and optimistic initialization of the Q-table.
Section \ref{eGreedy vs Softmax} is dedicated to comparing the action selection methods Softmax and $\varepsilon$-greedy. The differences between Q-learning, SARSA, and both on-policy and off-policy Monte Carlo Control are discussed in the last section. All algorithms are applied to an episodic grid world task.

\section{Assignment}

\subsection{Q-learning: learning parameters}
\label{Q param}
In figure \ref{fig:alpha}, the number of turns needed for the 
predator to catch the prey in each episode is plotted. 
The predator uses Q-learning with $\varepsilon$-greedy policy to determine its
actions. The figures contain plots showing different learning rate values 
$\alpha$, for different discount factors. The learning rate determines to what 
extent the newly acquired information will override the old information, and
the discount factor determines the importance of future rewards. The plots show
that using a higher $\alpha$ causes the predator to learn faster and converge to the optimal
action-value function faster. Discount factor $\gamma$ does not have a high influence on the policy, because the same actions will be optimal given a state, but based on lower Q-values. However, when the discount factor is too low, the values for wrong actions and good actions will not diverge enough.
Therefore it will take longer for the predator to converge to the optimal action-value 
function as shown in the plots. 
\begin{figure}[ht!]
    \centering
        \subfigure[$\alpha = 0.1$]{%
            \includegraphics[width=0.5\textwidth]{alpha1}
        }%
        \subfigure[$\alpha = 0.2$]{%
            \includegraphics[width=0.5\textwidth]{alpha2}
        }
        \subfigure[$\alpha = 0.3$]{%
           \includegraphics[width=0.5\textwidth]{alpha3}
        }%
        \subfigure[$\alpha = 0.4$]{%
           \includegraphics[width=0.5\textwidth]{alpha4}
        }
        \subfigure[$\alpha = 0.5$]{%
           \includegraphics[width=0.5\textwidth]{alpha5}
        }
    \caption{Convergence of Q-learning for different values of $\alpha$ and $\gamma$}
    \label{fig:alpha}
\end{figure}

\subsection{Q-learning: action selection}
\label{Q as}
Figure \ref{fig:epsilon} contains plots showing different values for exploration
rate $\varepsilon$ and the optimistic initialization of the Q-table. During these
tests, the values for $\alpha$ and the discount factor are both 0.5. We experimented 
with $\varepsilon$ values of 0, 0.01, 0.1 and 0.2. We chose these values because we wanted
to see the difference between a high $\varepsilon$ and a small $\varepsilon$, but also
wanted to see what happens when there is no exploration at all, and the difference
between a high and a very high $\varepsilon$, without making $\varepsilon$ so high that the
predator's movement becomes random. We experimented with Q-initialization values of 
0, 5, 10, 20 and 100. We chose these values scaled to the reward of capturing 
the prey. When the Q-initialization value equals zero, the predator will keep making
random moves until a reward is obtained. This causes its wrong moves not to be
penalized, and thus increases the convergence time. When the Q-initialization value is
very high compared to the reward, even if the reward is obtained, that action value will
still be reduced a lot compared to the other values. Therefore the next time the predator
enters that state, the rewarded action will not be the optimal action until all other
actions are also reduced in value, which increases the convergence time. When $\varepsilon$ 
equals zero, there will be no exploration at all, and the predator will keep walking the 
first correct path found, causing some states not to be explored correctly and to contain
wrong values. When no exploration is used, once an optimal policy is found, fewer turns are needed
to catch the prey since no non-greedy actions will be taken. 

\begin{figure}[ht!]
    \centering
        \subfigure[$\varepsilon = 0$]{%
            \includegraphics[width=0.5\textwidth]{epsilon1}
        }%
        \subfigure[$\varepsilon = 0.01$]{%
            \includegraphics[width=0.5\textwidth]{epsilon2}
        }
        \subfigure[$\varepsilon = 0.1$]{%
           \includegraphics[width=0.5\textwidth]{epsilon3}
        }%
        \subfigure[$\varepsilon = 0.2$]{%
           \includegraphics[width=0.5\textwidth]{epsilon4}
        }
    \caption{Convergence of Q-learning for different values of $\varepsilon$ and different initialization values for Q}
    \label{fig:epsilon}
\end{figure}

\subsection{Q-learning: Softmax versus $\varepsilon$-greedy}
\label{eGreedy vs Softmax}
In figure \ref{fig:softmax} we see that $\varepsilon$-greedy converges faster and better than Softmax. We had initially expected Softmax to perform better, as it should explore less over time given the fact that optimal and suboptimal Q-values should diverge more over time. Apparently this did not happen, presumably because the Q-values did not differ enough.
Another possibility is that the value for $\tau$ (set to 1) was too high, but the way our program handles large numbers resulted in errors, which prevented us from verifying this hypothesis.
\begin{figure}[ht!]
    \centering
    \includegraphics[width=0.7\textwidth]{egreedysoft}
    \caption{Convergence of Q-learning using Softmax and $\varepsilon$-greedy action selection}
    \label{fig:softmax}
\end{figure}


\subsection{Comparing the different algorithms}
The algorithms we examine can be divided into two groups: on-policy and off-policy methods. The on-policy algorithms evaluate the policy they follow, whereas off-policy algorithms can evaluate any policy, typically a greedy one. This results in a Q-table that does not correspond to the policy being followed but rather to the one being evaluated. In some cases, such as the cliff problem, this can result in a suboptimal policy. In our world there is only one state with a non-zero reward, and none with a negative reward; thus the converged Q-tables result in the same optimal policies, as can be seen in figure \ref{fig:algComp}. 

Where SARSA and Q-learning only back up one step, the Monte Carlo methods back up the entire episode---that is, off-policy Monte Carlo only backs up the part of the episode following and including the last off-policy action.   
In the first episode Monte Carlo will assign values to all visited state-action pairs, whereas SARSA and Q-learning only update state-action pairs when they are visited, so that they cannot use future gain, only expected gain. If all initial values were set to 0, this would mean only the last action is updated in the first episode. Later the reward will be propagated through the Q-table whenever a non-zero Q-value is found. When the state space is big, it would take at least as many episodes as the minimal number of actions needed to reach a terminal state before the Q-values for the initial state and actions can be updated. 
This is when Monte Carlo would have an edge over SARSA and Q-learning by quickly obtaining an admissible policy, but it can also get stuck in a local optimum, as it tends to follow the actions that have been taken before.

As our implementation does not use a learning rate, Monte Carlo does not converge to the same values as the other algorithms.
\begin{figure}[ht!]
    \centering
    \includegraphics[width=0.7\textwidth]{sarsaMConP}
    \caption{Convergence of Q-learning, SARSA, and Monte Carlo}
    \label{fig:algComp}
\end{figure}

\section{Conclusion}
The experiments have shown that bootstrapping methods work better in this grid world task than Monte Carlo. We also saw that $\varepsilon$-greedy value selection worked better than Softmax, which may be attributed to the limitations of our implementation.

\end{document}
