%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%% ICML 2013 EXAMPLE LATEX SUBMISSION FILE %%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% Use the following line _only_ if you're still using LaTeX 2.09.
%\documentstyle[icml2013,epsf,natbib]{article}
% If you rely on LaTeX2e packages, like most modern people, use this:
\documentclass{article}

% For figures
\usepackage{graphicx} % more modern
%\usepackage{epsfig} % less modern
\usepackage{subfigure} 

% For citations
\usepackage{natbib}

% For algorithms
\usepackage{algorithm}
\usepackage{algorithmic}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}

% As of 2011, we use the hyperref package to produce hyperlinks in the
% resulting PDF.  If this breaks your system, please comment out the
% following usepackage line and replace \usepackage{icml2013} with
% \usepackage[nohyperref]{icml2013} above.
\usepackage{hyperref}

% Packages hyperref and algorithmic misbehave sometimes.  We can fix
% this with the following command.
\newcommand{\theHalgorithm}{\arabic{algorithm}}

% Employ the following version of the ``usepackage'' statement for
% submitting the draft version of the paper for review.  This will set
% the note in the first column to ``Under review.  Do not distribute.''
\usepackage{icml2013} 
% Employ this version of the ``usepackage'' statement after the paper has
% been accepted, when creating the final version.  This will set the
% note in the first column to ``Proceedings of the...''
% \usepackage[accepted]{icml2013}

%\usepackage{amssymb} % already loaded above


\newtheorem{thm}{Theorem}
\newtheorem{lem}{Lemma}
\newtheorem{cor}{Corollary}
\newtheorem{defn}{Definition}[section]


% The \icmltitle you define below is probably too long as a header.
% Therefore, a short form for the running title is supplied here:
\icmltitlerunning{Natural Temporal Difference Learning}

\begin{document} 

\twocolumn[
\icmltitle{Natural Temporal Difference Learning}

% It is OKAY to include author information, even for blind
% submissions: the style file will automatically remove it for you
% unless you've provided the [accepted] option to the icml2013
% package.
\icmlauthor{Your Name}{email@yourdomain.edu}
\icmladdress{Your Fantastic Institute,
            314159 Pi St., Palo Alto, CA 94306 USA}
\icmlauthor{Your CoAuthor's Name}{email@coauthordomain.edu}
\icmladdress{Their Fantastic Institute,
            27182 Exp St., Toronto, ON M6H 2T1 CANADA}

% You may provide any keywords that you 
% find helpful for describing your paper; these are used to populate 
% the "keywords" metadata in the PDF but will not be shown in the document
\icmlkeywords{covariant, natural gradient, sarsa, reinforcement learning}

\vskip 0.3in
]

\begin{abstract} 
In this paper we investigate the application of natural gradients to Bellman error based reinforcement learning algorithms. We present and analyze quadratic and linear time natural Sarsa$(\lambda)$ and residual gradient algorithms. In our primary result, we prove that these natural algorithms are covariant. We conclude with experiments which suggest that the natural algorithms can match or outperform their non-natural counterparts using linear function approximation, and drastically improve upon their non-natural counterparts when using non-linear function approximation.
\end{abstract} 

\section{Introduction}
Reinforcement learning algorithms have been remarkably successful for problems with finite action sets. Recent research has focused on bringing these successes to problems with continuous actions. For these problems, a significant leap in performance occurred when Kakade suggested the application of natural gradients \cite{Amari1998} to policy gradient algorithms \cite{Kakade2002}. This suggestion has resulted in many successful natural gradient based policy search algorithms  \cite{Morimura2005,Peters2008,Bhatnagar2009,Degris2012}.
% Here we'll cite my tech report on bias in natural actor-critics for the camera ready.

Despite the successful applications of natural gradients to reinforcement learning in the context of policy search, it has not been applied to Bellman-error based algorithms like Sarsa$(\lambda)$, which are the \emph{de facto} algorithms for problems with discrete action sets. In this paper we provide a simple quadratic-time natural Sarsa$(\lambda)$ algorithm, show how the idea of compatible function approximation can be leveraged to achieve linear time complexity, and prove that both algorithms are covariant. We conclude with empirical comparisons on two canonical domains (mountain car and cart-pole balancing) and one novel challenging domain (playing Tic-tac-toe using handwritten letters as input).

To save space, when not otherwise specified, we assume the notation of \citet{SuttonBarto}.
% For camera ready, change this to notational standard tech report (ask Phil)
%\subsection{Notation}
%When not otherwise specified, we assume the notation of Sutton and Barto \cite{SuttonBarto}. To save space we do not reproduce the definitions of common symbols like $\pi$ or $Q^\pi$.  A Markov decision process (MDP) is a tuple $M=(\mathcal S, \mathcal A, \mathcal P, \mathcal R, \gamma, d_0)$, where $\mathcal S$ is a set of possible states, $\mathcal A$ is a set of possible actions, $\mathcal P_{ss'}^a$ denotes the probability of moving from $s$ to $s'$ if action $a$ is executed in state $s$, $\mathcal R_{ss'}^a$, $\gamma$ is a discount parameter, and $d_0$ is a distribution of initial states. We assume that $\mathcal A$ is finite, although $\mathcal S$ may not be. For simplicitly, our notation assumes that $\mathcal S$ is countable---we sum over elements of $\mathcal S$. If $\mathcal S$ is continuous, these sums should be replaced with integrals.

\subsection{The Problem with Sarsa$(\lambda)$}
The Sarsa$(\lambda)$ algorithm, particularly with linear function approximation, is perhaps the most popular reinforcement learning algorithm. It is given by the following update rules.
\begin{align}
\label{eqn:sarsaStart}\delta_t =& r_t + \gamma Q_{\theta_t}(s_{t+1},a_{t+1}) - Q_{\theta_t}(s_t,a_t)\\
e_t =& \gamma \lambda e_{t-1} + \frac{\partial Q_{\theta_t}(s_t,a_t)}{\partial \theta_t},\\
\label{eqn:thetaUpdate} \theta_{t+1} =& \theta_t + \alpha \delta_t e_t,
\end{align}
where $Q_{\theta_t}:\mathcal S \times \mathcal A \to \mathbb R$ is a function approximator with parameter vector $\theta_t$ and where $e_0=0$. When combined with linear function approximation using the Fourier basis \cite{Konidaris2011}, Sarsa$(\lambda)$ makes the standard benchmark problems appear trivial. However, Sarsa$(\lambda)$ is not covariant. 

Consider the algorithm at two different levels, as depicted in Figure \ref{fig:fig1}. At one level we can consider how it moves through the space of possible $Q$ functions. At a lower level, we can consider how it moves through two different parameter spaces, each corresponding to a different representation. Although these two representations may produce different update directions in parameter space, we would expect a good algorithm to result in both representations producing the same update direction in the space of $Q$ functions.\footnote{For technical correctness, we must assume that both representations can represent the same set of $Q$ functions.} Such an algorithm would be called \emph{covariant}. Unfortunately, Sarsa$(\lambda)$ is not covariant. This means that the choice of how to represent $Q_{\theta}$ influences the direction that Sarsa$(\lambda)$ moves in the space of $Q$ functions.

\begin{figure}[tbp]
\begin{center}
\includegraphics[width=0.5\columnwidth]{fig1.pdf}
\caption{$Q$-space denotes the space of possible $Q$ functions, while $\theta$ and $h$-space denote two different parameter spaces. That is, $Q_\theta$ and $Q_h$ are two different parameterizations of $Q$ with parameters $\theta$ and $h$, respectively. The circles denote different locations in $\theta$ and $h$-space that correspond to the same $Q$ function. The blue and red arrows denote possible directions that a non-covariant algorithm might attempt to change the parameters, which correspond to different directions in $Q$-space. The purple arrow denotes the direction of update that a covariant algorithm might produce, regardless of the parameterization of $Q$.}
\label{fig:fig1}
\end{center}
\end{figure}


Natural gradients can be viewed as a way to correct the direction of an update to account for a particular parameterization. Although natural gradients do not always result in covariant updates, they frequently do \cite{Bagnell2003}. Formally, consider the direction of steepest ascent of a function $L(w)$ where $L:\mathbb R^n \to \mathbb R$. If we assume that $w$ resides in Euclidean space, then the gradient, $\nabla L(w)$, gives the direction of steepest ascent. However, \citet{Amari1998} showed that if $w$ resides in a Riemannian space with metric tensor $G$, then the direction of steepest ascent is given by $G(w)^{-1}\nabla L(w)$. %NOTE: Amari has the inverse on the G before the w. Not sure which is correct.

To see why it is unreasonable to assume that parameters always lie in Euclidean space, consider any problem that uses linear function approximation. If one feature's magnitude were to be doubled, the gradient with respect to the weight on this feature would double as well. Taken to an extreme, if a single feature is scaled by a very large number, the gradient will be dominated by one term. The opposite behavior is more desirable.

% Mention that FB does this rescaling! Known problem with Sarsa(lambda).

\section{Natural Sarsa$(\lambda)$}
In this section we describe how the intuition behind natural gradients can be applied to Sarsa$(\lambda)$, even though Sarsa$(\lambda)$ does not actually follow the gradient of a loss function. If we assume that the update to $\theta$ in Eq. \ref{eqn:thetaUpdate} is a gradient descent update, then the natural Sarsa$(\lambda)$ update would be
\begin{equation}
\theta_{t+1} = \theta_t + \alpha G(\theta_t)^{-1}\delta_t e_t,
\end{equation}
where $G(\theta_t)$ is the metric tensor for the parameter space.

 In most reinforcement learning applications of natural gradients, the metric tensor is used to correct for the parameterization of a probability distribution. In these cases, \citeauthor{Amari1998} suggests the use of the Fisher information matrix as the metric tensor \cite{Amari1998}.  Here we use natural gradients to correct for the parameterization of $Q_{\theta_t}$, which is not a probability distribution. We must therefore determine an appropriate metric tensor. 

A simple choice would be
\begin{equation}
\label{eqn:GType1}G(\theta_t) = \mbox{E} \left [ \frac{\partial Q_{\theta_t}(s_t,a_t)}{\partial \theta_t}\frac{\partial Q_{\theta_t}(s_t,a_t)}{\partial \theta_t}^\top\right ].
\end{equation}
Intuitively, this metric tensor corrects for the parameterization of $Q_{\theta_t}$. However, recall that Sarsa$(\lambda)$ endeavors to minimize the expected squared Bellman error, $\mbox{E} \left [ \delta_t^2 \right ]$. We could view $\delta_t$ as a function parameterized by $\theta_t$, which suggests the metric tensor:
\begin{align}
G(\theta) =& \mbox{E} \left [ \frac{\partial \delta_t}{\partial \theta_t}\frac{\partial \delta_t}{\partial \theta_t}^\top\right ]\\
=&\mbox{E} \left [ \Phi \Phi^\top\right ],
\end{align}
where $\Phi = \gamma \frac{\partial Q_{\theta}(s_{t+1},a_{t+1})}{\partial \theta} -  \frac{\partial Q_{\theta}(s_{t},a_{t})}{\partial \theta}$. Intuitively, this metric tensor corrects for the parameterization of the TD-error. It also happens to be the Hessian of $\mbox{E}[\delta_t^2]$.\footnote{In this case, the natural gradient is equivalent to Newton's method.} This argument can be repeated with the view that $\delta_t^2$ is parameterized by $\theta_t$ to produce yet another metric tensor, which corrects for the parameterization of the entire loss function. Table \ref{tab} provides the non-exhaustive set of metric tensors that we consider. Notice that all of these metric tensors are of the form 
\begin{equation}
G(\theta_t) = \mbox{E}[UU^\top],
\end{equation}
for some random variable $U$, where at each time step we observe $U$ taking some value $u$. For example, with the metric tensor in Eq. \ref{eqn:GType1},  $u_t=\frac{\partial Q_{\theta_t}(s_t,a_t)}{\partial \theta_t}$.
\begin{center}
\begin{table*}[ht]
{\small
\hfill{}
\begin{tabular}{|c|l|l|c|c|}
\hline
	{\bf GID} & Definition of $u_t$ & Resulting $G(\theta)$ & {\bf Corrects for} & {\bf Notes} \\
\hline
1 &	$u_t=\frac{\partial Q(s_t,a_t)}{\partial \theta}$ 	&	$G(\theta) = \mbox{E}\left[ \frac{\partial Q_{\theta}(s,a)}{\partial \theta} \frac{\partial Q_{\theta}(s,a)}{\partial \theta}^\top \right]$	&	 $Q$ & None. \\
\hline
2 &	$u_t=\frac{\partial \delta_t}{\partial \theta}$ 	&	$G(\theta) = \mbox{E}\left[\Phi \Phi^\top \right]$	&	 TD-error. & None.  \\
\hline
3 &	$u_t=e_t$ 	&	$G(\theta_t) = \mbox{E}\left[ e_te_t^\top \right]$ &	 TD-error (Sarsa) & $e_t$ of Sarsa$(\lambda)$. \\
\hline
4 &	$u_t=e_t=\frac{\partial \left ( R^\lambda-Q_{\theta}(s,a) \right )}{\partial \theta}$ 	&	$G(\theta_t) = \mbox{E}\left[ e_te_t^\top \right]$ &	 TD-error using $\lambda$-return. & $e_t$ of RG$(\lambda)$.\\
\hline
\end{tabular}}
\hfill{}
\caption{Various metric tensors. Notice that GIDs 3 and 4 use the expected value of the eligibility traces from Sarsa$(\lambda)$ and Residual Gradient$(\lambda)$ respectively. Also, $R^\lambda$ is the lambda-return \cite{SuttonBarto}. GID 3 is necessary for a linear-time natural Sarsa$(\lambda)$ algorithm.}
\label{tab}
\end{table*}
\end{center}
\vspace{-.25in}
\section{Natural Residual Gradient$(\lambda)$}
The natural gradient approach to reinforcement learning described above is perhaps better suited to residual gradient algorithms \cite{Baird1995} than Sarsa$(\lambda)$. Residual gradient algorithms follow the gradient of the squared Bellman error. Hence, whereas with Sarsa$(\lambda)$ we apply the idea of natural gradients even though Sarsa$(\lambda)$ does not follow a gradient, natural gradients can be applied directly to residual gradient algorithms.

The residual gradient algorithm with eligibility traces is the same as Sarsa$(\lambda)$ (see Eq. \ref{eqn:sarsaStart}--\ref{eqn:thetaUpdate}), except that the eligibility trace update includes an additional term. To unify these two algorithms, we present the eligibility trace update as
\begin{equation}
e_{t} = \gamma\lambda e_{t-1} +f(s_t,a_t,r_t,s_{t+1},a_{t+1},\theta_t).
\end{equation}
The Sarsa$(\lambda)$ algorithm is given by $f(s,a,r,s',a',\theta)=\frac{\partial Q_{\theta}(s,a)}{\partial \theta}$ and the Residual Gradient$(\lambda)$ algorithm is given by  $f(s,a,r,s',a',\theta)=\frac{\partial Q_{\theta}(s,a)}{\partial \theta} - \gamma\frac{\partial Q_{\theta}(s',a')}{\partial \theta}$. Hereafter, we use $f_t$ as shorthand for $f(s_t,a_t,r_t,s_{t+1},a_{t+1},\theta_t)$.

To summarize, $f$ specifies the underlying reinforcement learning algorithm, while $u$ specifies the metric tensor for naturalizing the underlying algorithm. For example,
\begin{equation}
f_t=\frac{\partial Q_{\theta}(s_t,a_t)}{\partial \theta} \mbox{ and } u_t = \frac{\partial \left ( R^\lambda-Q_{\theta}(s,a) \right )}{\partial \theta}
\end{equation}
produces the natural Sarsa$(\lambda)$ algorithm using the metric tensor that corrects for the shape of the TD-error using the $\lambda$-return. See the appendix for a proof that the natural algorithms are covariant for all reasonable $f$ and $u$.

\section{Algorithms}
\subsection{Quadratic Computational Complexity}
A straightforward approach to creating a natural Sarsa$(\lambda)$ algorithm would be to maintain an estimate of $G(\theta)$ and compute $G(\theta)^{-1}$ at each time step. The natural algorithm is then
\begin{align}
\delta_t =& r_t + \gamma Q_{\theta_t}(s_{t+1},a_{t+1}) - Q_{\theta_t}(s_t,a_t)\\
e_t =& \gamma \lambda e_{t-1} + f(s_t,a_t,r_t,s_{t+1},a_{t+1},\theta_t)\\
G_t=&G_{t-1}+u_tu_t^\top\\
\theta_{t+1} =& \theta_t + \alpha_t \delta_t G_t^{-1}e_t,
\end{align}
where $G_0=I$ and $\{\alpha_t\}$ is a step size schedule satisfying $\sum_{t=0}^\infty \alpha_t = \infty$ and $\sum_{t=0}^\infty \alpha_t^2<\infty$. % Uh, we need to talk about decaying G at some point.
Due to the matrix inversion, this na\"{\i}ve algorithm has per time step computational complexity $O(n^3)$, where $n=|\theta|$, and we ignore the complexity of differentiating $Q_\theta$. This can be improved to $O(n^2)$ using the Sherman-Morrison formula to maintain an estimate of $G_t^{-1}$ directly.

\subsubsection{Convergence Guarantees}
The metric tensor $G(\theta)$ is positive semi-definite since for all non-zero vectors $z \in \mathbb R^n$:
\begin{align}
z^\top \mbox{E} \left [uu^\top \right ]z=&\mbox{E} \left [z^\top u u^\top z \right ]=\mbox{E} \left [ \left (z^\top u\right)^2 \right ]\geq0.
\end{align} 
Therefore the angle between the non-natural update $\delta_t e_t$ and the natural update $\delta_t G(\theta)^{-1} e_t$ cannot exceed ninety degrees. The natural algorithms therefore inherit the convergence guarantees of their non-natural counterparts. For example, natural Sarsa$(\lambda)$ will converge to the same solution as Sarsa$(\lambda)$, while natural residual gradient will converge to the same solution as residual gradient. % NOTE TO SELF: We need to prove this more - can it be 90 degrees off and never make progress? That is, can we be positive definite (not semi). If positive definite, what about the limit issue?

\subsection{Linear Computational Complexity}
To achieve linear computational complexity, we leverage the idea of compatible function approximation.\footnote{The compatible features that we present are compatible with $Q_\theta$, whereas the compatible features originally defined by \citet{Sutton2000} are compatible with a parameterized policy. Although related, these two types of compatible features are not the same.} We begin by estimating the TD-error, $\delta_t$, with a linear function approximator $w \cdot e_t$, where $w$ are the tunable parameters of the linear function approximator and $e_t$ are the compatible features. Specifically, we search for the $w$ that are a local minimum of the squared error in the estimate of the TD-error:
\begin{equation}
L(w)=\mbox{E} \left [ \left ( \delta_t - w^\top e_t \right) ^2\right].
\end{equation}
At a local minimum of $L$, $\partial L(w) / \partial w = 0$, so
\begin{align}
\label{eqn:derivThing}\mbox{E}\left[ \left ( \delta_t - w^\top e_t \right ) e_t \right]=&0\\
\label{eqn:asdf}\mbox{E}\left[ \delta_t e_t \right]=& \mbox{E}\left[ e_te_t^\top \right]w.
\end{align}
Notice that the left hand side of Eq. \ref{eqn:asdf} is the expected update to $\theta_t$ in the non-natural algorithms (e.g., $\theta_t$ for Sarsa$(\lambda)$ and Residual Gradient$(\lambda)$).

We can therefore execute the expected update to $\theta_t$ as
\begin{align}
\theta_{t+1}=&\theta_t + \alpha \mbox{E}\left[ \delta_t e_t \right]\\
=&\theta_t + \alpha \mbox{E}\left[ e_t e_t^\top \right]w.
\end{align}
Now, consider what happens when using GID 3 and Sarsa$(\lambda)$ or GID 4 and Residual Gradient$(\lambda)$. In these cases, $u_t=e_t$, so the natural update becomes:
\begin{align}
\theta_{t+1}=&\theta_t + \alpha G(\theta)^{-1}\mbox{E}\left[ e_te_t^\top \right]w\\
=&\theta_t + \alpha  \mbox{E}[u_t u_t^\top]^{-1}\mbox{E}\left[ e_te_t^\top \right]w\\
=&\theta_t + \alpha  \mbox{E}[e_t e_t^\top]^{-1}\mbox{E}\left[ e_te_t^\top \right]w\\
=&\theta_t + \alpha  w,
\end{align}
assuming that the metric tensor is invertible.

The challenge remains that locally optimal $w$ must be attained. For this we propose a two-timescale approach identical to that proposed by \citeauthor{Bhatnagar2009} (\citeyear{Bhatnagar2009}). That is, we perform stochastic gradient descent on $L(w)$ using a step size schedule $\{ \beta_t \}$ that decays faster than the step size schedule $\{\alpha_t\}$ for updates to $\theta_t$. The resulting linear-complexity two-timescale natural algorithm is:
\begin{align}
\delta_t =& r_t + \gamma Q_{\theta_t}(s_{t+1},a_{t+1}) - Q_{\theta_t}(s_t,a_t)\\
e_t =& \gamma \lambda e_{t-1} + f_t\\
w_t =& w_{t-1} + \beta_t \left ( \delta_t - w_{t-1}^\top e_t \right)e_t\\
\theta_{t+1} =& \theta_t + \alpha_t w_t.
\end{align}
For a review of a similar application of two-time scale algorithms to natural gradients for reinforcement learning, see the work of \citet{Bhatnagar2009}. Their results carry over without modification. To summarize, with certain smoothness assumptions, if
\begin{align}
\sum_{t=0}^\infty \alpha_t = \sum_{t=0}^\infty \beta_t =& \infty, \quad \sum_{t=0}^\infty \alpha_t^2 < \infty, \quad \sum_{t=0}^\infty \beta_t^2<\infty,\\
\beta_t = &o(\alpha_t),
\end{align}
then, since $\beta_t \to 0$ faster than $\{\alpha_t\}$, beyond some time, we can consider $w_t$ to already have converged, making the updates
\begin{align}
w_{t+1} =& w_t + 0\\
\label{eqn:asdf2} \theta_{t+1} =& \theta_t + \alpha_t w_{t+1}.
\end{align}
Since the change to $w_t$ is zero, we can write Eq.~\ref{eqn:asdf2} as $\theta_{t+1} = \theta_t+\alpha_t w$, where $w$ is the expected natural update, i.e., it satisfies Eq.~\ref{eqn:derivThing}. The immediate implication of this, combined with $G(\theta)$ being positive semi-definite, is that the linear complexity algorithms maintain the convergence guarantees of their non-natural counterparts.

Unfortunately, unlike compatible function approximation for natural policy gradient algorithms \cite{Bhatnagar2009}, it is not clear how a useful baseline could be added to the stochastic gradient descent updates of $w$. The baseline, $b$, would have to satisfy $\mbox{E} \left [ be_t\right ]=0$, which is not even satisfied by a constant non-zero $b$. 

\subsection{Implementation Details}
In practice, it is common to use fixed step sizes rather than step size schedules, even though this voids convergence guarantees. This also removes the benefits of a two-timescale algorithm. For our experiments, we therefore perform updates in a manner similar to that of \citet{Peters2008}, where $\theta$ is updated every $k$ time steps, giving the gradient estimates time to converge. This merges the benefits of a two-timescale approach with the benefits of a fixed learning rate (i.e., one need not search over step size schedules).

The linear algorithm includes an additional hyper-parameter, $\beta$. Rather than require the user to search for good values of $\beta$, which can be costly when other hyper-parameters like $\alpha$ and exploration rates must also be optimized, we use AutoStep \cite{autostep} to automatically select and tune $\beta$. 

We call the quadratic-complexity algorithm the {\bf Co}variant {\bf re}inforcement learning algorithm (Core), and the linear-complexity algorithm Core-L. Pseudocode for each is provided in Algorithms \ref{alg1} and \ref{alg2}. In Algorithm \ref{alg1}, $\bar e$ tracks the mean $e$-trace since the last update to $\theta$. In Algorithm \ref{alg2}, $\beta_t$ is tuned using the AutoStep algorithm \cite{autostep}. Both algorithms have three open hyper-parameters, the step size $\alpha$, update frequency $k$, and eligibility trace decay parameter $\lambda$. They may have additional hyper-parameters depending on the choice of how actions are selected (e.g., $\epsilon$ for $\epsilon$-greedy action selection).

\begin{algorithm}[t]                      % enter the algorithm environment
\caption{{\bf Co}variant {\bf re}inforcement learning (Core)}          % give the algorithm a caption
\label{alg1}                           % and a label for \ref{} commands later in the document
\begin{algorithmic}                   % enter the algorithmic environment
	\STATE Initialize $G_0^{-1}=I$, $e_0=0$, $\bar e_0$ = 0, $\theta_0=0$ \vspace{0.1cm}
	\hrule
	\STATE $\delta_t = r_t + \gamma Q_{\theta_t}(s_{t+1},a_{t+1}) - Q_{\theta_t}(s_t,a_t)$
	\STATE Compute $f_t$ and $u_t$
	\STATE $e_t = \gamma \lambda e_{t-1} + f_t$
	\STATE $G_{t}^{-1}=G_{t-1}^{-1}-\frac{G_{t-1}^{-1}u_tu_t^\top G_{t-1}^{-1}}{1+u_t^\top G_{t-1}^{-1}u_t}$
	\STATE $\bar e_{t} = \bar e_{t-1} + \frac{e_t}{k}$
	\IF {$(t \mod k = 0)$}
		\STATE $\theta_{t+1} = \theta_t + \alpha \delta_t G_t^{-1} \bar e_t$
		\STATE $\bar e_t=0$
	\ELSE
		\STATE $\theta_{t+1} = \theta_t$
	\ENDIF
\end{algorithmic}
\end{algorithm}

\begin{algorithm}[t]                      % enter the algorithm environment
\caption{{\bf Co}variant {\bf re}inforcement learning - {\bf L}inear (Core-L)}          % give the algorithm a caption
\label{alg2}                           % and a label for \ref{} commands later in the document
\begin{algorithmic}                   % enter the algorithmic environment
	\STATE Initialize $w_0=0$, $e_0=0$, $\theta_0=0$ \vspace{0.1cm}
	\hrule
	\STATE $\delta_t = r_t + \gamma Q_{\theta_t}(s_{t+1},a_{t+1}) - Q_{\theta_t}(s_t,a_t)$
	\STATE Compute $f_t$ and $u_t$
	\STATE $e_t = \gamma \lambda e_{t-1} + f_t$
	\STATE $w_{t} =  w_{t-1} + \beta_t \left ( \delta_t - w_{t-1}^\top e_t \right)e_t$
	\IF {$(t \mod k = 0)$}
		\STATE $\theta_{t+1} = \theta_t + \alpha w_t$ % linear algorithm: no G_t^{-1} is maintained; see Eq.~\ref{eqn:asdf2}
	\ELSE
		\STATE $\theta_{t+1} = \theta_t$
	\ENDIF

\end{algorithmic}
\end{algorithm}

\section{Experimental Results}
We performed experiments on two canonical domains: mountain car and cart-pole balancing, as well as on one new challenging domain that we call visual Tic-tac-toe. For mountain car and cart-pole balancing, we compare Core and Core-L to Sarsa$(\lambda)$, all using the third-order Fourier basis \cite{Konidaris2012}. On visual Tic-tac-toe we used a fully-connected feed-forward artificial neural network with one hidden layer of 20 nodes. This allows us to show the benefits of natural gradients when the value function parameterization is non-linear and more complex.

 We optimized the algorithm parameters for all experiments using a randomized search as suggested by \citet{Bergstra2012}. We selected the hyper-parameters that resulted in the largest mean discounted return over 20 episodes for mountain car, 50 episodes for cart-pole balancing, and $100,000$ episodes for visual Tic-tac-toe. Each parameter set was tested 10 times and the performance averaged. A total of over 500,000 sets of hyper-parameters were tested, and the parameters with the highest mean return for each algorithm are used for our figures.
 
For mountain car and cart pole each algorithm's performance is an average over $100$ trials, with standard error shown in the shaded regions. For visual tic-tac-toe algorithm performance is averaged over $10$ trials, again with standard error shown by the shaded regions. Finally, the figures refer to Core-1 (quadratic GID-$4$ natural Sarsa$(\lambda)$), Core-2 (quadratic GID-$4$ natural Residual Gradient$(\lambda)$), and Core-L (linear GID-4 natural Residual Gradient$(\lambda)$). These variants were selected  in order to show both natural Sarsa$(\lambda)$ and natural Residual Gradient$(\lambda)$. We selected GID-$4$ in all cases because it resulted in reliably superior performance. The algorithms Sarsa($\lambda$) and Residual Gradient($\lambda$) are referred to as ``Sarsa'' and ``RG'' respectively. 

\subsection{Mountain Car}

Mountain car is a simple simulation of an underpowered car stuck in a valley. There are three actions which control the underpowered motor: decelerate, neutral, and accelerate. The goal is the top of the hill in the direction of acceleration, and the agent receives a reward of $0$ for reaching it or a reward of $-1$ per time step when not at the goal. There are two state variables, velocity and position, which are bounded between $\pm 0.07$ and $(-1.2, 0.6)$ respectively. Full details can be found in the work of \citet{SuttonBarto}. %Figure~\ref{fig:mcdom} shows a representation of the mountain car domain.

Figure \ref{fig:MC} gives the results for each algorithm on mountain car. Sarsa($\lambda$), Core-1, and Core-2 all behave very similarly, with Sarsa gaining an advantage towards the end of the experiment. Because Core-1 and Core-2 are quadratic algorithms, their estimates of the inverse of the metric tensor include equal weighting to early samples and later samples. This could explain why they converge prematurely in this domain, while Sarsa continues to improve. Introducing some type of forgetting aspect to the matrix estimate is one possible solution to this practical problem. Core-L, the linear natural residual gradient algorithm, is much slower to learn for the first few episodes, but after that continues to improve well past the other algorithms. It does not have the same limitations just mentioned with the quadratic algorithms. One reason for the slower initial learning of the linear algorithm is that it must first build up an estimate of the $w$ vector before updates to the value function parameters become meaningful. The Residual Gradient($\lambda$) algorithm, denoted by ``RG'', learns at a slow and steady pace, but is not near the other algorithms by the end of the experiment. 

\begin{figure}[t]
\begin{center}
\includegraphics[width=\columnwidth]{mountaincar.pdf}
\caption{Mountain Car Experiments}
\label{fig:MC}
\end{center}
\end{figure}

\subsection{Cart Pole Balancing}

Cart pole balancing simulates a cart on a short one dimensional track with a pole attached with a rotational hinge. There are two actions which move the cart in one direction or the other on the track. The goal is to keep the pole balanced in the vertical upright position, and the episode ends if it drops below $45^\circ$ from vertical or if the cart reaches the end of the track. There is a reward of $1$ per time step and a reward of $0$ at the end of the episode. There are many varieties of the cart pole balancing domain, and we refer the reader to \citet{Barto1983} for complete details. 

Figure \ref{fig:CP} gives the results for each algorithm on cart-pole balancing. In the cart pole balancing domain the two quadratic algorithms, Core-1 and Core-2, perform the best. Again, the linear algorithm, Core-L, takes a slower start as it builds up an estimate of $w$, but converges well above the non-natural algorithms and very close to the quadratic ones. 

\begin{figure}[t]
\begin{center}
\includegraphics[width=\columnwidth]{cartpole.pdf}
\caption{Cart Pole Experiments}
\label{fig:CP}
\end{center}
\end{figure}


\subsection{Visual Tic-Tac-Toe}

Visual Tic-Tac-Toe is a novel challenging decision problem in which the agent plays Tic-tac-toe (Noughts and crosses) against an opponent that makes random legal moves. The game board is a $3\times 3$ grid of handwritten letters (X, O, and B for blank) from the UCI Letter Recognition Data Set \cite{Slate1991}. At every step of the episode, each letter of the game board is drawn randomly with replacement from the set of available handwritten letters (787 X's, 753 O's, and 766 B's). Thus, it is easily possible for the agent to never see the same handwritten ``X", ``O", or ``B" letter in a given episode. The agent's state features are the 16 integer valued attributes for each of the letters on the board. Details of the data set and the attributes can be found in the UCI repository. A visual representation of some of the letters used is given in Figure~\ref{ttt_letters}. 

There are nine possible actions available to the agent, but attempting to play on a non-blank square is considered an illegal move and results in the agent losing its turn. This is particularly challenging because blank squares are marked by a ``B", making recognizing legal moves non-trivial. The opponent only plays legal moves, but chooses randomly among them. The reward for winning is $100$, $-100$ for losing, and $0$ otherwise. 

Figure~\ref{fig:TTT} gives the results comparing Core-L and Sarsa($\lambda$) on the visual Tic-tac-toe domain using the artificial neural network described previously. These results show linear natural residual gradients in a setting where they are able to account for the shape of a more complex value function parameterization, and thus confer greater improvement in convergence speed over non-natural algorithms. 

\begin{figure}[t]
\begin{center}
\includegraphics[width=\columnwidth]{ttt_rand.png}
\caption{Visual Tic-Tac-Toe Experiments}
\label{fig:TTT}
\end{center}
\end{figure}

\begin{figure}[tp]
\begin{center}
\includegraphics[width=0.6\columnwidth]{letterb}
\includegraphics[width=0.6\columnwidth]{letterx}
\caption{Visual Tic-Tac-Toe example letters}
\label{ttt_letters}
\end{center}
\end{figure}

\section{Conclusion}
After motivating and deriving the algorithm for applying natural gradients to Sarsa$(\lambda)$, we derived a large class (spanned by the choice of $u$ and $f$) of natural Bellman-error based algorithms. We provided a quadratic (Core) and linear (Core-L) algorithm for each. We proved that the Core and Core-L algorithms are covariant and inherit the convergence properties of their non-natural counterparts. Finally, we provided empirical results which suggest that the natural algorithms can perform as well as or better than their non-natural counterparts when using linear function approximation, and significantly better when using non-linear function approximation.


\bibliography{natural_sarsa}
\bibliographystyle{icml2013}

\appendix
\section*{Appendix A}

The following theorem and its proof closely follow and then extend the foundations originally laid by \citet{Bagnell2003} and later clarified by \citet{Peters2008} when proving that the natural policy gradient is covariant.  For brevity, we abuse notation and write $\nabla_x$ to denote the gradient with respect to $x$, not the directional derivative in the direction $x$.

\begin{thm}
A natural gradient update is covariant for parameterizations $\theta$ and $h$ if (i) for all parameters $\theta_i$ there exists a function $\theta_i = f_i(h_1, \ldots, h_m)$, (ii) the derivative $\nabla_h \theta$ and its inverse $\nabla_h \theta^{-1}$ exist, and (iii) the metric tensor is given by $G(\theta)=E[u(\theta) u(\theta)^T]$, where $u$ is a non-zero function of the parameters, $\theta$. 
\end{thm}

\begin{proof}
Let $\nabla_h J(h)$ and $\nabla_\theta J(\theta)$ denote the expected updates of an algorithm with parameters $h$ and $\theta$ respectively. For example, for Residual Gradient$(\lambda)$, $\nabla_\theta J(\theta)=\mbox{E}[\delta_t e_t]$. For small parameter changes $\triangle h$ and $\triangle \theta$, our assumptions imply that $\triangle \theta = \nabla_h \theta^\top \triangle h$.% because of the mapping from $h$ to $\theta$ and the existence of $\nabla_h \theta$ (from the two assumptions of the theorem).

If the natural gradient update is covariant, a change $\triangle h$ along $\nabla_h J(h)$ would result in the same change $\triangle \theta$ along $\nabla_\theta J(\theta)$ for sufficiently small scalar step-size $\alpha$. Applying the chain rule to $J$, we obtain
\begin{equation}
\nonumber \nabla_h J(h) = \nabla_h \theta \nabla_\theta J(\theta).
\end{equation}

Next, we show that any metric tensor satisfying our assumptions includes the Jacobian, $\nabla_h \theta$, twice as a factor:
\begin{align}
\nonumber G(h) &= E[u(h) u(h)^T] \\
\nonumber &= E[(\nabla_h \theta u(\theta))(\nabla_h \theta u(\theta))^\top] \\
\nonumber &= E[\nabla_h \theta u(\theta)  u(\theta)^\top \nabla_h \theta^\top] \\
\nonumber &= \nabla_h \theta E[u(\theta)  u(\theta)^\top] \nabla_h \theta^\top  \\
\nonumber &=  \nabla_h \theta G(\theta)  \nabla_h \theta^\top .
\end{align}
Hence the natural gradient in the $h$ parameterization is given by 
\begin{align}
\nonumber \triangle h &= \widetilde \nabla_h J(h) \\
\nonumber &= G(h)^{-1} \nabla_h J(h) \\
\nonumber &= (\nabla_h \theta G(\theta)  \nabla_h \theta^\top)^{-1} \nabla_h \theta \nabla_\theta J(\theta).
\end{align} 

We conclude that the natural gradient update for a suitably chosen metric tensor is covariant since 
\begin{align} 
\nonumber \triangle \theta &= \alpha \nabla_h \theta^\top \triangle h \\
\nonumber &= \alpha \nabla_h \theta^\top (\nabla_h \theta G(\theta)  \nabla_h \theta^\top)^{-1} \nabla_h \theta \nabla_\theta J(\theta) \\
\nonumber &= \alpha \nabla_h \theta^\top (\nabla_h \theta^\top)^{-1} G(\theta)^{-1}  \nabla_h \theta^{-1} \nabla_h \theta \nabla_\theta J(\theta) \\
\nonumber &= \alpha G(\theta)^{-1} \nabla_\theta J(\theta).
\end{align}
\end{proof}

\begin{cor}
The Natural Sarsa($\lambda$) and Natural Residual Gradient($\lambda$) updates are covariant.
\end{cor}
\begin{proof}
This follows immediately from Theorem~1.
%Both are functions of $\theta$. Therefore, the result follows directly from the previous theorem. 
% ^^^^^ I don't know what you mean by this. This wasn't stated as the assumption in Thm1...
\end{proof}

\end{document} 


% This document was modified from the file originally made available by
% Pat Langley and Andrea Danyluk for ICML-2K. This version was
% created by Lise Getoor and Tobias Scheffer, it was slightly modified  
% from the 2010 version by Thorsten Joachims & Johannes Fuernkranz, 
% slightly modified from the 2009 version by Kiri Wagstaff and 
% Sam Roweis's 2008 version, which is slightly modified from 
% Prasad Tadepalli's 2007 version which is a lightly 
% changed version of the previous year's version by Andrew Moore, 
% which was in turn edited from those of Kristian Kersting and 
% Codrina Lauth. Alex Smola contributed to the algorithmic style files.  
