% \eps: shorthand for the exploration-rate symbol used throughout the algorithm.
% Starred form: short (no-\par) macro, per l2tabu recommendation.
\newcommand*{\eps}{\varepsilon}
% \cmt{...}: typeset an inline C-style pseudocode comment in olive green.
\newcommand*{\cmt}[1]{\textcolor{OliveGreen}{//\,\,#1}}

\begin{algorithm}
\begin{algorithmic}
\State \cmt{Global Variables}
\State $S \gets$ Set of states
\State $A \gets$ Finite set of actions
\State $\phi\gets$ Feature transform $S\rightarrow\mathbb{R}^n$
\State $w\gets$ Weight vector $\in\mathbb{R}^n$
\State $\tau\gets$ Trace vector $\in\mathbb{R}^n$
\State $\alpha\gets$ Learning rate $\in[0,1]$
\State $\gamma\gets$ Discount factor $\in[0,1]$
\State $\eps_0\gets$ Initial exploration rate $\in[0,1]$
\State $\eps_d\gets$ Exploration decay rate $\in[0,1]$

\\
\State \cmt{Learn to play a game using the SARSA($\lambda$)}
\State \cmt{algorithm with linear function approximation}
\Function{LEARN-GAME}{\,}
	\State \cmt{Initializations}
	\State $\tau\gets0$, $w\gets0$
	\State $s \gets$ Initial state, $a \gets$ Initial action
	\State $\eps\gets\eps_0$ \cmt{Exploration rate}
	\State
	\State \cmt{Main loop}
	\Repeat
		\State Take action $a$, observe next state $s'$ and reward $r$
		\State $a' \gets$ CHOOSE-ACTION($s', \eps$) \cmt{Next action, chosen on-policy from $s'$}
		\State $\delta \gets r + \gamma w^T\phi(s',a') - w^T\phi(s, a)$ \cmt{TD error}
		\State
		\State \cmt{Decay traces by $\gamma\lambda$, then replace for active features}
		\State $\tau \gets \gamma\lambda \tau$
		\For{$\phi_i(s,a) \neq 0$}
			\State $\tau_i \gets 1$
		\EndFor
		\State
		\State $w \gets w + \alpha\delta \tau$ \cmt{Update weight vector}
		\State $\eps\gets\eps_d\eps$ \cmt{Anneal exploration rate}
		\State $s \gets s'$, $a \gets a'$ \cmt{Advance to the next time step}
	\Until{termination}
\EndFunction
\\
\State \cmt{Choose an action using an $\eps$-greedy policy}
\Function{CHOOSE-ACTION}{$s, \eps$}
	\State $x \gets$ a random number drawn uniformly from $[0,1]$
	\If{$x < \eps$}
		\State \cmt{Exploration}
		\State\Return a randomly chosen action $a\in A$
	\Else
		\State \cmt{Exploitation}
		\State\Return $\arg\max_{a\in A} w^T\phi(s,a)$
	\EndIf
\EndFunction
\end{algorithmic}
\end{algorithm}