\documentclass[10pt,oneside]{book}

\input{macros_orig.tex}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{document}

\pagestyle{empty}
\thispagestyle{empty}

\begin{codebox}
  \Procname{$\proc{Fitted-Q-Learning}(\mathcal A, s_0, \gamma, \alpha,
    \epsilon, m)$}
  \li $s \gets s_0$ \Comment Or draw an $s$ randomly from $\mathcal S$
  \li $\mathcal D \gets \{\;\}$
  \li initialize neural-network representation of $Q$
  \li \While True: \Do
  \li $\mathcal D_\text{new} \gets{}$ experience from executing an $\epsilon$-greedy
  policy based on $Q$ for $m$ steps
  \li $\mathcal D \gets \mathcal D \cup \mathcal D_\text{new}$, represented
  as $(s, a, r, s')$ tuples
  \li $\mathcal D_\text{sup} \gets \{(\ex{x}{i}, \ex{y}{i})\}$ where $\ex{x}{i} =
  (s, a)$ and $\ex{y}{i} = r + \gamma \max_{a' \in \mathcal A} Q(s', a')$
  for each tuple $\ex{(s, a, r, s')}{i} \in \mathcal D$
  \li re-initialize neural-network representation of $Q$
  \li $Q \gets \text{supervised\_NN\_regression}(\mathcal D_\text{sup})$
\End
\end{codebox}

\end{document}
