\relax 
\citation{Karakovskiy2012}
\citation{Young2012}
\citation{Genesereth2005}
\citation{Bjornsson2009}
\citation{bellemare12arcade}
\citation{bellemare12arcade}
\@writefile{toc}{\contentsline {section}{\numberline {1}\hskip -1em.\nobreakspace  {}Introduction}{1}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Our system successfully learns to play the games shown above: \textsc  {Eat-The-Fruit} (top-left), \textsc  {Pong} (top-middle), \textsc  {Dance-Dance-Revolution} (top-right), \textsc  {Frogger} (bottom-left), \textsc  {Snake} (bottom-middle), \textsc  {Dodge-The-Missile} (bottom-right).}}{1}}
\@writefile{toc}{\contentsline {section}{\numberline {2}\hskip -1em.\nobreakspace  {}Games}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Our tile-coded feature representation. We encode the absolute positions of game objects (top) as well as relative positions of game objects (bottom) in spatial bins. Relative positions are computed separately for all pairs of object types. For any game state $s\in S$, this results in a feature vector $\phi (s)$ of dimension $d=O(k^2)$ where $k$ is the number of distinct object types in the game. To be used in the SARSA learning algorithm, the feature transform must also encode the action $a_i\in A=\{a_0,\ldots  ,a_{|A|-1}\}$ that is to be taken from the current game state $s$. To this end, our final feature vector $\phi (s,a_i)$ is simply the vector $\phi (s)$ with all indices shifted by $i|A|$ and with zeros at all other positions.}}{2}}
\newlabel{fig:schematic}{{2}{2}}
\citation{bellemare12arcade}
\@writefile{toc}{\contentsline {section}{\numberline {3}\hskip -1em.\nobreakspace  {}Feature Design}{3}}
\newlabel{section:features}{{3}{3}}
\@writefile{toc}{\contentsline {section}{\numberline {4}\hskip -1em.\nobreakspace  {}Model-Based Reinforcement Learning}{3}}
\newlabel{section:model-based}{{4}{3}}
\@writefile{loa}{\contentsline {algorithm}{\numberline {1}{\ignorespaces Learn to play a game using the SARSA($\lambda $) algorithm with linear function approximation. See Section\nobreakspace  {}5\hbox {} for definitions of the variables.}}{3}}
\newlabel{alg:sarsa}{{1}{3}}
\citation{mohri2012foundations}
\citation{wiering1998fast}
\bibstyle{ieee}
\bibdata{refs}
\bibcite{bellemare12arcade}{1}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Top: Comparison of total computation time for learning \textsc  {Dodge-The-Missile} using different combinations of sparse and dense feature and trace vectors. Bottom: The average number of nonzero features for \textsc  {Dodge-The-Missile}. We observe that the average number of nonzero features is very small, which we exploit to drastically reduce computation times. }}{4}}
\newlabel{fig:sparse}{{3}{4}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces The effect of different values for $\lambda $ on the convergence of our algorithm when playing {\sc  Grid-World}. We show convergence rates for various values of $\lambda $ relative to random movement. Lower is better. We observe that our algorithm outperforms random play across a wide range of values for $\lambda $.}}{4}}
\newlabel{fig:lambda}{{4}{4}}
\@writefile{toc}{\contentsline {section}{\numberline {5}\hskip -1em.\nobreakspace  {}Model-Free Reinforcement Learning}{4}}
\newlabel{section:learning}{{5}{4}}
\@writefile{toc}{\contentsline {section}{\numberline {6}\hskip -1em.\nobreakspace  {}Results}{4}}
\bibcite{Bjornsson2009}{2}
\bibcite{Genesereth2005}{3}
\bibcite{Karakovskiy2012}{4}
\bibcite{mohri2012foundations}{5}
\bibcite{wiering1998fast}{6}
\bibcite{Young2012}{7}
\newlabel{subfig:fruitEater-perf}{{5(a)}{5}}
\newlabel{sub@subfig:fruitEater-perf}{{(a)}{5}}
\newlabel{subfig:ddr-perf}{{5(b)}{5}}
\newlabel{sub@subfig:ddr-perf}{{(b)}{5}}
\newlabel{subfig:dtm1-perf}{{5(c)}{5}}
\newlabel{sub@subfig:dtm1-perf}{{(c)}{5}}
\newlabel{subfig:dtm2-perf}{{5(d)}{5}}
\newlabel{sub@subfig:dtm2-perf}{{(d)}{5}}
\newlabel{subfig:frogger-perf}{{5(e)}{5}}
\newlabel{sub@subfig:frogger-perf}{{(e)}{5}}
\newlabel{subfig:pong-perf}{{5(f)}{5}}
\newlabel{sub@subfig:pong-perf}{{(f)}{5}}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces  Performance of our algorithm relative to random play for {\sc  Eat-The-Fruit} (top-left, lower is better), {\sc  Dance-Dance-Revolution} (top-middle), {\sc  Dodge-The-Missile} (top-right and bottom-left), {\sc  Frogger} (bottom-middle), and {\sc  Pong} (bottom-right). For {\sc  Dodge-The-Missile}, we capped the episode length at 5000 frames. For {\sc  Pong}, we capped the episode length at 50 bounces. For {\sc  Frogger}, we set $r_1=0.2$ and $r_2=-1$. Note that after our algorithm has learned to play {\sc  Dodge-The-Missile} effectively, it is capable of collecting powerups while simultaneously avoiding missiles. Note that since continuously trying to move upwards can be a viable strategy when playing {\sc  Frogger}, we also compare the performance of our algorithm to an AI player that continuously tries to move upwards.}}{5}}
\newlabel{fig:perf}{{5}{5}}
\newlabel{subfig:snake-easy-short}{{6(a)}{5}}
\newlabel{sub@subfig:snake-easy-short}{{(a)}{5}}
\newlabel{subfig:snake-easy-long}{{6(b)}{5}}
\newlabel{sub@subfig:snake-easy-long}{{(b)}{5}}
\newlabel{subfig:snake-hard-short}{{6(c)}{5}}
\newlabel{sub@subfig:snake-hard-short}{{(c)}{5}}
\newlabel{subfig:snake-hard-long}{{6(d)}{5}}
\newlabel{sub@subfig:snake-hard-long}{{(d)}{5}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Performance of our algorithm on {\sc  Snake} relative to random play. We evaluate our algorithm's performance on the following four different game variations: empty game board with a snake body length of 1 (left), empty game board with a snake body length of 10 (left-middle), relatively cluttered game board with snake body length of 1 (right-middle), and relatively cluttered game board with a snake body length of 10 (right). Our algorithm was able to learn effectively on the cluttered game board, but not with the longer body. This is because having a longer body requires longer-term decision making. On the other hand, a short body makes it possible to play according to a relatively greedy strategy, even on a relatively cluttered game board.}}{5}}
\newlabel{fig:snake-perf}{{6}{5}}
