\documentclass[a4paper,11pt]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage{amsmath}
\usepackage{subfig}
\usepackage{graphicx}
\DeclareMathOperator{\argmax}{\arg\max}
\title{Intelligent Agents Homework 1} 
\author{Behrooz Mahasseni}

\begin{document}
\maketitle
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Question 1}
We can define the following simple MDP, which is based on assignment 1, to show this property. We defined our MDP based on a simple ``Get the cheese'' game. The game has an $h \times w$ maze. Each cell may contain a cheese (which will last forever but has a value). There is a single mouse in the maze and we want to find the best policy to guide the mouse through the maze. As soon as the mouse enters a cell with a cheese, it is rewarded by the value of the cheese. Since we have an infinite source of cheese in the cells containing cheese, the mouse will be rewarded for as long as it stays. We put two cheeses in State 9 and State 16, with values set to 3 and 1 respectively. Figure~\ref{fig:0} shows the maze. All other states have zero reward.
  \begin{figure}[ht]
    \caption{Maze and rewards for states}
    \centering
    \includegraphics[totalheight=0.5\textwidth]{hw1_fig0.jpg}
    \label{fig:0}
  \end{figure}
The stationary policy for state 11 will choose ``Right'' as the action, but the non-stationary policy will choose ``Down'' for $1\leq h \leq 3$. This shows that the non-stationary policy value is different from the stationary policy value.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Question 2}
\subsection{a.}
Instead of summing over all states we need to perform the summation over the possible next states. 
\begin{equation}
  V_\pi^k(s) = R(s) + \sum_{s^\prime \in \text{NEXT}(s,\pi(s,k))} T(s, \pi(s,k), s^\prime) V_\pi^{k-1}(s^\prime)\nonumber 
\end{equation}
With this change, for each state we iterate over at most $r$ next states. As a result, for each state we need $r \times H$ operations. Since we have $n$ states, the total complexity is $O(Hnr)$.
Note: $O(Hnr) < O(Hn^2)$.
\subsection{b.}
In addition to the previous change we should also maximize among the possible actions. 
\begin{equation}
  V^k(s) = R(s) + \max_{a\in \text{LEGAL}(s)}\sum_{s^\prime \in \text{NEXT}(s,a)} T(s, a, s^\prime) V^{k-1}(s^\prime)\nonumber 
\end{equation}
With this change we only search among at most $k$ legal actions per state, so the total complexity is $O(Hnrk)$.
Note: $O(Hnrk) < O(Hmn^2)$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Question 3}
\subsection{a.} 
The Bellman equation when the reward function is defined as $R(s,a)$ is as follows:
\begin{equation}
  V(s) = \max_a [R(s,a) + \beta \sum_{s^\prime} T(s,a,s^\prime) V(s^\prime)]\nonumber
\end{equation}
The Bellman equation when the reward function is defined as $R(s,a, s^\prime)$ is as follows:
\begin{equation}
  V(s) = \max_a \sum_{s^\prime} [T(s,a,s^\prime) R(s,a, s^\prime) + \beta T(s,a,s^\prime) V(s^\prime)]\nonumber
\end{equation}
The first term in the sum is the expected reward we get in the current state with respect to the next state.
\subsection{b.}
To convert the MDP with reward function $R(s,a,s^\prime)$ we need to introduce a new state $(s, a, s^\prime)$. Figure~\ref{fig:1} shows the new MDP.
We define the transition $T^\prime$ based on $T$ and the reward function $R^\prime$ as follows: 
\begin{equation}
\begin{array}{l}  
  T^\prime(s,a, (s, a, s^\prime)) = T(s, a, s^\prime) \\
  T^\prime((s, a, s^\prime), a, s^\prime) = 1\\
  R^\prime(s,a) = 0\\
  R^\prime((s,a,s^\prime), a) = \beta^{-1/2} R(s,a,s^\prime). \nonumber 
\end{array}
\end{equation}
The reason for the last part is that we are dividing the original transition into two steps. At each step the expected reward is multiplied by the discount factor. To keep the same reward value we need the cancellation term $\beta^{-1/2}$.
  \begin{figure}[ht]
    \caption{Adding new state $(s,a,s^\prime)$}
    \centering
    \includegraphics[totalheight=0.25\textwidth]{hw1_fig1.jpg}
    \label{fig:1}
  \end{figure}
\subsection{c.}
We can use the same idea by adding a new state $(s,a)$. Figure~\ref{fig:2} shows the new MDP. As before, we define $T^\prime$ and $R^\prime$ as follows:
\begin{equation}
\begin{array}{l}  
  T^\prime(s,a, (s,a)) = 1 \\
  T^\prime((s, a), a, s^\prime) = T(s,a,s^\prime)\\
  R^\prime(s) = 0\\
  R^\prime((s,a)) = \beta^{-1/2} R(s,a).  \nonumber
\end{array}
\end{equation}
  \begin{figure}[ht]
    \caption{Adding new state $(s,a)$}
    \centering
    \includegraphics[totalheight=0.25\textwidth]{hw1_fig2.jpg}
    \label{fig:2}
  \end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Question 4}
We need to extend the state space to encode the dependency on the $k$ previous states. To do so we define a new state space in which each state is a $k$-tuple of the original states. Suppose we use $z$ for the new MDP's states.
\begin{enumerate}
  \item Each state in our new MDP represents $k$ ordered states of the original MDP ($z = (s_1, \dots, s_k)$, a tuple of length $k$). With this definition, if we have $n$ states in $M$, we have $n^k$ states in $M^\prime$. 
  \item $A^\prime  = A$, because the set of actions are the same in both MDPs.
  \item if $z = (s_1,...,s_k)$ then $R^\prime(z) = R(s_k)$
  \item Suppose $z = (s_1,\dots,s_k)$ and $z^\prime = (s^\prime_1,\dots,s^\prime_k)$; then
  \begin{equation}
    T^\prime(z, a, z^\prime) =
    \begin{cases}
      0 & \text{if } (s_2,\dots,s_k) \neq (s^\prime_1,\dots, s^\prime_{k-1}) \\
      p(s^\prime_{k} \mid s_2,\dots,s_k) & \text{otherwise}
    \end{cases}\nonumber
  \end{equation}
\end{enumerate}
  Note that $p(s^\prime_{k} \mid s_2,\dots,s_k) = p(s^\prime_{k} \mid s^\prime_1,\dots,s^\prime_{k-1})$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Question 5}
\subsection{a.} We start by expanding $T_\pi[V](s)$. In the following, $\|\cdot\|$ denotes the infinity norm. 
\begin{equation}
  \begin{array}{l}
  |T_\pi[V](s) - T_\pi[V^\prime](s)| = |R(s) + \beta \sum_{s^\prime} p(s^\prime|s,\pi(s)) V(s^\prime) - [R(s) + \beta \sum_{s^\prime} p(s^\prime|s,\pi(s)) V^\prime(s^\prime)]|\\
  =\beta |\sum_{s^\prime} p(s^\prime|s,\pi(s)) (V(s^\prime) - V^\prime(s^\prime))| \\
   \leq \beta \sum_{s^\prime} p(s^\prime|s,\pi(s)) \max_{s^{\prime\prime}} |V(s^{\prime\prime}) - V^\prime(s^{\prime\prime})|  \\
   = \beta \sum_{s^\prime} p(s^\prime|s,\pi(s))  \|V - V^\prime\| \\
   = \beta \|V - V^\prime\| \\
  \end{array}
\end{equation}
From above we have $|T_\pi[V](s) - T_\pi[V^\prime](s)| \leq \beta \|V - V^\prime\|$. Since we made no assumption about $s$, this inequality also holds for the $s$ that maximizes $|T_\pi[V](s) - T_\pi[V^\prime](s)|$. As a result we have 
\begin{equation}
  \|T_\pi[V] - T_\pi[V^\prime]\| \leq \beta \|(V - V^\prime)\|\nonumber
\end{equation}
\subsection{b.}
  From part "a" we have 
  \begin{equation}  
\|T_\pi[V^k] - V^{k}\| \leq \beta \|T_\pi[V^{k-1}] - V^{k-1}\| \leq \beta^2 \|T_\pi[V^{k-2}] - V^{k-2}\| \leq \dots\nonumber
\end{equation}
This means 
  \begin{equation}  
\|T_\pi[V^k] - V^{k}\| \leq \beta^k \|T_\pi[V^{0}] - V^{0}\|\nonumber
\end{equation}
We know $\|T_\pi[V^{0}] - V^{0}\|$ is finite and, since $0 \leq \beta < 1$, we have $\lim_{k\rightarrow\infty} \beta^k \|T_\pi[V^{0}] - V^{0}\| = 0$. Since $0 \leq \|T_\pi[V^k] - V^{k}\| \leq \beta^k \|T_\pi[V^{0}] - V^{0}\|$, as $k\rightarrow \infty$ we have 
\begin{equation}
  \begin{array}{l}
  \lim_{k\rightarrow\infty}\|T_\pi[V^k] - V^{k}\| = 0\nonumber\\
    \Rightarrow
    \lim_{k\rightarrow\infty}T_\pi[V^k] = \lim_{k\rightarrow\infty}V^{k} = V_\pi \nonumber
  \end{array}
\end{equation}
\subsection{c.}
By proving parts ``a'' and ``b'' we actually proved that the Bellman backup is a contraction with only one fixed point. We cannot have more than one fixed point, because then part ``a'' would not hold. So for any initialization of $V$, by applying the Bellman backup we get closer and closer to the fixed point at each iteration. So we always converge in the limit.
\subsection{d.}
Similar to part ``b'', suppose the maximum possible reward is $R_{max}$. When $\|V^0\| = 0$, the maximum initial error $\|V^0 - V_\pi\|$ is $\frac{R_{max}}{1-\beta}$. We also know that $T_\pi[V_\pi] = V_\pi$. If we apply the Bellman backup $k$ times, then using part ``a'' we have 


\begin{equation}  
\|T_\pi[V^{k-1}] - V_\pi\| = \|T_\pi[V^{k-1}] - T_\pi[V_\pi]\| \leq \beta \|V^{k-1} - V_\pi\| \leq \dots \leq \beta^k \|V^{0} - V_\pi\|\nonumber
\end{equation}


This means

\begin{equation}  
\|V^k-V_\pi\|=\|T_\pi[V^{k-1}] - V_\pi\| \leq \beta^k \|V^{0}- V_\pi\|  \leq \beta^k \frac{R_{max}}{(1-\beta)} \nonumber
\end{equation}

So if we want to have $\|V^k - V_\pi\| \leq \epsilon $ we have to set $k$ such that:
\begin{equation}
\begin{array} {l}
\beta^k \frac{R_{max}}{(1-\beta)} \leq \epsilon
  \Rightarrow
\beta ^ k \leq \frac{\epsilon (1-\beta)}{R_{max}}
\Rightarrow
k \geq \frac{\log(\frac{\epsilon(1-\beta)}{R_{max}})}{\log(\beta)} \nonumber
\end{array}  
\end{equation}
 \end{document}
