\documentclass[a4paper,10pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage{cite}
\usepackage{amsmath}
\usepackage{amsthm}

\newtheorem{theorem}{Theorem}

\begin{document}
The reinforcement learning problem is generally modeled using an MDP~\cite{Ng00algorithmsfor}. %mdp equation
\\
A finite MDP is a tuple $(S, A, \{P_{sa}\}, \gamma, R)$, where
\begin{itemize} %reduce spacing
 \setlength{\itemsep}{.1pt}
 \item $S$ is a finite set of $N$ states
 \item $A$ = $\{a_1, \cdots, a_k\}$ is a set of $k$ actions
 \item $P_{sa}(\cdot)$ are the state transition probabilities upon taking action $a$ in state $s$
 \item $\gamma \in [0,1)$ is the discount factor
 \item $R : S \to \Re$ is the reinforcement function or the reward function, bounded in absolute value by $R_{\max}$
\end{itemize}
For simplicity, we denote rewards as $R(s)$ but we will be using $R : S \times A \to \Re$ later on.
\\
A policy is defined as any map $\pi : S \to A$, and the value function for a policy $\pi$ at any state $s_1$ is given by

\begin{align*}
 V^\pi(s_1)=E[R(s_1) + \gamma R(s_2) + \gamma^2 R(s_3) + \cdots |\pi]
\end{align*}

where the expectation is over the distribution of the state sequence $(s_1,s_2,\cdots)$ we pass through when we execute the policy $\pi$ starting from $s_1$. We also define the Q-function as

\begin{align*}
 Q^\pi(s,a)=R(s) + \gamma E_{s' \sim P_{sa}(.)}[V^\pi(s')]
\end{align*}

where the expectation is over $s'$ distributed according to $P_{sa}(.)$.\\
For discrete finite spaces, functions such as $R$ and $V$ can be represented as vectors indexed by state. The rewards can thus be written as an $N$-dimensional vector $\mathbf{R}$ for a state space of size $N$. Also, let $\mathbf{P}_a$ be the $N$ by $N$ matrix where each element $(i,j)$ gives the probability of moving to state $j$ from state $i$ on taking the action $a$. Let $\prec$ and $\preceq$ denote strict and non-strict vector inequality.
The goal of reinforcement learning is to find a policy $\pi$ such that $V^\pi(s)$ is maximized. Let us denote this policy as $\pi^*$.

%basic properties

Some additional results concerning MDPs \cite{Ng00algorithmsfor,Bertsekas:1996:NP:560669,Sutton:1998:IRL:551283} are also helpful
\begin{theorem}[Bellman Equations]
 Let an MDP $M = (S, A, \{P_{sa}\}, \gamma, R)$ and a policy $\pi : S \to A$ be given. Then for all $s \in S$, $a \in A$, $V^\pi$ and $Q^\pi$ satisfy
 \begin{equation}
  V^\pi(s) = R(s) + \gamma \sum\limits_{s'} P_{s\pi(s)}(s')V^\pi(s')
 \end{equation}
 \begin{equation}
  Q^\pi(s,a) = R(s) + \gamma \sum\limits_{s'} P_{sa}(s')V^\pi(s')
 \end{equation}
\end{theorem}

\begin{theorem}[Bellman Optimality]
 Let an MDP $M = (S, A, \{P_{sa}\}, \gamma, R)$ and a policy $\pi : S \to A$ be given. Then $\pi$ is an optimal policy for $M$ if and only if, for all $s \in S$,
 \begin{equation}
  \pi(s) \in \arg \max\limits_{a \in A} Q^\pi(s,a)
 \end{equation}
\end{theorem}



%V def,Q def
%vector notations, strict inequality
%basic properties

%\\
%then come to IRL
\par In inverse reinforcement learning, the problem is to find the reward function $R$ provided some observed behaviour. For small finite MDPs, we may have the complete observed policy $\pi$ and we then wish to find the set of possible reward functions $R$ such that $\pi$ is an optimal policy in the MDP $(S, A, \{P_{sa}\}, \gamma, R)$.
%characteristics
\par Ng and Russell \cite{Ng00algorithmsfor} also characterized the set of solutions for the problem
\begin{theorem}
 Let a finite state space $S$, a set of actions $A = \{a_1, \cdots, a_k\}$, transition probability matrices $\{\mathbf{P}_a\}$, and a discount factor $\gamma \in [0,1)$ be given. Then the policy $\pi$ given by $\pi(s) \equiv a_1$ is optimal if and only if, for all $a = a_2, \cdots, a_k$, the reward $\mathbf{R}$ satisfies
 \begin{equation}\label{eq:css}
  (\mathbf{P}_{a_1}-\mathbf{P}_a)(\mathbf{I}-\gamma \mathbf{P}_{a_1})^{-1}\mathbf{R} \succeq 0
 \end{equation}

\end{theorem}
Note that $\mathbf{R} = \mathbf{0}$ is always a solution and there may be multiple solutions that satisfy \eqref{eq:css}.
In larger finite state MDPs or infinite state MDPs, finding a general solution can become difficult. A linear approximation for the reward function is thus used. We assume that there is a vector of features $\phi : S \to [0,1]^d$ such that
\begin{equation}
 R(s)=w_1\phi_1(s)+w_2\phi_2(s)+\cdots +w_d\phi_d(s)
\end{equation}
The problem is now transformed to determine the weight vector $\mathbf{w}$.
\cite{Ng00algorithmsfor} formulated the problem as a linear programming problem.
Considering that we do not have the complete policy but just an observed sequence of states $(s_0, s_1, \cdots)$, we have
\begin{equation}
 V^\pi_i(s_0)=\phi_i(s_0)+\gamma \phi_i(s_1)+ \gamma^2 \phi_i(s_2)+\cdots
\end{equation}
$V^\pi(s_0)$ can thus be written as
\begin{equation}
 V^\pi(s_0)=w_1 V^\pi_1(s_0)+w_2 V^\pi_2(s_0)+\cdots +w_d V^\pi_d(s_0)
\end{equation}
To start off the algorithm, we first find the value estimates for the ``expert'' policy $\pi^*$ for $m$ Monte Carlo runs as well as for our base policy $\pi_1$, which is a randomly chosen policy. We run the algorithm for a large number of iterations. At a particular iteration, $k$, we have the set of policies $\{\pi_1,\cdots,\pi_k\}$, and we want to find $\mathbf{w}$ so that the resulting reward satisfies
\begin{equation}
 V^{\pi^*}(s_0) \ge V^{\pi_i}(s_0), \quad i=1,\cdots,k
\end{equation}
The optimization problem used is as follows:
\begin{align*}
 \max &\sum\limits_{i=1}^{k} p(V^{\pi^*}(s_0) - V^{\pi_i}(s_0)) \\
 s.t. &|w_i| \le 1, i=1, \cdots, d
\end{align*} 
For each iteration, the $\mathbf{w}$ obtained is used to generate a new policy $\pi_{k+1}$ which is added to the current set of policies.
On closer inspection, this optimization problem can be reduced to
\begin{align*}
 \max &\sum\limits_{i=1}^{d} c_i w_i \\
 s.t. \: &|w_i| \le 1, i=1, \cdots, d
\end{align*} 
which would result in $w_i = \pm 1$ depending on the sign of $c_i$. This algorithm is not helpful when the features are binary, and it instead requires Gaussian functions as features.

%ng abbeel
Abbeel and Ng \cite{Abbeel:2004:ALV:1015330.1015430} worked further along the same strategy of matching features to determine weights. The algorithm is similar to the previous one with a change in the optimization problem. The optimization problem in this case has a max-min formulation.
\begin{align*}
 \max\limits_{t,\mathbf{w}} \: &t\\
 s.t. \: &V^{\pi^*}(s_0) \ge V^{\pi_i}(s_0) + t, i=1, \cdots, k\\
 &|w_i| \le 1, i=1, \cdots, d
\end{align*} 

where $t=\min\limits_{i \in \{1, \cdots, k\}} \left( V^{\pi^*}(s_0) - V^{\pi_i}(s_0) \right)$.
This problem is equivalent to finding the maximum margin hyperplane between two sets of points. To solve it with a generic quadratic programming problem solver we can transform it into

\begin{align*}
 \min\limits_{\mathbf{w}} \: &\frac{1}{2} \|\mathbf{w}\|^2 \\
 s.t. \: &V^{\pi^*}(s_0) - V^{\pi_i}(s_0) \ge 1, i=1, \cdots, k
\end{align*} 

The algorithm is repeated for a large number of iterations or until $t \le \epsilon$. The algorithm still has a few issues:
\begin{itemize}
 \item If $t > \epsilon$ but not too big, the max-min formulation can prevent the algorithm from converging to a solution. It will try to increase $t$ for a policy that comes close to the optimal policy.
 \item In case of a batch setting, the algorithm is not clearly defined. The constraints can become inconsistent after a few iterations.
\end{itemize}
Maximum margin planning \cite{Ratliff06maximummargin} does have a clear approach for the batch setting but even though the approach presented is similar to inverse reinforcement learning, its goal is to mimic the behaviour of the expert rather than recovering the underlying reward.

\par
Ziebart et~al.\ \cite{Ziebart_2008_6055,bziebart2008navigate} presented a probabilistic approach to IRL based on the principle of maximum entropy. Instead of dealing with policies, they considered a distribution over the entire class of possible behaviours. Many different distributions of paths match feature counts when the demonstrated behaviour is sub-optimal. Any one distribution could favour some paths over others. This is avoided using the principle of maximum entropy, which chooses the distribution that does not exhibit any additional preferences beyond feature matching.\par
Consider a path $\zeta = (s_0, s_1, \cdots)$. For deterministic MDPs, the resulting distribution over paths is parameterized by the reward weights, $\mathbf{w}$
\begin{equation}
 P(\zeta_i | \mathbf{w}) = \frac{1}{Z(\mathbf{w})} e^{\mathbf{w}^\top V^{\zeta_i}(s_0)}
\end{equation}
The partition function, $Z(\mathbf{w})$, given the parameter weights, always converges for finite horizon problems and infinite horizon problems with discounted reward weights.
\par
For non-deterministic MDPs, the distribution of the paths is conditioned on the weights and the transition distribution, $\mathbf{P}$.
\begin{equation}
 P(\zeta | \mathbf{w},\mathbf{P}) \approx \frac{1}{Z(\mathbf{w},\mathbf{P})} e^{\mathbf{w}^\top V^{\zeta}(s_0)} \prod_{s_{t+1}, a_t, s_t \in \zeta} P_{sa}(s_{t+1}|a_t, s_t)
\end{equation}
The probability of an action is weighted by the expected exponentiated rewards of all paths that begin with that action
\begin{equation}
 P(\mathrm{action} \: a | \mathbf{w},\mathbf{P}) \propto \sum_{\zeta : a \in \zeta_{t=0}} P(\zeta | \mathbf{w},\mathbf{P})
\end{equation}
The model is trained by maximizing the likelihood of the observed behaviour under the maximum entropy distribution, which is a convex problem solved with gradient-based optimization.
\bibliography{report1}{}
\bibliographystyle{plain}
\end{document}     
