\documentclass{beamer}
%TODO check size when done
\usepackage{lmodern}
\usepackage{multimedia}
\usepackage[backend=bibtex,bibstyle=authoryear,citestyle=authortitle-ibid]{biblatex}
\usepackage{algorithm}
\usepackage{algpseudocode}

\setbeamertemplate{navigation symbols}{}

\AtBeginSection[] % Do nothing for \section*
{
\begin{frame}<beamer>
\frametitle{Outline}
\tableofcontents[currentsection]
\end{frame}
}



%\usetheme{Pittsburgh}
\title[] % (optional, only for long titles)
{Lifelong feature-based strategy planning for dynamic environments}
\author[] % (optional, for multiple authors)
{Nishant JAIN\inst{1,2} \\
\vspace{.5cm}
{\footnotesize under the supervision of}\\
\and Dizan VASQUEZ\inst{2}}
\institute[] % (optional)
{
  \inst{1}%
    Grenoble INP
    \and
  \inst{2}%
  Team CHROMA\\
  INRIA Grenoble Rh\^{o}ne-Alpes
}
\date[24 June 2015]{24 June 2015} % mandatory argument required; without it \date grabs the next token
%\subject{Computer Science}

\useoutertheme{infolines}
\setbeamertemplate{headline}{}
\bibliography{report1}{}
%TODO titlepage

\begin{document}
%\begin{frame}
\titlegraphic{\noindent\includegraphics[height=7ex]{logoINP.png}\hfill\raisebox{2ex}{\includegraphics[height=3.5ex]{logoUJF.jpg}}\hfill\raisebox{1ex}{\includegraphics[height=5ex]{logo_INRIA.png}}}
\frame{\titlepage}

\section*{Outline}
\begin{frame}
\frametitle{Outline}
\tableofcontents
\end{frame}

\section{Introduction}
%\end{frame}
\begin{frame}
    \frametitle{Learning from Demonstration (LfD)}
    \begin{itemize}
     \item LfD allows users to train robots based on their specific needs
     \item Classical motion planning is not sufficient
     \item The learning should be transferable between different environments (lifelong learning)
    \end{itemize}
\end{frame}

\begin{frame}{Test Problem}
\begin{itemize}
 \item Motion is restricted to one dimension
 \item Including the temporal dimension gives a two dimensional state space
\end{itemize}
\begin{figure}[h!]
   
   \centering
     \includegraphics[scale=.3]{img1f.png}
     \caption{Robot in green, red objects indicating cars to be avoided, blue indicating objects to be collected}
   \label{gm}
 \end{figure}
 \begin{itemize}
 \item The robot can see only a finite number of time steps into the future (time horizon, $H$ \footcite{Henry10learningto})
 \end{itemize}
\end{frame}

\begin{frame}
 \frametitle{Test Problem}
 \framesubtitle{Training result}
 \begin{center}
 \movie[width=8cm,height=6cm,poster,autostart,showcontrols=true]{}{vid1.webm}
 \end{center}
\end{frame}

\begin{frame}
 \frametitle{Test Problem}
 \framesubtitle{Similar environments}
 \begin{center}
  \includegraphics[scale=0.5]{img3f.png}
 \end{center}
\end{frame}

\begin{frame}
    \frametitle{Inverse Reinforcement Learning}
    \framesubtitle{Markov Decision Process}
    Uses the reinforcement learning framework \\
    A finite MDP is a tuple $(S, A, \{P_{sa}\}, \gamma, R)$, where
     \begin{itemize} %reduce spacing
      \setlength{\itemsep}{-2pt}
      \item $S$ is a finite set of $N$ states
      \item $A = \{a_1, \cdots, a_k\}$ is a set of $k$ actions
      \item $P_{sa}(\cdot)$ are the state transition probabilities upon taking action $a$ in state $s$
      \item $\gamma \in [0,1)$ is the discount factor
      \item $R : S \mapsto \Re$ is the reinforcement function or the reward function
     \end{itemize}
    Goal: Determine a mapping (policy) $\pi : S \mapsto A$\\
It is difficult to define the reward function\\
\end{frame}

\begin{frame}
 \frametitle{Inverse Reinforcement Learning}
 The inverse reinforcement learning problem:\\
 \textbf{Given} 
 \begin{itemize}
  \item measurements of an agent's behaviour over time in a variety of circumstances
  \item measurements of the sensory inputs to that agent
  \item a model of the physical environment (including the agent's body)
 \end{itemize}
 \textbf{Determine}
 \begin{itemize}
  \item the reward function that the agent is optimizing
 \end{itemize}
\end{frame}

\begin{frame}
\frametitle{Other essential results}

 For an MDP $M = (S, A, \{P_{sa}\}, \gamma, R)$ with a policy $\pi : S \mapsto A$
 \begin{equation}
  V^\pi(s) = R(s) + \gamma \sum\limits_{s'} P_{s\pi(s)}(s')V^\pi(s')
 \end{equation}
 \begin{equation}\label{eq:bl2}
  Q^\pi(s,a) = R(s) + \gamma \sum\limits_{s'} P_{sa}(s')V^\pi(s')
 \end{equation}

\end{frame}

\section{State of the art}
\begin{frame}
\frametitle{The reward function \footcite{Ng00algorithmsfor}}
The reward is defined in terms of the context of the agent (features $\boldsymbol{\phi} : S \to [0,1]^d$)
\begin{equation}
 R(s)=w_1\phi_1+w_2\phi_2+\cdots +w_d\phi_d
\end{equation}
The weight vector $\mathbf{w}$ needs to be determined instead of $R$
\end{frame}

\begin{frame}
\frametitle{Feature matching}
\begin{itemize}
\item Path or trajectory: $\zeta$ = ($s_0, s_1, \cdots,$)
\item Sum of features (feature count): $F_{\zeta} = \sum_{s_j \in \zeta} \boldsymbol{\phi}(s_j)$
\item The demonstrated path should have the optimal feature count
\item Earlier approaches \footcite{Ng00algorithmsfor}$^,$ \footcite{Abbeel:2004:ALV:1015330.1015430} use this to formulate optimization problems
\end{itemize}
\end{frame}

\begin{frame}
\frametitle{General Algorithm}
\begin{algorithm}[H]
 \caption{IRL using feature matching}
 \begin{algorithmic}[1]
  \State Randomly pick some policy $\pi_1$ and set $k=1$
  \Repeat
   \State Solve the optimization problem to get the optimal weight vector $\mathbf{w}$
   \State Generate the policy $\pi_{k+1}$ for $\mathbf{w}$
   \State $k:=k+1$
   \State $t=\min\limits_{i \in \{1, \cdots, k\}} V^{\pi^*}(s_0) - V^{\pi_i}(s_0)$
  \Until{$t \le \epsilon$}
 \end{algorithmic}
\end{algorithm}
\end{frame}

\begin{frame}
\frametitle{IRL using Linear Programming}
\begin{align*}
 \max &\sum\limits_{i=1}^{k} p(V^{\pi^*}(s_0) - V^{\pi_i}(s_0)) \\
 s.t. &|w_i| \le 1, i=1, \cdots, d
\end{align*} 
where 
\begin{center}
$p(x) = x, \: x \ge 0$ \\ $p(x)=2x$ otherwise. % penalty weight 2 on violations, per Ng & Russell (2000); -2x would reward violations in a maximization
\end{center}
\end{frame}

\begin{frame}
\frametitle{IRL using Linear Programming}
\framesubtitle{Limitations}
\begin{align*}
 \max &\sum\limits_{i=1}^{d} c_i w_i \\
 s.t. \: &|w_i| \le 1, i=1, \cdots, d
\end{align*} 
which would result in $w_i = \pm 1$
\end{frame}

\begin{frame}
\frametitle{IRL using Quadratic Programming (QP)}
\begin{align*}\label{eq:irl2a}
 \max\limits_{t,\mathbf{w}} \: &t\\
 s.t. \: &V^{\pi^*}(s_0) \ge V^{\pi_i}(s_0) + t, i=1, \cdots, k\\
 &|w_i| \le 1, i=1, \cdots, d
\end{align*} 
where $t=\min\limits_{i \in \{1, \cdots, k\}} V^{\pi^*}(s_0) - V^{\pi_i}(s_0)$.\\
Limitations:
\begin{itemize}
\item Algorithm may fail to converge for small $\epsilon$
\item Not defined for a batch setting
\end{itemize}
\end{frame}


\begin{frame}{Maximum Entropy IRL \footcite{Ziebart_2008_6055} (ME)}
 \begin{itemize}
 \item Probabilistic approach to IRL based on the principle of maximum entropy.
 \item Uses a forward-backward algorithm to determine expected action visitation frequencies $D_a$
 \item Uses exponential gradient ascent to determine the solution
 \item $\nabla L(\mathbf{w}) = F_{\zeta^*} - \sum_\zeta P(\zeta | \mathbf{w},\mathbf{P}) F_\zeta = F_{\zeta^*} - \sum_a D_a \boldsymbol{\phi}(a)$
 \end{itemize}
\end{frame}

\begin{frame}{Maximum Entropy IRL}
\framesubtitle{Limitations}
 \begin{itemize}
 \item Requires the sign of the weight for a feature beforehand
 \item Needs extra steps for normalization of state action frequencies
 \item Fails if the sums of features are not of comparable magnitude
 \item Solution may not be globally optimal
 \end{itemize}
\end{frame}

\begin{frame}{Bayesian IRL \footcite{Ramachandran07bayesianinverse}}
\begin{itemize}
\item Tries to recover a probability distribution over the reward
\end{itemize}
Limitations:
\begin{itemize}
\item Algorithm requires the full expert policy
\item Defines the reward explicitly for each state
\item Applicable only for small state spaces
\end{itemize}
\end{frame}

\begin{frame}{Other algorithms}
\framesubtitle{Limitations}
\begin{itemize}
\item Applicable for small state spaces
\item Do not take features into account
\item Require more training data
\end{itemize}
\end{frame}

\section{Proposed Approaches}
\begin{frame}{IRL using genetic algorithms (GA)}
 \begin{itemize}
  \item Uses a multi-objective genetic algorithm
  \item Provides a number of solutions, some of which are Pareto optimal
  \item Uses feature matching : $\min_\mathbf{w} | F_{\zeta^*} - F_{\zeta_\mathbf{w}} |$
  \item Provides a globally optimal solution
 \end{itemize}
\end{frame}

\begin{frame}{IRL using genetic algorithms}
\framesubtitle{Pareto optimality}
 \begin{itemize}
  \item $f : \Re^a \to \Re^b$
  \item $Y = \{y : y=f(x), \forall x \in \Re^a\}$
  \item $x \in \Re^a$ is Pareto optimal if $\{y' \in Y: y' \prec y \}=\emptyset$ where $y=f(x)$
 \end{itemize}
  \begin{figure}[h!]
    
    \centering
      \includegraphics[scale=.5]{img4.png}
    \label{SPO}
  \end{figure}

\end{frame}

\begin{frame}{IRL using genetic algorithms}
\framesubtitle{MOGA}
\begin{algorithm}[H]
\caption{MOGA}\label{moga}
 \begin{algorithmic}[1]
  \State Initialize initial population of size $N$
  \Loop
   \State Evaluate the fitness of the population
   \State Evaluate the rank of the individual
   \State Evaluate the fitness of the population based on the rank
   \State Assign a probability of selection based on the fitness to each individual
   \State Create $N/2$ pairs to undergo crossover and mutation to create `children'
   \State Select the population from the current population and the `children'
  \EndLoop
 \end{algorithmic}
\end{algorithm}
\end{frame}

\begin{frame}{Normalized maximum entropy IRL (MEN)}
 \begin{itemize}
  \item Maximum entropy IRL requires feature counts to be of the same magnitude
  \item We use random policies to evaluate the average sum of features $\phi_{i \: avg}$ 
  \item $\nabla L(\mathbf{w})_i=\frac{\nabla L(\mathbf{w})_i}{\phi_{i \: avg}} \quad \forall i \in \{1, 2, 3\}$
 \end{itemize}
\end{frame}

\begin{frame}{Bayesian IRL (BIRL) and Improved Bayesian IRL (BIRLI)}
\begin{itemize}
 \item Modified to use $R$ as a weighted sum of features
 \item Uses a random walk on the weights
 \item Observation : $O = \{(s_1,a_1), (s_2,a_2), \cdots (s_k,a_k)\}$
 \item $P(O|\textbf{w})=P((s_1,a_1)|\textbf{w}) P((s_2,a_2)|\textbf{w}) \cdots P((s_k,a_k)|\textbf{w})$
 \item $P((s_i,a_i)|\mathbf{w})=\frac{e^{\alpha Q^\pi(s_i,a_i,\mathbf{w})}}{\sum\limits_{b \in A} e^{\alpha Q^\pi(s_i,b,\mathbf{w})}}$
 
\end{itemize}
BIRLI:
\begin{itemize}
\item Uses a different probability measure
\item $P(O|\mathbf{w}) \propto e^{-|\mathbf{w} ^\top (F_{O_\mathbf{w}} - F_O)|}$
\item Removes the dependence on the complete policy
\end{itemize}
\end{frame}

\section{Experiment setup}


\begin{frame}{Test Problem}
\begin{figure}[h!]
   
   \centering
     \includegraphics[scale=.3]{img1f.png}
   \label{gmf}
 \end{figure}
\begin{itemize}
\item The state $s=(x,t)$
\item $\phi_1$, collision with red object: 1 for a collision, 0 otherwise
\item $\phi_2$, movement: 1 for moving in either direction, 0 otherwise
\item $\phi_3$, collision with blue object: 1 for a collision, 0 otherwise
\item $R(s,a)=w_1\phi_1(s,a) + w_2\phi_2(s,a) + w_3\phi_3(s,a)$
\item $\gamma$ as $0.9$
\end{itemize}
\end{frame}

\begin{frame}{Test Problem}
\framesubtitle{Experiments}
Two sets of experiments\\
Set 1
\begin{itemize}
\item Artificial trajectories based on pre-defined weights
\item Goal: recover the correct weights
\end{itemize}
Set 2
\begin{itemize}
\item Trajectories under human control
\item Goal: reproduce similar behaviour
\end{itemize}
\end{frame}

\begin{frame}{Partially observable dynamic environment}
\begin{itemize}
 \item Visible states limited by the time horizon
 \item Policy evaluated at each time step
 \item $\pi_f(x,t)=\pi_t(x,t) \quad \forall x \in X, t\in T$
 \item The action frequency evaluation algorithm modified in a similar manner
\end{itemize}
\end{frame}

\begin{frame}{Metrics}
\begin{itemize}
 \item Path match: Percentage of states $s \in S$ common in the paths taken by the expert and the agent
 \item Policy match for the observed path: Percentage match of the actions $a \in A$ taken by the expert and the agent for the path taken by the expert
 \item Average difference in features per time step: Computed by summing the feature counts for the paths taken by the expert and the agent, taking the absolute value of the difference, and dividing it by $|X|$
\end{itemize}
\end{frame}

\begin{frame}{Experiment set 1}
 
 \begin{itemize}
  \item Actions may be deterministic or non-deterministic
  \item Action $a_i$ in state $s: (x,t)$ can result in state $s': (x',t+1)$, $x'=x+k-2, k \in \{1, 2, 3\}$
  \item Deterministic case: $P(s,a_i,s')=1$ when $k=i$, $0$ otherwise
  \item Non-deterministic case: $P(s,a_i,s')=0.8$ when $k=i$, $P(s,a_i,s')=0.1$ otherwise \item For the edge cases $P(s,a_i,s')=0.9$ when $k=i$ for valid actions,  $P(s,a_i,s')=0.1$ otherwise
 \end{itemize}
 \begin{figure}[h!]
   
   \centering
     \includegraphics[scale=.3]{img2f.png}
     \caption{The arrows indicate the invalid actions at the edges}
   \label{gmb}
 \end{figure}
\end{frame}

\begin{frame}[shrink=10]{Experiment set 1}
\framesubtitle{Results: Deterministic actions}
\begin{center}
  \begin{tabular}{ | l | c | c | c | c |}
    \hline
    Algorithm & Weights & Path Match & Policy Match & Average Difference \\ \hline
    QP & 0.9264706 & 79.88 & 96.78 & 0.0356871 \\        
       & 0.0882353 & 	   &       & 0.1260655 \\
       & 0.6911765 & 	   &       & 0.0444611 \\ \hline
    GA & 0.5817141 & 95.84 & 99.46 & 0.0060606 \\        
       & 0.0237875 & 	   &       & 0.0286501 \\
       & 0.6006902 & 	   &       & 0.0060606 \\ \hline
    ME & 0.2099072 & 27.35 & 69.54 & 0.2191048 \\        
       & 0.7900928 & 	   &       & 0.9138373 \\
       & 1.341D-08 & 	   &       & 0.1451539 \\ \hline
    \textbf{MEN} & 0.4768660 & \textbf{97.66} & \textbf{99.86} & \textbf{0.0} \\        
       & 0.0165905 & 	   &       & \textbf{0.0165289} \\
       & 0.5065435 & 	   &       & \textbf{0.0} \\ \hline
    BIRL & 0.4368039 & 53.12 & 65.84 & 0.0451011 \\        
       & 0.0 & 	   &       & 0.4914036 \\
       & 0.5631961 & 	   &       & 0.0421774 \\ \hline 
    BIRLI & 0.4867780 & 94.99 & 99.06 & 0.0096970 \\        
       & 0.0248060 & 	   &       & 0.0431956 \\
       & 0.4884160 & 	   &       & 0.0096970 \\ \hline
  \end{tabular}
\end{center}
\end{frame}

\begin{frame}[shrink=10]{Experiment set 1}
\framesubtitle{Results: Non-deterministic actions}
\begin{center}
  \begin{tabular}{ | l | c | c | c | c |}
      \hline
      Algorithm & Weights & Path Match & Policy Match & Average Difference \\ \hline
      QP & 0.5817141 & 55.98 & 84.69 & 0.0178033 \\        
         & 0.0237875 & 	   &       & 0.3870907 \\
         & 0.6006902 & 	   &       & 0.0153828 \\ \hline
      \textbf{GA} & 0.6532417 & \textbf{98.09} & \textbf{99.77} & \textbf{0.0062628} \\        
         & 0.0009902 & 	   &       & \textbf{0.0232378} \\
         & 0.6349674 & 	   &       & \textbf{0.0061540} \\ \hline
      ME & 0.9768493 & 63.23 & 87.79 & 0.0364202 \\        
         & 2.743D-08 & 	   &       & 0.1333707 \\
         & 0.0231507 & 	   &       & 0.0560314 \\ \hline
      MEN & 0.6362234 & 82.70 & 93.72 & 0.0305718 \\        
         & 6.943D-21 & 	   &       & 0.1098317 \\
         & 0.3637766 & 	   &       & 0.0229517 \\ \hline
      BIRL & 0.7904742 & 71.99 & 90.02 & 0.0302867 \\        
         & 0.0 & 	   &       & 0.1718813 \\
         & 0.2095258 & 	   &       & 0.0358048 \\ \hline 
      BIRLI & 0.4666431 & 88.25 & 95.63 & 0.0072912 \\        
         & 0.0 & 	   &       & 0.1137779 \\
         & 0.5333569 & 	   &       & 0.0078020 \\ \hline
    \end{tabular}
\end{center}
\end{frame}

\begin{frame}{Experiment set 2}
We considered two sets of rules:\\
\textbf{Rule set 1}
\begin{itemize}
 \setlength{\itemsep}{-2pt}
 \item Try to avoid the red objects
 \item Try to collect blue objects
\end{itemize}
\textbf{Rule set 2}
\begin{itemize}
 \setlength{\itemsep}{-2pt}
 \item Try to hit the red objects 
 \item Try to minimize movement
 \item Try to avoid the blue objects 
\end{itemize}
Actions are considered to be deterministic
\end{frame}

\begin{frame}[shrink=10]{Experiment set 2}
\framesubtitle{Rule set 1}
\begin{center}
  \begin{tabular}{ | l | c | c | c | c |}
    \hline
    Algorithm & Weights & Path Match & Policy Match & Average Difference \\ \hline
    QP & 0.0013509 & 39.86 & 76.57 & 0.1232874 \\        
       & 0.0006991 & 	   &       & 0.3835450 \\
       & 0.0401125 & 	   &       & 0.0740975 \\ \hline
    GA & 0.2666580 & \textbf{47.24} & 77.26 & 0.1027817 \\        
       & 0.0150257 & 	   &       & 0.2247087 \\
       & 0.3756208 & 	   &       & 0.0699799 \\ \hline
    ME & 0.2035503 & 25.48 & 68.78 & 0.1934495 \\        
       & 0.7964433 & 	   &       & 0.9488993 \\
       & 0.0000063 & 	   &       & 0.1281234 \\ \hline
    MEN & 0.2849231 & 43.87 & \textbf{77.46} & 0.1104534 \\        
       & 0.0658453 & 	   &       & 0.3742005 \\
       & 0.6492316 & 	   &       & 0.0540322 \\ \hline
    BIRLI & 0.3928222 & 32.74  & 53.94 & 0.1173798 \\        
          & 0 & 	   &       & 0.5801888 \\
          & 0.6071778 & 	   &       & 0.0593870 \\ \hline
  \end{tabular}
\end{center}
\end{frame}

\begin{frame}{ME: Poor performance}
 \begin{center}
 \movie[width=8cm,height=6cm,poster,autostart,showcontrols=true]{}{vid2.webm}
 \end{center}
\end{frame}

\begin{frame}[shrink=10]{Experiment set 2}
\framesubtitle{Rule set 2}
\begin{center}
  \begin{tabular}{ | l | c | c | c | c |}
      \hline
      Algorithm & Weights & Path Match & Policy Match & Average Difference \\ \hline
      QP & 0.12 & 34.67 & 84.55 & 0.2105596 \\        
         & 0.1371429 & 	   &       & 0.2970270 \\
         & 0.9057143 & 	   &       & 0.0053476 \\ \hline
      GA & 0.3174619 & \textbf{35.76} & 84.54 & 0.3135418 \\        
         & 0.3135551 & 	   &       & 0.3143837 \\
         & 0.7662550 & 	   &       & 0.0053476 \\ \hline
      ME & 2.909D-17 & 14.43 & \textbf{85.22} & 0.5744536 \\        
         & 1 & 	   &       & 0.4434879 \\
         & 6.377D-24 & 	   &       & 0.0321774 \\ \hline
      MEN & 0.0014155 & 14.43 & \textbf{85.22} & 0.5744536 \\        
         & 0.9985845 & 	   &       & 0.4434879 \\
         & 3.452D-21 & 	   &       & 0.0321774 \\ \hline
      BIRLI & 0.2159944 & 35.20 & 85.05 & \textbf{0.2082719} \\        
             & 0.2190940 & 	   &       & \textbf{0.2525666} \\
             & 0.5649116 & 	   &       & \textbf{0.0053476} \\ \hline
    \end{tabular}
\end{center}
\end{frame}

\section{Conclusion and future work}
\begin{frame}{Conclusion}
\begin{itemize}
 \item Algorithms may be theoretically sound but do not work well in the real world
 \item GA performed well consistently
 \item MEN, BIRLI outperformed their standard counterparts
 \item GA is computationally expensive to train
 \item BIRLI may fail in certain circumstances
\end{itemize}
\end{frame}

\begin{frame}{PhD proposal}
\framesubtitle{Planning based prediction with Inverse reinforcement learning}
\begin{itemize}
 \item Non-linear reward function
 \item Feature selection
 \item Continuous state spaces and actions
 \item Multiple agents
\end{itemize}
\end{frame}

%\begin{frame}{PhD proposal}
%\framesubtitle{A game-theoretic approach to autonomous security patrolling using micro aerial vehicles}
%\begin{itemize}
% \item Patrolling is one of the standard ways to address security threats
% \item Defender and attacker in a partially observable stochastic game
% \item Defender chooses action/policy first
% \item Game repeated till attacker arrested
% \item Micro aerial vehicles (MAVs) used for enforcement
%\end{itemize}
%\end{frame}
%%TODO change proposal
%\begin{frame}{PhD proposal}
%\framesubtitle{Goals}
%\begin{itemize}
% \item Find the patrol strategy for best security performance
% \item Complexity analysis
% \item Attacking may not act optimally
% \item Developing algorithms for MAVs relying on sensor fusion
%\end{itemize}
%\end{frame}

\begin{frame}
\begin{center}
\Huge
Thank You
\end{center}
\end{frame}

\end{document}
