\documentclass[compress, brown, professionalfonts]{beamer} 
\usetheme{Singapore} % Beamer Theme 
\usecolortheme{lily} % Beamer Color Theme 
\usefonttheme{serif}
\usepackage{graphicx}
\usepackage{subfigure}
\usepackage{epstopdf}
\usepackage{array}
\usepackage{epsfig}
\usepackage{movie15}
\newcommand{\muhat}{\hat{\mu}}


\begin{document}

\title{Reward Optimization in the Primate Brain: A POMDP Model of Decision Making Under Uncertainty}
\author{Yanping Huang}
\institute{University of Washington}
\date{\today}

\begin{frame}
\titlepage
\end{frame}

\section{Model Setup}

\begin{frame}
  \frametitle{Motivation: How Do Animals Handle Uncertainty?}
  \begin{itemize}
  \item  Probabilistic Inference: Bayesian Brain.
  \item Decision Making  Based on
    \begin{itemize}
      \item Noisy Observation/Unreliable Evidence.
      \item Future Rewards: Payoff, Urgency.
      \item Prior Knowledge
    \end{itemize}
  \end{itemize}
\end{frame}


\begin{frame}
\frametitle{Outline}
  \begin{center}
  \begin{itemize}
  \item {\large \textbf{Model of Decision Making Under Uncertainty}}
  \item {\large Learning Optimal Decision Strategy}
  \item {\large Model Predictions: Speed and Accuracy}
  \item {\large Effects of Prior Information}
   \end{itemize}
   \end{center}
\end{frame}

\begin{frame}
  \frametitle{Random Dots Motion Discrimination Tasks}
   \begin{center}
     \includegraphics[scale=0.5]{MonkeyRandomDots.png}
   \end{center}
\end{frame}

\begin{frame}
  \frametitle{Random Dots Motion Discrimination Tasks}
   \begin{center}
     \includegraphics[scale=0.6]{monkey1.png}
   \end{center}
\end{frame}


\begin{frame}
 \frametitle{Two Alternative Choices}
   \begin{center}
     \includegraphics[scale=0.6]{monkey2.png}
   \end{center}
\end{frame}

\begin{frame}
 \frametitle{Random Dots Motion Stimuli}
   \begin{center}
     \includegraphics[scale=0.6]{monkey3.png}
   \end{center}
\end{frame}

\begin{frame}
  \frametitle{Discriminate between Right and Left}
   \begin{center}
     \includegraphics[scale=0.6]{monkey4.png}
   \end{center}
\end{frame}




\begin{frame}
  \frametitle{Motion Strength with $c = -25.6\%$  }
   \begin{center}
     \begin{figure}[ht]
     \includemovie[poster,text={\includegraphics[width=8cm,height=6cm]{movie_thumb.png}},autoplay,mouse=true]{8cm}{6cm}{c_L_25_6.avi}
\end{figure}   
\end{center}
\end{frame}


\begin{frame}
  \frametitle{Motion Strength with $c = 6.4\%$}
   \begin{center}
     \begin{figure}[ht]
       \includemovie[poster,text={\includegraphics[width=8cm,height=6cm]       {movie_thumb.png}},autoplay,mouse=true]{8cm}{6cm}{c_R_6_4.avi}
\end{figure}   
\end{center}
\end{frame}



\begin{frame}
  \frametitle{Model Setup and Notations}
   In each trial, the experimenter chooses a fixed motion
strength $c$ $(-1 \le c \le +1)$:
\begin{itemize}
\item $-1$ corresponds to $100\%$
leftward motion (all dots moving leftward)
\item  $+1$ corresponds to
$100\%$ rightward motion (all dots moving rightward)
\item Intermediate
values of $c$ represent a corresponding fraction of dots moving
leftward or rightward. 
\end{itemize}
But $c$ is {\em unknown} to the agent.
\end{frame}




\begin{frame}
  \frametitle{Observation Model}
\begin{itemize}
\item $n$: the number of random dot samples on the screen.
\item $o_t \in \{0, \ldots, n\}$, the number of rightward moving
dots on the screen.
\item $n - o_t$, the number of leftward moving dots at time $t$.
\item $o_t$ follows a Binomial distribution $o_t \sim \mathrm{Bino}(n, \mu)$
\item $\mu = \frac{c+1}{2}$, the probability of an individual random dot moving in the rightward
direction
\end{itemize}
\end{frame}


\begin{frame}
  \frametitle{Model Setup and Notations}
\quad\ \textbf{Discriminating} between Right and Left  
\\ $\Leftrightarrow$ \textbf{Inferring} whether $\mu > 0.5$ (the sign of $c$) based on the Random Dots Motion Stimuli $o_{1:t}$
\end{frame}


\begin{frame}
  \frametitle{Model Setup and Notations}
   \begin{center}
    \includegraphics[scale=0.4]{model.jpg}
   \end{center}
\end{frame}

\begin{frame}
  \frametitle{Belief State}
  \begin{itemize}
  \item The parameter $\mu$ is hidden, inferred by $o_{1:t}$
   \item Maintain a belief $b_t$ over $\mu$, the posterior distribution
     \begin{eqnarray*}
       b_t(\mu) = \frac{\Pr[o_t|\mu]\Pr[\mu|o_{1:t-1}]}{\Pr[o_t|o_{1:t-1}]} 
     \end{eqnarray*}
   \item $b_t$ follows a Beta distribution $\textrm{Beta}(\alpha,\beta)$
  \begin{itemize}
  \item $\alpha = m_R + \alpha_0$, $m_R = \sum_{\tau=1}^t o_\tau$
   \item $\beta = m_L + \beta_0$,  $m_L = \sum_{\tau=1}^t (n - o_\tau)$
  \end{itemize}
 \item $b_t$ depends only on the number of rightward and leftward moving dots $m_R$ and $m_L$.
  \end{itemize}
\end{frame}


\begin{frame}
  \frametitle{Available Actions}
    \includegraphics[scale=0.2]{model.jpg}
   \begin{center}
     \begin{itemize}
     \item The policy $\pi$ is  a mapping from belief $b_t$ to actions $a_t$
     \item Actions $a_t\in \{A_R, A_L, A_S\}$
       \begin{itemize}
       \item $A_R$:  Rightward eye movement and terminate the trial.
       \item $A_L$:  Leftward eye movement and terminate the trial.
       \item $A_S$:  Wait for $o_{t+1}$
       \end{itemize}
     \end{itemize}
   \end{center}
\end{frame}


\begin{frame}
  \frametitle{Intermediate Reward}
     Intermediate reward $R(\mu, a)$ depends on the hidden state $\mu$ and the selected action $a$. Note that $R(\mu, a)$ cannot be directly observed by the agent, since $\mu$ is hidden.
      \begin{itemize}
      \item $R_P > 0$: positive reward for a correct choice, i.e., a rightward eye movement $A_R$ when $\mu > 0.5$ or a leftward eye movement $A_L$ when $\mu < 0.5$.
      \item $R_N \le 0$: negative reward (penalty) or nothing for an incorrect choice
      \item $R_S = -1$:  a unit penalty for each random dot sample
      \end{itemize}
\end{frame}

\begin{frame}
  \frametitle{Markov Process}
  \begin{center}
    \includegraphics[scale=0.6]{pomdp.png}
\end{center}
\end{frame}

\begin{frame}
  \frametitle{Markov Process}
    \includegraphics[scale=0.25]{pomdp.png}
    \begin{itemize}
    \item Only $o_t$ is observed in this decision process.
    \item Action $a_t$ depends on $b_t$, and thus depends on $o_{1:t-1}$ and $a_{1:t-1}$
     \item This process terminates only when $A_R$ or $A_L$ is selected.
    \item Receive total reward $\sum_t R_t(\mu_t,a_t)$ at the end.
    \end{itemize}
\end{frame}

 
\begin{frame}
  \frametitle{The POMDP Model}
  A POMDP consists of the following:
  \begin{itemize}
  \item Hidden state, $s(d,c) \in \{ d_R, d_L\} \otimes [0,1]$
  \item Observations $o_t \sim \Pr(o_t|s)$
  \item Belief $b_t = \Pr(s|o_{1:t})$
  \item Actions $a_t\in\{A_R, A_L, A_W\}$
  \item Reward $r(s, a_t) \in \{R_P, R_N, R_W\}$   
  \end{itemize}
The goal is to find an optimal policy $\pi^*$ that maximizes the expected total future reward 
      \begin{eqnarray*}
      v^* = \max_{\pi}{\textrm E}[\sum_{k=1}^{\infty} r_{t+k} | b_t]  
      \end{eqnarray*}
\end{frame}

\begin{frame}
  \frametitle{Belief Update}
  \begin{itemize}
  \item Given $o_{1:t}$, infer $s$.
  \item $\Pr(s) \rightarrow \Pr(s|o_1) \rightarrow \ldots \rightarrow \Pr(s|o_{1:t})$ using Bayes' rule
\begin{align*}
  \textrm{Posterior} &= \textrm{Likelihood} \times \textrm{Prior}/ \textrm{Normalization}\\
  \Pr(s|o_{1:t}) &= \Pr(o_t|s) \times \Pr(s|o_{1:t-1})/ \Pr(o_t|o_{1:t-1})
\end{align*}
\item With normal distributed initial belief $b_0 = N(0,\sigma^2)$ and evidence $\Pr(o_t|s) = N(s, \sigma^2)$,  we have $b_t = \Pr(s|o_{1:t}) = N(\mu_t = \frac{\sum_{i=1}^to_i}{t}, \sigma^2_t = \frac{\sigma^2}{t+1})$
\item Belief update: $b_{t-1}(\mu_{t-1},\sigma^2_{t-1}) \overset{o_t}{\to} b_t(\mu_t = \frac{t-1}{t}\mu_{t-1} + \frac{1}{t}o_t, \sigma^2_t = \frac{t}{t+1}\sigma^2_{t-1})$
  \end{itemize}
\end{frame}

\begin{frame}
  \frametitle{The POMDP Model}
  A POMDP consists of the following:
  \begin{itemize}
  \item A set of hidden state, in our model, a static variable $\mu$
  \item A set of possible observations $o_t \in \{0, \ldots, n\}$
  \item A set of actions $a_t\in\{A_R, A_L, A_S\}$
  \item The observation Model $\Pr[o_t|\mu]$
  \item The reward model $R(\mu, a_t)$   
  \end{itemize}
The goal is to find an optimal policy $\pi^*$ that maximizes the expected total future reward 
      \begin{eqnarray*}
      v^* = \max_{\pi}{\textrm E}[\sum_{k=1}^{\infty} r_{t+k} | b_t]  
      \end{eqnarray*}
\end{frame}



\section{Optimal Policy}

\begin{frame}
\frametitle{Outline}
  \begin{center}
  \begin{itemize}
  \item {\large Model of Decision Making Under Uncertainty}
  \item {\large \textbf{Learning Optimal Decision Strategy}}
  \item {\large Model Predictions: Speed and Accuracy}
  \item {\large Effects of Prior Information}
   \end{itemize}
   \end{center}
\end{frame}

\begin{frame}
  \frametitle{Bellman Equation}
  Remember the goal is to find an optimal policy $\pi^*$ that maximizes the expected total future reward 
\begin{eqnarray*}
v^*(b_t) &=& \max_{\pi}{\textrm E}[\sum_{k=1}^{\infty} r_{t+k}| b_t]\\
    &=& \max_{\pi}{\textrm E}[r_t +  \sum_{k=1}^{\infty} r_{t+k+1} | b_t] \\
    &=& \max_{a_t} \{ {\textrm E}[r_t(\mu, a_t) |b_t] + \sum_{b_{t+1}}T(b_t,a_t, b_{t+1}) v^*(b_{t+1}) \}
\end{eqnarray*}
\end{frame}

\begin{frame}
  \frametitle{The expected intermediate reward}
 Since the hidden state $\mu$ is unknown, one needs to compute the expected intermediate reward given the current belief state ($\alpha, \beta$):
\begin{eqnarray*}
  \label{eq:rewardGivenBelief}
  r(\alpha,\beta, A_S) &=& n R_S  \\
   r(\alpha,\beta, A_R) &=& \int_{\mu = 0}^1 R(\mu, A_R)\, \mathrm{Beta}(\mu|\alpha,\beta)\, d\mu \nonumber  \\
&=&  R_P  \times [1 - I_{0.5}(\alpha, \beta)] + R_N  \times
I_{0.5}(\alpha, \beta) \nonumber\\
   r(\alpha,\beta, A_L) &=&   R_N  \times [1 - I_{0.5}(\alpha, \beta)] + R_P  \times
I_{0.5}(\alpha, \beta)  \nonumber
\end{eqnarray*}
\end{frame}


\begin{frame}
  \frametitle{Belief Update}
\begin{itemize}
\item To compute the expected reward in the future, one needs to calculate the dynamics of the belief state:
\item Starting with a particular belief state $b_t$ and we take action $a_t$ and receive observation $o_t$ after taking that action. Then our next belief state $b_{t+1}$ is fully determined.
\item If the sets of $o_t$ and $a_t$ are finite, there are a finite number of possible next belief states, corresponding to each combination of action and observation
\end{itemize}
\end{frame}


\begin{frame}
  \frametitle{Belief Update}
\includegraphics[scale=0.4]{beliefUpdate.jpg}
\end{frame}


\begin{frame}
  \frametitle{Belief Update}
\begin{eqnarray*}
 T(b_t | b_{t-1}, A_S) &=& \Pr[\alpha', \beta' | \alpha, \beta, A_S] \nonumber \\
&=& \Pr[o_t|\alpha', \beta'] \delta_{\alpha' = \alpha + o_t} \delta_{\beta' = \beta + n - o_t} \\
 \Pr[o_t | \alpha, \beta] &=&  {n \choose o_t} {\textrm E}[\mu^{o_t} (1-\mu)^{n- o_t} | \alpha, \beta]\\ 
 &=& {n \choose o_t}\frac{B(\alpha + o_t, \beta + n - o_t)}{B(\alpha, \beta)}
\end{eqnarray*}
\end{frame}



\begin{frame}
  \frametitle{Dynamic Programming}
Finally, we have a contraction mapping:
  \begin{eqnarray*}
v^*(b_t)  = \max_{a_t} \{ {\textrm E}[r_t(\mu, a_t) |b_t] + \sum_{b_{t+1}}T(b_t,a_t, b_{t+1}) v^*(b_{t+1}) \}
\end{eqnarray*}
\begin{center}
\includegraphics[scale=0.35]{policyIteration.png}
\end{center}
\end{frame}


\section{Model Predictions}


\begin{frame}
\frametitle{Outline}
  \begin{center}
  \begin{itemize}
  \item {\large Model of Decision Making Under Uncertainty}
  \item {\large Learning Optimal Decision Strategy}
  \item {\large \textbf{Model Predictions: Speed and Accuracy}}
  \item {\large Effects of Prior Information}
   \end{itemize}
   \end{center}
\end{frame}




\begin{frame}
  \frametitle{Optimal Value}
\begin{center}
\includegraphics[scale=0.1]{value.jpg}
\end{center}
\end{frame}


\begin{frame}
  \frametitle{Belief State Space}
\begin{itemize}
\item The belief state $b$ can be parametrized by 
\begin{itemize}
  \item  $(\alpha,\beta)$
  \item  $(m_R, m_L)$
  \item  $(\hat{\mu}_m = \frac{m_R}{m_R+m_L}, m = m_R + m_L)$
\end{itemize}
\item Time $t$ has been encoded in the belief state $t = \frac{m}{n}$.
\item The optimal policy $\pi^*(b)$ and value $v^*(b)$
  are both joint functions of point estimator of $\hat{\mu}$  and time $m$.
 \end{itemize}
\end{frame}

\begin{frame}
  \frametitle{Optimal Policy}
\begin{center}
\includegraphics[scale=0.35]{policy.jpg}
\end{center}
\end{frame}



\begin{frame}
  \frametitle{Decision Making}
\begin{center}
\includegraphics[scale=0.35]{decisionMaking.png}
\end{center}
\end{frame}

\begin{frame}
  \frametitle{Decision Making}
Given a sequence of observations $\{o_1, \ldots, o_t \}$ with a fixed coherence $c$, we obtain a sequence of random variables $\{\muhat_1,\muhat_2,\ldots, \muhat_t\}$. 
\end{frame}


\begin{frame}
  \frametitle{Decision Making $c = 25.6\%$}
\begin{center}
\includegraphics[scale=0.1]{policy_0_256.jpg}
\end{center}
\end{frame}


\begin{frame}
  \frametitle{Decision Making with $c = 0\%$}
\begin{center}
\includegraphics[scale=0.1]{policy_0.jpg}
\end{center}
\end{frame}

\begin{frame}
  \frametitle{Speed and Accuracy}
Let $\Pr[t,R|c]$ be the joint probability mass
function that the agent makes a rightward choice at time $t$:
\begin{center}
 \begin{eqnarray*}
  \Pr[t,R|c] &=& \Pr[\muhat_t \in \Pi^R_t , \muhat_{t-1} \in \Pi^S_{t-1}, \ldots, \muhat_{1} \in \Pi^S_{1}|c] \\ 
\Pr[R|c] &=& \sum_{t=1}^{\infty} \Pr[t,R|c] \quad \quad \quad \mbox{Accuracy}\\
  RT_R(c) &=& \sum_{t=1}^{\infty} t \frac{\Pr[t,R|c]}{\Pr[R|c]} \quad \quad \quad  \mbox{Speed}
 \end{eqnarray*}
\end{center}
\end{frame}

\begin{frame}
  \frametitle{Speed and Accuracy}
\begin{center}
\includegraphics[scale=0.1]{PCRT.jpg}
\end{center}
\end{frame}

\section{Effects of Prior Information}

\begin{frame}
\frametitle{Outline}
  \begin{center}
  \begin{itemize}
  \item {\large Model of Decision Making Under Uncertainty}
  \item {\large Learning Optimal Decision Strategy}
  \item {\large Model Predictions: Speed and Accuracy}
  \item {\large \textbf{Effects of Prior Information}}
   \end{itemize}
   \end{center}
\end{frame}

\begin{frame}
  \frametitle{Combining Prior Information}
Decisions are often based on a combination of sensory evidence and
prior knowledge about the true state.
\begin{itemize}
  \item Use Bayesian combination when the prior learnt from previous trials is perfect, i.e., the hidden variable $\mu$ of the current trial is drawn from the prior distribution. 
 \item When the prior is misleading, Bayesian combination is not optimal.
\end{itemize}
\end{frame}


\begin{frame}
  \frametitle{Combining Prior Information}
A mixture model in which:
\begin{itemize}
\item  With probability $1 - \gamma$, $\mu$ is
drawn from the posterior distribution given observations $o_{1:t}$.
\item With probability $\gamma$,
$\mu$ is drawn from the  ``prior'' distribution learned from previous trials.
\end{itemize}
  \begin{eqnarray}
  \label{eq:priorUpdate}
b_t'(\mu) = \Pr[\mu | o_{1:t}, P_0] = (1 - \gamma) \Pr[\mu|o_{1:t}] + \gamma \textrm{Pr}_0[\mu]
\end{eqnarray}
\end{frame}





\begin{frame}
  \frametitle{Speed and Accuracy using Mixture Model}
The mixture model predicts
\[
\muhat_t = {\textrm E}[\mu|b_t'(\mu)] = (1-\gamma)\frac{m_R(t-1)}{t} + \frac{1-\gamma}{t}o_t + \gamma {\textrm E}_{\textrm{Pr}_0}[\mu].
\]
 Note that the relative weight of the prior $ {\textrm E}_{\textrm{Pr}_0}[\mu]$ compared to the new evidence $o_t$ is an increasing function of  time, causing the prior to exert more influence on the decision as time progresses.
\end{frame}

\begin{frame}
  \frametitle{Summary}
  \begin{itemize}
  \item Our model predicts psychometric and chronometric functions that are quantitatively close to those observed in monkeys. 
\item  We show through analytical derivations and numerical results that the optimal threshold for selecting overt actions is a declining function of time.
\item We provide a normative explanation for the dynamic bias signal
assumption  in which prior probability plays an
increasingly important role in the decision process over time.
  \end{itemize}
\end{frame}



\begin{frame}
  \frametitle{Future Work}
  \begin{itemize}
  \item A neural implementation based on Temporal Difference (TD) learning.
\item  Include action ``bailout'' for post decision wagering.
\item  Time varying motion strength.
\item  POMDP model on more general graphical models.
  \end{itemize}
\end{frame}




\begin{frame}
  \frametitle{Acknowledgement}
  \begin{center}
\includegraphics[scale=0.4]{ack.jpg}
\end{center}
\end{frame}

\end{document}
