\documentclass[11pt]{report}
\usepackage{amsmath}
\usepackage{graphicx}
%\usepackage{times}

\renewcommand*\thesection{\arabic{section}}
\newcommand{\EqnArr}[1]{\begin{align}#1\end{align}}
\newcommand{\Prob}{\mathrm{Prob}}
\newcommand{\Br}[1]{\left(#1\right)}
\newcommand{\CurlBr}[1]{\left\{#1\right\}}
\newcommand{\Trm}[1]{\textrm{#1}}

\begin{document}
\title{Hidden Markov Models Variants - report}
\author{Long Q Tran}
\maketitle

\section{Hidden Markov Models}

\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{hmm.eps}
\caption{The graphical model of Hidden Markov Model}
\label{fig:Hidden Markov Model}
\end{figure}

The joint probability of the state sequence and observation sequence is
\EqnArr{
\Prob\CurlBr{q_{1:T}, o_{1:T}} &= \pi_{q_1} \prod_{t=2}^{T}a_{q_{t-1}, q_t} \prod_{t=1}^{T} b_{q_t}(o_t),
}
where
\begin{itemize}
\item $q_{1:T} = (q_1, q_2, \ldots, q_T)$ is the state sequence, $q_t \in [s_1,\ldots, s_N]$.
\item $o_{1:T} = (o_1, o_2, \ldots, o_T)$ is the observation sequence, $o_t \in [v_1,\ldots, v_M]$.
\item $\pi_m = \Prob\CurlBr{q_1 = s_m}$ is the starting probability for state $s_m$.
\item $a_{m,n} = \Prob\CurlBr{q_{t+1} = s_n|q_t = s_m}$ is the state transition probability.
\item $b_m(k) = \Prob\CurlBr{o_t = v_k|q_t = s_m}$ is the state emission probability.
\end{itemize}
\subsection{Probability of an observation sequence}
We need to compute the marginal probability of an observation sequence
\EqnArr{
\label{eqn:obs_seq_prob}
\Prob\CurlBr{o_{1:T}} &= \sum_{q_{1:T}}\Prob\CurlBr{q_{1:T}, o_{1:T}}.
}
Equation~\eqref{eqn:obs_seq_prob} can be computed via dynamic programming,
also called the \emph{forward procedure}. Let us define the quantity
\EqnArr{
\alpha_t(m) &= \Prob\CurlBr{o_{1:t}, q_t = s_m} \\
&= \sum_{q_{1:t-1}}  \Prob\CurlBr{o_{1:t}, q_{1:t-1}, q_t = s_m} \\
&= \begin{cases} 
\pi_m b_m(o_1) & \Trm{if } t = 1\\
\sum_{n=1}^N \alpha_{t-1}(n) a_{n,m} b_m(o_t) & \Trm{if } t > 1
\end{cases}.
}
Then the needed quantity is
\EqnArr{
\Prob\CurlBr{o_{1:T}}  &= \sum_{m=1}^N \Prob\CurlBr{o_{1:T}, q_T = s_m} \\
&= \sum_{m=1}^N \alpha_T(m).
}

\subsection{The most probable state sequence}
We need to identify the state sequence such that
\EqnArr{
q_{1:T}^* &= \arg\max_{q_{1:T}} \Prob\CurlBr{q_{1:T}| o_{1:T}}\\
&= \arg\max_{q_{1:T}} \Prob\CurlBr{q_{1:T}, o_{1:T}}.
}
This maximum a posteriori (MAP) problem can be solved via the Viterbi algorithm which is basically the 
forward procedure above, only with summation changed to maximization.
Let us define the quantity
\EqnArr{
\kappa_t(m) &= \max_{q_{1:t-1}}  \Prob\CurlBr{o_{1:t}, q_{1:t-1}, q_t = s_m} \\
&= \begin{cases} 
\pi_m b_m(o_1) & \Trm{if } t = 1\\
\max_{n=1}^N \kappa_{t-1}(n) a_{n,m} b_m(o_t) & \Trm{if } t > 1
\end{cases}.
}
The MAP state sequence can be found using the following back tracking procedure
\EqnArr{
q_T^* &= \arg\max_m \kappa_T(m),\\
q_t^* &= \arg\max_n \kappa_t(n) a_{n, q_{t+1}^*}, \quad t = T-1,\ldots, 1.
}
\textbf{Partially known state sequence.}
In some cases, parts of the state sequence are known (e.g. partially annotated genes), we may
use this additional information to our advantage. The formula for $\kappa_t(m)$
needs a small change to work in these cases.
\EqnArr{
\kappa_t(m) &= \begin{cases} 
\pi_m b_m(o_1) & \Trm{if } t = 1\\
\max_{n=1}^N \kappa_{t-1}(n) a_{n,m} b_m(o_t) & \Trm{if } t > 1 \Trm{ and } q_{t-1} \Trm{ is hidden,}\\
\kappa_{t-1}(q_{t-1}) a_{q_{t-1},m} b_m(o_t) & \Trm{if } t > 1 \Trm{ and } q_{t-1} \Trm{ is known.}
\end{cases}.
}
The back-tracking procedure to find $q_{1:T}^*$ also changes accordingly.

\section{Explicit-Duration Hidden Markov Models}
The duration HMMs allow each state to repeat itself for a duration with certain
probability. The joint probability is
\EqnArr{
\Prob\CurlBr{q_{1:T}, d_{1:T}, o_{1:T}} &= \pi_{q_1} 
\prod_{\substack{t>1 \\ q_{t-1}\neq q_t}}^{T}a_{q_{t-1}, q_t} 
\prod_{\substack{t\geq 1 \\ q_{t-1}\neq q_t}}^{T} p_{q_t}(d_t) \prod_{t=1}^{T} b_{q_t}(o_t),
}
where
\begin{itemize}
\item $d_t$ is the duration of state $q_t$, $t = 1,\ldots,T$.
\item $p_m(d) = \Prob\CurlBr{\Trm{state } m \Trm{ repeats } d \Trm{ times}}$.
\end{itemize}

\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{dhmm.eps}
\caption{The graphical model of Duration Hidden Markov Model}
\label{fig:Duration Hidden Markov Model}
\end{figure}

\subsection{Probability of an observation sequence}
We need to compute the quantity
\EqnArr{
\Prob\CurlBr{o_{1:T}} &= \sum_{q_{1:T}, d_{1:T}}\Prob\CurlBr{q_{1:T}, d_{1:T}, o_{1:T}}.
}
Dynamic programming can be applied here. Let us define
\EqnArr{
\alpha_t(m, d) &= \Prob\CurlBr{q_t = s_m, d_t = d, o_{1:t}}\\
&=\sum_{q_{1:t-1}, d_{1:t-1}}\Prob\CurlBr{q_{1:t-1}, d_{1:t-1}, q_t = s_m, d_t = d, o_{1:t}}
}
Similar to the original HMMs formula, $\alpha_t(m, d)$ can also be defined recursively as follows
\EqnArr{
\alpha_t(m, d) &= \begin{cases}
\pi_m p_m(d) b_m(o_t), &  t=1\\
\begin{array}{c}\alpha_{t-1}(m, d+1) b_m(o_t) + \\ 
\sum_{n\neq m}\alpha_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t)
\end{array}
 & t > 1
\end{cases}.
}
Finally, the desired probability is
\EqnArr{
\Prob\CurlBr{o_{1:T}}  &= \sum_{m} \alpha_T(m, 1).
}
\subsection{The most probable state sequence and duration}

We need to compute the state sequence and the corresponding duration
\EqnArr{
(q_{1:T}^*, d_{1:T}^*) &= \arg\max_{q_{1:T}, d_{1:T}}\Prob\CurlBr{q_{1:T}, d_{1:T}, o_{1:T}}.
}
This optimization could also be solved via dynamic programming technique. Let us
define
\EqnArr{
\kappa_t(m, d) &= \max_{q_{1:t-1}, d_{1:t-1}}\Prob\CurlBr{q_{1:t-1}, d_{1:t-1}, q_t = s_m, d_t = d, o_{1:t}}\\
&= \begin{cases}
\pi_m p_m(d) b_m(o_t), &  t=1\\
\max \begin{pmatrix}\kappa_{t-1}(m, d+1) b_m(o_t), \\ 
\displaystyle\max_{n\neq m}\kappa_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t)
\end{pmatrix}, & t > 1
\end{cases}.
}  
The explanation for the formula above is that at time step $t$, if
the state and duration $(q_t, d_t) = (s_m, d)$ then either the previous
state and duration at time step $t-1$ are $(q_{t-1}, d_{t-1}) = (s_m, d+1)$ or another state
$s_n$ has terminated its repetition, $(q_{t-1}, d_{t-1}) = (s_n, 1)$, and transited to state $s_m$.

\textbf{Partially known state sequence.} If the state sequence is partially known,
the formula can also be adapted to provide efficient computation as follows
\EqnArr{
\kappa_t(m, d) &= \max_{q_{1:t-1}, d_{1:t-1}}\Prob\CurlBr{q_{1:t-1}, d_{1:t-1}, q_t = s_m, d_t = d, o_{1:t}}\\
&= \begin{cases}
\pi_m p_m(d) b_m(o_t), &  t=1,\\
\max \begin{pmatrix}\kappa_{t-1}(m, d+1) b_m(o_t), \\ 
\displaystyle\max_{n\neq m}\kappa_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t)
\end{pmatrix}, & t > 1 \Trm{ and } q_{t-1} \Trm{ is hidden,}\\
\kappa_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t), & t > 1 \Trm{ and } q_{t-1} = n \neq m,\\
\kappa_{t-1}(m, d+1) b_m(o_t), &  t > 1 \Trm{ and } q_{t-1} = m.
\end{cases}.
}  

\section{Duration HMMs with General Output Probabilistic Models}
In the previous section, the observations are independent of each other given
their corresponding states. In general, we could apply any probabilistic
model for the output sequence corresponding to a state duration. More
specifically, if at time $t$, state $s_m$ has a duration $d>0$, then
the observation sequence $o_{t:t+d-1}$ is generated from a distribution $P_m(o_{1:d})$.
Examples of this distribution are:
\begin{itemize}
\item Independent observations: 
\EqnArr{
P_m(o_{1:d}) = \prod_{i=1}^d b_m(o_i).
}
This distribution is exactly the case of explicit-duration HMMs in the previous section.
\item First order Markov chain:
\EqnArr{
P_m(o_{1:d}) = b_m(o_1) \prod_{i=2}^d b_m(o_i|o_{i-1}).
}
\item $r$-order Markov chain:
\EqnArr{
P_m(o_{1:d}) = \prod_{i=1}^d b_m(o_i|o_{i-r:i-1}),
}
with a convention that observation with negative indices are dropped
from the equation. This higher order Markov chain provides
more expressive power to the duration HMMs.
\end{itemize}

\subsection{The probability of an observation sequence}
The dynamic programming variables and the forward procedure are as follows:
\EqnArr{
\alpha_t(m, d) &=  \Prob\CurlBr{o_{1:t}, (q_t,d_t) = (s_m,d)}\\
& = p_m(d) P_m\Br{o_{t-d+1:t}} \sum_{m'\neq m, d'} \alpha_{t-d}(m', d') a_{m', m},
}
where
\begin{itemize}
\item $p_m(d)$ is the probability that state $s_m$ has duration $d$.
\item $a_{m',m}$ is the transition probability from state $s_{m'}$ to state $s_m$.
\item $P_m\Br{o_{t-d+1:t}}$ is the emission probability under state $s_m$ of
the observation sequence $o_{t-d+1:t}$.
\end{itemize}
The needed probability is
\EqnArr{
\Prob\CurlBr{o_{1:T}} &= \sum_{m,d} \alpha_T(m, d).
}
\subsection{The MAP sequence}
The dynamic programming variables and its recursive formulation are
\EqnArr{
\kappa_t(m, d) &= \max_{q_{1:t-1}, d_{1:t-1}} \Prob\CurlBr{o_{1:t}, (q_t,d_t) = (s_m,d)}\\
& = p_m(d) P_m\Br{o_{t-d+1:t}} \max_{m'\neq m, d'} \kappa_{t-d}(m', d') a_{m', m}.
\label{eqn:map dHMMs}
}
After the table $\kappa_t(m,d)$ is computed, the state and duration
sequence can be recovered via a back-tracking procedure (i.e. the $\arg\max_{m',d'}$
in Eq. \eqref{eqn:map dHMMs} needs to be stored).
\subsection{The Maximum Likelihood estimator - EM algorithm}
In order to implement the EM algorithm for dHMMs, we extend the
EM algorithm for the original HMMs. Let us define the quantity
\EqnArr{
\beta_t(m, d) &=  \Prob\CurlBr{o_{t+1:T}| (q_t,d_t) = (s_m,d)}.
}
This quantity is computed recursively (i.e. the backward procedure) as follows
\EqnArr{
\beta_t(m, d) &= \sum_{m'\neq m, d'} a_{m, m'} p_{m'}(d') P_{m'}(o_{t+1:t+d'}) \beta_{t+d'}(m',d').
}
\EqnArr{
\zeta_t(m,n) &= \Prob\CurlBr{q_t = s_m, q_{t+1} = s_n,o_{1:T}}, m\neq n\\
&= \sum_{d_1, d_2} \alpha_t(m, d_1) a_{m, n} p_n(d_2) P_n\Br{o_{t+1:t+d_2}} \beta_{t+d_2}(n, d_2).
}
\EqnArr{
\eta_t(m,d) &= \Prob\CurlBr{(q_t,d_t) = (s_m,d),o_{1:T}}\\
&= \alpha_t(m,d)\beta_t(m,d).
}
\EqnArr{
a_{m,n}^{new} &= \frac{\sum_{t=1}^T \zeta_t(m,n)}{\sum_{t=1}^T\sum_{n'\neq m} \zeta_t(m,n')}, \quad m\neq n,\\
p_m^{new}(d) &= \frac{\sum_{t=1}^T \eta_t(m,d)}{\sum_{t=1}^T \sum_{d'=1}^D \eta_t(m,d')}.
}
The last computation is the reestimation of the
output distribution $P_m(o_{1:d})$ for every state $s_m$. This computation 
certainly depends on the analytic form of the distributions. We can
derive the update formulas for each distribution introduced above.
\begin{itemize}
\item Independent observation
\item Markov chains
\end{itemize}
\end{document}