\documentclass[11pt]{report}
\usepackage{amsmath,amsfonts}
\usepackage{graphicx}
%\usepackage{times}

\renewcommand*\thesection{\arabic{section}}
\newcommand{\EqnArr}[1]{\begin{align}#1\end{align}}
\newcommand{\Prob}{\mathrm{Prob}}
\newcommand{\Br}[1]{\left(#1\right)}
\newcommand{\CurlBr}[1]{\left\{#1\right\}}
\newcommand{\SqBr}[1]{\left[#1\right]}
\newcommand{\Trm}[1]{\textrm{#1}}

\begin{document}
\title{Hidden Markov Models Variants - report}
\author{Long Q Tran}
\maketitle

\section{Hidden Markov Models}

\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{hmm.eps}
\caption{The graphical model of Hidden Markov Model}
\label{fig:Hidden Markov Model}
\end{figure}

The joint probability of the state sequence and observation sequence is
\EqnArr{
\Prob\CurlBr{q_{1:T}, o_{1:T}} &= \pi_{q_1} \prod_{t=2}^{T}a_{q_{t-1}, q_t} \prod_{t=1}^{T} b_{q_t}(o_t),
}
where
\begin{itemize}
\item $q_{1:T} = (q_1, q_2, \ldots, q_T)$ is the state sequence, $q_t \in \{s_1,\ldots, s_N\}$.
\item $o_{1:T} = (o_1, o_2, \ldots, o_T)$ is the observation sequence, $o_t \in \{v_1,\ldots, v_M\}$.
\item $\pi_m = \Prob\CurlBr{q_1 = s_m}$ is the starting probability for state $s_m$.
\item $a_{m,n} = \Prob\CurlBr{q_{t+1} = s_n|q_t = s_m}$ is the state transition probability.
\item $b_m(k) = \Prob\CurlBr{o_t = v_k|q_t = s_m}$ is the state emission probability.
\end{itemize}
\subsection{Probability of an observation sequence}
We need to compute the marginal probability of an observation sequence
\EqnArr{
\label{eqn:obs_seq_prob}
\Prob\CurlBr{o_{1:T}} &= \sum_{q_{1:T}}\Prob\CurlBr{q_{1:T}, o_{1:T}}.
}
Equation \eqref{eqn:obs_seq_prob} could be computed via dynamic programming,
also called the \emph{forward procedure}. Let us define the quantity
\EqnArr{
\alpha_t(m) &= \Prob\CurlBr{o_{1:t}, q_t = s_m} \\
&= \sum_{q_{1:t-1}}  \Prob\CurlBr{o_{1:t}, q_{1:t-1}, q_t = s_m} \\
&= \begin{cases} 
\pi_m b_m(o_1) & \Trm{if } t = 1\\
\sum_{n=1}^N \alpha_{t-1}(n) a_{n,m} b_m(o_t) & \Trm{if } t > 1
\end{cases}.
}
Then the needed quantity is
\EqnArr{
\Prob\CurlBr{o_{1:T}}  &= \sum_{m=1}^N \Prob\CurlBr{o_{1:T}, q_T = s_m} \\
&= \sum_{m=1}^N \alpha_T(m).
}

\subsection{The most probable state sequence}
We need to identify the state sequence such that
\EqnArr{
q_{1:T}^* &= \arg\max_{q_{1:T}} \Prob\CurlBr{q_{1:T}| o_{1:T}}\\
&= \arg\max_{q_{1:T}} \Prob\CurlBr{q_{1:T}, o_{1:T}}.
}
This Maximum A Posteriori (MAP) problem can be solved via the Viterbi algorithm, which is basically the 
forward procedure above, only with summation changed to maximization.
Let us define the quantity
\EqnArr{
\kappa_t(m) &= \max_{q_{1:t-1}}  \Prob\CurlBr{o_{1:t}, q_{1:t-1}, q_t = s_m} \\
&= \begin{cases} 
\pi_m b_m(o_1) & \Trm{if } t = 1\\
\max_{n=1}^N \kappa_{t-1}(n) a_{n,m} b_m(o_t) & \Trm{if } t > 1
\end{cases}.
}
The MAP state sequence can be found using the following back tracking procedure
\EqnArr{
q_T^* &= \arg\max_m \kappa_T(m),\\
q_t^* &= \arg\max_n \kappa_t(n) a_{n, q_{t+1}^*}, \quad t = T-1,\ldots, 1.
}
\textbf{Partially known state sequence.}
In some cases, parts of the state sequence are known (e.g.\ partially annotated genes); we may
use this additional information to our advantage. The formula for $\kappa_t(m)$
needs only a small change to work in these cases.
\EqnArr{
\kappa_t(m) &= \begin{cases} 
\pi_m b_m(o_1) & \Trm{if } t = 1\\
\max_{n=1}^N \kappa_{t-1}(n) a_{n,m} b_m(o_t) & \Trm{if } t > 1 \Trm{ and } q_{t-1} \Trm{ is hidden,}\\
\kappa_{t-1}(q_{t-1}) a_{q_{t-1},m} b_m(o_t) & \Trm{if } t > 1 \Trm{ and } q_{t-1} \Trm{ is known.}
\end{cases}.
}
The back-tracking procedure to find $q_{1:T}^*$ also changes accordingly.

\section{Explicit-Duration Hidden Markov Models}
The duration HMMs allow each state to repeat itself for a duration with certain
probability. The joint probability is
\EqnArr{
\Prob\CurlBr{q_{1:T}, d_{1:T}, o_{1:T}} &= \pi_{q_1} 
\prod_{\substack{t>1 \\ q_{t-1}\neq q_t}}^{T}a_{q_{t-1}, q_t} \,
p_{q_1}(d_1) \prod_{\substack{t>1 \\ q_{t-1}\neq q_t}}^{T} p_{q_t}(d_t) \prod_{t=1}^{T} b_{q_t}(o_t),
}
where
\begin{itemize}
\item $d_t$ is the duration of state $q_t$, $t = 1,\ldots,T$.
\item $p_m(d) = \Prob\CurlBr{\Trm{state } m \Trm{ repeats } d \Trm{ times}}$.
\end{itemize}

\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{dhmm.eps}
\caption{The graphical model of Duration Hidden Markov Model}
\label{fig:Duration Hidden Markov Model}
\end{figure}

\subsection{Probability of an observation sequence}
We need to compute the quantity
\EqnArr{
\Prob\CurlBr{o_{1:T}} &= \sum_{q_{1:T}, d_{1:T}}\Prob\CurlBr{q_{1:T}, d_{1:T}, o_{1:T}}.
}
Dynamic programming can be applied here. Let us define
\EqnArr{
\alpha_t(m, d) &= \Prob\CurlBr{q_t = s_m, d_t = d, o_{1:t}}\\
&=\sum_{q_{1:t-1}, d_{1:t-1}}\Prob\CurlBr{q_{1:t-1}, d_{1:t-1}, q_t = s_m, d_t = d, o_{1:t}}
}
Similar to the original HMMs formula, $\alpha_t(m, d)$ can also be defined recursively as follows
\EqnArr{
\alpha_t(m, d) &= \begin{cases}
\pi_m p_m(d) b_m(o_t), &  t=1\\
\begin{array}{c}\alpha_{t-1}(m, d+1) b_m(o_t) + \\ 
\sum_{n\neq m}\alpha_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t)
\end{array}
 & t > 1
\end{cases}.
}
Finally, the desired probability is
\EqnArr{
\Prob\CurlBr{o_{1:T}}  &= \sum_{m} \alpha_T(m, 1).
}
\subsection{The most probable state sequence and duration}

We need to compute the state sequence and the corresponding duration
\EqnArr{
(q_{1:T}^*, d_{1:T}^*) &= \arg\max_{q_{1:T}, d_{1:T}}\Prob\CurlBr{q_{1:T}, d_{1:T}, o_{1:T}}.
}
This optimization could also be solved via dynamic programming technique. Let us
define
\EqnArr{
\kappa_t(m, d) &= \max_{q_{1:t-1}, d_{1:t-1}}\Prob\CurlBr{q_{1:t-1}, d_{1:t-1}, q_t = s_m, d_t = d, o_{1:t}}\\
&= \begin{cases}
\pi_m p_m(d) b_m(o_t), &  t=1\\
\max \begin{pmatrix}\kappa_{t-1}(m, d+1) b_m(o_t), \\ 
\displaystyle\max_{n\neq m}\kappa_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t)
\end{pmatrix}, & t > 1
\end{cases}.
}  
The explanation for the formula above is that at time step $t$, if
the state and duration $(q_t, d_t) = (s_m, d)$ then either the previous
state and duration at time step $t-1$ are $(q_{t-1}, d_{t-1}) = (s_m, d+1)$ or another state
$s_n$ has terminated its repetition, $(q_{t-1}, d_{t-1}) = (s_n, 1)$, and transited to state $s_m$.

\textbf{Partially known state sequence.} If the state sequence is partially known,
the formula can also be adapted to provide efficient computation as follows
\EqnArr{
\kappa_t(m, d) &= \max_{q_{1:t-1}, d_{1:t-1}}\Prob\CurlBr{q_{1:t-1}, d_{1:t-1}, q_t = s_m, d_t = d, o_{1:t}}\\
&= \begin{cases}
\pi_m p_m(d) b_m(o_t), &  t=1,\\
\max \begin{pmatrix}\kappa_{t-1}(m, d+1) b_m(o_t), \\ 
\displaystyle\max_{n\neq m}\kappa_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t)
\end{pmatrix}, & t > 1 \Trm{ and } q_{t-1} \Trm{ is hidden,}\\
\kappa_{t-1}(n, 1) a_{n,m} p_m(d) b_m(o_t), & t > 1 \Trm{ and } q_{t-1} = n \neq m,\\
\kappa_{t-1}(m, d+1) b_m(o_t), &  t > 1 \Trm{ and } q_{t-1} = m.
\end{cases}.
}  

\section{Duration HMMs with General Output Probabilistic Models}
In the previous section, the observations are independent of each other given
their corresponding states. In general, we could apply any probabilistic
model for the output sequence corresponding to a state duration. More
specifically, if at time $t$, state $s_m$ has a duration $d>0$, then
the observation sequence $o_{t:t+d-1}$ is generated from a distribution $P_m(o_{1:d})$.
Examples of this distribution are:
\begin{itemize}
\item Independent observations: 
\EqnArr{
P_m(o_{1:d}) = \prod_{i=1}^d b_m(o_i).
}
This distribution is exactly the case of explicit-duration HMMs in the previous section.
\item First order Markov chain:
\EqnArr{
P_m(o_{1:d}) = b_m(o_1) \prod_{i=2}^d b_m(o_i|o_{i-1}).
}
\item $r$-order Markov chain:
\EqnArr{
P_m(o_{1:d}) = \prod_{i=1}^d b_m(o_i|o_{i-r:i-1}),
}
with the convention that observations with non-positive indices are dropped
from the equation. This higher-order Markov chain provides
more expressive power to the duration HMMs.
\end{itemize}

\subsection{The probability of an observation sequence}
The dynamic programming variables and the forward procedure are as follows:
\EqnArr{
\alpha_t(m) &=  \Prob\CurlBr{o_{1:t}, \textrm{ state } s_m \textrm{ ends at } t}\\
& =  \sum_{m'\neq m, d} \alpha_{t-d}(m') a_{m', m} p_m(d) P_m\Br{o_{t-d+1:t}},
}
where
\begin{itemize}
\item $p_m(d)$ is the probability that state $s_m$ has duration $d$.
\item $a_{m',m}$ is the transition probability from state $s_{m'}$ to state $s_m$.
\item $P_m\Br{o_{t-d+1:t}}$ is the emission probability under state $s_m$ of
the observation sequence $o_{t-d+1:t}$.
\end{itemize}
The needed probability is
\EqnArr{
\Prob\CurlBr{o_{1:T}} &= \sum_{m} \alpha_T(m).
}
\subsection{The MAP sequence}
The dynamic programming variables and its recursive formulation are
\EqnArr{
\kappa_t(m) &= \max_{q_{1:t-1}, d_{1:t-1}} \Prob\CurlBr{o_{1:t}, \textrm{ state } s_m \textrm{ ends at } t}\\
& =  \max_{m'\neq m, d} \kappa_{t-d}(m') a_{m', m} p_m(d) P_m\Br{o_{t-d+1:t}}.
\label{eqn:map dHMMs}
}
After the table $\kappa_t(m)$ is computed, the state and duration
sequence can be recovered via a back-tracking procedure (i.e.\ the $\arg\max_{m',d}$
in Eq.~\eqref{eqn:map dHMMs} needs to be stored).
\subsection{The Maximum Likelihood estimator - EM algorithm}
In order to implement the EM algorithm for dHMMs, we extend the
EM algorithm for the original HMMs. Let us define the quantity
\EqnArr{
\beta_t(m) &=  \Prob\CurlBr{o_{t+1:T}| \textrm{ state } s_m \textrm{ ends at } t}.
}
This quantity is computed recursively (i.e.\ the backward procedure) as follows
\EqnArr{
\beta_t(m) &= \sum_{m'\neq m, d: t+d \leq T} a_{m, m'} p_{m'}(d) P_{m'}(o_{t+1:t+d}) \beta_{t+d}(m').
}
We also need two more quantities
\EqnArr{
\alpha_t^\star(m) &=  \Prob\CurlBr{o_{1:t}, \textrm{ state } s_m \textrm{ begins at } t+1}\\
&= \sum_{m'\neq m} a_{m',m} \alpha_t(m')
}
and
\EqnArr{
\beta_t^\star(m) &=  \Prob\CurlBr{o_{t+1:T}| \textrm{ state } s_m \textrm{ begins at } t+1}\\
&= \sum_{d:t+d\leq T}p_m(d)\beta_{t+d}(m)P_m(o_{t+1:t+d})
}
The computation of $\alpha, \alpha^\star, \beta,\beta^\star$ (i.e. the forward and backward
procedure) is the E-step in the EM algorithm. In the M-step, we re-estimate the model's parameter
as follows
\EqnArr{
a_{m,n}^{new} &= \frac{1}{Z_m} \sum_{t=1}^T \alpha_t(m)a_{m,n} \beta_t^\star(n),\\
p_m(d)^{new} &= \frac{1}{G_m} \sum_{t=1}^T \alpha_t^\star(m)p_m(d) P_m(o_{t+1:t+d})\beta_{t+d}(m),
}
where $Z_m, G_m$ are normalizing factors such that the results are probabilities (i.e.\ they sum to 1).

The last computation is the re-estimation of the
output distribution $P_m(o_{1:d})$ for every state $s_m$. This computation 
certainly depends on the analytic form of the distributions. We can
derive an update formula for each distribution introduced above.
\begin{itemize}
\item Independent observations
\EqnArr{
b_m(k)^{new} &= \frac{1}{H_m} \sum_{t=1}^T \gamma_t(m)\mathbb{I}(o_t=v_k),
}
where
\EqnArr{
\gamma_t(m) &= \Prob\CurlBr{o_{1:T}, q_t = s_m}\\
&= \sum_{\tau<t} \alpha_\tau^\star(m)\beta_\tau^\star(m) - \sum_{\tau<t} \alpha_\tau(m)\beta_\tau(m)
}
\item First-order Markov chains
\EqnArr{
\gamma_t(m) &= \Prob\CurlBr{o_{1:T}, s_m \textrm{ begins at } t}\\
&=  \alpha_{t-1}^\star(m)\beta_{t-1}^\star(m)
}
\EqnArr{
\xi_t(m) &= \Prob\CurlBr{o_{1:T}, q_t = q_{t+1} = s_m}\\
&= \sum_{\tau<t} \alpha_\tau^\star(m)\beta_\tau^\star(m) - \sum_{\tau\leq t} \alpha_\tau(m)\beta_\tau(m)
}

\EqnArr{
b_m(k)^{new} &= \frac{1}{H_m} \sum_{t=1}^T \gamma_t(m)\mathbb{I}(o_t=v_k)
}
\EqnArr{
b_m(k_2|k_1)^{new} &= \frac{1}{H_{m,k_1}}\sum_{t=1}^T \xi_t(m)\mathbb{I}(o_t=v_{k_1}, o_{t+1} = v_{k_2})
}
\end{itemize}
\section{Examples}
\subsection{Toy example - Independent observations}
We generate a 100-state-change sequence using the following parameters
\EqnArr{
A &= \begin{bmatrix} 0.0 & 1.0 \\ 1.0 & 0.0 \end{bmatrix}\\
B &= \begin{bmatrix} 0.4 & 0.4 & 0.1 & 0.1 \\ 0.1 & 0.1 & 0.4 & 0.4 \end{bmatrix}\\
P &= \begin{bmatrix} 0.2 & 0.2 & 0.2 & 0.2 & 0.2 \\ 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 \end{bmatrix}
}
and use the EM algorithm to re-estimate the parameters (starting from random initialization)
and get
\EqnArr{
\widehat{A}  &= \begin{bmatrix} 0.0 & 1.0 \\ 1.0 & 0.0 \end{bmatrix}\\
\widehat{B} &=  \begin{bmatrix} 
    0.4267594  & 0.51597915 & 0.05656184 & 0.0006995611\\
    0.1661187 & 0.09074489 & 0.38057972 & 0.3625567048
\end{bmatrix}\\
\widehat{P} &= \begin{bmatrix} 
 0.1027 & 0.6737 & 0.0 & 0.1912  & 0.0322 \\
 0.1535 & 0.0847 & 0.0 & 0.0009 &0.0385 &
 0.0  & 0.1126 & 0.0830 & 0.0 & 0.5264
\end{bmatrix}
}
\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{dhmm_converge.eps}
\caption{Convergence in log-likelihood}
\label{fig:dhmm converge}
\end{figure}

\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{difference.eps}
\caption{Change in parameter over iterations}
\label{fig:dhmm difference}
\end{figure}

\subsection{Toy example - Markov chain observations}
We generate a 100-state-change sequence using the following parameters
\EqnArr{
A &= \begin{bmatrix} 0.0 & 1.0 \\ 1.0 & 0.0 \end{bmatrix}\\
B &= \begin{bmatrix} 0.4 & 0.4 & 0.1 & 0.1 \\ 0.1 & 0.1 & 0.4 & 0.4 \end{bmatrix}\\
B_1 &= \begin{bmatrix} 
   0.7 & 0.1 & 0.1 & 0.1 \\ 
   0.1 & 0.7 & 0.1 & 0.1 \\
   0.7 & 0.1 & 0.1 & 0.1 \\ 
   0.1 & 0.7 & 0.1 & 0.1 \\
\end{bmatrix}
}
\EqnArr{
B_2 &= \begin{bmatrix} 
   0.1 & 0.1 & 0.7 & 0.1 \\ 
   0.1 & 0.1 & 0.1 & 0.7 \\
   0.1 & 0.1 & 0.7 & 0.1 \\ 
   0.1 & 0.1 & 0.1 & 0.7 \\
\end{bmatrix}\\
P &= \begin{bmatrix} 0.2 & 0.2 & 0.2 & 0.2 & 0.2 \\ 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 & 0.1 \end{bmatrix}
}

The estimated parameter set is
\EqnArr{
\widehat{A}&= \begin{bmatrix} 0.0 & 1.0 \\ 1.0 & 0.0 \end{bmatrix}\\
\widehat{B}&=\begin{bmatrix} 
    0.1370 &  0.1370 & 0.7259 & 0\\
    0.0000 &  0.0000 & 0.0000 & 1
\end{bmatrix} 
}
\EqnArr{
\widehat{B_1}&=\begin{bmatrix} 
   & A & C & T & G\\
   A & 0.6667 &0.0000 &0.0000 &0.3333 \\
   C &0.0000 &1.0000& 0.0000 &0.0000\\
   T &1.0000 &0.0000 &0.0000& 0.0000\\
   G &0.0000 & 1.0000 & 0.0000 &0.0000
\end{bmatrix} \\
\widehat{B_2}&=\begin{bmatrix} 
   & A & C & T & G\\
   A &0.0000&0.0000 &1.0000& 0.0000\\
   C &0.0000 &0.0000 &0.0000& 1.0000\\
   T &0.0000 &0.2500 &0.7500 &0.0000\\
   G &0.06667 &0.0000 &0.2000 &0.7333
\end{bmatrix} \\
\widehat{P} &= \begin{bmatrix} 
0.0000 &0.2500 &0.2500 &0.5000& 0.0000\\
0.0000 &0.0000& 0.0000&0.0000& 0.4000&
0.0000 &0.6000& 0.0000& 0.0000& 0.0000
\end{bmatrix}
}
\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{markov_dhmm_converge.eps}
\caption{Convergence in log-likelihood (markov chains)}
\label{fig:markov dhmm converge}
\end{figure}

\begin{figure}[htb!]
\centering
\includegraphics[width=\textwidth]{markov_dhmm_difference.eps}
\caption{Change in parameter over iterations (markov chains)}
\label{fig:markov dhmm difference}
\end{figure}

\end{document}