\documentclass[11pt]{report}
\usepackage{amsmath,amsfonts,amssymb,bm}
\usepackage{graphicx}
\usepackage{algorithm, algorithmic}

\renewcommand*\thesection{\arabic{section}}
\newcommand{\EqnArr}[1]{\begin{align}#1\end{align}}
%\newcommand{\Prob}{\mathrm{Pr}}
\newcommand{\Cal}[1]{\mathcal{#1}}
\newcommand{\Rm}[1]{\mathrm{#1}}
\newcommand{\Bb}[1]{\mathbb{#1}}
\newcommand{\Br}[1]{\left(#1\right)}
\newcommand{\ClBr}[1]{\left\{#1\right\}}
\newcommand{\SqBr}[1]{\left[#1\right]}
\newcommand{\Trm}[1]{\textrm{#1}}
\newcommand{\Prob}[1]{\mathrm{Pr}\ClBr{#1}}
\renewcommand{\Bar}[1]{\overline{#1}}

\title{Motif alignment - report}
\author{Long Q Tran}

\begin{document}
\maketitle

\section{The motif finding problem}\label{sec:intro}
The motif finding problem can be decomposed into two related tasks:

\begin{enumerate}
\item Finding sequence motifs in a global Multiple Sequence Alignment, and
\item Producing a scoring matrix or frequency matrix to detect motifs in future sequences.
\end{enumerate}

The dataset consists of $n$ sequences $\Cal{D} = \ClBr{\bm{x}_i, i=1,2,\ldots, n}$, where
$\bm{x}_i = x_{i1},\ldots,x_{iL}$ is the $i$-th sequence. For simplicity,
we assume that all sequences have the same length, $L$. We need to find
in each sequence the position of the motif, $p_i, i=1,\ldots,n$, and its length, $G$, such that
the score function, $\Cal{S}(\Cal{D}, \bm{p}, G)$, achieves maximum. The score
function is a function of the dataset and the location of the motifs in all sequences. We
also assume that the motifs have the same length, $G$, in all sequences.

In the following development, we let $\bm{y}_i = x_{i,p_i}, \ldots, x_{i,p_i+G-1}$ denote the motif
in sequence $\bm{x}_i$ given the motif's location and length. We also let
$\Bar{\bm{y}_i} = x_{i,1},\ldots,x_{i,p_i-1}, x_{i,p_i+G},\ldots,x_{iL}$ denote the rest of the sequence.

The $j$-th position frequency of the motif, $f_j(a)$, is the frequency that a sequence symbol
appears in the $j$-th position of the motif. This frequency is essentially the histogram
of symbols in $\ClBr{y_{i,j}, i=1,\ldots,n}$. Meanwhile, the background frequency, $f_b(a)$, is the frequency
that a sequence symbol appears in the non-motif part of all sequences (in any position). This
frequency is the histogram of symbols in $\Bar{\bm{y}_i}, i=1,\ldots, n$. These frequencies 
satisfy
\EqnArr{
f_j(a)\geq0, & \sum_a f_j(a) = 1\\
f_b(a)\geq0, & \sum_a f_b(a) = 1.
}

\subsection{A brief survey of current methods}
Motif finding methods (or Multiple Sequence Alignment methods) differ in their definitions
of the score function, $\Cal{S}(\Cal{D}, \bm{p}, G)$, and their techniques to achieve maximum
of this function.


\section{A Position Model for motif alignment}
The position model assumes that there is a generative process that generates the
sequence, the motif location, the motif itself, and the non-motif (background) part following the joint probability
\EqnArr{\label{eqn:joint probability}
\Prob{\bm{x}, p} &= \Prob{p} \times \Prob{\bm{y}} \times \Prob{\Bar{\bm{y}}},
}
where
\EqnArr{
\Prob{p} &= \alpha_p, \textrm{(motif location frequency)}, \\
\Prob{\bm{y}} &= \prod_{j=1}^G f_j(y_{j}), \textrm{(motif)}\\
\label{eqn:joint probability end}
\Prob{\Bar{\bm{y}}} &= \prod_j f_b(\Bar{y}_{j}), \textrm{(background)}.
}
The motif position frequencies, $\alpha_p$, satisfy $\alpha_p \geq 0$, and $\sum_p \alpha_p = 1$.

The score function is the likelihood of the dataset given the motif location frequencies, motif
frequencies and background frequencies
\EqnArr{
\Cal{S}(\Cal{D} | \bm{\alpha}, \bm{f}_b, \bm{f}_j) &= \prod_{i=1}^n \Prob{\bm{x}_i| \bm{\alpha}, \bm{f}_b, \bm{f}_j}
\\
&= \prod_{i=1}^n \sum_p \Prob{\bm{x}_i, p| \bm{\alpha}, \bm{f}_b, \bm{f}_j} 
\label{eqn:total probability}.
}
The probability in \eqref{eqn:total probability} is given by \eqref{eqn:joint probability}-\eqref{eqn:joint probability end}.

\subsection{A Maximum Likelihood Estimator}
Following the EM paradigm, we compute the posterior probability of the motif position for the $i$-th
sequence as (E-step)
\EqnArr{
\gamma_i(p) &= \Prob{p|\bm{x}_i} \\
&= \frac{\Prob{\bm{x}_i, p}}{\Prob{\bm{x}_i}} = \frac{\Prob{\bm{x}_i, p}}{\sum_{p'} \Prob{\bm{x}_i, p'}},
\label{eqn:position model E-step gamma}\\
z_p &= \sum_i \gamma_i(p).
\label{eqn:position model E-step}
}

The reestimation of model parameters (M-step) is as follows
\EqnArr{
\label{eqn:position model alpha new}
\alpha_p^{\Rm{new}} &= \frac{z_p}{\sum_{p'}z_{p'}}\\
\label{eqn:position model fb new}
f_b^{\Rm{new}}(a) &= \frac{\sum_i\sum_p\gamma_i(p)\sum_{j\in\Bar{\bm{y}}} \Bb{I}(\Bar{y}_{ij} = a)}
{\sum_i\sum_p\gamma_i(p)|\Bar{\bm{y}_i}|}\\
\label{eqn:position model fj new}
f_j^{\Rm{new}}(a) &= \frac{\sum_i\sum_p\gamma_i(p) \Bb{I}(y_{ij} = a)}
{\sum_i\sum_p\gamma_i(p)},
}
where $\Bb{I}(\cdot)$ is the indicator function that returns $1$ if the predicate
is true, and $0$ otherwise.

For Viterbi training, we can replace the E-step above with a Viterbi pass
\EqnArr{
\gamma_i(p) &=\begin{cases}
1 & p = \arg\max_{p'} \Prob{p'|\bm{x}_i}\\
0 & \Rm{otherwise}
\end{cases}.
\label{eqn:position model Viterbi gamma}
}

\begin{algorithm}
\caption{Maximum Likelihood Estimator for the Position Model}
\label{alg:MLE for Position Model}
\begin{algorithmic}
\FOR{$\Rm{iter} = 1, 2, \ldots$}
	\STATE (\emph{E-step}). Compute $\gamma_i(p), z_p, \forall i,p$ via \eqref{eqn:position model E-step gamma}
	or \eqref{eqn:position model Viterbi gamma}, and \eqref{eqn:position model E-step}.
	\STATE (\emph{M-step}). Reestimate $\bm{\alpha}, \bm{f}_b, \bm{f}_j$ via \eqref{eqn:position model alpha new},
	\eqref{eqn:position model fb new}, \eqref{eqn:position model fj new}.
\ENDFOR
\end{algorithmic}
\end{algorithm}

\section{Simulated Annealing}
In order to formulate the simulated annealing algorithm for motif finding, we define
the following:
\begin{itemize}
\item \textbf{State}: the collection of motif locations in all sequences in the dataset
is a \emph{state}, $\bm{p} = (p_1,\ldots,p_n)$.
\item \textbf{Frequency}: Given a dataset $\Cal{D}$ and a state $\bm{p}$ we could
compute the motif frequency $\bm{f}_j$, and background frequency $\bm{f}_b$
following the definition given in Section~\ref{sec:intro}.
\item \textbf{Score}: A score is assigned to a state $\bm{p}$ as follows
\EqnArr{\label{eqn:SA score}
\Cal{S}(\Cal{D}, \bm{p}) & = \frac{1}{n}\sum_i s(\bm{x}_i, p_i),\\
s(\bm{x}, p) &= \log\frac{\prod_{j=1}^G f_j(y_{j})}{\prod_{j=1}^G f_b(y_{j})}.
}
The score $s(\bm{x}, p)$ evaluates how easily we can distinguish the motif
from the background.
\end{itemize}

Given the definitions of state, frequency, and score, we outline the simulated annealing process in Algorithm~\ref{alg:simulated annealing}.
\begin{algorithm}
\caption{Simulated Annealing for Motif Finding}
\label{alg:simulated annealing}
\begin{algorithmic}
\STATE Generate a random state $\bm{p}_0$.
\STATE $\bm{p} = \bm{p}_0$.
\FOR{$t = 1, 2, \ldots, K$}
	\STATE Compute current score $S = \Cal{S}(\Cal{D}, \bm{p})$ via \eqref{eqn:SA score}.
	\STATE Choose a neighbor state $\bm{p}'$ of $\bm{p}$ and compute its score $S' = \Cal{S}(\Cal{D}, \bm{p}')$.
	\IF {$S' \geq S$}
		\STATE $\bm{p} = \bm{p'}$.
	\ELSE
		\STATE Let $r$ be a uniform random number in $\SqBr{0,1}$.
	  \IF {$\exp\ClBr{-|S' - S| / T_t} > r$}
		\STATE $\bm{p} = \bm{p'}$.
	  \ENDIF
	\ENDIF
\ENDFOR
\end{algorithmic}
\end{algorithm}

\subsection{Choice of neighbor states}
There are many ways to produce a neighbor state of a given state $\bm{p}$. In our implementation,
we generate a set of candidates and choose the best candidate (max score) as the neighbor state.
The following are the candidate generation methods:
\begin{itemize}
\item We pick a sequence $\bm{x}_i$ and change the corresponding motif location $p_i$ to $p_i'$
randomly.
\item We shift all motif locations $p_i, i=1,\ldots, n$ by an offset $d = -2,-1,+1,+2$.
\end{itemize}

\subsection{Temperature schedule}
Another crucial aspect of the SA algorithm is the temperature schedule over the iterations. As
$T_t$ appears in $\exp\ClBr{-|S' - S| / T_t}$, the temperature should be of the same order of
magnitude as the expected change in score, $|S'-S|$. Empirically we found that the score
change is about $10^{-3}$ in each iteration. Thus we let $T_1 = 10^{-3}$ and decrease it
linearly to $T_K = 0$ at the final iteration.


\section{Experiment Results}
We compare MEME, the position model, and simulated annealing on synthetic datasets
and a real dataset.

\subsection{Synthetic datasets}
We generate 1000 sequences of length 30 with motifs of length 6 with the parameters
specified in Table~\ref{tbl:synthetic frequency}.

\begin{table}[ht!]
  \caption{Synthetic data motif and background frequency}
  \label{tbl:synthetic frequency}
  \centering
  \begin{tabular}{| c || c| c | c | c |}
  \hline   &  A & C & T & G \\ \hline \hline
  $\bm{f}_b$ & 0.25 & 0.25 & 0.25 & 0.25\\ \hline
  $\bm{f}_1$ & \textbf{0.6} & 0.2 & 0.1 & 0.1\\ \hline
  $\bm{f}_2$ & \textbf{0.7} & 0.05 & 0.15 & 0.1\\ \hline
  $\bm{f}_3$ & 0.2 & \textbf{0.7} & 0.05 & 0.05\\ \hline
  $\bm{f}_4$ & 0.2 & 0.1 & \textbf{0.6} & 0.1\\ \hline
  $\bm{f}_5$ & 0.2 & 0.1 & 0.1 & \textbf{0.6}\\ \hline
  $\bm{f}_6$ & 0.1 & 0.1 & 0.1 & \textbf{0.7}\\ \hline
  \end{tabular}
\end{table}

The motif location, $p$, is chosen randomly in the range $\ClBr{1,2,\ldots,25}$ with
probabilities resembling a Normal or Laplace distribution, but on discrete values, as follows
\EqnArr{
\Prob{p} &  \propto \exp\ClBr{-\frac{(p-\mu)^2}{2\sigma^2}},\\
\Prob{p} &  \propto \exp\ClBr{-\frac{|p-\mu|}{\sigma}}.
}
Note that a uniform distribution can be approximated by choosing a very large
deviation, $\sigma$.

We would vary the deviation $\sigma$ to generate different datasets in order to
check
\begin{itemize}
\item Whether the methods could recover the location of the motifs.
\item Whether the methods could recover the motif and background frequencies.
\end{itemize}

\subsection{Real dataset}
The dataset consists of 900 sequences, each with 50 nucleotides. These are \emph{Bacillus subtilis} non-coding sequences selected upstream of genes, which are expected to contain a ribosome binding site (RBS) of length 6 nucleotides.

%\begin{figure}[htb!]
%\centering
%\includegraphics[width=\textwidth]{../fig/markov_diff.eps}
%\caption{Difference to ground truth over iterations (markov chains)}
%\label{fig:Hidden Markov Model markov diff}
%\end{figure}

\end{document}