%% BioMed_Central_Tex_Template_v1.06
%%                                      %
%  bmc_article.tex            ver: 1.06 %
%                                       %

%%IMPORTANT: do not delete the first line of this template
%%It must be present to enable the BMC Submission system to 
%%recognise this template!!

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                     %%
%%  LaTeX template for BioMed Central  %%
%%     journal article submissions     %%
%%                                     %%
%%         <14 August 2007>            %%
%%                                     %%
%%                                     %%
%% Uses:                               %%
%% cite.sty, url.sty, bmc_article.cls  %%
%% ifthen.sty. multicol.sty		   %%
%%				      	   %%
%%                                     %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                                                 %%	
%% For instructions on how to fill out this Tex template           %%
%% document please refer to Readme.pdf and the instructions for    %%
%% authors page on the biomed central website                      %%
%% http://www.biomedcentral.com/info/authors/                      %%
%%                                                                 %%
%% Please do not use \input{...} to include other tex files.       %%
%% Submit your LaTeX manuscript as one .tex document.              %%
%%                                                                 %%
%% All additional figures and files should be attached             %%
%% separately and not embedded in the \TeX\ document itself.       %%
%%                                                                 %%
%% BioMed Central currently uses the MikTeX distribution           %%
%% of TeX and LaTeX for Windows.  This is available from           %%
%% http://www.miktex.org                                           %%
%%                                                                 %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\NeedsTeXFormat{LaTeX2e}[1995/12/01]
\documentclass[10pt]{bmc_article}    



% Load packages
\usepackage{cite} % Make references as [1-4], not [1,2,3,4]
\usepackage{url}  % Formatting web addresses  
\usepackage{ifthen}  % Conditional 
\usepackage{multicol}   %Columns
\usepackage[utf8]{inputenc} %unicode support
\usepackage{multirow}
\usepackage{rotating}
%\usepackage[applemac]{inputenc} %applemac support if unicode package fails
%\usepackage[latin1]{inputenc} %UNIX support if unicode package fails
\urlstyle{rm}
 
 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%	
%%                                             %%
%%  If you wish to display your graphics for   %%
%%  your own use using includegraphic or       %%
%%  includegraphics, then comment out the      %%
%%  following two lines of code.               %%   
%%  NB: These line *must* be included when     %%
%%  submitting to BMC.                         %% 
%%  All figure files must be submitted as      %%
%%  separate graphics through the BMC          %%
%%  submission process, not included in the    %% 
%%  submitted article.                         %% 
%%                                             %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%                     


%\def\includegraphic{}
%\def\includegraphics{}
\usepackage{subfigure}
\usepackage{color}



\setlength{\topmargin}{0.0cm}
\setlength{\textheight}{21.5cm}
\setlength{\oddsidemargin}{0cm} 
\setlength{\textwidth}{16.5cm}
\setlength{\columnsep}{0.6cm}

\newboolean{publ}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                              %%
%% You may change the following style settings  %%
%% Should you wish to format your article       %%
%% in a publication style for printing out and  %%
%% sharing with colleagues, but ensure that     %%
%% before submitting to BMC that the style is   %%
%% returned to the Review style setting.        %%
%%                                              %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
 

%Review style settings
%\newenvironment{bmcformat}{\begin{raggedright}\baselineskip20pt\sloppy\setboolean{publ}{false}}{\end{raggedright}\baselineskip20pt\sloppy}

%Publication style settings
%\newenvironment{bmcformat}{\fussy\setboolean{publ}{true}}{\fussy}

%New style setting
\newenvironment{bmcformat}{\baselineskip20pt\sloppy\setboolean{publ}{false}}{\baselineskip20pt\sloppy}

% Begin ...
\begin{document}
\begin{bmcformat}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                          %%
%% Enter the title of your article here     %%
%%                                          %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\title{Accelerating read mapping with FastHASH} 
 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                          %%
%% Enter the authors here                   %%
%%                                          %%
%% Ensure \and is entered between all but   %%
%% the last two authors. This will be       %%
%% replaced by a comma in the final article %%
%%                                          %%
%% Ensure there are no trailing spaces at   %% 
%% the ends of the lines                    %%     	
%%                                          %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


% Can and Onur can be switched
\author{Hongyi Xin$^1$,
        Donghyuk Lee$^1$,
        Farhad Hormozdiari$^2$,
        Samihan Yedkar$^1$,
        Onur Mutlu$^1$\correspondingauthor\email{Onur Mutlu\correspondingauthor - onur@cmu.edu}, and
        Can Alkan$^3$\correspondingauthor\email{Can Alkan\correspondingauthor - calkan@cs.bilkent.edu.tr}
}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                          %%
%% Enter the authors' addresses here        %%
%%                                          %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\address{%
    \iid(1)Dept. of Computer Science and Electrical and Computer Engineering, Carnegie Mellon University, Pittsburgh, PA, 15213, United States.
    \iid(2)Dept. of Computer Science, University of California Los Angeles, Los Angeles, CA, 90095, United States.
    \iid(3)Dept. of Computer Engineering, Bilkent University, Ankara, 06800, Turkey.
}%

\maketitle

Email addresses: \\

\noindent Hongyi Xin: hxin@cmu.edu \\
Donghyuk Lee: donghyu1@cmu.edu\\
Farhad Hormozdiari: fhormoz@cs.ucla.edu\\
Samihan Yedkar: syedkar@cmu.edu\\
Onur Mutlu: onur@cmu.edu\\
Can Alkan: calkan@cs.bilkent.edu.tr \\

\noindent Corresponding authors: Onur Mutlu and Can Alkan

\clearpage

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                          %%
%% The Abstract begins here                 %%
%%                                          %%  
%% Please refer to the Instructions for     %%
%% authors on http://www.biomedcentral.com  %%
%% and include the section headings         %%
%% accordingly for your article type.       %%   
%%                                          %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\begin{abstract}
        % Do not use inserted blank lines (ie \\) until main body of text.
%\paragraph*{Background} 
With the introduction of next generation sequencing (NGS) technologies, we are
facing an exponential increase in the amount of genomic sequence data.  The
success of all medical and genetic applications of next-generation sequencing
critically depends on the existence of computational techniques that can
process and analyze the enormous amount of sequence data quickly and
accurately.  Unfortunately, current read mapping algorithms have
difficulty coping with the massive amounts of data generated by NGS.

%\paragraph*{Results} 
We propose a new algorithm, FastHASH, which drastically improves the performance
of the seed-and-extend type read mapping algorithms, while maintaining the
sensitivity, accuracy and comprehensiveness of such methods.  FastHASH is a
generic algorithm compatible with all seed-and-extend class read mapping
algorithms, and it introduces two main contributions, namely {\it Adjacency
Filtering}, and {\it Cheap K-mer Selection}.

%\paragraph*{Conclusion} 
We implemented FastHASH and merged it into the codebase of the popular read
mapping program, mrFAST. We observed 5- to 15-fold speedup while keeping
mapping sensitivity at 100\%.  \end{abstract}

\clearpage

\ifthenelse{\boolean{publ}}{\begin{multicols}{2}}{}




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                                          %%
%% The Main Body begins here                %%
%%                                          %%
%% Please refer to the instructions for     %%
%% authors on:                              %%
%% http://www.biomedcentral.com/info/authors%%
%% and include the section headings         %%
%% accordingly for your article type.       %% 
%%                                          %%
%% See the Results and Discussion section   %%
%% for details on how to create sub-sections%%
%%                                          %%
%% use \cite{...} to cite references        %%
%%  \cite{koon} and                         %%
%%  \cite{oreg,khar,zvai,xjon,schn,pond}    %%
%%  \nocite{smith,marg,hunn,advi,koha,mouse}%%
%%                                          %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%




%%%%%%%%%%%%%%%%
%% Background %%
%%
\section*{Introduction} \label{sec:introduction}

The massively parallel sequencing, or so-called next-generation sequencing
(NGS), technologies have substantially changed the way biological research is
performed since 2000~\cite{HONGYI}. With these new DNA sequencing platforms, we
can now investigate the human genome diversity between
populations~\cite{1000GP}, find genomic variants that are likely to cause
disease~\cite{Antonacci2009, Antonacci2010, Bailey2006, Bailey2002, Bailey2008,
Bailey2001}, investigate the genomes of the great ape species~\cite{Bailey2002a,
Bailey2004a, Marques-Bonet2009, Rozen2003, Scally2012, Ventura2011} and even
ancient hominids~\cite{Green2010, Reich2010} to understand our own evolution.
Despite all the revolutionary power these new sequencing platforms offer, they
also present difficult computational challenges due to 1) massive amounts of
data produced, 2) shorter read lengths with consequently more mapping locations
and 3) higher sequencing errors when compared to the traditional
capillary-based sequencing. 

With NGS platforms, such as the popular Illumina platform, billions of raw short
reads are generated at high speed. Each short read represents a 100
base-pair (bps) contiguous DNA fragment from the sequencing subject. After the
short reads are generated, the first step is to map (align) the reads to a
known reference genome. The mapping process is very computationally expensive
as the short reads are mapped to the huge reference genome such as human genome
(3.2 giga base-pairs). The mapper, a software performing the mapping, has to
search a very large reference genome database {\color{red} to map} millions of
short reads. Even worse, each short read may contain mutations, which require
expensive approximate searching. In addition, the ubiquitous common repeats and
segmental duplications within human genome complicate the task since a short
read from such genome segment corresponds to a large number of mapping
locations in the reference genome.

To simplify searching a large database such as the human genome, previous work has
developed several algorithms that fall into two categories: the
seed-and-extend heuristic methods and suffix-tree mapping methods.

The seed-and-extend heuristic is developed based on the observation that for a
correct mapping, the query short read and the corresponding reference
counterpart, which is the piece of reference genome that the query read should
map to, must share some brief regions (usually 10-100 base-pair-long) of exact
or inexact matches. These shorter shared regions, indicating {\color{red} high}
similarity between the query read and the reference genome counterpart, are
called seeds.  By identifying seeds of a query read, the mapper narrows down
the searching range from the whole genome to only the seeds' neighborhood
region. Seeds are generated by preprocessing the reference genome and storing
{\color{red} the locations of} their occurrence in the reference genome in a
separate data structure. At mapping stage, a seed-and-extend mapper first
analyzes the query read to identify the seeds.  Then the mapper tries to extend
the read at each of the seed locations via sophisticated fuzzy string
matching algorithms such as the Smith-Waterman~\cite{sw} or
Needleman-Wunsch~\cite{nw} algorithm.

On the other hand, the suffix-tree mapping methods analyze the reference genome
and transform the reference genome into a suffix-tree data structure, with each
tree edge labeled with one of the four base-pair types and each node storing
all the occurrence locations of a suffix. Walking through the tree from root to
leaf while concatenating all the base-pairs on the edges along the path
together forms a unique suffix of the reference genome. Every leaf node of the
tree stores all mapping locations of this unique suffix in the reference
genome. Searching for a query read is equivalent to walking through the reference
suffix-tree from root following the query read's sequence. If there exists a
path from the root to a leaf such that the corresponding suffix of the path
matches the query read, then all the locations stored in the leaf node are
returned as mapping locations.

Several mappers have been developed over the past few years.  These mappers can
be classified into two categories based on their mapping algorithms: 1) hash
table based seed-and-extend mappers (hash table based mappers) similar to the
popular BLAST~\cite{blast} method, such as mrFAST/mrsFAST~\cite{Alkan2009,
Hach2010}, MAQ~\cite{Li2009a}, SHRiMP~\cite{shrimp}, Hobbes~\cite{hobbes},
drFAST~\cite{Hormozdiari2011} and RazerS~\cite{razers}; and 2) suffix-tree and
genome compression based mappers (suffix-tree based mappers) that utilize the
Burrows-Wheeler Transform~\cite{Burrows94ablock-sorting} and the
Ferragina-Manzini index~\cite{Ferragina07compressedrepresentations} (BWT-FM)
such as BWA, Bowtie, and SOAP2. Both types of read mapping algorithms have
different strengths and weaknesses. To measure the performance between
different mappers, three general metrics are introduced: speed in performing
the mapping, sensitivity in mapping reads with the presence of multiple
mutations (including mismatches, insertions and deletions) and
comprehensiveness in searching for all mapping locations across the reference
genome. The hash table based mappers are much slower, albeit more sensitive,
more comprehensive and more robust to sequence errors and genomic diversity
{\color{red} than suffix-tree based mappers}.  For {\color{red} these reasons},
hash table based mappers are typically more suitable when comparing the genomes
of different species, such as mapping reads generated from a gorilla genome to
the human reference genome, or to highly repetitive genomic regions where
structural variants are more likely to occur~\cite{Alkan2011nrgreview,
Schuster2010, mills2011nature1000genomes}. On the contrary, BWT-FM mappers
offer very high mapping speed (up to 30-fold faster than hash table based
mappers), yet their mapping sensitivity and comprehensiveness suffer when the
mutations of the read or the diversity increases and {\color{red} BWT-FM mappers
perform} in the complex regions of the genome. Their fast speed makes the
BWT-FM mappers the first choice in single nucleotide polymorphism (SNP)
discovery studies where sensitivity is less important. In this work, we focused
on increasing the speed of hash table based mappers while preserving
{\color{red} their} high sensitivity and comprehensiveness.

The relatively slower speed of hash-based mappers is due to their high
sensitivity and comprehensiveness. Such mappers first index {\it fixed length
seeds} (also called k-mers), typically 11-13 base-pair-long DNA fragments from
the reference genome, into a hash table or a similar data structure. Next, they
divide each query read into smaller fixed length seeds to query the hash table
for their associated {\it seed locations}. Finally {\color{red} (comment: why is
it finally?)}, they try to {\it extend} the read at each of the seed locations
by aligning the read to the reference genome counterpart at the seed location
via dynamic programming algorithms such as Needleman-Wunsch~\cite{nw} and
Smith-Waterman~\cite{sw}, or simply calculate Hamming distance for greater
speed at the cost of missing {\color{red} the chance to find potential
locations which have insertions/deletions (indels).}
%potential insertions/deletions (indels).  
For simplicity, the rest of the paper will use the term ``k-mer'' to represent
the term ``fixed length seed''. We will also use the term
{\color{red}``}location'' as {\color{red} ``}seed location''. They
are interchangeable.

Using real data generated with the NGS platforms, we observed that most of the
{\it locations} fail to provide correct alignments. This is because the
size of the k-mers that formed the hash table's indices are typically very
short (e.g.  12 {\color{red} bps} for mrFAST/mrsFAST). These short k-mers appear in the
reference genome much more frequently than the undivided, hundreds of
base-pair-long query read. As a result, only a few of the locations of a k-mer
provide correct alignments.  Naively extending at all of the locations of all
k-mers only introduces wasteful computation. In this paper, we define the seed
locations that the read cannot align to as ``false" locations. Reducing
unnecessary computation associated with the huge number of false locations is
the key to improve hash table based mappers' performance {\color{red} in terms of speed}.

In this paper we propose a new algorithm, FastHASH, that dramatically improves
the {\color{red} speed} of hash-based algorithms while maintaining the sensitivity and
comprehensiveness. We introduce two key ideas for this purpose. First, we
drastically reduce the potential locations considered for the extend step (both
true{\color{red} (comments: you didnot mentioned the true location)} and false) while still preserving comprehensiveness. We call this
method {\it Cheap K-mer Selection}. Second, we quickly eliminate most of the
false locations without invoking the extend step in the early stages of
mapping. This method is called {\it Adjacency Filtering}.  We tested FastHASH
by incorporating it to the mrFAST~\cite{Alkan2009} codebase.  Our initial CPU
implementation of FastHASH provides up to 5-fold speedup over mrFAST, while
still preserving comprehensiveness. We also show that FastHASH is GPU friendly
and can be implemented on GPU hash based mappers.

In the next section, we describe the basics and the characteristics of Cheap
K-mer Selection and Adjacency Filtering. In {\color{red} Section} ``Mechanisms'', we present the mechanism
of FastHASH in detail. In Section ``GPU Implementation of FastHASH'', we
describe the mechanism of our GPU implementation. Finally, we present the
performance of FastHASH compared with mrFAST and several other read mapping
tools in Section ``Results''. We will present more analysis in Section ``Analysis'' and conclude in
Section ``Conclusion and Discussion''.

\section*{Observation and Insight} \label{sec:Ob&Ins}

\subsection*{Hash table based mappers}

Hash table based mappers map query reads to a known reference genome under a
user defined mutation threshold {\it e}. With the mutation threshold {\it e},
the mappers search for locations where there are fewer than {\it e} mutated
base-pairs (including mismatches, insertions or deletions) between the input
read and the reference genome counterpart. Typically, they follow a
``seed-and-extend'' procedure. These mappers index the reference genome and
store the contents in a hash table. The hash table stores the full
lexicographical permutation of a fixed length k-mer (typically 10-13
base-pairs) as keys and all the corresponding occurrence locations in the
reference genome for each k-mer as contents. The indexing procedure is only
performed once. Later mapping only loads the generated hash table to perform
the mappings.

Figure~1 shows the flow chart of a typical hash table based mapper during the
mapping stage. The mapper follows six steps to map a query read to the
reference genome. Step 1, the mapper divides the query read into smaller
k-mers, with each k-mer at the same length of the hash table keys. Step 2,
several k-mers from the k-mer pool from step 1 are selected as query k-mers.
Query k-mers are then fed to the hash table as inputs. The hash table returns
the location lists for each query k-mer. The location list stores all the
occurrence locations of the query k-mer in the reference genome. Step 3, the
mapper probes the location list one-by-one. The mapper examines a location at a
time. For each location, the mapper invokes the reference genome and, as step
4, retrieves the reference genome counterpart from the reference genome at the
location neighborhood. Step 5, the mapper aligns the query read against the
reference genome counterpart using dynamic programming algorithms such as the
Hamming distance, edit distance~\cite{levenshtein1966},
Needleman-Wunsch, or Smith-Waterman, to verify if the
number of mutations between the query read and the reference genome counterpart
exceeds the user-set mutation threshold. Step 6, the mapper processes the next
location in the location list and repeats step 4 and step 5 until all the
locations are processed.

\subsection*{Observation} \label{sec:observation} 

Hash table based mappers are computationally more expensive than suffix-tree
alternatives. Unlike suffix-tree based mappers, which quickly return the mapping
locations at the leaf nodes of the tree, hash table based mappers try to
calculate the optimal alignment for all query k-mers' locations. Mappers that
are capable of aligning with the presence of mutations are the most sensitive,
yet slowest, since these dynamic programming algorithms typically run in
$O(l^2)$ time ($l$ is the length of the reads). This can be reduced to $O(2el)$
if the number of allowed indels is reduced to {\it e}.

We experimentally tested the behavior of a hash table based mapper
mrFAST~\cite{Alkan2009} to identify performance bottlenecks. We observed that
the dynamic programming alignment algorithm (step 5) occupies over 90\% of the
execution time while most locations fail to pass the alignment verification
(step 5). Due to both the small k-mer sizes used to generate the indices and
the repetitive nature of most genomes (including human), each k-mer's location
list may contain many more locations than a query read may correctly align to.

Within a k-mer's location list, we define those locations that pass alignment
verification (step 5) as ``true locations'' and other locations that fail the
verification as ``false locations''. The false locations are false because they
do not provide mapping results. Figure~2 gives an example of true locations vs.
false locations. In Figure~2, we have the location lists from the query read of
Figure~1. From our oracle knowledge, we know the read only maps to location 212
thus it is sufficient to extend only at the true locations (shaded blocks).
However, the mapper has no such knowledge. As a result, the mapper examines all
the locations while wasting a lot of computational resources on verifying false
locations (white blocks).

Verification of the vast number of false locations greatly degrades the
performance of the mapper as it consumes a massive amount of unnecessary
computation and memory access. To verify a location, the mappers have to 1)
access the reference genome sequence starting at the seed location and then 2)
invoke a dynamic programming algorithm to align the query read to the reference
genome counterpart.  Performing these costly (in terms of CPU time and memory
accesses) operations for a high number of false locations will only waste
computational resources as false locations, by definition, do not provide any
valid mappings.  Therefore improving the performance of hash table based
mappers strongly depends on {\it efficiently} reducing the number of false
locations before the verification step.

\subsection*{Insight} \label{sec:insight} 

There are two main directions to ameliorate the computational cost imposed by
false locations. First, one can apply a filter within the seed locations and
only extend on ``true locations'' to reduce unnecessary computation. Second,
one can select only the k-mers with low frequency occurrences within the
reference to avoid high frequency k-mers, which in turn avoids long location
lists and reduces the number of locations to examine. In this work, we propose
two new mechanisms that address both directions.

Our first method aims to filter out the {\it obviously} false locations.  Our
observation is that by voting among the location lists of all of the k-mers for
a common set of locations, we can quickly detect true and false locations and
skip the unnecessary verification step of the false locations.  The basic idea
is as follows: A potential seed location from one k-mer's location list can
return a {\it correct} mapping location (under the given edit distance $e$)
only if other adjacent k-mers within the read are also located at adjacent
locations within the reference (e.g, in Figure~2, location 212 in first k-mer's
list, location 224 in the second k-mer's list, etc.). Consequently, by asking
all other k-mers to vote if they have the corresponding adjacent locations
stored inside their location lists, we may quickly identify false locations
without the alignment step (e.g, in Figure~2, location 1121 from the first
k-mer's location list is an easily detectable false location since no other
k-mer contains adjacent locations in their location lists). To tolerate
mutations, this rule can be violated by at most $e$ adjacent k-mers. Otherwise,
the number of mutations (mismatches and in/dels) between the query read and the
reference genome counterpart must be greater than $e$, and thus the location
would be rejected at the verification step (step 5). Such k-mers need to be
filtered out before the expensive verification step (step 5) is invoked. We
call this method {\it Adjacency Filtering (AF)}.

Note that AF does not guarantee correct mappings, instead it rejects obviously
false locations. For computing the actual number, location, and content of
mutations (including sequence errors) the alignment step (step 5) is still
needed.  Nevertheless,  AF detects false locations and removes them from
consideration without any reference genome lookups, which may save hundreds to
thousands of CPU cycles per read.

Our second method, {\it Cheap K-mer Selection (CKS)} tries to minimize the
verification operations by preferentially selecting the k-mers from the input
reads that occur at low frequency in the reference genome. For a query read, the
amount of alignment computation is proportional to the number of locations
stored in the location lists of the query k-mers. We observed that the hash
table entries are heavily unbalanced. Selecting different k-mers to query the
hash table may heavily affect the mapper's performance. Due to the repetitive
nature of most genomes and the extremely short k-mer length, some k-mers have
extremely large location lists (high frequency k-mers) and others (low frequency
k-mers) have much smaller location lists, as Figure~3 shows. Probing large
location lists burdens the mapper since it has to verify a large number of
locations; thus, we define these high frequency k-mers as {\it expensive
k-mers}. On the other hand, k-mers with smaller location lists are denoted as
{\it cheap k-mers}. The insight is, for a correct mapping, both cheap and
expensive k-mers have the true location stored in their location list. While
a cheap k-mer's location list stores only a few other false locations, expensive
k-mers store several orders of magnitude more false locations due to the
k-mers' repetitive nature in the reference genome. As a result, selecting
cheaper k-mers from a read instead of expensive ones as query k-mers reduces
the number of locations to be verified (steps 3--6).

By selecting cheaper k-mers, we deliberately reduce the number of locations
without affecting the mapper's sensitivity. Sensitivity is guaranteed by
picking multiple cheap locations to ensure that their combined coverage
includes all mutation scenarios (e.g, in Figure~2, by selecting four
non-overlapping cheap k-mers, we ensure finding all mappings with at most three
mutations since 3 mutations can at most alter 3 k-mers).

With AF eliminating unnecessary computation to detect false locations and CKS
reducing the number of false locations, fastHASH is able to avoid unnecessary
computation and focus on aligning only at the true locations, which provides
drastic speedup over the original hash table based mappers.

\section*{Mechanisms} \label{sec:mechanisms}

\subsection*{Adjacency Filtering (AF)} \label{sec:af}

Adjacency Filtering (AF) uses the location lists retrieved from the hash table
to detect false locations. Since the location lists are stored continuously as
sorted arrays in the hash table, it is easy to prefetch these lists into the CPU
cache. Moreover, the location lists provide great reusability. Once fetched into
the cache, the location lists of the k-mers can be used to verify all seed
locations thus reused many times. Unlike traditional hash table based mappers
which try to extend at all potential seed locations and perform many
unpredictable reference genome lookups, FastHASH with AF only accesses the
reference genome when it is confident that the seed location is a true location.

Briefly, FastHASH divides a read into consecutive k-mers and tests whether
k-mers that are adjacent to each other within the read are also found at
adjacent positions within the reference.  For example, assume the size of the
k-mers in the hash table is $k=12$, we have an 84 base-pair (bps) read to map to
the reference genome, and the mapper's mutation threshold is set to $e=0$, which
allows no mutation. The mapper will first divide the read into 7 {\it
consecutive} k-mers of length 12 bps each, and then use the locations of these
k-mers in the hash table as seeds. As the left half of Figure~4 shows, for a
true location $m$ (where the input read perfectly maps to the reference), the
first k-mer of the read is at location $m$, the second k-mer is at $m+12$, third
k-mer is at $m+24$; and this pattern continues up to the 7$^{th}$ k-mer, which
is located at $m+72$.  Similarly, if $m$ is an unknown location from the first
k-mer's location list, we can test whether the location is a true location by
testing if location $m$ is stored in the first k-mer's location list, $m+12$ for
second k-mer's location list etc., as shown in the right half of Figure~4.  Here
we define k-mers for which we can find adjacent k-mers at adjacent locations as
{\it correct k-mers}, and others as {\it mutated k-mers}.  Now suppose that the
read contains some mutations from the reference genome counterpart, then such
mutations must affect at least one k-mer, which in turn alters the k-mer to be
different from the reference counterpart's. As a result, the corresponding
adjacent location will not show up in the location list of the mutated k-mer. By
simply testing if all the corresponding adjacent locations are present in all
of the adjacent k-mers' location lists, we can detect mutations without actually
aligning the read to the reference counterpart.

If mutations to some finite extent are allowed, for example with at most a
total of $e$ mismatches, then the mapper can no longer mark a location as a
false location when only a single k-mer cannot find its corresponding adjacent
location. With at most $e$ mutations, in the worst case spread across $e$
k-mers, a mapping location can still lead to a valid mapping even when up to
$e$ k-mers fail to find their adjacent locations. In essence, to incorporate $e$
mismatches into AF with a read divided into $N$ k-mers, we require at least
$N-e$ k-mers finding corresponding adjacent locations in their location lists.
Otherwise the location is marked as a false location and rejected before further
operations. Allowing insertions and deletions is a little trickier but
similar. In addition to the above observation, an insertion or deletion not
only fails the searching of the adjacent location for the mutated k-mer, but
also shifts all the downstream k-mers, as shown in Figure~5.  In the
presence of insertions or deletions, the AF requirement is further relaxed from
requiring searching for a single adjacent location to searching for an adjacent
range. For example, if the user allows 1 insertion/deletion, then instead of
searching location $n$ in the first k-mer's location list, we now search for
locations $[n-1, n+1]$ in the first k-mer's location list, $[n+11, n+13]$ in the
second and so on. To sum up, with at most $e$ mutations (mismatches, insertions
or deletions), a potential location passes the AF only if $N-e$ k-mers find
corresponding adjacent location ranges within their location list, with adjacent
range defined as [-e, +e] deviation range from the adjacent location.
Otherwise, the location is marked as a false location and the mapper moves to
the next location (step 3).

The power of AF comes from detecting and rejecting most of the false locations
before alignment. Not only does AF prevent unnecessary computation (step 5),
but it also prevents extensive unnecessary memory accesses to the reference genome
to retrieve the reference genome counterpart (step 4). For a false location,
other than the query k-mer itself, usually the rest of the read is completely
different from the reference genome. We observed in real data sets that even
with high mutation threshold {\it e=5}, as the constraints of AF have been
loosened to allow more mutated k-mers, still only a small fraction (usually
less than 1\%) of the locations pass AF and are marked as true locations for
further verification. As a result, AF is effective and most false locations
will be detected and rejected by AF.

However, AF also comes with its own computational cost. To test a potential
location, AF conducts $N$ searches for corresponding locations, one for each
adjacent k-mer's location list. Additionally, AF does not guarantee that the
remaining seed locations will have fewer than $e$ mutations after alignment,
since multiple mutations might reside in a single mutated k-mer. In such cases
AF will not be able to tell exactly how many mutations there are, so it
conservatively assumes there is only one mutation per mutated k-mer and passes
the location to alignment step (step 5). During the alignment step, the more
sophisticated fuzzy string matching algorithm will extract detailed mutation
information and verify if the mapping is indeed correct. Above all, for true
mapping locations (with fewer than $e$ mutations) AF introduces extra
computation. Nevertheless, AF is cost-efficient because the number of
locations that falsely pass AF is marginal compared to those that are correctly
filtered out.

\subsection*{Cheap K-mer Selection (CKS)} \label{sec:cks}

Although AF reduces memory lookups, it also incurs a penalty in detecting false
locations: AF searches the corresponding adjacent locations for every k-mer.
This is in fact a quick lookup in the location lists: as the location lists are
sorted, we can use binary search. Nevertheless, for longer reads with many
k-mers, AF can be a costly process.

Another problem that AF cannot solve is the imbalance of the hash table. Most
location lists in the hash table for the human genome have very few locations.
But there are also location lists with cardinality more than 1 million. Even
though such k-mers are only a small portion of the hash table, we encounter
them frequently with real data. These high frequency (or, expensive) k-mers
mostly correspond to poly-N tracts and microsatellites, and such sequences have
many copies in the human reference genome.  These expensive k-mers also
introduce many false locations. When we use such expensive k-mers to query the
hash table, all of the locations in their entries will go through the AF test,
which is a search-heavy (i.e. computation massive) process.

FastHASH actively selects \emph{cheaper} k-mers over expensive k-mers to
search in the hash table. There will be fewer false locations and fewer
invocations to AF with cheaper k-mers. Note that, for any read, both cheap and
expensive k-mers will have the same true locations in their location lists.
However, since by definition expensive k-mers are more frequent in most
genomes, including the human genome, they will contain substantially more seed
locations than cheaper k-mers, thus imposing more computational cost to both AF
and the subsequent verification step.  Instead, starting the AF and then the
alignment with the cheap k-mers is going to relieve the mapper of this cost
while preserving all the true locations.

We implemented the selection of cheap k-mers as a simple quicksort operation
before querying the k-mers in the hash table.  For each input read, instead of
simply picking the first {\it e+1} k-mers to search in the hash table, FastHASH
first sorts all k-mers with respect to the cardinalities of their location
lists,  and then picks the cheapest (i.e. smallest cardinality) {\it e+1}
k-mers.  Note that selecting {\it e+1} k-mers as query k-mers guarantees full
sensitivity under edit distance $e$ due to the pigeonhole principle.

In summary, Cheap K-mer Selection (CKS) reduces the number of AF and
verification operations by using a computationally cheap operation: quicksort.
We may further be able to reduce the number of the false locations with more
complex mechanisms with larger memory footprint as done by Ahmadi et
al.~\cite{hobbes}. However, the improvement may not be significant when the
cost of the large memory requirement is taken into consideration, where CKS
filters out most of the false locations with very little auxiliary memory usage
(O($\log n$)).

\section*{GPU Implementation of FastHASH}

FastHASH is easily applicable to GPU systems since both AF and CKS seldom
diverge on control flow, which is an essential property for GPU friendly
applications.

GPU systems follow a single-instruction, multiple-thread (SIMT) execution
model~\cite{SIMT_WARP}, thus it is important for the application to have as few
control flow divergences (branches) as possible. For each divergence (if-else
condition statement or loop statement), all threads within a GPU-processor
warp~\cite{SIMT_WARP} (a group of threads that share a single instruction pool
and fetching logic) will have to execute both branches no matter which one they
take. Even worse, for a loop, all threads will have to wait until the longest
loop finishes before executing new instructions. As a result, GPUs are more
efficient for control-simple brute force algorithms compared to complexity
optimized condition-rich algorithms.  On CPU systems, AF is implemented as
binary search and CKS is implemented as quicksort.  Both can be easily applied
to GPUs after a careful optimization.

CKS sorts a fixed length array (all reads have the same number of k-mers),
therefore we can use a simple selection sort. Sorting reads will take $O(n^{2})$
time, where $n$ is the number of k-mers for a read.  Although it has higher
average time complexity than other sorting algorithms, selection sort does not
have a complex control-flow.  Algorithms like quicksort may not be better than
selection sort for GPU implementations since their worst case complexities are
similar, with the added cost of  more complex control flows.  In this case, all
threads within a GPU warp will have to wait for the last loop to finish.  To
implement selection sort  we assign each thread within a warp to work on a
single read, finishing the sort of all reads in $O(n^{2})$ time.

We do not need to change the AF algorithm as it does not diverge.  For each
potential location, all  k-mers will search for their corresponding adjacent
locations. Grouping all binary search operations for the same k-mer
guarantees that all threads finish at the same time ($O(\log m)$), where $m$
is the length of the location list. During the AF phase, we assign each warp to a
single read, with each thread within the warp working on one potential
location. All threads within the warp check the different adjacent locations
for the same k-mer at the same time.

Beyond FastHASH, aligning algorithms also need to adapt to the full GPU
execution model, however, this topic is beyond the scope of this paper.


\section*{Results} \label{sec:results}

\subsection*{Methodology} \label{sec:methodology} 

We implemented FastHASH on top of mrFAST version 2.1.0.6, creating a new
version, mrFAST-2.5.0.0. To assess the performance of FastHASH, we compared the
performance of the new mrFAST-2.5.0.0 against several popular read mappers
currently available including Bowtie, BWA, RazerS and mrFAST-2.1.0.6, both on
simulated and real data sets. We evaluated the mappers with respect to four
metrics: speed, sensitivity, comprehensiveness, and accuracy. We also tried to
benchmark Hobbes but its memory footprint is too large (more than 20
GB)~\cite{hobbes}, which results in page swaps that largely degrade system
performance. %We later dropped it.

Speed is how fast a mapper maps reads to the reference genome, and is measured
by execution time. Sensitivity refers to how good the mapper is at searching
for correct locations, which is measured by the fraction of reads that find at
least one mapping. Comprehensiveness represents how thoroughly the mapper
searches for mappings, and it is measured by the total number of valid mapping
locations across all of the reads. Accuracy is how correct the mapping
locations are for each read, and it is measured by counting how many simulated
reads are mapped to their original location.

We tested speed, sensitivity and comprehensiveness with different error
threshold levels from  1 to 5 for all mappers using both real and simulated
reads, while we tested accuracy at a fixed mutation threshold of 3 with only
simulated reads. Since Bowtie does not support any mutation threshold greater than
3, we only have results for Bowtie with mutation thresholds 1, 2 and 3.  RazerS
supports mutation thresholding via a percent identity setting. In order to provide
fair comparison, we chose the error percentage as close to the mutation threshold
as possible. For accuracy testing, the mutation threshold is set to 3 for all
mappers. We only use simulated data sets to test sensitivity since they are
generated from the reference genome, which guarantees each read has at least one
correct mapping. For real data sets, there is no such guarantee. On the other
hand, we can see 100\% sensitivity for fastHASH for simulated data sets.
%For real data sets, there is no such guarantee and as a result,
%we only see 100\% sensitivity for fastHASH for simulated data sets.

%The mappers and the parameters used for each mapper are listed below:

%\begin{itemize}
%\item mrFAST(version 2.5.0.0) : e=1,2,3,4,5 (mutation threshold for different runs)
%\item mrFAST (version 2.1.0.6) : e=1,2,3,4,5 (mutation threshold for different runs)
%\item Bowtie (version 0.12.8) : n,v=1,2,3 (mutation threshold for different runs), -a (reporting all possible mapping), -S (output in SAM format) 
%\item Bowtie2 (version 2.2.0.0) :
%\item BWA (version 0.6.1) :
%\item RazerS (version 3.0) :
%\end{itemize}

\emph{Real Data:} We used three different real data sets to evaluate the
performance of different mappers. All sequence datasets were generated using
the Illumina platform. The first set (set 1; 160 bp, 1 million reads) consists
of reads from an individual from the 1000 Genomes project~\cite{1000GP}
sequenced with the Illumina platform. The second set (set 2; 101 bp, 500,000
reads) is generated from a chimpanzee genome~\cite{Prufer2012_bonobopaper}, and
the third set (set 3; 70 bp, 500,000 reads) is generated from an orangutan
genome~\cite{Locke2011}. In our benchmarks, we mapped all reads to the current
human reference genome assembly (GRCh37, hg19). 

\emph{Simulated Data:} We generated three simulated data sets from the current
human reference genome assembly (hg19). For each set we generated 50,000 random
reads from each of the first 20 chromosomes, summing up to 1 million reads per set.
The sets differ in their read lengths:  72bp, 108bp, and 180bp.
 For each read, we simulated the read errors and mutations by randomly
altering or inserting/deleting 0 to 3 base-pairs. Each set is mapped to the
human reference genome (hg19).

We ran  all  mappers on a single user mode Linux machine with a 3.2GHz Intel i7
Sandybridge CPU and 16 GB 1333Mhz DDR3 main memory.

As Table~1 shows, compared to BWA and Bowtie, hash table based mappers such as
mrFAST-2.1.0.6 and RazerS suffer in run time.  As we will show in Section
\ref{sec:analysis}, this is mainly due to the massive amount of false
locations. With the help of FastHASH, mrFAST-2.5.0.0 greatly improves its speed
over mrFAST-2.1.0.6, making it even faster than BWA under certain circumstances
(for set 1, when mutation threshold is greater than 3).  Meanwhile, mrFAST-2.5.0.0
preserves the important sensitivity and comprehensiveness properties of the
earlier version mrFAST, mrFAST-2.1.0.6.

Figure~6 presents the speedup across different mutation threshold values on
different data sets. Notice that as mutation threshold {\it e} increases, the
speedup decreases. This is expected since a higher {\it e} results in relaxed AF
constraints and diminished CKS benefits, about which we will provide further
details in Analysis section.

Table~2 shows the accuracy across different mappers on simulated data sets. For
a simulated data set, since all the reads are generated from the reference
human genome and are guaranteed to have fewer than 3 mutated basepairs
(mismatch, insertion or deletion), an ideal mapper should be able to map all 1
million reads. In reality, due to performance constraints or simply mapping
limitations, most mappers do not guarantee full sensitivity. mrFAST on the
other hand, targets 100\% sensitivity.  We clearly see that mrFAST with
FastHASH retains high accuracy from mrFAST-2.1.0.6. In fact, since
mrFAST-2.5.0.0 introduces several minor bug fixes, the accuracy is slightly
higher than the earlier mrFAST, mrFAST-2.1.0.6.  Note that for some input sets,
mrFAST-2.1.0.6 runs faster than mrFAST-2.5.0.0 for higher mutation threshold. This
is because mrFAST uses Intel SSE SIMD code extension~\cite{SSE} which
marginally alters the mapper algorithm used based on the mutation threshold. In
particular the algorithm for $e=4$ is slightly faster than that for $e=3$.
Generally, however, all mappers are slower with higher mutation thresholds as
expected. We will also show further analysis in the Analysis section.

Figure~7 shows that with FastHASH the memory usage does not change
significantly compared to mrFAST-2.1.0.6.

\section*{Analysis} \label{sec:analysis}

%In this section, we provide a breakdown analysis of the performance of
%Adjacency Filtering and Cheap K-mer Selection. The numbers are shown in log
%scaled Figure~8.

In this section, we provide the performance improvements of Adjacency Filtering
and Cheap K-mer Selection. The numbers are shown in log scaled Figure~8.

As discussed in the previous section, mrFAST-2.1.0.6, like other hash
based mappers, suffers greatly from extending on a large number of false
locations. Figure~8(a) presents the number of true
locations out of all potential locations:  only 0.007\% of the potential
locations (seeds) will provide correct alignment on average. 

We can clearly see the incremental benefits of AF and CKS when mapping 1
million simulated reads of 180bp in length (Figure~8(b) and Figure~8(c)).
As discussed above, a very small
fraction of the seed locations pass the verification test initially
(Figure~8(a)).  Adjacency filtering substantially decreases the
number of false locations as seen in Figure~8(b). This way, AF
saves many false memory accesses since now only the locations that pass AF
will proceed for further verification. On average, AF filters out approximately
99.8\% of all false seeds.  Figure~8(c) shows the benefit of using
both AF and CKS. Compared to Figure~8(b), CKS reduces the number of
overall potential locations, which reduces AF computation. On average, CKS
eliminates 95.4\% of all potential locations, which of course are false.

In the Results section, we showed that the speedup gained by using
FastHASH decreases as the mutation threshold increases. This is because of two reasons
as demonstrated in Figure~8. First, as mutation threshold {\it e}
increases, AF allows a greater number of false locations to pass the
filter.  Similarly, as {\it e} increases, CKS starts to select more expensive
k-mers, providing less reduction of false locations.

\section*{Conclusions and Discussion} \label{sec:conclusion} 

Parallel sequencing platforms continuously evolve at a dazzling rate.
New technologies are introduced frequently that offer different strengths;
each, however, has unique biases. The current trend is to generate
longer reads, however, with newer technologies such as the nanopore sequencing,
at the cost of increased error rates. While the BWT-FM based mappers offer tremendous
speed in read mapping, they also suffer greatly with higher error rates and
longer read lengths. Hash-based mappers are more robust to these changes,
but they are also very slow for mapping short reads.

In this paper, we analyzed seed-and-extend type read mapping algorithms and
proposed a new optimization algorithm called FastHASH that provides up to
5- to 15-fold speed up. FastHASH provides a potential solution to the speed
inefficiency problem of hash-based mappers as a generic algorithm that can be
merged with any such read mapper.  

Although our current implementation of FastHASH is on a CPU based system, we
also provide a preliminary implementation on a GPU based system, that we aim to
further develop. Another future direction to improve FastHASH may be to develop
hybrid indexing strategies that merge BWT-FM indexing and compression with hash
based indexing to increase seed size for longer ($>$1 kb) reads while keeping
memory requirements low.

Together  with additional GPU-based improvements for the extend step of read
mapping, FastHASH promises to accelerate read mapping while maintaining the
sensitivity of hash-based mappers to help cope with the overwhelming data
deluge caused by next generation sequencing.

\bigskip

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Authors' Contributions}
HX and DL designed the FastHASH algorithm. HX, DL, and FH implemented the
described methods. SY performed comparisons and helped HX and DL perform the
analyses. OM and CA conceived and planned the experiments, and supervised HX
and DL for the algorithm development. All authors contributed to the writing of
the manuscript. 
 

%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Acknowledgements}
We thank F. Hach for his help in editing the mrFAST source code to integrate
the FastHASH mechanism.  This work was supported, in part, by an NIH grant
HG006004 to C.A. and O.M.
 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                  The Bibliography                       %%
%%                                                         %%              
%%  Bmc_article.bst  will be used to                       %%
%%  create a .BBL file for submission, which includes      %%
%%  XML structured for BMC.                                %%
%%  After submission of the .TEX file,                     %%
%%  you will be prompted to submit your .BBL file.         %%
%%                                                         %%
%%                                                         %%
%%  Note that the displayed Bibliography will not          %% 
%%  necessarily be rendered by Latex exactly as specified  %%
%%  in the online Instructions for Authors.                %% 
%%                                                         %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\newpage
{\ifthenelse{\boolean{publ}}{\footnotesize}{\small}
 \bibliographystyle{bmc_article}  % Style BST file
  \bibliography{bmc_article} }     % Bibliography file (usually '*.bib' ) 

%%%%%%%%%%%

\ifthenelse{\boolean{publ}}{\end{multicols}}{}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                               %%
%% Figures                       %%
%%                               %%
%% NB: this is for captions and  %%
%% Titles. All graphics must be  %%
%% submitted separately and NOT  %%
%% included in the Tex document  %%
%%                               %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%
%% Do not use \listoffigures as most will included as separate files

\section*{Figures}

\subsection*{Figure 1: Hash table based mapping.}
The flow chart of hash table based mappers. 1) Divide the input read into
smaller k-mers. 2) Search each k-mer in the hash table which is previously generated
from the reference genome. 3) Probe location lists. 4) Retrieve the reference
sequence starting at the seed location. 5) Align the read against the reference
sequence.  6) Move to the next location and redo steps 4 and 5.
  \begin{figure}[h]
    \includegraphics[width=\textwidth]{./figures/Figure_1.pdf}
      %\caption{
      %  The flow chart of hash table based mappers. 1) Divide the input read into smaller
      %  k-mers. 2) Search each k-mer in the
      %  hash table previously generated from the reference genome. 3) Probe location lists. 4) Retrieve the reference
      %  sequence starting at the seed location. 5) Align the read against the reference
      %  sequence.  6) Move to the next location and redo steps 4 and 5.
      %}
    \label{hash-mapper}
  \end{figure}

\subsection*{Figure 2: True vs. false locations.}
Example of true locations vs. false locations. Only the true locations provide
mapping results.
\begin{figure}[h]
	\includegraphics[width=0.7\textwidth]{./figures/Figure_2.pdf}
	%\caption{Example of essential locations vs. redundant locations. Only
	%the essential locations provides mapping results.} 
	\label{EvsR}
\end{figure}

\newpage

\subsection*{Figure 3: Hash table.}
A snapshot of a hash table. Some k-mers have very large location lists, while
others have much shorter lists. For example, AAAAAAAAAAAA has over 1 million
entries in the reference genome whereas TGAACGTAACAA only has 2.
\begin{figure}[h]
	\includegraphics[width=0.7\textwidth]{./figures/Figure_3.pdf}
	%\caption{A snapshot of a hash table. Some k-mers have very large location
	%list, while others have shorter lists. For example, AAAAAAAAAAAA
	%has over 1 million of entries whereas TGAACGTAACAA only has 2.}
\label{HT}
\end{figure}

\subsection*{Figure 4: Adjacency Filtering.}
The insight behind adjacency filtering: For a perfect mapping, all adjacent
k-mers within a read should also be at adjacent locations within the reference.
This is equivalent to searching for adjacent locations in adjacent k-mers'
location lists.
\begin{figure}[h]
	\includegraphics[width=\textwidth]{./figures/Figure_4.pdf}
	%\caption{The insight behind adjacency filtering: For a perfect mapping, all
	%	djacent k-mers within a read should also be at adjacent locations within the reference. 
	%	This is equivalent to 
	%	searching for adjacent locations in adjacent k-mers' location lists.}
	\label{AF}
\end{figure}

\newpage

\subsection*{Figure 5: Adjacency Filtering with errors.}
An example of insertion tolerance. Notice that because of the insertion, the
sixth k-mer cannot find its adjacent location and shifts the seventh k-mer's
adjacent location left by 1 bp. However, the seventh k-mer is still considered
a ``correct k-mer'' since {\it m+71} is still an acceptable adjacent
location.
\begin{figure}[h]
	\includegraphics[width=\textwidth]{./figures/Figure_5.pdf}
    %\caption{An example of insertion tolerance. Notice that because of the
    %  insertion, the sixth k-mer cannot find its adjacent location and bring the
    %  seventh k-mer's adjacent location left by 1-bp. However the seventh k-mer
    %  is still considered as a ``correct k-mer" since {\it m+71} is still a
    %  acceptable adjacent location.}
    \label{AF&ins}
\end{figure}
 
\subsection*{Figure 6: Speedup of fastHASH.}
Speedup of mrFAST-2.5.0.0 over mrFAST-2.1.0.6, with different read sets.
\begin{figure}[h]
	\centering
   	\includegraphics[height=2in]{./figures/Figure_6.pdf}
   	\vspace{0.16in}
   	%\caption{Speedup of mrFAST-2.5.0.0 over mrFAST-2.1.0.6, with different read sets}
    \label{speedup}
\end{figure}

\newpage

\subsection*{Figure 7: Memory usage.}
Memory usage comparison.
\begin{figure}[h]
   	\centering
   	\includegraphics[height=2in]{./figures/Figure_7.pdf}
   	%\caption{Memory Usage Comparison}
    \label{memory}
\end{figure}

\subsection*{Figure 8: Benefit breakdown of AF and CKS.}
Breakdown of incremental benefits by AF and CKS. a) mrFAST, b) mrFAST with AF,
c) mrFAST with AF and CKS.
  \begin{figure}[h]
    	\subfigure[]{
    		\includegraphics[width=2in]{./figures/Figure_mrFAST.pdf}
    	}
    	\subfigure[]{
    		\includegraphics[width=2in]{./figures/Figure_mrFAST_AF.pdf}
    	}
    	\subfigure[]{
    		\includegraphics[width=2in]{./figures/Figure_mrFAST_AF_CKS.pdf}
    	}
    %\caption{Breakdown of incremental benefits by AF and CKS. a) mrFAST, b) mrFAST with AF, c) mrFAST with AF and CKS}
    \label{mrfast_false}
    \label{sim_bl}
    \label{sim_sa}
    \label{break_down}
  \end{figure}

%\junk{
%  \begin{figure}[h]
%    \includegraphics[width=\textwidth]{./figure/MRFAST-crop.pdf}
%      \caption{
%        The flow chart of hash table based mappers. 1) Divide the input read into smaller
%        k-mers. 2) Search each k-mer in the
%        hash table previously generated from the reference genome. 3) Probe location lists. 4) Retrieve the reference
%        sequence starting at the seed location. 5) Align the read against the reference
%        sequence.  6) Move to the next location and redo steps 4 and 5.
%      }
%    \label{hash-mapper}
%  \end{figure}
%  
%  \begin{figure}[h]
%    	\includegraphics[width=0.7\textwidth]{./figure/EvsR-crop.pdf}
%      \caption{Example of essential locations vs. redundant locations. Only
%        the essential locations provides mapping results.} 
%    \label{EvsR}
%  \end{figure}
%  
%  \begin{figure}[h]
%    	\includegraphics[width=0.7\textwidth]{./figure/HashtableUneven-crop.pdf}
%      \caption{A snapshot of a hash table. Some k-mers have very large location
%        list, while others have shorter lists. For example, AAAAAAAAAAAA
%        has over 1 million of entries whereas TGAACGTAACAA only has 2.}
%    \label{HT}
%  \end{figure}
%  
%  \begin{figure}[h]
%    	\includegraphics[width=\textwidth]{./figure/AF-crop.pdf}
%     \caption{The insight behind adjacency filtering: For a perfect mapping, all
%       adjacent k-mers within a read should also be at adjacent locations within the reference. 
%       This is equivalent to 
%       searching for adjacent locations in adjacent k-mers' location lists.}
%    \label{AF}
%  \end{figure}
%  
%  \begin{figure}[h]
%    	\includegraphics[width=\textwidth]{./figure/AF_ins-crop.pdf}
%    \caption{An example of insertion tolerance. Notice that because of the
%      insertion, the sixth k-mer cannot find its adjacent location and bring the
%      seventh k-mer's adjacent location left by 1-bp. However the seventh k-mer
%      is still considered as a ``correct k-mer" since {\it m+71} is still a
%      acceptable adjacent location.}
%    \label{AF&ins}
%  \end{figure}
%  
%  \begin{figure}[h]
%    \begin{minipage}[b]{0.5\linewidth}
%    	\centering
%    	\includegraphics[width=\textwidth]{figure/speedup_B}
%    	\vspace{0.16in}
%    	\caption{Speedup of mrFAST-2.5.0.0 over mrFAST-2.1.0.6, with different read sets}
%    \label{speedup}
%    \end{minipage}
%    \hspace{0.2in}
%    \begin{minipage}[b]{0.5\linewidth}
%    	\centering
%    	\includegraphics[width=\textwidth]{figure/memory_B}
%    	\caption{Memory Usage Comparison}
%    \label{memory}
%    \end{minipage}
%  \end{figure}
%  
%  
%  \begin{figure}[h]
%    	\subfigure[]{
%    		\includegraphics[width=2in]{figure/dna3_3_B}
%    	}
%    	\subfigure[]{
%    		\includegraphics[width=2in]{figure/dna1_1_B}
%    	}
%    	\subfigure[]{
%    		\includegraphics[width=2in]{figure/dna2_2_B}
%    	}
%     \caption{Breakdown of incremental benefits by AF and CKS. a) mrFAST, b) mrFAST with AF, c) mrFAST with AF and CKS}
%    \label{mrfast_false}
%    \label{sim_bl}
%    \label{sim_sa}
%    \label{break_down}
%  \end{figure}
%}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                               %%
%% Tables                        %%
%%                               %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%% Use of \listoftables is discouraged.
%%
%\section*{Tables}
%  \subsection*{Table 1 - Sample table title}
%    Here is an example of a \emph{small} table in \LaTeX\ using  
%    \verb|\tabular{...}|. This is where the description of the table 
%    should go. \par \mbox{}
%    \par
%    \mbox{
%      \begin{tabular}{|c|c|c|}
%        \hline \multicolumn{3}{|c|}{My Table}\\ \hline
%        A1 & B2  & C3 \\ \hline
%        A2 & ... & .. \\ \hline
%        A3 & ..  & .  \\ \hline
%      \end{tabular}
%      }
%
\clearpage

\begin{sidewaystable*}

%\centering
\scriptsize

\section*{Tables}

  \subsection*{Table 1 - Real Sets Performance}

Performance comparison between different methods, while using three different
data sets.  The set 1 is a set of 1 million reads of length 160bp obtained from
a human genome sequenced within the 1000 Genomes Project. The set 2 is composed
of 500,000 reads of length 101bp generated from a chimpanzee genome, and the
set 3 is from an orangutan genome with reads of length 75bp (500,000 reads).
We select mutation threshold values from 1 to 5 in order to compare the speed,
sensitivity and comprehensiveness for each mapper.

\mbox{
\begin{tabular}{|l|l|r|r|c|r|r|c|r|r|c|r|r|c|r|r|c|}
\hline
\multicolumn{2}{|c|}{mutation threshold value} & \multicolumn{3}{|c|}{e=1} & \multicolumn{3}{|c|}{e=2} & \multicolumn{3}{|c|}{e=3} & \multicolumn{3}{|c|}{e=4} & \multicolumn{3}{|c|}{e=5} \\ \hline
Data  & Mapper & Time & Map  & Reads & Time & Map  & Reads & Time & Map  & Reads & Time & Map  & Reads & Time & Map & Reads\\
Set&  & (min.) & Locations & Mapped  & (min.) & Locations & Mapped & (min.) & Locations & Mapped & (min.) & Locations & Mapped & (min.) & Locations & Mapped \\ \hline
\multirow{3}{*}{Set 1} & {\em mrFAST-2.5.0.0}  & 9:36  & 186 & 139 & 11:12 & 912& 628 & 15:04 & 28791 & 17048 & 18:10 & 196152 & 98405 & 26:08 & 741255 & 261685 \\ 
 & mrFAST-2.1.0.6 & 45:24 & 187 & 139 & 96:45 & 1591& 1255 & 239:17 & 28817& 17048 & 207:57 & 196154 & 98405 & 275:31 & 741261 & 261685 \\ 
 & Bowtie-0.12.8   & 1:19 & 106 & 79 & 3:45 & 897 & 807 & 9:15 & 9935 & 5276 & - & - & - & - & - & - \\
 & BWA-0.6.1      & 1:22 & 121 & 117 & 2:12 & 1787 & 1611 & 14:09 & 14718 & 12599 & 28:21 & 78099 & 59517 & 47:50 & 293310 & 181793 \\
 \hline
\multirow{4}{*}{Set 2} & {\em mrFAST-2.5.0.0}  & 06:09 & 4472581 & 150589 & 08:27 & 5846210 & 188266 & 17:43 & 23816953 & 248523 & 34:10 & 40478618 & 273830 & 59:43 & 61247805 & 291364 \\
 & mrFAST-2.1.0.6 & 22:27 & 4472589 & 150589 & 44:29 & 10074899 & 188266 & 127:53 & 23860614 & 248523 & 92:21 & 40477986 & 273830 & 122:24 & 61246626 & 291364 \\ 
 & Bowtie-0.12.8 & 1:26 & 4408132 & 147941 & 3:52 & 11520494 & 204301 & 8:46 & 22745565 & 238210 & - & - & - & - & - & - \\
 & BWA-0.6.1 & 0:52 & 190185 & 150452 & 3:09 & 286474 & 210683 & 8:49 & 432833 & 247361 & 18:06 & 737619 & 270179 & 35:23 & 1200341 & 285845  \\
 & RazerS-3.0 & 139:49 & 2350133 & - & 141:54 & 5529657 & - & 144:01 & 10425235 & - & 138:43 & 18572971 & - & 160:35 & 30360502 & - \\
 \hline
 \multirow{4}{*}{Set 3} & {\em mrFAST-2.5.0.0} & 08:08 & 18687179 & 117443 & 16:31 & 28832787 & 180069 & 83:07 & 87786020 & 285605 & 102:59 & 151326521 & 345367 & 229:02 & 252380574 & 384931 \\
 & mrFAST-2.1.0.6 & 33:06 & 18686753 & 117443 & 70:05 & 38381433 & 180069 & 216:30 & 87900237 & 285605 & 151:00 & 151318153 & 345367 & 205:34 & 252352158 & 384931 \\ 
 & Bowtie-0.12.8 & 2:50 & 18397770 & 114790 & 7:44 & 44644438 & 198143 & 17:42 & 83504811 & 268742 & - & - & - & - & - & - \\
 & BWA-0.6.1 & 0:46 & 128937 & 117262 & 3:15 & 230848 & 206701 & 12:45 & 331919 & 280000 & 30:22 & 444669 & 327875 & 60:46 & 596867 & 356141 \\
 & RazerS-3.0 & 158:92 & 8469193 & - & 161:34 & 19466215 & - & 163:29 & 39336295 & - & - & - & - & 389:01 & 144658031 & - \\
\hline
\end{tabular}
}

\subsection*{Table 2 - Simulated Set}
Comparison of mapping accuracy across the selected mappers using three sets
of synthetic reads (Sets 4, 5 and 6) with read lengths of 72, 108 and 180
base pairs, respectively. Each benchmark is run against the human
reference genome using a mutation threshold of 3.

\mbox{
\begin{tabular}{|l|l|r|r|r|}
\hline
Data Set & Mapper & Time & Mapped Reads & Map Locations \\ \hline
\multirow{4}{*}{Set 4} &    {\em mrFAST-2.5.0.0} & 158:13 & 1000000 & -\\
					   		& mrFAST-2.1.0.6 & 531:48 & 1000000 & 112638623\\
		  					& Bowtie-0.12.8 & 27:12 & 831211 & 95923952\\
		 					& BWA-0.6.1 & 35:55 & 978102 & 112638623\\ \hline
\multirow{4}{*}{Set 5} &	{\em mrFAST-2.5.0.0} & 30:38 & 1000000 & -\\
					   		& mrFAST-2.1.0.6 & 455:40 & 1000000 & 26957196 \\
		  					& Bowtie-0.12.8 & 14:47 & 747457 & 22039633\\
		 					& BWA-0.6.1 & 30:35 & 952953 & 112638623\\ \hline
\multirow{4}{*}{Set 6} &    {\em mrFAST-2.5.0.0} & 19:34 & 1000000 & -\\
					   		& mrFAST-2.1.0.6 & 380:28 & 1000000 & 4484055\\
		  					& Bowtie-0.12.8 & 12:07 & 614827 & 3303329\\
		 					& BWA-0.6.1 & 24:34 & 883520 & 112638623\\ \hline
\end{tabular}
}

\end{sidewaystable*}

\clearpage




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%                               %%
%% Additional Files              %%
%%                               %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\section*{Additional Files}
%  \subsection*{Additional file 1 --- Sample additional file title}
%    Additional file descriptions text (including details of how to
%    view the file, if it is in a non-standard format or the file extension).  This might
%    refer to a multi-page table or a figure.
%
%  \subsection*{Additional file 2 --- Sample additional file title}
%    Additional file descriptions text.
%

\end{bmcformat}
\end{document}
