%%
%% This is file `sample-acmsmall.tex',
%% generated with the docstrip utility.
%%
%% The original source files were:
%%
%% samples.dtx  (with options: `acmsmall')
%% 
%% IMPORTANT NOTICE:
%% 
%% For the copyright see the source file.
%% 
%% Any modified versions of this file must be renamed
%% with new filenames distinct from sample-acmsmall.tex.
%% 
%% For distribution of the original source see the terms
%% for copying and modification in the file samples.dtx.
%% 
%% This generated file may be distributed as long as the
%% original source files, as listed above, are part of the
%% same distribution. (The sources need not necessarily be
%% in the same archive or directory.)
%%
%%
%% Commands for TeXCount
%TC:macro \cite [option:text,text]
%TC:macro \citep [option:text,text]
%TC:macro \citet [option:text,text]
%TC:envir table 0 1
%TC:envir table* 0 1
%TC:envir tabular [ignore] word
%TC:envir displaymath 0 word
%TC:envir math 0 word
%TC:envir comment 0 0
%%
%%
%% The first command in your LaTeX source must be the \documentclass command.
\documentclass[acmsmall]{acmart}
\usepackage{amsmath, graphicx, listings, algorithm2e, float, pgfplots}
\usepackage{xcolor,xspace}
\usepackage{libertine}

%\usepackage[backend=bibtex]{}

%%
%% \BibTeX command to typeset BibTeX logo in the docs
\AtBeginDocument{%
  \providecommand\BibTeX{{%
    \normalfont B\kern-0.5em{\scshape i\kern-0.25em b}\kern-0.8em\TeX}}}

%% Rights management information.  This information is sent to you
%% when you complete the rights form.  These commands have SAMPLE
%% values in them; it is your responsibility as an author to replace
%% the commands and values with those provided to you when you
%% complete the rights form.
\setcopyright{acmcopyright}
\copyrightyear{2018}
\acmYear{2018}
\acmDOI{10.1145/1122445.1122456}


%%
%% These commands are for a JOURNAL article.
\acmJournal{TOS}
\acmVolume{37}
\acmNumber{4}
\acmArticle{111}
\acmMonth{8}

\newcommand{\sys}{\textsc{DADI}\xspace}
\def\jnl#1{\textcolor{black}{#1}}
\def\lxx#1{\textcolor{black}{#1}}
% \def\zyy#1{\textcolor{red}{#1}}
\def\zzh#1{\textcolor{black}{#1}}
\def\zzhnew#1{\textcolor{blue}{#1}}
\def\new#1{\textcolor{black}{#1}}

%%
%% end of the preamble, start of the body of the document source.
\begin{document}

%%
%% The "title" command has an optional parameter,
%% allowing the author to define a "short title" to be used in page headers.
% \title{\sys: Block-Level Image Service for Agile and Elastic Application Deployment}
\title{DADI: Block-Level Image Service for the Cloud}

%%
%% The "author" command and its associated commands are used to define
%% the authors and their affiliations.
%% Of note is the shared affiliation of the first two authors, and the
%% "authornote" and "authornotemark" commands
%% used to denote shared contribution to the research.
% Huiba Li, Yiyun Zhang, 
% Yifan Yuan, Rui Du, Kai Ma, Lanzheng Liu, Windsor Hsu, 
% Xiaoxu Li, Yiming Zhang, Siqi Shen, Jiwu Shu

\author{Huiba Li}
% \authornotemark[1]
% \authornote{Corresponding author.}
\affiliation{%
 \institution{Alibaba Group}
%  \streetaddress{1 Th{\o}rv{\"a}ld Circle}
 \city{Beijing}
 \country{China}}
\email{huiba.lhb@alibaba-inc.com}

\author{Zhihao Zhang}
% \authornote{This work is was done when Zhihao Zhang was a research intern at
% Alibaba.}
% \authornotemark[1]
\email{zhihaoz@stu.xmu.edu.cn}
%\orcid{1234-5678-9012} 
% \authornotemark[1]
\affiliation{%
 \institution{Xiamen University and Alibaba Group}
 \city{Xiamen}
 \country{China}
}

\author{Yifan Yuan}
\affiliation{%
  \institution{Alibaba Group}
  % \streetaddress{8600 Datapoint Drive}
  \city{Beijing}
  \country{China}}
\email{tuji.yyf@alibaba-inc.com}

\author{Rui Du}
\affiliation{%
  \institution{Alibaba Group}
  % \streetaddress{8600 Datapoint Drive}
  \city{Beijing}
  \country{China}}
\email{ray.dr@alibaba-inc.com}

\author{Kai Ma}
\affiliation{%
  \institution{Alibaba Group}
  % \streetaddress{8600 Datapoint Drive}
  \city{Beijing}
  \country{China}}
\email{michael.mk@alibaba-inc.com}

\author{Lanzheng Liu}
\affiliation{%
  \institution{Alibaba Group}
  % \streetaddress{8600 Datapoint Drive}
  \city{Beijing}
  \country{China}}
\email{lanzheng.liulz@alibaba-inc.com}

\author{Yiming Zhang}
\affiliation{%
  \institution{Xiamen University and Alibaba Group}
  % \streetaddress{8600 Datapoint Drive}
  \city{Xiamen}
  \country{China}}
\email{sdiris@gmail.com}

\author{Windsor Hsu}
\authornote{Corresponding author.}
\affiliation{%
  \institution{Alibaba Group}
  % \streetaddress{8600 Datapoint Drive}
  \city{Sunnyvale, CA}
  \country{USA}}
\email{windsor.hsu@alibaba-inc.com}

% \authorsaddresses{Authors' addresses:
% Zhihao Zhang, Xiaoxu Li, Yiming Zhang, Siqi Shen, NICEX Lab, Xiamen University;
% Huiba Li, Alibaba;
% Yiyun Zhang, NICEX Lab, NUDT;
% % Yiming Zhang {and} Qiwen Ke, NICEX Lab, Xiamen University; 
% Jiwu Shu, Xiamen University and Tsinghua University.}

%%
%% By default, the full list of authors will be used in the page
%% headers. Often, this list is too long, and will overlap
%% other information printed in the page headers. This command allows
%% the author to define a more concise list
%% of authors' names for this purpose.
\renewcommand{\shortauthors}{Li and Zhang, et al.}

%%
%% The abstract is a short summary of the work to be presented in the
%% article.
\begin{abstract}
Businesses increasingly need agile and elastic
computing infrastructure to respond quickly to real world
situations. By offering efficient process-based virtualization and
a layered image system, containers are designed to enable agile
and elastic application deployment. However, creating or updating
large container clusters is still slow due to the image
downloading and unpacking process. 
In this paper, we
present \sys Image Service, a block-level image service for increased agility
and elasticity in deploying applications. \sys replaces the
waterfall model of starting containers (downloading image,
unpacking image, starting container) with fine-grained on-demand
transfer of remote images, realizing instant start of containers. 
\zzh{
To accelerate the cold start of containers,
\sys designs a pull-based prefetching mechanism 
which allows a host to read necessary image data beforehand 
at the granularity of image layers.
% based on time sequenced I/O patterns (trace).
% This mechanism has been integrated as a feature into command. 
%into ctr record-trace command.
We design a P2P-based decentralized image sharing architecture
to balance traffic among all the participating hosts
and propose a pull-push collaborative prefetching mechanism 
to accelerate cold start.
% in large clusters 
%
\sys efficiently supports various kinds of
runtimes including cgroups, QEMU, etc.,
further realizing ``build once, run anywhere''. \sys has been
deployed at scale in the production environment of 
Alibaba, serving one of the world's largest ecommerce platforms. Performance
results show that \sys can cold start 10,000 containers on
1,000 hosts within 4 seconds.
}
\end{abstract}

%%
%% The code below is generated by the tool at http://dl.acm.org/ccs.cfm.
%% Please copy and paste the code instead of the example below.
%%
% \begin{CCSXML}
% <ccs2012>
%  <concept>
%   <concept_id>10010520.10010553.10010562</concept_id>
%   <concept_desc>Computer systems organization~Embedded systems</concept_desc>
%   <concept_significance>500</concept_significance>
%  </concept>
%  <concept>
%   <concept_id>10010520.10010575.10010755</concept_id>
%   <concept_desc>Computer systems organization~Redundancy</concept_desc>
%   <concept_significance>300</concept_significance>
%  </concept>
%  <concept>
%   <concept_id>10010520.10010553.10010554</concept_id>
%   <concept_desc>Computer systems organization~Robotics</concept_desc>
%   <concept_significance>100</concept_significance>
%  </concept>
%  <concept>
%   <concept_id>10003033.10003083.10003095</concept_id>
%   <concept_desc>Networks~Network reliability</concept_desc>
%   <concept_significance>100</concept_significance>
%  </concept>
% </ccs2012>
% \end{CCSXML}

\begin{CCSXML}
  <ccs2012>
  <concept>
  <concept_id>10010520.10010521.10010537.10003100</concept_id>
  <concept_desc>Computer systems organization~Cloud computing</concept_desc>
  <concept_significance>500</concept_significance>
  </concept>
  </ccs2012>
\end{CCSXML}

\ccsdesc[500]{Computer systems organization~Cloud computing}

% \ccsdesc[500]{Computer systems organization~Embedded systems}
% \ccsdesc[300]{Computer systems organization~Redundancy}
% \ccsdesc{Computer systems organization~Robotics}
% \ccsdesc[100]{Networks~Network reliability}

%%
%% Keywords. The author(s) should pick words that accurately describe
%% the work being presented. Separate the keywords with commas.
%% \keywords{datasets, neural networks, gaze detection, text tagging}

\thanks{This work is supported by
the National Natural Science Foundation of China 
(NSFC 62072306). This work was supported by Alibaba Group 
through Alibaba Research Fellowship Program and Alibaba Research Intern Program.
%\\ Authors' addresses:
% Zhihao Zhang, Xiaoxu Li, Yiming Zhang, Siqi Shen, NICEX Lab, Xiamen University;
% Huiba Li, Alibaba;
% Yiyun Zhang, NICEX Lab, NUDT;
% % Yiming Zhang {and} Qiwen Ke, NICEX Lab, Xiamen University; 
% Jiwu Shu, Xiamen University and Tsinghua University.
% Li Chen {and} Gong Zhang, Huawei, Shenzhen, China.
}


%%
%% This command processes the author and affiliation and title
%% information and builds the first part of the formatted document.
\maketitle

\section{Introduction}
As business velocity continues to rise, businesses increasingly need
to quickly deploy applications, handle unexpected surge, fix security flaws,
and respond to various real world situations. By offering efficient
process-based virtualization and a layered image system, containers
are designed to enable agile and elastic application
deployment. However, creating or updating large container clusters is
still slow due to the image downloading and unpacking process.  For
example, Verma et al. in \cite{borg} report that the startup latency
of containers is highly variable with a typical median of about 25s,
and pulling layers (packages) accounts for about 80\% of the total time.

% The situation is even worse in heavily loaded clusters. For example, at the very
% beginning of the 2018 holiday shopping season, one of the applications
% of Alibaba ran out of resources and needs emergent scaling out. But the deployment 
% of new container instances took a lot of time, and the breakdown lasts about half an
% hour to complete. This application was responsible for modifying  
% shipping addresses. Until it was scaled out, many
% customers had to place orders with a wrong shipping address and
% leave messages to ask customer service to manually update the
% address. Despite the expenditure of immense manpower to handle these
% manual changes, there was not enough time to process most of them. The
% issue also resulted in many customers abandoning their shopping carts.

Highly elastic container deployment has also become expected of modern
cloud computing platforms. In serverless computing~\cite{jonas2019cloud}, 
high cold-start latency could violate responsiveness SLAs. Workarounds 
for the slow start are cumbersome and expensive, and include storing all 
images on all possible hosts. Therefore, minimizing cold-start latency 
is considered a critical system-level challenge for serverless
computing~\cite{jonas2019cloud,baldini2017serverless}.

%In general,
%improving container startup time can enable many new capabilities:
%applications can scale instantly to handle flash-crowd events;
%cluster schedulers can frequently rebalance nodes at low cost;
%software upgrades can be rapidly deployed when a security flaw or
%critical bug is fixed; and developers can interactively build and test
%distributed applications~\cite{harter2016slacker}.

There has been recent work on reducing container startup time by
accelerating the image downloading process with a peer-to-peer (P2P)
approach~\cite{borg,tupperware,dragonfly,kraken,fid}. We relied on a
P2P download tool for several years to cope with the scalability
problem of the Container Registry.  However, the startup latency was still
unsatisfactory. Another general approach to the problem is to read
data on-demand from remote
images~\cite{crfs,cvmfs,cider,harter2016slacker,wharf,exo-clone,teleport}.
Because container images are organized as overlaid layers of files
and are presented to the container runtime as a file system directory,
all of the previous work adhered to the file system interface, even though
some of them actually used block stores as their backends.

Implementing a POSIX-compliant file system interface and exposing it
via the OS kernel is relatively complex. Moreover, using file-based layers
has several disadvantages. First, updating big files (or their attributes) is slow
because the system has to copy whole files to the writable layer
before performing the update operations. Second, creating hard links is
similarly slow, because it also triggers the copy action as cross
layer references are not supported by the image.  Third, files may have
a rich set of types, attributes, and extended attributes that are not
consistently supported on all platforms. Moreover, even on one
platform, support for capabilities such as hard links, sparse files,
etc. tends to be inconsistent across file systems.

With the rapid growth of users running containers on public cloud and
hybrid cloud, virtualized secure containers are becoming mainstream.
Although it is possible to pass a file-based image from host to guest
via 9p~\cite{9p-virtio} or virtio-fs~\cite{virtiofs}, there is usually a performance
cost. There are also complications in handling heterogeneous
containers such as Windows guest on Linux host, or vice versa. This
means that some users may not be able to burst efficiently to public
clouds, i.e., run their applications primarily on premise with an
efficient container runtime, and scale them out under load to public
clouds with a virtualized secure container runtime.

In this paper, we observe that the benefits of a layered image are not
contingent on representing the layers as sets of file changes.  More
specifically, we can achieve the same effect with block-based layers
where each layer still corresponds to a set of file changes but is
physically the set of changes at the block level underneath a given
file system. Such a design allows the image service to be file system
and platform agnostic. The image service is solely
responsible for managing and distributing physical images to the
appropriate hosts. It is up to the individual host or container on the
host to interpret the image with an appropriate file system. This
approach also allows dependency on the file system to be explicitly
captured at image creation time, further enhancing consistency in the
runtime environment of applications.

We have designed and implemented a complete system called {\it DADI
 Image Service} ({\it DADI} in short) based on this approach. 
The name {\it DADI} is an acronym
for {\it Data Acceleration for Disaggregated Infrastructure} and
describes several of our initiatives in enabling a disaggregated
infrastructure. At the heart of the \sys Image Service is a new
construct called Overlay Block Device (OverlayBD) which provides a
merged view of a sequence of block-based layers.  Conceptually, it can
be seen as the counterpart of union file systems that are usually used to
merge container images today. It is simpler than union file systems
and this simplicity enables optimizations including flattening of the
layers to avoid performance degradation for containers with many
layers. {\zzh{More generally, the simplicity of block-based layers
facilitates (1) fine-grained on-demand data transfer of remote images;
(2) online decompression with efficient codecs and compression-aware caching; 
(3) trace-based prefetching; (4) P2P-based decentralized image sharing 
architecture with collaborative block 
prefetching to handle burst workload; (5)
flexible choice of guest file systems and host systems; (6) efficient
modification of large files (cross layer block references); (7) easy
integration with the container ecosystem.}}

We have applied \sys to both cgroups~\cite{cgroups} runtime and QEMU
runtime.  Support for other runtimes such as
Firecracker~\cite{firecracker}, gVisor~\cite{gvisor},
OS$^v$~\cite{osv}, etc. should be technically straightforward.
\sys has been deployed at scale in the production
environment of Alibaba to serve one of the world's largest ecommerce
platforms.  Performance results show that \sys can cold start 10,000
containers on 1,000 hosts within 4 seconds. 
% We are currently working on an edition of \sys for our public cloud service.

\section{Background and Motivation}

\zzh{Containers rely on process-based virtualization to offer an
appealing, lightweight alternative to traditional VM-based
virtualization (e.g., QEMU, VMware, Xen). Linux, for instance,
provides the cgroups~\cite{cgroups} and namespaces mechanisms that
enable visibility and performance isolation between containers. Due to
its efficiency, all of the applications of one of the world's largest
online marketplaces are running in containers.}
%

\zzh{Containers provide weak isolation between applications on the same
host. This level of isolation is not sufficient in typical
multi-tenant environments, especially those on public clouds. In many
of these environments, containers are run inside virtual machines as a
result. Since virtualization is necessary, there have been some recent
works~\cite{manco2017my, firecracker, kata} on virtualized secure
runtimes. The basic idea is to make hypervisors as lightweight as
possible, and therefore suitable for containers, in order to provide
alternatives to the cgroups-based traditional runtime.}

\zzh{From the container engine's perspective, an image is the root file system of a
complete system.  To start a cgroups-based container on a host, the
engine basically mounts the image to a directory on the host and
chroot into that directory. This is possible because the kernel is
shared between the host and container. On the other hand, virtualized
runtimes do not share the host kernel so the image has to be passed
from the host to the guest via some kind of interface like virtio-fs,
which is still under development~\cite{virtiofs}. A practical approach
today is to place the images on thin-provisioned 
LVM (Logical Volume Manager) volumes, and pass the
volumes to guests as virtual block devices.}

\begin{figure}[tb]
\centering
\includegraphics[width=0.9\linewidth]{figs/container-layers.pdf}
\caption{Layered Container Image. The image layers (L1, L2) are
  read-only shared by multiple containers (C1, C2) while the container
  layers (LC) are privately writable.}
\label{layers}
\end{figure}


\subsection{Container Image}

Container images are composed of multiple incremental layers so as to
enable incremental image distribution. Each layer is essentially a
tarball of differences (addition, deletion or update of files)
from a previous layer. The container system may apply the diffs
in a way defined by its storage driver. The layers are usually much
lighter than VM images that contain full data. Common layers are
downloaded only once on a host, and are shared by multiple containers
as needed. Each container has a dedicated writable layer (also known
as container layer) that stores a private diff to the image, as shown
in Figure~\ref{layers}.  Writing to a file in the image may trigger a
copy-on-write (CoW) operation to copy the entire file to the writable
layer.

To provide a root file system to containers, the container engine usually
depends on a union file system such as overlayfs, aufs, etc. These
union file systems provide a merged view of the layers which are
stored physically in different directories. The container system can
also make use of Logical Volume Manager (LVM) thin-provisioned
volumes, with each layer mapped to a snapshot.
%The developer provides a recipe for building an
%image layer in the form of a script in a domain-specific language. The
%script primarily specifies the base layer (by \texttt{FROM}
%instruction), the files / directories to copy to the new layer (by
%\texttt{COPY} instruction), and also metadata of the image layer such
%as \texttt{ENTRYPOINT}, etc. The engine creates and prepares a new
%writable layer according to the script, and then commits it to a
%read-only image layer.

%As LVM provides only merged views, the container engine has
%to traverse all files of two adjacent layers, in order to generate an
%image layer blob for committing or pushing.

The container system has a standard web service for image uploading and
downloading called the Container Registry. The Container Registry
serves images with an HTTP(S)-based protocol which, together with the
incremental nature of layers, makes it a lot easier to distribute
container images widely as compared to VM images. 
%This is a huge
%advantage of the container system.

\subsection{Remote Image}

The image distribution operation, however, consumes a lot of network
and file system resources, and may easily saturate the service capacity
allocated to the user/tenant, especially when creating or updating
large container clusters. The result is long startup latencies for
containers.
%The logically centralized nature of the Registry also makes the
%service uneconomical to scale, compared to P2P transfer.
After an image layer is received, it has to be unpacked. This
unpacking operation is CPU, memory (for page cache)
and I/O intensive at the same time so that it often affects other
containers on the host and sometimes even stalls them.
%especially on hosts with HDDs.

To some extent, the current container image service is a regression to
a decade ago when VM images were also downloaded to hosts. A
similar problem has been solved once with distributed block
stores~\cite{ursa, blizzard, ceph, sheepdog} where images are stored
on remote servers, and image data is fetched over the network
on-demand in a fine-grained manner rather than downloaded as a
whole. This model is referred to as ``remote image''. There are
several calls for this model in the container world (e.g.,~\cite{crfs,cvmfs}).

The rationale for remote image is that only part of the image is
actually needed during the typical life-cycle of a container, and the
part needed during the startup stage is even smaller. According to
\cite{harter2016slacker}, as little as 6.4\% of the image is used
during the startup stage. Thus remote image saves a lot of time and resources by
not staging the entire image in advance. And with the help of data
prefetching (by OS) or asynchronous data loading (by applications
themselves), the perceived time to start from a remote image can be
effectively reduced further.

\zzhnew{Remote images,} however, require random read access to the contents of the
layers. But the standard layer tarball was designed for sequential reading
and unpacking, and does not support random reading. Thus the format has to be 
changed.% to meet this new requirement. 

\subsection{File-System-Based Remote Image}

\label{fsimages}
%CRFS~\cite{crfs} and CernVM-FS~\cite{cvmfs} are recent proposals for
%file-system-level remote image services.
CRFS~\cite{crfs} is a read-only
file system that can mount a container
image directly from a Container Registry.
% (such as gcr.io)without pulling the image locally first.
CRFS introduces an improved format called Stargz that supports random
reads. Stargz is a valid tar.gz format but existing
images need to be converted to realize \zzhnew{a} remote image service.
%CRFS
%does not provide a writable file system layer and relies on
%traditional Linux file systems to provide the container layer. 
%can do the aufs/overlay2-ish unification of multiple read-only stargz layers, 
%but it stops short of trying to unify a writable file system layer atop. For that, you 
%have to use the traditional Linux file systems.
Instead of having the read-only file system read files directly from
the layer, one could also extract the files in each layer and store
them in a repository such as CernVM-FS~\cite{cvmfs} where they can be
accessed on demand.
% CernVM-FS also realizes a read-only
% file system to serve image layers. But CernVM-FS turns layer into a
% virtual concept: it extracts files in the layer and stores them in
% CernVM-FS repository. The unit of transfer and reuse is a file or, in
% the case of big files, chunks. File and chunk level deduplication is provided
% transparently. Files are transferred on demand only for the first time
% with HTTP, leveraging proxy caches to address the scalability problem.
CFS~\cite{cfs} is a distributed file system to serve unpacked layer files for hosts.
% However it focuses on aspects of a general file system and does not provide 
% direct support for layering (snapshot) semantics that are needed for container 
% images. CFS does not realize full POSIX semantics.
Wharf~\cite{wharf}, Slacker~\cite{harter2016slacker}, Teleport~\cite{teleport} 
serve unpacked layer files through NFS or CIFS/SMB.
% They additionally 
% realize cooperation between hosts, Registry and the image service, 
% to seamlessly switch from the Registry to their own image services.

Due to the complexity of file system semantics, there are several
challenges with file-system based image service. For example, passing
a file system from host to guest across the virtualization boundary
tends to limit performance. The I/O stack involves several complex
pieces (including virtio-fs~\cite{virtiofs}, FUSE~\cite{fuse},
overlayfs~\cite{overlayfs}, remote image itself) that need to be
made robust and optimized.  When compared to a block device, the file
system also presents a larger attack surface that potentially reduces
security in public clouds.

POSIX-compliant features are also a burden for non-POSIX workloads
such as serverless applications, and an obstacle to Windows workloads
running on Linux hosts (with virtualization). In addition,
unikernel~\cite{osv, kylinx, gvisor} based applications are usually
highly specialized, and tend to prefer a minimalistic file system such
as FAT~\cite{fat} for efficiency.  Some of them may even require a
read-only file system. These different requirements are difficult to
be satisfied by a file system that is predefined by the image service.

Furthermore, some desirable features of popular file
systems such as XFS~\cite{xfs}, Btrfs~\cite{btrfs}, ZFS~\cite{zfs}, etc., are missing
in current file-system-based image services, and are not likely to be
supported soon. These include file-level or directory-level snapshot,
deduplication, online defragmentation, etc. It is even difficult to
efficiently (without copy) support standard features such as hard links
and modification of files or file attributes.
 
\subsection{Block-Snapshot-Based Remote Image}
% A block-level image, on the other hand, is much simpler and it gives users
% the freedom to choose a file system that fit best for their applications. A real file 
% system is by far more advanced than a tarball image, however, there is another 
% issue to address --- image layering.

% Existing block stores such as Ursa~\cite{ursa},
% Blizzard~\cite{blizzard}, Ceph RBD~\cite{ceph},
% Sheepdog~\cite{sheepdog} and Linux LVM (Logical Volume Manager) are usually
% backends of virtual disk services such as AWS's EBS.

Modern block stores~\cite{ceph,ursa,blizzard,sheepdog} 
usually have a concept of copy-on-write snapshot
which is similar to layer in the container world. Cider~\cite{cider}
and Slacker~\cite{harter2016slacker} are attempts to make use of such
similarity by mapping image layers to the snapshots of Ceph and
VMstore~\cite{vmstore}, respectively.
%Slacker additionally depends on a 
%centralized NFS server; it suffers from scalability problem, too.

Container image layer and block store snapshot, however, are not
identical concepts. Snapshot is a point-in-time view of a disk. Its
implementation tends to be specific to the block store. In many
systems, snapshots belong to disks.
% For example, snapshots in Ceph RBD are named as 
% \texttt{\{pool-name\}/\{image-name\}@\{snap-name\}}. This naming
% convention clearly shows the relationship between snapshot and disk.
When a disk is deleted, its snapshots are deleted as well. Although
this is not absolutely necessary, many block stores behave this way by
design. Layer, on the other hand, refers to the incremental change
relative to a state which can be that of a different image.  Layer
emphasizes sharing among images, even those belonging to different
users, and has a standard format to facilitate wide distribution.

\zzh{In this paper, we observe that incremental image can be realized with
block-based layers where each layer corresponds to a set of file changes but is
physically the set of changes at the block level underneath a given
file system. Such a design allows the image service to be file system
and platform agnostic. In other words, the image service is solely
responsible for managing and distributing the physical images to the
appropriate hosts. It is up to the individual host or container on the
host to interpret the image with the appropriate file system. We will
present this design in greater detail in the following sections.}


\zzh{\zzhnew{The drawbacks of block-level images are the lack of file system semantics 
(such as file metadata and namespace structure, etc.) and the difficulty to 
share file cache memory between containers,}
because the kernel does not know the relationships between block devices.
\zzhnew{One possible method to address the issue is to map host cache pages 
which load image layers into guests’ virtual address space. This method would 
require significant changes to the Linux kernel and underlying block-level 
storage architecture, which necessitates a non-trivial development effort.} 
VMs on the same host do not share cache memory all the time, and 
everything is running very well. 
What's more, the shared-cache feature needs refinement for multi-tenancy, as sharing 
also implies interference, which is not always acceptable.}

\section{\sys Image Service}

\sys is designed to be
a general solution that can become part of the container
ecosystem.
% %
% The primary challenge is to meet many requirements at the same time: 
% (1) a layered image that is 
% (2) capable of random access 
% (3) with online decompression; 
% (4) independent of transfer protocols and
% (5) underlying storage systems;
% (6) minimal attack surface.
% To the best of our knowledge, \sys is the only solution that meets all of
% these requirements.
The core of \sys (Sections~\ref{image-layers}-\ref{container-layer}) is 
a remote image design that inherits the layering
model of \zzhnew{existing} container image \zzhnew{designs}, and remains compatible with the Registry by
conforming to the OCI-Artifacts~\cite{oci-artifacts} standard.
%
\sys is independent of transfer protocols so it is possible to insert an optional P2P 
transfer module to cope with large-scale applications (Section~\ref{sec-p2p}).
\sys is also independent of the underlying storage system so users can choose an 
appropriate storage system such as HDFS, NFS, CIFS, etc., to form a fully 
networked solution (Section~\ref{pubsln}).
%
\sys uses a block-level interface which minimizes \zzhnew{the} attack surface, a
design point especially relevant for virtualized secure containers.

\subsection{\sys Image}

\begin{figure}[tb]
  \centering
  \includegraphics[width=0.9\linewidth]{figs/dadi-layers.pdf}
  \caption{\sys Image. \zzhnew{The \sys image layer} (L1, L2) consists of
    modified data blocks. \sys uses an overlay block device to provide
    each container (C1, C2) with a merged view of its layers.}
  \label{dadi-arch}
  \end{figure}

%In this section we will describe the design of DADI, by resolving following issues:
%fetch a specified part of a layer blob from Registry; 
%get to know which part of which blob should be fetch;
%compression and online decompression;
%handle write opeartions;
%and finally support post-startup stage with a shared storage system.
As shown in Figure~\ref{dadi-arch}, \sys models an image as a virtual
block device on which is laid a regular file system such as ext4.
Note that there is no concept of file at the block level. The file
system is a higher level of abstraction atop the \sys image.  When a
guest application reads a file, the request is first handled by the
regular file system which translates the request into one or more
reads of the virtual block device. The block read request is forwarded
to a \sys module in user space, and then translated into one or more
random reads of the layers.

% some of which may be stored in the Registry in case of cold
% start.

\label{image-layers}
\sys models an image as a virtual block device while retaining the
layered feature. Each \sys layer is a collection of modified data
blocks under the filesystem and corresponding to the files added,
modified or deleted by the layer. \sys provides the container engine
with merged views of the layers by an overlay block device (OverlayBD)
module. We will use the terms layer and changeset interchangeably in the rest of
the paper for clarity. The block size (granularity) for
reading and writing is 512 bytes in DADI, \zzhnew{which is the same as the 
sector size of a real block device.} The rule for overlaying changesets is simple: 
for any block,
the latest change takes effect. The blocks that are not changed
(written) in any layer are treated as all-zero blocks.
%Figure~\ref{overlayer} shows an example of applying the
%rule to various layers.
%
%\begin{figure}[htb]
%\centering
%\includegraphics[width=.9\linewidth]{figs/overlayer.pdf}
%\caption{Overlaying 2, 3, 4 changesets respectively. Assuming a 20-block 
%virtual disk image represented as a $5 \times 4$ grid, with color representing data.}
%\label{overlayer}
%\end{figure}

\begin{figure}[h]
  \begin{lstlisting}[numbers=none, basicstyle=\footnotesize]
  struct Segment {
    uint64_t offset:48; // offset in image's LBA
    uint16_t length;    // length of the change
    uint64_t moffset:48;// mapped offset in layer blob
    uint16_t pos:12;    // position in the layer stack
    uint8_t flags:4;    // zeroed? etc.
    uint64_t end() {return offset + length;}
  };
  \end{lstlisting}
  \caption{Definition of Segment. LBA is short for logical block address; offsets and lengths are in units of blocks (512 bytes); the size of the struct is 16 bytes.}
  \label{fig-segment}
  \end{figure}

The raw data written by the user, together with an index to the raw
data, constitutes the layer blob. The \sys layer blob format further
includes a header and a trailer.
%, as shown in the following figure:
%\smallskip
%%\begin{figure}[H]
%%\centering
%\includegraphics[width=.9\linewidth]{figs/lsmt.pdf}
%%\end{figure}
To reduce memory footprint and increase deployment density, we design an
index based on variable-length segments, as illustrated in
Figure~\ref{fig-segment}.
A segment tells where a change begins and ends in the image's
logical block address (LBA) space, and also where the latest data is
stored in the layer blob file's offset space. 
In this design, adjacent segments that are contiguous can
be merged into a single larger segment to reduce the index size.
%both in memory and layer blobs.
The segment struct can record 
a change as small as 1 block which is the minimal write size for a block 
device. This avoids Copy-on-Write operations and helps to yield consistent 
write performance. 
%
%

The index 
is an array of non-overlapping segments sorted by their offset. According 
to statistics from our production environment, the indices have fewer than 
4.5K segments (see Section~\ref{eval} for details) which corresponds to only 
72KB of memory. In contrast, the qcow2 image format of QEMU has a fixed
block size of 64KB by default, and an index based on radix-tree. QEMU allocates 
MBs of memory by default to cache the hottest part of its index. 
%

\begin{figure}[t]
\centering
\includegraphics[width=0.75\linewidth]{figs/lookup.pdf}
\caption{Index Lookup for Reading \sys Image. The lookup operation is
  a range query on a set of ordered non-overlapping variable-length
  segments, each of which points to the location of its raw data in
  the layer blob(s). \zzhnew{The segments to read are indicated in dark green 
  while the others are in light green, and the raw data to read is 
  presented in black while the others are in gray.}}
\label{figlookup}
\end{figure}



% \begin{figure*}[t]
%   \begin{minipage}[t]{.48\textwidth} 
%     \includegraphics[width=1.0\linewidth]{figs/lookup.pdf}
%       \caption{Index Lookup for Reading \sys Image. The lookup operation is
%        a range query on a set of ordered non-overlapping variable-length
%        segments, each of which points to the location of its raw data in
%        the layer blob(s).}
%       \label{figlookup}
%   \end{minipage}
%   \hfill
%   \begin{minipage}[t]{.48\textwidth} 
%     \includegraphics[width=1.0\linewidth]{figs/index-merge.pdf}
%   \caption{Index Merge.}
%   \label{figmerge}
%   \end{minipage}
%   \hfill
% \end{figure*}



 To realize reading, \sys performs a range lookup in the 
index
%with an improved binary search algorithm
to find out where in the
blob to read. The problem can be formally stated as given a
set of disjoint segments in the LBA space, find all the segments (and
``holes'') within the range to read. This problem is depicted in Figure~\ref{figlookup}.
%
For efficiency, the algorithm deals with variable-length segments directly
without expanding them to fixed-sized blocks.
% Suppose \texttt{s}
% is a segment and \texttt{x} is an integer, we define \texttt{s>x} as \texttt{s.offset>x}; 
% and \texttt{s<x} as \texttt{s.end()<=x}; and \texttt{s==x} otherwise
% (s intersects with x).
As the index is ordered and read-only, we simply use binary 
search for efficient lookup, as shown in Algorithm~\ref{lookup}.
A B-tree could achieve higher efficiency
% , which is more friendly
% to CPU cache and memory page swap, especially for large indices.
but as the index contains only a few thousand entries in practice, we
leave this optimization as a possible future work.

\begin{algorithm}[ht]
  %\DontPrintSemicolon
  %\SetAlgoLined
  %\label{Algorithm:lookup}
  \SetAlFnt{\tiny \sf}
  \SetKw{Yield}{yield}
  \SetKw{And}{and}
  \KwIn{the range \textbf{(offset, length)} to look up}
  end $\leftarrow$ offset + length\;
  i $\leftarrow$ index.binary\_search\_first\_not\_less(offset)\;
  \If {i < index.size()} {
      delta $\leftarrow$ offset - index[i].offset\;
      \If (\tcp*[f]{trim \& yield 1st segment}) {delta > 0} {
          s $\leftarrow$ index[i];
          s.offset $\leftarrow$ offset\;
          s.moffset +=  delta;
          s.length -=  delta\;
          \Yield s;
          offset $\leftarrow$ s.end();
          i++;
      }
  }
  \While {i < index.size() \And index[i].offset < end} {
      len $\leftarrow$ index[i].offset - offset\;
      \If (\tcp*[f]{yield a hole}) {len > 0} {
          \Yield Hole(offset, len)\;
          offset $\leftarrow$ index[i].offset\;
      }
      s $\leftarrow$ index[i]\tcp*[r]{yield next segment}
      s.length $\leftarrow$ min(s.length, end - offset)\;
      \Yield s;
      offset $\leftarrow$ s.end();
      i++;
  }
  \If (\tcp*[f]{yield final hole}) {offset < end} {
      \Yield Hole(offset, end - offset)\;
  }
  \caption{Index Lookup. Yields a collection of segments within the
    specified range (offset, length) with \texttt{i} initialized to the
    first element in the index that is not less than \texttt{offset},
    and Hole being a special type of segment representing a range that
    has never been written.}
  \label{lookup}
  \end{algorithm}

\begin{figure}[t]
\centering
  \includegraphics[width=0.8\linewidth]{figs/index-merge.pdf}
  \caption{Index Merge.}
  \label{figmerge}
\end{figure}

\subsection{Merged View of Layers}

When there are multiple layers, if the lookup procedure goes through
the layers one by one, the time complexity is $O(n \cdot \log m)$
where $n$ is the number of layers and $m$ is the average number of
segments in a layer. In other words, the cost increases linearly with
$n$. We optimize this problem with a merged index that is
pre-calculated when the indices are loaded, thus reducing the
complexity to $O(\log M)$ where $M$ is the number of segments in
the merged index. The merging problem is illustrated in
Figure~\ref{figmerge}.

\begin{algorithm}[t]
  
  \SetAlFnt{\tiny \sf}
  \SetKw{Yield}{yield}
  \SetKw{And}{and}
  \SetKw{In}{in}
  \KwIn{an array of \textbf{indices}[1..n]; \newline  
  subscript \textbf{i} of the indices array for this recursion;\newline  
  the range to merge \textbf{(offset, length)}}
  \For {s \In indices[i].lookup(offset, length)} {
      \uIf {s is NOT a Hole} {
          s.pos $\leftarrow$ i\; 
          \Yield s; 
      } \ElseIf (// \textit{ignore a real hole}) {i > 0} {
          indices\_merge(indices, i-1, s.offset, s.length); 
      }
  }
  \caption{Index Merge by Recursion. 
    % Yields an equivalent index in the 
    % form of a collection of segments. To merge them all, invoke as 
    % merge(indices, n, 0, MAX\_OFFSET). 
  }
  \label{idx-merge}
  \end{algorithm}
  
\begin{algorithm}[t]
  %\label{pread}
  \SetAlFnt{\tiny \sf}
  \SetKw{Yield}{yield}
  \SetKw{And}{and}
  \SetKw{In}{in}
  \KwIn{an array of file objects \textbf{blobs}[0..n]; \newline 
  a range \textbf{(offset, length)} to pread}
  \For {s \In merged\_index.lookup(offset, length)} {
      // \textit{s.pos == 0 for Hole segments} \\
      // \textit{blobs[0] is a special virtual file object} \\
      // \textit{that yields zeroed content when pread} \\
      blobs[s.pos].pread(s.offset, s.length); 
  }
  \caption{Read Based on Merged Index.}
  \label{pread}
  \end{algorithm}

To merge the indices, we put them in an array indexed from $1$ to $n$
where $n$ is the number of layers, and in an order such that base
layers come earlier. Algorithm~\ref{idx-merge} shows the recursive
procedure to merge indices for a specified range. To merge them as a
whole, the algorithm is invoked for the entire range of the image. We
make use of the \texttt{pos} field in the final merged index to
indicate which layer a segment comes from. With the merged index,
random read operation (pread) can be easily implemented as
Algorithm~\ref{pread}, supposing that we have an array of file objects
representing the ordered layers' blobs.

% \begin{figure}[t]
% \centering
% \includegraphics[width=.9\linewidth]{figs/index-size-2020.pdf}
% \caption{Index Size of Production Applications.}
% \label{index-size}
% \end{figure}

% \begin{figure}[h]
%   \centering
%   \includegraphics[width=.9\linewidth]{figs/perf-index.pdf} 
%   \caption{Index Performance on Single CPU Core.}
%   \label{perf-index}
% \end{figure}

\begin{figure*}[h]
  \begin{minipage}[t]{.33\textwidth} 
    \includegraphics[width=.9\linewidth]{figs/index-size-2020.pdf}
    \caption{Index Size of Production Applications.
    \zzhnew{Each point represents a merged index of an image.}}
    \label{index-size}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=.9\linewidth]{figs/perf-index.pdf} 
    \caption{Index Performance on Single CPU Core.}
    \label{perf-index}
  \end{minipage}
  \begin{minipage}[t]{.32\textwidth} 
    \centering
    \includegraphics[width=.9\linewidth]{figs/relative-layer-sizes.pdf} 
    \caption{Relative Layer Blob Size. \zzhnew{All layer blob 
    sizes are normalized to the .tar format sizes.}}
    \label{relative-layer-sizes}
  \end{minipage}
  \hfill
\end{figure*}

We analyzed 1,664 \sys image layers from 205 core applications in our
production environment to extract the size of the merged indices. The
statistics are summarized in Figure~\ref{index-size}. They show that
the indices have no more than 4.5K segments so the algorithm for
merging indices is efficient enough to be run when an image is
launched. Observe also that the number of segments is not correlated
with the number of layers. This suggests that the performance of DADI
OverlayBD does not degrade as the number of layers
increases. Figure~\ref{perf-index} plots the throughput of index
queries on a single CPU core. Observe that at an index size of 4.5K
segments, a single CPU core can perform more than 6 million index
queries per second. In Section~\ref{performance}, we find that IOPS tops
out at just under 120K for both LVM and DADI, suggesting that DADI
spends no more than $1/50$ of a CPU core performing index lookups.

\subsection{Compression and Online Decompression}
Standard compression file formats such as gz, bz2, xz, etc., do not support
efficient random read operation. Files in these formats usually need to be decompressed
from the very beginning until the specified part is reached. To support
compression of the layers' blobs and enable remote image at the same time,
\sys introduces a new compression file format called ZFile.

ZFile includes the source file compressed in a fixed-sized
chunk-by-chunk manner and a compressed index. To read an offset into a
ZFile, one looks up the index to find the offset and length of the
corresponding compressed chunk(s), and decompresses only these
chunks. ZFile supports various efficient compression algorithms
including lz4, zstd, gzip, etc., and can additionally store a
dictionary to assist some compression algorithms to achieve higher
compression ratio and efficiency.
% ZFile is designed to be a general
% compression format that is not bound to the \sys image
% format.
Figure~\ref{zfile} illustrates the format of ZFile.

\begin{figure}[h]
  \centering
    \includegraphics[width=0.7\linewidth]{figs/zfile.pdf}
    \caption{ZFile Format.}
    \label{zfile}
\end{figure}

The index stored in ZFile is an array of 32-bit integers, each of
which denotes the size of the corresponding compressed chunk. The
index is compressed with the same compression algorithm as the data
chunks. When loaded into memory, the index is decompressed and
accumulated into an array of 64-bit integers denoting the offsets of
the compressed chunks in the ZFile blob.  
After the conversion, index lookup becomes a
simple array addressing at \texttt{offset/chunk\_size}.

Due to the fixed-size nature of chunks and the aligned nature of the
underlying storage device, ZFile may read and decompress more data
than requested by a user read. The decompression itself is an extra
cost compared to the conventional I/O stack. In practice, however,
ZFile improves user-perceived I/O performance even on servers with
high-speed NVMe SSD.  The advantage is even larger for slower storage
(e.g., HDD or Registry).  This is because, with the compression algorithm
in use (lz4), the time saved reading the smaller amount of compressed
data more than offsets the time spent decompressing the data. See
Section~\ref{performance} for detailed results.

In order to support online decompression, a fast compression algorithm
can be used at some expense to the compression ratio. We typically use
lz4 in our deployment. Individually compressing chunks of the original
files also impacts the compression ratio. As a result, \sys images
are usually larger than the corresponding .tgz images but not by
much.

We analyzed the blob sizes of 205 core applications in our production
environment. Figure~\ref{relative-layer-sizes} shows the blob sizes in
various formats relative to their .tar format sizes. In general, DADI
uncompressed format (.dadi) produces larger blobs than .tar, due to
the overhead of the image file system (ext4 in this case) but the
overhead is usually less than 5\% for layers that are larger than 10MB.  Note
that the compression ratio varies greatly among these images and some
of them are not compressible. As discussed, ZFile blobs tend to be
larger than their .tgz counterparts.

By adhering to the layered model of container image, \sys images are 
able to share layers. To further save space and network traffic, 
deduplication can be performed at the chunk level of \sys images,
followed by compression of the unique chunks. 

% If the Registry needs to support cross-image deduplication, it must be 
% able to decode ZFile format, in addition to gzip. And it must perform 
% deduplication in block level, as \sys image blobs do not have an unpacking 
% operation.

\subsection{Compression-Aware Caching}
\label{ca-caching}

\zzhnew{DADI proposes an efficient compression-aware caching policy for 
compressed data to minimize amplification of both I/O and computation 
induced by compressed data, and avoid double caching caused by 
compressed and decompressed data page cache. The compressed data 
caching should only retain the partial compressed blocks that are 
not yet decompressed, and evict the compressed blocks as soon as 
they get decompressed.}

\begin{figure}[h]
  \centering
    \includegraphics[width=0.7\linewidth]{figs/ca-caching.pdf}
    \caption{Pages of 4KB ($p_0$--$p_5$) and Variable-Length Compressed Blocks ($b_0$--$b_5$).}
    \label{ca-caching}
\end{figure}

\zzh{As shown in Figure~\ref{ca-caching}, variable-length compressed data 
blocks (e.g., $b_i$) are typically smaller than the page size (4KB) and are compactly
stored in succession. Compressed data is usually read with an alignment of 4KB 
(page size), and \sys manages cache space in units of pages instead
of compressed (partial) blocks, which integrates easily
with the OS page cache. }

\zzh{During decompression, \sys evicts a page as soon as 
its last bytes of data are used, regardless of whether its front part is 
used or not. Further, \sys organizes the cached pages into a FIFO queue with 
expiration, evicting the front page if its age exceeds the predefined 
parameter \texttt{longevity} or the queue length becomes greater than the predefined
parameter \texttt{npages}. The compressed cache pages are also subject to the eviction 
policy of the standard page cache of the OS.}

\zzh{For example, in Figure~\ref{ca-caching}, if the two compressed 
blocks $b_0$ and $b_1$ are required, then the compressed pages 
$p_2$ and $p_3$ will be loaded from storage. After decompression,  $p_2$ is evicted
because its last bytes have been used, whereas $p_3$ remains in the cache
as its last bytes ($b_2$ and part of $b_3$) have not yet been used.
If $b_3$ is required next, $p_4$ will be loaded from storage
and $b_3$ will be decompressed from $p_3$ and $p_4$. Then $p_3$ will
be evicted because its last bytes have been used, even though part of
it ($b_2$) has never been used. The compressed page $p_4$ remains in the cache.}


\subsection{\sys Container Layer}
\label{container-layer}
Unlike other remote image systems (e.g., \cite{crfs,cvmfs}), DADI
realizes a writable container layer. The writable layer is not only a
convenient way to build new image layers, but also provides an option
to eliminate the dependency on a union file system. We base the
writable layer on a log-structured~\cite{lsm1992} design because
this makes \sys viable on top of virtually all kinds of storage
systems, including those that do not support random writes
(e.g., HDFS). The log-structured writable layer is also technically a
natural extension of the read-only layers.

\begin{figure}[h]
  \centering
  \includegraphics[width=.9\linewidth]{figs/writable-layer.pdf}
  \caption{DADI's Writable Layer.}
  \label{writable-layer}
  \end{figure}

As shown in Figure~\ref{writable-layer}, the writable layer consists
of one file for raw data and one for index. Both of these files are
open-ended, and are ready to accept appends. As overwrites occur in
the writable layer, these files will contain garbage data and index
records.
% The writable layer also applies the rule that the latest
% changes take effect.
When there is too much garbage, \sys will spawn a background
thread to collect the garbage by copying the live data to a new
file, and then deleting the old file.
When the writable layer is committed, \sys will copy the live data
blocks and index records to a new file in layer format, sorting and
possibly combining them according to their LBAs.

The index for the writable layer is maintained in memory as a
red-black tree to efficiently support lookup, insertion and
deletion. On a write, \sys adds a new record to the index of the
writable layer. On a read, \sys first looks up the index of the
writable layer. For each hole (a segment with no data written) within
the range to read, \sys further looks up the merged index of the
underlying read-only layers. \sys supports \texttt{TRIM} by adding to
the writable layer an index record that is flagged to indicate that
the range contains all-zero content.

\subsection{P2P-Based Decentralized Image Sharing}
\label{sec-p2p}
% Although remote image can greatly reduce the amount of image data that
% has to be transferred, 
% there are situations where more improvement is necessary. 
% In particular, 
In our production environment, 
there are many large-scale applications 
that are deployed on thousands of servers
and that comprise layers as large as several GBs. 
The deployment of these applications
places \zzhnew{significant} pressure on the Registry and the network infrastructure.
%
To better handle such scenarios, 
\sys caches recently used data blocks on the local disks of the hosts
for future use,
and adopts a fast image distribution mechanism 
for the data blocks that are not cached on the hosts.
% \sys also has the option to transfer data directly among the hosts in a peer-to-peer manner. 
%
Given that all the peers need roughly the same set of data and
in roughly the same order during the startup time period, \zzh{ \sys adopts
a P2P-based decentralized image sharing overlay inspired by 
BitTorrent~\cite{bt, dragonfly, kraken} to accelerate image distribution. }

% to realize application-level
%multicast similar to VMThunder~\cite{vmthunder} instead of the
%rarest-first policy commonly used in P2P downloading tools

\zzh{\zzhnew{Figure~\ref{p2p-bittorrent} shows the topology of DADI’s 
P2P-based network. Each container host runs a P2P module called DADI-Agent 
or DADI-Root. DADI-Agent can only obtain data blocks from DADI-Root or other 
agents and DADI-Root is} responsible for fetching data blocks from the Registry 
or other roots to a local persistent cache, as well as managing the agents.
\zzhnew{The Registry is a Web service based on HTTP with range 
capability enforced, and } serves as a seeder and tracker for all roots, 
providing them 
with torrent files and facilitating the transfer process. \zzhnew{The P2P module 
on each host maps random reads of the layer blobs to ranged HTTP requests 
so as to realize on-demand data transfer while requesting a remote image.} 
The roots are peers, 
and they simultaneously download the data blocks for their agents and 
upload the data blocks already downloaded to others. To maximize the 
image's liquidity, each root also serves as a seeder and tracker for their 
agents. The agents under the same root are peers, and they can
exchange data blocks upon requests.}

\begin{figure}[t]
\centering  
\includegraphics[width=0.8\linewidth]{figs/bittorrent.pdf}
\caption{DADI's P2P-Based Decentralized Image Sharing.}
\label{p2p-bittorrent}
\end{figure}

\zzh{To initiate a download process of the requested data, the root or agent node
will first search and download the corresponding torrent file from their seeders.
The torrent file contains the metadata about the blocks to be downloaded as well as the 
address of the tracker that coordinates the data exchange in the nodes. The node 
connects to the tracker to get a list of peers which may have a part of the 
data or the whole data. Then the node establishes a connection with some of the 
peers in the peer list and finds out which data block is available for download
in its peer's cache. It will send its download requests for blocks 
belonging to the same layer to different peers in its peer list.
}

\zzh{The data blocks and torrent file of a layer are pre-distributed to 
multiple root nodes. If the requested data blocks are not present in the root's 
cache, the root will download the blocks from its seeder or peers. 
To download target blocks, the root obtains the \zzhnew{IP} addresses of 
other nodes that \zzhnew{contain} the targets from the tracker, establishes 
a connection with them, and downloads the data blocks until all the
missing blocks have been downloaded. The data received from the peers is 
added to the root's cache as it will probably be needed soon by
other agents or the node itself. The agent downloads the data 
blocks from the root (seeder) or other agents (peers).}

\zzh{During the download process, blocks are downloaded in an arbitrary, 
unspecified order. Different blocks belonging
to the same blob can be downloaded from different peers that have already 
obtained that blob. \zzhnew{We warm up the root and agent servers’ cache 
whenever a new layer is built or converted to ensure that data blocks 
are likely to exist on the peers when they are needed.} By downloading 
the target data from multiple peers, the parallelism of data transmission is 
increased, resulting in a higher network throughput without overburdening any 
specific node.}

% Conference's Contents
% Whenever an agent wants to read some data from a blob for the first
% time or when its parent node does not respond, it sends a request RPC
% to the root. The root may service the request by itself, or it may
% choose to rearrange the topology and redirect the requesting agent to
% a selected parent. The requesting agent is considered to join the tree
% as a child of the selected parent. Every node in a tree, including the
% root, serves at most a few direct children. If the requested data is
% not present in the parent's cache, the request flows upward until a
% parent has the data in its cache. The data received from the parent is
% added to the child's cache as it will probably be needed soon by other
% children or the node itself.
% DADI-Root manages the topology. It knows how many children every node
% has. When a node needs to be inserted into the tree, the root simply
% walks down the tree in memory, always choosing a child with the fewest
% children. The walk stops at the first node with fewer direct children
% than a threshold. This node becomes the selected parent for the
% requesting agent. When a node finds that its parent has failed, it
% reverts to the root to arrange another parent for it. As the P2P
% transfer is designed to support the startup of containers and this
% startup process usually does not last long, DADI-Root expires topology
% information relatively quickly, by default after 20 minutes.

DADI-Root is actually a replicated service running on several servers
for availability, and deployed separately for different clusters. 
\zzh{The roots in each cluster evenly choose other nodes as their 
agents and \zzhnew{record} their address information. An agent will try to 
connect to other roots in order to get the torrent file and peer list 
if it is unable to connect to its own root.}
The Registry tends to be shared by many clusters and possibly across 
a long distance, so its performance may not always be high. 

% To protect against potential data corruption, we create a separate
% checksum file for each and every layer blob as part of the image
% building or conversion process. The checksum file contains the CRC32
% value for each fixed-sized block of the layer. As the checksum files
% are small, they are distributed whole to every involved node as part
% of the image pulling process. The data blocks are verified on arrival
% at each node.

%With reasonably modern networks (10Gbps or faster), each hop in the P2P read
%path adds an extra latency of less than 1~ms in most cases. As a
%full binary tree of height 9 contains more than a thousand nodes, the
%maximum extra latency is highly likely to be less than 9~ms. This
%level of performance is far better than what we have observed with
%congested central servers.


%As the Registry is logically centralized and possibly located at a distant place, 
%its performance and availability may be unsatisfactory. \sys allows the blobs 
%to be mirrored to a local storage system that is shared by the hosts.
%We will refer to the shared storage system as the pool in the rest of the section. 
%Thanks to the log-structured design, \sys supports virtually all types 
%of pools, like HDFS, NFS, Ceph, GlusterFS, etc.
%
%\sys on each host will collaboratively save the blobs to the pool.  To
%access a blob, the host attempts to open it in the pool with exclusive
%write permission. If the blob does not exist, it is created. The host
%that succeeds in opening the blob is responsible for copying the blob
%from the Registry to the pool. Other hosts will try to open the blob in
%read-only mode, and they will also watch the progress of copying by
%regularly getting real space usage of the blob in the pool. If the
%progress has stopped for a period, the hosts will again compete to get
%the exclusive write permission, and the winner will take over the
%responsibility of copying. When the progress reaches 100\%, the hosts
%will use the blob in the pool instead of that in the Registry. The
%exclusiveness of write permission acts like a distributed lock in
%DADI.  If exclusive write access is not supported by the pool, \sys can
%alternatively rely on a real lock provided by coordination services
%such as etcd~\cite{etcd} or ZooKeeper~\cite{zk}.
%
%To release space used in the pool, \sys relies on the last-access-time
%attribute of the blobs to find and remove the blobs that have not been
%used recently. The pool should refuse to remove a blob if it is still
%in use. If the pool does not have such a capability, \sys can alternatively rely
%on a coordination service to maintain the set of blobs currently in use.
%The writable layer can be placed in the pool so as to support easy migration
%of the container, or on host-local disk to reduce the requirements on the
%pool and achieve higher performance.

\subsection{Prefetching}

\subsubsection{Basic Prefetching}

% {\color{blue}{

% Since every single I/O request happens on user's own filesystem will eventually 
% be mapped into one overlaybd's layer blob, we can then record all I/Os from the 
% layer blob's perspective, and replay them later. That's why we call it Trace Prefetch.

% Trace prefetch is time based, and it has greater granularity and predication accuracy 
% than stargz. We don't mark a file, because user app might only need to read a small part 
% of it in the beginning, simply prefetching the whole file would be less efficient. 
% Instead, we replay the trace, by the exact I/O records that happened before. 
% Each record contains only necessary information, such as the offset and length of the blob being read.

% Trace is stored as an independent image layer, and must always be the uppermost one. 
% Neither image manifest nor container snapshotter needs to know if it is a trace layer, 
% snapshotter just downloads and extracts it as usual. The overlaybd backstore must recognize 
% trace layer, and replay it accordingly.

% The \sys format stacks everything in the container's hierarchical mirror into a block device, 
% and the user's read operation of the \sys mirror occurs on the block device at the OS level. 
% As shown in Figure~\ref{trace-prefetch}, after LSMT conversion, trace prefetch records the action 
% at the mirror layer level.

% If the trace format is defined as a combination of frames in a time series, the format 
% of a frame is shown in Figure~\ref{struct-trace}.

\begin{figure}[h]
\centering
\includegraphics[width=0.7\linewidth]{figs/trace-prefetch.pdf}
\caption{Trace Prefetch. \zzhnew{The prefetch mechanism records 
the I/O operations and converts the time sequenced records to 
a log-structured writable image layer.}}
\label{trace-prefetch}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=0.6\linewidth]{figs/struct-trace.pdf}
\caption{Definition of Trace. Layer index is the sequence number, offset represents 
the offset of this I/O in the layer file and count is the number of bytes read by 
this I/O.}
\label{struct-trace}
% \vspace*{-0.8em}
\end{figure}

\zzh{
To avoid frequent and short-lived visits to the Registry by the
backend storage engine during container cold starts, \sys provides 
a new prefetch mechanism which is based on time sequenced I/O 
patterns (trace). Unlike other prefetch mechanisms such as Prioritize 
Files and Use Landmarks realized by stargz,  \sys's prefetch mechanism 
never marks a file. Since user app might only need to read a small part 
in the beginning of a file, simply prefetching the whole file would be 
less efficient. Instead, we record the exact I/O operations which 
happened before (trace) and store them as an independent 
\zzhnew{log-structured writable} 
image layer realized by \sys on the topmost of all image layers 
\zzhnew{as shown in Figure~\ref{trace-prefetch}}, thus 
this new prefetch mechanism has greater granularity and prediction 
accuracy than stargz. \zzhnew{Since every I/O request that occurs on the 
user's own filesystem is mapped into one OverlayBD's layer blob, 
we record all I/Os from the viewpoint of the layer blob.} 
The \sys format stacks everything in the container’s 
hierarchical image into a block device, and the user’s read 
operation of the \sys image occurs on the block device at the OS level. 
After LSMT conversion, trace prefetch 
records the action at the image layer level. Each record contains only 
necessary information shown in Figure~\ref{struct-trace} such as the layer index, 
offset of this I/O operation in the layer file and length of the blob being read.}

\zzh{
All these records should be stored in the topmost layer, which needs to be writable.
The OverlayBD backend provides a merged view of a block-based layer sequence as a virtual 
block device through the iSCSI protocol and the tcmu kernel module. 
Before \zzhnew{recording} I/O requests, we should firstly pull the OCI format image 
\zzhnew{that} we want to 
accelerate from \zzhnew{the} Registry and then \zzhnew{convert} it to the \zzhnew{OverlayBD} format. 
During this process, 
we need to run the \zzhnew{OverlayBD} storage backend, which connects to applications 
with a filesystem mounted on a virtual block device. 
\zzhnew{Since the OverlayBD backend provides a merged view of a sequence of 
block-based layers as a virtual block device through the iSCSI protocol and 
tcmu kernel module, all of an application’s I/O requests will go from the regular 
filesystem (ext4, for example) to the loopback iSCSI device through TCM\_loopback. 
Then these requests will go into the OverlayBD backstore through TCMU so that we 
can collect traces easily.}}

\zzh{
To collect and record these I/O requests, we firstly run a 
temporary container with the \zzhnew{OverlayBD} format image for a 
prescribed time. \zzhnew{OverlayBD} will collect all I/O \zzhnew{requests by 
revising the read method.} 
When an I/O request happens, \zzhnew{OverlayBD} backend will record the layer, length and offset. 
After timeout or user interrupt, these traces will be dumped into a trace blob.
This trace blob \zzhnew{needs} to \zzhnew{conform} to the specified trace-file format. This 
format contains a file header and chained trace records. The header records 
\zzhnew{trace magic number (CRC32 of trace format), data size and checksum. }
The trace blob will be stored in the topmost \zzhnew{writable} layer. 
During this process, we create another thread to listen for user \zzhnew{operations} and 
terminate the instance. Since the trace layer 
is transparent to the push command, we could push \zzhnew{the trace} 
image to \zzhnew{the} Registry so that users could pull it anywhere else. 
When we replay this specific image, \zzhnew{OverlayBD} will run \zzhnew{in ``replay'' mode}, load 
the trace blob \zzhnew{into the trace queue} and verify \zzhnew{its} checksum. 
If \zzhnew{the} trace blob is not empty, \zzhnew{OverlayBD} will replay 
these I/O records to warmup \zzhnew{the} cache. 
The replay process will create multiple threads to read the exact data recorded by each trace. 
Every thread will load traces in queue order and get the data
for the recorded location until the queue is empty.}

\zzh{
When the accelerated image instance starts in the network, \sys will load the trace records
and start a pre-task, which reads every trace, compares the existing cache and requests nonexistent 
data from its parent node.}


% }}

\subsubsection{Pull-Push Collaborative Block Prefetching}
\label{sec-tree}

\begin{figure}[tb]
  \centering
  \includegraphics[width=0.65\linewidth]{figs/cache-value.pdf}
  \caption{Cache pool.}
  \label{cache-value}
  \end{figure}
  
  \begin{figure}[tb]
  \centering
  \includegraphics[width=0.66\linewidth]{figs/cacheList.pdf}
  \caption{Cache list for recording block requests.}
  \label{cacheList}
  \end{figure}

\zzhnew{\sys implements pull-push collaborative block prefetching to 
reduce cold startup latencies of containers and improve data transfer efficiency.
Figure~\ref{p2p-bittorrent} illustrates that} \zzh{DADI-Root is 
the root node that is responsible for fetching data blocks from the 
Registry and other roots, whereas DADI-Agent is the leaf node that can 
only obtain data blocks from DADI-Root and other agents. Each node in the network
has a P2P module for processing 
data requests. The P2P proxy server in the P2P module serves as the 
coordinator, intercepting data requests from \zzhnew{OverlayBD,} performing 
preliminary filtering based on the URL of the \zzhnew{HTTP} requests 
\zzhnew{and forwarding all the requests to other P2P nodes.}
When \zzhnew{other nodes} return the data, the proxy server sends the requested \zzhnew{data} 
to \zzhnew{OverlayBD}.}

\zzh{\zzhnew{DADI maintains a persistent cache pool to buffer key-value items for 
the received data blocks on each DADI-Root in order to proactively push 
the predicted data blocks to remote DADI-Agents.} 
All the received data blocks are first persisted 
into \zzhnew{a file and then} the cache pool uses the address of the file where the data 
blocks are stored as the key, and Figure~\ref{cache-value} shows the value's data 
structure. When rebooting due to a system failure, the cache pool retrieves the 
directory where the cache files are stored, rebuilds the key-value items
and loads them into the cache pool. The cache pool uses 
a reader-writer lock for concurrency and a periodic garbage collection 
strategy to free up memory space. For key-value items that have been cleaned, 
their associated files can be deleted directly. In addition, each root node employs 
a cache to record information about linked agent nodes in order to speed up data 
prefetching. The cache of linked information, unlike the cache pool of data blocks, 
does not need to be persistent. If the root node crashes and cannot be detected, 
the agent will automatically search for the next available root node.}

\begin{figure}[tb]
  \centering
  \includegraphics[width=0.6\linewidth]{figs/workflow-p2p-server.pdf}
  \caption{The workflow of the root nodes in the P2P networks.}
  \label{workflow-p2p-server}
  % \vspace*{-0.8em}
  \end{figure}

\zzh{\zzhnew{The P2P module uses a cache list, which is a thread-safe doubly linked list 
to save the data request records as shown in Figure~\ref{cacheList}. The main purpose of the cache 
list is to record all the data block requests generated within a given period of time. 
Before deploying cooperative block prefetching, the P2P module first obtains the records 
of the data requests when the container starts, and then uses the prediction algorithm to analyze them.}
When a record is 
added or hit, the cache list puts the record into the linked list's header. 
At the same time, the cache list periodically releases the space occupied by 
the tail data.
After the cache list has accumulated a sufficient number of block requests, 
the Least Recently Used (LRU) algorithm is utilized to \zzhnew{predict} which data blocks
are likely to be used later. Data request recording time can directly affect the 
recording efficiency and prediction accuracy. Insufficient or excessive request 
records will damage the system performance and prediction accuracy. Furthermore, 
if there are any unexpected requests during this period, records may get polluted, 
hurting the accuracy of the prediction algorithm. 
}

\zzh{ \zzhnew{Figure~\ref{workflow-p2p-server} depicts the workflow when the root node receives data 
requests from the agent nodes.} The root node first checks whether 
the requested block is in the cache pool and cache list. If the requested block 
is in the cache pool, \zzhnew{it is transmitted immediately} to the agent node. 
Otherwise, the \zzhnew{data} request needs to be forwarded to the Registry. If the requested 
block is in the cache list, the data block to be used by the agent node is predicted 
and \zzhnew{actively} pushed to the agent node. Then the root node waits for the 
\zzhnew{subsequent requests} from the agent \zzhnew{nodes}. }  

\subsection{Authentication and Verification}
\zzh{Figure~\ref{registry-api} shows the standard process of obtaining image
blob from a typical production setup of Registry, which usually
involves a sign-on service and a storage service as well.  The
container host engine will concurrently issue multiple connections to
download the layer blobs from \zzhnew{the} Registry, each going through the process
and downloading the blob to local disk. The pulling process for a
blob, say $x$, goes as follows: (1) the container engine blindly
requests \zzhnew{$x$ from} the Registry and (2) gets refused with an
unauthorized error; the Registry also attaches a URL and arguments for
authentication in the response; (3) the engine connects to the sign-on
server, and (4) gets authorized in the form of a token; (5) the engine
requests \zzhnew{$x$ again from} the Registry, and (8) gets redirected to a
storage service where the blobs are actually stored; the target URL is
unique and temporarily available for (9, 11) downloading the blob of
tarball.}
%

%
\begin{figure}[tb]
\centering
\includegraphics[width=.7\linewidth]{figs/registry-api.pdf}
\caption{The process to pull image blob data. The dotted lines 
are possible actions performed by servers behind the scenes.}
\label{registry-api}
\end{figure}
%
%%\begin{figure}[tb]
%%\centering
%%\includegraphics[width=.9\linewidth]{figs/on-demand.pdf}
%%\caption{Accessing remote image on the Registry}
%%\label{on-demand}
%%\end{figure}

% \zzh{The engine unpacks the blobs sequentially from lowest to uppermost layer, 
% as unpacking action is defined by storage plug-in to minimize data copying, 
% and the action may have dependency on lower layers in some drivers, like 
% overlay, lvm, etc. The sequential unpacking contributes to a majority of startup
% latency in most cases.}

\zzh{The standard process to pull blob data is designed for downloading so
its lengthy steps 1--8 for authentication and redirection are
acceptable. On the other hand, \zzhnew{the pulling processes} repeatedly fetch fine-grained data
blocks from the Registry so they cannot afford the extra steps on every
request. We optimize as follows:}

\zzh{First, the \zzhnew{temporary} URLs received from the registry are cached by
disabling automatic redirection in the HTTP client. This allows us to
begin at step 9 on subsequent requests as long as the \zzhnew{temporary} URL and
token do not expire. This optimization reduces 90\% of the latency.}

\zzh{The second optimization is caching the authentication token received
from the sign-on service. The same token can be used to read multiple
layers' blobs to avoid unnecessary steps, but a hiccup may occur
when either a \zzhnew{temporary} URL or the token expires and it is needed
during \zzhnew{reading}. When this happens, \zzhnew{the container host must 
get a new authentication token from the sign-on server, which increases the 
latency and affects user experience.} }

\zzh{The third optimization is prefetching a new \zzhnew{temporary} URL or token, if it
is currently in use and predicted to be close to expiration. We
consider a token to be in use if it has been used at least once during the current life
span. The sign-on server will explicitly return the expiration in its
response \zzhnew{(default expiration is 60s)}; while the redirection \textit{may} encode
an \texttt{Expire} argument in the \zzhnew{temporary} URL for commonly used object
stores such as S3. If such an argument does not exist,
we predict \zzhnew{the expiration} of a \zzhnew{temporary} URL as the minimum of its 
detected past lifespans, and we switch to a prefetched \zzhnew{temporary} URL 
only when it actually gets expired so as to detect the real lifespan.}

% The standard image layer blobs are not randomly readable as they are
% tarballs usually gzipped~\cite{oci-image2}. This
% format is designed for sequential reading and unpacking. One can only
% read the file(s) in an image of this format after the image has been
% unpacked. So the blob format has to be changed in order to realize
% remote image.


\section{Implementation and Deployment}

This section discusses how \sys interfaces with applications and
container engines, as well as how \sys can be deployed in different
user scenarios.

\begin{figure}[tb]
\centering
\includegraphics[width=0.8\linewidth]{figs/iopath-cgroups.pdf}
\caption{I/O Path for cgroups Runtime.}
\label{iopathc}
\end{figure}

\subsection{Data Path}

\sys connects with applications through a file system mounted on a
virtual block device. \sys is agnostic to the choice of file system so
users can select one that best fits their needs. By allowing the
dependency on the file system to be explicitly captured at image
creation time, \sys can help applications exploit the advanced
features of file systems such as XFS~\cite{xfs}, Btrfs~\cite{btrfs},
ZFS~\cite{zfs}.

In the case of the cgroups runtime, we use an internal module called
\texttt{vrbd} to provide the virtual block device. \texttt{vrbd} is
similar to \texttt{nbd} but contains improvements that enable it to
perform better and handle crashes of the user-space daemon. As shown
in Figure~\ref{iopathc}, I/O requests go from applications to a
regular file system such as ext4. From there they go to the virtual
block device and then to a user-space daemon called
\texttt{lsmd}. Reads of data blocks belonging to layers that have
already been downloaded are directed to the local file system where
the layers are stored. Other read operations are directed to DADI's
P2P agent which maintains a persistent cache of recently used data
blocks. Write and trim operations are handled by lsmd which writes the
data and index files of the writable layer to the local file system.

We have also realized a QEMU driver for its block device backend to
export an image to virtualized containers. As shown in
Figure~\ref{iopathv}, the data path in this case is conceptually
similar to that for the cgroups runtime except that the image file
system and virtual block device are running in the guest context, and
the block driver takes the place of lsmd. Integration with other
hypervisors should be straightforward. It is also possible to pass the
virtual block device from the host into the guest context. This
approach works with virtually all hypervisors but incurs a slightly
higher overhead. As the block device interface is narrower and simpler
than a file system interface, it exposes a small attack surface to the
untrusted guest container.

\begin{figure}[tb]
\centering
\includegraphics[width=.6\linewidth]{figs/iopath-vmm.pdf}
\caption{I/O Path for Virtualized Runtime (QEMU, etc).}
\label{iopathv}
\end{figure}

\subsection{Container Engine Integration}
\sys is integrated with Docker through a graph driver which is a
Docker plugin to compose the root file system from layers. The layers
form a graph (actually tree) topology hence the name graph
driver. \sys is also integrated with containerd through a snapshotter
which provides functions similar to those of the graph driver. We will
use the term ``driver'' to refer to either of them in the rest of the
paper.

We implemented the drivers to recognize existing and \sys image
formats. When they encounter \zzhnew{a} .tgz image, they invoke existing
drivers. When they come across \zzhnew{a} \sys image, they perform DADI-specific
actions. In this way, the container engine can support both types of
images at the same time so that the deployment of \sys to a host does
not require the eviction of existing .tgz based containers or images from
that host. This enables us to use a canary approach to systematically
roll out \sys across our complex production environment.

\sys currently fakes the image pulling process with a small tarball
file consisting of DADI-specific metadata. The tarball is very small
so that the image pull completes quickly. We are
preparing a proposal to the container community for extensions to the
image format representation to enable lazy image pulling and make the
engine aware of remote images.

\subsection{Image Building}
\sys supports image building by providing a log-structured writable
layer. The log-structured design converts all writes into sequential
writes so that the build process with \sys is usually faster than
that for regular .tgz images (see Section~\ref{eval} for details).  As \sys uses
faster compression algorithms, the commit operation is faster with
\sys than it is for regular .tgz images. \sys also avoids pulling entire base
images and this saves time when building images on dedicated
image building servers where the base images are usually not already
local.

In order to build a new layer, \sys first prepares the base image file
system by bringing up a virtual block device and
mounting the file system on it. When the layer is committed, DADI
unmounts the file system and brings down the device. These actions
are repeated for each layer produced in a new image, adding up to a
lot of time. According to the specification of the image building script
(dockerfile), each line of action will produce a new layer. It is not
uncommon to see tens of lines of actions in a dockerfile in our
environment so a single build job may result in an image with many new
layers. This design was supposed to improve the speed of layer
downloading by increasing parallelism, but it may become unnecessary
with remote \zzhnew{images}.

We optimized the \sys image build process by
bringing the device up and down only once. The intermediate
down-and-ups are replaced with a customized operation called
stack-and-commit. As its name suggests, stack-and-commit first stacks
a new writable layer on top of existing layers, and then commits the
original writable layer in the background.  This optimization
significantly increases image building speed, especially on high-end
servers with plenty of resources.

To convert an existing .tgz image into the \sys format, \sys proceeds
from the lowest layer of the image to its highest layer. For each
layer, \sys creates a new writable layer and unpacks the corresponding
.tgz blob into the layer while handling whiteouts, a special file name
pattern that indicates deletion of an existing file. If users want to
build a \sys image from a .tgz base image, the base image layers must
first be converted into the \sys format using this process.

Some container engines implicitly create a special init layer named
xxxxx-init between the container layer and its images layers.  This
init layer contains some directories and files that must always exist
in containers (e.g., /proc, /dev, /sys). During commit, \sys merges
this init layer with the container layer so as to keep the integrity
of the image file system.

\subsection{Prefetching}

\begin{figure}[tb]
  \centering
  \includegraphics[width=0.75\linewidth]{figs/prefetch-layer.pdf}
  \caption{Each layer of the \sys format image is metadata, containing many small files, 
  and one more trace layer does not change the overall structure.}
  \label{prefetch-layer}
\end{figure}

% {\color{blue}{
% \sys implements prefetch in two steps: the first is recording and the second is playback.

% \sys provides a recording tool that first images and records traces, 
% then saves traces into the top layer, and finally pushes a new image, 
% overwriting the original tag.

% AS shown in Figure~\ref{prefetch-layer}, because each layer of the \sys format 
% mirror is metadata and contains many small files, one more trace file does 
% not change the overall structure. End users perceive id changes at the top, 
% as well as new changes in the image manifest digest. The others are unchanged 
% and you can continue to use the original tag.

% Replay is accelerated, the entire process is transparent to the user, 
% and if there is trac in a DIDD image, it can be accelerated by default.
% When replayed, read and release buffer in advance in accordance with all 
% frames of the trace file, thus preheating the cache. The old deployer encounters 
% a new trace mirror and ignores the new trace file. Therefore, 
% the feature is down-compatible.

\zzh{
\sys implements trace-based prefetching in two steps: \zzhnew{(1)} trace recording and
 \zzhnew{(2)} playback with trace layer.}

\zzh{
Before trace recording, we \zzhnew{must} firstly convert a normal image to an \zzhnew{OverlayBD} format 
image. This image format provides a merged view of a 
sequence of block-based \zzhnew{layers and} outputs as a virtual \zzhnew{block} device 
through TCMU. With the help of \zzhnew{OverlayBD} backstore, we \zzhnew{can capture} I/O request 
traces and dump them into the topmost layer of the \zzhnew{OverlayBD}-format image.}  

\zzh{
\zzhnew{Trace recording uses the converted image to run a temporary container.} This 
recording operation \zzhnew{can} be assigned an appropriate time so that the
container \zzhnew{can collect enough traces.} \zzhnew{When recording begins, 
the OverlayBD backstore's prefetcher works in ``record'' mode and persists R/W metadata 
of all layers into a trace file. After termination of the container,} 
the traces we collected in the trace file will be recorded on 
the topmost layer \zzhnew{(trace layer)} of \zzhnew{OverlayBD} format image. \zzhnew{As} 
shown in Figure~\ref{prefetch-layer}, 
because each layer of the image is metadata and contains many small files, one more 
trace \zzhnew{layer} does not change the overall structure. \zzhnew{End users can 
perceive checksum changes of the topmost trace layer, as well as new 
changes in the image manifest digest. The checksums of other layers are 
unchanged and you can continue to use them. }} 

\zzh{
When we start a container based on \zzhnew{an} image with \zzhnew{a} trace layer, 
the \zzhnew{OverlayBD} backstore 
will recognize and replay it accordingly. The entire process is transparent to the user, 
and the startup \zzhnew{process} will be accelerated by default 
if \zzhnew{the} trace layer has been 
\zzhnew{identified} successfully. \zzhnew{When replaying, the OverlayBD backstore 
works in ``replay'' mode. Buffer reading and releasing operations can be executed 
in accordance with the trace file to preheat the cache in advance.} 
Since the normal deployer 
ignores the trace layer during startup, the feature is backward compatible.}


% }}

\zzh{DADI further implements pull-push collaborative block prefetching to 
improve transfer efficiency in the P2P network. 
The roots fetch data blocks from the Registry and the agents can only 
obtain data blocks from the root and \zzhnew{other agents. When a container or 
virtual machine is starting, OverlayBD} pulls the data 
blocks from the root node to local memory. Once the root node receives a data
block request, it utilizes the file path given in the request to retrieve the 
key of the predicted data block from the cache list. Then the key is used by the 
node to push the data block obtained from the cache pool to the requester.}

\zzh{\zzhnew{The header of each data request includes the start 
and end offset addresses of one or more data blocks that are integer 
multiples of the data block length (usually 512 bytes).} In general, 
the content of the data block will not be modified 
after caching, and the download length of each data block request is between 512K 
and 2M. Therefore, we set the length of each item in the cache pool to 1M. However, 
since the start offset of each received data request or data downloaded from the 
Registry is not an integer multiple of 1M, each received data request and download 
operation are aligned with split processing, which not only \zzhnew{conserves} 
memory space but also improves access performance of the cache pool.}

\subsection{Deployment Options}
\label{pubsln}
% On the first day when container is adopted, pulling images has become 
% an issue for those who want to launch their applications as quickly as
% possible. Large users / applications suffer more than small ones. 
% The design of \sys bears this common issue in mind.

% The core part of \sys is a remote image that is even more general than
% the .tgz image, especially when virtualization, attack surface, and/or
% heterogeneous guest systems are involved. \sys image enables on-demand
% transfer of what is actually used, leading to faster startup than
% pulling the whole images. The container community is seeking for good
% remote image solutions, too. We are actively working on a contribution
% of \sys to the community. \sys image format conforms to
% OCI-Artifacts~\cite{oci-artifacts} specification, which allows \sys to 
% store image blobs that are not tarballs in standard image Registry.

% \textbf{Stacking on a shared storage.}

\zzh{DADI implements a P2P-based decentralized image sharing policy to accelerate 
image distribution, which is the transmission process of multiple read-only layer blobs.
When the P2P network topology is established, a certain number of roots are 
randomly selected, and \zzhnew{a list containing the roots} is broadcast to all agents.
The agent randomly selects the root when sending data block requests 
until it encounters the first available root. However, there is a threshold 
for the number of agents that can connect to the root. \zzhnew{The threshold 
is predefined as the ratio of the number of agents to the number of roots in a cluster.}
If the threshold is exceeded, the root will deny other \zzhnew{requests}
to ensure load balancing.}

\zzh{The seeders (Registry and roots) store the metadata of data blocks and the 
tracker's address in a torrent file and send it to the downloaders. When 
the root or agent needs to download data blocks, it first establishes a 
connection with the tracker based on the torrent and obtains the addresses 
of the nodes that own the target blocks. The missing data blocks are transferred
between nodes until they are all downloaded.}

\zzh{The P2P-based decentralized image sharing capability of \sys is 
optional and targeted at users with large applications. Other 
users may prefer to use \sys with
the layer blobs stored in a high-performance shared storage system as
a compromise between fetching the layer blobs from the Registry and
storing the layer blobs on every host.}
% In this model, \sys works with
% systems including HDFS, NFS, CIFS, Ceph, GlusterFS, etc., including
% writing the container layers to these systems.
Similar solutions have been proposed in the community
(e.g., Teleport~\cite{teleport}, Wharf~\cite{wharf}). \sys further
enhances these solutions by not requiring the layers to be unpacked
and supporting alternative storage systems such as
HDFS.

% As our production
% environment uses a limited image set, though individually big (and
% updated frequently), we currently depends on host-local storage only.

% \textbf{Cold start from Registry.}

For users who do not wish to set up shared storage, \sys provides them
with the option to fetch layer blobs on-demand from the Registry and
cache the data blocks on local disk(s). This approach greatly reduces
cold startup latencies by avoiding the transfer of data blocks that
are not needed. If there is a startup I/O trace available when
launching a new container instance, \sys can make use of the trace to
prefetch the data blocks needed by the starting container, yielding a
near-warm startup latency.

% \textbf{Pulling \sys image blobs.}

Users may also choose to use \sys by downloading the layer blobs to
local disk(s). \sys layers do not need to be unpacked, saving a
time-consuming sequential process needed for .tgz layers.  Thus
pulling \sys images is much faster. The downloading can be
optionally offloaded to P2P tools such as
\cite{dragonfly,kraken,borg,tupperware,fid}.  We use this approach as
a backup path in case our on-demand P2P transfer encounters any
unexpected error.

\section{Evaluation}
\label{eval}

In this section, we evaluate the performance and scalability of DADI
Image Service.

\subsection{Methodology}

We compare the container startup latency with \sys to that with the
standard tarball image, Slacker, CRFS, LVM (dm or device mapper), and
P2P image download. We also analyze the I/O performance as observed by
an application inside the container.

\zzhnew{We use ext4 as the local file system for DADI.}
Slacker uses Tintri VMstore as its underlying storage system. We do
not have access to such a system so we use LVM together with NFS as
an approximation of Slacker (denoted as pseudo-Slacker). At the
time of this writing, CRFS has not yet achieved its goal of realizing
an internal overlayfs so we rely on the kernel implementation of
overlayfs for the comparisons.
%
%We also evaluate \sys for secure containers, by running benchmarks inside
%QEMU. \sys images are directly accessible via a block driver of QEMU; and
%the file-system-level images are accessed via virtio-fs.

%At the time of writing, overlayfs is the most widely used storage driver in production,
%and is ``preferred'' by official documents of docker.com. LVM was the recommended
%one a few years ago, and is regaining attention again, 
%as it is much easier to pass a block device to virtualized
%containers. 
%%Qcow2 is the de facto standard image format for open source virtual 
%%machines, and it is also a block-level image format. 
%Slacker and CRFS are two
%innovative and representative solutions for containers. 

We generally use NVMe SSDs as local storage. We also emulate a
low-speed disk by limiting IOPS to 2,000 and throughput to 100
MB/s. These are the performance characteristics of the most popular
type of virtual disks on public clouds so we refer to such a disk as
``cloud disk'' in the rest of the paper. We use ZFile by default for
\sys unless explicitly noted. Before starting a test, we drop the
kernel page cache in the host and guest (if applicable) as well as the
persistent cache of DADI.

The physical servers we use are all equipped with dual-way multi-core
Xeon CPUs and 10GbE or higher-speed NICs. The VMs are hosted on our
public cloud. Each VM is equipped with 4 CPU cores and 8 GB of memory. The
vNICs are capable of a burst bandwidth of 5 Gbps and sustained
bandwidth of 1.5 Gbps.

\subsection{Startup Latency}
\label{eval:startup}

\begin{figure*}[h]
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/coldstartup.pdf} 
    % \vspace*{-0.7cm}
    \caption{Cold Startup Latency.}
    \label{perf-coldstartup}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/warmstartup.pdf} 
    % \vspace*{-0.7cm}
    \caption{Warm Startup Latency.}
    \label{perf-warmstartup}
  \end{minipage}
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/batch.pdf} 
    % \vspace*{-0.7cm}
    \caption{Batch Cold Startup Latency. Bars indicate the 10th and 90th percentiles.}
    \label{batch-startup}
  \end{minipage}
  \hfill
\end{figure*}


To evaluate container startup latency, we use the application image
\texttt{WordPress} from DockerHub.com. WordPress is the most popular
content management system powering about one third of the Web in
2019~\cite{wordpress}. The image consists of 21 layers in .tgz format
with a total size of 165MB. When unpacked, the image size is 501MB. In
\sys compressed format with lz4 compression, the image occupies
274MB. The tarball of DADI-specific metadata that is downloaded on
image pull is only 9KB in size.

\textbf{Cold start of a single instance.}  We test startup latencies
of a single container instance running WordPress when the layer blobs
are stored in the Registry (.tgz, DADI, CRFS) and on remote storage
servers (DADI, pseudo-Slacker). All the servers are located in the
same datacenter as the container host. The results, as summarized in
Figure~\ref{perf-coldstartup}, show that container cold startup time is
markedly reduced with DADI.

\textbf{Warm start of a single instance.}  Once the layer blobs are
stored or cached on local disk, the containers can be started and run
without a remote data source. In this case, any difference in startup
time can be attributed to the relative efficiency of the I/O paths. As
indicated in Figure~\ref{perf-warmstartup}, \sys performs 15\%\textasciitilde25\%
better than overlayfs and LVM on NVMe SSD, and more than 2 times
better on cloud disk.

% \textbf{Cold startup with trace-based prefetching.}  We first make use
% of \texttt{blktrace} to record an I/O trace when starting a
% container. On another host, we use \texttt{fio} to replay only the
% read operations in the trace while starting a new container instance
% of the same image. We set fio to replay with a relatively large I/O
% depth of 32 so as to fetch data blocks before they are actually read
% by the application.  Figure~\ref{perf-prefetched} shows the
% results. Observe that trace-based prefetching can reduce 95\% of the
% difference between cold and warm startup times.

\begin{figure*}[h]
  % \begin{minipage}[t]{.32\textwidth} 
  %   \includegraphics[width=\linewidth]{figs/cdf-launch.pdf} 
  %   % \vspace*{-0.7cm}
  %   \caption{Time to Launch Application in Production Environment.}
  %   \label{cdf-launch}
  % \end{minipage}
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/perf-bittorrent.pdf} 
    % \vspace*{-0.7cm}
    \caption{Startup Latency with P2P-Based Image Sharing.}
    \label{perf-bittorrent}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/cdf-pull.pdf} 
    % \vspace*{-0.7cm}
    \caption{Time to Pull Image in Production Environment.}
    \label{cdf-pull}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/cdf-layer-sizes.pdf} 
    % \vspace*{-0.7cm}
    \caption{Layer Blob Size of both \texttt{DADI (.zfile)} and \texttt{.tgz}.}
    \label{cdf-layer-sizes}
  \end{minipage}
  \hfill 
  % \hfill
\end{figure*}

\textbf{Batch cold startup.}
In practice, many applications are large and require multiple
instances to be started at the same time. For this batch startup
scenario, we compare only pseudo-Slacker and \sys because the .tgz
image and CRFS are bottlenecked by the Registry. The results are
presented in Figure~\ref{batch-startup}. Note that the startup time
with pseudo-Slacker begins at 1.5s for one instance and increases to
2.3s for 32 instances. On the other hand, the startup time with DADI
remains largely constant at 0.7s as the number of instances increases.

\textbf{Startup with P2P-based Decentralized Image Sharing.}
\zzh{We measure the startup latency of a single container instance running 
WordPress in DADI-Agent with the P2P-based decentralized image sharing policy. 
For cold startup latency, the image only exists on the Registry. For 
warm-root or warm-agent startup latency, the image has been pre-distributed 
to all the roots or other agents that are peers with the node where the 
instance resides. As shown in Figure~\ref{perf-bittorrent}, when the image 
has been pre-distributed to all the roots or other agents, the warm-agent 
startup latency is reduced by 57\% compared to cold startup. As we described
in Section~\ref{sec-p2p}, we warm up the root and agent servers' cache 
whenever a new layer is built or converted.}

\textbf{Startup in our Production Environment.}  We selected typical
deployment tasks for an application in our production environment and
analyzed their timing data. As shown in Figure~\ref{cdf-pull}, pulling
the \sys metadata tarball takes no more than 0.2s for nearly half of the
hosts and around 1s for the rest of the hosts. This compares very
favorably with pulling the equivalent .tgz image which takes more than
20s for most of the hosts.  Note that in this case, the .tgz image
pull only needs to download the application layers as the much larger
dependencies and OS layers already exist on the hosts. If all the
layers have to be downloaded, the time needed will be even higher.


% Conference contents
% As shown in Figure~\ref{cdf-launch}, applications start faster using
% \sys remote image and P2P data transfer than with the .tgz image
% stored on local SSD. This result surprised us initially but it turned
% out to be a common occurrence for a couple of reasons. First,
% overlayBD performs better than OverlayFS (See
% Section~\ref{performance}). Second, with the tree-structured P2P data
% transfer, hosts effectively read from their parents' page cache, and
% this is faster than reading from their local disks.

\subsection{\sys and ZFile Format}
\textbf{Compression.}
\zzh{Figure~\ref{cdf-layer-sizes} shows the CDF of layer blob sizes for both \sys and
.tgz formats in our production environment. As we use the lz4 codec, which is fast but
has a relatively weak compression ratio, \sys layer blobs are somewhat larger than those in 
.tgz format. }

\textbf{ZFile Performance with Compression-Aware Caching.} 
\zzh{As shown in Figure~\ref{perf-zfile}, the use of ZFile
increases performance even on high-speed NVMe SSDs. This result also 
exceeded our expectations. The I/O cost saved by reading compressed 
data is larger than the overhead introduced by decompression, so the overall 
effect is beneficial, and the benefit is likely even larger when the storage is slower. 
\zzhnew{The test uses a production image consisting of 21 layers, 
which is 2.1GB uncompressed, and 1.3GB in .zfile compressed with lz4.}}

\begin{figure*}[t]
  \begin{minipage}[t]{.33\textwidth} 
    \includegraphics[width=\linewidth]{figs/perf-zfile.pdf} 
    % \vspace*{-0.7cm}
    \caption{Time to \texttt{tar} All Files with ZFile Format.}
    \label{perf-zfile}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/pull-push-prefetch.pdf} 
    % \vspace*{-0.7cm}
    \caption{Startup Latency with Prefetching.}
    \label{pull-push-prefetch}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.33\textwidth} 
    \includegraphics[width=\linewidth]{figs/pull-push-prefetch-multiple.pdf} 
    % \vspace*{-0.7cm}
    \caption{Multiple Apps Startup Latency with Prefetching.}
    \label{pull-push-prefetch-multiple}
  \end{minipage}
  \hfill
  % \begin{minipage}[t]{.32\textwidth} 
  %   \includegraphics[width=\linewidth]{figs/perf-build.pdf} 
  %   % \vspace*{-0.7cm}
  %   \caption{Time to Build Image.}
  %   \label{perf-build}
  % \end{minipage}
\end{figure*}

\subsection{Prefetch}
% {\color{blue}{

\zzh{
To evaluate the performance difference with and without prefetching, we similarly
measure the time until the service of a WordPress container becomes available. In the test environment, 
the testing host is deployed in a different region from the Registry service to simulate a real production environment. 
Since we have pushed the specific image to the Registry and have removed the local image, the measured time reflects cold-start performance.  
The network bandwidth is uniformly capped at 25 Mbps for all configurations.}

\zzh{
The test program is a shell script \zzhnew{that leverages the \texttt{netcat} command to 
test whether the WordPress service is running on its port} and records the waiting time. When 
\zzhnew{the} WordPress service starts successfully, the script breaks out of 
the \zzhnew{testing} loop and exits.
We first make use of \texttt{blktrace} to record an I/O trace when starting a
container. On another host, we use \texttt{fio} to replay only the
read operations in the trace while starting a new container instance
of the same image. We set fio to replay with a relatively large I/O
depth of 32 so as to fetch data blocks before they are actually read
by the application. As shown in Figures~\ref{pull-push-prefetch}
and~\ref{pull-push-prefetch-multiple}, we measure the startup latency of
a single application and of multiple applications across multiple containers.
The startup latency with trace-based prefetching \zzhnew{is reduced by up }
to 58\% compared to startup without prefetching.}

% \textbf{Cold startup with trace-based prefetching.}  We first make use
% of \texttt{blktrace} to record an I/O trace when starting a
% container. On another host, we use \texttt{fio} to replay only the
% read operations in the trace while starting a new container instance
% of the same image. We set fio to replay with a relatively large I/O
% depth of 32 so as to fetch data blocks before they are actually read
% by the application.  Figure~\ref{perf-prefetched} shows the
% results. Observe that trace-based prefetching can reduce 95\% of the
% difference between cold and warm startup times.

\zzh{\zzhnew{We then} use pull-push collaborative block prefetching to evaluate 
the cold startup latency of the agent nodes. We warm up the root 
nodes' cache pool in advance, and then measure the startup latency of a 
single application and of multiple applications on the agent nodes with and 
without pull-push prefetching. As shown in Figures~\ref{pull-push-prefetch}
and~\ref{pull-push-prefetch-multiple}, startup with pull-push 
prefetching is 63\% faster than without prefetching.}

\subsection{Scalability}
For the scalability analysis, we use a lightweight application called
\texttt{Agility}. Agility is a Python application based on CentOS 7.6. 
Its image consists of 16 layers with a total size of 575MB in
ZFile format and 894MB uncompressed.
When Agility starts, it accesses a specified HTTP server which
records the time stamps of all the accesses. We use Agility instead of
WordPress for our scalability test because it provides a means to
collect timings of a large number of container instances. Agility also
consumes fewer resources, allowing us to create many more containers
in our testbed.

\textbf{Large-scale startup with DADI.}  We create 1,000 VMs on our
public cloud platform and use them as hosts for containers. A large
and increasing portion of our production environment is VM-based so
this test reflects our real world situation.  We start 10 containers
running Agility on each host for a total of 10,000 containers. As
shown in Figure~\ref{large-10k}, the cold start latency with \sys is
within a second or two of that for warm start. The experimental
environment is not dedicated and some noise is apparent in one of the
runs (Cold Startup 1). Note that other than for ramp-up and long-tail
effects, the time taken to start additional containers is relatively
constant.

\begin{figure*}[t]
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/large-10k.pdf} 
    % \vspace*{-0.7cm}
    \caption{Startup Latency using \sys (Large-Scale Startup).}
    \label{large-10k}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/xlarge-100k.pdf} 
    % \vspace*{-0.7cm}
    \caption{Projected Startup Latency using \sys (Hyper-Scale Startup).}
    \label{xlarge-100k}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/perf-fio.pdf} 
    % \vspace*{-0.7cm}
    \caption{Uncached Random Read Performance.}
    \label{perf-fio}
  \end{minipage}
\end{figure*}

\textbf{Hyper-scale startup with DADI.}  We deliberately construct a
special P2P topology with tens of hosts and use it to project the
behavior for a full tree with tens of thousands of hosts. The special
topology models a single root-to-leaf path where each interior node
has the maximum number of children. Each host again runs 10 instances
of Agility. As shown in Figure~\ref{xlarge-100k}, the startup time is
largely flat as the number of containers increases to 100,000.  Notice
also that a binary tree for P2P is best when there are fewer than
20,000 participating hosts. A 3-ary or 4-ary tree works better beyond
that scale.

\begin{figure*}[t]
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/perf-du-2020.pdf} 
    % \vspace*{-0.7cm}
    \caption{Time to \texttt{du} All Files.}
    \label{perf-du}
  \end{minipage}
  \hfill 
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/perf-tar-2020.pdf} 
    % \vspace*{-0.7cm}
    \caption{Time to \texttt{tar} All Files.}
    \label{perf-tar}
  \end{minipage}
  \hfill
  \begin{minipage}[t]{.32\textwidth} 
    \includegraphics[width=\linewidth]{figs/perf-build.pdf} 
    % \vspace*{-0.7cm}
    \caption{Time to Build Image.}
    \label{perf-build}
  \end{minipage}
\end{figure*}

\subsection{I/O Performance}
\label{performance}

We perform micro benchmarks with \texttt{fio} to compare uncached
random read performance.  The results are summarized in
Figure~\ref{perf-fio}. At an I/O queue depth of 1, \sys offers
comparable performance to LVM despite its user-space implementation.
DADI's performance ramps up slower as the queue depth is increased but
it catches up and tops LVM to achieve the highest IOPS at an I/O queue
depth of 128 and without compression. This behavior suggests that
DADI's index is more efficient than that of LVM, but there
is room to optimize our queueing and batching implementation. Observe
that \sys with compression performs 10\%\textasciitilde20\% better than without
compression when the I/O queue depth is less than 32. This is because
compression, by reducing the amount of data transferred, increases
effective I/O throughput provided that the CPU is not bottlenecked. In
our experimental setup, the CPU becomes bottlenecked for ZFile beyond
a queue depth of 32.

We also test I/O performance with \texttt{du} and \texttt{tar} to scan
the entire image from inside the container.  These tests respectively
emphasize small random read and large sequential read. The output of
these commands are ignored by redirecting to \texttt{/dev/null}.  As
shown in Figure~\ref{perf-du} and \ref{perf-tar}, \sys outperforms
both overlayfs and LVM in all cases especially on the cloud disk. This
is again primarily due to the effect of compression in reducing the
amount of data transferred.

%It is worth noting that, \texttt{tar} all files on \sys from NFS or Registry is 
%still faster than downloading and unpacking the whole image. This is because
%(1) the sequential unpacking is slow; (2) the P2P downloading tools are designed
%for wide-area Internet, and are sub-optimal in cluster environment; (3) overlaid
%data blocks (files) in lower layers are not fetched (not tar-ed) on DADI.

\subsection{Image Building Speed}

Image building speed is driven primarily by write performance and the
time needed to setup an image. We evaluate image building performance
with a typical dockerfile from our production environment. The
dockerfile creates 15 new layers comprising 7,944 files with a total
size of 545MB, and includes a few \texttt{chmod} operations that
trigger copy-ups in overlayfs-backed images. As shown in
Figure~\ref{perf-build}, the image is built 20\%\textasciitilde40\%
faster on \sys than on overlayfs. Note that the time to commit or
compress the image is not included in this measurement.

\section{Related Work}

\zzh{
The block-level container image storage system is the basis for realizing 
the elastic deployment of large-scale container services. At present, 
it has been successfully applied on Alibaba Cloud. However, container 
cold start is still one of the main performance bottlenecks. With the 
rise of serverless computing, a large number of containerized applications 
exist in the form of lambda handlers, which further aggravates the impact 
of container cold-start delay on overall system performance. The fundamental 
problem restricting the speed of container cold start is that there are 
unavoidable network and I/O delays in block read operations.}

\zzh{
\textbf{Prefetch. } Data prefetching is the most commonly used method to hide network 
or I/O latency. Traditional data prefetching improves system 
performance mainly by exploiting the locality of the program, including temporal 
locality and spatial locality. Taking disk reads as an example, 
when a disk read operation occurs, the operating system uses an 
algorithm to determine a ``read dimension'' and reads in the subsequent 
data blocks according to this ``read dimension'', so as to improve 
system efficiency during sequential reading. There are two mainstream 
prefetching methods: Expand Prefetch and 
Prioritize Files with Landmarks. The \zzhnew{first} method simply 
reads extra data beyond the designated range, and the expand ratio can 
be 2x, 4x, or even higher. The \zzhnew{second} method, which is already adopted in 
Google's stargz, is to prefetch the range where prioritized files are 
contained to increase the cache hit ratio and mitigate read overhead.}

% \lxx{
% \sys uses another prefetch mechanism called trace-prefetch. This special prefetch 
% mechanism can collect and record exact time-based I/O requests (trace) 
% when startup which will be replayed when cold-start. This prefetch 
% mechanism has greater granularity and predication accuracy. Replay 
% operation will preheat the cachein advance and the whole startup 
% process will be accelerated.}

% Cache has been playing an important role in the whole architecture of ACI's I/O flow. 
% When there is no cache (container cold start), however, the backend 
% storage engine will still need to visit Registry frequently, and temporarily.

% Prefetch is a common mechanism to avoid this situation. As it literally suggests, 
% the key is to retrieve data in advance, and save them into cache.

% There are many ways to do prefetch, for instance, it is now common to simply
% read extra data beyond the designated range of a Registry blob. That might be 
% called as Expand Prefetch, and the expand ratio could be 2x, 4x, or even higher, 
% if the network bandwidth is sufficient.

% Another way is to prioritize files and use landmarks, which is already 
% adopted in Google's stargz. The storage engine runtime will prefetch the 
% range where prioritized files are contained. And finally this information 
% will be leveraged for increasing cache hit ratio and mitigating read overhead.

%In this article we are about to introduce a new prefetch mechanism based 
%on time sequenced I/O patterns (trace). This mechanism has been integrated 
%as a feature into ctr record-trace command.


% \vspace{-0.5em}
% \subsection{Prefetch in Google}
% \begin{figure}[tb]
%   \centering
%   \includegraphics[width=0.99\linewidth]{figs/google-prefetch.pdf}
%   \caption{The prefetch in Google's starz mirror format.}
%   \label{google-prefetch}
% \end{figure}
% \vspace{-0.5em}
% {\color{blue}{
% Google's starz mirror format also has the prefetch feature, 
% which works by marking specific files at the first part of the image. 
% These files will be pulled first when the container is running.

% As shown in Figure~\ref{google-prefetch}, files in google's starz(eStargz) 
% are grouped into the following groups: A.prioritized files, 
% B.non-prioritized files. if no files are belonging to A, 
% a landmark file no-prefetch landmark must be contained in the archive.
% If one or more files are belonging to A, eStargz must consist of 
% two separated areas corresponding to these groups and a landmark file 
% prefetch landmark MUST be contained at the boundary between these two areas.
% The Landmark file must be a regular file entry with 4 bits contents 0xf in eStargz. 
% It must be recorded to TOC as a TOCEntry. Prefetch landmark must be named .prefetch.landmark. 
% No-prefetch landmark MUST be named .no.prefetch.landmark.

% Stargz Snapshotter makes use of eStargz prioritized files for workload-based 
% optimization to mitigate the overhead of reading files. The workload of the image 
% is the runtime configuration defined in the Dockerfile, including entrypoint command, 
% environment variables and user. Stargz snapshotter provides an image converter command 
% ctr-remote images optimize to create optimized eStargz images. When converting the image, 
% this command runs the specified workload in a sandboxed environment and profiles all file accesses. 
% This command treats all accessed files as prioritized files. Then it constructs eStargz by first 
% putting prioritized files from the top of the archive, sorting them by the accessed order, then 
% putting prefetch landmark file entry at the end of this range, and putting all other files 
% (non-prioritized files) after the prefetch landmark.

% Before running the container, stargz snapshotter prefetches and pre-caches the range 
% where prioritized files are contained, by a single HTTP Range Request supported by the 
% registry. This can increase the cache hit rate for the specified workload 
% and can mitigate runtime overheads.
% }}


\textbf{File System Changesets.} Exo-clones~\cite{exo-clone} implement
volume clones efficiently with file system changesets that can be
exported. \sys images are conceptually exo-clones with block level
deltas that are not tied to any specific file system.

\textbf{P2P downloading.}  Several systems
allow container hosts to download image layers in a P2P manner,
significantly reducing the download time in large
environments~\cite{dragonfly,kraken,borg,tupperware,fid}.
% These
%   systems, however, do not reduce the minimal latency needed for
%   downloading and unpacking the layers, especially for small or medium
%   clusters. Furthermore, every involved host must have a copy of the
%   layers, which is a waste of resources.
VMThunder~\cite{vmthunder} adopts a tree-structured P2P overlay network
to deliver fine-grained data blocks on-demand for large VM clusters. We 
reuse this general idea in DADI's optional P2P subsystem with a 
refined design and a production-level implementation.

\textbf{Trimmed images.} In order to pull less data and start a container in less time,
DockerSlim~\cite{DockerSlim} uses a combination of static and dynamic analyses 
to generate smaller-sized container images in which only files needed by the core 
application are included.
%
Cntr~\cite{Cntr} improves this by allowing dynamic accesses to trimmed files in
uncommon cases via a FUSE-based virtual file system.

\textbf{Storage Configuration for Containers.} The layering feature of
container image introduces new complexities in configuring
storage. \cite{tarasov2017search} demonstrates the impact of Docker
storage configuration on performance.

\textbf{VM images.} Standard VM image formats such as qcow2, vmdk, vhd,
etc. are block-level image formats and are technically reusable
for containers. The major drawback of these image formats is that they
are not layered.
% Linux supports a feature that allows a
% backing-file to be used as a data source for ``holes'' in the front image. 
It is possible to emulate layering by repeatedly applying
QEMU's backing-file feature, but doing this incurs significant
performance overhead for reads. As \zzhnew{seen} in
Section~\ref{image-layers}, the translation tables for standard VM
image formats are also much bigger than those needed for DADI.

% will end up with a high complexity of $O(N)$
% for reads, where $N$ is the number of layers in the image. This
% performance is not acceptable.

\zzh{\textbf{Log-structured storage.}
The \sys image \zzhnew{implements a log-structured~\cite{lsm1992} writable layer}, 
making it possible to stack on top of all types
of storage systems, including append-only ones such as HDFS or our 
internal distributed file system. What is unique about \sys is that 
an image consists of multiple layers, and DADI
is equipped with a novel algorithm to merge their indices, so that, in theory, 
the lookup performance does not degrade as the number of layers 
increases.}

%\section{Towards an Ideal Image Service}
%Pulling images has become an issue on the first day when container is
%adopted, for those who want to launch their applications as quickly as
%possible. Large users / applications suffer more than small ones. 
%%
%We started to use a P2P downloading tool years ago, to cope with the
%scalability problem of Registry. However, the downloading and unpacking
%process still takes a long time. 
% \section{Discussion and Future Work}

% With overlayfs, containers that share layers are able to share the
% host page cache when they access the same files in those shared
% layers.  Because \sys realizes each layered image as a separate
% virtual block device, when multiple containers access the same file in
% a shared layer, the accesses appear to the host to be for distinct
% pages. In other words, the host page cache is not shared, potentially
% reducing its efficiency.

% One way to address this issue is to introduce a shared block pool for
% all the virtual block devices corresponding to the different
% containers on a host. The basic idea is to use the device mapper to
% map segments from the pool to the virtual block devices such that
% accesses by different containers to the same file in a shared layer
% appear to be for the same segment in the pool.  The pool is backed by
% the page cache while the virtual block device and file system on top
% will need to support Direct Access (DAX) to avoid double caching. This
% solution can be further improved by performing block-level
% deduplication in the pool.

% % To address this problem, we have rethought the implementation of virtual 
% % block device. It is possible to realize a virtual persistent memory device, which
% % exposes a block device interface with Direct Access (DAX) feature, and it is 
% % backed by DRAM (ideally page cache). We consider \texttt{brd}\cite{brd} 
% % driver that is part of Linux kernel as a good starting point.
% % We are to solve the problem with help of DAX. 

% % This device plays a role of block pool, so it must be logically large enough to 
% % hold all data blocks of all layer blobs possibly accessed on its host. 
% % But it do not need to consume such large amount of physical memory, as it 
% % can be allocated, populated or recycled dynamically on-demand. 

% % We plan to use a relatively modern file system for the image that supports DAX, 
% % add enable the DAX feature by adding an option ``-o dax'' to \texttt{mount}
% % command. Once DAX is enabled all along the I/O stack, the host kernel
% % now can share page cache for the same data blocks.

% With the emergence of virtualized runtimes, container is becoming a
% new type of virtual machine and vice versa. The runtimes of container
% and VM may also begin to converge. By being based on the widely
% supported block device, \sys image is compatible with both containers
% and VMs, and is naturally a converged image service.  Such a converged
% infrastructure will bring the convenience and efficiency of layered
% image to VM users on the cloud today. It will also provide users with
% increased flexibility and enable applications to evolve gradually from
% cloud-based to cloud-native.
% % , and facilitate powerful functions such as
% % incremental replication and migration of VMs between datacenters and
% % even vendors.

% A key part of realizing the potential of \sys is to standardize its
% image format and facilitate its adoption. We are working to contribute
% core parts of \sys to the container community.

% \iffalse
% \section{Prefetch}
% Cache has been playing an important role in the whole architecture of ACI's I/O flow. 
% When there is no cache (container cold start), however, the backend 
% storage engine will still need to visit Registry frequently, and temporarily.

% Prefetch is a common mechanism to avoid this situation. As it literally suggests, 
% the key is to retrieve data in advance, and save them into cache.

% There are many ways to do prefetch, for instance, we can simply read extra data b
% eyond the designated range of a Registry blob. That might be called as Expand 
% Prefetch, and the expand ratio could be 2x, 4x, or even higher, if our network 
% bandwidth is sufficient.

% Another way is to prioritize files and use landmarks, which is already 
% adopted in Google's stargz. The storage engine runtime will prefetch the 
% range where prioritized files are contained. And finally this information 
% will be leveraged for increasing cache hit ratio and mitigating read overhead.

% In this article we are about to introduce a new prefetch mechanism based 
% on time sequenced I/O patterns (trace). This mechanism has been integrated 
% as a feature into ctr record-trace command.

% \subsection{Trace Prefetch}
% Since every single I/O request happens on user's own filesystem will 
% eventually be mapped into one overlaybd's layer blob, we can then 
% record all I/Os from the layer blob's perspective, and replay them 
% later. That's why we call it Trace Prefetch.

% Trace prefetch is time based, and it has greater granularity and 
% predication accuracy than stargz. We don't mark a file, because user 
% app might only need to read a small part of it in the beginning, simply 
% prefetching the whole file would be less efficient. Instead, we replay 
% the trace, by the exact I/O records that happened before. Each record 
% contains only necessary information, such as the offset and length of 
% the blob being read.

% Trace is stored as an independent image layer, and MUST always be 
% the uppermost one. Neither image manifest nor container snapshotter 
% needs to know if it is a trace layer, snapshotter just downloads and 
% extracts it as usual. The overlaybd backstore MUST recognize trace 
% layer, and replay it accordingly.

% \subsection{Record}
% Recording is to run a temporary container based on the target 
% image, persist I/O records during startup, and then dump them 
% into a trace blob. The trace blob will be chained, and become 
% the top layer.

% Recording functionality SHOULD be integrated into container's 
% build (compose) system, and MUST have a parameter to indicate 
% how long the user wishes to record. After timeout, the build 
% system MUST stop the running container, so the recording 
% terminates as well.

% The container could be either stateless or stateful. CNI is 
% enabled by default to provide an isolated network, so that 
% the recording container is unlikely to cause unexpected consequences 
% in production environment.

% When building a new image from a base image, the old 
% trace layer (if it exists in the base image) MUST be removed. 
% New trace layer might be added later, if recording is desired.

% \subsection{Push}
% Push command will save both data layer and trace layer to 
% Registry. The trace layer is transparent to the push command.

% \subsection{Replay}
% After Recording and Pushing, users could pull and run the 
% specific image somewhere else. Snapshotter's storage backend 
% SHOULD load the trace blob, and replay I/O records for each layer blob.

% \subsection{Example Usage}
% The example usage of building a new image with trace layer would be as follows:
% ctr record-trace --time 20 <old\_image> <local>

% ctr push <new\_image> <local>
% Note the old image must be in overlaybd format. A temporary 
% container will be created and do the recording. The recording 
% progress will be terminated by either timeout, or user signals.

% Due to current limitations, this command might ask you to remove 
% the old image locally, in order to prepare a clean environment 
% for the recording.

% \subsection{Performance}
% Measure the service available time (in seconds) of a Wordpress 
% container. The testing host is deployed in a different region 
% with registry service. The network bandwidth is 25Mbps.
% \fi

\section{Conclusions}
We have designed and implemented \sys, a block-level remote image
service for containers. \sys is based on the observation that
an incremental image can be realized with block-based layers, where each
layer corresponds to a set of file changes but is physically the set
of changes at the block level underneath a given file system. Such a
design allows the image service to be file system and platform
agnostic, enabling applications to be elastically deployed in
different environments. The relative simplicity of block-based layers
further facilitates optimizations to increase agility. These include
fine-grained on-demand data transfer of remote images, \zzh{online
decompression with efficient codecs and compression-aware caching}, trace-based prefetching,
a \zzh{P2P-based decentralized image sharing 
architecture with pull-push collaborative block prefetching} to handle burst workloads, and easy integration with the
container ecosystem. Our experience with \sys in the production environment
of one of the world's largest e-commerce platforms shows that \sys is
very effective at increasing agility and elasticity in deploying
applications.

%\begin{figure}[tb]
%\centering
%\includegraphics[width=0.99\linewidth]{figs/perf-zfile.pdf}
%\caption{ZFile is beneficial to performance.}
%\label{perf-zfile}
%\end{figure}

%%
%% The acknowledgments section is defined using the "acks" environment
%% (and NOT an unnumbered section). This ensures the proper
%% identification of the section in the article metadata, and the
%% consistent spelling of the heading.
% \begin{acks}
%   This paper would not have been possible without the close
%   collaboration of our storage, container, serverless, kernel and
%   virtualization teams. We are especially grateful to Haobo Xu, Liang
%   Han, Qianbo Huai and Jiwei Lu for delivering the initial version of
%   the P2P Data Transfer capability. We are also grateful to our
%   shepherd, Vasily Tarasov, and anonymous reviewers for helping us
%   improve the paper.
% \end{acks}

%%
%% The next two lines define the bibliography style to be used, and
%% the bibliography file.
\bibliographystyle{ACM-Reference-Format}
\bibliography{dadi.bib}

%%
%% If your work has an appendix, this is the place to put it.
\appendix


\end{document}
\endinput
%%
%% End of file `sample-acmsmall.tex'.
