%% bare_conf.tex
%% V1.3
%% 2007/01/11
%% by Michael Shell
%% See:
%% http://www.michaelshell.org/
%% for current contact information.
%%
%% This is a skeleton file demonstrating the use of IEEEtran.cls
%% (requires IEEEtran.cls version 1.7 or later) with an IEEE conference paper.
%%
%% Support sites:
%% http://www.michaelshell.org/tex/ieeetran/
%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/
%% and
%% http://www.ieee.org/

%%*************************************************************************
%% Legal Notice:
%% This code is offered as-is without any warranty either expressed or
%% implied; without even the implied warranty of MERCHANTABILITY or
%% FITNESS FOR A PARTICULAR PURPOSE!
%% User assumes all risk.
%% In no event shall IEEE or any contributor to this code be liable for
%% any damages or losses, including, but not limited to, incidental,
%% consequential, or any other damages, resulting from the use or misuse
%% of any information contained here.
%%
%% All comments are the opinions of their respective authors and are not
%% necessarily endorsed by the IEEE.
%%
%% This work is distributed under the LaTeX Project Public License (LPPL)
%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
%% distributed and modified. A copy of the LPPL, version 1.3, is included
%% in the base LaTeX documentation of all distributions of LaTeX released
%% 2003/12/01 or later.
%% Retain all contribution notices and credits.
%% ** Modified files should be clearly indicated as such, including  **
%% ** renaming them and changing author support contact information. **
%%
%% File list of work: IEEEtran.cls, IEEEtran_HOWTO.pdf, bare_adv.tex,
%%                    bare_conf.tex, bare_jrnl.tex, bare_jrnl_compsoc.tex
%%*************************************************************************

% *** Authors should verify (and, if needed, correct) their LaTeX system  ***
% *** with the testflow diagnostic prior to trusting their LaTeX platform ***
% *** with production work. IEEE's font choices can trigger bugs that do  ***
% *** not appear when using other class files.                            ***
% The testflow support page is at:
% http://www.michaelshell.org/tex/testflow/



% Note that the a4paper option is mainly intended so that authors in
% countries using A4 can easily print to A4 and see how their papers will
% look in print - the typesetting of the document will not typically be
% affected with changes in paper size (but the bottom and side margins will).
% Use the testflow package mentioned above to verify correct handling of
% both paper sizes by the user's LaTeX system.
%
% Also note that the "draftcls" or "draftclsnofoot", not "draft", option
% should be used if it is desired that the figures are to be displayed in
% draft mode.
%
\documentclass[10pt, draftclsnofoot, onecolumn, a4paper, journal]{IEEEtran}
%\documentclass[10pt, final, a4paper, journal]{IEEEtran}
\usepackage{mathrsfs}
\usepackage{times,amsmath}
\usepackage{algorithm}
\usepackage{algorithmic}

\usepackage{cite}
\ifx\pdfoutput\undefined
\usepackage{graphicx,color}
\else
\usepackage[pdftex]{graphicx,color}
\fi
\usepackage{psfrag}
\usepackage{subfigure}
\usepackage{url}
\usepackage{stfloats}
\usepackage{amsmath}
\usepackage{amssymb}
%\usepackage{array}

% Add the compsocconf option for Computer Society conferences.
%
% If IEEEtran.cls has not been installed into the LaTeX system files,
% manually specify the path to it like:
% \documentclass[conference]{../sty/IEEEtran}





% Some very useful LaTeX packages include:
% (uncomment the ones you want to load)


% *** MISC UTILITY PACKAGES ***
%
%\usepackage{ifpdf}
% Heiko Oberdiek's ifpdf.sty is very useful if you need conditional
% compilation based on whether the output is pdf or dvi.
% usage:
% \ifpdf
%   % pdf code
% \else
%   % dvi code
% \fi
% The latest version of ifpdf.sty can be obtained from:
% http://www.ctan.org/tex-archive/macros/latex/contrib/oberdiek/
% Also, note that IEEEtran.cls V1.7 and later provides a builtin
% \ifCLASSINFOpdf conditional that works the same way.
% When switching from latex to pdflatex and vice-versa, the compiler may
% have to be run twice to clear warning/error messages.






% *** CITATION PACKAGES ***
%
%\usepackage{cite}
% cite.sty was written by Donald Arseneau
% V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
% \cite{} output to follow that of IEEE. Loading the cite package will
% result in citation numbers being automatically sorted and properly
% "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
% cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
% \cite will automatically add leading space, if needed. Use cite.sty's
% noadjust option (cite.sty V3.8 and later) if you want to turn this off.
% cite.sty is already installed on most LaTeX systems. Be sure and use
% version 4.0 (2003-05-27) and later if using hyperref.sty. cite.sty does
% not currently provide for hyperlinked citations.
% The latest version can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/cite/
% The documentation is contained in the cite.sty file itself.






% *** GRAPHICS RELATED PACKAGES ***
%
\ifCLASSINFOpdf
  % \usepackage[pdftex]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../pdf/}{../jpeg/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
\else
  % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
  % will default to the driver specified in the system graphics.cfg if no
  % driver is specified.
  % \usepackage[dvips]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../eps/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.eps}
\fi
% graphicx was written by David Carlisle and Sebastian Rahtz. It is
% required if you want graphics, photos, etc. graphicx.sty is already
% installed on most LaTeX systems. The latest version and documentation can
% be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/graphics/
% Another good source of documentation is "Using Imported Graphics in
% LaTeX2e" by Keith Reckdahl which can be found as epslatex.ps or
% epslatex.pdf at: http://www.ctan.org/tex-archive/info/
%
% latex, and pdflatex in dvi mode, support graphics in encapsulated
% postscript (.eps) format. pdflatex in pdf mode supports graphics
% in .pdf, .jpeg, .png and .mps (metapost) formats. Users should ensure
% that all non-photo figures use a vector format (.eps, .pdf, .mps) and
% not a bitmapped formats (.jpeg, .png). IEEE frowns on bitmapped formats
% which can result in "jaggedy"/blurry rendering of lines and letters as
% well as large increases in file sizes.
%
% You can find documentation about the pdfTeX application at:
% http://www.tug.org/applications/pdftex





% *** MATH PACKAGES ***
%
%\usepackage[cmex10]{amsmath}
% A popular package from the American Mathematical Society that provides
% many useful and powerful commands for dealing with mathematics. If using
% it, be sure to load this package with the cmex10 option to ensure that
% only type 1 fonts will utilized at all point sizes. Without this option,
% it is possible that some math symbols, particularly those within
% footnotes, will be rendered in bitmap form which will result in a
% document that can not be IEEE Xplore compliant!
%
% Also, note that the amsmath package sets \interdisplaylinepenalty to 10000
% thus preventing page breaks from occurring within multiline equations. Use:
%\interdisplaylinepenalty=2500
% after loading amsmath to restore such page breaks as IEEEtran.cls normally
% does. amsmath.sty is already installed on most LaTeX systems. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/amslatex/math/





% *** SPECIALIZED LIST PACKAGES ***
%
%\usepackage{algorithmic}
% algorithmic.sty was written by Peter Williams and Rogerio Brito.
% This package provides an algorithmic environment for describing algorithms.
% You can use the algorithmic environment in-text or within a figure
% environment to provide for a floating algorithm. Do NOT use the algorithm
% floating environment provided by algorithm.sty (by the same authors) or
% algorithm2e.sty (by Christophe Fiorio) as IEEE does not use dedicated
% algorithm float types and packages that provide these will not provide
% correct IEEE style captions. The latest version and documentation of
% algorithmic.sty can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/algorithms/
% There is also a support site at:
% http://algorithms.berlios.de/index.html
% Also of interest may be the (relatively newer and more customizable)
% algorithmicx.sty package by Szasz Janos:
% http://www.ctan.org/tex-archive/macros/latex/contrib/algorithmicx/




% *** ALIGNMENT PACKAGES ***
%
%\usepackage{array}
% Frank Mittelbach's and David Carlisle's array.sty patches and improves
% the standard LaTeX2e array and tabular environments to provide better
% appearance and additional user controls. As the default LaTeX2e table
% generation code is lacking to the point of almost being broken with
% respect to the quality of the end results, all users are strongly
% advised to use an enhanced (at the very least that provided by array.sty)
% set of table tools. array.sty is already installed on most systems. The
% latest version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/required/tools/


%\usepackage{mdwmath}
%\usepackage{mdwtab}
% Also highly recommended is Mark Wooding's extremely powerful MDW tools,
% especially mdwmath.sty and mdwtab.sty which are used to format equations
% and tables, respectively. The MDWtools set is already installed on most
% LaTeX systems. The latest version and documentation is available at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/mdwtools/


% IEEEtran contains the IEEEeqnarray family of commands that can be used to
% generate multiline equations as well as matrices, tables, etc., of high
% quality.


%\usepackage{eqparbox}
% Also of notable interest is Scott Pakin's eqparbox package for creating
% (automatically sized) equal width boxes - aka "natural width parboxes".
% Available at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/eqparbox/





% *** SUBFIGURE PACKAGES ***
%\usepackage[tight,footnotesize]{subfigure}
% subfigure.sty was written by Steven Douglas Cochran. This package makes it
% easy to put subfigures in your figures. e.g., "Figure 1a and 1b". For IEEE
% work, it is a good idea to load it with the tight package option to reduce
% the amount of white space around the subfigures. subfigure.sty is already
% installed on most LaTeX systems. The latest version and documentation can
% be obtained at:
% http://www.ctan.org/tex-archive/obsolete/macros/latex/contrib/subfigure/
% subfigure.sty has been superseded by subfig.sty.



%\usepackage[caption=false]{caption}
%\usepackage[font=footnotesize]{subfig}
% subfig.sty, also written by Steven Douglas Cochran, is the modern
% replacement for subfigure.sty. However, subfig.sty requires and
% automatically loads Axel Sommerfeldt's caption.sty which will override
% IEEEtran.cls handling of captions and this will result in nonIEEE style
% figure/table captions. To prevent this problem, be sure and preload
% caption.sty with its "caption=false" package option. This will preserve
% IEEEtran.cls handling of captions. Version 1.3 (2005/06/28) and later
% (recommended due to many improvements over 1.2) of subfig.sty supports
% the caption=false option directly:
%\usepackage[caption=false,font=footnotesize]{subfig}
%
% The latest version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/subfig/
% The latest version and documentation of caption.sty can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/caption/




% *** FLOAT PACKAGES ***
%
%\usepackage{fixltx2e}
% fixltx2e, the successor to the earlier fix2col.sty, was written by
% Frank Mittelbach and David Carlisle. This package corrects a few problems
% in the LaTeX2e kernel, the most notable of which is that in current
% LaTeX2e releases, the ordering of single and double column floats is not
% guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
% single column figure to be placed prior to an earlier double column
% figure. The latest version and documentation can be found at:
% http://www.ctan.org/tex-archive/macros/latex/base/



%\usepackage{stfloats}
% stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
% the ability to do double column floats at the bottom of the page as well
% as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
% LaTeX2e). It also provides a command:
%\fnbelowfloat
% to enable the placement of footnotes below bottom floats (the standard
% LaTeX2e kernel puts them above bottom floats). This is an invasive package
% which rewrites many portions of the LaTeX2e float routines. It may not work
% with other packages that modify the LaTeX2e float routines. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/sttools/
% Documentation is contained in the stfloats.sty comments as well as in the
% presfull.pdf file. Do not use the stfloats baselinefloat ability as IEEE
% does not allow \baselineskip to stretch. Authors submitting work to the
% IEEE should note that IEEE rarely uses double column equations and
% that authors should try to avoid such use. Do not be tempted to use the
% cuted.sty or midfloat.sty packages (also by Sigitas Tolusis) as IEEE does
% not format its papers in such ways.





% *** PDF, URL AND HYPERLINK PACKAGES ***
%
%\usepackage{url}
% url.sty was written by Donald Arseneau. It provides better support for
% handling and breaking URLs. url.sty is already installed on most LaTeX
% systems. The latest version can be obtained at:
% http://www.ctan.org/tex-archive/macros/latex/contrib/misc/
% Read the url.sty source comments for usage information. Basically,
% \url{my_url_here}.

% *** Do not adjust lengths that control margins, column widths, etc. ***
% *** Do not use packages that alter fonts (such as pslatex).         ***
% There should be no need to do such things with IEEEtran.cls V1.6 and later.
% (Unless specifically asked to do so by the journal or conference you plan
% to submit to, of course. )


% correct bad hyphenation here
\hyphenation{op-tical net-works semi-conduc-tor}
\begin{document}

%
% paper title
% can use linebreaks \\ within to get better formatting as desired
\title{C$5$: Collaborative Content Fetching for Common Characteristic Community in Wireless Mobile Networks}


% author names and affiliations
% use a multiple column layout for up to two different
% affiliations

\author{Lai~Tu,~\IEEEmembership{Member,~IEEE,}
        and Chung-Ming~Huang,~\IEEEmembership{Senior Member,~IEEE}

\thanks{The research is supported by the National Science Council of the
Republic of China under Grant No. NSC 98-2219-E-006-008, the National Science and Technology Major Project of the Ministry of Science and Technology of China (Grant No. 2009ZX03004-004) and China
Postdoctoral Science Foundation (Grant No. 20070420911).}
\thanks{Dr. Lai Tu is with the Department of Electronics and Information Engineering,
Huazhong University of Science and Technology and the Department of Computer
Science and Information Engineering, National Cheng Kung University.}% <-this % stops a space
\thanks{Prof. Chung-Ming Huang is with the Department of Computer
Science and Information Engineering, National Cheng Kung University, Tainan
City, 70101, Taiwan.} % <-this % stops a space
\thanks{*The corresponding author is Chung-Ming Huang.}
\thanks{Manuscript received month date, year; revised month date, year.}}

% conference papers do not typically use \thanks and this command
% is locked out in conference mode. If really needed, such as for
% the acknowledgment of grants, issue a \IEEEoverridecommandlockouts
% after \documentclass

% for over three affiliations, or if they all won't fit within the width
% of the page, use this alternative format:
%
%\author{\IEEEauthorblockN{Michael Shell\IEEEauthorrefmark{1},
%Homer Simpson\IEEEauthorrefmark{2},
%James Kirk\IEEEauthorrefmark{3},
%Montgomery Scott\IEEEauthorrefmark{3} and
%Eldon Tyrell\IEEEauthorrefmark{4}}
%\IEEEauthorblockA{\IEEEauthorrefmark{1}School of Electrical and Computer Engineering\\
%Georgia Institute of Technology,
%Atlanta, Georgia 30332--0250\\ Email: see http://www.michaelshell.org/contact.html}
%\IEEEauthorblockA{\IEEEauthorrefmark{2}Twentieth Century Fox, Springfield, USA\\
%Email: homer@thesimpsons.com}
%\IEEEauthorblockA{\IEEEauthorrefmark{3}Starfleet Academy, San Francisco, California 96678-2391\\
%Telephone: (800) 555--1212, Fax: (888) 555--1212}
%\IEEEauthorblockA{\IEEEauthorrefmark{4}Tyrell Inc., 123 Replicant Street, Los Angeles, California 90210--4321}}




% use for special paper notices
%\IEEEspecialpapernotice{(Invited Paper)}

\markboth{Manuscript for IEEE Transactions on Parallel and Distributed Systems}%
{Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for Journals}


% make the title area
\maketitle


\begin{abstract}
It is common that current mobile devices are equipped with multiple
interfaces such as GPRS or UMTS for wireless WAN (WWAN) link as well
as WiFi and/or Bluetooth for local networking. Meanwhile, it is also
possible that a number of mobile subscribers keep close for a period
of time and fetch the same content from the Internet. Stimulated by these
facts, we develop \textit{C$5$}, a collaborative content fetching
scheme for groups of mobile subscribers with common characteristics.

\textit{C$5$} makes a number of new contributions over prior work:
(a) a small scale P2SP framework of a hybrid mobile network which
considers possible concurrent mobile Internet traffic to maximize
the utility of WWAN links; (b) support of MAC layer multicast in
community and a new community formation procedure with the multicast
rate estimation; (c) an application-level fetching procedure for
both the server and the peer clients; and (d) an investigation of
chunk selection strategies for the file downloading applications and
the live streaming applications in \textit{C$5$}. We also formulate
the content fetching process in \textit{C$5$} into an optimization
problem and give a simple method to estimate the bound of the
objective function. Simulation results show that \textit{C$5$} can
shorten the content fetching time in the file downloading
application and increase playback performance in the live streaming
application for community members. Thus it can improve the user
experiences for mobile Internet subscribers.
\end{abstract}

\begin{IEEEkeywords}
Mobile networks; collaborative fetching; P2SP; common characteristic
community; MAC layer multicast;

\end{IEEEkeywords}


% For peer review papers, you can put extra information on the cover
% page as needed:
% \ifCLASSOPTIONpeerreview
% \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
% \fi
%
% For peerreview papers, this IEEEtran command inserts a page break and
% creates the second title. It will be ignored for other modes.
\IEEEpeerreviewmaketitle


\section{Introduction}\label{sec:intro}
% no \IEEEPARstart
The mobile phone, now with significant computing power, is becoming the
primary connection tool for most people in the world, providing
information in a portable, well-connected form at a relatively low
price~\cite{future}. It is common that mobile phones are equipped
with multiple interfaces such as GPRS or UMTS for WWAN connection as
well as WiFi and/or Bluetooth for local networking. This enables
mobile phones to use any or all of these wireless interfaces
separately and simultaneously to get better Internet access or
interconnection. Different interfaces are usually used in different
environments to make the best of all wireless resources. WiFi is
preferable when the mobile is motionless in a WLAN coverage such as
a WiFi hotspot because it offers a high data rate while users would
rather use WWAN for its predominance in ubiquity and high mobility
support. As the available WLAN coverage is still quite limited in
outdoor environments and WiFi performs rather poorly for high-mobility
users (e.g., on vehicles), the status quo is that outdoor
subscribers have to choose the low speed WWAN interface to access
Internet while leaving the high speed WLAN interface idle in most
cases.

On the other hand, it is quite common that a number of mobile
subscribers move together and keep in close vicinity for a period of
time (e.g., traveling on buses)~\cite{Glenn}, and it may also be
possible for these subscribers to fetch the same data from the Internet. For
instance, people tend to download the same top-rated songs or receive the
same live stream at the same time. While leveraging peer
downloading to maximize network utility has been extensively studied
in P2P media sharing system and already put into practical Internet
application, only a few studies consider using such features in
mobile networks because of their bottleneck at the wireless access
side.

Taking account of subscribers' characteristics, in this paper, we
present \textit{C$5$}, an architecture of collaborative content
fetching scheme for a group of mobile users that share common
characteristics, i.e., colocated nodes requesting the same data. We name
this group of mobile users \textit{Common Characteristic Community}.
\textit{C$5$} attempts to use both the WWAN and the WLAN interfaces
in a collaborative manner. Nodes in close vicinity use the
high-speed WLAN to form a group, stripe traffic both from their own
WWAN links and from community members using the WLAN interfaces, and
contribute their own data for requesting members as well.
\textit{C$5$} also takes advantage of broadcast in the wireless
channel to perform a MAC layer multicast when nodes distribute their
own data. \textit{C$5$} aims to use the idle WLAN interfaces for in
community communications and makes the traffic from WWAN links
serve as many members as possible. As a result, \textit{C$5$}
multiplexes all WWAN resources in community and serves all members.

While pooling together different wireless interfaces of one mobile
node or links of multiple nodes has been studied in prior work (see
Section~\ref{sec:related}), few consider the integration of mobile subscribers'
features and P2P (Peer to Peer) or P2SP (Peer to Server \& Peer)
sharing. Thus \textit{C$5$} brings some novel contributions in the
following regards:

\begin{enumerate}

\item \textit{C$5$} introduces a new architecture of a hybrid mobile
network which considers features of mobile Internet traffic to
maximize the utility of WWAN links, so as to mitigate the bottleneck
in mobile Internet access.

\item An optimization model is developed to formulate the content
distribution in \textit{C$5$} and is further simplified to pursue
the lower bound of the exponential sum of content fetching time.

\item \textit{C$5$} enables MAC layer multicast in community to avoid
possible bottleneck in the WLAN and estimates an acceptable
multicast rate during community formation.

\item A set of algorithms for content distribution and chunk selection strategies in \textit{C$5$}
 are presented in the paper, which can closely approach the bound of the
objective and maintain fairness among peers in the community as well.
\end{enumerate}

We believe that these contributions of \textit{C$5$} complement
prior work, especially in the case that there is common network traffic
in a group of mobile nodes in close vicinity. As P2P or P2SP traffic
is becoming the dominant network traffic in the future
Internet~\cite{P4P}, there will be more chances to leverage
\textit{C$5$}'s advantages to improve the quality of mobile Internet
services.

The paper is organized as follows: After a brief introduction of
background and motivation in this Section, we survey related work
and discuss how \textit{C$5$} relates to them in
Section~\ref{sec:related}. We present an overview of the network
model of \textit{C$5$} and give a motivating example in
Section~\ref{sec:model}. In Section~\ref{sec:alg}, we formulate the
content distribution in \textit{C$5$} into an optimization problem
and give a simplified model to obtain the lower bound of the objective
function. The collaborative fetching and distribution procedures in
both the clients and the server are then presented in
Section~\ref{sec:alg}, followed by the study of the chunk selection
strategies in Section~\ref{sec:chunk}. We evaluate performance in
Section~\ref{sec:eva} and finally after discussing some technical
issues with respect to practical application of \textit{C$5$} and
future work, we summarize the work in this paper in the last
Section.

\section{Related Work}\label{sec:related}
Similar research on aggregating bandwidth of multiple interfaces
or grouping mobiles to improve efficiency has been investigated in
prior
work~\cite{bag,mar,kim,Sharma,kandula,fatvap,ucan,combine,badia}.

BAG~\cite{bag} motivates the advantages of simultaneous use of
multiple interfaces and proposes a network-layer architecture that
enables such use. MAR \cite{mar} makes use of the multiplicity of
the wireless networks available by dynamically instantiating new
channels based on traffic demand, aggregating the bandwidth and
dynamically shifting load from poor quality to better quality
channels. Other systems have considered coordinating communications
from multiple mobile computing devices~\cite{kim,Sharma}.

Kandula et~al.~\cite{kandula} focused on load-balancing for more
flexible and efficient allocation of resources, thereby extending
the lifetime of a network. The authors addressed the packet
reordering issues in multi-homing networks and presented a new
approach dubbed FLARE that operates on bursts of packets (flowlets)
carefully chosen to avoid reordering, but allowing a finer
granularity of balancing. These solutions either operate on network
layer or transport layer and pay much attention on flow allocation
in inverse multiplexing.

FatVAP~\cite{fatvap} introduces an 802.11 driver that aggregates the
bandwidth available at accessible APs and also balances their load.
FatVAP challenges the fact that APs often provide high speed
wireless connectivity but access the Internet via independent,
relatively low-speed DSL or cable modem links. If we regard a mobile
phone with a WLAN interface as an AP and an Internet application as
a node in the WLAN, the scenarios considered in FatVAP are similar
to ours, where bandwidth imbalance exists in both.


UCAN~\cite{ucan} considers secure crediting; it uses the WLAN to
increase the reach of the WWAN rather than for bandwidth
aggregation.

Inspired by bridging the gap between the range-speed dichotomy of
WWAN and WLAN, Ganesh et~al.~\cite{combine} presented COMBINE, a
system for collaborative downloading. COMBINE enables mobile devices
that are within WLAN range to pool together their WWAN links, thus
significantly increasing the effective speed available to them.
Moreover, COMBINE has a practical energy-cognizant incentives scheme
and a secure accounting mechanism that enables the collaborators to
be assured of easy and secure redemption of credit and hence
encourages collaboration.

Badia~\cite{badia} discussed the exploitation of aggregated mobility
patterns and physical proximity of nodes and use the knowledge about
node movements and geographical positions to create routing groups
of adjacent nodes, which might be beneficial in order to decrease
signaling overhead and increase transmission efficiency.

These works consider different aspects in group formation for
collaborative communication, but they are mostly like
\textit{bandwidth borrow} schemes and none of them take account of
the possible common features in P2P network traffic nor advantages
of multicast in the MAC layer.

Liam et~al.~\cite{liam} proposed a new content source selection
scheme for single-hop, peer-to-peer based content sharing on public
transport. The scheme aims to identify, among colocated peers that
have relevant content, the one that has the highest chance to remain
colocated long enough for data transfers to complete. Thus it can
minimize resource waste due to incomplete transfers and
interference. Common interests in mobile subscribers are considered
in this work, but it is more like a mobile P2P content sharing
system rather than a collaborative fetching system.

To sum up, these studies discussed: i) resource aggregation and
allocation of multiple interfaces in mobile
networks~\cite{bag,mar,kim,Sharma,kandula,fatvap}; or ii) group
formation and coordination~\cite{ucan,combine,badia}; or iii) peer
selection in a mobile P2P content sharing~\cite{liam}. \textit{C$5$}
brings some new ideas such as common characteristics and MAC layer
multicast support, while we believe elements of previous work can be
adapted in \textit{C$5$}. Our focus in this work is on the
architecture of \textit{C$5$} and the content distribution
algorithms employed as applications in both the server and clients.
We also evaluate \textit{C$5$}'s performance through simulation.

\section{Model Overview}\label{sec:model}
\subsection{\textit{C$5$} Model Overview}
Our work is driven by three observations. The first is that while
cellular networks can provide ubiquitous Internet access for mobile
devices, they offer very limited bandwidth which can not meet the
requirements of users and many Internet applications. Secondly,
despite the proliferation of WiFi deployment and its high data
rate, WiFi is still not widely used in mobile networking in
outdoor area due to its limit in both coverage and support for high
mobility. As a result, the high speed WLAN interfaces in mobile
devices are usually left unused in outdoor environments. Thirdly, in
modern society people living in urban areas spend a considerable
amount of time traveling together, for example, on public transport
to/from work and may also be interested in the same popular content
(e.g., video clips, music files, live media streaming), which offer
opportunities for collaborative content sharing and distribution.

To maximize the efficiency of content fetching, \textit{C$5$}
enables a mobile node to share content with its community members to
multiplex the effective WWAN speed. The motivating application is
large popular content fetching or streaming, which would likely
benefit the most from collaborative sharing. Given the growing
market for mobile music and video services (e.g., iTunes and
YouTube), we believe the speedup provided by collaborative fetching
would significantly enhance the user experiences.

\textit{C$5$} borrows the idea from a P2SP system, where peers fetch
data from both the server and peers. However \textit{C$5$} differs
from an Internet P2SP application in the manners of peer sharing.
Nodes in \textit{C$5$} only fetch content from the server and the
peers in local community, excluding other Internet peers, which
means no extra burden to the limited WWAN bandwidth. Moreover, since
the WLAN channels physically support broadcast, \textit{C$5$}
leverages this feature to provide MAC layer multicast to minimize
the traffic in the WLAN.

\begin{figure}
\centerline{\includegraphics[width=0.4\textwidth]{fig/scene.pdf}}
\caption{An Illustrated Scenario of \textit{C$5$}.}
\label{fig:scene}
\end{figure}

\subsection{Motivating Examples}
To better understand the effectiveness of \textit{C$5$}, let us look
at a simple example. Consider the scenario in Fig.~\ref{fig:scene},
where $5$ nodes are in close vicinity and request the same content
from the Internet. Disregarding interference, since all nodes operate on
orthogonal channels connecting to the cell tower, it doesn't matter
whether these nodes attach to one same tower or different ones.
Assuming that the bottlenecks are the WWAN links, it is also
immaterial whether these subscribers belong to one ISP or different
ones. For each node, let the \textit{end-to-end available
bandwidth}, $r$, be the rate at which the client communicates with
the server via cell tower, and the \textit{WLAN available
bandwidth}, $R_b$, be the basic data rate at which any pair of the
nodes in community can communicate with each other. Note that these
values do not refer to link capacities but the throughput achieved
by the client and in particular subsume link losses, driver's rate
selection and competition from other nodes. Note also that the
end-to-end bandwidth is also bounded by the link capacity of the
node to cell tower channel. In practice, WLAN available bandwidth is
usually much greater than the end-to-end available bandwidth.

As a concrete example, say that $5$ mobile subscribers can achieve
$1$Mb/s end-to-end throughput to the server, i.e., $r_s =
1\textrm{Mb/s}$, and the WLAN available bandwidth is $11$Mb/s.
Assume all of them are fetching a $5$MB file, for example, a song,
from the server simultaneously. Let $t$ be the time for all to
finish fetching. If the nodes work as individuals, they have to take
$t = \frac{5\textrm{MB}}{1\textrm{Mb/s}} = 40\textrm{sec}$ to finish
downloading. If they work collaboratively as a community, all nodes
take $\frac{5 \textrm{MB} } {5 \times 1 \textrm{Mb/s}} = 8
\textrm{sec}$ to fetch different one fifth part of the file and
share with others. Since the WWAN interface and the WLAN interface
are independent, nodes can also distribute their own one fifth part
while fetching. Assuming the granularity of transmission is small
enough, all nodes can transmit content up to $11\textrm{Mb/s} \times
8 \textrm{sec} = 11\textrm{MB}$ in $8$sec, which means $8$ seconds
are enough for all nodes to distribute their own one-fifth part
more than twice. If MAC layer data broadcast is enabled in the WLAN,
ideally, all these $5$ nodes can finish fetching in $8$sec. If MAC
layer only allows data unicast, it is easy to get that ideally
$t=12.5$sec $(1\textrm{Mb/s} \cdot t + \frac{11\textrm{Mb/s}}{5
\times 4} \cdot 4 \cdot t = 5\textrm{MB})$.

\begin{figure}
\centerline{\includegraphics[width=0.4\textwidth]{fig/motex.pdf}}
\caption{A comparison of coarse estimations of the time for fetching
a $5$MB content under different numbers of nodes and WLAN data
rates.} \label{fig:ex}
\end{figure}

Fig.~\ref{fig:ex} further shows an illustrative example, where the
end-to-end rate for all nodes is $1$Mb/s and all nodes are
requesting a file of $5$MB. Fig.~\ref{fig:ex} gives coarse
estimations of the fetching time for different numbers of nodes and
WLAN data rates. In the conventional individual fetching scheme, the
time remains constant regardless of the number of nodes. Using
collaborative fetching schemes, the time to finish fetching is
always below that with the conventional scheme. However, without MAC
layer multicast support, the performance will soon reach the
bottleneck of WLAN bandwidth as the number of nodes increases. While
in cases that support MAC layer multicast, the fetching time will
keep decreasing as the number of nodes increases. Such advantages
are expected as each node will fetch less from the WWAN link when
the number of nodes grows and the amount of network traffic
in the WLAN remains the same with multicast support.

In Section~\ref{sec:form}, we will formalize the collaborative
fetching problem that minimizes the fetching time and give solution
in Section~\ref{sec:alg}.

\subsection{Goals and Challenges}
Several challenges from lower layer to application need to be solved
to enable \textit{C$5$}. Above all, even if we assume nodes remain
colocated and request the same content, formation of the community will
still be an issue due to nodes' mobility and some other factors
(e.g., accounting, energy cost). Prior research has addressed
such issues and given solutions which we believe can be adopted in
\textit{C$5$}~\cite{badia,combine}.

One additional challenge in \textit{C$5$} beyond prior work is
to support WLAN MAC layer multicast. Since original 802.11 standard
does not support MAC layer multicast well, \textit{C$5$} applies a
modification of broadcast as the multicast method. IEEE 802.11
implements multicasting by transmitting packets at the base
transmission rate upon observing a clear channel. Unlike the RTS/CTS
mechanism designed for unicast transmissions, it does not have any
mechanism to obtain feedback from the intended multicast receivers.
Thus broadcasting has to use the base transmission rate which may be
much lower than the highest acceptable rate for the multicast
neighbors. Consequently, in \textit{C$5$}, nodes first use the base
transmission rate when broadcasting packets for community formation.
While sending and receiving a broadcast packet for community
formation, a node will measure the channel quality and evaluate the
maximum multicast data rate that is applicable. Nodes will then use
this multicast data rate for distributing content in the community.
As we assume nodes remain in close vicinity, the multicast data rate
in the community is considered stable in the following analysis.

Note that data multicasting in the MAC layer without acknowledgement
will result in unreliable transmission. If acknowledgement is
enabled for MAC layer multicast, multiple acknowledgements will lead
to excessive contention and retransmissions, which may also be
inefficient. Furthermore, with data multicast in the MAC
layer, connection oriented transport layer protocol such as TCP is
hard to employ. So, we use modified UDP as the transport layer
protocol and push all retransmission mechanisms to the application
layer. The data units in the application layer are chunks split from
data content. Too large chunks may cause inefficiency in
retransmissions while too small ones will increase protocol
overhead. Therefore, an optimal chunk size will be chosen to
trade-off this dilemma. Furthermore, UDP does not support IP packet
fragmentation, so we make a little modification on UDP protocol to
let it support fragmentation and assembly.

Once a community has been formed, nodes will request content both
from the server and the peers in community. The server and peers
will select chunks to send based on some algorithms. Schemes and
algorithms to tackle aforementioned issues serve for a common goal
that is to effectively fetch content from Internet for all
subscribers. Besides the MAC layer multicast support, most work is
done at the application level, which we will discuss in detail in
Section~\ref{sec:alg}.

\section{Problem Formulation}\label{sec:form}
In this section, we model the content fetching process of a
\textit{C$5$} enabled network as a discrete optimization problem.
Instead of pursuing the optimal solution, we relax some conditions
and develop a bound to estimate the objective function. As
the optimal solution lies between the bound and a
feasible one, if our scheme can result in a solution close to the
bound, it can be considered as an acceptable near-optimal result.


\begin{table}
\centering \caption{Terms and Symbols} \label{tab:terms}
\begin{tabular}
{|c|p{170pt}|}
\hline Symbols & Terms \\

\hline $\mathcal{N}$ & The set of the nodes in community.\\

\hline $\left\| \cdot \right\|$ & Size of a set. \\

\hline $N$ & $=\left\| \mathcal{N} \right\|$, the number of the nodes in community.\\

\hline $\mathcal{C}$ & The set of chunks of the requested content.\\

\hline $\mathcal{P}, \mathcal{R}$ & $\mathcal{P}, \mathcal{R} \subset \mathcal{C}$, subsets of chunks of the requested content.\\

\hline $m$ & $=\left\| \mathcal{C} \right\|$, the number of the chunks of the requested content. \\

\hline $M$ & The size in bits of a chunk.\\

\hline $r_s$ & The WWAN link rate (in b/s) of Node $s$ to cell tower.\\

\hline $R_b$ & The maximum broadcast (in b/s) rate of nodes in
community. \\

\hline $T(i)$ & The time instance of a node broadcasting the
$(i+1)^{\textrm{th}}$ chunk in community via the WLAN interface. \\

\hline $T_i$ & $ = T(i) - T(i-1)$, the time duration of \textit{Time
Slot} $i$. \\

\hline $s(i)$ & The source node that broadcasts a chunk in
\textit{Time Slot} $i$. \\

\hline $C(i)$ & The chunk transmitted in \textit{Time Slot} $i$. \\

\hline $\tau(s,i)$ & The time instance of Node $s$ starting to
fetching its $(i+1)^{\textrm{th}}$ chunk from WWAN link. \\

\hline $\tau_s^i$ & $ =\tau(s,i) - \tau(s,i-1)$, the time duration
of \textit{Time Slice} $i$. \\

\hline $c(s,i)$ & The chunk received by Node $s$ in \textit{Time Slice} $i$. \\

\hline $\mathcal{W}(s,t)$ & The set of chunks that have been
received by Node $s$ from WWAN link at time $t$. \\

\hline $\mathcal{L}(s,t)$ & The set of chunks that have been
received by Node $s$ from WLAN interface at time $t$. \\

\hline $\mathcal{T}_s$ & The time instance of Node $s$ accomplishing
fetching all requested data. \\

\hline $U \left( \cdot \right)$ & Utility function for optimization object.\\

\hline $\mathcal{M}(\mathcal{R})$ & A bitmap that represents the set
of the indexes of a set $\mathcal{R}$ of chunks.\\

\hline
\end{tabular}
\end{table}


Table \ref{tab:terms} lists all notations used in the problem
formulation. We consider a \textit{C$5$} enabled network consisting
of a set of $\mathcal{N}$ nodes forming community. Each node has a
WWAN interface directly linking the cell tower and a WLAN interface
for in-community communication. We denote $r_s$ and $R_b$ the WWAN
link rate of Node $s$ and the maximum achievable basic rate of nodes
in community. Note that both $r_s$ and $R_b$ may vary over time in a
real system. However, we consider they are constants in the bound analysis.

All nodes in community request same data from a server which splits
the data into a sequence of chunks with the same size. Let $M$ be
the chunk size and $\mathcal{C}$ be the set of chunks.

There may exist some differences among the time of nodes starting
fetching content from the server. Denote $\tau^0_s$ and $T_0$ the time
of Node $s$ starting fetching and that of the first chunk
starting being broadcast in WLAN. To simplify analysis, we split the
transmission timeline into epochs according to the transmission of
each chunk. We distinguish the epochs for WWAN and WLAN traffic
as \textit{Time Slice} and \textit{Time Slot} respectively. Let
$\tau_s^i$ be the $i^{\mbox{th}}$ \textit{Time Slice} during which
Node $s$ strips a chunk from WWAN link and $t_j$ be the
$j^{\mbox{th}}$ \textit{Time Slot} that a certain node uses to
broadcast a chunk via the WLAN interface. Denote $s(j)$ the
contributing node in $t_j$ and $C(j)$ the chunk being transmitted in
the WLAN channel in $t_j$. Similarly, the chunk received by Node $s$
at $\tau_s^i$ is denoted as $c(s,i)$. As an example, Fig. \ref{fig:timeline}
shows the time lines of nodes using different resources to fetch and
contribute network traffic.

\begin{figure}
\centerline{\includegraphics[width=0.4\textwidth]{fig/timeline.pdf}}
\caption{An Illustration of time lines in \textit{C$5$}.}
\label{fig:timeline}
\end{figure}

Since there are some differences in the constraints and objectives between
a \textit{File Downloading} case and a \textit{Live Streaming} case,
we separate their discussions in the following two subsections.

\subsection{File Downloading}
We first explore the problem of a \textit{File Downloading} case.
For a \textit{File Downloading} case, suppose all peers download
the same file from the content server and the set of chunks to be fetched,
$\mathcal{C}$, is static. We do not consider any redundant information
for error check or correction in the content source for the \textit{File Downloading}
case. Therefore all chunks must be downloaded before the content becomes
usable; missing even one chunk means a failed or incomplete fetch.
An ideal result is that all nodes can accomplish fetching in minimum
time. Note that in Eq.~(\ref{eq2}), at $\mathcal{T}_s$, the received
chunks by Node $s$ via the WWAN and the WLAN build up the complete
collection of requested chunks. Hence, the minimum of $\mathcal{T}_s
- \tau^0_s$ will be the time for Node $s$ to finish downloading. We
use exponential function as the utility function (i.e., $U \left( x
\right) = \alpha ^ x$) for its downward concavity and feature of
fairness~\cite{opt}. Meanwhile, the received chunks of a node are
comprised of the elements fetched from the WWAN and those broadcast
in the WLAN since the node's startup. Moreover, when a node
distributes its own chunk to others, the chunk must be selected from
those already received, either from the WLAN or from the WWAN. As a
result, we have the constraints expressed as
Eq.~(\ref{eq3}-\ref{eq6}), and the collaborative data fetching
scheme is mathematically modeled as a discrete optimization problem
expressed as Tab.~\ref{tab:fileform}.

\begin{table}
\centering \caption{Problem formulation of the \textit{File Downloading} case in \textit{C$5$}}
\label{tab:fileform}
\begin{tabular}
{p{0.48\textwidth}} \hline
\begin{equation}\label{eq1}
\mathbf{\min} \mathcal{T}_{obj} = \sum \limits_{s} U \left(
\mathcal{T}_s - \tau^0_s \right)
\end{equation}
\\
Subject To:
\\
\begin{eqnarray}
\mathcal{W}\left( s,\mathcal{T}_s \right) \bigcup \mathcal{L}\left(
s,\mathcal{T}_s \right) = \mathcal{C}, & \forall s \in \mathcal{N}
\label{eq2}
\\
\mathcal{W}\left( s,t \right) = \mathcal{W}\left( s , \tau(s, i-1)
\right) \bigcup \left\{c(s,i)\right\}, & \nonumber
\\
\tau(s, i) \leqslant t < \tau(s, i+1), & \forall s \in \mathcal{N}
\label{eq3}
\\
\mathcal{L}\left( s,t \right) = \mathcal{L}\left( s, T(j-1) \right)
\bigcup \left\{C(j)\right\}, & \nonumber
\\
\tau^0_s \leqslant T(j-1) \leqslant t < T(j), & \forall s \in
\mathcal{N}\label{eq4}
\\
C(j) \in \mathcal{W}\left( s(j), T(j)\right) \bigcup
\mathcal{L}\left( s(j),T(j) \right) & \forall j \label{eq5}
\\
\mathcal{W}\left( s,0 \right) = \mathcal{L}\left( s,0 \right) = \Phi
& \forall s \label{eq6}
\end{eqnarray}
\\
\hline
\end{tabular}
\end{table}


\subsection{Live Streaming}
For a \textit{Live Streaming} case, it is subtly different from the
\textit{File Downloading} case. On the one hand, the good thing is
that missing some chunks may be acceptable in a \textit{Live Streaming} case.
On the other hand, the difficulty is that content in live streaming has strict playback deadlines.
Even temporary decrease in peer fetching rate leads to peer playback quality
degradation, such as video playback freezing or skipping. To make things worse,
at any given moment, peers are only interested in fetching a small set of chunks
falling into the current playback window. Meanwhile, the content server may also
only supply chunks that are in its playback buffer.

\begin{figure}
\centerline{\includegraphics[width=0.4\textwidth]{fig/p2pstreaming.pdf}}
\caption{Sliding Window Mechanism in \textit{Live Streaming} in \textit{C$5$}.}
\label{fig:buffer}
\end{figure}

As a result, there are more constraints in the problem formulation for a
\textit{Live Streaming} case than that of the \textit{File Downloading} case
and the optimization objective also differs. Suppose the set of $\mathcal{N}$
nodes forming into community and fetching streaming chunks $\mathcal{C}$
from the content server. The server streams chunks of content (e.g., video) in
playback order. Each chunk has a sequence number and both the server and the
peers maintain a buffer $B$ that can cache up to $n$ chunks for sending
or local playback. We reference the buffer positions according to the \textit{age}
of the chunks stored: For the peers, $B\left( n \right)$ is used to store
the chunk to be played back immediately and $B\left( 1 \right)$ is reserved
for the newest chunk received from the network; For the content server,
$B\left( 1 \right)$ is reserved for the newly encoded chunk of the stream
and $B\left( n \right)$ stores the chunk to be expired in next time slot.
After each playback time slot, the chunk played back in the previous time slot is removed
from $B$ and all other chunks are shifted up $1$. In other words, the
buffer acts as a sliding window into the stream of the chunks distributed by the server,
as shown in Fig.~\ref{fig:buffer}. For the content server, the buffer is always filled in the
most recent $n$ chunks in the playback, while for the peers, each buffer space is initially empty
and gets filled by the fetching protocol.

Let $p_s \left( i \right) \left[ t \right]$ denote the probability that $i^{th}$
buffer space $B_s \left(  i \right)$, of peer $s$ is filled with the correct
chunk $\mathcal{B}_s \left( i \right)$ at time $t$. We assume this probability
reaches a steady state for sufficiently large $t$, namely
$p_s \left( i \right) \left[ t \right] = p_s \left( i \right)$.  $p_s \left( i \right)$
is called the buffer occupancy probability of the $s^{th}$ peer~\cite{p2pmodel}.
Consequently, $p_s \left( n \right)$ is the probability that $B_s \left( n \right)$ is available,
which reflects the continuity and playback performance of a peer $s$.
The playback performance, given by $p \left( n \right)$, will be the maximum
objective in the \textit{Live Streaming} case.  The problem of the \textit{Live Streaming} case
is then formulated as Tab.~\ref{tab:streamform}.

\begin{table}
\centering \caption{Problem formulation of the \textit{Live Streaming} case in \textit{C$5$}}
\label{tab:streamform}
\begin{tabular}
{p{0.48\textwidth}} \hline
\begin{equation}\label{eqb-1}
\mathbf{\max} P_{obj} = 
p_s \left( n \right)
\end{equation}
\\
Subject To:
\\
\begin{eqnarray}
p_s \left( n \right) = \lim_{t \rightarrow \infty} \mathbb{P}
\left[ B \left( n \right) =  \mathcal{B}_s \left( n \right) \right]
\label{eqb-2}
\\
\mathcal{W}\left( s,t \right) = \mathcal{W}\left( s , \tau(s, i-1)
\right) \bigcup \left\{c(s,i)\right\}, & \nonumber
\\
c(s,i) \in \mathcal{B} \left( t \right), \tau(s, i) \leqslant t < \tau(s, i+1), & \forall s \in \mathcal{N}
\label{eqb-3}
\\
\mathcal{L}\left( s,t \right) = \mathcal{L}\left( s, T(j-1) \right)
\bigcup \left\{C(j)\right\}, & \nonumber
\\
\tau^0_s \leqslant T(j-1) \leqslant t < T(j), & \forall s \in
\mathcal{N}\label{eqb-4}
\\
C(j) \in \mathcal{W}\left( s(j), T(j)\right) \bigcup
\mathcal{L}\left( s(j),T(j) \right)  \bigcap \mathcal{B}_s
\left( t \right) & \forall j \label{eqb-5}
\\
\mathcal{W}\left( s,0 \right) = \mathcal{L}\left( s,0 \right) = \Phi
& \forall s \label{eqb-6}
\end{eqnarray}
\\
\hline
\end{tabular}
\end{table}

\subsection{Bounds for the Objective Function}
The solutions for the problems in Tab.~\ref{tab:fileform} and \ref{tab:streamform}
shall be optimal sequences of chunk transmissions from server to each node
and from the contributing node to community members, which correspond
to chunk selection strategies in the content server and peers. The solutions
may not be unique nor easy to find, and the complexity of the
problems formulated in Tab.~\ref{tab:fileform} and \ref{tab:streamform} arises from the
number of chunks and the number of nodes.

To simplify, we relax some of the constraints and pursue the bounds of the
target functions as references for performance evaluation. Since the ideal
optimal solution always lies between a practical result and the bound,
if our algorithm can get solutions near to the bounds, they can be
considered as acceptable results.

\subsubsection{File Downloading}
For the \textit{File Downloading} case, we estimate the lower bound of
the exponential sum of file downloading time. It is easy to
understand that the time to finish downloading will be
less if all nodes start up simultaneously. So, to begin with, in the
relaxed model we consider the case that all nodes start up at the
same time 0. Secondly we simplify the duration of \textit{Time Slot}
and \textit{Time Slice} of each node to some constants with respect
to the link rates of each channel, i.e., $T_i = M/R_b, i > 0$ and
$\forall s \in \mathcal{N}, \tau^i_s = M/r_s$. Practically, a
successful fetching of chunk may consist of several rounds of
control packets transmissions or even some retransmissions because
of failure. There may also be some deferred time between every two
transmissions, which we will not consider in the simplified problem
either, such that the feasible minimum fetching time will not be
less than the lower bound calculated with the constants. In
addition, it is also intelligible that the minimum fetching time
will be obtained only in case that the number of duplicated received
chunks is kept least, thus we have $\forall i \neq j,
\mathcal{W}(i,t) \bigcap \mathcal{W}(j,t) = \Phi$. Finally, every
chunk has to be downloaded from the WWAN and broadcast in the WLAN by
one of the nodes, so $\biguplus_s \mathcal{W}(s,\mathcal{T}_s) =
\mathcal{C}$. Therefore, this relaxation yields the expression in
Eq.~(\ref{eq7}).

\begin{eqnarray}
\mathcal{T}_{obj} & > & \min \sum \limits_{s} U \left( \max \left\{
\frac{w_sM}{r_s}, \frac{mM}{R_b} \right\} \right)  \nonumber\\
& \geqslant & \max \left\{ \min \sum \limits_s U \left(
\frac{w_sM}{r_s} \right), N U \left( \frac{mM}{R_b} \right) \right\}
\label{eq7}
\end{eqnarray}

\noindent where $w_s = \left\| \mathcal{W}(s, \mathcal{T}_s)
\right\|$ and $\sum_s w_s = m$.

Noting that the saturated throughput of WLAN can not exceed about
$85\%$ of the bandwidth~\cite{bianchi}, we use $80\%$ of the
achievable link rate $R_b$ in our calculation. The relaxed model has
such a simple form that it is easy to figure out the lower bound.
There may not exist a feasible solution to achieve this lower bound,
but this model and lower bound offer a benchmark to measure the
quality of a feasible solution, which we will develop in the next
section.

\subsubsection{Live Streaming}
Differing from the \textit{File Downloading} case, in \textit{Live Streaming}
application, synchronization is considered to be achieved inherently without
relaxation, i.e., all peers begin to request same blocks of new chunks when they
enter the steady state. We assume homogeneous peers, i.e., the \textit{end-to-end available
bandwidths} $r$ for all peers are the same and there is no difference in the chunk selection
strategies among peers. Therefore the continuity $p \left( n \right)$ of each peer can
also be considered the same in the community when the peers have reached the steady state.

If the \textit{Time Slice} $\tau$ of a peer is smaller than the chunk sliding epoch
$\Delta$, i.e., \textit{end-to-end available bandwidth} is large enough for the
peer to fetch each chunk before it is played back, the \textit{bottleneck} does
not exist. Thus the continuity $p \left( n \right)$ in steady state can achieve
$100\%$ ideally. We focus on the cases that \textit{end-to-end available bandwidth} is
not enough. Let $\delta_s = \Delta / \tau_s < 1$. If all peers fetch streaming chunks from
the server individually, it is easy to get the buffer occupancy distribution can be
expressed as follows:

\begin{eqnarray}
p_s\left( \lceil 1/\delta_s \rceil \right) = \delta_s &  &\forall s \in \mathcal{N}, \label{eqc1}\\
p_s(i+1) = p_s(i) & & i \in \left[\lceil 1/\delta_s \rceil, n-1 \right], \forall s \in \mathcal{N}. \label{eqc2}
\end{eqnarray}

Eq.~\ref{eqc1} reflects the chance for the peer to successfully fetch a chunk from the server,
while Eq.~\ref{eqc2} reflects the fact that successful fetching only occurs at the $\lceil 1/\delta_s \rceil^{th}$ location of the buffer in individual fetching. The playback performance $p_s(n)$, equal to $\delta_s$,
is constrained by the \textit{end-to-end available bandwidth}.

When in community collaboration is enabled, the chunk occupancy probability can increase by exchanging
fetched chunks among peers in community. Similar to analysis of file downloading problem, we assume each
peer fetches different chunks from the content server. Thus the chunk occupancy probability can be expressed
as:
\begin{equation}\label{eqc3}
p_s\left( \lceil 1/\delta_s \rceil \right) = \min\left\{1, \delta_s + \sum_{s' \neq s, s' \in \mathcal{N}} {\gamma \delta_{s'}} \right\}
\end{equation}
\noindent where $\gamma = \min\{1, \Delta / T\}$, representing the probability that a chunk can be successfully transmitted during a chunk sliding epoch. Note that we assume the buffer size $n$ is large enough, namely $n \geq 1/\delta_s, \forall s$, so that at least one chunk can be successfully fetched from the WWAN
interface by the peer before its playback slides $n$ chunks forward.

\section{Protocol Design}\label{sec:alg}
To achieve the goal of minimizing the content fetching time, we
consider a \textit{C$5$} enabled application working on both the
clients and the server.

On the client side, the application first initiates community
formation based on a set of messages exchanging with its neighbor
nodes. When requesting a content, the client sends a request of its
desired content from the server. The server will return a bitmap
corresponding to the chunks of the content to the client. The client
then begins requesting and receiving rounds both with the server and
other peers in the collaborative community. When a client
application receives a request from other peers in its community, it
updates its tx-priority bitmap and distributes its own parts at
proper time.

\subsection{Community Formation Algorithm}
The detailed community formation mechanism for \textit{C$5$} is as
follows:

\begin{enumerate}
\item At startup, a \textit{C$5$} application will initialize its
community as an empty set, i.e., $\mathcal{N} \leftarrow \Phi$, and
set the WLAN data rate $R_b$ to the maximum WLAN bandwidth.

\item While the application is running in the client,
it periodically broadcasts \textit{beacon} messages with the
following information.

\begin{itemize}
\item $s$: Address information of local node.

\item $\mathbb{B}_s$: The set of address information of the nodes
whose \textit{beacon} messages have been received by $s$.
\end{itemize}

\item On receiving a \textit{beacon} message from Node $s$,
Node $d$ will add $s$ to $\mathbb{B}_d$ and check whether $d$ is in
$\mathbb{B}_s$ of the message or not. If yes, Node $d$ then
estimates the acceptable data rate $r_{ds}$ with Node $s$. If
$r_{ds}$ is larger than a preset requirement rate for community
formation, denoted as $r_{th}$ and $\|\mathcal{N}\|$ does not exceed
the preset community size threshold, then Node $d$ adds $s$ to local
community ($\mathcal{N} \leftarrow \mathcal{N} \cup \{ s \}$) and
updates the WLAN multicast rate as $R_b \leftarrow \min \{ r_{ds},
R_b \}$.
\end{enumerate}

Therefore, the procedures accomplish the community formation and
rate selection for MAC layer multicast. As we assume the community
is stable, $\mathcal{N}$ and $R_b$ do not fluctuate much in our
discussion.
\begin{algorithm}[b]
\caption{Symbols definition:}\label{alg:cf1}
\begin{algorithmic}[1]
\STATE $s$: local peer client; \STATE $S$: The number of chunks sent
by $s$; \STATE Message(``\texttt{Type}'', \texttt{Data}): A message
whose type is
    \texttt{Type} and which contains \texttt{Data} as packet data;
\STATE Timer(``\texttt{TimerName}'', $T$): A timer named
\texttt{TimerName}
    which will expire in $T$ seconds;
\STATE $Pri(i)$: Priority to send chunk $i$.
\end{algorithmic}
\end{algorithm}

\begin{algorithm}
\caption{Initialization at Client:} \label{alg:cf2}
\begin{algorithmic}[1]
    \STATE $s$ sends a content request to the server;
    \STATE Set Timer(``\texttt{Chunk BMP}'', $T_{chunk\_bmp}$);
    \STATE $S \leftarrow 0$;
    \STATE $\mathcal{W} \leftarrow \Phi$;
    \STATE $\mathcal{L} \leftarrow \Phi$;
    \STATE Wait for message;
\end{algorithmic}
\end{algorithm}

\begin{algorithm}
\caption{On receive a message from the server:} \label{alg:cf3}
\begin{algorithmic}[1]
    \IF {MESSAGE\_TYPE = \texttt{CHUNK\_BMP}}
        \STATE Cancel Timer(``\texttt{Chunk BMP}'');
        \STATE Store $\mathcal{M}(\mathcal{C})$;
        \STATE Send Message(``\texttt{SRVR\_REQ}'', $\mathcal{M}(\mathcal{C})$) to Server;
        \STATE Broadcast Message(``\texttt{PEER\_REQ}'', $\mathcal{M}(\mathcal{C})$) in community;
        \STATE Set Timer(``\texttt{Server Request}'', $T_{srvr\_data}$);
        \STATE Set Timer(``\texttt{Peer Request}'', $T_{peer\_data}$);
    \ENDIF
    \IF {MESSAGE\_TYPE = \texttt{SRVR\_DATA}}
        \STATE Cancel Timer(``\texttt{Server Request}'');
        \STATE Extract chunk from received message data, denoted as $c$;
        \STATE $\mathcal{W} \leftarrow \mathcal{W} \cup \{ c \}$;
        \IF {$\mathcal{M}\left(\mathcal{W} \cup \mathcal{L}\right) \neq \mathcal{M}(\mathcal{C})$}
            \STATE $\mathcal{R} \leftarrow \mathcal{C} -\mathcal{W} \cup
            \mathcal{L}$;
            \STATE Send Message(``\texttt{SRVR\_REQ}'', $\mathcal{M}(\mathcal{R})$) to Server;
            \STATE Set Timer(``\texttt{Server Request}'', $T_{srvr\_data}$);
        \ELSE
            \STATE Assemble Data;
        \ENDIF
    \ENDIF
\end{algorithmic}
\end{algorithm}
\subsection{Collaborative Fetching Algorithm on Client}
Once community has been formed, collaborative fetching can be
applied when clients request content from Internet. The
collaborative fetching begins with the clients sending content
requests to the server. The server will reply to the requesting clients
with a bitmap of the chunks of the content. The client then fetches chunks
from the server and other peer clients in the community and shares its
own chunks if they are requested. Procedures can be divided
into a receiving process and a sending process as illustrated in
Algorithm~\ref{alg:cf1}-\ref{alg:cf5}.

At initialization, a client will set the set of chunks received both
from WWAN and from WLAN to empty and send a request to get the
bitmap of chunks for its desired content.

After initialization, a client will wait for the \texttt{CHUNK\_BMP}
message from the server, which contains the indexes of all chunks of
the requested content. On receiving the message, the client will
start fetching by sending content request messages both to the
server and to the peers in community, which is elaborated in
Algorithm~\ref{alg:cf3}. A timer will be set for each request which
invokes a new request if no response is received when the timer
expires. When a chunk is received from the server, the client stores
it and assembles data when all chunks have been received. While
there are still chunks not received, the client will send a new
request to the server after it finishes processing current
receiving, which contains the updated indexes set for the absent
chunks.

\begin{algorithm}
\caption{On receive a message from peer Client:} \label{alg:cf4}
\begin{algorithmic}[1]
    \IF {MESSAGE\_TYPE = \texttt{PEER\_REQ}}
        \STATE Extract request bitmap from message data: $\mathcal{M}(\mathcal{R})$;
        \FOR{$i \in \mathcal{M}(\mathcal{R})$}
            \STATE $Pri(i) \leftarrow Pri(i) + 1$;
        \ENDFOR
        \STATE $T_{jitter} \leftarrow$ Random Select from
                $\left[0,\frac{N S}{\left\| \mathcal{L}\right\|} T_{defer} \right]$;
        \STATE Set Timer(``\texttt{Peer Data}'', $T_{jitter}$);
    \ENDIF
    \IF {MESSAGE\_TYPE = \texttt{PEER\_DATA}}
        \STATE Cancel Timer(``\texttt{Peer Request}'');
        \STATE Extract chunk from received message, denoted as $c$;
        \STATE $\mathcal{L} \leftarrow \mathcal{L} \cup \{ c \}$;
        \STATE $Pri(\mathcal{M}(c)) \leftarrow 0$;
        \IF {$\mathcal{M}\left(\mathcal{W} \cup \mathcal{L}\right) \neq \mathcal{M}(\mathcal{C})$}
            \STATE $T_{jitter} \leftarrow$ Random Select from $\left[0,
            \frac{M_{req}}{R_b}\right]$;
            \STATE Set Timer(``\texttt{Peer Request}'', $T_{jitter}$);
        \ELSE
            \STATE Assemble Data;
        \ENDIF
    \ENDIF
\end{algorithmic}
\end{algorithm}

The peer client shall also receive messages of two types from other
peers in the community. As mentioned before, a client may broadcast
peer request in the community. Accordingly, when a client receives a
\texttt{PEER\_REQ} message, it will extract the indexes of the
chunks that are requested by the source peer from the message.
Unlike the process in the server, the client will calculate the
priorities for sending different chunks rather than responding to the
request immediately. There are two reasons for such a process. The first is to
avoid multiple responses from several peers which may lead to low
efficiency due to channel contention. Secondly, we can control the
fairness among peers by tuning the deferred time to distribute a
chunk in a client. We hook the deferred time with the contribution
ratio of a peer, i.e., the ratio of distributed chunks to the
received chunks from peers. A client with higher contribution ratio
will have longer deferment.

When a peer receives data from another peer, it will extract the
data as receiving from the server. Meanwhile, as the client assumes
the chunk it receives will also be received by other peers in the
community, it will reset the priority of sending this chunk to $0$.
When finishing receiving, the client will invoke another peer
request in the community. Same as sending peer data, to avoid
concurrent broadcast, the client will wait for a random period of
time before broadcasting a new request.

When timer expires, the client will operate according to the
functionality of the timer, as depicted in Algorithm~\ref{alg:cf5}.
Noting that the priority represents the degree of demand of a chunk,
the client will then choose one of the most requested chunks that it
owns to broadcast.

\begin{algorithm}[t]
\caption{Timer Expire:} \label{alg:cf5}
\begin{algorithmic}[1]
    \IF {Name = \texttt{Server Request}}
        \STATE Cancel Timer(``\texttt{Server Request}'');
        \STATE $\mathcal{R} \leftarrow \mathcal{C} - \left(\mathcal{W} \cup \mathcal{L}\right)$
        \STATE Send Message(``\texttt{SRVR\_REQ}'', $\mathcal{M}(\mathcal{R})$) to Server;
        \STATE Reset Timer(``\texttt{Server Request}'', $T_{srvr\_req}$);
    \ENDIF
    \IF {Name = \texttt{Peer Request}}
        \STATE Cancel Timer(``\texttt{Peer Request}'');
        \STATE $\mathcal{R} \leftarrow \mathcal{C} - \left(\mathcal{W} \cup \mathcal{L}\right)$
        \STATE Broadcast Message(``\texttt{PEER\_REQ}'', $\mathcal{M}(\mathcal{R})$) in community;
        \STATE Reset Timer(``\texttt{Peer Request}'', $T_{peer\_req}$);
    \ENDIF
    \IF {Name = \texttt{Peer Data}}
        \STATE Cancel Timer(``\texttt{Peer Data}'');
        \STATE $\mathcal{P}_{tx} \leftarrow \{c : c \in \mathcal{W} \cup
        \mathcal{L} \quad \& \quad Pri(\mathcal{M}(c)) = \max Pri(\mathcal{W} \cup \mathcal{L}) \}$;
        \IF {$\mathcal{P}_{tx} \neq \emptyset$}
            \STATE Select a chunk $c$ from $\mathcal{P}_{tx}$
            \STATE Broadcast Message(``\texttt{PEER\_DATA}'', $c$);
            \STATE $Pri(\mathcal{M}(c)) \leftarrow 0$;
            \STATE $S \leftarrow S + 1$
        \ENDIF
    \ENDIF
    \IF {Name = \texttt{Chunk BMP}}
        \STATE Send a content request to the server;
        \STATE Set Timer(``\texttt{Chunk BMP}'', $T_{chunk\_bmp}$);
    \ENDIF
\end{algorithmic}
\end{algorithm}

\subsection{Content Distribution Algorithm on Server}
The server distributes contents based on clients' requests.
Initially, when the server receives a request for a content, it first
responds with a bitmap representing the indexes of the chunks of the
content to the initiator. Once the server receives a content request
with the requesting bitmap, the server will be responsible for
choosing one chunk to send to the client.

Motivated by the result of problem formulation in
Section~\ref{sec:form}, it is better to send different chunks to
different nodes in a community even if they are requesting the same
set of chunks. However, the challenge is that the server may not
know whether two requests are from one community or not. On the
other hand, there may also be several servers responsible for the
nodes in one community. Therefore, a server does not know whether a
chunk has already been received by the members of the community of
the requester or not. In a live streaming case, the chunk selection strategy
is even more important because the chunk availability is time constrained.
We will further discuss the chunk selection strategies in the next section.

\section{Chunk Selection Strategies}\label{sec:chunk}
We evaluate two chunk selection methods for file downloading and
three chunk selection strategies for live streaming.

\subsection{Chunk Selection Strategies for File Downloading}
For the file downloading case, all chunks must be downloaded before
they can be used and the order in which they are fetched does not matter.
Therefore, one simple method is to randomly select a chunk from the
set of chunks that the client requests. Another is to select the chunk
that has been sent the fewest times. The second may be optimal as
it tries to avoid distributing the same chunk to one community. However,
it is true only if a server serves one community at the same time.
From simulation, we find that random chunk selection performs
rather well in most scenarios, thus we suggest applying the simple random
chunk selection method on the server side. We will further show the results in
the next section.

\subsection{Chunk Selection Strategies for Live Streaming}
Since a P2P live streaming has strict requirements on arrival time, the chunk selection
strategy is one of the most important issues in a live streaming case. \textit{Rarest
First} and \textit{Greedy} strategies used to be the most studied ones. We have adopted
these two schemes and also proposed some modifications on them to fit our scenario.
We will compare their performance in the next section. The chunk selection strategies
that are involved in this work are elaborated as follows:

\subsubsection{Rarest First Strategy}
The ``\textit{Rarest First Strategy}'' is widely adopted in the P2P file distribution protocol
BitTorrent and P2P streaming protocol CoolStreaming. A peer using the Rarest First
Strategy will select a chunk which has the \textit{fewest number of copies} in the system.
From the perspective of the buffer $B$ in a P2P streaming case, the buffer occupancy probability
in steady state, $p(i)$, is an increasing function of $i$, i.e., $p(i+1) \geq p(i)$. Therefore,
the Rarest First Strategy means that the server or a peer will first select $\mathcal{B}(1)$
to serve if $\mathcal{B}(1)$ is requested by a peer, else will select $\mathcal{B}(2)$ to
serve if $\mathcal{B}(2)$ is requested and so on. Previous research has shown that
the rarest first strategy is very effective in maximizing peer contribution in a P2P streaming
system, especially a large scale P2P system, whose bottleneck is content service rate. However,
in our case, the community size is usually not too small and the bottleneck is supposed to be
the WWAN link rate instead of the content service rate, so peers can only benefit a little from the rarest
first strategy. We show the results in the next section.

\subsubsection{Greedy Strategy with Conservative Margin}
For the Greedy Strategy, on the contrary, the server or a peer will select a chunk which is
\textit{closest to the playback deadline} to serve. From the buffer's point of view,
$\mathcal{B}(n)$ will be the closest to playback time and first selected if it is requested, then
$\mathcal{B}(n-1)$ is the next and so on. The Greedy Strategy is proved to be the best from a single peer's
view for playback and provides the fastest convergence speed in a P2P streaming system, leading
to the lowest start-up latency. But when the WWAN link rate is not enough, using the greedy
strategy for the server to select a chunk may result in a total failure in chunk fetching, i.e., $p(n) = 0$.
So it is better to add a certain margin for conservation. Let $m \geq \lceil 1/\delta_{\min} \rceil $ be
the margin for conservation. Under the modified greedy strategy, a peer or the server will first serve
$\mathcal{B}(n-m)$ if it is requested and then  $\mathcal{B}(n-m-1)$ and so on. The modified Greedy
Strategy with Conservative Margin can avoid zero buffer occupancy but still cannot reach the ideal upper
bound due to too much duplicated transmissions in the community.

\subsubsection{Mixed Greedy, Rarest First and Random Strategies}
Driven by the results of the random strategy in the file downloading
case, we proposed a mixed strategy, which is a combination of
\underline{G}reedy Strategy with Conservative Margin, the
\underline{R}arest \underline{F}irst strategy and the
\underline{R}andom strategy (GFRR). Different strategies are used
for a server selecting a chunk and for a peer selecting a chunk.

When the server selects a chunk to serve, it first randomly selects a
requested chunk from a window that is closest to the playback time
with conservative margin, i.e., to randomly select a chunk from
$\mathcal{B}(n-m)$ to $\mathcal{B}(n-m-w)$, where $w$ is the window
size. If no chunk is needed in the window, the server then randomly
selects a chunk from the next window $\mathcal{B}(n-m-w-1)$ to
$\mathcal{B}(n-m-2w)$ and so on.

When the peer selects a chunk to broadcast, it first randomly selects
a requested chunk from the window $\mathcal{B}(1)$ to
$\mathcal{B}(w)$ if there is any and randomly selects a chunk from
the next window $\mathcal{B}(w+1)$ to $\mathcal{B}(2w)$ if there is
no needed one in the first window and so on. The randomness is
supposed to reduce the duplicated transmissions from the server and
thus can increase the efficiency of the peers broadcasting.

We also investigate other mixed combinations: a) \textit{RFGR},
Rarest First integrated with Random Strategy for fetching from the
server and Greedy with conservative margin integrated with Random
Strategy for fetching from the peers; b) \textit{RFG}, Rarest First
Strategy for fetching from the server and Greedy with conservative
margin for fetching from peers; c) \textit{GRF}, Greedy with
conservative margin strategy for fetching from the server and Rarest
First Strategy for fetching from the peers. Simulation results show
that the GFRR strategy wins in the overall performance among other
mixed strategies, which we will show in the next section.


\section{Evaluation} \label{sec:eva}
The main metric in evaluating the performance of a content fetching
scheme in \textit{C$5$} is the fetching speed. Higher fetching speed leads
to less time to finish fetching in the file downloading case and better playback
quality (higher $p(n)$). We have shown the preliminary
results by simple numerical calculation in the motivating example in
Section~\ref{sec:model}. However, the cases are much more
complicated in practice. The overall performance may be impacted by
many factors, such as the chunk size, bandwidth of the WWAN and the
WLAN, community size and deferred time window before transmission.
Therefore, to find an optimal parameter configuration, we build a simulation
model with NS2 and measure performance of \textit{C$5$} under
different combinations of parameters. Parameters used in the
simulation are listed in Tab.~\ref{tab:simpara}.

\begin{table}
\centering \caption{Parameter Configuration in Simulation.}
\label{tab:simpara}
\begin{tabular}
{|p{100pt}|p{100pt}|} \hline \textbf{Parameters} & \textbf{Values} \\
\hline \hline Chunk Size (KBytes)&
$1.4$, $2.8$, $4.2$, $5.6$, $7$ \\
\hline WLAN Data Rate (Mbps)&
$5$, $11$, $24$, $54$ \\
\hline WLAN MTU (Bytes)&
$1500$ \\
\hline WWAN Link Rate (Kbps)&
$1024$(down), $256$(up) \\
\hline Deferred Time Window (ms)&
$10$, $20$, $30$, $40$, $50$, $60$, $70$, $80$\\
\hline Community Size&
$2$, $4$, $6$, $8$, $10$, $15$ \\
\hline
Streaming Chunk Size& $4200$Bytes \\
\hline
Streaming Playback Rate& $4096$Kbps \\
\hline
Streaming Buffer Size $n$& $16$, $32$, $64$ \\
\hline
Random Window $w$& $4$, $6$, $8$, $16$ \\
\hline
\end{tabular}
\end{table}

\subsection{Optimal Chunk Size and Deferred Time}
The chunk size affects \textit{C$5$}'s performances in two aspects,
namely overhead and failure handling. With the same size of content,
smaller chunk size results in a bigger request packet (due to the
increasing average $\|\mathcal{M}(\mathcal{R})\|$) and more
transmission rounds, which increases the overhead. On the other hand, as
we choose MAC layer multicast as a method of multiplexing, we have
to face the unreliable transmission problem. If we choose a too
large chunk size, the chunk transmission failure ratio will increase
severely, which almost counteracts the benefit from collaborative
fetching. So it is important to arrive at an optimal value of the
chunk size.

\begin{figure*}
{ \subfigure[Content Fetching Time over different Chunk Sizes (WLAN
Data Rate $=24$Mbps).]
{\includegraphics[width=0.32\textwidth]{fig/chksize.pdf}\label{fig:chunksize}}
\hfil \subfigure[Optimal Deferred Time over Chunk Sizes (WLAN Data
Rate $=24$Mbps).]
{\includegraphics[width=0.32\textwidth]{fig/deftime1.pdf}\label{fig:defer:a}}
\hfil \subfigure[Optimal Deferred Time over WLAN Data Rates (Chunk
Size $=4200$Bytes).]
{\includegraphics[width=0.32\textwidth]{fig/deftime2.pdf}\label{fig:defer:b}}
} \caption{Optimal Chunk Size and Deferred Time selection for
different Community Sizes and WLAN data rates.} \label{fig:defer}
\end{figure*}

Considering the popular MTU ($1500$) in the WLAN, we tune the chunk
size from $1400$ bytes to $7000$ bytes, which is about one to five
times of the MTU. Fig.~\ref{fig:chunksize} plots the fetching time
of $8$ community members with different chunk sizes. We find $4$KB
to $5$KB to be a reasonably optimal chunk size. This differs from
the finding in \cite{combine}, wherein $200$KB is assumed to be an
optimal value. Noting that the collaborative downloading scheme in
\cite{combine} is based on HTTP connection which can provide some
kind of packet level reliability that is not applicable in
\textit{C$5$}, the optimal chunk size in \textit{C$5$} is much
smaller. This optimal chunk size, about $4$KB to $5$KB, happens to
be close to the most common chunk size value in a P2P live streaming
application~\cite{chunksize}, which hints that \textit{C$5$} will be suitable
for the live streaming application. The deferred time, $T_{defer}$, is set to avoid
broadcast collision. There also exists a trade-off in the choice of $T_{defer}$.
We examine the different $T_{defer}$ for different chunk sizes,
community sizes and WLAN data rates. We find that the optimal
$T_{defer}$ almost increases proportionately with the chunk size and
community size and decreases with higher WLAN data rates, which is
shown in Fig.~\ref{fig:defer}. Hence we assume $\frac{M}{R_b}$ to be
an acceptable optimal deferred time for data broadcast.

\begin{figure}
{ \subfigure[All subscribers start fetching simultaneously.]
{\includegraphics[width=0.23\textwidth]{fig/speedup.pdf}\label{fig:speedup:a}}
\hfil \subfigure[Subscribers start fetching asynchronously (Start-up
time Interval $T_I$, WLAN Data Rate $24$Mbps).]
{\includegraphics[width=0.23\textwidth]{fig/asynchron.pdf}\label{fig:speedup:b}}
} \caption{Variation of the speed-up of \textit{C$5$} with varying
community size.} \label{fig:speedup}
\end{figure}

\begin{figure}
\centerline{\includegraphics[width=0.45\textwidth]{fig/lbound.pdf}}
\caption{Comparison of the Exponential Sums of Content Fetching Time
of the calculated lower bound and simulation results
($\alpha=1.2$).} \label{fig:exsum}
\end{figure}

\begin{figure*}
{ \subfigure[Peer Receiving Ratios with different community sizes
and WLAN data rates.]
{\includegraphics[width=0.32\textwidth]{fig/psratio.pdf}\label{fig:pshare}}
\hfil \subfigure[Contribution Ratios with different community sizes
and WLAN data rates.]
{\includegraphics[width=0.32\textwidth]{fig/contratio.pdf}\label{fig:contratio}}
\hfil \subfigure[Contribution Efficiency with different community
sizes and WLAN data rates.]
{\includegraphics[width=0.32\textwidth]{fig/contreff.pdf}\label{fig:contreff}}
} \caption{Peer Sharing Ratios and Contribution Efficiency.}
\label{fig:ratios}
\end{figure*}

\subsection{Content Fetching Time for File Downloading}
Given an optimal configuration of \textit{C$5$}, we then evaluate
the major performance metric of the content distribution algorithms
by measuring the fetching time of a $5$MB file. We fix the chunk
size to be $4200$ bytes and refer to the results in
Fig.~\ref{fig:defer} as the optimal deferred time selection for
different WLAN data rates and community sizes.

\subsubsection{Speed-up}
We first look at the speed-up value obtained by \textit{C$5$} with
varying community size. Here the speed-up value refers to the
fetching speed (reciprocal of fetching time) gain from \textit{C$5$}
over that of the conventional individual scheme.
Fig.~\ref{fig:speedup} shows the speed-up values variation from
which we find the speed-up values almost increase linearly with the
number of members of community, until it reaches the bottleneck of
the WLAN data rate. Both simultaneous start-up and asynchronous
start-up are considered. True to intuition, average speed-up values
with simultaneous start-up outperform those with the asynchronous
start-up, despite that the speed-up values of late starting nodes
can reach very high in asynchronous cases. In practice, the client
application can be designed with some tit-for-tat mechanisms to
constrain leeches. From the other view, \textit{C$5$} may be quite
suitable for live streaming applications such as Internet FM or live
TV broadcast, which we will show in the next subsection.

\subsubsection{Exponential Sum}
Taking fairness into account, we evaluate the exponential sum of the
fetching time of all subscribers as analyzed in
Section~\ref{sec:form}. Simulation results show that in simultaneous
start-up cases, the values of the optimization object obtained by
the algorithms are close to the lower bounds obtained via relaxation
when WLAN data rate is above $11$Mbps, as depicted in
Fig.~\ref{fig:exsum}, thus suggesting that the solution produced by
the algorithms is near-optimal. Note that when WLAN data rate is
low, (e.g., $5$Mbps), the exponential sum of fetching time in
simulation results departs from the lower bound as the community
size increases. This is because WLAN channel becomes over saturated
so that the actual data rate is far less than $R_b$ used in
analysis.


\subsection{Peer Sharing Ratios and Contribution Efficiency}
Peer sharing ratios of a client include peer receiving ratio namely
$\frac{\| \mathcal{L} \|}{\| \mathcal{C} \|}$ and peer contribution
ratio, i.e., $\frac{S}{\|\mathcal{C}\|}$. Since the WLAN data rate
is usually much higher than the WWAN link rate, the higher the peer
receiving ratio of a client is, the more speed-up it gains. In
simultaneous cases, the peer receiving ratio will approach
$\frac{N-1}{N}$ if in-community data transmissions do not reach the
bottleneck of WLAN data rate. Similarly, the contribution ratio should
approximately approach $\frac{1}{N}$ in an ideal case, as shown in
Fig.~\ref{fig:pshare} and~\ref{fig:contratio}.

However there may exist redundant transmissions in practice due to
transmission failures. Therefore, neither peer receiving ratios nor
contribution ratios could reach the ideal values. We define the
contribution efficiency as the ratio of the number of non-duplicated
successful chunks broadcasts to the number of the total sent chunks
including failed ones in the community. Fig.~\ref{fig:contreff}
shows the contribution efficiencies with different community sizes
and WLAN data rates. We find that the contribution efficiencies
almost remain at about $60\%$ to $80\%$. Efficiency decreases in some
cases which we think is caused by our preference of high fetching
speed in deferred time configuration. When the available WLAN data
rate is high, the selected deferred time is small to get greater
speed-up, which may increase the probability of packet collisions in
the WLAN and result in lower contribution efficiency.

\subsection{Playback Performance for Live Streaming}
For the live streaming application, we evaluate the playback performances of
the peer clients in a community that are fetching a live stream whose playback rate
is $1024$Kbps. The chosen playback rate is set rather high in order to investigate the
extreme condition in which WWAN bandwidth is not enough. Considering that
more and more HD-video streaming services emerge in the Internet and mobile devices
become more and more powerful, high playback rate can be possible in mobile live streaming
applications. Community size is tuned from 2 peers to 8 peers. The chunk sizes for both
streaming buffer and transmission are set to $4200$bytes.

\subsubsection{Continuity}
\begin{figure}
\centerline{\includegraphics[width=0.45\textwidth]{fig/conti2csize.pdf}}
\caption{Playback Continuity performance over Community Size with different chunk selection strategies.} \label{fig:conti}
\end{figure}

\begin{figure}
\centerline{\includegraphics[width=0.45\textwidth]{fig/conti2buf.pdf}}
\caption{Playback Continuity performance over different Random Window Size and Buffer Size
(Community Size $=4$, GFRR Strategy).} \label{fig:buf}
\end{figure}

We first look at the key metric of the live streaming performance, continuity. The continuity is represented
by the occupancy probability of the chunk closest to the playback deadline in the buffer. Similar to the results in the
file downloading case, \textit{C$5$} with proper chunk selection strategies can achieve a near optimal
result in continuity. As shown in Fig.~\ref{fig:conti}, the continuities of the peers with individual fetching
schemes are constrained by the bottleneck of WWAN link rate. With the collaborative fetching scheme in \textit{C$5$}, the continuities of peers can increase with the number of peers in the community and can
almost reach the calculated upper bound with proper chunk selection strategies.

Differing from the file downloading case, the chunk selection strategy plays a very important role in the live
streaming case. Fig.~\ref{fig:conti} also shows the continuity performances with different chunk selection
strategies. We investigated different combinations of chunk selection strategies for the fetching schemes
from the server and peers. We found that without the integration of random selection, neither rarest first nor
greedy nor their combinations can achieve an ideal result, even though they can get some performance
improvement from the collaborative fetching. The reason lies in the contribution efficiency. If all the peers
select chunks from the server or other peers using the same strategy, e.g., the rarest first strategy, the probability
that they have fetched the same chunks is high. As a result, the peer sharing ratio will be low and peers cannot benefit
much from the collaborative fetching. Integrated with the random strategy, peers are more likely to have
different chunks from the server, and thus it can improve the sharing ratio among peers. Consequently, the FRGR and GFRR strategies outperform other mixed strategies greatly, as shown in Fig.~\ref{fig:conti}.

Fig.~\ref{fig:buf} further illustrates the relationship between continuity performance and the random window size. We evaluated the continuity performance of $4$ peers collaboratively fetching a live stream with the GFRR strategy. The buffer size is tuned from $32$ to $64$ chunks and the random window size for the mixed chunk selection strategy is chosen from $4$ to $24$. As intuition suggests, continuity can increase with a larger buffer size and random window size. However, if the random window size is larger than half of the buffer size, increasing the random window size does not help to improve performance. On the other hand, a larger buffer size
will increase the space complexity, so we use $40$ as the buffer size and $16$ as the random window size
as the recommended parameters.

\subsubsection{Start-up Latency}
As shown in Fig.~\ref{fig:conti}, both the FRGR strategy and the
GFRR strategy can obtain a near optimal continuity. However, the GFRR
strategy significantly outperforms the FRGR strategy in another performance
metric, the \textit{start-up latency}, which is the time that a peer
should wait before starting playback. As long as all peers cooperate
by following the same chunk selection strategy and offering
downloading when requested, a peer may choose to start its own
playback independently without affecting other peers except itself.
But the best start-up latency for a newly arriving peer (with an empty
buffer) should be the time that the peer waits until its buffer has
reached steady state~\cite{p2pmodel}; therefore the start-up latency
$T_{lat}$ is expressed as:
\begin{equation}\label{eqstuplat}
T_{lat} = \Delta \sum_{i=1}^{n}p(i).
\end{equation}
After $\sum_{i=1}^{n}p(i)$ chunk sliding slots, the newly arriving
peer is expected to have acquired the same number of chunks as the
rest of the peers in steady state, which also equals
$\sum_{i=1}^{n}p(i)$.

Fig.~\ref{fig:startuplat} compares the average start-up latency
performances of the peers with different chunk selection strategies.
Although the GFR mixed strategy offers the lowest start-up latency,
it performs poorly in continuity as mentioned above. Taking both the
continuity and start-up latency performances into account, the GFRR
strategy achieves the best overall performance in the collaborative live
streaming fetching application.

\begin{figure}
\centering
\includegraphics[width=0.45\textwidth]{fig/startuplat.pdf}
\caption{Average Start-up Latency of the peers with different chunk
selection strategies.} \label{fig:startuplat}
\end{figure}

\subsubsection{Performance in the Asynchronously Starting up Case}
Fig.~\ref{fig:asystreaming} shows the continuity performances of the
peers over time in the asynchronously starting up case. Unlike the
file downloading case, all peers can achieve almost the same continuity
despite the difference in start-up time, which also proves our
previous point that \textit{C$5$} is more suitable for live
streaming applications.

\begin{figure}
\centering
\includegraphics[width=0.45\textwidth]{fig/asystream.pdf}
\caption{Continuity performances of the peers over time in the
asynchronously starting up case.} \label{fig:asystreaming}
\end{figure}

\subsection{Multiple Servers and Communities}
Finally, we investigate whether the number of servers that serve a
community and the number of communities that connect to one server
will impact the performance of \textit{C$5$}. From
Fig.~\ref{fig:multi}, we can find that with random chunk selection
on the server side, neither the number of servers nor the number of
communities affects the performance of fetching, which is almost the
same as the result with the ideal non-overlapped chunk selection method
in the servers. Thus we suggest applying the simple random chunk
selection method on the server side.

\begin{figure}
\centering
\includegraphics[width=0.45\textwidth]{fig/multisrv.pdf}
\caption{Comparison of the Content Fetching Time in cases with
Multiple Servers and Multiple Communities. (WLAN data rate$=24$Mbps;
Community Size$=8$ for each Community; Each server serves $4$
subscribers in the case with two servers.)} \label{fig:multi}
\end{figure}

\section{Discussions and Conclusion}
\subsection{Discussions}
We briefly discuss some other issues pertaining to \textit{C$5$}.

\begin{enumerate}
\item Energy Consumption: One more issue of \textit{C$5$} we have
not mentioned is that \textit{C$5$} may possibly save the mobile
devices' energy due to effective multicast and less long-distance
wireless communication with the cell tower. However, sending costs much
more power than receiving, which may counteract the benefit and
result in uncertainty. We will further investigate the energy
performance of \textit{C$5$} in future work.

\item Cost Saving: Although it is still not clear whether
\textit{C$5$} can save energy or not, we can definitely conclude
that \textit{C$5$} can save WWAN traffic. Thus the subscribers may
benefit from this and save Internet traffic cost.

\item Potential Beneficial Applications: While we focus on content
fetching in this paper, there can be many other Internet
applications that can benefit from the \textit{C$5$} architecture,
such as network games. We summarize this kind of potential beneficial
application as one that reflects subscribers' common
characteristics (colocation and same content). We believe more
applications of this kind will emerge as the mobile Internet
penetrates deeper into human life.
\end{enumerate}

\subsection{Conclusion}
In this paper, we present \textit{C$5$}, an architecture of
collaborative content fetching for a group of mobile users that have
common characteristics. \textit{C$5$} enables nodes in close
vicinity to use the high-speed WLAN to form a group, stripe traffic
both from their own WWAN links and from community members using the
WLAN interfaces, and contribute their own data to requesting
members as well. \textit{C$5$} takes advantage of the broadcast
nature of the wireless channel to perform a MAC layer multicast when nodes
contribute their own data. \textit{C$5$} aims to use idle WLAN
interfaces for in-community communication and makes the traffic from
the WWAN links serve as many members as possible. Simulation
results show that \textit{C$5$} can speed up the content fetching in
a community and approach near-optimal performance.

% An example of a floating figure using the graphicx package.
% Note that \label must occur AFTER (or within) \caption.
% For figures, \caption should occur after the \includegraphics.
% Note that IEEEtran v1.7 and later has special internal code that
% is designed to preserve the operation of \label within \caption
% even when the captionsoff option is in effect. However, because
% of issues like this, it may be the safest practice to put all your
% \label just after \caption rather than within \caption{}.
%
% Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
% option should be used if it is desired that the figures are to be
% displayed while in draft mode.
%
%\begin{figure}[!t]
%\centering
%\includegraphics[width=2.5in]{myfigure}
% where an .eps filename suffix will be assumed under latex,
% and a .pdf suffix will be assumed for pdflatex; or what has been declared
% via \DeclareGraphicsExtensions.
%\caption{Simulation Results}
%\label{fig_sim}
%\end{figure}

% Note that IEEE typically puts floats only at the top, even when this
% results in a large percentage of a column being occupied by floats.


% An example of a double column floating figure using two subfigures.
% (The subfig.sty package must be loaded for this to work.)
% The subfigure \label commands are set within each subfloat command, the
% \label for the overall figure must come after \caption.
% \hfil must be used as a separator to get equal spacing.
% The subfigure.sty package works much the same way, except \subfigure is
% used instead of \subfloat.
%
%\begin{figure*}[!t]
%\centerline{\subfloat[Case I]\includegraphics[width=2.5in]{subfigcase1}%
%\label{fig_first_case}}
%\hfil
%\subfloat[Case II]{\includegraphics[width=2.5in]{subfigcase2}%
%\label{fig_second_case}}}
%\caption{Simulation results}
%\label{fig_sim}
%\end{figure*}
%
% Note that often IEEE papers with subfigures do not employ subfigure
% captions (using the optional argument to \subfloat), but instead will
% reference/describe all of them (a), (b), etc., within the main caption.


% An example of a floating table. Note that, for IEEE style tables, the
% \caption command should come BEFORE the table. Table text will default to
% \footnotesize as IEEE normally uses this smaller font for tables.
% The \label must come after \caption as always.
%
%\begin{table}[!t]
%% increase table row spacing, adjust to taste
%\renewcommand{\arraystretch}{1.3}
% if using array.sty, it might be a good idea to tweak the value of
% \extrarowheight as needed to properly center the text within the cells
%\caption{An Example of a Table}
%\label{table_example}
%\centering
%% Some packages, such as MDW tools, offer better commands for making tables
%% than the plain LaTeX2e tabular which is used here.
%\begin{tabular}{|c||c|}
%\hline
%One & Two\\
%\hline
%Three & Four\\
%\hline
%\end{tabular}
%\end{table}


% Note that IEEE does not put floats in the very first column - or typically
% anywhere on the first page for that matter. Also, in-text middle ("here")
% positioning is not used. Most IEEE journals/conferences use top floats
% exclusively. Note that, LaTeX2e, unlike IEEE journals/conferences, places
% footnotes above bottom floats. This can be corrected via the \fnbelowfloat
% command of the stfloats package.

% conference papers do not normally have an appendix


% use section* for acknowledgement
%\section*{Acknowledgment}
%We would like to thank ...


% trigger a \newpage just before the given reference
% number - used to balance the columns on the last page
% adjust value as needed - may need to be readjusted if
% the document is modified later
%\IEEEtriggeratref{8}
% The "triggered" command can be changed if desired:
%\IEEEtriggercmd{\enlargethispage{-5in}}

% references section

% can use a bibliography generated by BibTeX as a .bbl file
% BibTeX documentation can be easily obtained at:
% http://www.ctan.org/tex-archive/biblio/bibtex/contrib/doc/
% The IEEEtran BibTeX style support page is at:
% http://www.michaelshell.org/tex/ieeetran/bibtex/
%\bibliographystyle{IEEEtran}
% argument is your BibTeX string definitions and bibliography database(s)
%\bibliography{IEEEabrv,../bib/paper}
%
% <OR> manually copy in the resultant .bbl file
% set second argument of \begin to the number of references
% (used to reserve space for the reference number labels box)
%\begin{thebibliography}{1}
%
%\bibitem{IEEEhowto:kopka}
%H.~Kopka and P.~W. Daly, \textit{A Guide to \LaTeX}, 3rd~ed.\hskip 1em plus
%  0.5em minus 0.4em\relax Harlow, England: Addison-Wesley, 1999.
%
%\end{thebibliography}

%\nocite{*}
%input "ref/tpds.bib"
\bibliographystyle{IEEEtran}
\bibliography{ref/tpds}


\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{bio/tulai.pdf}}]{Lai~Tu}
received the B.S. degree in Communication Engineering and the Ph.D. degree in
Information and Communication Engineering from Huazhong University
of Science and Technology, Wuhan, China, in 2002 and 2007,
respectively. From 2007/7 to 2008/12, he worked as a postdoctoral fellow
in the Dept. of EIE at Huazhong University of Science and
Technology. He is now working as a postdoctoral researcher in the Dept.
of CSIE at National Cheng Kung University, Taiwan. His research areas
include network communication, mobile computing and networking.
\end{IEEEbiography}

\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{bio/huangcm.pdf}}]{Chung-Ming~Huang}
received the B.S. degree in electrical engineering from National
Taiwan University in 1984/6, and the M.S. and Ph.D. degrees in
Computer and Information Science from the Ohio State University in
1988/12 and 1991/6, respectively. Currently, he is a Distinguished Professor in the
Dept. of Computer Science and Information Engineering, National
Cheng Kung University, Taiwan, R.O.C. He also serves as the director
of the Promotion Center for the Telematics Consortium (PCTC),
Ministry of Education (MOE), Taiwan. He has published more than 200 refereed journal and
conference papers on wireless and mobile communication protocols,
interactive multimedia systems, audio and video streaming and formal
modeling of communication protocols. His research interests include
wireless and mobile network protocol design and analysis, media
processing and streaming, web technologies, and network applications
and services.
\end{IEEEbiography}

% that's all folks
\end{document}
