\documentclass[11pt]{article}

\usepackage{algorithm}
\usepackage{algorithmic}
% NOTE(review): `subfigure` is deprecated (modern replacement: `subcaption`),
% but switching would require updating every \subfigure call in the input files.
\usepackage{subfigure}
% NOTE(review): `epsfig` is obsolete (modern replacement: `graphicx`); kept
% because the body may still call \epsfig. Duplicate loads of epsfig/color
% that appeared below were removed -- LaTeX silently ignored them anyway.
\usepackage{epsfig,amsthm,amsmath,color,amsfonts}
\usepackage{framed}
%\usepackage{epsf}
\usepackage{psfrag}
% NOTE(review): best practice is to load hyperref last in the preamble.
\usepackage{hyperref}
%\usepackage{pdfpages}
\usepackage{fullpage}
%\usepackage[usenames,dvipsnames]{color}

%\usepackage[latin1]{inputenc}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows}

% Text-block dimensions (these override what fullpage sets).
%\setlength{\textheight}{9.4in} \setlength{\textwidth}{6.55in}
\setlength{\textheight}{9.2in} \setlength{\textwidth}{6.55in}
%\setlength{\topmargin}{0in}

%\voffset=-0.9in \hoffset=-0.8in

% Theorem-like environments; all share one counter, numbered per section.
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{observation}[theorem]{Observation}
\newtheorem{remark}[theorem]{Remark}
\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
% FIX: "invariant" is not a valid amsthm style name (only plain/definition/remark
% exist), so \theoremstyle{invariant} raised "Unknown theoremstyle" and fell back
% to plain. The environment is a definition-style one, matching the two above.
\theoremstyle{definition}\newtheorem{invariant}[theorem]{Invariant}
%\theoremstyle{theorem}\newtheorem{observation}[theorem]{Observation}

\newcommand{\comment}[1]{}  % swallow its argument (quick way to hide text)
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
\newcommand{\theoremref}[1]{(\ref{#1})}
% Manual proof environment ending with the \QED tombstone.
% (\textbf replaces the obsolete {\bf ...} form; same output.)
\newenvironment{proof1}{\noindent \mbox{}\textbf{Proof:}}{\QED}

% Short math macros.
\def\m{{\mathrm{min}}}      % was {\rm min}: \rm is obsolete in LaTeX2e; same output
%\def\m{\bar{m}}
\def\eps{{\epsilon}}
\def\half{{\frac{1}{2}}}    % was {1\over 2}: \over is a plain-TeX primitive; same output
\def\third{{\frac{1}{3}}}
\def\quarter{{\frac{1}{4}}}
\def\polylog{\operatorname{polylog}}
\def\poly{\operatorname{poly}}
\newcommand{\ignore}[1]{}   % swallow argument (hide draft text)
\newcommand{\eat}[1]{}      % same as \ignore
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}
% Paper-specific notation: diameter, number of paths, path length, the graph family.
\newcommand{\diam}{\Lambda}
\newcommand{\numpaths}{\Gamma}
\newcommand{\pathlength}{L}
\newcommand{\graph}{G(\Gamma, \kappa, \Lambda)}
\newcommand{\algorithmsize}{}  % no-op; was declared with a spurious [0] arg count


%---------------------
%  SPACE SAVERS
%---------------------
%\usepackage{times}
%\usepackage[small,compact]{titlesec}
%\usepackage[small,it]{caption}
%\smallskip

% Original compact-list implementation, kept (disabled) for reference; the
% active definitions below alias plain itemize so spacing can be switched
% back in one place if the page budget gets tight again.
%\newcommand{\squishlist}{
% \begin{list}{$\bullet$}
%  { \setlength{\itemsep}{0pt}
%     \setlength{\parsep}{3pt}
%     \setlength{\topsep}{3pt}
%     \setlength{\partopsep}{0pt}
%     \setlength{\leftmargin}{1.5em}
%     \setlength{\labelwidth}{1em}
%     \setlength{\labelsep}{0.5em} } }
%\newcommand{\squishend}{
%  \end{list}  }

% Currently: \squishlist ... \squishend is just an ordinary itemize.
\newcommand{\squishlist}{\begin{itemize}}
\newcommand{\squishend}{\end{itemize}}

%---------------------------------
% FOR MOVING PROOFS TO APPENDIX
% (disabled: requires the `answers' package)
%\usepackage{answers}
%%\usepackage[nosolutionfiles]{answers}
%\Newassociation{movedProof}{MovedProof}{movedProofs}
%\renewenvironment{MovedProof}[1]{\begin{proof}}{\end{proof}}

% Probability / information-theory operators.
% (\mathrm replaces the obsolete {\rm ...} form; identical output.)
\def\e{{\mathrm{E}}}        % expectation
\def\var{{\mathrm{Var}}}    % variance
\def\ent{{\mathrm{Ent}}}    % entropy
% (duplicate \def\eps removed -- it is already defined earlier in the preamble)
\def\lam{{\lambda}}
\def\bone{{\mathbf{1}}}     % all-ones / indicator; was {\bf 1} (obsolete \bf)


% Communication-complexity problem names.
% (\texttt/\textsc replace \mbox{\tt ...}/\mbox{\sc ...}; identical output,
% and they remain usable in math mode just as \mbox was.)
\def\EQ{\texttt{EQ}}
\def\EQfunc{\texttt{eq}}
\def\DISJ{\texttt{DISJ}}
\def\DISJfunc{\texttt{disj}}
\def\PC{\textsc{pc}}        % pointer chasing

\def\LAB{\texttt{MAX\_Label}}

% Calligraphic letter shortcuts.
\def\cS{\mathcal{S}}
\def\cP{\mathcal{P}}
\def\cH{\mathcal{H}}
\def\cA{\mathcal{A}}
\def\cT{\mathcal{T}}
\def\cD{\mathcal{D}}
\def\cC{\mathcal{C}}
\def\cG{\mathcal{G}}
\def\cJ{\mathcal{J}}
\def\cF{\mathcal{F}}
\def\hC{\hat{\mathcal{C}}}
\def\tO{{\tilde{O}}}        % soft-O notation
% Inputs of the two communication parties (sender s, receiver r).
\def\INPUT{\bar{x}^s,\bar{x}^r}
\def\INPUTPrime{\bar{x'}^s,\bar{x'}^r}
\def\INPUTS{\bar{x}^s}
\def\INPUTR{\bar{x}^r}
\def\indVarS{X^s}
\def\indVarR{X^r}
\def\partyA{P_A}
\def\partyB{P_B}
\def\sminus{\smallsetminus}
\def\figspace{3}            % spacing constant used by the figures

\def\property{\mathcal{P}}

% Author-annotation macros. The \iffalse...\fi block holds the "visible"
% versions (margin mark + signed footnote) used while drafting; the active
% definitions below are no-ops, silencing all annotations for the final copy.
% To re-enable annotations, remove this \iffalse/\fi pair and uncomment the
% \iffalse/\fi pair around the no-op definitions.
\iffalse

\def\danupon#1{\marginpar{$\leftarrow$\fbox{D}}\footnote{$\Rightarrow$~{\sf #1 --Danupon}}}
\def\atish#1{\marginpar{$\leftarrow$\fbox{A}}\footnote{$\Rightarrow$~{\sf #1 --Atish}}}
\def\gopal#1{\marginpar{$\leftarrow$\fbox{G}}\footnote{$\Rightarrow$~{\sf #1 --Gopal}}}
\def\note#1{\footnote{$\Rightarrow$~{\sf #1 --Just a note}}}

\fi
%\iffalse

% Active (silent) versions: every annotation expands to nothing.
\def\danupon#1{}
\def\atish#1{}
\def\gopal#1{}
\def\note#1{}

%\fi

\begin{document}

\begin{titlepage}
\title{A tight unconditional lower bound on distributed random walk computation}
% FIX: \thanks is attached directly to each name -- the space that preceded it
% produced a spurious gap before the footnote marker in the author line.
\author{
Danupon Nanongkai\thanks{College of Computing, Georgia Institute of Technology, Atlanta, GA 30332, USA. \hbox{E-mail}:~\url{danupon@cc.gatech.edu}}
%
\and
Atish Das Sarma\thanks{Google Research, Google Inc., Mountain View, USA. \hbox{E-mail}:~\url{dassarma@google.com}.}
%
\and
Gopal Pandurangan\thanks{Division of Mathematical
Sciences, Nanyang Technological University, Singapore 637371 and Department of Computer Science, Brown University, Providence, RI 02912, USA.  \hbox{E-mail}:~\url{gopalpandurangan@gmail.com}. Supported in part by NSF grant CCF-1023166 and by a grant from the United States-Israel Binational Science
Foundation (BSF).}
}
\date{}  % suppress the date

\maketitle

\thispagestyle{empty}

\begin{abstract}
We consider the problem of performing a random walk in a distributed network. Given bandwidth constraints, the goal of the problem is to minimize the number of rounds required to obtain a random walk sample. Das Sarma et al. [PODC'10] show that a random walk of length $\ell$ on a network of diameter $D$ can be performed in $\tilde O(\sqrt{\ell D}+D)$ time. A major question left open is whether there exists a faster algorithm, especially whether the multiplication of $\sqrt{\ell}$ and $\sqrt{D}$ is necessary.

In this paper, we show a tight unconditional lower bound on the time complexity of distributed random walk computation. Specifically, we show that for any $n$, $D$, and $D\leq \ell \leq (n/(D^3\log n))^{1/4}$, performing a random walk of length $\Theta(\ell)$ on an $n$-node network of diameter $D$ requires $\Omega(\sqrt{\ell D}+D)$ time. This bound is {\em unconditional}, i.e., it holds for any (possibly randomized) algorithm. To the best of our knowledge, this is the first lower bound in which the diameter plays a role as a multiplicative factor. Our bound shows that the algorithm of Das Sarma et al. is time optimal.

Our proof technique introduces a new connection between {\em bounded-round} communication complexity and distributed algorithm lower bounds with $D$ as a trade-off parameter, strengthening the previous study by Das Sarma et al. [STOC'11]. In particular, we make use of the bounded-round communication complexity of the pointer chasing problem. Our technique can be of independent interest and may be useful in showing non-trivial lower bounds on the complexity of other fundamental distributed computing problems.
\end{abstract}


\noindent {\bf Keywords:} Random walks, Distributed algorithms, Lower bounds, Communication complexity. \\

%\noindent {\bf Format:} Regular Presentation.

%\noindent {\bf Eligible for Best Student Paper Award:} Student recommended for award - Danupon Nanongkai.

\end{titlepage}

% Paper sections live in separate files. \input (not \include) is used so no
% page break is forced between sections.
\input{introduction}
\input{simulation_theorem}
\input{pointer_chasing}
\input{main_theorem}
\input{conclusion}


\newpage
\bibliographystyle{plain}
\bibliography{randomwalk-lowerbound}

% Appendix with omitted proofs, currently disabled.
%\newpage
%\appendix
%\section*{Appendix}
%\input{omitted_proofs}


\end{document}

