% This is "sig-alternate.tex" V1.9 April 2009
% This file should be compiled with V2.4 of "sig-alternate.cls" April 2009
%
% This example file demonstrates the use of the 'sig-alternate.cls'
% V2.4 LaTeX2e document class file. It is for those submitting
% articles to ACM Conference Proceedings WHO DO NOT WISH TO
% STRICTLY ADHERE TO THE SIGS (PUBS-BOARD-ENDORSED) STYLE.
% The 'sig-alternate.cls' file will produce a similar-looking,
% albeit, 'tighter' paper resulting in, invariably, fewer pages.
%
% ----------------------------------------------------------------------------------------------------------------
% This .tex file (and associated .cls V2.4) produces:
%       1) The Permission Statement
%       2) The Conference (location) Info information
%       3) The Copyright Line with ACM data
%       4) NO page numbers
%
% as against the acm_proc_article-sp.cls file which
% DOES NOT produce 1) thru' 3) above.
%
% Using 'sig-alternate.cls' you have control, however, from within
% the source .tex file, over both the CopyrightYear
% (defaulted to 200X) and the ACM Copyright Data
% (defaulted to X-XXXXX-XX-X/XX/XX).
% e.g.
% \CopyrightYear{2007} will cause 2007 to appear in the copyright line.
% \crdata{0-12345-67-8/90/12} will cause 0-12345-67-8/90/12 to appear in the copyright line.
%
% ---------------------------------------------------------------------------------------------------------------
% This .tex source is an example which *does* use
% the .bib file (from which the .bbl file % is produced).
% REMEMBER HOWEVER: After having produced the .bbl file,
% and prior to final submission, you *NEED* to 'insert'
% your .bbl file into your source .tex file so as to provide
% ONE 'self-contained' source file.
%
% ================= IF YOU HAVE QUESTIONS =======================
% Questions regarding the SIGS styles, SIGS policies and
% procedures, Conferences etc. should be sent to
% Adrienne Griscti (griscti@acm.org)
%
% Technical questions _only_ to
% Gerald Murray (murray@hq.acm.org)
% ===============================================================
%
% For tracking purposes - this is V1.9 - April 2009

\documentclass{sig-alternate}
\usepackage{algorithm}
\usepackage{algorithmic}

\usepackage{epsfig,amsmath,color,amsfonts}
% (duplicate \usepackage{epsfig,color} removed -- both packages are
% already loaded by the line above)
% NOTE(review): epsfig and subfigure are obsolete (graphicx and
% subcaption are their modern replacements), but they are kept since
% the class/body may rely on their commands -- confirm before swapping.
\usepackage{subfigure}
\usepackage{url}
%\usepackage{mdwlist}
%\usepackage{epsfig}

%\usepackage{amsthm}
%\usepackage{amsmath,color, amsfonts}
%\usepackage{epsfig,color}
%\newcommand{\xxx}[1]{\textcolor{red}{#1}}
%%\usepackage{fullpage}
%\usepackage{framed}
%%\usepackage{epsf}
%%\usepackage{hyperref}

%%\setlength{\textheight}{9.4in} \setlength{\textwidth}{6.55in}
%\setlength{\textheight}{9.2in} \setlength{\textwidth}{6.55in}
%%\setlength{\topmargin}{0in}

%\voffset=-0.9in
%\hoffset=-0.8in

% Theorem-like environments. All share the "theorem" counter, which is
% reset per section, so e.g. Lemma 2.3 follows Theorem 2.2.
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}

%\newtheorem{theorem}{Theorem}[section]
%%\newtheorem{definition}[theorem]{Definition}
%\newtheorem{corollary}[theorem]{Corollary}
%\newtheorem{proposition}[theorem]{Proposition}
%\newtheorem{lemma}[theorem]{Lemma}
%\newtheorem{claim}[theorem]{Claim}
%%\newtheorem{example}[theorem]{Example}
%\newtheorem{remark}[theorem]{Remark}
%\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
%\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
%\theoremstyle{observation}\newtheorem{observation}[theorem]{Observation}

% Argument-swallowing helpers: \comment, \ignore and \eat all discard
% their argument; the three names are kept for existing call sites.
\newcommand{\comment}[1]{}
\newcommand{\ignore}[1]{}
\newcommand{\eat}[1]{}

% End-of-proof marker: right-flushed small filled rectangle.
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
% Parenthesized reference to a theorem-like label.
\newcommand{\theoremref}[1]{(\ref{#1})}
% Unnumbered inline proof environment, terminated by \QED.
\newenvironment{proof1}{\noindent \mbox{}{\bf Proof:}}{\QED}

% Math shorthands. \newcommand* (instead of the plain-TeX \def) errors
% out on accidental redefinition; \frac replaces the deprecated \over,
% and \mathrm replaces the deprecated {\rm ...}.
\newcommand*{\m}{\mathrm{min}}% consider the built-in operator \min instead
%\def\m{\bar{m}}
\newcommand*{\eps}{\epsilon}
\newcommand*{\half}{\frac{1}{2}}
\newcommand*{\third}{\frac{1}{3}}
\newcommand*{\quarter}{\frac{1}{4}}
\DeclareMathOperator{\polylog}{polylog}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}

% Hook for resizing algorithm listings; currently a no-op.
\newcommand{\algorithmsize}{}

%---------------------
%  SPACE SAVERS
%---------------------

%\usepackage{times}
%\usepackage[small,compact]{titlesec}
%\usepackage[small,it]{caption}

% Compact bulleted list (space saver): like itemize but with tightened
% vertical spacing and narrower margins. Use as \squishlist ... \squishend.
\newcommand{\squishlist}{
 \begin{list}{$\bullet$}
  { \setlength{\itemsep}{0pt}
     \setlength{\parsep}{3pt}
     \setlength{\topsep}{3pt}
     \setlength{\partopsep}{0pt}
     \setlength{\leftmargin}{1.5em}
     \setlength{\labelwidth}{1em}
     \setlength{\labelsep}{0.5em} } }
% Closes a \squishlist.
\newcommand{\squishend}{
  \end{list}  }


%\newcommand{\squishlist}{
% \begin{itemize*}}
%\newcommand{\squishend}{
%  \end{itemize*}  }


%---------------------------------
% FOR MOVING PROOFS TO APPENDIX
%\usepackage{answers}
%%\usepackage[nosolutionfiles]{answers}
%\Newassociation{movedProof}{MovedProof}{movedProofs}
%\renewenvironment{MovedProof}[1]{\begin{proof}}{\end{proof}}

% Probability-theory shorthands (upright per convention); \mathrm
% replaces the deprecated {\rm ...} and \newcommand* replaces \def.
\newcommand*{\e}{\mathrm{E}}% expectation
\newcommand*{\var}{\mathrm{Var}}% variance
\newcommand*{\ent}{\mathrm{Ent}}% entropy
% (duplicate \def\eps{{\epsilon}} removed -- \eps is already defined
% identically earlier in the preamble)
\newcommand*{\lam}{\lambda}
\newcommand*{\bone}{{\bf 1}}% all-ones / indicator symbol


%First definitions. Use these when you want to read comments.
%\def\prasad#1{\marginpar{$\leftarrow$\fbox{P}}\footnote{$\Rightarrow$~{\sf #1 --Prasad}}}
%\def\danupon#1{\marginpar{$\leftarrow$\fbox{D}}\footnote{$\Rightarrow$~{\sf #1 --Danupon}}}
%\def\gopal#1{\marginpar{$\leftarrow$\fbox{G}}\footnote{$\Rightarrow$~{\sf #1 --Gopal}}}
%\def\atish#1{\marginpar{$\leftarrow$\fbox{A}}\footnote{$\Rightarrow$~{\sf #1 --Atish}}}
%
%Second definitions. Use these to remove all comments.
% Active definitions discard the argument (comments hidden in the
% output); \newcommand (rather than \def) errors on accidental clashes.
\newcommand{\prasad}[1]{}
\newcommand{\danupon}[1]{}
\newcommand{\gopal}[1]{}
\newcommand{\atish}[1]{}


\begin{document}
%
% --- Author Metadata here ---
\conferenceinfo{PODC'10,} {July 25--28, 2010, Zurich, Switzerland.}
\CopyrightYear{2010} \crdata{978-1-60558-888-9/10/07}
\clubpenalty=10000 \widowpenalty = 10000
%\CopyrightYear{2007} % Allows default copyright year (20XX) to be over-ridden - IF NEED BE.
%\crdata{0-12345-67-8/90/01}  % Allows default copyright data (0-89791-88-6/97/05) to be over-ridden - IF NEED BE.
% --- End of Author Metadata ---



\title{Efficient Distributed Random Walks with Applications}
%\subtitle{[Extended Abstract]
%\titlenote{A full version of this paper is available in \cite{fullversion}}}
%
% You need the command \numberofauthors to handle the 'placement
% and alignment' of the authors beneath the title.
%
% For aesthetic reasons, we recommend 'three authors at a time'
% i.e. three 'name/affiliation blocks' be placed beneath the title.
%
% NOTE: You are NOT restricted in how many 'rows' of
% "name/affiliations" may appear. We just ask that you restrict
% the number of 'columns' to three.
%
% Because of the available 'opening page real-estate'
% we ask you to refrain from putting more than six authors
% (two rows with three columns) beneath the article title.
% More than six makes the first-page appear very cluttered indeed.
%
% Use the \alignauthor commands to handle the names
% and affiliations for an 'aesthetic maximum' of six authors.
% Add names, affiliations, addresses for
% the seventh etc. author(s) as the argument for the
% \additionalauthors command.
% These 'additional authors' will be output/set for you
% without further effort on your part as the last section in
% the body of your article BEFORE References or any Appendices.



\numberofauthors{4} %  in this sample file, there are a *total*
%% of EIGHT authors. SIX appear on the 'first-page' (for formatting
%% reasons) and the remaining two appear in the \additionalauthors section.
%%
%
\author{
% You can go ahead and credit any number of authors here,
% e.g. one 'row of three' or two rows (consisting of one row of three
% and a second row of one, two or three).
%
% The command \alignauthor (no curly braces needed) should
% precede each author name, affiliation/snail-mail address and
% e-mail address. Additionally, tag each line of
% affiliation/address with \affaddr, and tag the
% e-mail address with \email.
%
% 1st. author
\alignauthor
Atish {Das Sarma}\\
       \affaddr{College of Computing, Georgia Institute of Technology, Atlanta, GA 30332, USA}\\
       %\affaddr{Georgia Institute of Technology}\\
       %\affaddr{Atlanta, GA 30332, USA}\\
       \email{atish@cc.gatech.edu}
%% 2nd. author
%\and
\alignauthor
Danupon Nanongkai\\
       \affaddr{College of Computing, Georgia Institute of Technology, Atlanta, GA 30332, USA}\\
       %\affaddr{Georgia Institute of Technology}\\
       %\affaddr{Atlanta, GA 30332, USA}\\
       \email{danupon@cc.gatech.edu}
%\and
%3rd.author
\alignauthor Gopal Pandurangan\thanks{Also affiliated with
Department of Computer Science, Brown University, Providence, RI
02912, USA. Supported in part by NSF grant
CCF-0830476}\\
       \affaddr{Division of Mathematical Sciences, Nanyang Technological University, Singapore 637371}\\
       %\affaddr{Nanyang Technological University}\\
       %\affaddr{Singapore 637371}\\
%      \affaddr{Department of Computer Science}\\
%       \affaddr{Brown University}\\
%       \affaddr{Providence, RI 02912, USA}\\
       \email{gopalpandurangan@gmail.com}
\and
% 4th. author
\alignauthor Prasad Tetali\thanks{Supported in part by NSF DMS
0701023 and NSF CCR 0910584}\\
       \affaddr{School of Mathematics and School of Computer Science, Georgia Institute of Technology, Atlanta, GA 30332, USA}\\
       %\affaddr{Georgia Institute of Technology}\\
       %\affaddr{Atlanta, GA 30332, USA}\\
       \email{tetali@math.gatech.edu}
}

%%We should use the style above but with 4 authors it looks ugly -- Danupon
%\author{Atish {Das Sarma} \thanks{College of Computing, Georgia Institute of Technology, Atlanta, GA 30332, USA.
%\hbox{E-mail}:~{\tt atish@cc.gatech.edu, danupon@cc.gatech.edu}}
%\and Danupon Nanongkai \addtocounter{footnote}{-1} \footnotemark
%\and  Gopal Pandurangan \thanks{Division of Mathematical Sciences,
%Nanyang Technological University, Singapore 637371 and Department of
%Computer Science, Brown University, Providence, RI 02912.
%\hbox{E-mail}:~{\tt gopalpandurangan@gmail.com}. Supported in part
%by NSF grant CCF-0830476.}   \and Prasad Tetali \thanks{School of
%Mathematics and School of Computer Science, Georgia Institute of
%Technology Atlanta, GA 30332, USA. \hbox{E-mail}:~{\tt
%tetali@math.gatech.edu}. Supported in part by NSF DMS 0701023 and
%NSF CCR 0910584.}}
%

\date{}

\maketitle

%\vspace*{-70pt}


\begin{abstract}
We  focus on  the problem of performing random walks efficiently in a distributed network. Given bandwidth constraints, the goal is to minimize the number of rounds required to obtain a random walk sample. We first present a fast sublinear time distributed algorithm for performing random walks whose time complexity is sublinear in the length of the walk. Our algorithm performs a random walk of length $\ell$  in $\tilde{O}(\sqrt{\ell D})$  rounds (with high probability) on an undirected  network, where $D$ is the diameter of the network. This improves over the previous best algorithm that ran in $\tilde{O}(\ell^{2/3}D^{1/3})$ rounds (Das Sarma et al., PODC 2009). We further extend our algorithms to efficiently perform $k$ independent random walks in   $\tilde{O}(\sqrt{k\ell D} + k)$ rounds. We then show that there is a fundamental difficulty in improving the dependence on $\ell$ any further by proving a lower bound of $\Omega(\sqrt{\frac{\ell}{\log \ell}} + D)$ under a general model of distributed random walk algorithms. Our random walk algorithms are useful in speeding up distributed algorithms for a variety of applications that use random walks as a subroutine. We present two main applications. First, we give a fast distributed algorithm for computing a random spanning tree (RST) in an arbitrary (undirected) network which runs in $\tilde{O}(\sqrt{m}D)$ rounds (with high probability; here $m$ is the number of edges). Our second application is a fast decentralized algorithm for estimating mixing time and related parameters of the underlying network. Our algorithm is fully decentralized and can serve as a building block in the design of topologically-aware networks.

\end{abstract}

\category{F.2.2}{Analysis of Algorithms and Problem
  Complexity}{\\Nonnumerical Algorithms and Problems}[computations on
discrete structures] \category{\\G.2.2}{Discrete Mathematics}{Graph
Theory}[graph algorithms] \category{\\G.2.2}{Discrete
Mathematics}{Graph Theory}[network problems]

\terms{Algorithms, Theory}


\keywords{Random walks, Random sampling, Decentralized computation,
Distributed algorithms, Random Spanning Tree, Mixing Time.}


\section{Introduction}
Random walks play a central role in computer science, spanning a
wide range of areas in both theory and practice. The focus  of this
paper is  random walks in networks, in particular, decentralized
algorithms for performing random walks in arbitrary networks. Random
walks are used as an integral subroutine in a wide variety of
network applications ranging from token management and load
balancing to search, routing, information propagation and gathering,
network topology construction and building random spanning trees
(e.g., see \cite{DNP09-podc} and the references therein). Random
walks  are also very useful in providing uniform and efficient
solutions to distributed control of dynamic networks \cite{BBSB04,
ZS06}.  Random walks  are local and lightweight and require little
index or state maintenance which make them especially attractive to
self-organizing dynamic networks such as Internet overlay and ad hoc
wireless networks.

A key purpose of random walks in  many of these network applications
is to perform  node sampling.  While the sampling requirements in
different applications vary, whenever a true sample is required from
a random walk of certain steps, typically all applications perform
the walk naively
--- by simply passing a token from one node to its neighbor: thus to
perform a random walk of length $\ell$ takes time linear in $\ell$.


In this paper, we  present a  sublinear  time (sublinear in $\ell$)
distributed  random walk sampling algorithm that is significantly
faster than the previous best result. Our algorithm runs in time
$\tilde{O}(\sqrt{\ell D})$ rounds.
  We then present  an almost matching lower bound that applies
to a general class  of distributed algorithms (our algorithm also
falls in this class). Finally, we present two key applications of
our algorithm. The first is a fast distributed algorithm for
computing a random spanning tree, a fundamental spanning tree
problem that has been studied widely in the classical setting (see
e.g., \cite{kelner-madry} and references therein). To the best of
our knowledge,  our algorithm gives the fastest known running time
in an arbitrary network.
 The second is devising efficient decentralized algorithms for computing
 key global metrics of the underlying network ---
 mixing time, spectral gap, and conductance. Such algorithms can be useful building
 blocks in the design of {\em topologically (self-)aware} networks, i.e., networks that can monitor and regulate themselves in a decentralized fashion. For example, efficiently computing the mixing time or the spectral gap allows the network to monitor connectivity and expansion properties of the network.


\subsection{Distributed Computing Model}
Consider an undirected, unweighted, connected $n$--node graph $G =
(V, E)$.  Suppose that every node (vertex) hosts a processor with
unbounded computational power, but with limited initial knowledge.
Specifically, assume that each node is associated with a distinct
identity number from the set $\{1, 2, . . . , n\}$. At the beginning
of the computation, each node $v$ accepts as input its own identity
number and the identity numbers of its neighbors in $G$. The node
may also accept some additional inputs as specified by the problem
at hand. The nodes are allowed to communicate through the edges of
the graph $G$. The communication is synchronous, and occurs in
discrete pulses, called {\em rounds}. In particular, all the nodes
wake up simultaneously at the beginning of round 1, and from this
point on the nodes always know the number of the current round. In
each round each node $v$ is allowed to send an arbitrary message of
size $O(\log n)$ through each edge $e = (v, u)$ that is adjacent to
$v$, and the message will arrive to $u$ at the end of the current
round. This is a standard model of distributed computation known as
the {\em CONGEST model} \cite{peleg} and has been attracting a lot
of research attention during the last two decades
(e.g., see \cite{peleg} and the references therein).%This is a  widely used  standard model
% to study distributed algorithms and captures the realistic notion that
%there is a bound on the amount of messages that can be sent through
%an edge in one time step  and hence captures the bandwidth
%constraints inherent
% in  real-world computer  networks \cite{peleg, PK09}.
 % (We note that if unbounded-size messages were allowed through every
%edge in each time step, then the problems addressed here can be
%trivially solved in $O(D)$ time by collecting all  the topological information %at
%one node, solving the problem locally, and then broadcasting the
%results back to all the nodes \cite{peleg}.)

There are several measures of efficiency of distributed algorithms,
but we will concentrate on one of them, specifically, {\em the
running time}, that is, the number of rounds of distributed
communication. (Note that the computation that is performed by the
nodes locally is ``free'', i.e., it does not affect the number of
rounds.)
%\atish{Should we mention here explicitly that we do not consider message complexity - the total number of messages exchanged. And admit that our algorithm is expensive in this aspect?}
 Many
fundamental network problems such as minimum spanning tree, shortest
paths, etc. have been addressed in this model (e.g., see
\cite{lynch, peleg, PK09}). In particular, there has been much
research into designing very fast distributed   approximation
algorithms (that are even faster at the cost of producing
sub-optimal solutions) for many of these  problems (see e.g.,
\cite{elkin-survey,dubhashi, khan-disc,khan-podc}).  Such algorithms
can be useful for large-scale resource-constrained and dynamic
networks where running time is crucial.
%This work addresses the
%problem of computing random walks in a time-efficient manner.



\subsection{Problem Statement, Motivation, and Related Work}
The basic problem we address is the following.
%
We are given an arbitrary undirected, unweighted, and connected
$n$--node network $G = (V,E)$ and a (source) node $s \in V$.
%
The goal is to devise a distributed algorithm such that, in the end,
$s$ outputs the ID of a node $v$ which is randomly picked according
to the probability that it is the destination of a random walk of
length $\ell$ starting at $s$. Throughout this paper, we assume the
standard (simple) random walk: in each step, an edge is taken from
the current node $x$ with probability proportional to $1/d(x)$ where
$d(x)$ is the degree of $x$. Our goal is to output a true  random
sample from the $\ell$-walk distribution starting from $s$.
%%%%%%%%
%\danupon{It doesn't matter whether it's SoD or DoS in this paper but
%SoD is more intuitive. This is
%what's written before:\\
% The goal is to devise a distributed algorithm
%such that, in the end, one node $v$ outputs the ID of $s$, where $v$
%is randomly picked according to the probability that it is the
%destination of a random walk of length $\ell$ starting at $s$ (the
%source node).}
%%%%%%%%%
%%%%%%%%%
%We want an algorithm that finishes in the smallest number of rounds.
%\atish{Do we need to define random walk here?}

For clarity, observe that the following naive algorithm solves the
above problem in $O(\ell)$ rounds:
%\danupon{Why do we need ``+D''?}
The walk of length $\ell$ is performed by sending a token for $\ell$
steps, picking a random neighbor with each step. Then, the
destination node $v$ of this walk sends its ID back (along the same
path) to the source for output. Our goal is to perform such sampling
with significantly less number of rounds, i.e., in time that is
sublinear in $\ell$.  On the other hand, we note that it can take
too much time (as much as $\Theta(|E|+D)$ time) in the CONGEST model
to collect all  the topological information at the source node (and
then computing the walk locally).

This problem was proposed in~\cite{DNP09-podc} under the name
\textit{Computing One Random Walk where Source Outputs Destination
(1-RW-SoD)}
%%%%%%%%%%%
%%%%%%%%%%%
%\danupon{Before: This problem is called as \textit{Computing One
%Random Walk where Destination Outputs Source (1-RW-DoS)}.}
%%%%%%%%%%%
%%%%%%%%%%
(for short, this problem will be simply called {\em Single Random
Walk} in this paper),
%
wherein the first sublinear time distributed algorithm was provided,
requiring $\tilde{O}(\ell^{2/3}D^{1/3})$ rounds ($\tilde{O}$ hides
$\polylog(n)$ factors); this improves over the naive $O(\ell)$
algorithm when the walk is long compared to the diameter (i.e.,
$\ell = \Omega(D \polylog n)$ where $D$ is the diameter of the
network). This was the first result to break past the inherent
sequential nature of random walks and beat
%improvement beyond
the naive $\ell$ round approach, despite the fact that random walks
have been used in distributed networks for long and in a wide
variety of applications.

There are two key motivations for obtaining sublinear time bounds.
The first is that in many algorithmic applications, walks of length
significantly greater than the network diameter are needed. For
example, this is necessary in both the  applications   presented
later in the paper, namely distributed computation of a random
spanning tree (RST) and  computation of mixing time. In the RST
algorithm, we need to perform a random walk of expected length
$O(mD)$ (where $m$ is the number of edges in the network). In
decentralized computation of mixing time, we need to perform walks
of length at least equal to the mixing time which can be
significantly larger than the diameter (e.g., in a random geometric
graph model \cite{MP}, a popular model for ad hoc networks, the
mixing time can be larger than the diameter by a factor of
$\Omega(\sqrt{n})$.) More generally, many real-world communication
networks  (e.g., ad hoc  networks and peer-to-peer networks) have
relatively small diameter, and random walks of length at least the
diameter are usually performed for many sampling applications, i.e.,
$\ell \gg D$. It should be noted that if the network is rapidly
mixing/expanding, which is sometimes the case in practice, then
sampling from walks of length $\ell \gg D$ is close to sampling from
the steady state (degree) distribution; this can be done in $O(D)$
rounds (note however, that this gives only an approximately close
sample, not the exact sample for that length). However, such an
approach fails when $\ell$ is smaller than the mixing time.

The second motivation is understanding the time complexity of
distributed random walks. Random walk is essentially a global
problem  which requires the algorithm to ``traverse" the entire
network. Classical ``global" problems include the minimum spanning
tree, shortest path etc. Network diameter is an inherent lower bound
for such problems. Problems of this type raise the basic question of
whether $n$ (or $\ell$, as is the case here) is the essential time bound, or
whether the network diameter $D$ is the inherent parameter. As pointed out in the
seminal work of \cite{peleg-mst}, in the latter case, it would be
desirable to design algorithms that have a better complexity for
graphs with low diameter.

%
%Before:\\
%\cite{DNP09-podc} presented the first non-trivial distributed
%algorithm
% for this problem that had a running time of
%$\tilde{O}(\ell^{2/3}D^{1/3})$ rounds on an undirected unweighted
%network, where $D$ is the diameter of the
%network ($\tilde{O}$ hides $\polylog(n)$ factors). When $\ell = \Omega(D \log n)$, this is an
%improvement over the naive $O(\ell)$ bound.   (It was shown that $\Omega(\min\{D, \ell\})$ is a lower bound and hence in general we cannot have a running time faster than the diameter of the graph.)
%This was the first improvement beyond the naive $\ell$ round approach, despite the fact that random walks have been used in distributed networks for long and in a wide variety of applications.
%
%It was further conjectured in \cite{DNP09-podc} that the true number of rounds for this problem is $\tilde O(\sqrt{\ell D})$.
%\danupon{Actually, this is conjectured for 1-RW-DoS.}
\iffalse \danupon{I removed the following paragraph which seems to
be repetitive and I don't
see why it is here:\\
The work of \cite{DNP09-podc} raised two important questions: (1)
Can we devise even faster algorithms for the above problem? The
paper conjectured that it might be possible to  obtain a running
time of $\tilde{O}(\sqrt{\ell D})$; (2) $D$ is an easy lower bound
on the running time (\cite{DNP09-podc}), but  are their non-trivial
lower bounds for this problem?} \fi

The high-level idea used in the $\tilde{O}(\ell^{2/3}D^{1/3})$-round
algorithm in \cite{DNP09-podc} is to ``prepare'' a few short walks
in the beginning (executed in parallel) and then carefully stitch
these walks together later as necessary.
%
The same general approach was introduced in~\cite{AtishGP08} to find
random walks in data streams with the main motivation of finding
PageRank.
%
However, the two models have very different constraints and
motivations and hence the subsequent techniques used in
\cite{DNP09-podc} and \cite{AtishGP08} are very different.
%


%
%%%%%%%%
%%%%%%%%
%
%\danupon{Before:\\
%The first, and only work prior to~\cite{DNP09-podc} that uses the
%same general approach is the  paper of Das Sarma et
%al.~\cite{AtishGP08}. They consider the problem of finding random
%walks in data streams with the main motivation of finding PageRank.
%The same general idea of stitching together short walks is used.
%They consider the model where the graph is too big to store in main
%memory, and the algorithm has {\em streaming} access to the edges of
%the graph while maintaining limited storage. They show how to
%perform $\ell$ length random walks in about $\sqrt{\ell}$ passes
%over the data. This improves upon the naive $\ell$ pass approach and
%thereby leads to improved algorithms for estimating PageRank
%vectors. The distributed setting considered in this paper has very
%different constraints and motivations from the streaming setting and
%calls for new techniques.}


%Besides our approach of speeding up the random walk itself, one
%related motivation is to reduce the {\it cover time}. Recently, Alon
%et. al.~\cite{AAKKLT} show that performing several random walks in
%parallel reduces the cover time in various types of graphs. They
%assert that the problem with performing random walks is often the
%latency.
%
Recently, Sami and Twigg~\cite{ST08} consider lower bounds on the
communication complexity of computing the stationary distribution of
random walks in a network. Although their problem is related to
our problem, the lower bounds obtained do not imply anything in our
setting. Other recent works involving multiple random walks in
different settings include Alon et~al.~\cite{AAKKLT}, Els{\"a}sser
et~al.~\cite{ElsasserS09}, and Cooper et~al.~\cite{frieze}.
%
%Also recently, Cooper et al. \cite{frieze} show various results
%related to multiple random walks on random regular graphs.

\subsection{Our Results}

\squishlist
%
\item {\bf A Fast Distributed Random Walk Algorithm:} We present a sublinear, almost time-optimal, distributed algorithm for the single random walk
problem in arbitrary networks that runs in  time
$\tilde{O}(\sqrt{\ell D})$, where $\ell$ is the length of the walk
(cf. Section \ref{sec:upperbound}). This is a significant
improvement over the naive $\ell$-round algorithm for $\ell =
\Omega(D)$ as well as  over the previous best running time of
$\tilde{O}(\ell^{2/3}D^{1/3})$  \cite{DNP09-podc}. The dependence on
$\ell$ is reduced from $\ell^{2/3}$ to $\ell^{1/2}$.
%(which is optimal).
%In such cases, our algorithm finishes in $\tilde{O}(\ell^{1/2})$ rounds,
%significantly better than the previous best bound of $\tilde{O}(\ell^{2/3})$.
%Gopal, commented above sentence again based on Prasad's suggestion.

%\iffalse

Our algorithm in this paper uses an approach similar to that of
\cite{DNP09-podc} but  exploits certain key properties of random
walks  to design an even faster sublinear time algorithm. Our
algorithm is randomized (Las Vegas type, i.e., it always  outputs
the correct result, but the running  time claimed is with high
probability) and is conceptually simpler compared to the
$\tilde{O}(\ell^{2/3}D^{1/3})$-round algorithm. While the previous
(slower) algorithm \cite{DNP09-podc} applies to the more general
Metropolis-Hastings walk, in this work we focus primarily on the
simple random walk for the sake of obtaining the best possible
bounds in this commonly used setting.
%
%However, whereas, the present algorithm works for only for the standard random walk, the previous slower algorithm applies also to a more general Metropolis-Hastings type random walk \cite{DNP09-podc}.

   One of the key ingredients in the improved algorithm is proving a bound on the number of times any node is visited in an $\ell$-length walk, for any length $\ell = O(m^2)$. We show that w.h.p. any node $x$ is visited at most $\tilde{O}(d(x)\sqrt{\ell})$ times, in an $\ell$-length walk from any starting node ($d(x)$ is the degree of $x$).  We then show that if only certain $\ell/\lambda$ special points of the walk (called {\em connector points}) are observed,
%   (where we spread these {\em special} connector points evenly and randomly perturb them slightly),
then any node is observed only $\tilde{O}(d(x)\sqrt{\ell}/\lambda)$
times. The algorithm starts with all nodes performing short walks
(of length uniformly random in the range $\lambda$ to $2\lambda$ for
appropriately chosen $\lambda$) efficiently simultaneously; here the
randomly chosen lengths play a crucial role in arguing about a
suitable spread of the connector points.   Subsequently, the
algorithm begins at the source and carefully stitches these walks
together till $\ell$ steps are completed.
%If all short walks from a node are exhausted (a low probability event), then an efficient approach to sample several walks from one source is used.
%If the low probability event of a node being visited too many times occurs, then an efficient algorithm to generate several shorts walks from a single node is used.  %(Section~\ref{sec:one_walk_DoS}.)
%
%\fi

We also extend our approach to give algorithms for computing $k$ random walks
(from any $k$ sources
 ---not necessarily distinct) in $\tilde O\left(\min(\sqrt{k\ell D}+k, k+\ell)\right)$ rounds. Computing $k$ random
walks is useful in many applications such as the one we present
below on decentralized computation of mixing time and related
parameters. While the main requirement of our algorithms is to just
obtain the random walk samples (i.e. the end point of the $\ell$
step walk), our algorithms can regenerate the entire walks such that
each node knows its position(s) among the $\ell$ steps. Our
algorithm can  be extended to do this in the same number of rounds.

\item {\bf A Lower Bound:} We establish an almost matching lower bound on the running time of distributed random walk that applies to
a general class of distributed random walk algorithms. We show that
any algorithm belonging to the class needs at least
$\Omega(\sqrt{\frac{\ell}{\log \ell}} + D)$  rounds to perform a
random walk of length $\ell$; notice that this lower bound is
nontrivial even in graphs of  small ($D = O(\log n)$)  diameter (cf.
Section \ref{sec:lowerbound}).
 Broadly speaking,  we consider   a class of
 token forwarding-type algorithms where nodes can only store and (selectively) forward  tokens (here tokens are $O(\log n)$-sized messages consisting of two node ids  identifying the
beginning and end of a segment --- we make this more precise in
Section \ref{sec:lowerbound}). Selective forwarding (more general
than just store and forwarding) means that nodes can omit to forward
certain segments (to reduce number of messages), but they cannot
alter   tokens in any way (e.g., resort to data compression
techniques). This class includes many natural algorithms, including
the algorithm in this paper.
%This shows that our algorithm is existentially optimal (up to polylogarithmic factors) and in
%general, no other algorithm can do better.

Our technique involves showing the same non-trivial lower bound  for
a problem that we call {\em path verification}. This simpler problem
appears quite basic and can have other applications. Informally,
given a graph $G$ and a sequence of $\ell$ vertices in the graph,
the problem is for some (source) node in the graph to verify that
the sequence forms a path.
%We show by constructing a graph for any choice of $D$ and $\ell$ that any algorithm in a natural class of distributed algorithms needs $\Omega(\sqrt{\frac{\ell}{\log \ell}})$ rounds to solve path verification.
One main idea in this proof is to show that independent nodes may be
able to verify short {\em local} paths; however, to be able to {\em
merge} these together and verify an $\ell$-length path would require
exchanging several messages. The trade-off is between the lengths of
the local paths that are verified and the number of such local paths
that need to be combined.  Locally verified paths can be exchanged
in one round, and messages can be exchanged at all nodes.  Despite
this, we show that the bandwidth restriction necessitates a large
number of rounds even if the diameter is small. We then show a
reduction to the random walk problem, where we require that each
node in the walk should know its (correct) position(s) in the walk.

\iffalse The path verification problem captures the ``degree" of
{\em locality} needed for the random walk problem. The previous
work~\cite{DNP09-podc} conjectured that the {\em correct} time bound
for the random walk problem is $\tilde{\Theta}(\sqrt{\ell D})$.  The
intuition behind such a conjecture is that if we perform short walks
of length $\alpha$ (which takes $\alpha$ rounds) and then  stitch
them, then we would need $\frac{\ell}{\alpha}$ such walks, each
stitch taking $D$ rounds. Therefore, one ends up with $(\alpha +
\frac{\ell D}{\alpha})$ rounds.  The same intuition holds for the
path verification problem, even though the sequence of vertices is
given: one can ``locally" verify sub-paths of length $\lambda$  (in
a sequential manner)
  and then send these  verified subpaths to the source; these $\ell/\lambda$ subpaths will cause congestion amounting
  to $\ell/\lambda$ rounds.
\fi
% from the path verification problem.

Similar non-trivial matching lower bounds on running time  are known
only for a few important problems in distributed computing, notably
the minimum spanning tree problem (e.g., see \cite{peleg-bound,
elkin}). Peleg and Rabinovich \cite{peleg-bound} showed that
$\tilde{\Omega}(\sqrt{n})$ time is required for constructing an MST
even on graphs of small diameter (for any $D=\Omega(\log n)$) and
\cite{kutten-domset} showed an essentially matching upper bound.
%this is essentially the best possible \cite{kutten-domset}.


\item {\bf Applications:} Our faster distributed random walk algorithm can be used in speeding up
distributed applications where  random walks arise as a subroutine.
Such applications include distributed construction of expander
graphs, checking whether a graph is an expander, construction of
random spanning trees, and random-walk based search (we refer to
\cite{DNP09-podc} for details). Here, we present two key
applications:


(1) {\em A Fast Distributed Algorithm for Random Spanning Trees
(RST):} We give a $\tilde{O}(\sqrt{m}D)$ time distributed algorithm
(cf. Section \ref{sec:rst}) for uniformly sampling a random spanning
tree in an arbitrary undirected (unweighted) graph (i.e., each
spanning tree in the underlying network has the same probability of
being selected). ($m$ denotes the number of edges in the graph.)
%To the best of our knowledge this is the fastest known distributed RST algorithm.
Spanning trees are fundamental network primitives and distributed
algorithms for various types of spanning trees such as minimum
spanning tree (MST), breadth-first spanning tree (BFS), shortest
path tree, shallow-light trees etc., have been studied extensively
in the literature \cite{peleg}. However, not much is known about the
distributed complexity of the random spanning tree problem.
%(in fact, this is true to some extent in
The centralized case
%also, which
has been studied for many decades, see e.g., the recent work of
\cite{kelner-madry} and the references therein; also see the recent
work of Goyal et al. \cite{goyal} which gives nice applications of
RST to fault-tolerant routing and constructing expanders.  In the
distributed context, the work of Bar-Ilan and Zernik \cite{bar-ilan}
gives a distributed RST algorithm for two special cases, namely that
of a complete graph (running in constant time) and a synchronous
ring (running in  $O(n)$ time). The work of \cite{Baala} gives a
self-stabilizing distributed algorithm for constructing a RST in a
wireless ad hoc network and mentions that RST is more resilient to
transient failures that occur in mobile ad hoc networks.
%None of these previous works,
%however, address the distributed complexity of constructing an RST in a general network.

Our algorithm works by giving an efficient distributed
implementation of the well-known Aldous-Broder random walk algorithm
\cite{aldous, broder} for constructing a RST.

(2) {\em Decentralized Computation of Mixing Time.} We present a
fast decentralized algorithm for estimating mixing time, conductance
and spectral gap of the network (cf. Section~\ref{sec:mixingtime}). In
particular, we show that given a starting point $x$, the mixing time
with respect to $x$, called $\tau^x_{mix}$, can be estimated in
$\tilde{O}(n^{1/2} + n^{1/4}\sqrt{D\tau^x_{mix}})$ rounds. This
gives an alternative algorithm to the only previously known approach
by Kempe and McSherry \cite{kempe} that can be used to estimate
$\tau^x_{mix}$ in $\tilde O(\tau^x_{mix})$ rounds.\footnote{Note
that \cite{kempe} in fact do more and give a decentralized algorithm
for computing the top $k$ eigenvectors of a weighted adjacency
matrix that runs in $O(\tau_{mix}\log^2 n)$ rounds if two adjacent
nodes are allowed to exchange $O(k^3)$ messages per round, where
$\tau_{mix}$ is the mixing time and $n$ is the size of the network.}
To compare,  we note that when $\tau^x_{mix} = \omega(n^{1/2})$ the
present algorithm is faster (assuming $D$ is not too large).

%(GOPAL-- We have to check Kempe's running time in the footnote ,should $k^3$ come in the rounds?)

  The
work of \cite{mihail-topaware}  discusses spectral algorithms for
enhancing the topology awareness, e.g., by identifying and assigning
weights to critical links. However, the algorithms are centralized,
and it is mentioned that obtaining efficient decentralized
algorithms is a major open problem. Our algorithms are fully
decentralized and  based on performing random walks, and so more
amenable to dynamic and self-organizing networks. \squishend



%\atish{Should we expand here on the techniques of the upper and lower bounds?}
%\atish{Should we somewhere admit that if the distributed network is expanding/rapidly mixing (as is the case sometimes with real world networks), then $\ell \approx D$ and so there are easy ways to sample from walks of length $\ell \gg D$, namely sample from degree distribution?}





\section{A Sublinear Time Distributed Random Walk Algorithm}\label{sec:one_walk_DoS}
\label{sec:upperbound}
\subsection{Description of the Algorithm}
%To better understand our algorithm, we
We first describe the $\tilde{O}(\ell^{2/3}D^{1/3})$-round algorithm
in \cite{DNP09-podc} and then highlight the changes in our current
algorithm. The current algorithm is randomized and uses several new
ideas that are crucial in obtaining the new bound.

The high-level idea is to perform ``many" short random walks in
parallel and later stitch them together as needed. In the first
phase of the algorithm {\sc Single-Random-Walk} (we refer to the
full version~\cite{fullversion} for pseudocodes of all algorithms
and subroutines), each node performs $\eta$ independent random walks
of length $\lambda$. (Only the destination of each of these walks is
aware of its source, but the sources do not know destinations right
away. The sources will get to know destinations later on when it is
needed.) It is shown that this takes $\tilde{O}(\eta\lambda)$ rounds
with high probability. Subsequently, the source node that requires a
walk of length $\ell$ extends a walk of length $\lambda$ by
``stitching'' walks.  If the end point of the first $\lambda$ length
walk is $u$, one of $u$'s $\lambda$ length walks is used to extend.
When at $u$, one of its $\lambda$-length walk destinations is
sampled uniformly (to preserve randomness) using {\sc
Sample-Destination} in $O(D)$ rounds (including the time to deliver
such sampled destination to $u$). (We call such $u$ and other nodes
at the stitching points as {\em connectors}
--- cf. Algorithm 1.) Each stitch takes $O(D)$ rounds (via the BFS tree). This process is extended as long as unused
$\lambda$-length walks are available from visited nodes. If the walk
reaches a node $v$ where all $\eta$ walks have been used up (which
is a key difficulty), then {\sc Get-More-Walks} is invoked. {\sc
Get-More-Walks} performs $\eta$ more walks of length $\lambda$ from
$v$, and this can be done in $\tilde{O}(\lambda)$ rounds. The number
of times {\sc Get-More-Walks} is invoked can be bounded by
$\frac{\ell}{\eta\lambda}$ in the worst case by an amortization
argument.  The overall bound on the algorithm is  $O(\eta\lambda +
\ell D/\lambda + \frac{\ell}{\eta})$. The bound of
$\tilde{O}(\ell^{2/3}D^{1/3})$ follows from appropriate choice of
parameters $\eta$ and $\lambda$.

%\gopal{ It will be great to give a figure here, which will just figuratively show what we just describe. This can be very helpful for the reader, e.g., connector etc.}

The current algorithm uses two crucial ideas to improve the running
time. The first  idea is to bound the number of times any node is
visited in a random walk of length $\ell$ (which in turn bounds the
number of times {\sc Get-More-Walks} is invoked). Instead of the
worst case analysis in \cite{DNP09-podc}, the new bound is obtained
by bounding the number of times any node is visited (with high
probability) in a random walk of length $\ell$ on an undirected
unweighted graph. The number of visits to a node beyond the mixing
time can be bounded using its stationary probability distribution.
However, we need a bound on the visits to a node  for any
$\ell$-length walk starting from the first step.
 We show a somewhat surprising bound that applies to an $\ell$-length (for $\ell = O(m^2)$)  random walk on any arbitrary (undirected) graph: {\em no node $x$ is visited more than
 $\tilde{O}(d(x)\sqrt{\ell})$ times}, in an $\ell$-length walk from any starting node ($d(x)$ is the degree of $x$)   (cf. Lemma~\ref{lemma:visits bound}).
 Note that this bound does not depend on any other parameter of the graph, just on the (local) degree of the node and the length of the walk. This bound is
 tight in general (e.g., consider a line and a walk of length $n$).

The above bound is not enough to get the desired running time, as it
does not say anything about the
 distribution of connectors when we chop the length $\ell$ walk into $\ell/\lambda$ pieces.
 We have to bound the number
of visits to a node as a connector in order to bound the number of
times {\sc Get-More-Walks} is invoked. To overcome this we use a
second idea:
  Instead of nodes performing walks of length $\lambda$, each such walk $i$ is of length $\lambda+r_i$ where $r_i$ is a random number in the range $[0,\lambda-1]$. Notice that the random numbers are independent for each walk.
   We show  the following ``uniformity lemma":  if  the short walks are now of a random length in the range of $[\lambda, 2\lambda-1]$, then if a node $u$ is visited at most $N_u$ times  in an $\ell$ step walk, then the node is visited at most $\tilde{O}(N_u/\lambda)$ times as an endpoint of a short walk  (cf. Lemma \ref{lem:uniformityused}).  This modification to {\sc Single-Random-Walk} allows us to
bound the number of visits to each node (cf.
Lemma~\ref{lem:uniformityused}).

%
%This follows from a result of Gillman~\cite{Gillman98}.
%This follows from the theorem we developed in the next section (cf.
%Theorem~\ref{thm:1-walk}). The reason this is crucial is because we
%want to obtain a tighter bound on the number of times {\sc
%Get-More-Walks} will be invoked. Unfortunately, Gillman's bound
%turns out to be too weak to directly improve the
%$\tilde{O}(\ell^{2/3}D^{1/3})$-round guarantee. The goal, therefore,
%is to obtain a tighter bound on the number of times a node is
%visited as the end point of a short walk (as this is the only case
%where we may need to invoke {\sc Get-More-Walks}).
%
\iffalse To get around this difficulty, we make a simple yet
insightful modification to {\sc Single-Random-Walk}. Instead of
nodes performing short walks of length $\lambda$, each such short
walk $i$ is of length $\lambda+r_i$ where $r_i$ is a random number
in the range $[0,\lambda-1]$. (Notice that the random numbers are
independent for each short walk.) This modification allows us to
bound the number of visits to each node (cf.
Lemma~\ref{lem:uniformityused}). \fi

The change of the short walk length above leads to two modifications
in Phase~1 of {\sc Single-Random-Walk} and {\sc Get-More-Walks}. In
Phase~1, generating $\eta$ walks of different lengths from each node
is straightforward: Each node simply sends $\eta$ tokens containing
the source ID and the desired length. The nodes keep forwarding
these tokens with decreased desired walk length until the desired
length becomes zero.
%
The modification of {\sc Get-More-Walks} is trickier. To avoid
congestion, we use the idea of {\em reservoir
sampling}~\cite{Vitter85}. In particular, we add the following
process at the end of the {\sc Get-More-Walks} algorithm
in~\cite{DNP09-podc}:


%\noindent
%\fbox{
%\begin{minipage}{\linewidth} \noindent
\begin{quote}
\begin{algorithmic}
\FOR{$i=0$ to $\lambda-1$}

\STATE \label{line:reservoir} For each message, independently with
probability $\frac{1}{\lambda-i}$, stop sending the message further
and save the ID of the source node (in this event, the node with the
message is the destination). For messages $M$ that are not stopped,
each node picks a neighbor correspondingly and sends the messages
forward as before.

\ENDFOR
\end{algorithmic}
\end{quote}
%\end{minipage}%}
%\\



%\noindent\fbox{ \begin{minipage}{\linewidth} \noindent
%\begin{algorithmic}
%\FOR{$i=0$ to $\lambda-1$}
%
%\STATE \label{line:reservoir} For each message, independently with
%probability $\frac{1}{\lambda-i}$, stop sending the message further
%and save the ID of the source node (in this event, the node with the
%message is the destination). For messages $M$ that are not stopped,
%each node picks a neighbor correspondingly and sends the messages
%forward as before.
%
%\ENDFOR
%\end{algorithmic}
%\end{minipage}}\\


The reason it needs to be done this way is that if we first sampled
the walk length $r$, independently for each walk, in the range
$[0,\lambda-1]$ and then extended each walk accordingly, the
algorithm would need to pass $r$ independently for each walk. This
will cause congestion along the edges; no congestion occurs in the
mentioned algorithm as only the {\em count} of the number of walks
along an edge is passed to the node across the edge. Therefore, we
need to decide when to stop on the fly using  reservoir sampling.

%First, in Phase~1 of {Single-Random-Walk} we create $\eta\deg v$
%walks from each node $v$, instead of $\eta$ walks, and
%
%Second, in {\sc Get-More-Walk}, we create $\ell/\lambda$ walks
%instead of $\eta$ walks and show that it still uses $O(D)$ rounds
%(cf. Lemma~\ref{lem:get-more-walks}).

We also have to make another modification in Phase~1 due to the new
bound on the number of visits. Recall that, in this phase, each node
prepares $\eta$ walks of length $\lambda$. However, since the new
bound of visits of each node $x$ is proportional to its degree
$d(x)$ (see Lemma~\ref{lemma:visits bound}), we make each node
prepare $\eta d(x)$ walks instead. We show that Phase~1 uses $\tilde
O(\eta\lambda)$ rounds, instead of $\tilde O(\frac{\lambda
\eta}{\delta})$ rounds where $\delta$ is the minimum degree in the
graph (cf. Lemma~\ref{lem:Sample-Destination}).

%Another modification is that instead of each node performing $\eta$
%walks, a node performs number of walks proportional to its degree
%(we know that the number of times a node is visited {\em beyond} the
%mixing time is proportional to its degree).
%We show, crucially using
%the fact that the short walks are now of a random length in the
%range of $[\lambda, 2\lambda-1]$, that if a node $u$ is visited at
%most $N_u$ times (given by Gillman) in an $\ell$ step walk, then the
%node is visited at most $\tilde{O}(N_u/\lambda)$ times as an end
%point of a short walk. $N_u$ has an exponential dependance on
%$\frac{1}{\epsilon}$ where $\epsilon$ is the spectral gap.
%Therefore, we choose parameter $\lambda$ to depend on $\epsilon$
%(the value of $\epsilon$ can be guessed by doubling since
%$\frac{1}{\epsilon}$ is at most polynomial in $n$ for undirected
%unweighted graphs). We apply Gillman's result on sets of size $k$ (a
%parameter in the algorithm) and obtain a bound on the probability
%that more than $k$ distinct nodes would invoke {\sc Get-More-Walks}.
%This allows us to prove an improved bound of $\tilde{O}(\min\{(\ell
%D)^{1/2}\epsilon^{-1/4} + D\epsilon^{-1/2},(\ell
%D)^{1/2}\epsilon^{-1/3}\})$ rounds on performing $\ell$-length
%random walks. The dependance on $\ell$ is reduced from $\ell^{2/3}$
%to $\ell^{1/2}$.

%{\bf TODO: In the above paragraph, I first describe what we need and then describe the modifications in the algorithm to explain how we achieve it.... Might be better to first describe the modifications to the algorithm and then show how they help? Not sure.}


%The main idea used in the
%$\tilde{O}(\ell^{2/3}D^{1/3})$-round
%algorithm in \cite{DNP09-podc} is to ``prepare'' a few short walks in the beginning and
%carefully stitch these walks together later as necessary. If there
%are not enough short walks, they construct more of them on the fly. They
%overcome a key technical problem by showing how one can perform many
%short walks in parallel without causing too much congestion.




To summarize, the main algorithm for performing a single random walk
is {\sc Single-Random-Walk}. This algorithm, in turn, uses {\sc
Get-More-Walks} and {\sc Sample-Destination}.
%
The key modification is that, instead of creating short walks of
length $\lambda$ each, we create short walks where each walk has
length in range $[\lambda, 2\lambda-1]$. To do this, we modify the
Phase~1 of {\sc Single-Random-Walk} and {\sc Get-More-Walks}.
%
%to random the length of each walk and shown that the same running
%time guarantee still holds (cf. Lemma~\ref{lem:phase1}).
%
%We summarize the changes we made to the algorithm here and reserve the full description of the algorithms (which are slight
%modifications of algorithms with the same names in \cite{DNP09-podc}) in Appendix~\ref{sec:pseudocode}.


%\textbf{TO DO: EXPLAIN THE PSEUDOCODES IN THE APPENDIX WITHOUT REFERENCING TO THE APPENDIX.}


%{\bf Gopal:} For space, we should move the pseudocodes to the appendix. I already say that  at the beginning
%of this section.

%\newcommand{\mindegree}[0]{\delta}
%\begin{algorithm}
%\caption{\sc Single-Random-Walk($s$, $\ell$)} \label{alg:single-random-walk}
%\textbf{Input:} Starting node $s$, and desired walk length $\ell$.\\
%\textbf{Output:} Destination node of the walk outputs the ID of
%$s$.\\
%
%\textbf{Phase 1: (Each node $v$ performs $\eta_v=\eta \deg(v))$ random walks of length
%$\lambda + r_i$ where $r_i$ (for each $1\leq i\leq \eta$) is chosen independently at random
%in the range $[0,\lambda-1]$.)}
%\begin{algorithmic}[1]
%\STATE Let $r_{max} = \max_{1\leq i\leq \eta}{r_i}$, the random numbers chosen independently for each of the $\eta_x$ walks.
%
%\STATE Each node $x$ constructs $\eta_x$ messages containing
%its ID and in addition, the $i$-th message contains the desired walk length of $\lambda + r_i$.
%
%\FOR{$i=1$ to $\lambda + r_{max}$}
%
%\STATE This is the $i$-th iteration. Each node $v$ does the
%following: Consider each message $M$ held by $v$ and received in the
%$(i-1)$-th iteration (having current counter $i-1$). If the message $M$'s
%desired walk length is at most $i$, then $v$ stored the ID of the source ($v$ is the
%desired destination). Else, $v$ picks a neighbor
%$u$ uniformly at random and forward $M$ to $u$ after incrementing
%its counter.
%
%\COMMENT{Note that any iteration could require more than 1 round.}
%
%\ENDFOR
%
%\end{algorithmic}
%
%
%\textbf{Phase 2: (Stitch $\Theta(\ell/\lambda)$ walks, each of length in $[\lambda,2\lambda-1]$)}
%\begin{algorithmic}[1]
%\STATE The source node $s$ creates a message called ``token'' which contains the ID
%of $s$
%
%\STATE The algorithm generates a set of {\em connectors}, denoted by $C$, as follows.
%
%\STATE Initialize $C = \{s\}$
%
%\WHILE {Length of walk completed is at most $\ell-2\lambda$}
%
%  \STATE Let $v$ be the node that is currently holding the token.
%
%  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
%  returned value (which is a destination of an unused random walk starting at $v$
%  of length between $\lambda$ and $2\lambda-1$.)
%
%  \IF{$v'$ = {\sc null} (all walks from $v$ have already been used up)}
%
%  \STATE $v$ calls {\sc Get-More-Walks($v$, $\lambda$)} (Perform $\Theta(l/\lambda)$ walks
%  of length $\lambda$ starting at $v$)
%
%  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
%  returned value
%
%  \ENDIF
%
%  \STATE $v$ sends the token to $v'$
%
%  \STATE $C = C \cup \{v\}$
%
%\ENDWHILE
%
%\STATE Walk naively until $\ell$ steps are completed (this is at
%most another $2\lambda$ steps)
%
%\STATE A node holding the token outputs the ID of $s$
%
%\end{algorithmic}
%
%\end{algorithm}
%
%
%\begin{algorithm}[t]
%\caption{\sc Get-More-Walks($v$, $\lambda$)} \label{alg:Get-More-Walks}
%(Starting from node $v$,  perform $\lfloor\ell/\lambda\rfloor$ number of random walks, each of  length $\lambda + r_i$ where
%$r_i$ is chosen uniformly at random in the range $[0,\lambda-1]$ for the $i$-th walk.) \\
%\begin{algorithmic}[1]
%\STATE The node $v$ constructs $\lfloor\ell/\lambda\rfloor$ (identical) messages
%containing its ID.
%
%\FOR{$i=1$ to $\lambda$}
%
%\STATE Each node $u$ does the following:
%
%\STATE - For each message $M$ held by $u$,
%pick a neighbor $z$ uniformly at random as a receiver of $M$.
%
%\STATE - For each neighbor $z$ of $u$, send ID of $v$ and the number
%of messages that $z$ is picked as a receiver, denoted by $c(u, v)$.
%
%\STATE - For each neighbor $z$ of $u$, upon receiving ID of $v$ and
%$c(u, v)$, constructs $c(u, v)$ messages, each contains the ID of
%$v$.
%
%\ENDFOR
%
%\COMMENT {Each walk has now completed $\lambda$ steps. These walks are now extended probabilistically
%further by $r$ steps where each $r$ is independent and uniform in the range $[0,\lambda-1]$.}
%
%\FOR{$i=0$ to $\lambda-1$}
%
%\STATE \label{line:reservoir} For each message, independently with probability $\frac{1}{\lambda-i}$, stop sending the message further and save the ID of the source node (in this event, the node with the message is the destination). For messages $M$ that are not stopped, each node picks a neighbor correspondingly and sends the messages forward as before.
%
%\ENDFOR
%
%\STATE At the end, each destination knows the source ID as well as the length of the corresponding walk.
%
%\end{algorithmic}
%
%\end{algorithm}
%
%\begin{algorithm}[t]
%\caption{\sc Sample-Destination($v$)} \label{alg:Sample-Destination}
%\textbf{Input:} Starting node $v$.\\
%\textbf{Output:} A node sampled from among the stored
%walks (of length in $[\lambda, 2\lambda-1]$) from $v$. \\
%
%\textbf{Sweep 1: (Perform BFS tree)}
%\begin{algorithmic}[1]
%
%\STATE Construct a Breadth-First-Search (BFS) tree rooted at $v$.
%While constructing, every node stores its parent's ID. Denote such
%tree by $T$.
%
%\end{algorithmic}
%
%\textbf{Sweep 2: (Tokens travel up the tree, sample as you go)}
%\begin{algorithmic}[1]
%
%\STATE We divide $T$ naturally into levels $0$ through $D$ (where
%nodes in level $D$ are leaf nodes and the root node $s$ is in level
%$0$).
%
%\STATE Tokens are held by nodes as a result of doing walks of length
%between $\lambda$ and $2\lambda-1$ from $v$ (which is done in either Phase~1 or {\sc
%Get-More-Walks} (cf. Algorithm~\ref{alg:Get-More-Walks})) A node
%could have more than one token.
%
%\STATE Every node $u$ that holds token(s) picks one token, denoted
%by $d_0$, uniformly at random and lets $c_0$ denote the number of
%tokens it has.
%
%\FOR{$i=D$ down to $0$}
%
%\STATE Every node $u$ in level $i$ that either receives token(s)
%from children or possesses token(s) itself do the following.
%
%\STATE Let $u$ have tokens $d_0, d_1, d_2, \ldots, d_q$, with counts
%$c_0, c_1, c_2, \ldots, c_q$ (including its own tokens). The node
%$v$ samples one of $d_0$ through $d_q$, with probabilities
%proportional to the respective counts. That is, for any $1\leq j\leq
%q$, $d_j$ is sampled with probability
%$\frac{c_j}{c_0+c_1+\ldots+c_q}$.
%
%\STATE The sampled token is sent to the parent node (unless already
%at root), along with a count of $c_0+c_1+\ldots+c_q$ (the count
%represents the number of tokens from which this token has been
%sampled).
%
%\ENDFOR
%
%\STATE The root output the ID of the owner of the final sampled
%token. Denote such node by $u_d$.
%
%\end{algorithmic}
%
%\textbf{Sweep 3: (Go and delete the sampled destination)}
%\begin{algorithmic}[1]
%
%\STATE $v$ sends a message to $u_d$ (e.g., via broadcasting). $u_d$
%deletes one token of $v$ it is holding (so that this random walk of
%length $\lambda$ is not reused/re-stitched).
%\end{algorithmic}
%
%\end{algorithm}



%\begin{definition}
%Connectors.
%\end{definition}

%{\bf Gopal:} We need a proof for this. Although it is easy, it will still be better to state it.
%{\bf Danupon:} I removed the claim below as it is the same as the last lemma below.

%\begin{claim}
%{\sc Sample-Destination} returns a destination from a random walk whose length is uniform in the range $[\lambda,2\lambda-1]$.
%\end{claim}

%MOVED TO APPENDIX, ALONG WITH THE PSEUDOCODE
%Notice that in Line~\ref{line:reservoir} in Algorithm~\ref{alg:Get-More-Walks},
%the walks of length $\lambda$ are extended further to walks of length $\lambda+r$
%where $r$ is a random number in the range $[0,\lambda-1]$. We do this by extending
%the $\lambda$-length walks further, and probabilistically stopping each walk in each of the next $i$ steps
%(for $0\leq i\leq \lambda-1$) with probability $\frac{1}{\lambda-i}$. The reason it needs to be done this way is because if we first sampled $r$,
%independently for each walk, in the range $[0,\lambda-1]$ and then extended each walk accordingly, the algorithm would need to pass $r$ independently
%for each walk. This will cause congestion along the edges; no congestion occurs in the mentioned algorithm as only the {\em count} of the number of
%walks along an edge are passed to the node across the edge.

%{\bf TODO: Above is too verbose and badly written - to make it crisp}.

%We now explain algorithm {\sc Single-Random-Walk} (cf.
%Algorithm~\ref{alg:single-random-walk}) in some more detail.  The
%algorithm consists of two phases.
%In the first phase, each node performs $\eta$ random walks of length
%$\lambda$ each. To do this, each node initially constructs $\eta$
%messages with its ID. Then, each node forwards each message to a
%random neighbor. This is done for $\lambda$ steps. At the end of
%this phase, if node $u$ has $k$ messages with the ID of node $v$
%written on them, then $u$ is a destination of $k$ walks starting at
%$v$. Note that $v$ has no knowledge of the destinations of its own
%walks. The main technical issue to deal with here is that performing
%many simultaneous random walks can cause too much congestion. We
%show a key lemma (Lemma \ref{lem:mainone}) that bounds the time
%needed for this phase.

%In the second phase, we perform a random walk starting from source
%$s$ by ``stitching'' walks of length $\lambda$ obtained in the first
%phase into a longer walk. The process goes as follows. Imagine that
%there is a token initially held by $s$. Among $\eta$ walks starting
%at $s$ (obtained in phase 1), randomly select one. Note that this
%step is not straightforward since $s$ has no knowledge of the
%destinations of its walks. Further, selecting an arbitrary
%destination would violate randomness. (A minor technical point: one
%may try to use the $i$-th walk when it is reached for the $i$-th
%time;  however, this is not possible because one cannot mark tokens
%separately in {\sc Get-More-Walks} (described later), since we only
%send counts forward to avoid congestion on edges). {\sc
%Sample-Destination} algorithm (cf.
%Algorithm~\ref{alg:Sample-Destination}) is used to perform this
%step. We prove in Lemma~\ref{lem:Sample-Destination} that this can
%be done in $O(D)$ rounds.

%When {\sc Sample-Destination}($v$) is called by any node $v$, this
%algorithm randomly picks a message with ID of $v$ written on it,
%returns the ID of the node that is holding this message, and then
%deletes it. If there is no such message (e.g., when {\sc
%Sample-Destination}($v$) has been called $\eta$ times), it returns
%{\sc null}.

%Let $v$ receive $u_d$ as an output from {\sc Sample-Destination}.
%$v$ sends the token to $u_d$ and the process repeats. That is, $u_d$
%randomly selects a random walk starting at $u_d$ and forwards the
%token to the destination. If the process continues without {\sc
%Sample-Destination} returning {\sc null}, then a walk of length
%$\ell$ will complete after $\ell/\lambda$ repetitions.

%However, if {\sc null} is returned by {\sc Sample-Destination} for
%$v$, then the token cannot be forwarded further. At this stage,
%$\eta$ more walks of length $\lambda$ are performed from $v$ by
%calling {\sc Get-More-Walks}($v$, $\eta$, $\lambda$) (cf.
%Algorithm~\ref{alg:Get-More-Walks}). This algorithm creates $\eta$
%messages with ID $v$ and forwards them for $\lambda$ random steps.
%This is done fast by only sending counts along edges that require
%multiple messages. This is crucial in avoiding congestion. While one
%cannot directly bound the number of times any particular node $v$
%invokes {\sc Get-more-Walks}, a simple amortization argument is used to
%bound the running time of invocations over all nodes.

%TO DO: STATE THE FOLLOWING LEMMA. MAKE SURE THAT THE READER UNDERSTANDS THAT
%THIS IS A CONTRIBUTION OF THE PREVIOUS PAPER.

We now state four lemmas which are similar to Lemmas~2.2--2.6 in
\cite{DNP09-podc}. However, since the algorithm here is a
modification of that in \cite{DNP09-podc}, we include the full
proofs in the full version~\cite{fullversion}.

%{\bf TODO: Put the proofs of the following three lemmas in the Appendix}
%with a slight modification in the
%algorithm, we are able to bound the number of times any node is
%visited (based on the length of the walk). This in turn allows us to
%bound the number of times {\sc Get-More-Walks} will be required, and
%consequently get an improved result. Since Phase~1 of the algorithm
%is essentially the same as that in \cite{DNP09-podc}, we only state
%the result here with the proof omitted.


\begin{lemma} \label{lem:phase1}
Phase~1 finishes in $O( \lambda \eta \log{n} )$ rounds with high
probability.
\end{lemma}


%\textbf{Danupon}: The bound above is not actually proved in the PODC
%paper although the proof of this lemma is the same as the proof of
%lemma in PODC paper.


%It is left to bound the number of rounds used by Phase~2. First, we
%recall the following facts proved by


\begin{lemma}\label{lem:get-more-walks}
For any $v$, {\sc Get-More-Walks($v$, $\eta$, $\lambda$)} always
finishes within $O(\lambda)$ rounds.
\end{lemma}



\begin{lemma}\label{lem:Sample-Destination}
{\sc Sample-Destination} always finishes within $O(D)$ rounds.
\end{lemma}

%{\bf Gopal:} Here we should state the lemma from PODC paper that says that the algorithm is correct, that is, the sample
%that is returned is from the distribution of walk for length $\ell$.  We say that the correctness follows from
%PODC.  Then say, that the rest of the section is
%devoted to running time analysis.


\begin{lemma}\label{lem:correctness-sample-destination-new}
Algorithm {\sc Sample-Destination}($v$)  returns a destination from
a random walk whose length is uniform in the range
$[\lambda,2\lambda-1]$.
\end{lemma}

%\begin{lemma}\label{lem:correctness-sample-destination}
%Algorithm {\sc Sample-Destination}($v$) (cf.
%Algorithm~\ref{alg:Sample-Destination}), for any node $v$, samples a
%destination of a walk uniformly at random.
%\end{lemma}



%In the following subsection, we give upper bounds on the number of times {\sc Get-More-Walks} will be invoked. This
%helps in obtaining better bounds in this paper.

%\input{upperbound} %%%% The old analysis used in SODA'10 submission can be found in the file "old-analysis-SODA10.tex".

\subsection{Analysis}
\label{sec:analysis}

The following theorem states the main result of this section: the
algorithm {\sc Single-Random-Walk} correctly samples a node after a
random walk of $\ell$ steps and the algorithm takes, with high
probability, $\tilde O\left(\sqrt{\ell D}\right)$ rounds, where $D$
is the diameter of the graph. Throughout this section, we assume
that $\ell$ is $O(m^2)$, where $m$ is the number of edges in the
network. If $\ell$ is $\Omega(m^2)$, the required bound is easily
achieved by aggregating the graph topology (via upcast) onto one
node in $O(m+D)$ rounds (e.g., see \cite{peleg}). The difficulty
lies in proving the bound for $\ell = O(m^2)$.

\begin{theorem}\label{thm:1-walk}
For any $\ell$, Algorithm {\sc Single-Random-Walk} solves $1$-RW-DoS
(the Single Random Walk Problem) and, with probability at least
$1-\frac{2}{n}$,
%
finishes in $\tilde O\left( \sqrt{\ell D} \right)$ rounds.
\end{theorem}

\iffalse A key difference in this paper from Das Sarma et
al.~\cite{DNP09-podc} is that, with a  crucial modification in the
algorithm, we are able to bound the number of times any node is
visited (based on the length of the walk). This in turn allows us to
bound the number of times {\sc Get-More-Walks} will be required, and
consequently get an improved result.

In \cite{DNP09-podc}, the running time of Phase~2 is argued by
bounding the number of times {\sc Get-More-Walks} is invoked in
total. In particular, it is shown that this algorithm is invoked at
most $\frac{\ell}{\eta\lambda}$ times. In this paper, we show that
with the modification of the algorithm presented here, one can prove
a stronger result, as follows. \fi

We prove the above theorem using the following lemmas. As mentioned
earlier, to bound the number of times {\sc Get-More-Walks} is
invoked, we need a technical result on random walks that bounds the
number of times a node will be visited in an $\ell$-length random
walk. Consider a simple random walk on a connected undirected graph
on $n$ vertices. Let $d(x)$ denote the degree of $x$, and let $m$
denote the number of edges. Let $N_t^x(y)$ denote the number of
visits to vertex $y$ by time $t$, given the walk started at vertex
$x$.
%
Now, consider $k$ walks, each of length $\ell$, starting from (not
necessarily distinct) nodes $x_1, x_2, \ldots, x_k$. We show a key
technical lemma (proof in the full version~\cite{fullversion}) that
applies to a random walk on any graph: with high probability, no
vertex $y$ is visited more than $24 d(y) \sqrt{k\ell+1}\log n + k$
times.


\begin{lemma}\label{lemma:visits bound}
For any nodes $x_1, x_2, \ldots, x_k$, and $\ell=O(m^2)$,
\[\Pr\bigl(\exists y\ \text{s.t.}\ \sum_{i=1}^k N_\ell^{x_i}(y) \geq 24
d(y) \sqrt{k\ell+1}\log n+k\bigr) \leq 1/n\,.\]
\end{lemma}
\iffalse
\begin{proof}[Proof (Sketch)]
Due to space limitation, we sketch the key steps here. A full proof
can be found in the full version~\cite{fullversion}.

First, we bound the expectation of the number of visits,
$\e[N_t^x(y)]$, to be at most $8d(y)\sqrt{t+1}$ (cf.
Proposition~\ref{proposition:first and second moment}). This is done
using Lyons' estimation (see Lemma~3.4 and Remark 4 in \cite{Lyons})
which bound the distribution at $y$ at the $k$-th step of the walk
to be at most $4d(y)/\sqrt{k+1}$.

Secondly, we use the above bound to show that, with high
probability, $N^x_t(y)$ is at most $24d(y)\sqrt{t+1}\log n$ times
(cf. Lemma~\ref{lemma:whp one walk one node bound}). To do this,
consider when $N^x_t(y)$ is visited more than the above number. We
divide the walk into $\log n$ independent subwalks, each visiting
$y$ exactly $24 d(y)\sqrt{t+1}$ times. We then argue using the
previous bound that, with high probability, one of these subwalks
must be longer than $t$, contradicting the fact that the walk is of
length $t$.

Finally, we extend the above result to show that $\sum_{i=1}^k
N_\ell^{x_i}(y)<24 d(x) \sqrt{k\ell+1}\log n+k$ with high
probability (cf. Lemma~\ref{lemma:k walks one node bound}). The
intuition is to view $k$ walks of length $\ell$ as one walk of
length $k\ell$. The previous bound immediately implies that
$\sum_{i=1}^k N_\ell^{x_i}(y) < 24 d(x) \sqrt{k\ell+1}\log n$. The
additional ``$+k$'' term is due to some technicalities.

By union bound the above bound over all nodes, the theorem follows.
\end{proof}
\fi

%
This lemma says that the number of visits to each node can be
bounded. However, for each node, we are only interested in the case
where it is used as a connector. The lemma below shows that the
number of visits as a connector can be bounded as well; i.e.,
%
if any node $v_i$ appears $t$ times in the walk, then it is likely
to appear roughly $t/\lambda$ times as a connector.

\begin{lemma}
\label{lem:uniformityused} For any vertex $v$, if $v$ appears in the
walk at most $t$ times then it appears as a connector node at most
$t(\log n)^2/\lambda$ times with probability at least $1-1/n^2$.
\end{lemma}

Intuitively, this argument is simple, since the connectors are
spread out in steps of length approximately $\lambda$. However,
there might be some {\em periodicity} that results in the same node
being visited multiple times but {\em exactly} at
$\lambda$-intervals. This is where we crucially use the fact that
the algorithm uses walks of length $\lambda + r$ where $r$ is chosen
uniformly at random from $[0,\lambda-1]$. The proof then goes via
constructing another process equivalent to partitioning the $\ell$
steps into intervals of $\lambda$ and then sampling points from
each interval. We analyze this by carefully constructing a different
process that stochastically dominates the process of a node
occurring as a connector at various steps in the $\ell$-length walk
and then use a Chernoff bound argument. The detailed proof is
presented in the full version~\cite{fullversion}.

%\begin{proof}[Proof (Sketched)]
%Intuitively, this argument is simple, since the connectors are
%spread out in steps of length approximately $\lambda$. However,
%there might be some {\em periodicity} that results in the same node
%being visited multiple times but {\em exactly} at
%$\lambda$-intervals. This is where we crucially use the fact that
%the algorithm uses walks of length $\lambda + r$ where $r$ is chosen
%uniformly at random from $[0,\lambda-1]$.

%Think of when there are $k$ numbers $x_1, x_2, ..., x_k$. Let us
%assume that the number $1$ appears at most $t$ times, for some $t$.
%Now, pick every $\lambda+r$ numbers starting from the left, where
%$r$ is chosen uniformly at random from $[0,\lambda-1]$. (This is
%equivalent to picking connectors in the random walk.) How many times
%will $1$ be picked? We claim that $1$ will be picked at most $t(\log
%n)^2/\lambda$ times with probability at least $1-1/n^2$.

%To see this, the main step is to consider another process: Partition
%$x_1, ..., x_k$ into blocks of size $\lambda$ and pick one number
%from each block uniformly at random. Intuitively, since this process
%always return at least the same number of numbers returned in the
%previous process, the probability that we see $1$ here more than
%$t(\log n)^2/\lambda$ in this process is more than the probability
%of getting $1$ more than $t(\log n)^2/\lambda$ in the previous
%process. This claim has to be carefully proved, which is done in
%Appendix~\ref{sec:uniformityused-proof}.

%Now, since the result from the block is independent from each other,
%we can use Chernoff's bound to bound the number of see $1$ more than
%$t(\log n)^2/\lambda$ times in the second process. This turns out to
%be at most $1/n$ as desired.
%\end{proof}

%(GOPAL --- Something is wrong with the last line of the above theorem.
%The proof sketch does not read well, with a lot of ``numbers". May be
%better to remove the sketch?)

Now we are ready to prove Theorem~\ref{thm:1-walk}.

\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
First, we claim, using Lemma \ref{lemma:visits bound} and
\ref{lem:uniformityused}, that each node is used as a connector node
at most $\frac{24 d(x) \sqrt{\ell}(\log n)^3}{\lambda}$ times with
probability at least $1-2/n$. To see this, observe that the claim
holds if each node $x$ is visited at most
$t(x)=24d(x)\sqrt{\ell+1}\log n$ times and consequently appears as a
connector node at most $t(x)(\log n)^2/\lambda$ times. By
Lemma~\ref{lemma:visits bound}, the first condition holds with
probability at least $1-1/n$. By Lemma~\ref{lem:uniformityused} and
the union bound over all nodes, the second condition holds with
probability at least $1-1/n$, provided that the first condition
holds. Therefore, both conditions hold together with probability at
least $1-2/n$ as claimed.

Now, we choose $\eta=1$ and $\lambda=24 \sqrt{\ell D}(\log n)^3$.
%
By Lemma~\ref{lem:phase1}, Phase~1 finishes in $\tilde O(\lambda
\eta) = \tilde O(\sqrt{\ell D})$ rounds with high probability.
%
For Phase~2, {\sc Sample-Destination} is invoked
$O(\frac{\ell}{\lambda})$ times (only when we stitch the walks) and
therefore, by Lemma~\ref{lem:Sample-Destination}, contributes
$O(\frac{\ell D}{\lambda})=\tilde O(\sqrt{\ell D})$ rounds.
%
Finally, we claim that {\sc Get-More-Walks} is never invoked, with
probability at least $1-2/n$. To see this, recall our claim above
that each node is used as a connector node at most $\frac{24 d(x)
\sqrt{\ell}(\log n)^3}{\lambda}$ times. Moreover, observe that we
have prepared this many walks in Phase~1; i.e., after Phase~1, each
node has $\eta\lambda d(x)= \frac{24 d(x) \sqrt{\ell}(\log
n)^3}{\lambda}$ short walks. The claim follows.

Therefore, with probability at least $1-2/n$, the algorithm finishes
in $\tilde O(\sqrt{\ell D})$ rounds as claimed.
\end{proof}

\noindent{\bf Regenerating the entire random walk:} It is important
to note that our algorithm can be extended to regenerate the entire
walk. As described above, the source node obtains the sample after a
random walk of length $\ell$. In certain applications, it may be
desired that the entire random walk be obtained, i.e., every node in
the $\ell$-length walk knows its position(s) in the walk. This can
be done by first informing all intermediate connecting nodes of
their position (since there are only $O(\sqrt{\ell})$ such nodes).
Then, these nodes can regenerate their $O(\sqrt{\ell})$ length short
walks by simply sending a message through each of the corresponding
short walks. This can be completed in $\tilde{O}(\sqrt{\ell D})$
rounds with high probability. This is because, with high
probability, {\sc Get-More-Walks} will not be invoked and hence all
the short walks are generated in Phase~1. Sending a message through
each of these short walks (in fact, sending a message through {\em
every} short walk generated in Phase~1) takes time at most the time
taken in Phase~1, i.e., $\tilde{O}(\sqrt{\ell D})$ rounds.

\subsection{Extension to Computing $k$ Random Walks}

We now consider the scenario when we want to compute $k$ walks of
length $\ell$ from different (not necessarily distinct) sources $s_1,
s_2, \ldots, s_k$. We show that {\sc Single-Random-Walk} can be
extended to solve this problem. Consider the following  algorithm.

\paragraph{{\sc Many-Random-Walks}:} Let
$\lambda=(24 \sqrt{k\ell D+1}\log n+k)(\log n)^2$ and $\eta=1$. If
$\lambda> \ell$ then run the naive random walk algorithm, i.e., the
sources find walks of length $\ell$ simultaneously by sending
tokens. Otherwise, do the following. First, modify Phase~2 of {\sc
Single-Random-Walk} to create multiple walks, one at a time; i.e.,
in the second phase, we stitch the short walks together to get a
walk of length $\ell$ starting at $s_1$ then do the same thing for
$s_2$, $s_3$, and so on. We state the theorem below and the proof is
in the full version~\cite{fullversion}.

\begin{theorem}\label{thm:kwalks} {\sc Many-Random-Walks} finishes in
$$\tilde O\left( \min( \sqrt{k\ell D} + k, k + \ell )\right)$$ rounds
with high probability.
\end{theorem}
%\begin{proof}
%First, consider the case where $\lambda>\ell$. In this case,
%$\min(\sqrt{k\ell D}+k, \sqrt{k\ell}+k+\ell)=\tilde
%O(\sqrt{k\ell}+k+\ell)$. By Lemma~\ref{lemma:visits bound}, each
%node $x$ will be visited at most $\tilde O(d(x) (\sqrt{k\ell}+k))$
%times. Therefore, using the same argument as Lemma~\ref{lem:phase1},
%the congestion is $\tilde O(\sqrt{k\ell} + k)$ with high
%probability. Since the dilation is $\ell$, {\sc Many-Random-Walks}
%takes $\tilde O(\sqrt{k\ell} + k +\ell)$ rounds as claimed.

%Now, consider the other case where $\lambda\leq \ell$. In this case,
%$\min(\sqrt{k\ell D}+k, \sqrt{k\ell}+k+\ell)=\tilde O(\sqrt{k\ell
%D}+k)$. Phase~1 takes $\tilde O(\lambda \eta) = \tilde O(\sqrt{k\ell
%D}+k)$. The stitching in Phase~2 takes $\tilde O(k\ell D/\lambda) =
%\tilde O(\sqrt{k\ell D})$. Moreover, by Lemma~\ref{lemma:visits
%bound}, {\sc Get-More-Walks} will never be invoked. Therefore, the
%total number of rounds is $\tilde O(\sqrt{k\ell D}+k)$ as claimed.
%\end{proof}

%(GOPAL --- This proof can be moved to the Appendix.)

% I created this file because we ran into conflicts so many times.
%\section{Lower bound}
%\input{lowerbound}
\input{lowerbound-alternative}

\section{Applications}

%In this section, we present two applications of our algorithm.

\subsection{A Distributed Algorithm for Random Spanning Tree}
\label{sec:rst}
\input{rst}

\subsection{Decentralized Estimation of Mixing Time}
%\danupon{To fix: change $l$ to $\ell$.}
\label{sec:mixingtime} We now present an algorithm to estimate the
mixing time of a graph from a specified source. Throughout this
section, we assume that the graph is connected and non-bipartite
(the conditions under which mixing time is well-defined). The main
idea in estimating the mixing time is, given a source node, to run
many random walks of length $\ell$ using the approach described in
the previous section, and use these to estimate the distribution
induced by the $\ell$-length random walk. We then compare the
distribution at length $\ell$, with the stationary distribution to
determine if they are {\em close}, and if not, double $\ell$ and
retry. For this approach, one issue that we need to address is how
to compare two distributions with few samples efficiently (a
well-studied problem). We introduce some definitions before
formalizing our approach and theorem.

%(GOPAL --- Something wrong with the above sentence.)

%We need to address the issue of what value(s) of $\ell$ to try. Further,
%we need to efficiently compare two distributions with minimum number of walks.
%Our algorithm estimates the mixing time with respect to a specified source.
%We do not necessarily find the worst case mixing time of the graph.
% We introduce some definitions before making our theorem precise.




%However, instead of computing the exact mixing time, we compute
%the time required for {\em approximate} mixing of a random walk. That
%is, we compute a length $\ell$ such that running a random walk for $\ell$
%steps from an initial distribution ends at a node with a probability
%distribution that is {\em close} to the stationary distribution.

%\atish{TO FIX THIS SECTION. DO WE NEED TO DEFINE IT FOR A SOURCE, OR CAN WE MAKE IT CARRY THROUGH WITHOUT IT? IF ALL WORKS, THEN JUST NEED TO DEFINE MIXING TIME, NOT NEAR MIXING TIME}

\begin{definition} [Distribution vector]
Let $\pi_x(t)$ define the probability distribution vector reached
after $t$ steps when the initial distribution starts with
probability $1$ at node $x$. Let $\pi$ denote the stationary
distribution vector.
%Define the distance from the stationary distribution for source $x$ and time steps $t$ by $r_x(t) = ||\pi_x(t) - \pi||_1$.
\end{definition}

%In standard notation, the smallest $\tau$ is said to be the mixing time of the graph, $\tau_{mix}$, if for all $x$, we have $r_x(\tau)\leq 1/2e$. It is usually desired that one approximates $\tau_{mix}$ from above.


% (GOPAL --- Better not to talk about ``worst case" mixing time at all here.
%In future work, we can mention that. Here we will simple only focus on
%$tau^x_{mix}$.)


 %That is usually the distribution is said to have mixed if it reaches within $1/2e$ of the stationary distribution, in terms of $L_1$ distance. For specific applications, it may be desired that the distribution be even closer to the stationary distribution.

\begin{definition}[$\tau^x(\epsilon)$ and $\tau^x_{mix}$, mixing time for source
$x$] Define $\tau^x(\epsilon) = \min \{t : ||\pi_x(t) - \pi||_1 <
\epsilon\}$. Define $\tau^x_{mix} = \tau^x(1/2e)$.
% to be the smallest $t$ such that $r_x(t)\leq (1/2e)$.
\end{definition}

The goal is to estimate $\tau^x_{mix}$. Notice that the definition
of $\tau^x_{mix}$ is consistent due to the following standard
monotonicity property of distributions (proof in the full
version~\cite{fullversion}).
%(proof  in Appendix~\ref{app:mon}).

\begin{lemma}\label{lem:monotonicity}
$||\pi_x(t+1) - \pi||_1 \leq  ||\pi_x(t) - \pi||_1$.
%If $r_x(t)\leq \epsilon$, then $r_x(t+1)\leq \epsilon$.
%The $\epsilon$-near mixing time is monotonic property, i.e., if a walk of length $\ell$ is
%$\epsilon$-near mixing, so is a walk of any length greater than $\ell$.
\end{lemma}
%\begin{proof}
%The monotonicity follows from the fact that
%$||Ax||_1 \le ||x||_1$ where $A$ is the transpose of the transition probability matrix of the graph and $x$ is any probability vector. That is, $A(i,j)$ denotes the probability of transitioning from node $j$ to node $i$. This in turn follows from the fact that the sum of entries of any column of $A$ is 1.

%Now let $\pi$ be the stationary distribution of the transition matrix $A$. This implies that if $\ell$ is $\epsilon$-near mixing, then $||A^lu - \pi||_1 \leq \epsilon$, by definition of $\epsilon$-near mixing time. Now consider $||A^{l+1}u - \pi||_1$. This is equal to $||A^{l+1}u - A\pi||_1$ since $A\pi = \pi$.  However, this reduces to $||A(A^{l}u - \pi)||_1 \leq \epsilon$. It follows that $(\ell+1)$ is $\epsilon$-near mixing.
%\end{proof}

%\begin{definition} [$\epsilon$-near mixing time]
%We say that $\ell$ is the {\em $\epsilon$-near mixing
%time} of an undirected graph if the $L_1$-distance between the steady
%state distribution and the distribution obtained after a random walk
%of length $\ell$ is at most $\epsilon$. Further, $\ell$ must be the shortest
%such length that satisfies this condition.
%\end{definition}

%In some cases, however, the degree distribution, and hence the steady state distribution of the graph may not be known to nodes
%and may be too expensive to compute. In such situations, we have to resort to a somewhat weaker notion of approximate mixing time.

%\begin{definition} [$\epsilon$-close mixing time]
%We say that $l$ is the {\em $\epsilon$-near mixing
%time} of a directed graph if the $L_1$-distance between the
%distribution obtained after a random walk of length $l$ from any
%initial distribution, and the distribution obtained after a random
%walk of length $l+poly(1/\epsilon)$, is at most $\epsilon$.
%\end{definition}

%Here, when referring to the mixing time of the graph, which we denote by $\tau_{mix}$, one usually considers $\epsilon = 1/2e$ or $\epsilon = 1/4$.
%This definition of approximation is reasonable and is corroborated by the following lemma.

To compare two distributions, we use the technique of Batu
et~al.~\cite{BFFKRW} to determine if the distributions are
$\epsilon$-near. Their result (slightly restated) is summarized in
the following theorem.

\begin{theorem}[\cite{BFFKRW}]\label{thm:batu}
For any $\epsilon$, given $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$
samples of a distribution $X$ over $[n]$, and a specified
distribution $Y$, there is a test that outputs PASS with high
probability if $|X-Y|_1\leq \frac{\epsilon^3}{4\sqrt{n}\log n}$, and
outputs FAIL with high probability if $|X-Y|_1\geq 6\epsilon$.
%one can test if $X$ is $\epsilon$-near in the $L_1$ norm to
%a specific distribution $Y$.
%Given two unknown
%distributions $X$ and $Y$ over $[n]$, one can determine whether $X$ and $Y$ are $\epsilon$-near in $L_1$
%norm with $\tilde{O}(n^{2/3}poly(\epsilon^{-1}))$ samples each from $X$ and $Y$.
\end{theorem}

%Notice that their theorem can be translated to high probability guarantee on both sides by just repeating the test $\Theta(\log n)$ times; the test that outputs FAIL with constant probability can be boosted. We briefly describe their algorithm.

%The algorithm partitions the set of nodes in to buckets based on the steady state probabilities. Each of the $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from $X$ now falls in one of these buckets. Further, the actual count of number of nodes in these buckets for distribution $Y$ are counted. The exact count for $Y$ for at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets (corresponding to the samples) is compared with the number of samples from $X$; these are compared to determine if $X$ and $Y$ are close. We refer the reader to their paper~\cite{BFFKRW} for a precise description.
We now give a very brief description of the algorithm of Batu
et~al.~\cite{BFFKRW} to illustrate that it can in fact be simulated on
the distributed network efficiently. The algorithm partitions the
set of nodes into buckets based on the steady state probabilities.
Each of the $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from $X$
now falls in one of these buckets. Further, the actual count of
number of nodes in these buckets for distribution $Y$ are counted.
The exact count for $Y$ for at most
$\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets (corresponding to
the samples) is compared with the number of samples from $X$; these
are compared to determine if $X$ and $Y$ are close. We refer the
reader to their paper~\cite{BFFKRW} for a precise description.


Our algorithm starts with $\ell=1$ and runs $K=\tilde{O}(\sqrt{n})$
walks of length $\ell$ from the specified source $x$. As long as the
test of comparison with the steady state distribution outputs FAIL
(for the choice $\epsilon=1/12e$), $\ell$ is doubled. This process is
repeated to identify the largest $\ell$ such that the test outputs
FAIL with high probability and the smallest $\ell$ such that the
test outputs PASS with high probability. These give lower and upper
bounds on the required $\tau^x_{mix}$ respectively. Our resulting
theorem is presented below and the proof is in the full
version~\cite{fullversion}.
%(with algorithm details) is placed in Appendix~\ref{app:mixproof}.

%{\bf ATISH - NEED TO FIX BELOW AND BE CAREFUL IN WHAT WE CLAIM, SINCE NOTICE THAT BATU ET. AL. TEST HAS A GAP OF THE TWO SIDED TEST. GOPAL, HOW DO YOU THINK WE SHOULD STATE THIS?}

%\begin{theorem}\label{thm:mixmain}
%Given a graph with diameter $D$, a node $x$ can find, in $\tilde{O}(n^{1/2} + n^{1/4}\sqrt{D\tau^x_{mix}})$ rounds, a time
%$\tilde{\tau}^x_{mix}$ such that $\tau^x_{mix}\leq \tilde{\tau}^x_{mix}\leq \tau^x(\frac{1}{6912e\sqrt{n}\log n})$.
\begin{theorem}\label{thm:mixmain}
Given a graph with diameter $D$, a node $x$ can find, in
$\tilde{O}(n^{1/2} + n^{1/4}\sqrt{D\tau^x(\epsilon)})$ rounds, a
time $\tilde{\tau}^x_{mix}$ such that $\tau^x_{mix}\leq
\tilde{\tau}^x_{mix}\leq \tau^x(\epsilon)$, where $\epsilon =
\frac{1}{6912e\sqrt{n}\log n}$.
% where $T$ is the smallest time such that $r_x(T)\leq \frac{1}{6912e\sqrt{n}\log n}$.
%This can be done in $\tilde{O}(n^{1/2} + n^{1/4}\sqrt{Dt_{mix}})$ rounds.
%
%that is w.h.p. between the $6\epsilon$-near mixing time and $\frac{\epsilon^3}{4\sqrt{n}\log n}$-near mixing time in $\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + n^{1/4}poly(\epsilon^{-1})\sqrt{Dt_{mix}})$ rounds.
%
%If the degree distribution is unknown to the nodes, a node can find an $\epsilon$-close mixing time in $\tilde{O}(n^{2/3}poly(\epsilon^{-1}) + n^{1/3}poly(\epsilon^{-1})\sqrt{Dt_{mix}})$ rounds.
\end{theorem}
\begin{proof}
For undirected unweighted graphs, the stationary distribution of the
random walk is known and is $\frac{\deg(i)}{2m}$ for node $i$ with
degree $\deg(i)$, where $m$ is the number of edges in the graph.  If
a source node in the network knows the degree distribution, we only
need $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from a
distribution to compare it to the stationary distribution.  This can
be achieved by running {\sc MultipleRandomWalk} to obtain $K =
\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ random walks. We choose
$\epsilon = 1/12e$. To find the approximate mixing time, we try out
increasing values of $\ell$ that are powers of $2$.  Once we find the
right consecutive powers of $2$, the monotonicity property admits a
binary search to determine the exact value for the specified
$\epsilon$.
%of $\epsilon$-near mixing
%time. Note that we can apply binary search as $\epsilon$-near mixing
%time is a monotonic property.

The result in~\cite{BFFKRW} can also be adapted to compare with the
steady state distribution even if the source does not know the
entire distribution. As described previously, the source only needs
to know the {\em count} of number of nodes with steady state
distribution in given buckets. Specifically, the buckets of interest
are at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ as the count is
required only for buckets from which a sample is drawn. Since each
node knows its own steady state probability (determined just by its
degree), the source can broadcast a specific bucket information and
recover, in $O(D)$ steps, the count of number of nodes that fall
into this bucket. Using the standard upcast technique previously
described, the source can obtain the bucket count for each of these
at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets in
$\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + D)$ rounds.


We have shown previously that a source node can obtain $K$ samples
from $K$ independent random walks of length $\ell$ in $\tilde{O}(K +
\sqrt{K\ell D})$ rounds. Setting $K=\tilde{O}(n^{1/2}poly(\epsilon^{-1})
+ D)$ completes the proof.
\end{proof}


If our estimate of $\tau^x_{mix}$ is close to the mixing time
of the graph, defined as $\tau_{mix} = \max_{x}{\tau^x_{mix}}$, then
this would allow us to estimate several related quantities. Given a
mixing time $\tau_{mix}$, we can approximate the spectral gap
($1-\lambda_2$) and the conductance ($\Phi$) due to the
%following known relations. The spectral gap is the $1-\lambda_2$ where $\lambda_2$ is the second eigenvalue of the connected transition matrix. It is
known relations that $\frac{1}{1-\lambda_2}\leq \tau_{mix}\leq
\frac{\log n}{1-\lambda_2}$ and $\Theta(1-\lambda_2)\leq \Phi\leq
\Theta(\sqrt{1-\lambda_2})$ as shown in~\cite{JS89}.
%Further, the conductance $\Phi$ and of the graph is related as $\Theta(1-\lambda_2)\leq \Phi\leq \Theta(\sqrt{1-\lambda_2})$ as shown in~\cite{JS89}. The conductance of a
%graph $G$ is defined as $\phi(G)=\min_{S:|S| \le
%|V|/2}{\frac{E(S,V(G)\setminus S)}{E(S)}}$ where $E(S,V(G)\setminus
%S)$ is the number of the edges spanning the cut, and $E(S)$ is the
%number of edges on the smaller side of the cut.

%Assuming that the $\epsilon$-near mixing time is close to the actual mixing time, we get
%the following corollary.

%\begin{corollary}
%Assuming the $\epsilon$-near mixing time is close to $\tau_{mix}$, the conductance of the network can be approximated to a
%quadratic factor in $\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + n^{1/4}poly(\epsilon^{-1})\sqrt{D\tau_{mix}})$ rounds, where
%$\tau_{mix}$ is the actual mixing time of the graph.
%\end{corollary}

%Using this assumption, we can also approximate the spectral gap of the graph as $\tau_{mix}$ is within a $\log n$ factor of the reciprocal of the spectral gap.


\iffalse The work of Kempe and McSherry \cite{kempe} can be used to
estimate $\tau^x_{mix}$ in $\tilde{O}(\tau^x_{mix})$ rounds.
%\danupon{Should we state this precisely? Also, should we define $\tau_{mix}$?}
Our approach attempts at reducing the dependence on $\tau^x_{mix}$.
%We do not show how to find the second eigenvector of the transition matrix.
It should be noted that Kempe and McSherry \cite{kempe} in fact show
much more; they show how to approximate the top $k$ eigenvectors in
$\tilde{O}(\tau_{mix})$ rounds if two adjacent nodes are allowed to
exchange $O(k^3)$ messages per round. \fi

%Gopal -- I remove the above, since we already say this in the Intro.




\section{Concluding Remarks}\label{sec:conclusion}

%Open questions: (1) Close the gap? (2) Directed Graphs? (3) Handle Byzantine Failures?

This paper makes progress towards resolving the time complexity of
distributed computation of random walks in undirected networks. The
dependence on the diameter $D$ is still not tight, and it would be
interesting to settle this.
%Furthermore, showing a general lower bound that applies to any distributed algorithm in the CONGEST model is an open issue.
There is also a gap in our bounds for performing $k$ independent
random walks. Further, we look at the CONGEST model, which enforces a
bandwidth restriction, and minimize the number of rounds. While our
algorithms have good {\it amortized} message complexity over several
walks,
%the total message complexity of our algorithms are high. It
it would be nice to come up with algorithms that are round efficient
and yet have smaller message complexity.

We presented two algorithmic applications of our distributed random
walk algorithm: estimating mixing times and computing random
spanning trees. It would be interesting to improve upon these
results. For example, is there a $\tilde{O}(\sqrt{\tau^x_{mix}} +
n^{1/4})$ round algorithm to estimate $\tau^x_{mix}$; and is there a
$\tilde{O}(n)$ round algorithm for RST?

There are several interesting directions to take this work further.
Can these techniques be useful for estimating the second eigenvector
of the transition matrix (useful for sparse cuts)? Are there
efficient distributed algorithms for random walks in directed graphs
(useful for PageRank and related quantities)? Finally, from a
practical standpoint, it is important to develop algorithms that are
robust to failures and it would be nice to extend our techniques to
handle such node/edge failures.
%(1) An efficient distributed random spanning tree algorithm and (2) An distributed computation of mixing time.
%
%It will be interesting to see
%whether we can improve over the running time of  $\tilde{O}(\sqrt{m}D)$  algorithm for RST. In particular, the time complexity of distributed RST is wide open.
%Another interesting open question is whether the source $x$ based mixing time of the graph can be estimated more efficiently, in particular in $\tilde{O}(\sqrt{\tau^x_{mix}} + n^{1/4})$ rounds? Currently our techniques require roughly $n^{1/4}$ factor more rounds. Further, can our techniques be extended to obtain fast decentralized algorithms to compute the worst case mixing time $\tau_{mix} = \max_{x}{\tau^x_{mix}}$ and perhaps the second eigenvector of the transition matrix (which can be used to approximate a sparse cut)?
%
%However, there are still significant open problems.
%We conjecture is that the true number of rounds to obtain an $\ell$-length random walk sample is $\Theta(\sqrt{\ell D})$. Thus removing (or at least reducing) the dependence on $\epsilon$ will be an important improvement.
%Alternately, it will also be interesting to see whether one can give lower bounds that depend on $\epsilon$.
%
%It will be also interesting
%to investigate the distributed complexity of  performing random walks in  directed graphs. This will be useful in the context of developing
%fast decentralized algorithms for computing PageRank and related quantities. We believe our approach can be useful for  doing decentralized computation
%in large-scale dynamic networks.
%From a practical standpoint, it is often important to develop algorithms that are robust to failures and therefore it would be nice to extend our techniques to handle such node and edge failures.
%
 %However, comparing the terms reveals that our result is better only if $\ell\geq \Omega(\frac{D}{\epsilon^{3/2}})$. Recall that $\frac{1}{\epsilon}$ is within a $\log n$ factor of the mixing time of the underlying network. Therefore, for our current results to be an {\em improvement}, we need $\ell$ to be more than mixing time to the power of $3/2$. This makes our results less interesting at least from a practical standpoint.
 %in our approach, this term comes as a result of a specific way to bound the number of times any node is visited. The dependence on $\epsilon$ might just be an artifact of our approach.
 %

%The focus of this paper is on the time complexity of random walks; however, this
%can come at the cost of increased message complexity.
%It would also be interesting to study tradeoffs between time and messages.


%
% The following two commands are all you need in the
% initial runs of your .tex file to
% produce the bibliography for the citations in your paper.
\bibliographystyle{abbrv}
\bibliography{Distributed-RW}  % sigproc.bib is the name of the Bibliography in this case
% You must have a proper ".bib" file
%  and remember to run:
% latex bibtex latex latex
% to resolve all references
%
% ACM needs 'a single self-contained file'!
%


\end{document}
