%\input{template}
\documentclass[11pt]{article}
%\documentclass{sig-alternate}
\usepackage{algorithm}
\usepackage{algorithmic}

\usepackage{epsfig,amsthm,amsmath,color, amsfonts}
%\usepackage{epsfig,color} % redundant: both packages are already loaded on the line above
\newcommand{\xxx}[1]{\textcolor{red}{#1}}
\usepackage{fullpage}
\usepackage{framed}
%\usepackage{epsf}

\newtheorem{theorem}{Theorem}[section]
%\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
%\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
\theoremstyle{remark}\newtheorem{observation}[theorem]{Observation}% amsthm has no "observation" style; valid styles are plain/definition/remark

\newcommand{\comment}[1]{}
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
\newcommand{\theoremref}[1]{(\ref{#1})}
\newenvironment{proof1}{\noindent \mbox{}{\bf Proof:}}{\QED}
%\newenvironment{observation}{\mbox{}\\[-10pt]{\sc Observation.} }%
%{\mbox{}\\[5pt]}

\def\m{\bar{m}}
\def\eps{{\epsilon}}
\def\half{\frac{1}{2}}
\def\third{\frac{1}{3}}
\def\quarter{\frac{1}{4}}
\def\polylog{\operatorname{polylog}}
\newcommand{\ignore}[1]{}
\newcommand{\eat}[1]{}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}

\newcommand{\algorithmsize}[0]{}


\begin{document}


\title{Can you Walk Faster than you can Walk?}

\maketitle
\begin{abstract}
Performing random walks in networks is a fundamental primitive that
has found applications in many areas of computer science, including
distributed computing. In this paper, we focus on the problem of
performing random walks efficiently in a distributed network. Given
bandwidth constraints, the goal is to minimize the number of rounds
required to obtain a random walk sample. We show that a random walk of length $\ell$
can be performed in $\tilde{O}(\ell^{1/2}D^{1/2}\epsilon^{-1/4})$ rounds where $D$
is the diameter of the network and $\epsilon$ is the spectral gap of the transition matrix corresponding to the network random walk. This is an
improvement over the naive $\ell$-round algorithm for $\ell > \frac{D}{\sqrt{\epsilon}}$. Our results also improve upon the previous best
$\tilde{O}(\ell^{2/3}D^{1/3})$ for most interesting values of $\ell$.
\end{abstract}


%\section{Introduction}

%Random walks play a central role in computer
%science, spanning a wide range of areas in both theory and practice,
%including distributed computing. Algorithms in many different
%applications use random walks as an integral subroutine.
%Applications in networks include token management~\cite{IJ90, BBF04,
%CTW93}, load balancing~\cite{KR04}, small-world routing~\cite{K00},
%search~\cite{ZS06,AHLP01,C05,GMS05,LCCLS02}, information propagation
%and gathering~\cite{BAS04,KKD01}, network topology
%construction~\cite{GMS05,LawS03,LKRG03}, checking
%expander~\cite{DT07}, constructing random spanning
%trees~\cite{Broder89, BIZ89, BFG+03}, monitoring
%overlays~\cite{MG07}, group communication in ad-hoc
%network~\cite{DSW06}, gathering and dissemination of information
%over a network \cite{AKL+79}, distributed construction of expander
%networks \cite{LawS03}, and peer-to-peer membership
%management~\cite{GKM03,ZSS05}.
%Random walks  have also been used to provide uniform and efficient
%solutions to distributed control of dynamic networks \cite{BBSB04}.
%The paper of \cite{ZS06} describes a broad range of network
%applications that can benefit from random walks in dynamic and
%decentralized settings.
% For further references on applications of random walks to distributed computing, see, e.g.~\cite{BBSB04,ZS06}.
%A key purpose of random walks in  many of these network
%applications is to perform  node sampling.
%While the sampling requirements in different applications vary,
%whenever a true sample is required from a random walk of certain
%steps, all applications perform the walks naively. In this paper we
%present the first non-trivial distributed random walk sampling
%algorithms that are significantly faster than the existing (naive)
%approaches.

% We consider a distributed network of processors modeled by an unweighted undirected  (connected) $n$-node graph. Each node corresponds to a processor (with a unique identity number). Initially, each node only has its own local knowledge of the network; i.e., it only knows its own ID and the IDs of its neighbors. Nodes communicate by exchanging messages in (synchronous) rounds: In each round a node can send $O(\log n)$ bits, per edge (this model is known as the  ${\cal CONGEST}$ model \cite{peleg}). The computation time of an algorithm is measured by the total number of rounds it requires. Our goal  is to study fast distributed
%algorithms for performing random walks in arbitrary networks.

%\subsubsection*{Problems}
%Although using random walks help in improving the performance of
%many distributed algorithms, all known algorithms perform random
%walks naively: Each walk of length $\ell$ is performed by sending a
%token for $\ell$ steps, picking a random neighbor with each step. Is
%there a faster way to perform a random walk distributively? In
%particular, we consider the following {\em basic} random walk
%problem.

%\textit{Computing One Random Walk where Destination Outputs Source.}
%Let $s$  be any  node in the network. We want a distributed algorithm
%such that, in the end, one node $v$ outputs the ID of $s$ where $v$
%is randomly picked according to the probability that it is the
%destination of a random walk of length $\ell$ starting at $s$ (the source node). We
%want an algorithm that finishes in the smallest number of rounds.

%We also consider the following generalizations of the above problem.
%\begin{enumerate}
%\item \textit{$k$ Random Walks, Destinations output Sources ($k$-RW-DoS)}: We have $k$ sources
%$s_1, s_2, ..., s_k$ (not necessarily distinct) and we want each of
%$k$ destinations to output an ID of its corresponding source.
%\item \textit{$k$ Random Walks, Sources output Destinations ($k$-RW-SoD)}: Same as above but we want each source to output the ID of its corresponding destination.
%\end{enumerate}

%It turns out that solving $k$-RW-SoD can be more expensive than
%solving $k$-RW-DoS. An extension of the first problem can be used in
%applications where the sources only want to know a ``synopsis'' of
%the destination, such as aggregating statistics and computing a
%function (max load, average load) by sampling nodes. The second
%problem is used when sources want to know data of each destination
%separately.

%To demonstrate that these problems are non-trivial, let us first
%focus on the basic random walk problem (which is equivalent to
%$1$-RW-DoS). The following naive algorithm finishes in ${O}(\ell)$
%rounds: Circulate a token (with ID of $s$ written on it) starting
%from $s$ for $\ell$ rounds (in each round, the node having the token
%currently, forwards it to a random neighbor) and, in the end, the
%vertex $v$ that holds the token outputs the ID of $s$. Our goal is
%to devise algorithms that are faster than this $\ell$-round
%algorithm. To achieve faster algorithms, a node cannot just wait
%until it receives the token and forwards it. It is necessary to
%``forward the token ahead of time''. One natural approach is to
%guess which nodes will be in the walk and ask them to forward the
%token ahead of time. However, even if one knew how many times each
%node is expected to be seen on the walk (without knowing the order),
%it is still not clear what running time one can guarantee. The
%difficulty is that many pre-forwarded tokens may cause congestion. A
%new approach is needed to obtain fast distributed computation of
%random walks. We present the first such results in this paper.

%\medskip
%\noindent \textbf{Notation:} Throughout the paper, we let $\ell$ be
%the length of the walks, $k$ be the number of walks, $D$ be the
%network diameter, $\delta$ be the minimum node degree and $n$ be the
%number of nodes in the network.

%\subsubsection*{Our Main Contributions}

%\subsubsection*{Applications and Related Work}
%Random walks have been used in a wide variety of applications in distributed networks as mentioned in the beginning. We describe here some of the applications in more detail.

%Speeding up distributed algorithms using random walks has been considered for a long time. Besides our approach of speeding up the random walk itself, one popular approach is to reduce the {\it cover time}.
%Recently, Alon et. al.~\cite{AAKKLT} show that performing several
%random walks in parallel reduces the cover time in various types of
%graphs. They assert that the problem with performing random walks is
%often the latency. In these scenarios where many walks are
%performed, our results could help avoid too much latency and yield
%an additional speed-up factor.

% A nice application of random walks is in  the design and analysis of expanders. We mention two results here. Law and Siu~\cite{LawS03} consider the problem of constructing expander graphs in a distributed fashion. One of the key subroutines in their algorithm is to perform several random walks from specified source nodes. While the overall running time of their algorithm depends on other factors, the specific step of
%computing random walk samples can be improved using our techniques
%presented in this paper. Dolev and Tzachar~\cite{DT07} use  random
%walks to check if a given graph is an expander. The first algorithm
%given in \cite{DT07} is essentially to run a random walk of length
%$n\log{n}$ and mark every visited vertices. Later, it is checked if
%every node is visited. It can be seen that our algorithm implies
%that the first step can be done in
%$\tilde{O}((n\log{n})^{2/3}D^{1/3})$ rounds.

%Broder~\cite{Broder89} and Wilson~\cite{Wilson96} gave algorithms to
%generate random spanning trees using random walks and Broder's
%algorithm was later applied to the network setting by Bar-Ilan and
%Zernik~\cite{BIZ89}. Recently Goyal et al.~\cite{GoyalRV09} show how
%to construct an expander/sparsifier using random spanning trees. If
%their algorithm is implemented on a distributed network, the
%techniques presented in this paper would yield an additional
%speed-up in the random walk constructions.

%Morales and Gupta~\cite{MG07} discuss about discovering a consistent
%and available monitoring overlay for a distributed system. For each
%node, one needs to select and discover a list of nodes that would
%monitor it. The monitoring set of nodes need to satisfy some
%structural properties such as consistency, verifiability, load
%balancing, and randomness, among others. This is where random walks
%come in. Random walks is a natural way to discover a set of random
%nodes that are spread out (and hence scalable), that can in turn be
%used to monitor their local neighborhoods. Random walks have been
%used for this purpose in another paper by Ganesh et al.~\cite{GKM03}
%on peer-to-peer membership management for gossip-based protocols.

%The only work that uses the same general approach as this paper is
%the recent paper of Das Sarma et al.~\cite{AtishGP08}. They consider
%the problem of finding random walks in data streams with the main
%motivation of finding PageRank. The same general idea of stitching
%together short walks is used. They consider the model where the graph is
%too big to store in main memory, and the algorithm has {\em streaming} access
%to the edges of the graph while maintaining limited storage. They show how to
%perform $\ell$ length random walks in about $\sqrt{\ell}$ passes over the data. This improves
%upon the naive $\ell$ pass approach and thereby leads to improved algorithms for estimating PageRank vectors. The distributed setting considered in this paper has very different constraints and motivations from the streaming setting and calls for new techniques.
%Recently, Sami and Twigg~\cite{ST08} consider lower bounds on the
%communication complexity of computing stationary distribution of
%random walks in a network. Although, their problem is  related to
%our problem, the lower bounds obtained do not  imply anything
%in our setting.

\section{Introduction}

PLEASE SEE OUR PODC PAPER FOR THE DESCRIPTION OF THE MODEL AND ALSO AN INTUITIVE DESCRIPTION OF THE ALGORITHMS (ENOUGH TO LOOK AT INTRO+SINGLE RANDOM WALK SECTIONS). 

\section{Algorithm for one random walk ($1$-RW-DoS)}\label{sec:one_walk_DoS}
\subsection{Description of the Algorithm}

%In this section, we present the main ideas of our approach by
%developing an algorithm for $1$-RW-DoS
% called {\sc Single-Random-Walk} (cf. Algorithm~\ref{alg:single-random-walk}) for undirected graphs. The naive upper and lower bounds for $1$-RW-DoS are $\ell$ and $D$ respectively (the lower bound is formalized later in this paper). We present the first nontrivial upper bound, i.e., perform walks of length $\ell>D$ in fewer than $\ell$ rounds. {\sc Single-Random-Walk} runs with two important variables: $\eta$ and $\lambda$. The main idea is to first perform $\eta$ random walks of length $\lambda$ from every node. Subsequently, starting at the source node $s$, these $\lambda$ length walks are ``stitched'' to form a longer walk (traversing a new walk of length $\lambda$ from the end point of the previous walk of length $\lambda$). Whenever a node is visited as an end point of such a walk of length $\lambda$, one of its (at most $\eta$) {\em unused} walks is sampled uniformly to preserve randomness. If all $\eta$ walks from a node have been used up, additional rounds are invested to obtain $\eta$ more walks from this node. This approach turns out to be round-efficient for three reasons. First, performing the initial set of $\eta$ walks of length $\lambda$ from all nodes simultaneously can be done efficiently. Second, we give a technique to perform $\eta$ walks of length $\lambda$ from a single node efficiently. Finally, stitching two $\lambda$ length walks can be done in about $D$ rounds.

The main algorithm for performing a single random walk is described in {\sc Single-Random-Walk}. This algorithm, in turn, uses {\sc Get-More-Walks} and {\sc Sample-Destination}. {\sc Single-Random-Walk} and {\sc Get-More-Walks} are slight modifications from the same algorithms in our PODC paper. 



$d(x)$ denotes the degree of $x$.

\newcommand{\mindegree}[0]{\delta}
\begin{algorithm}[t]
\caption{\sc Single-Random-Walk($s$, $\ell$)} \label{alg:single-random-walk}
\textbf{Input:} Starting node $s$, and desired walk length $\ell$.\\
\textbf{Output:} Destination node of the walk outputs the ID of
$s$.\\

\textbf{Phase 1: (Each node $x$ performs $\eta_x=\Theta(d(x)\eta\log n)$ random walks of length
$\lambda + r_i$ where $r_i$ (for each $1\leq i\leq \eta_x$) is chosen independently and uniformly at random
in the range $[0,\lambda]$ for each of the $\eta_x$ walks.)}
\begin{algorithmic}[1]

\STATE Let $r_{max} = \max_{1\leq i\leq \eta_x}{r_i}$, where the $r_i$ are the random numbers chosen independently for each of the $\eta_x$ walks.
Each node $x$ constructs $\eta_x$ messages containing
its ID and in addition, the $i$-th message contains the desired walk length of $\lambda + r_i$.

\FOR{$i=1$ to $\lambda + r_{max}$}

\STATE This is the $i$-th iteration. Each node $v$ does the
following: Consider each message $M$ held by $v$ and received in the
$(i-1)$-th iteration (having current counter $i-1$). If the message $M$'s
desired walk length is $\leq i$, then $v$ stores the ID of the source ($v$ is the
desired destination). Else, $v$ picks a neighbor
$u$ uniformly at random and forwards $M$ to $u$ after incrementing
its counter.

\COMMENT{Note that any iteration could require more than 1 round.}

\ENDFOR

\end{algorithmic}


\textbf{Phase 2: (Stitch $\Theta(\ell/\lambda)$ walks, each of length in $[\lambda,2\lambda]$)}
\begin{algorithmic}[1]
\STATE $s$ creates a message called ``token'' which contains the ID
of $s$

\STATE The algorithm generates a multi-set of {\em Connector} nodes $C$. Initialize $C = \{s\}$

\WHILE {Length of walk completed is less than $\ell-2\lambda$}

  \STATE Let $v$ be a node that is currently holding a token.

  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
  returned value (which is a destination of an unused random walk starting at $v$
  of length between $\lambda$ and $2\lambda$.)

  \IF{$v'$ = {\sc null} (all walks from $v$ have already been used up)}

  \STATE $v$ calls {\sc Get-More-Walks($v$, $\eta$, $\lambda$)} (Perform $\Theta(\ell/\lambda)$ walks
  of length $\lambda$ starting at $v$)

  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
  returned value

  \ENDIF

  \STATE $v$ sends the token to $v'$

  \STATE $C = C \cup \{v\}$

\ENDWHILE

\STATE Walk naively until $\ell$ steps are completed (this is at
most another $2\lambda$ steps)

\STATE A node holding the token outputs the ID of $s$

\end{algorithmic}

\end{algorithm}


\begin{algorithm}[t]
\caption{\sc Get-More-Walks($v$, $\lambda$)} \label{alg:Get-More-Walks}
(Starting from node $v$, perform $\ell/\lambda$ random walks, each of length $\lambda + r_i$ where
$r_i$ is chosen uniformly at random in the range $[0,\lambda]$ for the $i$-th walk.) \\
\begin{algorithmic}[1]
\STATE The node $v$ constructs $\ell/\lambda$ (identical) messages
containing its ID.

\FOR{$i=1$ to $\lambda$}

\STATE Each node $u$ does the following:

\STATE - For each message $M$ held by $u$,
pick a neighbor $z$ uniformly at random as a receiver of $M$.

\STATE - For each neighbor $z$ of $u$, send the ID of $v$ and the number
of times that $z$ was picked as a receiver, denoted by $c(u, z)$.

\STATE - Each neighbor $z$ of $u$, upon receiving the ID of $v$ and
$c(u, z)$, constructs $c(u, z)$ messages, each containing the ID of
$v$.

\ENDFOR

\STATE Each walk has now completed $\lambda$ steps. These walks are now extended probabilistically
further by $r$ steps where each $r$ is independent and uniform in the range $[0,\lambda]$.

\FOR{$i=1$ to $\lambda$}

\STATE For each message, independently with probability $\frac{1}{\lambda}$, stop sending the message further and save the ID of the source node (in this event, the node with the message is the destination). For messages $M$ that are not stopped, each node picks a neighbor correspondingly and sends the messages forward as before.

\ENDFOR

\STATE At the end, each destination knows the source ID as well as the length of the corresponding walk.

\end{algorithmic}

\end{algorithm}

\begin{algorithm}[t]
\caption{\sc Sample-Destination($v$)} \label{alg:Sample-Destination}
\textbf{Input:} Starting node $v$, and desired walk of any length in range $[\lambda,2\lambda]$.\\
\textbf{Output:} A node sampled from among the stored
walks from $v$. \\

\textbf{Sweep 1: (Perform BFS tree)}
\begin{algorithmic}[1]

\STATE Construct a Breadth-First-Search (BFS) tree rooted at $v$.
While constructing, every node stores its parent's ID. Denote such
tree by $T$.

\end{algorithmic}

\textbf{Sweep 2: (Tokens travel up the tree, sampling as you go)}
\begin{algorithmic}[1]

\STATE We divide $T$ naturally into levels $0$ through $D$ (where
nodes in level $D$ are leaf nodes and the root node $v$ is in level
$0$).

\STATE Tokens are held by nodes as a result of doing walks of length
between $\lambda$ and $2\lambda$ from $v$ (which is done in either Phase~1 or {\sc
Get-More-Walks} (cf. Algorithm~\ref{alg:Get-More-Walks})) A node
could have more than one token.

\STATE Every node $u$ that holds token(s) picks one token, denoted
by $d_0$, uniformly at random and lets $c_0$ denote the number of
tokens it has.

\FOR{$i=D$ down to $0$}

\STATE Every node $u$ in level $i$ that either receives token(s)
from children or possesses token(s) itself do the following.

\STATE Let $u$ have tokens $d_0, d_1, d_2, \ldots, d_q$, with counts
$c_0, c_1, c_2, \ldots, c_q$ (including its own tokens). The node
$u$ samples one of $d_0$ through $d_q$, with probabilities
proportional to the respective counts. That is, for any $0\leq j\leq
q$, $d_j$ is sampled with probability
$\frac{c_j}{c_0+c_1+\ldots+c_q}$.

\STATE The sampled token is sent to the parent node (unless already
at root), along with a count of $c_0+c_1+\ldots+c_q$ (the count
represents the number of tokens from which this token has been
sampled).

\ENDFOR

\STATE The root outputs the ID of the owner of the final sampled
token. Denote such node by $u_d$.

\end{algorithmic}

\textbf{Sweep 3: (Go and delete the sampled destination)}
\begin{algorithmic}[1]

\STATE $v$ sends a message to $u_d$ (e.g., via broadcasting). $u_d$
deletes one token of $v$ it is holding (so that this random walk of
length $\lambda$ is not reused/re-stitched).
\end{algorithmic}

\end{algorithm}

\begin{definition}
Connectors.
\end{definition}

\begin{claim}
Sample destination returns a destination from a random walk whose length is uniform in the range $[\lambda,2\lambda]$.
\end{claim}

%We now explain algorithm {\sc Single-Random-Walk} (cf.
%Algorithm~\ref{alg:single-random-walk}) in some more detail.  The
%algorithm consists of two phases.
%In the first phase, each node performs $\eta$ random walks of length
%$\lambda$ each. To do this, each node initially constructs $\eta$
%messages with its ID. Then, each node forwards each message to a
%random neighbor. This is done for $\lambda$ steps. At the end of
%this phase, if node $u$ has $k$ messages with the ID of node $v$
%written on them, then $u$ is a destination of $k$ walks starting at
%$v$. Note that $v$ has no knowledge of the destinations of its own
%walks. The main technical issue to deal with here is that performing
%many simultaneous random walks can cause too much congestion. We
%show a key lemma (Lemma \ref{lem:mainone}) that bounds the time
%needed for this phase.

%In the second phase, we perform a random walk starting from source
%$s$ by ``stitching'' walks of length $\lambda$ obtained in the first
%phase into a longer walk. The process goes as follows. Imagine that
%there is a token initially held by $s$. Among $\eta$ walks starting
%at $s$ (obtained in phase 1), randomly select one. Note that this
%step is not straightforward since $s$ has no knowledge of the
%destinations of its walks. Further, selecting an arbitrary
%destination would violate randomness. (A minor technical point: one
%may try to use the $i$-th walk when it is reached for the $i$-th
%time;  however, this is not possible because one cannot mark tokens
%separately in {\sc Get-More-Walks} (described later), since we only
%send counts forward to avoid congestion on edges). {\sc
%Sample-Destination} algorithm (cf.
%Algorithm~\ref{alg:Sample-Destination}) is used to perform this
%step. We prove in Lemma~\ref{lem:Sample-Destination} that this can
%be done in $O(D)$ rounds.

%When {\sc Sample-Destination}($v$) is called by any node $v$, this
%algorithm randomly picks a message with ID of $v$ written on it,
%returns the ID of the node that is holding this message, and then
%deletes it. If there is no such message (e.g., when {\sc
%Sample-Destination}($v$) has been called $\eta$ times), it returns
%{\sc null}.

%Let $v$ receive $u_d$ as an output from {\sc Sample-Destination}.
%$v$ sends the token to $u_d$ and the process repeats. That is, $u_d$
%randomly selects a random walk starting at $u_d$ and forwards the
%token to the destination. If the process continues without {\sc
%Sample-Destination} returning {\sc null}, then a walk of length
%$\ell$ will complete after $\ell/\lambda$ repetitions.

%However, if {\sc null} is returned by {\sc Sample-Destination} for
%$v$, then the token cannot be forwarded further. At this stage,
%$\eta$ more walks of length $\lambda$ are performed from $v$ by
%calling {\sc Get-More-Walks}($v$, $\eta$, $\lambda$) (cf.
%Algorithm~\ref{alg:Get-More-Walks}). This algorithm creates $\eta$
%messages with ID $v$ and forwards them for $\lambda$ random steps.
%This is done fast by only sending counts along edges that require
%multiple messages. This is crucial in avoiding congestion. While one
%cannot directly bound the number of times any particular node $v$
%invokes {\sc Get-more-Walks}, a simple amortization argument is used to
%bound the running time of invocations over all nodes.

\subsection{Analysis}

The following theorem states the main result of this paper. It states that the algorithm {\sc Single-Random-Walk} correctly samples a node after a random walk of $\ell$ steps and the algorithm takes, with high probability, $\tilde{O}(\sqrt{\ell D}\epsilon^{-1/4})$ rounds. Here $D$ is the diameter of the graph and $\epsilon$ is the eigenvalue gap in the graph's transition probability matrix. 

\begin{theorem}\label{thm:1-walk}
Algorithm {\sc Single-Random-Walk} (cf. Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS and, with
high probability\footnote{With high probability means with probability at least $(1-\frac{1}{n})$ throughout this paper.},
finishes in  $\tilde{O}(\sqrt{\ell D}\epsilon^{-1/4})$ rounds.
\end{theorem}

The main difference in this paper from the PODC paper is that with a slight modification in the algorithm, we are able to bound the number of times any node is visited (based on the length of the walk). This in turn allows us to bound the number of times {\sc Get-More-Walks} will be required, and consequently get an improved result. 


We begin by analyzing the time needed by Phase 1 of Algorithm {\sc
Single-Random-Walk}. [THIS PART WILL BE ALMOST THE SAME AS PODC PAPER]

\begin{lemma} \label{lem:phase1}
Phase~1 finishes in $O(\frac{\lambda \eta \log{n}}{\mindegree})$
rounds with high probability, where $\delta$ is the minimum node
degree.
\end{lemma}
\begin{proof}
%Consider the case when each node $v$ creates $\eta \cdot
%\frac{degree(v)}{\mindegree}\geq \eta$ messages. We show that the
%lemma  holds even in this case. For each message $M$, any $j=1, 2,
%..., \lambda$, and any edge $e$, we define $X_M^j(e)$ to be a random
%variable having value 1 if $M$ is sent through $e$ in the $j^{th}$
%iteration (i.e., when the counter on $M$ has value $j-1$). Let
%$X^j(e)=\sum_{M: \text{message}} X_M^j(e)$.  We compute the expected
%number of messages that go through an edge, see claim below.

\begin{claim}
\label{claim:first} For any edge $e$ and any $j$,
$\mathbb{E}[X^j(e)]=2\frac{\eta}{\mindegree}$.
\end{claim}
%\begin{proof}
%Assume that each node $v$ starts with
%$\eta\cdot\frac{degree(v)}{\mindegree}\geq \eta$ messages. Each
%message takes a random walk. We prove that after any given number of
%steps $j$, the expected number of messages at node $v$ is still
%$\eta\frac{degree(v)}{\mindegree}\geq \eta$.  Consider the random
%walk's probability transition matrix, call it $A$. In this case $Au
%= u$ for the vector $u$ having value $\frac{degree(v)}{2m}$ where
%$m$ is the number of edges in the graph (since this $u$ is the
%stationary distribution of an undirected unweighted graph). Now the
%number of messages we started with at any node $i$ is proportional
%to its stationary distribution, therefore, in expectation, the
%number of messages at any node remains the same.

%To calculate $\mathbb{E}[X^j(e)]$, notice that edge $e$ will receive
%messages from its two end points, say $x$ and $y$. The number of
%messages it receives from node $x$ in expectation is exactly the
%number of messages at $x$ divided by $degree(x)$. The lemma follows.
%\end{proof}

%By Chernoff's bound (e.g., in~\cite[Theorem~4.4.]{MU-book-05}), for
%any edge $e$ and any $j$,
%$$\mathbb{P}[X^j(e)\geq 4\log{n}\frac{\eta}{\mindegree}]\leq 2^{-4\log{n}}=n^{-4}.$$
%It follows that the probability that there exists an edge $e$ and an
%integer $1\leq j\leq \lambda$ such that $X^j(e)\geq
%4\log{n}\frac{\eta}{\mindegree}$ is at most $|E(G)| \lambda
%n^{-4}\leq \frac{1}{n}$ since $|E(G)|\leq n^2$ and $\lambda\leq
%\ell\leq n$ (by the way we define $\lambda$).

%Now suppose that $X^j(e)\leq 4\log{n}\frac{\eta}{\mindegree}$ for
%every edge $e$ and every integer $j\leq \lambda$. This implies that
%we can extend all walks of length $i$ to length $i+1$ in
%$4\log{n}\frac{\eta}{\mindegree}$ rounds. Therefore, we obtain walks
%of length $\lambda$ in $4\lambda\frac{\eta}{\mindegree}\log{n}$
%rounds as claimed. (Note that if $\eta\leq \mindegree$, we still get
%a high probability bound for $X^j(e)\geq 4\log{n}$.)
\end{proof}

We next show the time needed for {\sc Get-More-Walks} and {\sc
Sample-Destination}.

\begin{lemma} \label{lem:get-more-walks}
For any $v$, {\sc Get-More-Walks($v$, $\eta$, $\lambda$)} always
finishes within $O(\lambda)$ rounds.
\end{lemma}

%\begin{proof}
%Consider any node $v$ during the execution of the algorithm. If it
%contains $x$ copies of the source ID, for some $x$, it has to pick
%$x$ of its neighbors at random, and pass the source ID to each of
%these $x$ neighbors. Although it might pass these messages to less
%than $x$ neighbors, it sends only the source ID and a {\em count} to
%each neighbor, where the count represents the number of copies of
%source ID it wishes to send to such neighbor. Note that there is
%only one source ID as one node calls {\sc Get-More-Walks} at a time.
%Therefore, there is no congestion and thus the algorithm terminates
%in $O(\lambda)$ rounds.
%\end{proof}

\begin{lemma} \label{lem:Sample-Destination}
{\sc Sample-Destination} always finishes within $O(D)$ rounds.
\end{lemma}

\begin{lemma}\label{lem:correctness-sample-destination}
Algorithm {\sc Sample-Destination}($v$) (cf.
Algorithm~\ref{alg:Sample-Destination}), for any node $v$, samples a
destination of a walk of length $\lambda$ uniformly at random.
\end{lemma}

ABOVE THREE LEMMAS ARE ALSO SIMILAR TO PODC PAPER. 

%\begin{proof}
%Constructing a BFS tree clearly takes only $O(D)$ rounds. In the
%second phase where the algorithm wishes to {\em sample} one of many
%tokens (having its ID) spread across the graph. The sampling is done
%while retracing the BFS tree starting from leaf nodes, eventually
%reaching the root. The main observation is that when a node receives
%multiple samples from its children, it only sends one of them to its
%parent. Therefore, there is no congestion. The total number of
%rounds required is therefore the number of levels in the BFS tree,
%$O(D)$. The third phase of the algorithm can be done by broadcasting
%(using a BFS tree) which needs $O(D)$ rounds.
%\end{proof}

%Next we show the correctness of the {\sc Sample-Destination}
%algorithm.


%\begin{proof}
%Assume that before this algorithm starts, there are  $t$ (without
%loss of generality, let $t > 0$) ``tokens'' containing ID of $v$
%stored in some nodes in the network. The goal is to show that {\sc
%Sample-Destination} brings one of these tokens to $v$ with uniform
%probability. For any node $u$, let $T_u$ be the subtree rooted at
%$u$ and let $S_u$ be the set of tokens in $T_u$. (Therefore, $T_v=T$
%and $|S_v|=t$.)

%We claim that any node $u$ returns a destination to its parent with
%uniform probability (i.e., for any tokens $x\in S_u$, $Pr[ u$
%returns $x ]$ is $1/|S_u|$ (if $|S_u|>0$)). We prove this by
%induction on the height of the tree. This claim clearly holds for
%the base case where $u$ is a leaf node. Now, for any non-leaf node
%$u$, assume that the claim is true for any of its children.

%To be precise, suppose that $u$ receives tokens and counts from $q$
%children. Assume that it receives tokens $d_1, d_2, ..., d_q$ and
%counts $c_1, c_2, ..., c_q$ from nodes $u_1, u_2, ..., u_q$,
%respectively. (Also recall that $d_0$ is the sample of its own
%tokens (if exists) and $c_0$ is the number of its own tokens.) By
%induction, $d_j$ is sent from $u_j$ to $u$ with probability
%$1/|S_{u_j}|$, for any $1\leq j\leq q$. Moreover, $c_j=|S_{u_j}|$
%for any $j$. Therefore, any token $d_j$ will be picked with
%probability $\frac{1}{|S_{u_j}|}\times \frac{c_j}{c_0+c_1+...c_q} =
%\frac{1}{S_u}$ as claimed.

%The lemma follows by applying the claim above to $v$.
%\end{proof}

%We are now ready to state and prove the running time of the main
%algorithm for 1-RW-DoS.

NOVEL IDEA OF THIS PAPER: 

We start by describing a result due to Gilman. 

Let $t_\ell$ denote the number of times a set of vertices $A$ is visited in a walk of length $\ell$; further let $\pi(x) = \frac{d(x)}{2m}$ denote the stationary probability of vertex $x$ and $\pi(A)$ the sum of stationary probabilities of vertices in $A$. Let the source node of the random walk of length $\ell$ be chosen from the distribution ${\bf q}$. Let $\frac{{\bf q}}{\sqrt{\pi}}$ denote the vector with entries $\frac{{\bf q}(x)}{\sqrt{\pi(x)}}$. Let $N_{\bf q}$ denote the $L_2$-norm of $\frac{{\bf q}}{\sqrt{\pi}}$.

Denote by $\epsilon$ the gap between the first and the second eigenvalue of the transition matrix of the undirected graph. The top eigenvalue $\lambda_1$ is $1$ and the eigenvector corresponding to this is the stationary distribution vector. Further, the second eigenvalue $\lambda_2$ is bounded away from $1$ under the assumption that the graph is connected. $\epsilon = \lambda_1 - \lambda_2$.

\begin{theorem} [Theorem 2.1 of Gilman 1998]
\label{thm:gilman}
$Pr[t_{\ell} \geq \ell\pi(A) + \gamma] \leq (1 + \frac{\gamma \epsilon}{10\ell})N_{{\bf q}}e^{-\gamma^2\epsilon/20\ell}$
\end{theorem}

We use this to prove the following lemma. 

\begin{lemma} \label{lem:gilman}
The probability that more than $k$ distinct nodes, $x_1, x_2, \ldots, x_k$, are each visited more than $\gamma/k + \ell d(x_i)/2m$ times respectively for $\gamma = \sqrt{\frac{\ell k}{\epsilon}}$ is at most $\frac{1}{n}$ as long as $\ell\leq O(m)$.
\end{lemma}
\begin{proof}
In Theorem~\ref{thm:gilman} choose $\gamma$ such that $\gamma^2 \geq \frac{\ell k\log^3 n}{\epsilon}$.
We consider a set $A$ of size $k$. So it follows that $\pi(A)\cdot 2m \geq k$.

Further, by our choice of $\gamma$, we know that $\gamma > \ell\pi(A)$ as long as $\ell<m$. Therefore, we get $P[t_\ell \geq \tilde{\Theta}(\sqrt{\frac{\ell k}{\epsilon}})
] \leq O(e^{-k\log^2 n}) \leq n^{-k}e^{-\log n}$; here we choose the $\log n$ factors sufficiently large to ensure that $N_{{\bf q}}$ is nullified.

We now perform a union bound over all sets $A$ of size $k$. There are $n^k$ such sets. Therefore, the probability that for any set $A$ of size $k$, the set $A$ is visited more than $\sqrt{\frac{\ell k}{\epsilon}}$ times is at most $\frac{1}{n}$. Now, if $k$ distinct nodes $x_1, x_2, \ldots, x_k$ are each visited more than $\gamma/k + \ell d(x_i)/2m$ times respectively, then consider the set that contains exactly these $k$ nodes. The condition is violated.
\end{proof}


%\begin{lemma}
%The probability that Get-More-Walks is invoked more than $k$ times is, w.h.p., $O(1/n)$.
%\end{lemma}
%\begin{proof}
%{\bf Uniformity Argument.}
%
%We know that in a walk of length $\ell$, at most $k$ distinct nodes can be visited more than $\eta_x \lambda \log n$ where $\eta_x = \eta\cdot d(x)$. However, we now need to argue that if only the {\em Connectors} of this $\ell$ length walk are observed, then any of these nodes is visited at most $\eta_x \log n$ times w.h.p.
%
%Intuitively, this argument is simple, since the connectors are spread out in steps of length approximately $\lambda$. However, there might be some {\em periodicity} that results in the same node being visited multiple times but {\em exactly} at $\lambda$-intervals. This is where we crucially use the fact that the algorithm uses walks of length $\lambda + r$ where $r$ is chosen uniformly at random from $[0,\lambda]$.
%
%We essentially use the following claim.
%\begin{claim}
%Given a sequence of observations $X_1, X_2, \ldots, X_{t}$, if an event ${\cal R}$ is observed at most $f_1\cdot f_2$ times. Suppose one observation is picked at random from $X_i, X_{i+1}, X_{i+f_2}$ for each $i$ in $\{1, f_2, 2f_2, \ldots, (f_1-1)f_2\}$, and call these observations $Y_1, Y_2, \ldots, Y_{f_1}$, then the probability that the event ${\cal R}$ is observed more than $f_2\log n$ times among all $Y_i$ observations is at most $1/n$.
%\end{claim}
%\begin{proof}
%To write.
%\end{proof}
%
%Choose $t=\ell$, the event observed to be the walk being at node $x$, and $f_1=\eta_x\log n$ and $f_2=\lambda/2$. By our algorithm, the samples picked are even sparser -- to formalize.
%This then completes the proof of the Uniformity Lemma.
%
%\end{proof}

%--------------------------------------------------------------

The crucial step is to now prove the following. We do this by showing Lemma~\ref{lem:uniformityused}.

\begin{proposition}[Previously Lemma~1.10]\label{prop:at_most_k_calls}
The probability that Get-More-Walks is invoked more than $k$ times
is at most $1/n$.
\end{proposition}

WHY NEED MORE: The point is that in Lemma~\ref{lem:gilman}, we looked at all points of the $\ell$ length walk. If we look at all $\ell$ points, then indeed any node may occur many times. However, in {\sc Single-Random-Walk}, we need to invoke {\sc Get-More-Walks} only when a node $x$ is visited too many times as a {\em connector} node. Here, too many times, is more than the number of walks that were stored from it, in Phase~1.

NOTE: We choose $r_i$ from $[0, \lambda-1]$. If in the algorithm, we had deterministic lengths, then arguing about connectors becomes difficult. 

To prove the proposition, recall that it is shown earlier that in a
walk of length $\ell$, with high probability, there are at most $k$
distinct nodes $v$ that appear more than $\eta_v \lambda \log n$
times in the walk, where $\eta_v = \eta\cdot d(v)$. It is left to
show that every vertex $v$ that appears at most $\eta_v\lambda\log
n$ times in the walk also appears at most $\eta_v\polylog n$ times as
a {\em connector node} (defined in
Algorithm~\ref{alg:single-random-walk}), with high probability. We
show the following lemma.

\begin{lemma}
\label{lem:uniformityused}
For any vertex $v$, if $v$ appears in the walk at most
$\eta_v\lambda\log n$ times then it appears as a connector node at
most $\eta_v(\log n)^3$ times with probability at least $1-1/n^2$.
\end{lemma}

\begin{proof}
Intuitively, this argument is simple, since the connectors are
spread out in steps of length approximately $\lambda$. However,
there might be some {\em periodicity} that results in the same node
being visited multiple times but {\em exactly} at
$\lambda$-intervals. This is where we crucially use the fact that
the algorithm uses walks of length $\lambda + r$ where $r$ is chosen
uniformly at random from $[0,\lambda]$.

%-----------------------------

We prove the lemma using the two following claims.

\begin{claim}
Consider any sequence $A$ of numbers $a_1, ..., a_{\ell'}$ of length
$\ell'$. For any integer $\lambda'$, let $B$ be a sequence
$a_{\lambda'+r_1}, a_{2\lambda'+r_1+r_2}, ...,
a_{i\lambda'+r_1+...+r_i}, ...$ where $r_i$, for any $i$, is a
random integer picked uniformly from $[0, \lambda'-1]$.
Consider another subsequence of numbers $C$ of $A$ where an element in $C$
is picked from ``every $\lambda'$ numbers'' in $A$; i.e., $C$ consists of
$\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$ where, for
any $i$, $c_i$ is chosen uniformly at random from
$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$.
Then, $Pr[C \text{ contains } \{a_{i_1},
a_{i_2}, ..., a_{i_k}\}] = Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}]$ for any set $\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}$.
\end{claim}

%\textbf{Danupon:} This claim is stated with full details as we may
%want to bring it out to highlight later.
%\begin{claim}
%Given a sequence of observations $X_1, X_2, \ldots, X_{t}$, if an
%event ${\cal R}$ is observed at most $f_1\cdot f_2$ times. Suppose
%one observation is picked at random from $X_i, X_{i+1}, X_{i+f_2}$
%for each $i$ in $\{1, f_2, 2f_2, \ldots, (f_1-1)f_2\}$, and call
%these observations $Y_1, Y_2, \ldots, Y_{f_1}$, then the probability
%that the event ${\cal R}$ is observed more than $f_2\log n$ times
%among all $Y_i$ observations is at most $1/n$.
%\end{claim}
\begin{proof}
First consider a subsequence $C$ of $A$. Numbers in $C$ are picked
from ``every $\lambda'$ numbers'' in $A$; i.e., $C$ consists of
$\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$ where, for
any $i$, $c_i$ is chosen uniformly at random from
$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$.
Observe that $|C|\geq |B|$. In fact, we can say that ``$C$ contains
$B$''; i.e., for any sequence of $k$ indexes $i_1, i_2, ..., i_k$
such that $\lambda'\leq i_{j+1}-i_j\leq 2\lambda'-1$ for all $j$,
%
$$Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] = Pr[C \text{ contains
} \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}].$$
%
To see this, observe that $B$ will be equal to $\{a_{i_1}, a_{i_2},
..., a_{i_k}\}$ only for a specific value of $r_1, r_2, ..., r_k$.
Since each of $r_1, r_2, ..., r_k$ is chosen uniformly at random
from $[0, \lambda'-1]$, $Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] =
\lambda'^{-k}$.
%(Some technicality: The inequality follows from
%the fact that for some $\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}$, $Pr[B =
%\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] = 0$.)
Moreover, $C$ will contain $\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}$ if
and only if, for each $j$, we pick $a_{i_j}$ from the interval that
contains it (i.e., from $a_{(i'-1)\lambda'+1}, a_{(i'-1)\lambda'+2},
..., a_{i'\lambda'}$, for some $i'$). (Note that $a_{i_1}, a_{i_2},
...$ are all in different intervals because $i_{j+1}-i_j\geq
\lambda'$ for all $j$.) Therefore, $Pr[C \text{ contains } \{a_{i_1},
a_{i_2}, ..., a_{i_k}\}]=\lambda'^{-k}$.
\end{proof}

\begin{claim}
Consider any sequence $A$ of numbers $a_1, ..., a_{\ell'}$ of length
$\ell'$.
Consider a subsequence of numbers $C$ of $A$ where an element in $C$
is picked from ``every $\lambda'$ numbers'' in $A$; i.e., $C$ consists of
$\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$ where, for
any $i$, $c_i$ is chosen uniformly at random from
$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$. For any
number $x$, let $n_x$ be the number of appearances of $x$ in $A$;
i.e., $n_x=|\{i\ |\ a_i=x\}|$. Then, for any $R\geq 6n_x/\lambda'$,
$x$ appears in $C$ more than $R$ times with probability at most
$2^{-R}$.
\end{claim}
\begin{proof}
For $i=1, 2, ..., \lfloor\ell'/\lambda'\rfloor$, let $X_i$ be a 0/1
random variable that is $1$ if and only if $c_i=x$ and
$X=\sum_{i=1}^{\lfloor\ell'/\lambda'\rfloor} X_i$. That is, $X$ is
the number of appearances of $x$ in $C$. Clearly,
$E[X]=n_x/\lambda'$. Since $X_i$'s are independent, we can apply the
Chernoff bound (e.g., in~\cite[Theorem~4.4.]{MU-book-05}): For any
$R\geq 6E[X]=6n_x/\lambda'$,
\[Pr[X\geq R]\leq 2^{-R}.\]
The claim is thus proved.
\end{proof}

%-------------------------------

Now we use the claims to prove the lemma. Choose $\ell'=\ell$ and
$\lambda'=\lambda$ and consider any node $v$ that appears at most
$\eta_v\lambda\log n$ times. The number of times it appears as a
connector node is the number of times it appears in the subsequence
$B$ described in the first claim. By applying the second claim with
$R=\eta_v(\log n)^3$ (note that $R\geq 6\eta_v\log n$ for sufficiently
large $n$), $v$ appears in $B$ more than
$\eta_v(\log n)^3$ times with probability at most $1/n^2$.
\end{proof}

Now we prove the proposition.
\begin{proof}[Proof of Proposition~\ref{prop:at_most_k_calls}]
This uses Lemma~\ref{lem:uniformityused} and Lemma~\ref{lem:gilman}. Lemma~\ref{lem:gilman} gives a bound on the number of times any node is visited, when all points of the $\ell$ length walk are considered. This, together with Lemma~\ref{lem:uniformityused} gives us a bound on the number of times any node is visited when only the connector points of the $\ell$ length walk are considered. 

Have to wait for the exact number from previous lemmas.
\end{proof}


With a bound on the number of times {\sc Get-More-Walks} is invoked, we are now ready to prove the main upper bound of this paper. 

%--------------------------------------------------------------




\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
We chose $\gamma = O(\pi(A)\cdot 2m\cdot \lambda \cdot \eta \cdot \log n)$ where we further force $\gamma^2 \geq \frac{\ell k\log n}{\epsilon}$. Therefore, we choose $\eta\lambda$ such that $\pi(A)\cdot 2m\cdot \lambda \cdot \eta \cdot \log n \geq \sqrt{\frac{\ell k\log n}{\epsilon}}$.

We consider a set $A$ of size $k$. So it follows that $\pi(A)\cdot 2m \geq k$. Therefore, to ensure the above inequality, it suffices to choose $\eta \lambda = \gamma/k = \tilde{O}(\sqrt{\frac{\ell}{\epsilon k}})$. We now use Lemma~\ref{lem:gilman}.

Using Proposition~\ref{prop:at_most_k_calls} and the bound on the running times of Phase~1 of {\sc Single-Random-Walk} (Lemma~\ref{lem:phase1}), {\sc Get-More-Walks} (Lemma~\ref{lem:get-more-walks}), and {\sc Sample-Destination} (Lemma~\ref{lem:Sample-Destination}), we get the number of rounds required to be $\tilde{O}(\sqrt{\frac{\ell}{\epsilon k}} + \frac{\ell D}{\lambda} + k\lambda)$ with the added constraint that $\eta\lambda = \sqrt{\frac{\ell}{\epsilon k}}$ [because of our choice of $\gamma$ in Lemma~\ref{lem:gilman}]. We choose parameters as follows: $k=\frac{1}{\sqrt{\epsilon}}$, $\lambda = \sqrt{\ell D}\epsilon^{1/4}$, $\eta = \frac{1}{\sqrt{\epsilon D}}$. Note that $\frac{1}{\epsilon} > D$  and therefore our choice of $\eta$ is at least $1$. The result of $\tilde{O}(\sqrt{\ell D}\epsilon^{-1/4})$ rounds follows by plugging in these parameter values.

\end{proof}

%\begin{lemma} [Uniformity Lemma]
%In the walk of length $l$, if every node is seen at most $t$ times w.h.p., then among the Connectors in a walk of length $l$, any walk is seen at most $O(t/\lambda)$ times w.h.p.
%\end{lemma}


%\begin{lemma}
%\label{lem:mainone} Algorithm {\sc Single-Random-Walk} (cf.
%Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS and, with
%high probability, finishes in $O(\frac{\lambda\eta\log
%n}{\mindegree} + \frac{\ell\cdot D}{\lambda} + \frac{\ell}{\eta})$
%rounds.
%\end{lemma}
%\begin{proof}
%First, we prove the correctness of the algorithm. Observe that any
%two $\lambda$-length walks (possibly from different sources) are
%independent from each other. Moreover, a walk from a particular node
%is picked uniformly at random (by
%Lemma~\ref{lem:correctness-sample-destination}). Therefore, the {\sc
%Single-Random-Walk} algorithm is equivalent to having a source node
%perform a walk of length $\lambda$ and then have the destination do
%another walk of length $\lambda$ and so on.

%We now prove the time bound.
%First, observe that algorithm {\sc Sample-Destination}  is called
%$O(\frac{\ell}{\lambda})$ times and by
%Lemma~\ref{lem:Sample-Destination}, this algorithm takes
%$O(\frac{\ell\cdot D}{\lambda})$ rounds in total.
%Next, we claim that {\sc Get-More-Walks} is called at most
%$O(\frac{\ell}{\lambda\eta})$ times in total (summing over all
%nodes). This is because when a node $v$ calls {\sc
%Get-More-Walks}($v$, $\eta$, $\lambda$), all $\eta$ walks starting
%at $v$ must have been stitched and therefore $v$ contributes
%$\lambda\eta$ steps of walk to the long walk we are constructing.
%It follows from Lemma~\ref{lem:get-more-walks} that {\sc
%Get-More-Walks} algorithm takes $O(\frac{\ell}{\eta})$ rounds in
%total.

%Combining the above results with Lemma~\ref{lem:phase1} gives the
%claimed bound.
%\end{proof}

%Theorem~\ref{thm:1-walk} immediately follows.

%\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
%Use Lemma~\ref{lem:mainone} with
%$\lambda=\frac{\ell^{1/3}D^{2/3}\mindegree^{1/3}}{(\log n)^{1/3}}$
%and $\eta=\frac{\ell^{1/3}\mindegree^{1/3}}{D^{1/3}(\log n)^{1/3}}$.
%\end{proof}

\subsection{Interpretation of Result}

The parameter $\epsilon$ is related to the mixing time of the graph.

\section{Conclusion}\label{sec:conclusion}

Open questions: (1) Close the gap? (2) Directed Graphs? (3) Handle Byzantine Failures?

\bigskip
\bibliographystyle{abbrv}
\bibliography{Distributed-RW}

\end{document}
