\documentclass[11pt]{article}

\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amsfonts}
\usepackage{fullpage}
\usepackage{graphicx}% \includegraphics, \scalebox, \rotatebox (figure macros below)
\usepackage{xcolor}%   \colorlet, \color (used by \cfbox)
\usepackage{xspace}%   \xspace (used by most name macros below)


% \cfbox{color}{text}{width}: a \framebox of the given width whose frame is
% drawn in color #1 while the contents #2 keep the surrounding text color
% (saved in "currentcolor" before switching).
% NOTE(review): requires the xcolor package (\colorlet/\color) -- confirm it is loaded.
\newcommand{\cfbox}[3]{%
    \colorlet{currentcolor}{.}%
    {\color{#1}%
    \framebox[#3]{\color{currentcolor}#2}}%
}
%--------------------------------------------------------------
%--------------------------------------------------------------
%Problem names
%--------------------------------------------------------------
%--------------------------------------------------------------
\newcommand{\MS}{{\sf Machine Scheduling}\xspace}
\newcommand{\RMS}{{\sf Restricted Machine Scheduling}\xspace}
\newcommand{\CM}{{\sf Congestion Minimization}\xspace}
\newcommand{\ST}{{\sf Steiner Tree}\xspace}
\newcommand{\CD}{{\sf Cost-Distance}\xspace}
\newcommand{\PST}{{\sf Priority Steiner Tree}\xspace}
\newcommand{\FCNF}{{\sf FCNF}\xspace}
\newcommand{\AC}{{\sf Asymmetric $k$-Center}\xspace}
\newcommand{\MAXIS}{{\sf Maximum Independent Set}\xspace}
\newcommand{\kC}{{\sf $k$-Center}\xspace}
\newcommand{\dH}{{\sf $d$-Hypergraph-Cover}\xspace}
\newcommand{\kH}{{\sf $k$-Hypergraph-Cover}\xspace}
\newcommand{\GS}{{\sf Group-Steiner-Tree}\xspace}
\newcommand{\SC}{{\sf Set-Cover}\xspace}
\newcommand{\GAPSAT}{{\sf Gap-3SAT(5)}\xspace}
\newcommand{\clique}{{\sf Max-Clique}\xspace}
\newcommand{\ML}{{\sf Metric Labeling}\xspace}
\newcommand{\ZE}{{\sf 0-extension}\xspace}
\newcommand{\ZIE}{{\sf $(0,\infty)$-extension}\xspace}
\newcommand{\sat}{{\sf SAT}\xspace}
\newcommand{\mdc}{{\sf MDC}\xspace}
\newcommand{\csp}{{\sf HA-CSP}\xspace}

% Instance-type names for hardness reductions.
\newcommand{\yi}{{\sc Yes-Instance}\xspace}
% NOTE(review): this clobbers the standard math symbol \ni ("contains as
% member", reversed \in) -- confirm no formula in the document needs it.
\renewcommand{\ni}{{\sc No-Instance}\xspace}

% \restatethm{Theorem}{3.1}{statement}: restate a previously numbered result
% with an explicit "(restated)" tag; #3 is set in theorem (italic) style.
% Uses \textbf/\textrm/\textit instead of the obsolete two-letter font
% switches {\bf..}/{\rm..}/{\it..}, and adds the missing space before
% "(restated)" (the old adjacent groups ran the period into the parenthesis).
\newcommand{\restatethm}[3]{%
  \medskip\noindent\textbf{#1~#2.} \textrm{(restated)}
  \textit{#3}%
}

% Vectors
\newcommand{\x}{{\bf x}}
\newcommand{\y}{{\bf y}}
\newcommand{\congest}{\ensuremath{\mathcal{CONGEST}}\xspace}
\newcommand{\congestb}{\ensuremath{\mathcal{CONGEST}(B)}\xspace}
%\newcommand{\dist}{\ensuremath{\operatorname{dist}}\xspace}
\newcommand{\id}{{\sf id}\xspace}%{\mbox{\sf dist}}

%--------------------------------------------------------------
%--------------------------------------------------------------
%Protocol names
%--------------------------------------------------------------
%--------------------------------------------------------------
\newcommand{\RP}{{\sf RP}\xspace}
\newcommand{\RV}{{\sc Raz Verifier}\xspace}

%--------------------------------------------------------------
%--------------------------------------------------------------
%Math operators
%--------------------------------------------------------------
%--------------------------------------------------------------
\newcommand{\size}[1]{\ensuremath{\left|#1\right|}}
\newcommand{\ceil}[1]{\ensuremath{\left\lceil#1\right\rceil}}
\newcommand{\floor}[1]{\ensuremath{\left\lfloor#1\right\rfloor}}
\newcommand{\Dexp}[1]{\dexp\{#1\}}
\newcommand{\Tower}[2]{\operatorname{tower}^{(#1)}\{#2\}}
\newcommand{\logi}[2]{\operatorname{log}^{(#1)}{#2}}
\newcommand{\norm}[1]{\lVert #1\rVert}
\newcommand{\abs}[1]{\lvert #1\rvert}
\newcommand{\paren}[1]{\left ( #1 \right ) }
\newcommand{\union}{\cup}
\newcommand{\band}{\wedge}
\newcommand{\bor}{\vee}
\newcommand{\dimension}[1]{\ensuremath{{\sf dim}(#1)}\xspace}
\newcommand{\induce}[1]{\ensuremath{{\sf im}(#1)}\xspace}
\newcommand{\sinduce}[1]{\ensuremath{{\sf sim}(#1)}\xspace}
\newcommand{\sinducesigma}[2]{\ensuremath{{\sf sim}_{#2}(#1)}\xspace}
\newcommand{\hstrong}{\ensuremath{\times_e}}

\newcommand{\MFS}{{\sc Mrfs}\xspace}
\newcommand{\UDP}{{\sc Udp-Min}\xspace}
\newcommand{\SMP}{{\sc Smp}\xspace}
\newcommand{\MES}{{\sc Maximum Expanding Sequence}\xspace}

%--------------------------------------------------------------
%--------------------------------------------------------------
%Complexity Classes
%--------------------------------------------------------------
%--------------------------------------------------------------

\renewcommand{\P}{\mbox{\sf P}}
\newcommand{\NP}{\mbox{\sf NP}}
\newcommand{\APX}{\mbox{\sf APX}}
\newcommand{\PCP}{\mbox{\sf PCP}}
\newcommand{\ZPP}{\mbox{\sf ZPP}}
\newcommand{\polylog}[1]{\mathrm{polylog(#1)}}
\newcommand{\DTIME}{\mbox{\sf DTIME}}

\newcommand{\opt}{\mbox{\sf OPT}}
\newcommand{\sz}{\mbox{\sf SIZE}}
\newcommand{\lin}{\mbox{\sf LIN}}
\newcommand{\card}{\mbox{\sf card}}




%--------------------------------------------------------------
%--------------------------------------------------------------
%Sets
%--------------------------------------------------------------
%--------------------------------------------------------------
\newcommand{\set}[1]{\left\{ #1 \right\}}
\newcommand{\sse}{\subseteq}

\newcommand{\B}{{\mathcal{B}}}
\newcommand{\tset}{{\mathcal T}}
\newcommand{\gset}{{\mathcal{G}}}
\newcommand{\pset}{{\mathcal{P}}}
\newcommand{\qset}{{\mathcal{Q}}}
\newcommand{\lset}{{\mathcal{L}}}
\newcommand{\bset}{{\mathcal{B}}}
\newcommand{\aset}{{\mathcal{A}}}
\newcommand{\cset}{{\mathcal{C}}}
\newcommand{\fset}{{\mathcal{F}}}
\newcommand{\mset}{{\mathcal M}}
\newcommand{\iset}{{\mathcal{I}}}
\newcommand{\jset}{{\mathcal{J}}}
\newcommand{\xset}{{\mathcal{X}}}
\newcommand{\yset}{{\mathcal{Y}}}
\newcommand{\rset}{{\mathcal{R}}}
\newcommand{\vset}{{\mathcal{V}}}
\newcommand{\uset}{{\mathcal{U}}}
\newcommand{\wset}{{\mathcal{W}}}


\newcommand{\I}{{\mathcal I}}
\newcommand{\hset}{{\mathcal{H}}}
\newcommand{\sset}{{\mathcal{S}}}
\newcommand{\notu}{\overline U}



%--------------------------------------------------------------
%--------------------------------------------------------------
%General Environments
%--------------------------------------------------------------
%--------------------------------------------------------------

%\newcommand{\be}{\begin{enumerate}}
%\newcommand{\ee}{\end{enumerate}}
%\newcommand{\bd}{\begin{description}}
%\newcommand{\ed}{\end{description}}
%\newcommand{\bi}{\begin{itemize}}
%\newcommand{\ei}{\end{itemize}}

%--------------------------------------------------------------
%--------------------------------------------------------------
%Theorems and such
%--------------------------------------------------------------
%--------------------------------------------------------------
\newtheorem{theorem}{Theorem}[section]
%[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{observation}[theorem]{Observation}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{claim}[theorem]{Claim}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{assumption}{Assumption}
\newtheorem{conjecture}{Conjecture}
%\newtheorem{definition}{Definition}
\newenvironment{exercise}{\underline{\bf Exercise}: }{}
%\newenvironment{proof}{\par \smallskip{\bf Proof:}}{\hfill\stopproof}
\def\stopproof{\square}
\def\square{\vbox{\hrule height.2pt\hbox{\vrule width.2pt height5pt \kern5pt
\vrule width.2pt} \hrule height.2pt}}

%\newenvironment{definition}{\underline{\bf Definition}: }{}
%\newtheorem{definition}[theorem]{Definition}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
%\newenvironment{definition}[1][Definition]{\begin{trivlist}
%\item[\hskip \labelsep {\bfseries #1}]}{\end{trivlist}}

%\renewcommand{\paragraph}[1]{

%--------------------------------------------------------------
%--------------------------------------------------------------
%Figures and such
%--------------------------------------------------------------
%--------------------------------------------------------------
\newcommand{\scale}[2]{\scalebox{#1}{#2}}

\newcommand{\fig}[1]{
\begin{figure}[h]
\rotatebox{0}{\includegraphics{#1}}
\end{figure}}

\newcommand{\figcap}[2]
{
\begin{figure}[h]
\rotatebox{270}{\includegraphics{#1}} \caption{#2}
\end{figure}
}

\newcommand{\scalefig}[2]{
\begin{figure}[h]
\begin{center}
\scalebox{#1}{\includegraphics{#2}}
\end{center}
\end{figure}}

\newcommand{\scalefigcap}[3]{
\begin{figure}[h]
\scalebox{#1}{\rotatebox{0}{\includegraphics{#2}}} \caption{#3}
\end{figure}}

% \scalefigcaplabel{scale}{file}{caption}{label}: centered, scaled figure with
% caption and label.  The \label must come AFTER \caption: the original placed
% it before, so \ref on the label resolved to the enclosing section number
% rather than the figure number.
\newcommand{\scalefigcaplabel}[4]{
\begin{figure}[h]
\begin{center}
\scalebox{#1}{\includegraphics{#2}}\caption{#3}\label{#4}
\end{center}
\end{figure}}

%-----------------------------------------------------
%Programs
%-----------------------------------------------------
\newenvironment{prog}[1]{
\begin{minipage}{5.8 in}
{\sc\bf #1}
\begin{enumerate}}
{
\end{enumerate}
\end{minipage}
}

\newcommand{\program}[2]{\vspace{2mm}\fbox{\vspace{2mm}\begin{prog}{#1} #2 \end{prog}\vspace{2mm}}\vspace{2mm}}
%-----------------------------------------------------------

%--------------------------------------------------------------
%--------------------------------------------------------------
%Other - Math
%--------------------------------------------------------------
%--------------------------------------------------------------
\renewcommand{\phi}{\varphi}
\newcommand{\eps}{\epsilon}
\newcommand{\Sum}{\displaystyle\sum}
\newcommand{\half}{\ensuremath{\frac{1}{2}}}

\newcommand{\poly}{\operatorname{poly}}
\newcommand{\dist}{\mbox{\sf dist}}

\newcommand{\reals}{{\mathbb R}}

\newcommand{\rn}{\reals^n}
\newcommand{\rk}{\reals^k}
\newcommand{\newsigma}{\Sigma^{d^{\ceil{t/2}}}}
\newcommand{\R}{\ensuremath{\mathbb R}}
\newcommand{\C}{\ensuremath{\mathbb C}}
\newcommand{\Z}{\ensuremath{\mathbb Z}}
\newcommand{\N}{\ensuremath{\mathbb N}}
\newcommand{\F}{\ensuremath{\mathbb F}}
\newcommand{\fm}{\enshuremath{\mathbb F}^m}
\newcommand{\func}{:{\mathbb F}^m\rightarrow {\mathbb F}}

\newcommand{\expct}[1]{\text{\bf E}_\left [#1\right]}
\newcommand{\expect}[2]{\text{\bf E}_{#1}\left [#2\right]}
\newcommand{\prob}[1]{\text{\bf Pr}\left [#1\right]}
\newcommand{\pr}[2]{\text{\bf Pr}_{#1}\left [#2\right ]}
\newcommand{\prefix}{\operatorname{pref}}

%\setlength{\parskip}{2mm}
%\setlength{\parindent}{0mm}


\newcommand{\notsat}[1]{\overline{\text{SAT}(#1)}}
\newcommand{\notsats}[2]{\overline{\text{SAT}_{#1}(#2)}}

\newcommand{\maxsatf}{\mbox{\sf Max 3SAT(5)} }
\newcommand{\threesat}{\mbox{\sf Max 3SAT} }
\newcommand{\twosat}{\mbox{\sf Max 2SAT} }
\newcommand{\sndp}{\mbox{\sf SNDP}}
\newcommand{\ecsndp}{\mbox{\sf EC-SNDP}}
\newcommand{\vcsndp}{\mbox{\sf VC-SNDP}}
\newcommand{\kec}{k\mbox{\sf -edge connectivity}}
\newcommand{\kvc}{k\mbox{\sf -vertex connectivity}}
\newcommand{\sskec}{\mbox{\sf single-source}~k\mbox{\sf -edge connectivity}}
\newcommand{\sskvc}{\mbox{\sf single-source}~k\mbox{\sf -vertex connectivity}}
\newcommand{\subkvc}{\mbox{\sf subset}~k\mbox{\sf -vertex connectivity}}
\newcommand{\oneec}{1\mbox{\sf -edge connectivity}}
\newcommand{\onevc}{1\mbox{\sf -vertex connectivity}}
\newcommand{\kvcssp}{k\mbox{\sf -vertex-connected spanning subgraph problem}}
\newcommand{\rem}{\operatorname{rem}}
\newcommand{\ove}{\operatorname{over}}
\newcommand{\len}{\operatorname{len}}
\newcommand{\rev}{\operatorname{rev}}

\newcommand{\events}{\mathcal E}


\newcommand{\sssp}{{\sf SSSP}\xspace}
\newcommand{\apsp}{{\sf APSP}\xspace}


\def\danupon#1{\marginpar{$\leftarrow$\fbox{D}}\footnote{$\Rightarrow$~{\sf #1 --Danupon}}}


%\def\danupon#1{}

%-------------------
% SPACE SAVERS
%-------------------
%
%\usepackage{mathtime}
%\usepackage{times}
%\usepackage[small,compact]{titlesec}
%\usepackage[footnotesize]{caption}

\newcommand{\squishlist}{
 \begin{list}{$\bullet$}
  { \setlength{\itemsep}{0pt}
     \setlength{\parsep}{3pt}
     \setlength{\topsep}{3pt}
     \setlength{\partopsep}{0pt}
     \setlength{\leftmargin}{1.5em}
     \setlength{\labelwidth}{1em}
     \setlength{\labelsep}{0.5em} } }
\newcommand{\squishend}{
  \end{list}  }

% Compact numbered list, companion to \squishlist above.
% The original opened a plain enumerate and then put the \setlength calls in
% an immediately-closed brace group inside the list body, where \topsep,
% \itemsep, etc. are read too late (and are discarded with the group) -- so
% the list was never actually "squished".  Build it with the list environment
% and \usecounter, exactly as the working \squishlist does.
\newcommand{\squishnum}{%
 \begin{list}{\arabic{enumi}.}%
  { \usecounter{enumi}%
    \setlength{\itemsep}{0pt}%
    \setlength{\parsep}{3pt}%
    \setlength{\topsep}{3pt}%
    \setlength{\partopsep}{0pt}%
    \setlength{\leftmargin}{1.5em}%
    \setlength{\labelwidth}{1em}%
    \setlength{\labelsep}{0.5em} } }
\newcommand{\squishnumend}{%
  \end{list} }


%\newcommand{\squishlist}{
% \begin{itemize}
%}
%
%\newcommand{\squishend}{\end{itemize}
%}

\newcounter{Lcount}
\newcommand{\squishlisttwo}{
\begin{list}{D\arabic{Lcount}. }
{ \usecounter{Lcount} \setlength{\itemsep}{0pt}
\setlength{\parsep}{0pt} \setlength{\topsep}{0pt}
\setlength{\partopsep}{0pt} \setlength{\leftmargin}{2em}
\setlength{\labelwidth}{1.5em} \setlength{\labelsep}{0.5em} } }

\newcommand{\squishendtwo}{
\end{list} }

\renewcommand{\theLcount}{D\arabic{Lcount}}


\newcommand{\be}{\squishlisttwo}
\newcommand{\ee}{\squishendtwo}
%\newcommand{\bd}{\begin{description}}
%\newcommand{\ed}{\end{description}}
\newcommand{\bi}{\squishlist}
\newcommand{\ei}{\squishend}
\newcommand{\ve}{\varepsilon}


\newcommand{\expand}{{\sc Expand}\xspace}
\newcommand{\denst}{{\sf density}\xspace}

%\newcommand{\ind}{{i\nu}\xspace}

%\newcommand{\ind}{\induce\xspace}
%\renewcommand{\dim}{\mbox{dim}\xspace}
\newcommand{\bip}{\mbox{B}\xspace}
\newcommand{\bipm}{\ensuremath{\bip}}
\newcommand{\bipp}{\ensuremath{\bip_e}}
\newcommand{\cA}{\ensuremath{\mathcal{A}}}
\newcommand{\cE}{\ensuremath{\mathcal{E}}}
\newcommand{\cG}{\ensuremath{\mathcal{G}}}
\newcommand{\depth}{\ensuremath{\operatorname{depth}}}
\newcommand{\diam}{\ensuremath{\operatorname{diam}}}

\newcommand{\cP}{\mathcal{P}}



\def\NoNumber#1{{\def\alglinenumber##1{}\State #1}\addtocounter{ALG@line}{-1}}


%--------------------------------------
% Title, etc.
%--------------------------------------


\title{Responses to Referees' Comments}
\author{}
\date{\today}


\begin{document}
\maketitle

To editor and referees:

We thank the referees very much for their comments, which have been incorporated in the revised version. The detailed point-by-point responses to these comments are given below.


\section*{Referee 1}

\paragraph{Comment:} Please consider to mention in the introduction the message complexity of your algorithm, as well as the issue of dependency between long walks computed from the same set of short walks (if you indeed do this to amortize the cost of precomputing the short walks over the computation of several long walks).

The paper is nicely written and well organised, which makes it easy to read. However, I would have appreciated that the authors mention right from the beginning that the message complexity of their algorithm is a lot higher than the one of a naive random walk algorithm. Instead, they wait until the conclusion to mention it. Also, in the conclusion,
they mention that one can amortize the complexity over several walks. I believe that some dependency between the (long) random walks computed from the same set of short random walks should exist. If not what is the argument?


\paragraph{Response:} Thank you for the comments. We made the following changes to address the above comments.

$\bullet$ We added the following remark in ``Our Results'' section (Section 1.4).



\begin{quote}
{\em Remarks.}
 While the message complexity is not the main focus of this paper, we note that our improved running time comes with the cost of an increased message complexity from the naive algorithm (we  discuss  this in Section 6). Our message complexity  for computing a random walk of length $\ell$ is  $\tilde O(m\sqrt{\ell D}+n\sqrt{\ell/D})$ which can be worse than the naive algorithm's $\tilde O(\ell)$ message complexity.
\end{quote}


$\bullet$ We also added the following in para 5 of Section 1.4.
\begin{quote}
We  note that the $k$ random walks generated by our algorithm are {\em independent} (cf. Section 4.1).
\end{quote}

$\bullet$ We also added the following in Section 4.1.

\begin{quote}
The correctness of {\sc Many-Random-Walks} follows from Lemma~3.1; intuitively, this algorithm outputs independent random walks because it obtains long walks by stitching short walks that are all independent (no short walk is used twice).
\end{quote}



$\bullet$ We added the following in Section 6. 

\begin{quote}
While  the focus  of this paper is on time complexity, message complexity is also important. In particular, our message complexity for computing $k$ independent random walks of length $\ell$ is $\tilde O(m\sqrt{\ell D}+n\sqrt{\ell/D})$ which can be worse than the naive algorithm's $\tilde O(k\ell)$ message complexity.
It would be important to come up with an algorithm that is round efficient and yet has smaller message complexity.
In a subsequent paper [Das Sarma et al. 2012], we have addressed this issue partly and shown that, under certain assumptions, we can extend our algorithms  to  be message efficient also.
\end{quote}

$\bullet$ We also added a discussion of the above paper [Das Sarma et al. 2012]  in Section 1.5 -- under ``Subsequent work''. \\

$\bullet$ We {\em removed} the following sentence from Section 6 since it seems to confuse the readers.
Actually, this sentence talks about generating many long walks from different sets of short walks (each short walk is used only once and never reused in any other long walk) and hence there is no dependency. However, we don't address the amortization issue at all in this paper (it was addressed in a subsequent paper mentioned above).

\begin{quote}
``While our algorithm has a good {\it amortized} message complexity over several random walks, for doing one walk our algorithm takes much more messages than the naive token passing algorithm that takes $\ell$ messages.''
\end{quote}

%
%\begin{quote}
%``{\bf MAKE THIS SHORT and refer to conclusion} We note that the improved running time comes with the cost of increased message complexity. In particular, our message complexity for computing $k$ independent random walks of length $\ell$ is $\tilde O(m\sqrt{\ell D}+n\sqrt{\ell/D})$ which is worse than the naive algorithm's $\tilde O(k\ell)$ message complexity when $\ell=o(m^2(D+k))$.
%%
%We also note that $k$ random walks generated by our algorithm are {\em independent}. In particular, our algorithm obtains them by stitching short walks that are all independent.''
%\end{quote}

\iffalse
%\paragraph{To do:} Gopal and Atish should check whether the message complexity is correct. This is Danupon's analysis: Phase 1 where we runs $\eta deg(v)$ walks of length $\lambda$ from $v$ needs $\tilde O(\eta\lambda m)$ messages in total, where $m$ is the number of edges in the network. Phase 2 where we stitch $\ell/\lambda$ walks needs $\tilde O(n\ell/\lambda)$ since we have to communicate through a BFS tree for each stitch. In Theorem 3.6, we set $\eta=1$ and $\lambda\approx \sqrt{\ell D}$, so the total message complexity is $\tilde O(m\sqrt{\ell D}+n\sqrt{\ell/D})$.

%The case of $k$ walks needs $\tilde O(\eta\lambda m)$ and  $\tilde O(nk\ell/\lambda)$ in Phase 1 and 2, respective, where we use $\eta=1$ and $\lambda=\tilde \Theta(\sqrt{k\ell D}+k)$. So, the total message complexity is $\tilde O(m(\sqrt{k\ell D}+k)+n\sqrt{k \ell/D})$.

\paragraph{To do:} Gopal might want to add a reference to infocom paper.
\fi
%\paragraph{To discuss:} I'm not sure if we should say when the message complexity of our algorithm is worse since later we will always assume that $\ell=O(m^2)$.

%\paragraph{To discuss:} I'm not sure how to deal with the comment below. Actually, I don't know why we wrote this in the conclusion. This also has something to do with the above comment.
%\begin{quote}
%Also, in
%the conclusion, they mention that one can amortize the complexity over several walks. I believe that some
%dependency between the (long) random walks computed from the same set of short random walks should exist. If
%not what is the argument?
%\end{quote}

\section*{Referee 2}

\paragraph{Comment:} A disadvantage with the paper's approach is that the total message complexity is rather high, since many nodes cooperate in generating and stitching together the random walks.

\paragraph{Response:} Yes, that's correct. Referee 1 also raised this issue and we have added some comments
to clarify this issue. Please see our responses to Referee 1. Thank you.


\paragraph{Comment:}  ``A note to the authors: if T is your
time bound, then you can state that at most all nodes within distance T of the source (for the
single-random-walk case) need to be involved -- am I correct?''

\paragraph{Response:} Yes, that's correct. We note that unless $\ell$, the length of the random walk,
is less than $D$ (the network diameter), the running time of our algorithm (which is $\tilde{O}(\sqrt{\ell D})$) will be at least $D$ (hence all nodes in the network will be involved).

\paragraph{Comment:}
1. Your statement of optimality (in a few places in the paper) should be mentioned to be within a polylog
factor.

\paragraph{Response:} We added in many places that the optimality is within a poly-logarithmic factor.


\paragraph{Comment:} 2. Page 2, line 25: "checking expander" $\rightarrow$ "checking expansion"

\paragraph{Response:} Done.

\paragraph{Comment:}
3. Page 2, line 31: "local and lightweight and" $\rightarrow$ "local, lightweight, and"

\paragraph{Response:}  We slightly changed the sentence to make it more clear: ``Random walks are local and lightweight; moreover, they require ...''

\paragraph{Comment:} 4. Page 2, line 32: "which make them" $\rightarrow$ "which makes them"

\paragraph{Response:} Fixed.

\paragraph{Comment:} 5. Page 2, line 41: use $\ell \gg D$ to say "$\ell >> D$" (the same comment holds for a few more places in the
paper); also, remove the word "time".


\paragraph{Response:} Done.

\paragraph{Comment:}
6. Section 1.1: is it critical for your algorithms that the set of IDs come from a space of size exactly n?
Perhaps not. It is often convenient for the nodes to choose random IDs -- a range of size $\omega(n^2)$ will
guarantee distinct IDs with high probability. So, perhaps this requirement of exact size n can be relaxed?

\paragraph{Response:} We have relaxed the requirement, as stated in the following sentence. ``Specifically, assume that each node is associated with a distinct
identity number from the set $\{1, 2, \ldots , \operatorname{poly}(n)\}$.''

\paragraph{Comment:}
7. Page 3, line 21: "arrive to" $\rightarrow$ "arrive at"

\paragraph{Response:} Fixed.

\paragraph{Comment:}
8. Page 3, line 45: "for short" $\rightarrow$ "in short"
\paragraph{Response:} We changed ``For short'' to ``For brevity''.


\paragraph{Comment:} 9. Page 4, line 7: "an ID" $\rightarrow$ "the ID"
\paragraph{Response:} Fixed.

\paragraph{Comment:} 10. Page 4, line 12: "result" $\rightarrow$ "resultant"
\paragraph{Response:} We changed from ``is the result random walk '' to ``is the resultant random walk''.


\paragraph{Comment:} 11. Page 4, line 16: why do you say "proportional" to $1/deg(v)$? This can allow non-uniform choices of neighbors. (The only exception I know is of lazy random walks where, for ergodicity, you stay at a node with
probability $\geq  1/2$: you use this in a different context in page 18.) Clarify your model precisely.
\paragraph{Response:} We removed the phrase ``proportional to''. The probability should be exactly $1/\deg(v)$ (this is just the definition of a basic random walk).


\paragraph{Comments:} 12. Page 4, line 42: "as the case" $\rightarrow$ "as is the case"

13. Page 5, line 19: "called as" $\rightarrow$ "called"

14. Page 5, line 21: efficiently AND simultaneously


15. Page 5, line 43: remove the commas after "special case"


\paragraph{Response:} All comments above are taken care of. Thank you.

\paragraph{Comment:}
16. Page 6, line 5: "give a" $\rightarrow$ "give an"

\paragraph{Response:} We believe that the referee refers to ``give an $\tilde O(...)$''. We have fixed this.

\paragraph{Comment:} 17. Page 6, lines 19 and 20: "give a" $\rightarrow$ "gives a"; "a RST" $\rightarrow$ "an RST" (**here and later as well in the paper**)

\paragraph{Response:} Done.

%The first one is done. Is the second one correct? (Gopal --- Yes, correct. Please do as suggested. It should be "an RST").

\paragraph{Comment:} 18. First paragraph of Sec. 1.5: "finding PageRank" $\rightarrow$ "computing PageRank"; add "the" between "computing" and "stationary"; remove the comma after "Although"

19. Page 7, line 5: so ARE amenable


20. Page 7, line 17: "proving for" $\rightarrow$ "the case of"


21. Page 7, line 32: "Note" $\rightarrow$ "Now"


22. Page 7, line 40: "that avoids" $\rightarrow$ "which avoids"



23. Page 7, line 41: add "the" after "some of"


24. Page 8, line 12: "doing" $\rightarrow$ "running"


25. Step 7 of Phase 2: add "the" after "length of"


26. Page 8, line 41: "are deleted" $\rightarrow$ "may get deleted"


27. Page 9: "send to its" $\rightarrow$ "sends to its"; "from uniform" $\rightarrow$ "with uniform"; "are called" $\rightarrow$ "is called".


28. Algorithm 1: an "a" is needed in the middle of "such tree" and "such coupon" (the latter occurs twice).


29. The indices should end at q-1, and not at q, in Steps 6 and 7 of Algorithm 1.


30. Algorithm 2, Part 1: (a) Clarify that all references to "coupon" in this algorithm are about the new
coupons being sent out from v. (b) Step 1: "messages" $\rightarrow$ "coupons"; (c)  Step 5: "that $z$ is picked" $\rightarrow$ "for which $z$ is picked"; (d) Step 6: "For each" $\rightarrow$ "Each"; (e) Step 6: "each contains" $\rightarrow$ "each containing".


31. Algorithm 2, Part 2: (a) "is now forwarded for" $\rightarrow$ "has now been forwarded for"; (b) Step 2: "message" $\rightarrow$ "coupon"; (c) Step 4: "time the corresponding coupon is" $\rightarrow$ "times the corresponding coupon has been".


32. Page 10, line 52: remove "randomly" in "randomly pick"


33. In the top-left sub-figure of Figure 1, "$[0, \lambda]$" should be "$[0, \lambda - 1]$".


\paragraph{Response:} All above comments are taken care of.


\paragraph{Comment:}
34. You switch from ``$\eta$ coupons per node'' in the informal description, to $\eta\deg(v)$ in Algorithm 3. Please
spell out this change earlier, rather than waiting until Page 12.

\paragraph{Response:} We added the following sentence in the second paragraph of the ``A Slower algorithm'' subsection: ``(We note that we will need slightly more short walks when we develop a faster algorithm.)''.

%\paragraph{To Discuss:} I'm not sure what else we can do. The part where we explain ``faster algorithm'' is pretty short and we made it clear that we change from $\eta$ coupons to $\eta deg(v)$ coupons.

\paragraph{Comment:}
35. Phase 1 of Algorithm 3: (a) $r_i$ is a (random) function not only of i, but also of $v$; (b) Step 6: "forward
$C$" $\rightarrow$ "forwards $C$".

\paragraph{Response:} (a) We added the following sentence in Algorithm 3: ``(We note that random numbers $r_i$ generated by different nodes are different.)''. We do not specify which node $v$ each $r_i$ corresponds to since it is always clear from the context, and omitting $v$ helps simplify the presentation. (b) is taken care of.


\paragraph{Comment:} 36. Step 11, Phase 2 of Algorithm 3: "delete" $\rightarrow$ "deletes"

37. Page 11, line 51: delete "of the random walk"


38. Remove the word "algorithm" from the end of the title of Sec. 3.


39. Conclude the statement of Lemma 3.3 (both original and restated) with the word "rounds".


40. Indices in the proof of Claim 3.8 may also need to stop at q-1 instead of q, as in comment \#29 above.


41. Page 14, line 13: "-length walk" $\rightarrow$ "-length walks".



\paragraph{Response:} All the above comments are taken care of.


\paragraph{Comment:} 42. The failure-probability bound of $n^{-4}$ following the proof of Claim 3.9, follows from the Chernoff bound
even for $Pr[X^j(e) \geq K \eta (log n)/ loglogn]$, for some constant $K$. This (small) improvement of loglog n can
be further improved as $\eta$ increases. It is worth using this refined bound.

\paragraph{Response:} We added the following sentence: ``(We note that the number $4\eta\log {n}$ above can be improved to $c\eta\log{n}/\log\log n$ for some constant $c$. This improvement of $\log\log n$ can be further improved as $\eta$ increases. This fact is useful in practice but does not help improve our claimed running time since we always hide a $\polylog{n}$ factor.)''

\paragraph{Comment:}
43. You use "Pr[...]" for probabilities near the bottom of Page 13, and math-script-P in the Chernoff bound in
page 14. Use consistent notation.

\paragraph{Response:} We have changed ``$Pr[...]$'' to ``$\mathbb{P}[...]$'' in all places.



\paragraph{Comment:} 44. The last sentence in Page 14 holds only with high probability (not with certainty).
\paragraph{Response:} We changed from ``Therefore, we obtain walks of length $\lambda$ in
$4\lambda\eta\log{n}$ rounds as claimed.'' to ``Therefore, we obtain walks of length $\lambda$ in
$4\lambda\eta\log{n}$ rounds, with high probability, as claimed.''


\paragraph{Comment:}
45. Page 16: (a) line 6: given THAT the; (b) line 8: "applies to a random walk" $\rightarrow$ "applies to random walks".

\paragraph{Response:} Done.

\paragraph{Comment:} 46. Your key lemma, Lemma 3.12, can be stated in a more general manner so that it could be of use to future authors as well. (i) State the probability bound for any given vertex y, rather than applying a union bound
over all $y$; (ii) eliminate the condition "$\ell = O(m^2)$" by stating the lemma for any $\ell$ (this would forbid
the simplification of (2) that you do later, but that is fine in my opinion), and (iii) state what number of
hits H you should aim for, if you want, for some parameter $\delta$, $Pr(\sum_i N^{x_i}_{\ell}(y) \geq H) \leq
\delta$.


\paragraph{Response:} We added the following paragraphs after we state Lemma~3.12.

\begin{quote}
We note that one can also show a similar bound for a specific vertex, i.e. $\Pr\bigl(\exists y\ s.t.\
\sum_{i=1}^k N_\ell^{x_i}(y) \geq 32 \deg(x) \sqrt{k\ell+1}\log
n+k\bigr)$. Since we will not use this bound here, we defer it to Lemma~3.18 in Subsection~3.5.
%We can also show a similar bound for other number of visits. Since we will not use these bounds here, we defer them to Lemma~3.18 and Equation~(7) in Subsection~3.5.
%
Moreover, we prove the above lemma only for a specific number of visits of roughly $\sqrt{k\ell}$ because this is the expected number of visits (we show this in Proposition~3.16 in Section~3.5). It might be possible to prove more general bounds; however, we do not include them here since they need more proofs and are not relevant to the results of this paper.

Also note that Lemma~3.12 is not true if we do not restrict $\ell$ to be $O(m^2)$. For example, consider a star network and a walk of length $\ell$ such that $\ell\gg n^2$ and $\ell$ is larger than the mixing time. In this case, this walk will visit the center of the star $\tilde \Omega(\ell)$ times with high probability. This contradicts Lemma~3.2 which says that the center will be visited $\tilde O(n\sqrt{\ell})=o(\ell)$ times with high probability.
%
We can modify the statement of Lemma~3.12 to hold for a general value of $\ell$ as follows (this fact is not needed in this paper):
$\Pr(\exists y\ s.t.\
\sum_{i=1}^k N_\ell^{x_i}(y) \geq 32 \deg(x) \sqrt{k\ell+1}\log
n+k + \ell\deg(x)/m) \leq 1/n.$
(Recall that $m$ is the number of edges in the network.) This inequality can be proved using Lemma~3.12 and the fact that $m^2$ is larger than the mixing time, which means that the walk will visit vertex $x$ with probability $\deg(x)/m$ in each step after the $(m^2)^{th}$ step.
\end{quote}

We also gave the name {\sc Random Walk Visits Lemma For a Specific Vertex} to Lemma 3.18.



%\paragraph{To Discuss:} I'm not sure how to deal with (iii) in the comment above. Atish suggested that we mention the first equation in the proof of Lemma 3.18 (Eq. (7)) but I'm not sure if that is really relevant.

%I think we can't state a general bound (as required in (iii)) since our main proposition (Proposition 3.16) is specifically for $\sqrt{t}$.

%*** $\sqrt{\ell}$ bound is easier to prove than the general one since this is what we got in the expected number calculation. It might be possible to prove a more general bound it will more work and it is not necessary for this paper. ***


%*** Point to 3.18. Give name: Specific node visit lemma. Also state the inequality in the proof of 3.18; that should address (iii). Note that the lemma is not true for other values of $\ell$. Example: A star. When $\ell\gg n$, say $n^3$, then we will reach a stationary distribution so we will visit the center $\Omega(\ell)$ times. But the lemma just says that we will visit it only $O(n\sqrt \ell)=o(\ell)$ times.

%Current bound + $\ell\cdot deg(v)/n$





\paragraph{Comment:}
47. Section 4.3, first paragraph: (a) "perform random walk" $\rightarrow$ "perform a random walk" in line 1; (b) in three
(not all) places in this paragraph, the word "distribution" ("stationary" or "steady state") should be replaced
by "probability", since you are referring to some $\pi_i$ here

\paragraph{Response:} Done.

%\paragraph{To discuss:} I've done (a) but I'm not sure if (b) is really correct.


\paragraph{Comment:}
48. Proof of Lemma 3.17: remove "the" before "Markov's inequality".


\paragraph{Response:} Done.

\paragraph{Comment:} 49. Some of the references need page numbers, many need the non-abbreviated form of the conference name. In the reference Dubhashi et al., "Grandioni" should be "Grandoni".
\paragraph{Response:} We have updated many references to include page numbers and non-abbreviated forms of conference names. We also updated some references that have appeared in journals. We note that the papers by Adamic et al. and Sami and Twigg only appeared in arXiv and thus do not have page numbers.


\section*{Referee 3}
%\iffalse
%\paragraph{Comment:}
%The concept of random walks has been used extensively for analyzing flooding algorithms. Often such concepts
%are used for monitoring the data flow in networks, where each datum is represent by a token that independently
%performs a random walk. The literature explains how to control the message contention over each edge (however I
%could not find a clear reference to this line of work, see the work of Sotiris Nikoletseas, for example).
%
%
%\paragraph{Response:} ...?
%
%\paragraph{To Gopal:} Atish and I have looked at the above comments but have no ideas what these papers are about. If you are aware of these papers, can you add something to our paper? Otherwise, let's all discuss again.
%
%
%\paragraph{Comment:}
%Another approach is to use random walks for carrying a single token, as in Dolev et al 2006. Both approaches
%are used in order to avoid the use of fixed distributed data structures, such as BFS trees that are considered
%in this paper. The reason is that these fixed data structures are inefficient in dynamic ad hoc graphs for
%mobile networks - the need to update them can arise too frequent.
%
%It could be that the proposed study result is worthwhile publishing. However, I found several presentation
%flaws and I have started to lose confidence. I cannot recommend acceptance before the paper goes through a
%major revision.
%
%\paragraph{Response:} Maybe we don't have to say anything here.
%\fi

\paragraph{Comment:} Although I am inclined to accept the paper, I could not understand some of the key approaches. The paper starts with a reference to single random walk, before talking about stitching a constant number of random walks into a larger one, and then talking about carrying out these random walks over a BFS tree. See for example line 1 in Algorithm 1 that talks about the construction of such a BFS tree. It could be that I misunderstood something but for me, talking about the advantages of random walks in dynamic ad hoc graphs for mobile networks and then using a BFS tree does not make sense.

\paragraph{Response:} While we mention ad hoc networks as one motivation,
random walks have many applications in static (wireline) networks also. We agree
with the reviewer that in dynamic networks using BFS tree is not a good idea.
But this paper focuses only on static networks. A subsequent paper
(to appear in DISC 2012) deals with adapting our algorithm to dynamic networks.
We discuss this in Section 1.5 (under subsequent work). Thank you for helping us clarify this issue.


\paragraph{Comment:}
Section 1.1, describes informally a synchronized system. In fact, one
 would need perhaps to use a global clock because the authors assume that the round numbers are known. After
reading the paper, I am not sure that this assumption is needed. But in case it does, the clock synchronization
cost should be considered.




\paragraph{Response:} Yes, that's right, the algorithm does not need a global clock or round numbers.  We mention this in Section 1.1:
\begin{quote}
For convenience, our algorithms  assume that
nodes always know the number of the current round (although this is not really needed --- cf. Section 2).
\end{quote}
and also in Section 2: 
\begin{quote}
We note that the notion of ``phase'' is used only for simplicity.
The algorithm does not really need round numbers.
If there are many messages to be sent through the same edge, send one
with minimum counter first.
\end{quote}


\paragraph{Comment:}
Section 1.2 defines the studied problem and gives no motivation for them.

\paragraph{Response:} We discuss motivations in Section 1.3. In particular,
we mention two key applications (RST and mixing time computation) that directly use the distributed algorithm for our problems.


\paragraph{Comment:}
The paper is missing a system setting section in which authors assumptions are explained.

\paragraph{Response:} We have added more details in Section 1.1.

\paragraph{Comment:} The reference to the
CONGEST model without any presentation of it should be changed.

\paragraph{Response:} We have added more details on the CONGEST model in Section 1.1.

\paragraph{Comment:} The related work section is too brief. The
authors mention an extensive list of articles there they considers merely as applications and ignoring their
analysis, see the first paragraph of the introduction.

\paragraph{Response:} We have added more discussion in the related work section --- Section 1.5.



\paragraph{Comment:}
In observation 2.1 and the paragraph after, I have lost the motivation for preforming a random walk. If we can
collect the topology, over a dynamic ad hoc graph for mobile networks, why should we perform any distributed
calculation via a random walk rather than local calculations?

\paragraph{Response:} Yes, if $\ell$ is very large, that is $\Omega(m^2)$ (this is significantly more than the cover time also), then it is sufficient to collect the topology and do local computation (this takes $\tilde O(m)=\tilde O(\sqrt{\ell})$ time).
However, in many applications $\ell$ is much smaller, where our algorithm can be significantly faster
than the naive method of collecting the topology. Thus the focus of this paper is the regime where $\ell = O(m^2)$.

\paragraph{Comment:}
The code description is confuting. What is the $(i-1)$-iteration when i=1? Which coupons where received?  I am
sure the authors have the right answer but as a reader, I can get confused.

\paragraph{Response:} We added the following sentence when we mention the $(i-1)$-th iteration: ``(The zeroth iteration is the initial stage where each node creates its own messages.)''.



\paragraph{Comment:} Figure 1, Please do not repeat the caption text.

\paragraph{Response:} Fixed. Thank you. 

%\paragraph{Comment:} Remove the text in the third picture in Figure 1.







\end{document}
