
\subsection{Analysis}

The following theorem states the main result of this section: the
algorithm {\sc Single-Random-Walk} correctly samples
a node after a random walk of $\ell$ steps and the algorithm takes,
with high probability, $\tilde O\left(\sqrt{\ell D}\right)$ rounds,
where $D$ is the diameter of the graph.
%$\tilde O\left(\min(\frac{\sqrt{\ell
%D}}{\epsilon^{1/4}} +\sqrt{\frac{{D}}{\epsilon}},
%\frac{\sqrt{\ell}D^{2/3}}{\epsilon^{1/3}})\right)$
%rounds. Here $D$ is the
%diameter of the graph
%and $\epsilon$ is the eigenvalue gap in the graph's transition probability matrix.
%In the algorithm description,
%we assume that $\epsilon$ is known, and is therefore used in the
%choice of parameters. The value of $\epsilon$ can be guessed by
%doubling, and this would increase the number of runs, and therefore
%rounds, by only a $O(\log n)$ factor since $\frac{1}{\epsilon}$ is
%at most a polynomial in $n$ for undirected unweighted connected
%graphs.

%\begin{theorem}\label{thm:1-walk}
%Algorithm {\sc Single-Random-Walk} (cf. Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS and, with
%high probability\footnote{With high probability means with probability at least $(1-\frac{1}{n})$ throughout this paper.},
%finishes in  $\tilde{O}(\sqrt{\ell D}\epsilon^{-1/4})$ rounds.
%\end{theorem}

%PLEASE KEEP THIS. IT CAN TELL HOW TO GET OTHER BOUNDS. E.G. $\tilde O(\frac{\sqrt{\ell D}}{\epsilon^{1/3}}
%+\sqrt{\frac{{D}}{\epsilon^1/3}})$
%\begin{theorem}\label{thm:1-walk}
%For any $\alpha\geq \log_{1/\epsilon} (5\sqrt{D})$ and $\ell\leq \frac{160 m^2 (\log n)^5}{\epsilon^{1-\alpha}}$,
%Algorithm {\sc Single-Random-Walk} (cf. Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS and, with
%high probability\footnote{With high probability means with probability at least $(1-\frac{1}{n})$ throughout this paper.},
%finishes in  $\tilde O(\sqrt{\frac{\ell D}{\epsilon^{1-\alpha}}}+\sqrt{\ell
%D \epsilon^{1-\alpha}}+\frac{\sqrt{D}}{\epsilon^{\alpha}})$ rounds.
%\end{theorem}



\begin{theorem}\label{thm:1-walk}
For any $\ell\leq \frac{160 m^2 (\log
n)^5}{\sqrt{\epsilon}}$, Algorithm {\sc Single-Random-Walk} (cf.
Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS (the
Single Random Walk Problem) and, with high probability\footnote{With
high probability means with probability at least $(1-\frac{1}{n})$
throughout this paper.},
%
finishes in $\tilde O\left(\min(\frac{\sqrt{\ell D}}{\epsilon^{1/4}}
+\sqrt{\frac{{D}}{\epsilon}},
\frac{\sqrt{\ell}D^{2/3}}{\epsilon^{1/3}})\right)$ rounds.
\end{theorem}

Note that the theorem deals with values of $\ell \leq \frac{160 m^2
(\log n)^5}{\sqrt{\epsilon}}$. Indeed, if $\ell>m^2$, the random
walk problem can be solved in $\sqrt{\ell D}$ rounds since the
entire graph structure can be collected at one node in $O(m+D)$
rounds via upcast \cite{peleg}.


An important case implied by the above theorem is the following
corollary.

\begin{corollary}
For any $\frac{1}{\sqrt{\epsilon}}\leq \ell\leq \frac{160 m^2 (\log
n)^5}{\sqrt{\epsilon}}$, Algorithm {\sc Single-Random-Walk} (cf.
Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS and, with
high probability, finishes in $\tilde O(\frac{\sqrt{\ell
D}}{\epsilon^{1/4}})$ rounds.
\end{corollary}

A key difference in this paper from Das Sarma et
al.~\cite{DNP09-podc} is that with a crucial modification in the
algorithm, we are able to bound the number of times any node is
visited (based on the length of the walk). This in turn allows us to
bound the number of times {\sc Get-More-Walks} will be required, and
consequently get an improved result. Since Phase~1 of the algorithm
is essentially the same as that in \cite{DNP09-podc}, we only state
the result here with the proof omitted.


In \cite{DNP09-podc}, the running time of Phase~2 is argued by
bounding the number of times {\sc Get-More-Walks} is invoked in
total. In particular, it is shown that this algorithm is invoked at
most $\frac{\ell}{\eta\lambda}$ times.
%
%In particular, notice each {\sc Get-More-Walks($v$, $\eta$,
%$\lambda$)} is invoked only after all $\eta$ walks of length
%$\lambda$ starting at $v$ are used up and this algorithm creates the
%same number of such walks; in other words, one call of {\sc
%Get-More-Walks} corresponds to a walk of length $\eta\lambda$.
%Therefore,
In this paper, we show that with the modification of the algorithm
presented here, one can prove a stronger result, as stated in the
following Lemma.


%\textbf{Danupon:} Should this be a proposition or lemma or theorem? I choose to make
%it a proposition because it's more important than other lemmas. It could even be a theorem.
\begin{lemma} \label{prop:at_most_k_calls}
For any $k\geq 5$, $\lambda$, $\eta$, and $\ell$ such that
$\eta\lambda\geq \sqrt{\frac{160\ell}{\epsilon k}}(\log n)^{9/2}$
and $\ell\leq m^2\frac{160}{\epsilon k}(\log n)^5$, the probability
that {\sc Get-More-Walks} is invoked by the {\sc Single-Random-Walk}
algorithm more than $k$ times is at most $2/n$.
\end{lemma}

% This proposition is correct but we want to eliminate $k$
%\begin{proposition} \label{prop:at_most_k_calls}
%For any
%$k\geq 5$, $\lambda$, $\eta$, and $\ell$ such that
%$\eta\lambda\geq \sqrt{\frac{160\ell}{\epsilon k}}(\log n)^{5/2}$ and
%$\ell\leq m^2\frac{160}{\epsilon k}(\log n)^5$,
%the probability that Get-More-Walks is invoked by the {\sc
%Single-Random-Walk} algorithm more than $k$ times is at most $1/n$.
%\end{proposition}


We now prove the lemma over the next subsection and finally use it
to prove Theorem~\ref{thm:1-walk}.



% OLD PROOF -- IN CASE THE ABOVE ANALYSIS IS WRONG
%\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
%We consider two choices of parameters:
%
%%\squishlist
%\begin{enumerate}
%\item[1.] $k=\frac{1}{\sqrt{\epsilon}}$,
%$\lambda=\lceil\sqrt{160\ell D}\epsilon^{1/4}\rceil$, and
%$\eta=\lceil\frac{1}{\sqrt{\epsilon}D}(\log n)^{9/2}\rceil$
%\item[1.1] $k=\frac{1}{\sqrt{\epsilon}}$,
%$\lambda=\lceil\sqrt{\frac{160\ell}{D}}\epsilon^{-1/4}(\log n)^{9/2}\rceil$, and
%$\eta=1$
%\item[2.] $k=\frac{1}{\epsilon^{1/3}D}$,
%$\lambda=\lceil\sqrt{\frac{160\ell}{\epsilon k}}(\log n)^{5/2}\rceil$,
%and $\eta=1$
%\end{enumerate}
%%\squishend
%
%Observe that in both choices,
%$$\eta\lambda=\lceil\sqrt{\frac{160\ell}{\epsilon k}}(\log
%n)^{9/2}\rceil=\tilde O\left(\sqrt{\frac{\ell}{\epsilon k}}\right).$$
%
%By Lemma~\ref{lem:phase1}, Phase~1 finishes in $\tilde O(\lambda
%\eta) = \tilde O(\sqrt{\frac{\ell}{\epsilon k}})$ with high
%probability. For Phase~2, {\sc Sample-Destination} is invoked
%$O(\frac{\ell}{\lambda})$ times (only when we stitch the walks) and
%therefore, by Lemma~\ref{lem:Sample-Destination}, contributes
%$O(\frac{\ell D}{\lambda})$ rounds. Finally, by
%Proposition~\ref{prop:at_most_k_calls}, {\sc Get-More-Walks} is
%invoked at most $k$ times with high
%probability and hence contributes $O(kD)$ rounds by Lemma~\ref{lem:get-more-walks}.
%Therefore, the total number of rounds is
%\begin{eqnarray}
%\tilde O(\sqrt{\frac{\ell}{\epsilon k}}+\frac{\ell
%D}{\lambda}+kD).\label{eqn:number_of_rounds}
%\end{eqnarray}
%
%%Now, we choose $k$, $\lambda$ and $\eta$ as follows.
%Now, we choose $k=\frac{1}{\sqrt{\epsilon}}$,
%$\lambda=\lceil\sqrt{\ell D}\epsilon^{1/4}\rceil$, and
%$\eta=\lceil\frac{1}{\sqrt{\epsilon}D}(\log n)^{5/2}\rceil$. Note
%that $\frac{1}{\epsilon}> D$ and therefore our choice of $\eta$ is
%integral less than $\frac{2}{\sqrt{\epsilon}D}(\log n)^{5/2}$. By
%this choice, the first, second and third terms of
%\eqref{eqn:number_of_rounds} become $\sqrt{\ell}/\epsilon^{1/4}$,
%$\sqrt{\ell D}/\epsilon^{1/4}$, and $D/\sqrt{\epsilon}$.
%
%
%\textbf{Danupon:} This is an alternative choice of $k, \lambda,
%\eta$. Which one is preferred?
%
%Now, we choose $k=\frac{1}{\epsilon^{1/3}D}$,
%$\lambda=\lceil\sqrt{\frac{\ell}{\epsilon k}}(\log n)^{5/2}\rceil$,
%and $\eta=1$. By this choice, the first, second and third terms of
%\eqref{eqn:number_of_rounds} become $\tilde O(\sqrt{\ell
%D}/\epsilon^{1/3})$, $\tilde O(\sqrt{\ell D}/\epsilon^{1/3})$, and
%$\tilde O(1/\epsilon^{1/3})$. Therefore, the total number of rounds
%is $\tilde O(\sqrt{\ell D}/\epsilon^{1/3})$.
%
%
%*** TO WRITE: Also note that each node will call {\sc Get-More-Walk} only
%once.
%\end{proof}


%\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
%We choose $k=\frac{1}{\sqrt{\epsilon}}$, $\lambda=\lceil\sqrt{\ell
%D}\epsilon^{1/4}\rceil$, and
%$\eta=\lceil\frac{1}{\sqrt{\epsilon}D}(\log n)^{5/2}\rceil$. Note
%that $\frac{1}{\epsilon} > D$ and therefore our choice of $\eta\leq
%2\frac{1}{\sqrt{\epsilon}D}$.
%
%By Lemma~\ref{lem:phase1}, Phase~1 finishes in $\tilde O(\lambda
%\eta) = \tilde O(\frac{\sqrt{\ell}}{\sqrt{D}\epsilon^{1/4}})$ with
%high probability. For Phase~2, {\sc Sample-Destination} is invoked
%$O(\frac{\ell}{\lambda})$ times (only when we stitch the walks) and
%therefore, by Lemma~\ref{lem:Sample-Destination}, contributes
%$O(\frac{\ell D}{\lambda})=\tilde O(\sqrt{\ell D} \epsilon^{1/4})$
%rounds. Finally, by Proposition~\ref{prop:at_most_k_calls}, {\sc
%Get-More-Walks} is invoked at most $k=O(\frac{1}{\sqrt{\epsilon}})$
%times with high probability and therefore, by
%Lemma~\ref{lem:get-more-walks}, contributes
%$O(\frac{D}{\sqrt{\epsilon}})$ rounds. Therefore, the total number
%of rounds is XXX. (THIS IS NOT WHAT WE CLAIM. HAVE TO CHANGE
%PARAMETERS.)
%
%*** Also note that each node will call {\sc Get-More-Walk} only
%once.
%\end{proof}


%\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
% We chose $\gamma = O(\pi(A)\cdot 2m\cdot
%\lambda \cdot \eta \cdot \log n)$ (WHAT IS CONSTANT) where we
%further force $\gamma^2 \geq \frac{\ell k\log n}{\epsilon}$.
%Therefore, we choose $\eta\lambda$ such that $\pi(A)\cdot 2m\cdot
%\lambda \cdot \eta \cdot \log n \geq \sqrt{\frac{\ell k\log
%n}{\epsilon}}$.
%
%We consider a set $A$ of size $k$. So it follows that $\pi(A)\cdot
%2m \geq k$. Therefore, to ensure the above inequality, it suffices
%to choose $\eta \lambda = \gamma/k =
%\tilde{O}(\sqrt{\frac{l}{\epsilon k}})$. We now use
%Lemma~\ref{lem:gilman}.
%
%Using Lemma~\ref{prop:at_most_k_calls} and the bound on the running
%times of Phase~1 of {\sc Single-Random-Walk}
%(Lemma~\ref{lem:phase1}), {\sc Get-More-Walks}
%(Lemma~\ref{lem:get-more-walks}), and {\sc Sample-Destination}
%(Lemma~\ref{lem:Sample-Destination}), we get the number of rounds
%required to be $\tilde{O}(\sqrt{\frac{\ell}{\epsilon k}} +
%\frac{lD}{\lambda} + k\lambda)$ with the added constraint that
%$\eta\lambda = \sqrt{\frac{\ell}{\epsilon k}}$ [because of our
%choice of $\gamma$ in Lemma~{lem:gilman}]. We choose parameters as
%follows: $k=\frac{1}{\sqrt{\epsilon}}$, $\lambda = \sqrt{\ell
%D}\epsilon^{1/4}$, $\eta = \frac{1}{\sqrt{\epsilon D}}$. Note that
%$\frac{1}{\epsilon} > D$  and therefore our choice of $\eta$ is at
%least $1$. The result of $\tilde{O}(\sqrt{\ell D}\epsilon^{-1/4})$
%rounds follows by plugging in these parameter values.
%\end{proof}



\subsubsection*{Proof of Lemma~\ref{prop:at_most_k_calls}}

%{\bf Gopal: } What does it mean to say "That all terms are not  stated precisely".
%Better to remove this. INstead say "approximately $O(\lambda \eta)$ times" etc. What do you think?

Our plan is as follows. Roughly speaking, we want to show that at
most $k$ nodes are used as connectors (cf.
Algorithm~\ref{alg:single-random-walk} Phase~2) more than $\eta$
times, and hence need to call {\sc Get-More-Walks}. The main idea in
showing this consists of two parts.
%(Note that all terms here are not stated
%precisely.)
In the first part (cf. Lemma~\ref{lem:gilman}), we show that, with
high probability, there are not many nodes visited by a random walk
more than approximately $\lambda \eta$ times. Then in the second
part (Lemma~\ref{lem:uniformityused}), we show that those nodes that
are visited at most $t$ times by a walk will, with high probability,
appear at most approximately $t/\lambda$ times as connectors
(intuitively, because the distance between consecutive connectors is
at least $\lambda$). Finally, we combine these two parts to prove the
Lemma: with high probability, there are at most $k$ nodes visited
more than approximately $\lambda\eta$ times, and only those $k$ nodes
will appear as connectors more than approximately $\eta$ times and
hence necessitate a call to {\sc Get-More-Walks}.

We now prove the first part. We start by explaining the main
ingredient which is a result by Gillman~\cite{Gillman98}.

Let $t_\ell$ denote the number of times a set of vertices $A$ is
visited in a walk of length $\ell$; further let $\pi(x) =
\frac{\deg(x)}{2m}$ denote the stationary probability of vertex $x$
and $\pi(A)$ the sum of stationary probabilities of vertices in $A$.
Let the source node of the random walk of length $\ell$ be chosen from
the distribution ${\bf q}$. Let $\frac{{\bf q}}{\sqrt{\pi}}$ denote
the vector with entries $\frac{{\bf q}(x)}{\sqrt{\pi(x)}}$, and let
$N_{\bf q}$ denote the $L_2$-norm of $\frac{{\bf q}}{\sqrt{\pi}}$.

Denote by $\epsilon$ the gap between the first and the second
eigenvalue of the transition matrix of the undirected graph. The top
eigenvalue $\lambda_1$ is $1$ and the eigenvector corresponding to
this is the stationary distribution vector. Further, the second
eigenvalue $\lambda_2$ is bounded away from $1$ under the assumption
that the graph is connected; thus $\epsilon = \lambda_1 - \lambda_2 > 0$.

\begin{theorem} [Theorem 2.1 of Gillman~\cite{Gillman98}]
\label{thm:gilman} $\Pr[t_{\ell} \geq \ell\pi(A) + \gamma] \leq (1 +
\frac{\gamma \epsilon}{10\ell})N_{{\bf q}}e^{-\gamma^2\epsilon/20\ell}$
\end{theorem}

Now we use the theorem above to prove the first part which, again,
roughly says that ``most'' nodes do not appear many times in the walk.
Its proof can be found in Appendix~\ref{proof:lem:gilman}.

\begin{lemma} \label{lem:gilman}
For any $k\geq 5$, the probability that for $k$ distinct nodes,
$v_i$ (for $1\leq i\leq k$), each $v_i$ is visited more than
$\frac{\gamma}{k} + \ell\pi(v_i)$ times in a random walk of length
$\ell$, where $\gamma = \left({\frac{40\ell}{\epsilon}\cdot k\log
n}\right)^{1/2}$, is at most $\frac{1}{n}$.
\end{lemma}



%\begin{lemma} \label{lem:gilman}
%The probability that more than $k$ distinct nodes, $x_1, x_2, \ldots, x_k$ are each visited more than $\gamma/k + \ell d(x_i)/2m$ times respectively for $\gamma = \sqrt{\frac{\ell k}{\epsilon}}$ is at most $\frac{1}{n}$ as long as $l\leq O(m)$.
%\end{lemma}
%\begin{proof}
%In Theorem~\ref{thm:gilman} choose $\gamma$ such that $\gamma^2 \geq \frac{\ell k\log^3 n}{\epsilon}$.
%We consider a set $A$ of size $k$. So it follows that $\pi(A)\cdot 2m \geq k$.
%
%Further, by our choice of $\gamma$, we know that $\gamma > \ell\pi(A)$ as long as $\ell<m$. Therefore, we get $P[t_l \geq \tilde{\Theta}(\sqrt{\frac{\ell k}{\epsilon}})
%] \leq O(e^{-k\log^2n} \leq n^{-k}e^{-\log n}$, here we choose the $\log n$ factors sufficiently large to ensure that $N_{{\bf q}}$ is nullified.
%
%We now perform a union bound over all sets $A$ of size $k$. There are $n^k$ such sets. Therefore, the probability that for any set $A$ of size $k$, the set $A$ is visited more than $\sqrt{\frac{\ell k}{\epsilon}}$ times is at most $\frac{1}{n}$. Now, if $k$ distinct nodes $x_1, x_2, \ldots, x_k$ are each visited more than $\gamma/k + \ell d(x_i)/2m$ times respectively, then consider the set that contains exactly these $k$ nodes. The condition is violated.
%\end{proof}


%\begin{lemma}
%The probability that Get-More-Walks is invoked more than $k$ times is, w.h.p., $O(1/n)$.
%\end{lemma}
%\begin{proof}
%{\bf Uniformity Argument.}
%
%We know that in a walk of length $\ell$, at most $k$ distinct nodes can be visited more than $\eta_x \lambda \log n$ where $\eta_x = \eta\cdot d(x)$. However, we now need to argue that if only the {\em Connectors} of this $\ell$ length walk are observed, then any of these nodes is visited at most $\eta_x \log n$ times w.h.p.
%
%Intuitively, this argument is simple, since the connectors are spread out in steps of length approximately $\lambda$. However, there might be some {\em periodicity} that results in the same node being visited multiple times but {\em exactly} at $\lambda$-intervals. This is where we crucially use the fact that the algorithm uses walks of length $\lambda + r$ where $r$ is chosen uniformly at random from $[0,\lambda]$.
%
%We essentially use the following claim.
%\begin{claim}
%Given a sequence of observations $X_1, X_2, \ldots, X_{t}$, if an event ${\cal R}$ is observed at most $f_1\cdot f_2$ times. Suppose one observation is picked at random from $X_i, X_{i+1}, X_{i+f_2}$ for each $i$ in $\{1, f_2, 2f_2, \ldots, (f_1-1)f_2\}$, and call these observations $Y_1, Y_2, \ldots, Y_{f_1}$, then the probability that the event ${\cal R}$ is observed more than $f_2\log n$ times among all $Y_i$ observations is at most $1/n$.
%\end{claim}
%\begin{proof}
%To write.
%\end{proof}
%
%Choose $t=\ell$, the event observed to be the walk being at node $x$, and $f_1=\eta_x\log n$ and $f_2=\lambda/2$. By our algorithm, the samples picked are even sparser -- to formalize.
%This then completes the proof of the Uniformity Lemma.
%
%\end{proof}

%--------------------------------------------------------------


At first glance, it may look as if
Lemma~\ref{prop:at_most_k_calls} follows easily from
Lemma~\ref{lem:gilman} above. However, this is not true. The point
is that in Lemma~\ref{lem:gilman}, we looked at all points of the
$\ell$-length walk. If we look at all $\ell$ points, then indeed any
node may occur many times. However, in {\sc Single-Random-Walk}, we
need to invoke {\sc Get-More-Walks} only when a node is visited too
many times (more than $\eta_v$ times) as a {\em connector} node.
Here, ``too many times'' means more than the number of walks that
were stored from it in Phase~1.

%{\bf Gopal:} "by too many times", we mean $\eta_v$ right? In that case, better to say
%$\eta_v$.

Recall that in the algorithm each $r_i$ is chosen uniformly at
random from $[0, \lambda-1]$; if the walk lengths were instead
deterministic, arguing about connectors would become difficult.

To prove the lemma, recall that it is shown earlier that in a walk
of length $\ell$, with high probability, there are at most $k$
distinct nodes $v$ that appear more than $\eta_v \lambda \log n$
times in the walk, where $\eta_v = \eta \deg(v)$. It is left to show
that every vertex $v$ that appears at most $\eta_v\lambda\log n$
times in the walk also appears at most $\eta_v\polylog n$ times as a
{\em connector node} (defined in
Algorithm~\ref{alg:single-random-walk}), with high probability. We
show the following lemma.

%{\bf Gopal:}  The definition of connectors is not clear from the psuedocode.
%As I say at the beggining of the section, it isbetter to define it (informally explanantion is fine)
%at the beginning itself.

%-------------------------------------------

We next show the second part, which says that if any node $v$
appears $t$ times in the walk, then it is likely to appear roughly
$t/\lambda$ times as a connector.

\begin{lemma}
\label{lem:uniformityused} For any vertex $v$, if $v$ appears in the
walk at most $t$ times then it appears as a connector node at most
$t(\log n)^2/\lambda$ times with probability at least $1-1/n^2$.
\end{lemma}

\begin{proof}[Sketch]
Intuitively, this argument is simple, since the connectors are
spread out in steps of length approximately $\lambda$. However,
there might be some {\em periodicity} that results in the same node
being visited multiple times but {\em exactly} at
$\lambda$-intervals. This is where we crucially use the fact that
the algorithm uses walks of length $\lambda + r$ where $r$ is chosen
uniformly at random from $[0,\lambda-1]$.
%Due to the lack of space,
%we provide full details in Appendix~\ref{sec:uniformityused-proof}.

Consider a sequence of $k$ numbers $x_1, x_2, ..., x_k$, and assume
that the number $1$ appears at most $t$ times, for some $t$.
Now, pick every $(\lambda+r)$-th number starting from the left, where
$r$ is chosen uniformly at random from $[0,\lambda-1]$. (This is
equivalent to picking connectors in the random walk.) How many times
will $1$ be picked? We claim that $1$ will be picked at most $t(\log
n)^2/\lambda$ times with probability at least $1-1/n^2$.

To see this, the main step is to consider another process: partition
$x_1, ..., x_k$ into blocks of size $\lambda$ and pick one number
from each block uniformly at random. Intuitively, since this process
always returns at least as many numbers as the previous process, the
probability that we see $1$ more than $t(\log n)^2/\lambda$ times in
this process is at least the probability of seeing $1$ more than
$t(\log n)^2/\lambda$ times in the previous process. This claim has
to be carefully proved, which is done in
Appendix~\ref{sec:uniformityused-proof}.

Now, since the choices from different blocks are independent of each
other, we can use Chernoff's bound to bound the probability of
seeing $1$ more than $t(\log n)^2/\lambda$ times in the second
process. This turns out to be at most $1/n^2$, as desired.
\end{proof}

%************ MOVED TO APPENDIX ****************************
%\begin{proof}
%Intuitively, this argument is simple, since the connectors are
%spread out in steps of length approximately $\lambda$. However,
%there might be some {\em periodicity} that results in the same node
%being visited multiple times but {\em exactly} at
%$\lambda$-intervals. This is where we crucially use the fact that
%the algorithm uses walks of length $\lambda + r$ where $r$ is chosen
%uniformly at random from $[0,\lambda-1]$.
%
%%-----------------------------
%
%We prove the lemma using the following two claims.
%
%\begin{claim}
%Consider any sequence $A$ of numbers $a_1, ..., a_\ell'$ of length
%$\ell'$. For any integer $\lambda'$, let $B$ be a sequence
%$a_{\lambda'+r_1}, a_{2\lambda'+r_1+r_2}, ...,
%a_{i\lambda'+r_1+...+r_i}, ...$ where $r_i$, for any $i$, is a
%random integer picked uniformly from $[0, \lambda'-1]$. Consider
%another subsequence of numbers $C$ of $A$ where an element in $C$ is
%picked from from ``every $\lambda'$ numbers'' in $A$; i.e., $C$
%consists of $\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$
%where, for any $i$, $c_i$ is chosen uniformly at random from
%$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$.
%Then, $Pr[C \text{ contains } a_{i_1}, a_{i_2}, ..., a_{i_k}\}] =
%Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}]$ for any set $\{a_{i_1},
%a_{i_2}, ..., a_{i_k}\}$.
%\end{claim}
%
%%\textbf{Danupon:} This claim is stated with full details as we may
%%want to bring it out to highlight later.
%%\begin{claim}
%%Given a sequence of observations $X_1, X_2, \ldots, X_{t}$, if an
%%event ${\cal R}$ is observed at most $f_1\cdot f_2$ times. Suppose
%%one observation is picked at random from $X_i, X_{i+1}, X_{i+f_2}$
%%for each $i$ in $\{1, f_2, 2f_2, \ldots, (f_1-1)f_2\}$, and call
%%these observations $Y_1, Y_2, \ldots, Y_{f_1}$, then the probability
%%that the event ${\cal R}$ is observed more than $f_2\log n$ times
%%among all $Y_i$ observations is at most $1/n$.
%%\end{claim}
%\begin{proof}
%First consider a subsequence $C$ of $A$. Numbers in $C$ are picked
%from ``every $\lambda'$ numbers'' in $A$; i.e., $C$ consists of
%$\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$ where, for
%any $i$, $c_i$ is chosen uniformly at random from
%$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$.
%Observe that $|C|\geq |B|$. In fact, we can say that ``$C$ contains
%$B$''; i.e., for any sequence of $k$ indexes $i_1, i_2, ..., i_k$
%such that $\lambda'\leq i_{j+1}-i_j\leq 2\lambda'-1$ for all $j$,
%%
%$$Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] = Pr[C \text{ contains
%} \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}].$$
%%
%To see this, observe that $B$ will be equal to $\{a_{i_1}, a_{i_2},
%..., a_{i_k}\}$ only for a specific value of $r_1, r_2, ..., r_k$.
%Since each of $r_1, r_2, ..., r_k$ is chosen uniformly at random
%from $[1, \lambda']$, $Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] =
%\lambda'^{-k}$.
%%(Some technicality: The inequality follows from
%%the fact that for some $\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}$, $Pr[B =
%%\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] = 0$.)
%Moreover, the $C$ will contain $a_{i_1}, a{i_2}, ..., a_{i_k}\}$ if
%and only if, for each $j$, we pick $a_{i_j}$ from the interval that
%contains it (i.e., from $a_{(i'-1)\lambda'+1}, a_{(i'-1)\lambda'+2},
%..., a_{i'\lambda'}$, for some $i'$). (Note that $a_{i_1}, a_{i_2},
%...$ are all in different intervals because $i_{j+1}-i_j\geq
%\lambda'$ for all $j$.) Therefore, $Pr[C \text{ contains } a_{i_1},
%a_{i_2}, ..., a_{i_k}\}]=\lambda'^{-k}$.
%\end{proof}
%
%\begin{claim}
%Consider any sequence $A$ of numbers $a_1, ..., a_\ell'$ of length
%$\ell'$. Consider subsequence of numbers $C$ of $A$ where an element
%in $C$ is picked from from ``every $\lambda'$ numbers'' in $A$;
%i.e., $C$ consists of $\lfloor\ell'/\lambda'\rfloor$ numbers $c_1,
%c_2, ...$ where, for any $i$, $c_i$ is chosen uniformly at random
%from $a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ...,
%a_{i\lambda'}$.. For any number $x$, let $n_x$ be the number of
%appearances of $x$ in $A$; i.e., $n_x=|\{i\ |\ a_i=x\}|$. Then, for
%any $R\geq 6n_x/\lambda'$, $x$ appears in $C$ more than $R$ times
%with probability at most $2^{-R}$.
%\end{claim}
%\begin{proof}
%For $i=1, 2, ..., \lfloor\ell'/\lambda'\rfloor$, let $X_i$ be a 0/1
%random variable that is $1$ if and only if $c_i=x$ and
%$X=\sum_{i=1}^{\lfloor\ell'/\lambda'\rfloor} X_i$. That is, $X$ is
%the number of appearances of $x$ in $C$. Clearly,
%$E[X]=n_x/\lambda'$. Since $X_i$'s are independent, we can apply the
%Chernoff bound (e.g., in~\cite[Theorem~4.4.]{MU-book-05}): For any
%$R\geq 6E[X]=6n_x/\lambda'$,
%$$Pr[X\leq R]\geq 2^{-R}.$$
%The claim is thus proved.
%\end{proof} %of claim

%-------------------------------

%Now we use the claim to prove the lemma. Choose $\ell'=\ell$ and
%$\lambda'=\lambda$ and consider any node $v$ that appears at most
%$t$ times. The number of times it appears as a connector node is
%the number of times it appears in the subsequence $B$ described in
%the claim. By applying the claim with $R=t(\log n)^2$, we have that
%$v$ appears in $B$ more than $t(\log n)^2$ times with probability at
%most $1/n^2$ as desired.
%\end{proof}



We are now ready to prove Lemma~\ref{prop:at_most_k_calls}.

\begin{proof}[Proof of Lemma~\ref{prop:at_most_k_calls}]
By Lemma~\ref{lem:gilman}, every node $v$, except for some $k$
nodes, is good; i.e., $v$ is visited by the walk at most
$\frac{\gamma}{k}+\ell\pi(v)$ times, with probability at least
$1-1/n$.
%
By Lemma~\ref{lem:uniformityused}, every such good node $v$ appears
as a connector at most
$\frac{\left(\frac{\gamma}{k}+\ell\pi(v)\right)(\log n)^2}{\lambda}$
times, with probability at least $1-1/n$, where $\gamma$ is as in
Lemma~\ref{lem:gilman}.

The algorithm, on the other hand, performs $\eta \deg(v)$ walks from
each node $v$. For any node $v$, since $\ell\leq m^2
\frac{160}{\epsilon k} (\log n)^5$ and $\eta\lambda \geq
\sqrt{\frac{160\ell}{\epsilon k}}(\log n)^{9/2}$,
%
%\begin{eqnarray*}
%\ell \pi(v)(\log n)^2 &=& \ell \frac{\deg(v)}{2m}(\log n)^2\\
%&\leq& \sqrt{\ell} \left(2m \sqrt{\frac{40}{\epsilon k}} (\log n)^{5/2}\right) \frac{\deg(v)}{2m} (\log n)^2\\
%&=& \sqrt{\frac{40\ell}{\epsilon k}} (\log n)^{9/2} \deg(v)\\
%&\leq& \frac{\eta \lambda \deg(v)}{2}
%\end{eqnarray*}
%
\begin{eqnarray*}
\ell \pi(v)(\log n)^2 = \ell \frac{\deg(v)}{2m}(\log n)^2
&\leq& \sqrt{\ell} \left(2m \sqrt{\frac{40}{\epsilon k}} (\log n)^{5/2}\right) \frac{\deg(v)}{2m} (\log n)^2\\
&=& \sqrt{\frac{40\ell}{\epsilon k}} (\log n)^{9/2} \deg(v) \leq
\frac{\eta \lambda \deg(v)}{2}
\end{eqnarray*}
%
%$$\ell \pi(v)(\log n)^2 = \ell \frac{\deg(v)}{2m}(\log n)^2
%\leq \sqrt{\ell} \left(2m \sqrt{\frac{40}{\epsilon k}} (\log
%n)^{5/2}\right) \frac{\deg(v)}{2m} (\log n)^2 =
%\sqrt{\frac{40\ell}{\epsilon k}} (\log n)^{9/2} \deg(v) \leq
%\frac{\eta \lambda \deg(v)}{2}$$
%
%
Moreover, $\frac{\gamma}{k}(\log n)^2= \sqrt{\frac{40\ell}{\epsilon
k}} (\log n)^{5/2}\leq \frac{\eta\lambda}{2}.$

Therefore, any good node $v$ does not need to call {\sc
Get-More-Walks}. The lemma thus follows since, with high
probability, there are at most $k$ bad nodes and each bad node
invokes {\sc Get-More-Walks} only once (as {\sc Get-More-Walks} can
produce as many walks as needed in $O(D)$ rounds).
%
%----------------------------------------
%
%Notice that $\ell\pi(v) = \ell\frac{\deg(v)}{2m}$.
%For our choice of $\lambda = \sqrt{lD}{\epsilon}^{1/4}$ and $\eta = 1/\sqrt{\epsilon D}$,
%we get $\eta\lambda = \sqrt{\frac{40\ell}{\epsilon k}}(\log n)^{5/2}$.
%Therefore, $\ell\pi(v)/\lambda \leq \eta\deg(v)$ as long as $\ell\leq m^2/\epsilon^{1/2}$.
%HAS TO BE CLEANED UP. This is because
%
%[{\bf Atish} : In the above para, to put in the logs and constants correctly in the value of $\lambda$ and $\eta$].
%
%This quantity is at most
%$$\eta \deg(v)$$ since $\ell\leq m^2/\epsilon^{1/2}$ and $\eta\lambda\geq
%\sqrt{\frac{40\ell}{\epsilon k}}(\log n)^{5/2}$.
%
%Therefore, such good nodes do not need to call {\sc Get-More-Walks}.
%The proposition thus follows.
%
%
%This uses Lemma~\ref{lem:gilman} and Lemma~\ref{lem:uniformityused}.
%Lemma~\ref{lem:gilman} gives a bound on the number of times any node
%is visited, when all points of the $\ell$ length walk are
%considered. This, together with Lemma~\ref{lem:uniformityused} gives
%us a bound on the number of times any node is visited when only the
%connector points of the $\ell$ length walk are considered.
%
%Have to wait for the exact number from previous lemmas.
\end{proof}


We now complete the proof of Theorem~\ref{thm:1-walk}.

\begin{proof}[Proof of Theorem~\ref{thm:1-walk} (sketch)]
The theorem follows by choosing $\lambda=\lceil\sqrt{\frac{160\ell
D}{\epsilon k}}(\log n)^{9/2}\rceil$ and $\eta=1$ and considering
two values of $k$: $k=5(\frac{\log n}{\epsilon D})^{1/2}$ and
$k=5(\frac{\log n}{\epsilon D})^{1/3}$. Full proof is in
Appendix~\ref{proof:thm:1-walk}.
\end{proof}

%\begin{lemma} [Uniformity Lemma]
%In the walk of length $l$, if every node is seen at most $t$ times w.h.p., then among the Connectors in a walk of length $l$, any walk is seen at most $O(t/\lambda)$ times w.h.p.
%\end{lemma}


%\begin{lemma}
%\label{lem:mainone} Algorithm {\sc Single-Random-Walk} (cf.
%Algorithm~\ref{alg:single-random-walk}) solves $1$-RW-DoS and, with
%high probability, finishes in $O(\frac{\lambda\eta\log
%n}{\mindegree} + \frac{\ell\cdot D}{\lambda} + \frac{\ell}{\eta})$
%rounds.
%\end{lemma}
%\begin{proof}
%First, we prove the correctness of the algorithm. Observe that any
%two $\lambda$-length walks (possibly from different sources) are
%independent from each other. Moreover, a walk from a particular node
%is picked uniformly at random (by
%Lemma~\ref{lem:correctness-sample-destination}). Therefore, the {\sc
%Single-Random-Walk} algorithm is equivalent to having a source node
%perform a walk of length $\lambda$ and then have the destination do
%another walk of length $\lambda$ and so on.

%We now prove the time bound.
%First, observe that algorithm {\sc Sample-Destination}  is called
%$O(\frac{\ell}{\lambda})$ times and by
%Lemma~\ref{lem:Sample-Destination}, this algorithm takes
%$O(\frac{\ell\cdot D}{\lambda})$ rounds in total.
%Next, we claim that {\sc Get-More-Walks} is called at most
%$O(\frac{\ell}{\lambda\eta})$ times in total (summing over all
%nodes). This is because when a node $v$ calls {\sc
%Get-More-Walks}($v$, $\eta$, $\lambda$), all $\eta$ walks starting
%at $v$ must have been stitched and therefore $v$ contributes
%$\lambda\eta$ steps of walk to the long walk we are constructing.
%It follows from Lemma~\ref{lem:get-more-walks} that {\sc
%Get-More-Walks} algorithm takes $O(\frac{\ell}{\eta})$ rounds in
%total.

%Combining the above results with Lemma~\ref{lem:phase1} gives the
%claimed bound.
%\end{proof}

%Theorem~\ref{thm:1-walk} immediately follows.

%\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
%Use Lemma~\ref{lem:mainone} with
%$\lambda=\frac{\ell^{1/3}D^{2/3}\mindegree^{1/3}}{(\log n)^{1/3}}$
%and $\eta=\frac{\ell^{1/3}\mindegree^{1/3}}{D^{1/3}(\log n)^{1/3}}$.
%\end{proof}

%-----------------------------------------------------------------




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Proof of Lemma~\ref{lem:gilman}}
\label{proof:lem:gilman}
\begin{proof}
First consider any set of $k$ vertices $A$. Apply
Theorem~\ref{thm:gilman} for the choice of $\gamma^2 =
\frac{20\ell}{\epsilon}\cdot (k+5)\log n$. It follows that,
$\Pr[t_{\ell} \geq \ell\pi(A) + \gamma] \leq (1 + \frac{\gamma
\epsilon}{10\ell})N_{{\bf q}}e^{-\gamma^2\epsilon/(20\ell)}$.
Substituting the value of $\gamma^2$, this probability reduces to
$(1 + \frac{\gamma \epsilon}{10\ell})N_{{\bf
q}}e^{-\gamma^2\epsilon/(20\ell)}\leq 2N_{\bf q}e^{-(k+5)\log n}\leq
n^{-k-1}$. The last step follows from the bound that $N_{\bf q}\leq
nm\leq n^3$. Therefore, for the given choice of $\gamma$, the
probability that a given set $A$ of size $|A|=k$ is visited
more than $\ell\pi(A) + \gamma$ times is at most
$\frac{1}{n^{k+1}}$.

Now consider all sets of size $k$ in the graph. There are less than
$n^{k}$ sets of vertices of size $k$. It follows, by Union Bound,
that the probability that some set of $k$ vertices $A_i$ (from among
all possible sets) is visited more than $\ell\pi(A_i) + \gamma$
times, is at most $n^k\cdot\frac{1}{n^{k+1}} = \frac{1}{n}$.

Now consider any $k$ distinct vertices $v_1, v_2, \ldots, v_k$. If
each $v_i$ (for $1\leq i\leq k$) is visited more than
$\frac{\gamma}{k} + \ell\pi(v_i)$ times, then the set $S=\{v_1, v_2,
\ldots, v_k\}$ is visited more than
$\sum_{i=1}^{k}\left(\frac{\gamma}{k}
+ \ell\pi(v_i)\right) = \gamma + \sum_{i=1}^{k}{\ell\pi(v_i)} = \gamma +
\ell\pi(S)$ times. Denote the probability that there exist $k$ such
distinct nodes $v_i$ (such that each is visited more than
$\frac{\gamma}{k} + \ell\pi(v_i)$ times) by $D_k$ and denote the
probability that there exists some set of vertices $A$ such that the
entire set is visited more than $\gamma + \ell\pi(A)$ times by
$S_k$. We therefore have $D_k\leq S_k$. It follows that $D_k\leq
\frac{1}{n}$.
%
This completes the proof.
\end{proof}



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Appendix: Proof of Theorem~\ref{thm:1-walk}} \label{proof:thm:1-walk}
\begin{proof}[Proof of Theorem~\ref{thm:1-walk}]
We choose $\lambda=\lceil\sqrt{\frac{160\ell D}{\epsilon k}}(\log
n)^{9/2}\rceil$ and $\eta=1$, and consider two values of $k$:
$k=5(\frac{\log n}{\epsilon D})^{1/2}$ and $k=5(\frac{\log
n}{\epsilon D})^{1/3}$.
%
Note that for both values of $k$, $k\geq 5$ since
$\frac{1}{\epsilon}\geq \Omega(\frac{D}{\log n})$. This is because
$\frac{1}{\epsilon}$ is at least the mixing time over $\log n$, and
the mixing time of a graph is at least its diameter $D$. Therefore,
all conditions in Lemma~\ref{prop:at_most_k_calls} hold.

By Lemma~\ref{lem:phase1}, Phase~1 finishes in $\tilde O(\lambda
\eta) = \tilde O(\sqrt{\frac{\ell D}{\epsilon k}})$ rounds with high
probability. For Phase~2, {\sc Sample-Destination} is invoked
$O(\frac{\ell}{\lambda})$ times (only when we stitch the walks) and
therefore, by Lemma~\ref{lem:Sample-Destination}, contributes
$O(\frac{\ell D}{\lambda})$ rounds. Finally, by
Lemma~\ref{prop:at_most_k_calls}, {\sc Get-More-Walks} is invoked at
most $k$ times with high probability and hence contributes $O(kD)$
rounds by Lemma~\ref{lem:get-more-walks}. Therefore, the total
number of rounds is
$$
\tilde O\left(\sqrt{\frac{\ell D}{\epsilon k}}+\frac{\ell
D}{\lambda}+kD\right)
%= \tilde O(\frac{\sqrt{\ell D}}{\epsilon^{1/4}}
%+\sqrt{\ell D} \epsilon^{1/4}
%+\sqrt{\frac{{D}}{\epsilon}})
\label{eqn:number_of_rounds}
$$
which is $$\tilde O\left(\frac{\sqrt{\ell D}}{\epsilon^{1/4}}
+\sqrt{\frac{{D}}{\epsilon}}\right)$$ when $k=5(\frac{\log
n}{\epsilon D})^{1/2}$
%
and $$\tilde O\left(\frac{\sqrt{\ell}D^{2/3}}{\epsilon^{1/3}}
+\sqrt{\ell} D^{5/6} \epsilon^{2/3}
+\frac{{D^{2/3}}}{\epsilon^{1/3}}\right) = \tilde
O\left(\frac{\sqrt{\ell}D^{2/3}}{\epsilon^{1/3}}\right)$$ when
$k=5(\frac{\log n}{\epsilon D})^{1/3}$. (The last equality uses the
fact that $\frac{1}{\epsilon}\geq D$ and so
$D^{5/6}\epsilon^{2/3}\leq D^{1/6}$.)
\end{proof}
