\section{Omitted Proofs of Section~\ref{sec:one_walk_DoS} (Upper Bound)}

\subsection{Algorithm descriptions}\label{sec:pseudocode}
The main algorithm for performing a single random walk is described
in {\sc Single-Random-Walk} (cf.
Algorithm~\ref{alg:single-random-walk}). This algorithm, in turn,
uses {\sc Get-More-Walks} (cf. Algorithm~\ref{alg:Get-More-Walks}) and {\sc
Sample-Destination} (cf. Algorithm~\ref{alg:Sample-Destination}).

Notice that in Line~\ref{line:reservoir} in
Algorithm~\ref{alg:Get-More-Walks}, the walks of length $\lambda$
are extended further to walks of length $\lambda+r$ where $r$ is a
random number in the range $[0,\lambda-1]$. We do this by extending
the $\lambda$-length walks further, and probabilistically stopping
each walk in each of the next $i$ steps (for $0\leq i\leq
\lambda-1$) with probability $\frac{1}{\lambda-i}$. The reason it
needs to be done this way is because if we first sampled $r$,
independently for each walk, in the range $[0,\lambda-1]$ and then
extended each walk accordingly, the algorithm would need to pass $r$
independently for each walk. This will cause congestion along the
edges; no congestion occurs in the mentioned algorithm as only the
{\em count} of the number of walks along an edge is passed to the
node across the edge.

\newcommand{\mindegree}[0]{\delta}
\begin{algorithm}
\caption{\sc Single-Random-Walk($s$, $\ell$)}
\label{alg:single-random-walk}
\textbf{Input:} Starting node $s$, and desired walk length $\ell$.\\
\textbf{Output:} Destination node of the walk outputs the ID of
$s$.\\

\textbf{Phase 1: (Each node $v$ performs $\eta_v=\eta \deg(v)$
random walks of length $\lambda + r_i$ where $r_i$ (for each $1\leq
i\leq \eta_v$) is chosen independently at random in the range
$[0,\lambda-1]$.)}
\begin{algorithmic}[1]
\STATE Each node $x$ lets $r_{max} = \max_{1\leq i\leq \eta_x}{r_i}$, where the $r_i$ are the random
numbers chosen independently for each of its $\eta_x$ walks.

\STATE Each node $x$ constructs $\eta_x$ messages containing its ID
and in addition, the $i$-th message contains the desired walk length
of $\lambda + r_i$.

\FOR{$i=1$ to $\lambda + r_{max}$}

\STATE This is the $i$-th iteration. Each node $v$ does the
following: Consider each message $M$ held by $v$ and received in the
$(i-1)$-th iteration (having current counter $i-1$). If the message
$M$'s desired walk length is at most $i$, then $v$ stores the ID of
the source ($v$ is the desired destination). Else, $v$ picks a
neighbor $u$ uniformly at random and forwards $M$ to $u$ after
incrementing its counter.

\COMMENT{Note that any iteration could require more than 1 round.}

\ENDFOR

\end{algorithmic}


\textbf{Phase 2: (Stitch $\Theta(\ell/\lambda)$ walks, each of
length in $[\lambda,2\lambda-1]$)}
\begin{algorithmic}[1]
\STATE The source node $s$ creates a message called ``token'' which
contains the ID of $s$

\STATE The algorithm generates a set of {\em connectors}, denoted by
$C$, as follows.

\STATE Initialize $C = \{s\}$

\WHILE {Length of walk completed is at most $\ell-2\lambda$}

  \STATE Let $v$ be the node that is currently holding the token.

  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
  returned value (which is a destination of an unused random walk starting at $v$
  of length between $\lambda$ and $2\lambda-1$.)

  \IF{$v'$ = {\sc null} (all walks from $v$ have already been used up)}

  \STATE $v$ calls {\sc Get-More-Walks($v$, $\lambda$)} (Perform $\Theta(\ell/\lambda)$ walks
  of length $\lambda$ starting at $v$)

  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
  returned value

  \ENDIF

  \STATE $v$ sends the token to $v'$

  \STATE $C = C \cup \{v\}$

\ENDWHILE

\STATE Walk naively until $\ell$ steps are completed (this is at
most another $2\lambda$ steps)

\STATE A node holding the token outputs the ID of $s$

\end{algorithmic}

\end{algorithm}


\begin{algorithm}[t]
\caption{\sc Get-More-Walks($v$, $\lambda$)}
\label{alg:Get-More-Walks} (Starting from node $v$, perform
$\lfloor\ell/\lambda\rfloor$ random walks, each of length
$\lambda + r_i$ where
$r_i$ is chosen uniformly at random in the range $[0,\lambda-1]$ for the $i$-th walk.) \\
\begin{algorithmic}[1]
\STATE The node $v$ constructs $\lfloor\ell/\lambda\rfloor$
(identical) messages containing its ID.

\FOR{$i=1$ to $\lambda$}

\STATE Each node $u$ does the following:

\STATE - For each message $M$ held by $u$, pick a neighbor $z$
uniformly at random as a receiver of $M$.

\STATE - For each neighbor $z$ of $u$, send to $z$ the ID of $v$ and the number
of messages for which $z$ is picked as a receiver, denoted by $c(u, z)$.

\STATE - Each neighbor $z$ of $u$, upon receiving the ID of $v$ and
$c(u, z)$, constructs $c(u, z)$ messages, each containing the ID of
$v$.

\ENDFOR

\COMMENT {Each walk has now completed $\lambda$ steps. These walks
are now extended probabilistically further by $r$ steps where each
$r$ is independent and uniform in the range $[0,\lambda-1]$.}

\FOR{$i=0$ to $\lambda-1$}

\STATE \label{line:reservoir} For each message, independently with
probability $\frac{1}{\lambda-i}$, stop sending the message further
and save the ID of the source node (in this event, the node with the
message is the destination). For messages $M$ that are not stopped,
each node picks a neighbor correspondingly and sends the messages
forward as before.

\ENDFOR

\STATE At the end, each destination knows the source ID as well as
the length of the corresponding walk.

\end{algorithmic}

\end{algorithm}

\begin{algorithm}[t]
\caption{\sc Sample-Destination($v$)} \label{alg:Sample-Destination}
\textbf{Input:} Starting node $v$.\\
\textbf{Output:} A node sampled from among the stored
walks (of length in $[\lambda, 2\lambda-1]$) from $v$. \\

\textbf{Sweep 1: (Perform BFS tree)}
\begin{algorithmic}[1]

\STATE Construct a Breadth-First-Search (BFS) tree rooted at $v$.
While constructing, every node stores its parent's ID. Denote such
tree by $T$.

\end{algorithmic}

\textbf{Sweep 2: (Tokens travel up the tree, sample as you go)}
\begin{algorithmic}[1]

\STATE We divide $T$ naturally into levels $0$ through $D$ (where
nodes in level $D$ are leaf nodes and the root node $v$ is in level
$0$).

\STATE Tokens are held by nodes as a result of doing walks of length
between $\lambda$ and $2\lambda-1$ from $v$ (which is done in either
Phase~1 or {\sc Get-More-Walks} (cf.
Algorithm~\ref{alg:Get-More-Walks})). A node could have more than one
token.

\STATE Every node $u$ that holds token(s) picks one token, denoted
by $d_0$, uniformly at random and lets $c_0$ denote the number of
tokens it has.

\FOR{$i=D$ down to $0$}

\STATE Every node $u$ in level $i$ that either receives token(s)
from children or possesses token(s) itself do the following.

\STATE Let $u$ have tokens $d_0, d_1, d_2, \ldots, d_q$, with counts
$c_0, c_1, c_2, \ldots, c_q$ (including its own tokens). The node
$u$ samples one of $d_0$ through $d_q$, with probabilities
proportional to the respective counts. That is, for any $0\leq j\leq
q$, $d_j$ is sampled with probability
$\frac{c_j}{c_0+c_1+\ldots+c_q}$.

\STATE The sampled token is sent to the parent node (unless already
at root), along with a count of $c_0+c_1+\ldots+c_q$ (the count
represents the number of tokens from which this token has been
sampled).

\ENDFOR

\STATE The root outputs the ID of the owner of the final sampled
token. Denote such node by $u_d$.

\end{algorithmic}

\textbf{Sweep 3: (Go and delete the sampled destination)}
\begin{algorithmic}[1]

\STATE $v$ sends a message to $u_d$ (e.g., via broadcasting). $u_d$
deletes one token of $v$ it is holding (so that this random walk of
length $\lambda$ is not reused/re-stitched).
\end{algorithmic}

\end{algorithm}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{Proofs of Lemmas~\ref{lem:phase1}, \ref{lem:get-more-walks}, \ref{lem:Sample-Destination} and
\ref{lem:correctness-sample-destination-new}}\label{sec:four-proofs}

%\Opensolutionfile{movedProofs}
%\begin{movedProof}[Proof of Lemma~\ref{lem:phase1}]
\begin{proof}[Proof of Lemma~\ref{lem:phase1}]
This proof is a slight modification of the proof of Lemma~2.2 in
\cite{DNP09-podc}, where it is shown that each node can perform
$\eta$ walks of length $\lambda$ together in $O(\lambda \eta
\log{n})$ rounds with high probability. We extend this to the
following statement.
%
\begin{quote}
Each node $v$ can in fact perform $\eta\deg(v)$ walks of length $2\lambda$
and still finish in $O(\lambda \eta \log{n})$ rounds.
\end{quote}
%
The desired claim will follow immediately because each node $v$
performs $\eta\deg(v)$ walks of length \textit{at most} $2\lambda$ in
Phase~1.

Consider the case when each node $v$ creates $\eta \deg(v)\geq \eta$
messages. For each message $M$, any $j=1, 2, ..., \lambda$, and any
edge $e$, we define $X_M^j(e)$ to be a random variable having value
1 if $M$ is sent through $e$ in the $j^{th}$ iteration (i.e., when
the counter on $M$ has value $j-1$). Let $X^j(e)=\sum_{M:
\text{message}} X_M^j(e)$.  We compute the expected number of
messages that go through an edge, see claim below.

\begin{claim}
\label{claim:first} For any edge $e$ and any $j$,
$\mathbb{E}[X^j(e)]=2\eta$.
\end{claim}
\begin{proof}% [Proof of Claim~\ref{claim:first}]
Assume that each node $v$ starts with $\eta \deg(v)$ messages. Each
message takes a random walk. We prove that after any given number of
steps $j$, the expected number of messages at node $v$ is still
$\eta \deg(v)$.  Consider the random walk's probability transition
matrix, call it $A$. In this case $Au = u$ for the vector $u$ having
value $\frac{\deg(v)}{2m}$ where $m$ is the number of edges in the
graph (since this $u$ is the stationary distribution of an
undirected unweighted graph). Now the number of messages we started
with at any node $i$ is proportional to its stationary distribution,
therefore, in expectation, the number of messages at any node
remains the same.

To calculate $\mathbb{E}[X^j(e)]$, notice that edge $e$ will receive
messages from its two end points, say $x$ and $y$. The number of
messages it receives from node $x$ in expectation is exactly the
number of messages at $x$ divided by $\deg(x)$. The claim follows.
\end{proof}
%The intuition is that, in expectation $degree(v)$, messages held by $v$ will get evenly distributed over all edges incident %on $v$. The full proof can be found in Appendix~\ref{app:claim:first}.

By Chernoff's bound (e.g., in~\cite[Theorem~4.4.]{MU-book-05}), for
any edge $e$ and any $j$,
$$\mathbb{P}[X^j(e)\geq 4\eta\log{n}]\leq 2^{-4\log{n}}=n^{-4}.$$
It follows that the probability that there exists an edge $e$ and an
integer $1\leq j\leq \lambda$ such that $X^j(e)\geq 4\eta\log{n}$ is
at most $|E(G)| \lambda n^{-4}\leq \frac{1}{n}$ since $|E(G)|\leq
n^2$ and $\lambda\leq \ell\leq n$ (by the way we define $\lambda$).

Now suppose that $X^j(e)\leq 4\eta\log{n}$ for every edge $e$ and
every integer $j\leq \lambda$. This implies that we can extend all
walks of length $i$ to length $i+1$ in $4\eta\log{n}$ rounds.
Therefore, we obtain walks of length $\lambda$ in
$4\lambda\eta\log{n}$
rounds as claimed. %(Note that if $\eta$, we still get
%a high probability bound for $X^j(e)\geq 4\log{n}$.)
\end{proof}
%\end{movedProof}

\begin{proof}[Proof of Lemma~\ref{lem:get-more-walks}]
The argument is exactly the same as the proof of Lemma~2.4 in
\cite{DNP09-podc}. That is, there is no congestion. We only consider
longer walks (length at most $2\lambda-1$ ) this time. The detail of
the proof is as follows.

Consider any node $v$ during the execution of the algorithm. If it
contains $x$ copies of the source ID, for some $x$, it has to pick
$x$ of its neighbors at random, and pass the source ID to each of
these $x$ neighbors. Although it might pass these messages to less
than $x$ neighbors, it sends only the source ID and a {\em count} to
each neighbor, where the count represents the number of copies of
source ID it wishes to send to such neighbor. Note that there is
only one source ID as one node calls {\sc Get-More-Walks} at a time.
Therefore, there is no congestion and thus the algorithm terminates
in $O(\lambda)$ rounds.
\end{proof}


\begin{proof}[Proof of Lemma~\ref{lem:Sample-Destination}]
This proof is exactly the same as the proof of Lemma~2.5 in
\cite{DNP09-podc}.

Constructing a BFS tree clearly takes only $O(D)$ rounds. In the
second phase, the algorithm wishes to {\em sample} one of many
tokens (having its ID) spread across the graph. The sampling is done
while retracing the BFS tree starting from leaf nodes, eventually
reaching the root. The main observation is that when a node receives
multiple samples from its children, it only sends one of them to its
parent. Therefore, there is no congestion. The total number of
rounds required is therefore the number of levels in the BFS tree,
$O(D)$. The third phase of the algorithm can be done by broadcasting
(using a BFS tree) which needs $O(D)$ rounds.
\end{proof}


\begin{proof}[Proof of Lemma~\ref{lem:correctness-sample-destination-new}]
The claim follows from the correctness of {\sc Sample-Destination}
that the algorithm samples a walk uniformly at random and the fact
that the length of each walk is uniformly sampled from the range
$[\lambda,2\lambda-1]$. The first part is proved in Lemma~2.6 in Das
Sarma et al.~\cite{DNP09-podc} and included below for completeness.
We now prove the second part.

To show that each walk length is uniformly sampled from the range
$[\lambda,2\lambda-1]$, note that each walk can be created in two
ways.
\begin{enumerate}
\item It is created in Phase~1. In this case, since we pick the
length of each walk uniformly from the range
$[\lambda,2\lambda-1]$, the claim clearly holds.
\item It is created by {\sc Get-More-Walks}. In this case, the claim holds by the
technique of {\em reservoir} sampling: Observe that after the
$\lambda^{th}$ step of the walk is completed, we stop extending each
walk at any length between $\lambda$ and $2\lambda-1$ uniformly. To
see this, observe that we stop at length $\lambda$ with probability
$1/\lambda$. If the walk does not stop, it will stop at length
$\lambda+1$ with (conditional) probability $\frac{1}{\lambda-1}$. This means that
the walk will stop at length $\lambda+1$ with probability
$\frac{\lambda-1}{\lambda}\times \frac{1}{\lambda-1} =
\frac{1}{\lambda}$. Similarly, it can be argued that the walk will
stop at length $i$ for any $i\in [\lambda, 2\lambda-1]$ with
probability $\frac{1}{\lambda}$.
\end{enumerate}

We now show the proof of Lemma~2.6 (with slight modification) in Das
Sarma et al. for completeness.


\begin{lemma}[Lemma 2.6 in \cite{DNP09-podc}]\label{lem:correctness-sample-destination}
Algorithm {\sc Sample-Destination}($v$) (cf.
Algorithm~\ref{alg:Sample-Destination}), for any node $v$, samples a
destination of a walk starting at $v$ uniformly at random.
\end{lemma}
\begin{proof}
Assume that before this algorithm starts, there are  $t$ (without
loss of generality, let $t > 0$) ``tokens'' containing ID of $v$
stored in some nodes in the network. The goal is to show that {\sc
Sample-Destination} brings one of these tokens to $v$ with uniform
probability. For any node $u$, let $T_u$ be the subtree rooted at
$u$ and let $S_u$ be the set of tokens in $T_u$. (Therefore, $T_v=T$
and $|S_v|=t$.)

We claim that any node $u$ returns a destination to its parent with
uniform probability (i.e., for any tokens $x\in S_u$, $Pr[ u$
returns $x ]$ is $1/|S_u|$ (if $|S_u|>0$)). We prove this by
induction on the height of the tree. This claim clearly holds for
the base case where $u$ is a leaf node. Now, for any non-leaf node
$u$, assume that the claim is true for any of its children.
%
To be precise, suppose that $u$ receives tokens and counts from $q$
children. Assume that it receives tokens $d_1, d_2, ..., d_q$ and
counts $c_1, c_2, ..., c_q$ from nodes $u_1, u_2, ..., u_q$,
respectively. (Also recall that $d_0$ is the sample of its own
tokens (if exists) and $c_0$ is the number of its own tokens.) By
induction, $d_j$ is sent from $u_j$ to $u$ with probability
$1/|S_{u_j}|$, for any $1\leq j\leq q$. Moreover, $c_j=|S_{u_j}|$
for any $j$. Therefore, any token $d_j$ will be picked with
probability $\frac{1}{|S_{u_j}|}\times \frac{c_j}{c_0+c_1+\ldots+c_q} =
\frac{1}{|S_u|}$ as claimed.

The lemma follows by applying the claim above to $v$.
\end{proof}
\end{proof}







%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection{Proof of Lemma~\ref{lem:uniformityused}}
\label{sec:uniformityused-proof}
\begin{proof}
Intuitively, this argument is simple, since the connectors are
spread out in steps of length approximately $\lambda$. However,
there might be some {\em periodicity} that results in the same node
being visited multiple times but {\em exactly} at
$\lambda$-intervals. This is where we crucially use the fact that
the algorithm uses walks of length $\lambda + r$ where $r$ is chosen
uniformly at random from $[0,\lambda-1]$.

%-----------------------------

We prove the lemma using the following two claims.

\begin{claim}
Consider any sequence $A$ of numbers $a_1, ..., a_{\ell'}$ of length
$\ell'$. For any integer $\lambda'$, let $B$ be a sequence
$a_{\lambda'+r_1}, a_{2\lambda'+r_1+r_2}, ...,
a_{i\lambda'+r_1+...+r_i}, ...$ where $r_i$, for any $i$, is a
random integer picked uniformly from $[0, \lambda'-1]$. Consider
another subsequence of numbers $C$ of $A$ where an element in $C$ is
picked from ``every $\lambda'$ numbers'' in $A$; i.e., $C$
consists of $\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$
where, for any $i$, $c_i$ is chosen uniformly at random from
$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$.
Then, $Pr[C \text{ contains } \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] =
Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}]$ for any set $\{a_{i_1},
a_{i_2}, ..., a_{i_k}\}$.
\end{claim}

%\textbf{Danupon:} This claim is stated with full details as we may
%want to bring it out to highlight later.
%\begin{claim}
%Given a sequence of observations $X_1, X_2, \ldots, X_{t}$, if an
%event ${\cal R}$ is observed at most $f_1\cdot f_2$ times. Suppose
%one observation is picked at random from $X_i, X_{i+1}, X_{i+f_2}$
%for each $i$ in $\{1, f_2, 2f_2, \ldots, (f_1-1)f_2\}$, and call
%these observations $Y_1, Y_2, \ldots, Y_{f_1}$, then the probability
%that the event ${\cal R}$ is observed more than $f_2\log n$ times
%among all $Y_i$ observations is at most $1/n$.
%\end{claim}
\begin{proof}
First consider a subsequence $C$ of $A$. Numbers in $C$ are picked
from ``every $\lambda'$ numbers'' in $A$; i.e., $C$ consists of
$\lfloor\ell'/\lambda'\rfloor$ numbers $c_1, c_2, ...$ where, for
any $i$, $c_i$ is chosen uniformly at random from
$a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ..., a_{i\lambda'}$.
Observe that $|C|\geq |B|$. In fact, we can say that ``$C$ contains
$B$''; i.e., for any sequence of $k$ indexes $i_1, i_2, ..., i_k$
such that $\lambda'\leq i_{j+1}-i_j\leq 2\lambda'-1$ for all $j$,
%
$$Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] = Pr[C \text{ contains
} \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}].$$
%
To see this, observe that $B$ will be equal to $\{a_{i_1}, a_{i_2},
..., a_{i_k}\}$ only for a specific value of $r_1, r_2, ..., r_k$.
Since each of $r_1, r_2, ..., r_k$ is chosen uniformly at random
from $[0, \lambda'-1]$, $Pr[B = \{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] =
\lambda'^{-k}$.
%(Some technicality: The inequality follows from
%the fact that for some $\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}$, $Pr[B =
%\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}] = 0$.)
Moreover, $C$ will contain $\{a_{i_1}, a_{i_2}, ..., a_{i_k}\}$ if
and only if, for each $j$, we pick $a_{i_j}$ from the interval that
contains it (i.e., from $a_{(i'-1)\lambda'+1}, a_{(i'-1)\lambda'+2},
..., a_{i'\lambda'}$, for some $i'$). (Note that $a_{i_1}, a_{i_2},
...$ are all in different intervals because $i_{j+1}-i_j\geq
\lambda'$ for all $j$.) Therefore, $Pr[C \text{ contains } \{a_{i_1},
a_{i_2}, ..., a_{i_k}\}]=\lambda'^{-k}$.
\end{proof}

\begin{claim}
Consider any sequence $A$ of numbers $a_1, ..., a_{\ell'}$ of length
$\ell'$. Consider a subsequence of numbers $C$ of $A$ where an element
in $C$ is picked from ``every $\lambda'$ numbers'' in $A$;
i.e., $C$ consists of $\lfloor\ell'/\lambda'\rfloor$ numbers $c_1,
c_2, ...$ where, for any $i$, $c_i$ is chosen uniformly at random
from $a_{(i-1)\lambda'+1}, a_{(i-1)\lambda'+2}, ...,
a_{i\lambda'}$. For any number $x$, let $n_x$ be the number of
appearances of $x$ in $A$; i.e., $n_x=|\{i\ |\ a_i=x\}|$. Then, for
any $R\geq 6n_x/\lambda'$, $x$ appears in $C$ more than $R$ times
with probability at most $2^{-R}$.
\end{claim}
\begin{proof}
For $i=1, 2, ..., \lfloor\ell'/\lambda'\rfloor$, let $X_i$ be a 0/1
random variable that is $1$ if and only if $c_i=x$ and
$X=\sum_{i=1}^{\lfloor\ell'/\lambda'\rfloor} X_i$. That is, $X$ is
the number of appearances of $x$ in $C$. Clearly,
$E[X]=n_x/\lambda'$. Since $X_i$'s are independent, we can apply the
Chernoff bound (e.g., in~\cite[Theorem~4.4.]{MU-book-05}): For any
$R\geq 6E[X]=6n_x/\lambda'$,
$$Pr[X\geq R]\leq 2^{-R}.$$
The claim is thus proved.
\end{proof} %of claim

Now we use the claim to prove the lemma. Choose $\ell'=\ell$ and
$\lambda'=\lambda$ and consider any node $v$ that appears at most
$t$ times. The number of times it appears as a connector node is the
number of times it appears in the subsequence $B$ described in the
claim. By applying the claim with $R=t(\log n)^2$, we have that $v$
appears in $B$ more than $t(\log n)^2$ times with probability at
most $1/n^2$ as desired.
\end{proof}



%----------------------------------------------------------------






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\subsection{Proof of Lemma~\ref{lemma:visits bound}}\label{sec:proof of visits
bound}

%Consider a simple random walk on a connected undirected graph on $n$
%vertices. Let $d(x)$ denote the degree of $x$, and let $m$ denote
%the number of edges. Let $N_t^x(y)$ denote the number of visits to
%vertex $y$ by time $t$, given the walk started at vertex $x$.
%
%Now, consider $k$ walks, each of length $\ell$, starting from (not
%necessary distinct) nodes $x_1, x_2, \ldots, x_k$. In this section
%we show that, with high probability, no vertex $y$ is visited more
%than $24 d(x) \sqrt{k\ell+1}\log n + k$ times, as in the following
%lemma.


%\newtheorem{LemmaVisit}{Theorem}[section]
%\begin{theorem}\label{theorem:visits bound}
%For any nodes $x_1, x_2, \ldots, x_k$, \[\Pr\bigl(\exists y\ s.t.\
%\sum_{i=1}^k N_\ell^{x_i}(y) \geq 24 d(x) \sqrt{k\ell+1}\log
%n+k\bigr) \leq 1/n\,.\]
%\end{theorem}


%The rest of this section is devoted to prove the above lemma.
We
start with the bound of the first and second moment of the number of
visits at each node by each walk.

\begin{proposition}\label{proposition:first and second moment} For
any node $x$, node $y$ and $t = O(m^2)$,
\begin{equation}
\e[N_t^x(y)] \le 8 d(y) \sqrt{t+1}\,, \ \ \ \mbox{{\rm  and }} \ \ \
%\var(N_t^x(y)) = \e[N_t^x(y)] + O\bigl(d^2(y) t\bigr)\,.
\e\Bigl[\bigl(N_t^x(y)\bigr)^2\Bigr] \le \e[N_t^x(y)] + 128 \
d^2(y)\  (t+1)\,.
\end{equation}
\end{proposition}

To prove the above proposition, let $P$ denote the transition
probability matrix of such a random walk and let $\pi$ denote the
stationary distribution of the walk, which in this case is simply
proportional to the degree of the vertex, and let $\pi_\m = \min_x
\pi(x)$.


The basic bound we use is the following estimate from Lyons (see
Lemma~3.4 and Remark~4  in \cite{Lyons}). Let $Q$ denote the
transition probability matrix of a chain with self-loop probablity
$\alpha > 0$, and with $c= \min{\{\pi(x) Q(x,y) : x\neq y \mbox{ and }                      Q(x,y)>0\}}\,.$
Note that for a random walk on an undirected graph, $c=\frac{1}{2m}$. For $k >
0$ a positive integer (denoting time) ,

\begin{equation}
\label{kernel_decay} \bigl|\frac{Q^k(x,y)}{\pi(y)} - 1\bigr| \le
\min\Bigl\{\frac{1}{\alpha c \sqrt{k+1}}, \frac{1}{2\alpha^2 c^2
(k+1)} \Bigr\}\,.
\end{equation}

For $k\leq\beta m^2$ for a sufficiently small constant $\beta$, and small $\alpha$, the above can be simplified to the following bound; see
Remark~3 in \cite{Lyons}.
\begin{equation}
\label{one_sided_decay} Q^k(x,y)  \le \frac{4\pi(y)}{c \sqrt{k+1}} =
\frac{4d(y)}{\sqrt{k+1}}\,.
\end{equation}

Note that given a simple random walk on a graph $G$, and a
corresponding matrix  $P$, one can always switch to the lazy version
$Q=(I+P)/2$, and interpret it as a walk on graph $G'$, obtained by
adding  self-loops  to vertices in $G$ so as to double the degree of
each vertex. In the following, with abuse of notation we assume our
$P$ is such a lazy version of the original one.

\begin{proof}
Let $X_0, X_1, \ldots $ describe the random walk, with $X_i$
denoting the position of the walk at time $i\ge 0$, and let
$\bone_A$ denote the indicator (0-1) random variable, which takes
the value 1 when the event $A$ is true. In the following we also use
the subscript $x$ to denote the fact that the probability or
expectation is with respect to starting the walk at vertex $x$.
%Let $X_0=x$
First the expectation.
\begin{eqnarray*}
\e[N_t^x(y)] & = & \e_x[  \sum_{i=0}^t \bone_{\{X_i=y\}}] = \sum_{i=0}^t P^i(x,y) \\
& \le &  4 d(y) \sum_{i=0}^t \frac{1}{\sqrt{i+1}} , \ \ \mbox{ (using the above inequality  (\ref{one_sided_decay})) } \\
& \le & 8 d(y) \sqrt{t+1}\,.
\end{eqnarray*}


%One does not expect Gaussian tails here, since the random variable
%in question is distributed more like a Geometric one.

Abbreviating $N^x_t(y)$   as $N_t(y)$, we now compute the second
moment:
\begin{eqnarray*}
\e[N^2_t(y)] & = & \e_x \Bigl[  \bigl(\sum_{i=0}^t \bone_{\{X_i=y\}} \bigr) \bigl(\sum_{j=0}^t \bone_{\{X_j=y\}} \bigr) \Bigr] \\
& = & \e_x\Bigl[  \sum_{i=0}^t \bone_{\{X_i=y\}}  +  2 \sum_{0\le i < j\le t}^t \bone_{\{X_i = y, \ X_j=y\}} \Bigr] \\
& = & \e[N_t(y)] + 2 \sum_{0\le i < j\le t}^t \Pr(X_i = y, \
X_j=y)\,.
\end{eqnarray*}
To bound the second term on the right hand side above, consider for
$0\le i<j $:
\begin{eqnarray*}
\Pr(X_i = y, \ X_j=y)
& = &  \Pr(X_i = y) \ \Pr(X_j = y | X_i = y) \\
& = & P^i(x,y) \ \ P^{j-i}(y,y)\,, \ \ \ \mbox{ due to the Markovian property }\\
& \le & \frac{4 d(y)}{\sqrt{i+1}} \  \ \frac{4
d(y)}{\sqrt{j-i+1}}\,.
 \ \ \mbox{ (using   (\ref{one_sided_decay})) }
\end{eqnarray*}
Thus,
\begin{eqnarray*}
\sum_{0\le i < j \le t}  \Pr(X_i = y, \ X_j=y) & \le &
\sum_{0\le i \le t} \frac{4 d(y)}{\sqrt{i+1}} \ \sum_{0< j-i \le t-i} \frac{4d(y)}{\sqrt{j-i+1}} \\
& = & 16d^2(y) \sum_{0\le i \le t} \frac{1}{\sqrt{i+1}} \ \sum_{0< k \le t-i} \frac{1}{\sqrt{k+1}}\\
& \le & 32 d^2(y)  \sum_{0\le i \le t} \frac{1}{\sqrt{i+1}} \ \sqrt{t-i+1}\\
& \le & 32 d^2(y) \sqrt{t+1}  \sum_{0\le i \le t} \frac{1}{\sqrt{i+1}} \\
& \le & 64 d^2(y) \ (t+1)\,,
\end{eqnarray*}
which yields the  claimed bound on the second moment in the
proposition. %\hfill $\qed$
\end{proof}



Using the above proposition, we bound the number of visits of each
walk at each node, as follows.


\begin{lemma}\label{lemma:whp one walk one node bound}
For $t=O(m^2)$ and any vertex $y \in G$, the random walk
started at $x$ satisfies:
\begin{equation*}
\Pr\bigl(N^x_t(y) \ge  24  \ d(y) \sqrt{t+1}\log n \bigr) \le
\frac{1}{n^2} \,.
\end{equation*}
\end{lemma}
\begin{proof}
First, it follows from the Proposition that
%
\begin{equation} \Pr\bigl(N^x_t(y) \ge  2\cdot 12 \
d(y) \sqrt{t+1}\bigr) \le \frac{1}{4} \,.\label{eq:simple bound}
\end{equation}
%
This is done by using the standard Chebyshev argument that for $B >
0$, $\Pr\bigl(N_t(y) \ge  B \bigr) \le \Pr
\bigl(N^2_t(y) \ge  B^2)  \le \frac{\e\bigl(N_t^2(y)\bigr)}{B^2}$.


%
%Let $r=24  \ d(y) \sqrt{t+1}$. By Lemma~\ref{lemma:one walk one node
%bound}, \[\Pr\bigl(N^x_t(y) \ge r \bigr) \le \frac{1}{4}\,.\] This
%implies that the probability that
%
For any $r$, let $L^x_r(y)$ be the time that the random walk
(started at $x$) visits $y$ for the $r^{th}$ time. Observe that, for
any $r$, $N^x_t(y)\geq r$ if and only if $L^x_r(y)\leq t$.
Therefore,
\begin{equation}
\Pr(N^x_t(y)\geq r)=\Pr(L^x_r(y)\leq t).\label{eq:visits eq length}
\end{equation}

Let $r^*=24  \ d(y) \sqrt{t+1}$. By \eqref{eq:simple bound} and
\eqref{eq:visits eq length}, $\Pr(L^x_{r^*}(y)\leq t)\leq
\frac{1}{4}\,.$ We claim that
\begin{equation}
\Pr(L^x_{r^*\log n}(y)\leq t)\leq \left(\frac{1}{4}\right)^{\log
n}=\frac{1}{n^2}\,.\label{eq:hp length bound}
\end{equation}
To see this, divide the walk into $\log n$ independent subwalks,
each visiting $y$ exactly $r^*$ times. Since the event $L^x_{r^*\log
n}(y)\leq t$ implies that all subwalks have length at most $t$,
\eqref{eq:hp length bound} follows.
%
Now, by applying \eqref{eq:visits eq length} again,
\[\Pr(N^x_t(y)\geq r^*\log n) = \Pr(L^x_{r^*\log n}(y)\leq t)\leq
\frac{1}{n^2}\] as desired.

\end{proof}

We now extend the above lemma to bound the number of visits of {\em
all} the walks at each particular node.

\begin{lemma}\label{lemma:k walks one node bound}
For $\gamma > 0$, and $t=O(m^2)$, and for any vertex $y \in
G$, the random walks started at $x_1, x_2, \ldots, x_k$ satisfy:
\begin{equation*}
\Pr\bigl(\sum_{i=1}^k N^{x_i}_t(y) \ge  24  \ d(y) \sqrt{kt+1} \log
n+k\bigr) \le \frac{1}{n^2} \,.
\end{equation*}
\end{lemma}
\begin{proof}
First, observe that, for any $r$, $$\Pr\bigl(\sum_{i=1}^k
N^{x_i}_t(y) - k \geq r\bigr)\leq \Pr\bigl(N^y_{kt}(y)\geq r\bigr).$$ To see
this, we construct a walk $W$ of length $kt$ starting at $y$ in the
following way: For each $i$, denote a walk of length $t$ starting at
$x_i$ by $W_i$. Let $\tau_i$ and $\tau'_i$ be the first and last
time (not later than time $t$) that $W_i$ visits $y$. Let $W'_i$ be
the subwalk of $W_i$ from time $\tau_i$ to $\tau_i'$. We construct a
walk $W$ by stitching $W'_1, W'_2, ..., W'_k$ together and complete
the rest of the walk (to reach the length $kt$) by a normal random
walk. It then follows that the number of visits to $y$ by $W_1, W_2,
\ldots, W_k$ (excluding the starting step) is at most the number of
visits to $y$ by $W$. The first quantity is $\sum_{i=1}^k
N^{x_i}_t(y)-k$. (The term `$-k$' comes from the fact that we do not
count the first visit to $y$ by each $W_i$ which is the starting
step of each $W'_i$.) The second quantity is $N^y_{kt}(y)$. The
observation thus follows.

Therefore, \[\Pr\bigl(\sum_{i=1}^k N^{x_i}_t(y)\geq 24 \ d(y)
\sqrt{kt+1}\log n + k\bigr) \leq \Pr\bigl(N^y_{kt}(y)\geq 24 \ d(y)
\sqrt{kt+1}\log n\bigr) \leq \frac{1}{n^2}\]
%
where the last inequality follows from Lemma~\ref{lemma:whp one walk
one node bound}.
%
%The lemma follows by bounding $\Pr\bigl(N^y_{kt}(y)\geq 24 \ d(y)
%\sqrt{kt+1}\log n\bigr)$ using Lemma~\ref{lemma:whp one walk one
%node bound}.
%
%Therefore, by Lemma~\ref{lemma:one walk one node bound},
%%
%\begin{eqnarray*}
%Pr[\sum_{i=1}^k N^{x_i}_t(y) \geq 12  \ \gamma \ d(y)
%\sqrt{kt+1}\bigr)] &\leq& Pr[N^y_{kt}(y)\geq 12  \ \gamma \ d(y)
%\sqrt{kt+1}\bigr)]\\
%&\leq& \frac{1}{\gamma^2}
%\end{eqnarray*}
%as desired.
\end{proof}


Lemma~\ref{lemma:visits bound} follows immediately from
Lemma~\ref{lemma:k walks one node bound} by union bounding over all
nodes.


\subsection{Proof of Theorem~\ref{thm:kwalks}}\label{app:kwalks}
\begin{proof}
First, consider the case where $\lambda>\ell$. In this case,
$\min(\sqrt{k\ell D}+k, \sqrt{k\ell}+k+\ell)=\tilde
O(\sqrt{k\ell}+k+\ell)$. By Lemma~\ref{lemma:visits bound}, each
node $x$ will be visited at most $\tilde O(d(x) (\sqrt{k\ell}+k))$
times. Therefore, using the same argument as Lemma~\ref{lem:phase1},
the congestion is $\tilde O(\sqrt{k\ell} + k)$ with high
probability. Since the dilation is $\ell$, {\sc Many-Random-Walks}
takes $\tilde O(\sqrt{k\ell} + k +\ell)$ rounds as claimed. Since $2\sqrt{k\ell}\leq k+\ell$, this bound reduces to $\tilde O(k+\ell)$.

Now, consider the other case where $\lambda\leq \ell$. In this case,
$\min(\sqrt{k\ell D}+k, \sqrt{k\ell}+k+\ell)=\tilde O(\sqrt{k\ell
D}+k)$. Phase~1 takes $\tilde O(\lambda \eta) = \tilde O(\sqrt{k\ell
D}+k)$. The stitching in Phase~2 takes $\tilde O(k\ell D/\lambda) =
\tilde O(\sqrt{k\ell D})$. Moreover, by Lemma~\ref{lemma:visits
bound}, {\sc Get-More-Walks} will never be invoked. Therefore, the
total number of rounds is $\tilde O(\sqrt{k\ell D}+k)$ as claimed.
\end{proof}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Omitted Proofs of Section~\ref{sec:lowerbound} (Lower Bound)}

\subsection{Proof of Lemma~\ref{lem:one}}
\label{proof:lem:one}
\begin{proof}
After the first $k$ free rounds, consider the intervals that the
left subtree can have, in the best case. Recall that these $k$
rounds allowed communication only along the path. The $path\_dist$
of any node in $L$ from the breakpoints of $sub(L)$ along the path
is at least $k+1$.
\end{proof}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\subsection{Proof of Lemma~\ref{lem:two}}
\label{proof:lem:two}
\begin{proof}
First, notice that each left breakpoint is at a path-distance of
$k+1$ from every node in the right subtree, and symmetrically each
right breakpoint is at a path-distance of $k+1$ from every node in
the left subtree. That is, $path\_dist(u,R) = path\_dist(v,L) = k+1$
for all $u\in B_l$ and all $v\in B_r$.

%{\bf Gopal:} Explain "connected" below.

%{\bf Danupon:} The sentence is changed.

In the end, all breakpoints need to be combined into one interval.
However, there could be one interval that is communicated from
$sub(l)$ to $sub(r)$ (or vice versa) such that it connects
several breakpoints. We show that this cannot happen. Consider all
the breakpoints $v\in B_l\cup B_r$.

\noindent{\bf Definition of {\em scratching}}.

Let us say that we {\em scratch out} the breakpoints from the list
$k+1$, $k'/2+k+1$, $k'+k+1$, $k'+k'/2+k+1$, $2k'+k+1$, ... that get
connected when an interval is communicated between $sub(l)$ and
$sub(r)$. We scratch out a breakpoint if there is an interval in the
graph that contains it and both (or one in case of the first and
last breakpoints) its adjacent breakpoints. For example, if the left
subtree has intervals $[1, k'/2+k]$ and $[k'/2+k+2, k'+k'/2+k+1]$
and the right subtree has $[k+2, k'+k]$ and the latter interval is
communicated to a node in the left subtree, then the left subtree is
able to obtain the merged interval $[1,k'+k'/2+k+1]$ and therefore
breakpoints $k+1$ and $k'/2+k+1$ are scratched out.

\begin{claim}
\label{claim:one} At most $O(1)$ breakpoints can be scratched out
with one message/interval communicated between $sub(r)$ and
$sub(l)$.
\begin{proof}
We argue that with the communication of one interval across the left
and right subtrees, at most $4$ breakpoints that have not been
scratched yet can get scratched. This follows from a simple
inductive argument. Consider a situation where the left subtree has
certain intervals with all overlapping intervals already merged, and
similarly right subtree. Suppose an interval ${\cal I}$ is
communicated between $sub(r)$ and $sub(l)$, one of the following
cases arise:
%\begin{itemize}
\squishlist
\item ${\cal I}$ contains one breakpoint: Can be merged with at most two other intervals. Therefore, at most three breakpoints can get scratched.
\item ${\cal I}$ contains two breakpoints: Can get connected with at most two other intervals and therefore at most four breakpoints can get scratched.
\item ${\cal I}$ contains more than two breakpoints: This is impossible since there are at most two breakpoints in each interval, its left most and
right most numbers (by definition of scratching).
%The important point is that ${\cal I}$ can contain at most two breakpoints that have not been scratched yet (by definition).
%\end{itemize}
\squishend This completes the proof of the claim.
\end{proof}

%{\bf Gopal:} Better to say that "the last case does not arise because ...."

%{\bf Danupon:} The sentence of the last case is changed. See above.

The proof now follows from Lemma~\ref{lem:one}. For any breakpoint
$b$, let $M_b$ be the set of messages that represents an interval
containing $b$ while $b$ is still unscratched. If $b$ is in $sub(l)$
and gets scratched because of the combination of some intervals in
$sub(r)$, then we claim that $M_b$ has covered a path-distance of at
least $k$. (Define the path-distance covered by $M_b$ by the total
path-distance covered by all messages in $M_b$.) This is because $b
= v_i$ (say), being a breakpoint in $sub(l)$, satisfies $i \equiv
k+1 \pmod{k'}$. Therefore, $b$ is at a path distance of at least $k$ from
any node in $R$. Consequently, $b$ is at a path-distance of at least
$k$ from any node in $sub(r)$. Since there are
$\Theta(\frac{n}{4k})$ breakpoints, and for any interval to be
communicated across the left and right subtree, a path-distance of
$k$ must be covered, in total, $\Theta(n)$ path-distance must be
covered for all breakpoints to be scratched. This follows from three
main observations:
%\begin{itemize}
\squishlist
\item As shown above, for any breakpoint to be scratched, an interval with a breakpoint must
be communicated from $sub(l)$ to $sub(r)$ or vice versa (thereby all
messages $m$ containing the breakpoint together covering a
path-distance of at least $k$)
\item Any message/interval with unscratched breakpoints has at most two unscratched breakpoints
\item As shown in Claim~\ref{claim:one}, at most four breakpoints can be scratched when two intervals are merged.
\squishend
%\end{itemize}

The proof follows. (Also see Figure~\ref{fig:scratch_4} for the idea of this proof.)
\end{proof}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%\iffalse
\subsection{Proof of Lemma~\ref{lem:three}}
\label{proof:lem:three}
\begin{proof}
We consider the total number of messages that can go through nodes
at any level of the graph, starting from level $0$ to level $\log k$
under the congest model.

First notice that if a message is passed at level $i$ of the tree,
this can cover a $path\_dist$ of at most $2^i$. This is because the
subtree rooted at a node at level $i$ has $2^i$ leaves. Further, by
our construction, there are $2^{\log (k') - i}$ nodes at level $i$.
Therefore, all nodes at level $i$ together, in a given round of
$\mathcal A$, can cover a $path\_dist$ of at most
$2^i \cdot 2^{\log (k') - i} = k' = 4k+2$. Therefore, over $k$ rounds, the total
$path\_dist$ that can be covered in a single level is $k \cdot k'$. Since
there are $O(\log k)$ levels, the total $path\_dist$ that can be
covered in $k$ rounds over the entire graph is $O(k^2\log k)$. (See
Figure~\ref{fig:max_path_cover}.)
\end{proof}
%\fi






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%
%\textbf{Danupon:} The problem with the above proof is that the weights are really huge. So, we cannot call them multiple edge.
%One idea to fix this is to put only $2^i$ edges between $v_i$ and $v_{i+1}$ and argue that the walk will have length $\Omega(n)$ with
%high probability. Then, we need to modify the theorem to deal with paths of length, say $n/2$, as well. Is $2^n$ small enough?
%
%{\bf Gopal:} Why we cannot call them multiple edge (as I say in the proof above)? What is the differnece
%if the wieght is $2^i$ as opposed to $n^i$ ?
%{\bf Danupon:} My only worry is that $n^i$ is too much for the number of edges. (That's also true for $2^i$.)
%If you think it is fine then that is ok.
%
%Another idea is to only show that with constant probability that the path will be taken.
%Then our lower bound statement will hold with constant probability instead of high probablity.
%In this case, we need only polynomial weights -- $n^2$ will suffice for every edge in the path. Do you agree?
%If this is correct, we can state this also.

\section{Omitted Proofs of Section~\ref{sec:mixingtime} (Mixing Time)}

\subsection{Brief description of algorithm for Theorem~\ref{thm:batu}}\label{app:batu}

The algorithm partitions the set of nodes into buckets based on the steady state probabilities. Each of the $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from $X$ now falls in one of these buckets. Further, the actual counts of the number of nodes in these buckets under distribution $Y$ are computed. The exact count for $Y$ for at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets (corresponding to the samples) is compared with the number of samples from $X$; these are compared to determine if $X$ and $Y$ are close. We refer the reader to their paper~\cite{BFFKRW} for a precise description.

\subsection{Proof of Lemma~\ref{lem:monotonicity}}\label{app:mon}
\begin{proof}
The monotonicity follows from the fact that
$||Ax||_1 \le ||x||_1$ where $A$ is the transpose of the transition probability matrix of the graph and $x$ is any probability vector. That is, $A(i,j)$ denotes the probability of transitioning from node $j$ to node $i$. This in turn follows from the fact that the sum of entries of any column of $A$ is 1.

Now let $\pi$ be the stationary distribution of the transition matrix $A$. This implies that if $\ell$ is $\epsilon$-near mixing, then $||A^{\ell}u - \pi||_1 \leq \epsilon$, by definition of $\epsilon$-near mixing time. Now consider $||A^{\ell+1}u - \pi||_1$. This is equal to $||A^{\ell+1}u - A\pi||_1$ since $A\pi = \pi$.  However, this reduces to $||A(A^{\ell}u - \pi)||_1 \leq \epsilon$. It follows that $(\ell+1)$ is $\epsilon$-near mixing.
\end{proof}
%\Closesolutionfile{movedProofs}

\subsection{Proof of Theorem~\ref{thm:mixmain}}\label{app:mixproof}
\begin{proof}
For undirected unweighted graphs, the
stationary distribution of the random walk is known and is
$\frac{deg(i)}{2m}$ for node $i$ with degree $deg(i)$, where $m$ is
the number of edges in the graph.  If a source node in the network knows the degree distribution, we only need
$\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from a distribution to
compare it to the stationary distribution.  This can be achieved by
running {\sc MultipleRandomWalk} to obtain $K = \tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ random walks. We choose $\epsilon = 1/12e$.
To find the approximate mixing time, we try out
increasing values of $\ell$ that are powers of $2$.  Once we find the
right consecutive powers of $2$, the monotonicity property admits a
binary search to determine the exact value for the specified $\epsilon$.
%of $\epsilon$-near mixing
%time. Note that we can apply binary search as $\epsilon$-near mixing
%time is a monotonic property.

The result
in~\cite{BFFKRW} can also be adapted to compare with the steady state distribution even if the source does not know the entire distribution. As described previously, the source only needs to know the {\em count} of the number of nodes with steady state distribution in given buckets. Specifically, the buckets of interest are at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ as the count is required only for buckets where a sample is drawn from. Since each node knows its own steady state probability (determined just by its degree), the source can broadcast a specific bucket information and recover, in $O(D)$ steps, the count of the number of nodes that fall into this bucket. Using the standard upcast technique previously described, the source can obtain the bucket count for each of these at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets in $\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + D)$ rounds.


We have shown previously that a source node can obtain $K$ samples from $K$ independent random walks of length $\ell$ in $\tilde{O}(K + \sqrt{K\ell D})$ rounds. Setting $K=\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + D)$ completes the proof.
\end{proof}

\section{Figures}


\begin{figure}[h]
\centering
\includegraphics{path_verify_definition.eps}
\caption{Example of path verification problem. {\bf (a)} In the
beginning, we want to verify that the vertices containing numbers
$1..5$ form a path. (In this case, they form a path $a, b, c, d,
a$.) {\bf (b)} One way to do this is for $a$ to send $1$ to $b$, and
therefore $b$ can check that the two vertices $a$ and $b$, corresponding
to labels $1$ and $2$, form a path. (The interval $[1,2]$ is used to
represent the fact that vertices corresponding to numbers $1, 2$ are
verified to form a path.) Similarly, $c$ can verify $[3,5]$. {\bf
(c)} Finally, $c$ combines $[1,2]$ with $[3, 5]$, and thus the path
corresponding to numbers $1, 2, \ldots, 5$ is verified. }
\label{fig:path_verify_definition}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics[width=0.98\linewidth]{connector.eps}
\caption{Figure illustrating the Algorithm of stitching short walks together.}
\label{fig:connector}
\end{figure}

\begin{figure}[h]
  \centering
  \includegraphics[width=0.7\linewidth]{Graph_Construction.eps}\\
  \caption{$G_n$}\label{fig:graph_construction}
\end{figure}

\begin{figure}[h]
\centering
\includegraphics{breaking-points.eps}
\caption{\textbf{Breakpoints.} {\bf (a)} $L$ and $R$ consist of
every other $k'/2$ vertices in $P$. (Note that we show the vertices
$l$ and $r$ appear many times for the convenience of presentation.)
{\bf (b)} $v_{k'/2+k+1}$ and $v_{k'+k'/2+k+1}$ (nodes in black) are
two of the breakpoints for $L$. Notice that there is one breakpoint
in every connected piece of $L$ and $R$.}
\label{fig:breaking-points}
\end{figure}

\begin{figure}[h]
\centering \subfigure[{Path-distance.}]{
\includegraphics[width=0.35\linewidth]{path-distance.eps}
\label{fig:path_distance}}
%
\subfigure[{Idea of Claim~\ref{claim:one}}]{
\includegraphics[width=0.35\linewidth]{scratch_4.eps}
\label{fig:scratch_4} }
%
\subfigure[Idea of Lemma~\ref{lem:three}.]{
\includegraphics[width=0.25\linewidth]{max_path_cover.eps}
\label{fig:max_path_cover} } \caption{{\bf (a)} Path distance
between 1 and 2 is the number of leaves in the subtree rooted at 3,
the lowest common ancestor of 1 and 2. {\bf (b)} For one unscratched
left breakpoint, $k'/2+k+1$ to be combined with another right
breakpoint $k+1$ on the left, $k'/2+k+1$ has to be carried to $L$ by
some intervals. Moreover, one interval can carry at most two
unscratched breakpoints at a time. {\bf (c)} Sending a message
between nodes on level $i$ and $i-1$ can increase the covered path
distance by at most $2^i$.}
\end{figure}
