\input{mixing_time}

\section{Omitted Proofs of Section~\ref{sec:algo} (Single Random Walk Problem)}
\newcommand{\mindegree}[0]{\delta}
\begin{algorithm}[T]
\caption{\sc Single-Random-Walk}
\label{alg:single-random-walk}
\textbf{Input:} Starting node $s$, desired walk length $\tau$ and parameter $\lambda$.\\
\textbf{Output:} Destination node of the walk outputs the ID of $s$.\\

\textbf{Phase 1: (Each node $v$ performs $d = \deg(v)$ random walks of length $\lambda + r_i$ where $r_i$ (for each $1\leq i \leq d$) is chosen independently at random in the range $[0, \lambda - 1]$. At the end of the process, there are $d$ (not necessarily distinct) nodes holding a ``coupon'' containing the ID of $v$.)}
\begin{algorithmic}[1]
\FOR{each node $v$}
\STATE  Generate $d$ random integers in the range $[0, \lambda - 1]$, denoted by $r_1, r_2, \ldots,r_{d}$.
\STATE Construct $d$ messages containing its ID, a counter number and in addition, the $i$-th message contains the desired walk length of $\lambda + r_i$. 
We will refer to these messages created by node $v$ as ``coupons created by $v$".
\ENDFOR

\FOR{$i=1$ to $2 \lambda$}

\STATE This is the $i$-th round. Each node $v$ does the following: Consider each coupon $C$ held by $v$ which was received in the $(i - 1)$-th round. If the coupon $C$'s desired walk length is at most $i$, then $v$ keeps this coupon ($v$ is the desired destination). Else, $v$ picks a neighbor $u$ uniformly at random for each coupon $C$ and forwards $C$ to $u$.

%\COMMENT{Note that any iteration could require more than 1 round.}

\ENDFOR

\end{algorithmic}


\textbf{Phase 2: (Stitch short walks by token forwarding. Stitch $\Theta (\tau/\lambda)$ walks, each of length in $[\lambda, 2 \lambda -1]$.)}
\begin{algorithmic}[1]
\STATE The source node $s$ creates a message called ``token'' which contains the ID of $s$

\STATE The algorithm will forward the token around and keep track of a set of connectors, denoted by $C$. Initially, $C = \{s\}$

\WHILE {Length of walk completed is at most $\tau-2 \lambda$}

  \STATE Let $v$ be the node that is currently holding the token.
  
 \STATE $v$ samples one of the coupons distributed by $v$ uniformly at random (by randomly choosing one counter number from the set of unused coupons). Let $v'$ be the destination node of the sampled coupon, say $C$.

 % \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
  %returned value (which is a destination of an unused random walk starting at $v$
  %of length between $\lambda$ and $2\lambda-1$.)

  %\IF{$v'$ = {\sc null} (all walks from $v$ have already been used up)}

  %\STATE $v$ calls {\sc Get-More-Walks($v$, $\lambda$)} (Perform $\Theta(l/\lambda)$ walks
  %of length $\lambda$ starting at $v$)

%  \STATE $v$ calls {\sc Sample-Destination($v$)} and let $v'$ be the
  %returned value

  %\ENDIF

  \STATE $v$ sends the token to $v'$ through broadcast and deletes the coupon $C$.

  \STATE $C = C \cup \{v\}$

\ENDWHILE

\STATE Walk naively until $\tau$ steps are completed (this is at
most another $2 \lambda$ steps)

\STATE A node holding the token outputs the ID of $s$

\end{algorithmic}

\end{algorithm}



\begin{figure}[h]
\centering
\includegraphics[width=0.98\linewidth]{connector.eps}
%\includegraphics[width=0.98\linewidth]{connector-2.pdf}
\caption{Figure illustrating the Algorithm of stitching short walks
together.} \label{fig:connector}
\end{figure}

\subsection{Proof of Lemma~\ref{lem:phase1} (restated below)}
\begin{lemma}
Phase 1 finishes in $O(\lambda)$ rounds with high probability. 
\end{lemma}
\begin{proof}
 In Phase 1, each node $v$ performs $d$ walks, each of length at most $2\lambda - 1$. Initially, every node starts with $d$ coupons (or messages) and each coupon takes a random walk. We prove that after any given number of steps $j$, the expected number of coupons at any node $v$ is still $d$. Although the edges change from round to round, at any round every node has $d$ neighbors connected with it. So at each step every node can send (as well as receive) $d$ messages. Now the number of messages we started with at any node $v$ is proportional to its degree, and the stationary distribution is uniform here. Therefore, in expectation the number of messages at any node remains the same. Thus, in expectation, the number of messages, say $X$, that go through an edge in any round is at most $2$ (from both endpoints). Using a Chernoff bound we get $\Pr[X\geq 4 \log n] \leq 2^{-4\log n} = n^{-4}$. It follows easily from there that the number of messages that go through any edge in any round is at most $4 \log n$ with high probability. Hence there will be at most $O( \log^2 n)$ bits w.h.p. on any edge per round. Since we consider the {\em CONGEST}($\log^2 n$) model, there will be no delay due to congestion. Hence, Phase 1 finishes in $O(\lambda)$ rounds with high probability.
\end{proof}

\subsection{Proof of Lemma~\ref{lem:lemma2.3} (restated below)}
\begin{lemma}
Sample-Coupon always finishes within $O(\Phi)$ rounds.
\end{lemma}
\begin{proof}
The proof follows directly from the definition of the dynamic diameter $\Phi$: a coupon can be sampled within at most the flooding time, and $\Phi$ is the maximum of the flooding times over all vertices.
\end{proof}

\subsection{Proof of Lemma~\ref{lem:connector-bound} (restated below)}
\begin{lemma}
For any vertex $v$, if $v$ appears in the walk at most $t$ times then it appears as a connector node at most $t(\log n)^2/\lambda$ times with probability at least $1-1/n^2$.
\end{lemma}
\begin{proof}
Intuitively, this argument is simple, since the connectors are spread out in steps of length approximately $\lambda$. However, there might be some periodicity that results in the same node being visited multiple times but exactly at $\lambda$-intervals. To overcome this we crucially use the fact that the algorithm uses short walks of length $\lambda + r$ (instead of fixed length $\lambda$) where $r$ is chosen uniformly at random from $[0, \lambda -1]$. Then the proof can be shown via constructing another process equivalent to partitioning the $\tau$ steps into intervals of $\lambda$ and then sampling points from each interval. The detailed proof follows immediately from the proof of the Lemma~2.7 in~\cite{DasSarmaNPT10}.
\end{proof}

\subsection{Proof of Random Walk Visits Lemma (cf. Lemma~\ref{lem:visit-bound})}\label{proof of rw visit lemma}
We restate the lemma here.
\begin{lemma}
$(${\sc Random Walk Visits Lemma}$)$. For any nodes $x_1, x_2, \ldots, x_k$, \begin{align*} \Pr\bigl(\exists y\ s.t.\
\sum_{i=1}^k N_\ell^{x_i}(y) & \geq 32 \ d \sqrt{k\ell+1}\log n+k\bigr) \\ & \leq 1/n\,. \end{align*}
\end{lemma}
We start with the bound of the first moment of the number of visits at each node by each walk.
\begin{proposition}\label{proposition:first-moment} For
any node $x$, node $y$ and $t = O(\tau)$,
\begin{equation}
\e[N_t^x(y)] \le 8 \ d \sqrt{t+1}
\end{equation}
\end{proposition}

To prove the above proposition, let $P$ denote the transition probability matrix of such a random walk and let $\pi$ denote the stationary distribution of the walk. 

The basic bound we use is the estimate from Lyons' lemma (see Lemma~3.4 in \cite{Lyons}). We show below that Lyons' lemma also holds for a regular evolving graph. %For simplicity we start to show it for any $d$-regular graphs. The essential result required for regular evolving graphs is shown inside the proof.
\begin{lemma}\label{lem:lyons}
Let $Q$ denote the transition probability matrix of a $d$-regular evolving graph, with self-loop probability $\alpha > 0$. Let $c= \min{\{\pi(x) Q(x,y) : x \neq y \mbox{ and }Q(x,y)>0\}} > 0\,$. Note that here $c = \frac{1}{n d}$, as $\pi$ is uniform distribution. Then for any vertex $x$ and all $k > 0$, a positive integer (denoting time),

%\begin{equation}
%\label{kernel_decay} 
$$\bigl|\frac{Q^k(x,x)}{\pi(x)} - 1\bigr| \le
\min\Bigl\{\frac{1}{\alpha c \sqrt{k+1}}, \frac{1}{2\alpha^2 c^2(k+1)} \Bigr\}\,.$$
%\end{equation}
\end{lemma}

%The Lyons Lemma still holds for dynamic regular graphs. All parts of the lemma is true for dynamic graphs except that $ \lVert Q^{k+1} f \rVert_\infty \le \lVert Q^k f \rVert_\infty $ where $Q$ is the adjacency matrix of the graph. This we prove in the following lemma for regular dynamic graph. 


\begin{proof} %[Proof of Lemma~\ref{lem:lyons}]
Let $G = (V, E)$ be any $d$-regular graph and $Q$ be the transition probability matrix of it. Write $$c_2(x, y) := \pi(x) Q^2(x, y)$$ and note that for $(x, y) \in E$, we have 
\begin{align*} 
c_2(x, y) & \ge \pi(x) \bigl[Q(x, x) Q(x, y) + Q(x, y) Q(y, y)\bigr] \\ & \ge 2\alpha c.
\end{align*}
We write $\ell^2(V, \pi)$ for the vector space $\mathbb{R}^{V}$ equipped with the inner product defined by $$(f_1, f_2)_{\pi} := \sum_{x \in V} f_1(x) f_2(x)\pi(x).$$ We regard elements of $\mathbb{R}^{V}$ as functions from $V$ to $\mathbb{R}$. Therefore we will call eigenvectors of the matrix $Q$ as eigenfunctions. Recall that the transition matrix $Q$ is reversible with respect to the stationary distribution $\pi$. The reason for introducing the above inner product is
\begin{claim}\label{lem:lavin}
Let $Q$ be a reversible transition matrix with respect to $\pi$. Then the inner product space $\langle \ell^2(V, \pi), (\cdot , \cdot)_{\pi}\rangle$ has an orthonormal basis of real-valued eigenfunctions $\{f_j \}_{j=1}^{\vert V \vert}$ corresponding to real eigenvalues $\{\lambda_j\}$. 
\end{claim}
\begin{proof}
Denote by $(\cdot , \cdot)$ the usual inner product on $\mathbb{R}^V$, given by $(f_1, f_2) := \sum_{x \in V} f_1(x) f_2(x)$. For a regular graph, $Q$ is symmetric. A proof of the more general version, where $Q$ need not be symmetric, is given in Lemma~12.2 in~\cite{Levin}. The spectral theorem for symmetric matrices guarantees that the inner product space $\langle \ell^2(V, \pi), (\cdot , \cdot) \rangle$ has an orthonormal basis $\{\varphi_j\}_{j=1}^{\vert V \vert}$ such that $\varphi_j$ is an eigenfunction with the real eigenvalue $\lambda_j$. It is known that $\sqrt{\pi}$ is an eigenfunction of $Q$ corresponding to the eigenvalue $1$; we set $\varphi_1 = \sqrt{\pi} $ and $\lambda_1 =1$. If $D_{\pi}$ denotes the diagonal matrix with diagonal entries $D_{\pi}(x, x) = \pi(x)$, then $Q = D_{\pi}^{\frac{1}{2}} Q D_{\pi}^{-\frac{1}{2}}$. Let $f_j = D_{\pi}^{-\frac{1}{2}} \varphi_j$; then $f_j$ is an eigenfunction of $Q$ with eigenvalue $\lambda_j$. In fact: 
\begin{align*} 
Q f_j = Q D_{\pi}^{-\frac{1}{2}} \varphi_j & =  D_{\pi}^{-\frac{1}{2}} (D_{\pi}^{\frac{1}{2}} Q D_{\pi}^{-\frac{1}{2}}) \varphi_j \\ & = D_{\pi}^{-\frac{1}{2}} Q \varphi_j  = D_{\pi}^{-\frac{1}{2}} \lambda_j \varphi_j = \lambda_j f_j
\end{align*}
Although the eigenfunctions $\{f_j\}$ are not necessarily orthonormal with respect to the usual inner product, they are orthonormal with respect to the inner product $(\cdot, \cdot)_{\pi}$: 
$$\delta_{ij} = (\varphi_i, \varphi_j) = (D_{\pi}^{\frac{1}{2}} f_i , D_{\pi}^{\frac{1}{2}} f_j) = (f_i, f_j)_{\pi}, $$ the first equality follows since $\{\varphi_j\}$ is orthonormal with respect to the usual inner product. 
\end{proof}

Let $\ell_0^2(V, \pi)$ be the orthogonal complement of the constants in $\ell^2(V, \pi)$. Note that $\textbf{1}$ is an eigenfunction of $Q$ and that $\ell_0^2(V, \pi)$ is invariant under $Q$. Now we show in the following claim that each such $f \in \ell^2_0(V, \pi)$ has at least one nonnegative value and at least one nonpositive value. 

\begin{claim}\label{clm:dirichlets}
Let $G$ be an undirected connected $d$-regular graph on $n$ vertices with transition matrix $A_G$. Let $\lambda_1 \geq \lambda_2 \geq \ldots \geq \lambda_n$ be the eigenvalues and $X_1, X_2, \ldots,X_n$ the corresponding eigenvectors of $A_G$. Then each eigenvector $X_i$, $i= 2,3, \ldots,n$ $($other than $X_1)$, has at least one negative and at least one positive coordinate.  
\end{claim}
\begin{proof}
It is known that $\lambda_1 = 1$ and the normalized eigenvector corresponding to $\lambda_1$ is $X_1 = \frac{1}{\sqrt{n}}(1, 1, \ldots , 1)$. The set of eigenvectors $\{X_i : i = 1, 2, \ldots,n\}$ forms an orthonormal basis of the eigenspace. Then for any normalized eigenvector $X \in \{X_i : i=2, 3, \ldots, n \}$, we have $X \perp X_1$. Hence, $\sum_{i=1}^n x_{i}^2 = 1$ and $ \sum_{i=1}^n x_i = 0$, where $X = (x_1, x_2, \ldots ,x_n)$. Let $x_l$ and $x_s$ be the largest and smallest coordinates of $X$, respectively. Then clearly $ x_l \geq 1/\sqrt{n}$ and $x_s < 0$. 
\end{proof}

Let $x_0$ be a vertex where $\vert f \vert$ achieves its maximum. Then clearly it follows from the Claim~\ref{clm:dirichlets}
\begin{align}
\label{equ-equation1}
\lVert f \rVert_{\infty} & =  \vert f(x_0) \vert \le \frac{1}{2} \sum_{(x,y) \in E} \lvert f(x) - f(y) \rvert \nonumber \\ & \le \frac{1}{2} \sum_{x, y \in V} c_2(x, y) \lvert f(x) - f(y) \rvert/(2 \alpha c)
\end{align} 
where $\lVert \cdot \rVert_{\infty}$ denotes the supremum norm and the factor $1/2$ arises from counting each pair $(x,y)$ in each order. Take $f \in \ell^2_0(V, \pi)$. Notice that $\sum_{x,y \in V} c_2(x, y) = \sum_{x \in V} \pi(x) = 1$. Thus, we have from equation~(\ref{equ-equation1}), by using the Cauchy--Schwarz inequality, that  
\begin{align*}
(2\alpha c)^2 \lVert f \rVert_{\infty}^2 &  \le \frac{1}{2} \sum_{x, y \in V} c_2(x, y) [f(x) - f(y)]^2 \\
& = \frac{1}{2} \sum_{x, y \in V} f(x)^2 \pi(x) Q^2(x ,y) \\ & - \sum_{x, y \in V} f(x) f(y) \pi(x) Q^2(x, y) \\ & + \frac{1}{2} \sum_{x, y \in V} f(y)^2 \pi(x) Q^2(x, y)
\end{align*}
By reversibility, $\pi(x) Q^2(x, y) = \pi(y)Q^2(y, x)$, and the first and last terms above are equal to common value
$$ \frac{1}{2} \sum_{x \in V} f(x)^2 \pi(x) \sum_{y \in V} Q^2(x, y) = \frac{1}{2} \sum_{x \in V} f(x)^2 \pi(x). $$ 
Therefore the above inequality becomes,
\begin{align*}
(2\alpha c)^2 \lVert f \rVert_{\infty}^2 & \le \sum_{x \in V} f(x)^2 \pi(x) \\ & - \sum_{x \in V} f(x) \bigl[ \sum_{y \in V} f(y) Q^2(x, y) \bigr] \pi(x) \\
& = (f, f)_{\pi} - (f, Q^2 f)_{\pi} \\
& = ((\mathbb{I} - Q^2)f, f)_{\pi}.
\end{align*}
Alternatively, we may apply~(\ref{equ-equation1}) to the function $\mbox{sgn}(f)f^2$. Using the trivial inequality $$\lvert \mbox{sgn}(s)s^2 - \mbox{sgn}(t)t^2 \rvert \le \lvert s - t \lvert \cdot (\lvert s \rvert + \lvert t \rvert),$$ valid for any real numbers $s$ and $t$, we obtain that  
\begin{align*}
& (2\alpha c)^2 \lVert f \rVert_{\infty}^4 \le \\ & \left(\frac{1}{2} \sum_{x, y \in V} c_2(x, y) \lvert f(x) - f(y)\rvert \cdot (\lvert f(x) \rvert + \lvert f(y) \rvert) \right)^2 \\
& \le \left(\frac{1}{2} \sum_{x, y \in V} c_2(x, y) [f(x) - f(y)]^2 \right) \cdot \\ & \left(\frac{1}{2} \sum_{x, y \in V} c_2(x, y) [\lvert f(x) \rvert + \lvert f(y) \rvert]^2 \right) \\
& = ((\mathbb{I} - Q^2)f, f)_{\pi} ((\mathbb{I} + Q^2)\lvert f \rvert, \lvert f \rvert)_{\pi}
\end{align*}
by the Cauchy--Schwarz inequality and the same algebra as above. Therefore, if $(f,f)_{\pi} \le 1$, we have $2(\alpha c)^2 \lVert f \rVert_{\infty}^4 \le ((\mathbb{I} - Q^2)f, f)_{\pi}$. \\
Putting both these estimates together, we get 
\begin{equation}
\label{equ-ineque2}
2(\alpha c)^2 \max \{2\lVert f \rVert_{\infty}^2, \lVert f \rVert_{\infty}^4 \} \le ((\mathbb{I} - Q^2)f, f)_{\pi}
\end{equation} 
for $(f,f)_{\pi} \le 1$. Now we show that the above inequality also holds for a regular evolving graph. 

\begin{claim}\label{clm:claim-norm}
Let $\mathcal{G} = G_1, G_2, \ldots$ be a $d$-regular, connected evolving graph with the same set $V$ of nodes. Let $A_{G_i}$ be the transpose of the transition matrix of $G_i$. Let the column vector $f = (p_1, p_2, \ldots ,p_n)^T$ be any probability distribution on $V$. Then 
$ \lVert (A_{G_{i+1}}A_{G_{i}} \ldots A_{G_1}) f \rVert_\infty \le \lVert (A_{G_i}A_{G_{i-1}}\ldots A_{G_1}) f \rVert_\infty $ for all $i \geq 1$.
\end{claim}
\begin{proof}
It is known that the transition matrix of any regular graph is doubly stochastic and if a matrix $Q$ is doubly stochastic then so is $Q^2$. Let $(A_{G_i}A_{G_{i-1}} \ldots A_{G_1}) f = (p_1^i, p_2^i, \ldots, p_n^i)^T$ and $\lVert (A_{G_i}A_{G_{i-1}} \ldots A_{G_1}) f  \rVert_\infty = \max \{p_l^i : l= 1,2, \ldots, n \} = \vert p_k^i \vert$ (say). Then 
\begin{align*}
& (A_{G_{i+1}}A_{G_i} \ldots A_{G_1}) f = \\ & \bigl(\sum_{j \in N(1)} a_{1j}p_j^i, \sum_{j\in N(2)} a_{2j}p_j^i, \ldots, \sum_{j\in N(n)} a_{nj}p_j^i  \bigr)^T
\end{align*} where $N(v)$ is the set of neighbors of $v$ and $a_{ij}$ is the $ij$-th entry of the matrix $(A_{G_{i+1}}A_{G_i} \ldots A_{G_1})$. We show that the absolute value of any coordinate of $(A_{G_{i+1}}A_{G_i}\ldots A_{G_1})f $ is $\leq \vert p_k^i \vert$. In fact, for any $l$, 
\begin{align*} \vert \sum_{j\in N(l)} a_{lj}p_j^i \vert \leq \sum_{j\in N(l)} \lvert a_{lj}\rvert \lvert p_j^i \rvert & \leq \lvert p_k^i \rvert \sum_{j\in N(l)} a_{lj} = \lvert p_k^i \vert,
\end{align*} since the matrix is doubly stochastic, the last sum is $1$. 
\end{proof}

Now apply inequality~(\ref{equ-ineque2}) to $Q^l f$ for $l= 0,1, \ldots, k$. Summing these inequalities and using Claim~\ref{clm:claim-norm}, we obtain
\begin{align*}
& (k+1)2(\alpha c)^2\max \{2\lVert Q^k f \rVert_{\infty}^2, \lVert Q^k f \rVert_{\infty}^4 \} \\ & \le 2(\alpha c)^2 \max \{2\sum_{l=0}^k \lVert Q^l f \rVert_{\infty}^2, \sum_{l=0}^k \lVert Q^l f \rVert_{\infty}^4 \} \\
& \le \sum_{l=0}^k ((\mathbb{I} - Q^2)Q^l f, Q^l f)_{\pi} \\ & = \sum_{l=0}^k ((\mathbb{I} - Q^2)Q^{2l} f, f)_{\pi} \\
& = ((\mathbb{I} - Q^{2k + 2})f, f)_{\pi} \le 1
\end{align*}
for $(f,f)_{\pi} \le 1$. This shows that the norm of $Q^k : \ell^2_0(V, \pi) \rightarrow \ell^{\infty}(V)$ is bounded by 
$$\beta_k := \min\{ [(2\alpha c)^2(k+1)]^{-1/2}, [(\alpha c)^2(2k + 2)]^{-1/4} \}. $$
Let $T: \ell^2(V, \pi) \rightarrow \ell^2_0(V, \pi)$ be the orthogonal projection $Tf := f - (f, \textbf{1})_{\pi} \textbf{1}$. Given what we have shown, we see that the norm of $Q^k T : \ell^2(V, \pi) \rightarrow \ell^{\infty}(V)$ is bounded by $\beta_k$. By duality, the same bound holds for $TQ^k : \ell^1(V, \pi) \rightarrow \ell^2(V, \pi)$. Therefore, by composition of mappings, we deduce that the norm of $Q^k T Q^k : \ell^1(V, \pi) \rightarrow \ell^{\infty}(V)$ is at most $\beta_k^2$ and the norm of $Q^k T Q^{k+1} : \ell^1(V, \pi) \rightarrow \ell^{\infty}(V)$ is at most $\beta_k \beta_{k+1}$. Applying these inequalities to $f := \textbf{1}_x/\pi(x)$ gives the required bound.  
\end{proof}

The more general case is proved in Lyons (see
Lemma~3.4 and Remark~4 in \cite{Lyons}). Sometimes it is more convenient to use the following bound: %For $k =O(\frac{1}{1-\bar{\lambda_2}})$ 
for $k= O(\tau)$ and small $\alpha$, the above can be simplified as follows; see Remark~3 in \cite{Lyons}.
\begin{equation}
\label{one_sided_decay} Q^k(x,y)  \le \frac{4\pi(y)}{c \sqrt{k+1}} =
\frac{4d}{\sqrt{k+1}}\,.
\end{equation}

Note that given a simple random walk on a graph $G$, and a
corresponding matrix  $P$, one can always switch to the lazy version
$Q=(I+P)/2$, and interpret it as a walk on graph $G'$, obtained by
adding  self-loops  to vertices in $G$ so as to double the degree of
each vertex. In the following, with abuse of notation we assume our
$P$ is such a lazy version of the original one.

\begin{proof}[Proof of Proposition~\ref{proposition:first-moment}]
Remember that the evolving graph is $\mathcal{G} = G_1, G_2, \ldots$. Let $X_0, X_1, \ldots $ describe the random walk, with $X_i$
denoting the position of the walk at time $i\ge 0$ on $G_{i+1}$, and let
$\bone_A$ denote the indicator (0-1) random variable, which takes
the value 1 when the event $A$ is true. In the following we also use
the subscript $x$ to denote the fact that the probability or
expectation is with respect to starting the walk at vertex $x$.
%Let $X_0=x$
First the expectation.
\begin{align*}
\e[N_t^x(y)] & =  \e_x[  \sum_{i=0}^t \bone_{\{X_i=y\}}] = \sum_{i=0}^t P^i(x,y) \\
& \le  4 d \sum_{i=0}^t \frac{1}{\sqrt{i+1}} , \\ & \mbox{ (using the above inequality  (\ref{one_sided_decay})) } \\
& \le 8 d \sqrt{t+1}\,.
\end{align*}
\end{proof}
Using the above proposition, we bound the number of visits of each walk at each node, as follows. 

\begin{lemma}\label{lemma:whp one walk one node bound}
For $t = O(\tau)$ and any vertex $y \in \mathcal{G}$, the random walk
started at $x$ satisfies:
\begin{equation*}
\Pr\bigl(N^x_t(y) \ge  32  \ d \sqrt{t+1}\log n \bigr) \le \frac{1}{n^2} \,.
\end{equation*}
\end{lemma}
\begin{proof}
First, it follows from Proposition~\ref{proposition:first-moment} that
%
\begin{equation} 
\Pr\bigl(N^x_t(y) \ge  4\cdot 8 \ d \sqrt{t+1}\bigr) \le \frac{1}{4} \,.\label{eq:simple bound}
\end{equation}
%

For any $r$, let $L^x_r(y)$ be the time that the random walk
(started at $x$) visits $y$ for the $r^{th}$ time. Observe that, for
any $r$, $N^x_t(y)\geq r$ if and only if $L^x_r(y)\leq t$.
Therefore,
\begin{equation}
\Pr(N^x_t(y)\geq r)=\Pr(L^x_r(y)\leq t).\label{eq:visits eq length}
\end{equation}

Let $r^*=32  \ d \sqrt{t+1}$. By \eqref{eq:simple bound} and
\eqref{eq:visits eq length}, $\Pr(L^x_{r^*}(y)\leq t)\leq
\frac{1}{4}\,.$ We claim that
\begin{equation}
\Pr(L^x_{r^*\log n}(y)\leq t)\leq \left(\frac{1}{4}\right)^{\log
n}=\frac{1}{n^2}\,.\label{eq:hp length bound}
\end{equation}
To see this, divide the walk into $\log n$ independent subwalks,
each visiting $y$ exactly $r^*$ times. Since the event $L^x_{r^*\log
n}(y)\leq t$ implies that all subwalks have length at most $t$,
\eqref{eq:hp length bound} follows.
%
Now, by applying \eqref{eq:visits eq length} again,
\[\Pr(N^x_t(y)\geq r^*\log n) = \Pr(L^x_{r^*\log n}(y)\leq t)\leq
\frac{1}{n^2}\] as desired.
\end{proof}

We now extend the above lemma to bound the number of visits of {\em
all} the walks at each particular node.

\begin{lemma}\label{lemma:k walks one node bound}
For $t = O(\tau)$, and for any vertex $y \in
\mathcal{G}$, the random walks started at $x_1, x_2, \ldots, x_k$ satisfy:
\begin{equation*}
\Pr\bigl(\sum_{i=1}^k N^{x_i}_t(y) \ge  32  \ d \sqrt{kt+1} \log n+k\bigr) \le \frac{1}{n^2} \,.
\end{equation*}
\end{lemma}
\begin{proof}
First, observe that, for any $r$, $$\Pr\bigl(\sum_{i=1}^k
N^{x_i}_t(y) \geq r-k\bigr)\leq \Pr[N^y_{kt}(y)\geq r].$$ To see
this, we construct a walk $W$ of length $kt$ starting at $y$ in the
following way: For each $i$, denote a walk of length $t$ starting at
$x_i$ by $W_i$. Let $\tau_i$ and $\tau'_i$ be the first and last
time (not later than time $t$) that $W_i$ visits $y$. Let $W'_i$ be
the subwalk of $W_i$ from time $\tau_i$ to $\tau_i'$. We construct a
walk $W$ by stitching $W'_1, W'_2, ..., W'_k$ together and complete
the rest of the walk (to reach the length $kt$) by a normal random
walk. It then follows that the number of visits to $y$ by $W_1, W_2,
\ldots, W_k$ (excluding the starting step) is at most the number of
visits to $y$ by $W$. The first quantity is $\sum_{i=1}^k
N^{x_i}_t(y)-k$. (The term `$-k$' comes from the fact that we do not
count the first visit to $y$ by each $W_i$ which is the starting
step of each $W'_i$.) The second quantity is $N^y_{kt}(y)$. The
observation thus follows.

Therefore,
\begin{align*}
& \Pr\bigl(\sum_{i=1}^k N^{x_i}_t(y)\geq 32 \ d
\sqrt{kt+1}\log n + k\bigr) \\ & \leq \Pr\bigl(N^y_{kt}(y)\geq 32 \ d
\sqrt{kt+1}\log n\bigr) \\ & \leq \frac{1}{n^2}
\end{align*}
%
where the last inequality follows from Lemma~\ref{lemma:whp one walk
one node bound}.
\end{proof}


%Lemma~\ref{lemma:visits bound}
The Random Walk Visits Lemma~\ref{lem:visit-bound} follows immediately from
Lemma~\ref{lemma:k walks one node bound} by union bounding over all
nodes.

\iffalse
\subsection{Proof of the Theorem \ref{thm:maintheorem} (restated below)}
\begin{theorem}
The algorithm {\sc Single-Random-walk} (cf. Algorithm \ref{alg:single-random-walk}) solves the Single Random Walk problem and with high probability finishes in $\tilde{O}(\sqrt{\tau \Phi})$ rounds. 
\end{theorem}
\begin{proof}
First, we claim, using Lemma \ref{lem:visit-bound} and
\ref{lem:connector-bound}, that each node is used as a connector node
at most $\frac{32 \ d \sqrt{\tau}(\log n)^3}{\lambda}$ times with
probability at least $1-2/n$. To see this, observe that the claim
holds if each node $x$ is visited at most
$t(x)=32 \ d \sqrt{\tau+1}\log n$ times and consequently appears as a
connector node at most $t(x)(\log n)^2/\lambda$ times. By
Lemma~\ref{lem:visit-bound}, the first condition holds with
probability at least $1-1/n$. By Lemma~\ref{lem:connector-bound} and
the union bound over all nodes, the second condition holds with
probability at least $1-1/n$, provided that the first condition
holds. Therefore, both conditions hold together with probability at
least $1-2/n$ as claimed.

Now, we choose $\eta=1$ and $\lambda=32 \sqrt{\tau \Phi}(\log n)^3$.
%
By Lemma~\ref{lem:phase1}, Phase~1 finishes in $\tilde O(\lambda
\eta) = \tilde O(\sqrt{\tau \Phi})$ rounds with high probability.
%
For Phase~2, {\sc Sample-Coupon} is invoked
$O(\frac{\tau}{\lambda})$ times (only when we stitch the walks) and
therefore, by Lemma~\ref{lem:lemma2.3}, contributes
$O(\frac{\tau \Phi}{\lambda})=\tilde O(\sqrt{\tau \Phi})$ rounds.

Therefore, with probability at least $1-2/n$, the rounds are $\tilde
O(\sqrt{\tau \Phi})$ as claimed.
\end{proof}

\fi

\section{Generalization to non-regular evolving graphs}\label{sec:lazy}
The lazy random walk strategy actually ``converts'' a random walk on a non-regular graph to a slower random walk on a regular graph. 

\begin{definition}\label{def:lazy-rw}
At each step of the walk pick a vertex $v$ from $V$ uniformly at random and if there is an edge from the current vertex to the vertex $v$ then we move to $v$, otherwise we stay at the current vertex. 
\end{definition} 

This strategy of lazy random walk in fact makes the graphs $n$-regular: every edge adjacent to the current vertex is picked with probability $1/n$ and with the remaining probability we stay at the current vertex. 
Using this strategy, we can obtain the same results on non-regular graphs as well, but slower by a factor of $n$. In fact, we can do better if nodes know an upper bound $d_{max}$ on the maximum degree of the dynamic network. Modify the lazy walk such that at each step of the walk we stay at the current vertex $u$ with probability $1 - (d(u)/(d_{max} + 1))$ and with the remaining probability pick a neighbor uniformly at random. This only results in a slowdown by a factor of $d_{max}$ compared to the regular case. 

\section{$k$ Random Walks Problem}\label{multiple walks}
The high-level idea and the analysis of the algorithm are as follows:
\paragraph{{\sc Many-Random-Walks} :} Let $\lambda=(32 \sqrt{k\tau \Phi+1}\log n+k)(\log n)^2$. If
$\lambda \ge \tau$ then run the naive random walk algorithm. %, i.e., the sources find walks of length $\tau$ simultaneously by sending tokens. 
Otherwise, do the following. First, modify Phase~2 of {\sc Single-Random-Walk} to create multiple walks, one at a time; i.e., in the second phase, we stitch the short walks together to get a
walk of length $\tau$ starting at $s_1$, then do the same for $s_2$, $s_3$, and so on. We show that the {\sc Many-Random-Walks} algorithm finishes in $\tilde O\left(\min(\sqrt{k\tau \Phi}, k+\tau)\right)$ rounds with high probability. This is the result stated in Theorem~\ref{thm:kwalks} (Section~\ref{sec:results}); the formal proof is given below. The details of this specific extension are similar to the previous ideas, even in the dynamic setting. 

\subsection{Proof of the Theorem \ref{thm:kwalks} (restated below)}
\begin{theorem} {\sc Many-Random-Walks} (cf. Algorithm~\ref{alg:many-random-walk}) finishes in
$\tilde O\left(\min(\sqrt{k\tau \Phi}, k+\tau)\right)$
rounds with high probability.
\end{theorem}
\begin{proof}
Recall that we assume $\lambda=(32 \sqrt{k\tau \Phi+1}\log n+k)(\log n)^2$. First, consider the case where $\lambda \ge \tau$. In this case, $\min(\sqrt{k\tau \Phi}+k, \sqrt{k\tau}+k+\tau)=\tilde O(\sqrt{k\tau}+k+\tau)$. By Lemma~\ref{lem:visit-bound}, each
node $x$ will be visited at most $\tilde O(d (\sqrt{k\tau}+k))$ times. Therefore, using the same argument as Lemma~\ref{lem:phase1},
the congestion is $\tilde O(\sqrt{k\tau} + k)$ with high probability. Since the dilation is $\tau$, {\sc Many-Random-Walks}
takes $\tilde O(\sqrt{k\tau} + k +\tau)$ rounds as claimed. Since $2\sqrt{k\tau} \le k + \tau$, this bound reduces
to $\tilde O(k +\tau)$. 

Now, consider the other case where $\lambda < \tau$. In this case,
$\min(\sqrt{k\tau \Phi} +k, \sqrt{k\tau}+k+\tau)=\tilde O(\sqrt{k\tau \Phi}+k)$. Phase~1 takes $O(\lambda) = \tilde O(\sqrt{k\tau \Phi}+k)$. The stitching in Phase~2 takes $\tilde O(k \Phi\tau /\lambda) = \tilde O(\sqrt{k\tau \Phi})$. Since $k \Phi\tau /\lambda \geq k\Phi \geq k$, the
total number of rounds required is $\tilde O(\sqrt{k\tau \Phi})$ as claimed.
\end{proof}

%\newcommand{\mindegree}[0]{\delta}
\begin{algorithm}[T]\label{many-walks algorithm}
\caption{\sc Many-Random-Walks}
\label{alg:many-random-walk}
\textbf{Input:} Starting nodes $s_1, s_2, \ldots, s_k$, (not necessarily distinct) and desired walks length $\tau$ and parameter $\lambda$.\\
\textbf{Output:} Each destination node of the walks outputs the ID of its corresponding source.\\

\textbf{Case~1.} When $\lambda \ge \tau$. [we assumed $\lambda=(32 \sqrt{k\tau \Phi+1}\log n+k)(\log n)^2$]
\begin{algorithmic}[1] 
\STATE  Run the naive random walk algorithm, i.e., the sources find walks of length $\tau$ simultaneously by sending tokens.

\end{algorithmic}

\textbf{Case~2.} When $\lambda < \tau$. \\
\textbf{Phase 1: (Each node $v$ performs $d$ random walks of length $\lambda + r_i$ where $r_i$ (for each $1\leq i \leq d$) is chosen independently at random in the range $[0, \lambda -1]$. At the end of the process, there are $d$ (not necessarily distinct) nodes holding a ``coupon'' containing the ID of $v$.)}
\begin{algorithmic}[1]
\FOR{each node $v$}
\STATE  Perform $d$ walks of length $\lambda + r_i$, as in Phase~1 of algorithm {\sc Single-Random-Walk}. 
\ENDFOR

\end{algorithmic}


\textbf{Phase 2: (Stitch $\Theta (\tau/\lambda)$ short walks for each source node $s_j$)}
\begin{algorithmic}[1]
\FOR{j = 1 to k}
\STATE  Consider source $s_j$. Use algorithm {\sc Single-Random-Walk} to perform a walk of length $\tau$ from $s_j$.
\STATE When algorithm {\sc Single-Random-walk} terminates, the sampled destination outputs ID of the source $s_j$. 
\ENDFOR
\end{algorithmic}

\end{algorithm}

\iffalse
\section{Applications}
\subsection{Information Dissemination (Pseudocode)}\label{application token}
%Let us first analyze the running time of our approach to compute $k$-gossip problem in $d$-regular evolving graph. \\ 
%\noindent \textbf{Analysis.} Using the time complexity of our $k$ Random Walks algorithm, we analyze the round complexity of our algorithm (cf. Algorithm \ref{alg:token-dissemination}), and show that it solves the $k$-gossip problem in $\tilde{O}(n^{1/3}k^{2/3}(\tau \Phi)^{1/3})$ rounds. To make sure that the 

\begin{algorithm}[H]
\caption{\sc K-Information-Dissemination}
\label{alg:token-dissemination}
\textbf{Input:} An evolving graphs $\mathcal{G}: G_1, G_2, \ldots$ and $k$ token in some nodes.\\
\textbf{Output:} To disseminate $k$ tokens to all the nodes.\\

\textbf{Phase 1: (Send $f = n^{2/3} (k/\tau \Phi)^{1/3}$ copies of each token to random places)}
\begin{algorithmic}[1]
%\FOR{each token $t$}
\STATE  Every node holding token $t$, send $f = n^{2/3} (k/\tau \Phi)^{1/3}$ copies of each token to random nodes using algorithm {\sc Many-Random-Walk}.
%\ENDFOR

\end{algorithmic}


\textbf{Phase 2: (Broadcast each token for $O(n\log n/f)$ rounds)}
\begin{algorithmic}[1]
\FOR{each token $t$}
\STATE  For the next $2 n\log n/f$ rounds, let all the nodes has token $t$ broadcast the token.
%\STATE When algorithm {\sc Single-Random-walk} terminates, the sampled destination outputs ID of the source $s_j$. 
\ENDFOR
\end{algorithmic}

\end{algorithm}


 \noindent algorithm terminates in $O(nk)$ rounds, 
we run the above algorithm in parallel with the trivial algorithm (which is just broadcast each of the $k$ tokens sequentially; clearly this will take $O(nk)$ rounds in total) and stops when one of the two algorithm stop. Thus the claimed bound in Theorem \ref{thm:token-bound} holds. The formal proof is given below. \\

\noindent \textbf{Proof of the Theorem \ref{thm:token-bound} (restated below)}
\begin{theorem}
The algorithm~(cf. algorithm~\ref{alg:token-dissemination}) solves $k$-gossip problem with high probability\\ in $\tilde{O}(\min\{n^{1/3}k^{2/3}(\tau \Phi)^{1/3}, nk\})$ rounds. 
\end{theorem}
\begin{proof}%[Proof of the Theorem \ref{thm:token-bound}]
We run both the trivial algorithm and our proposed algorithm in parallel. Since the trivial algorithm finishes in $O(nk)$ rounds, we concentrate here only on the round complexity of our proposed algorithm. \\
We send $f$ copies of each of the $k$ tokens to random nodes, which means we sample $k f$ random nodes from the uniform distribution. So using the {\sc Many-Random-Walk} algorithm, phase 1 takes $\tilde{O}(\sqrt{k f \tau \Phi})$ rounds. 

Now fix a node $v$ and a token $t$. Let $S$ be the set of nodes which hold the token $t$ after phase 1. Since the token $t$ is broadcast for $2 n \log n/f$ rounds, there is a set $S_v^t$ of at least $2 n \log n/f$ nodes from which $v$ is reachable within $2 n \log n/f$ rounds. This follows from the fact that in every round at least one uninformed node becomes informed, since the graph is always connected. It is now clear that if $S$ intersects $S_v^t$, then $v$ will receive token $t$. The elements of the set $S$ were sampled from the vertex set through the algorithm {\sc Many-Random-Walk}, which samples nodes from a distribution close to uniform, not from the exact uniform distribution. We can, however, make the distribution very close to uniform by increasing the walk length by a constant factor. Suppose the {\sc Many-Random-Walk} algorithm samples nodes with probability $1/n \pm 1/n^2$, which means each node in $S$ is sampled with probability $1/n \pm 1/n^2$. So the probability that a single sampled node $w \in S$ does not lie in $S^t_v$ is at most $(1 - |S^t_v|(\frac{1}{n} \pm \frac{1}{n^2})) = (1 - \frac{2n\log n}{f} \times \frac{n \pm 1}{n^2})$. Therefore the probability that none of the $f$ sampled nodes in $S$ lies in $S^t_v$ is at most $(1 - \frac{2(n \pm 1)\log n}{n f})^f \leq \frac{1}{n^{2 \pm 2/n}}$. Now using the union bound we can say that every node in the network receives the token $t$ with high probability. This shows that phase~2 uses $k n\log n/f$ rounds and sends all $k$ tokens to all the nodes with high probability. Therefore the algorithm finishes in $\tilde{O}(\sqrt{k f \tau \Phi} + k n/f)$ rounds. Now choosing $f = n^{2/3} (k/\tau \Phi)^{1/3}$ gives the bound $\tilde{O}(n^{1/3} k^{2/3} (\tau \Phi)^{1/3})$. Hence, the $k$-gossip problem is solved with high probability in $\tilde{O}(\min\{n^{1/3}k^{2/3}(\tau \Phi)^{1/3}, nk\})$ rounds. 
\end{proof}

Note that the mixing time $\tau$ of a regular dynamic graph is at most $O(n^2)$ (follows from Theorem~\ref{thm:mixtime} and Corollary~\ref{cor:second-eigen-bound}). Putting this into Theorem~\ref{thm:token-bound} yields a better bound for the $k$-gossip problem in a regular dynamic graph. 
\fi
%\begin{lemma}
%Let $G_1, G_2, \ldots$ be a sequence of $d$-regular stationary evolving graphs. Let a token $t$ be broadcasted from a particular node $v$ for $O(R)$ rounds. Then there is at least $O(R)$ nodes receives the token $t$. In other words, there is at least $O(R)$ nodes from which $v$ is reachable if broadcast so many rounds.    
%\end{lemma}
%\begin{proof}
%This clearly follows from the connectedness property of the evolving graph. At any round $r$, let $A$ be the set of all informed nodes and $B$ be the set of all uninformed nodes. Since the graph is connected, there is at least one edge between $A$ and $B$. So at least one uninformed node will be informed in every round. Hence the lemma.   
%\end{proof}
\iffalse
\subsubsection{Proof of the Theorem \ref{thm:token-bound} (restated below)}
\begin{theorem}
The algorithm~(cf. algorithm~\ref{alg:token-dissemination}) solves $k$-gossip problem with high probability in\\ $\tilde{O}(\min\{n^{1/3}k^{2/3}(\tau \Phi)^{1/3}, nk\})$ rounds. 
\end{theorem}
\begin{proof}
We run both the trivial algorithm (just broadcast all tokens) and our proposed algorithm in parallel. Since the trivial algorithm finishes in $O(nk)$ rounds, we concentrate here on the round complexity of our algorithm. \\
We are sending $f$ copies of each $k$ token to random nodes which means we are sampling $k f$ random nodes from uniform distribution. So using the {\sc Many-Random-Walk} algorithm, phase 1 takes $\tilde{O}(\sqrt{k f \tau \Phi})$ rounds. 

Now fix a node $v$ and a token $t$. Let $S$ be the set of nodes which hold the token $t$ after phase 1. Since the token $t$ is broadcast for $2 n \log n/f$ rounds, there is a set $S_v^t$ of at least $2 n \log n/f$ nodes from which $v$ is reachable within $2 n \log n/f$ rounds. This follows from the fact that in every round at least one uninformed node becomes informed, since the graph is connected. It is now clear that if $S$ intersects $S_v^t$, then $v$ will receive token $t$. Since the set $S$ was picked uniformly at random, the probability that $S$ does not intersect $S_v^t$ is at most $$ \frac{\dbinom{n - 2n\log n/f}{f}}{\dbinom{n}{f}} < \left(\frac{n - 2n\log n/f}{n}\right)^{f} \le \frac{1}{n^2}.$$ Thus every node receives the token with probability at least $1 - 1/n$. This shows that phase 2 uses $k n\log n/f$ rounds and sends all $k$ tokens to all the nodes with high probability. Therefore the algorithm finishes in $\tilde{O}(\sqrt{k f \tau \Phi} + k n/f)$ rounds. Now choosing $f = n^{2/3} (k/\tau \Phi)^{1/3}$ gives the bound $\tilde{O}(n^{1/3} k^{2/3} (\tau \Phi)^{1/3})$. Hence, the $k$-gossip problem is solved with high probability in $\tilde{O}(\min\{n^{1/3}k^{2/3}(\tau \Phi)^{1/3}, nk\})$ rounds. 
\end{proof}
Note that the algorithm {\sc Many-Random-Walk} samples nodes from a distribution that is only close to uniform on the vertex set. Nevertheless, the algorithm solves the $k$-gossip problem in $\tilde{O}(\min\{n^{1/3}k^{2/3}(\tau \Phi)^{1/3}, nk\})$ rounds with high probability. 

\fi

\iffalse
\subsection{Decentralized Estimation of Mixing Time}\label{application mixing time}
We focus on estimating the {\em dynamic mixing time} $\tau$ of a $d$-regular connected non-bipartite evolving graph $\mathcal{G}= G_1, G_2, \ldots$. We discussed in Section \ref{mixing_time} that $\tau$ is the maximum of the mixing times of the graphs in $\{G_t : t \geq 1 \}$. To make it appropriate for our algorithm, we will assume that all graphs $G_t$ in the graph process $\mathcal{G}$ have the same mixing time $\tau_{mix}$. Therefore $\tau = \tau_{mix}$. While the definition of $\tau$ (cf. Definition \ref{def:mix-dynamic}) itself is consistent, estimating this value becomes significantly harder in the dynamic context. The intuitive approach of estimating distributions continuously and then adapting a distribution-closeness test works well for static graphs, but each of these steps becomes far more involved and expensive when the network itself changes and evolves continuously. Therefore we need careful analysis and new ideas to obtain the following results. We introduce related notations and definitions in Section~\ref{mixing_time}. 
%The algorithms and related results are placed in Appendix (cf. Section \ref{application mixing time}).  

%We focus on estimating the dynamic mixing time $\tau$ of a $d$-regular connected non-bipartite evolving graph $\mathcal{G} = G_1, G_2, \ldots$. We discussed in Section \ref{sec:model} that this $\tau$ is essentially an upper bound of the mixing time of any graph in $\{G_t : t \geq 1 \}$. %Let $\tau_{mix}$ is the maximum mixing time of any graph $G_t$. 
%While the definition of $\tau$ itself is consistent, estimating this value becomes significantly harder in the dynamic context. The intuitive approach of estimating distributions continuously and then adapting a distribute-closeness test works well for static graphs, but each of these steps becomes far more involved and expensive when the network itself changes and evolves continuously. Therefore we need careful analysis and new ideas in obtaining the following results. We introduce related notations and definitions in Section~\ref{mixing_time}. 
%Further we introduce another nice approach to estimate mixing time of a special class of graph, called node transitive graph. We placed the second approach in section \ref{application mixing time} in the Appendix. 

%We want to estimate the dynamic mixing time ($\tau$) of a $d$-regular connected non-bipartite evolving graph $\mathcal{G} = G_1, G_2, \ldots$. The intuitive approach of estimating distributions continuously and then adapting a distribute-closeness test works well for static graphs, but each of these steps becomes far more involved and expensive when the network itself changes and evolves continuously. So we need careful analysis and new ideas in obtaining the following results. We have introduced related notations and definitions in Section~\ref{mixing_time}. 

The goal is to estimate $\tau^x_{mix}$ (mixing time for source $x$). Notice that the definition $\tau^x_{mix}$ and dynamic mixing time, $\tau$ (cf. Section \ref{mixing_time}) are consistent for a $d$-regular evolving graph $\mathcal{G} = G_1,G_2, \dots$ due to the monotonicity property (cf. Lemma~\ref{lem:monotonicity}) of distributions.

We now present an algorithm to estimate $\tau$. The main idea behind this approach is, given a source node, to run many random walks of some length $\ell$ using the approach described in Section \ref{sec:k-algo}, and use these to estimate the distribution induced by the $\ell$-length random walk. We then compare the distribution at length $\ell$ with the stationary distribution to determine if they are close, and if not, double $\ell$ and retry.     

For the case of a static graph (with diameter $D$), Das Sarma et al.~\cite{DasSarmaNPT10} show that one can approximate the mixing time in $\tilde O(n^{1/4} \sqrt{D\tau^x(\epsilon)})$ rounds. We show here that this bound also holds for approximating the mixing time of dynamic graphs that are $d$-regular.  We use the technique of Batu et al.~\cite{BFFKRW} to determine if the distribution is $\epsilon$-near to the uniform distribution. Their result is restated in the following theorem. 

\begin{theorem}[\cite{BFFKRW}]\label{thm:batu}
For any $\epsilon$, given $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples of a distribution $X$
over $[n]$, and a specified distribution $Y$, there is a test that outputs PASS with high probability if $|X-Y|_1\leq \frac{\epsilon^3}{4\sqrt{n}\log n}$, and outputs FAIL with high probability if $|X-Y|_1\geq 6\epsilon$.
\end{theorem}

The distribution $X$ in our context is some distribution on nodes and $Y$ is the stationary distribution, i.e., $Y(v) = 1/n$ (assume $\vert V \vert = n$ in the
network). %In this case, the algorithm used in the above theorem can be simulated in a distributed network in �O(D + 2/ log(1 + ?)) rounds, as in the following theorem.
We now give a very brief description of the algorithm of Batu et. al.~\cite{BFFKRW} to illustrate that it can in fact be simulated on the distributed network efficiently. The algorithm partitions the set of nodes into buckets based on the steady state probabilities. Each of the $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from $X$ now falls in one of these buckets. Further, the actual counts of the number of nodes in these buckets for distribution $Y$ are computed. The exact count for $Y$ for at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets (corresponding to the samples) is compared with the number of samples from $X$; these are compared to determine if $X$ and $Y$ are close. Note that the total number of nodes $n$ and $\epsilon$ can be broadcast to all nodes in $O(\Phi)$ rounds and each node can determine which bucket it is in in $O(\Phi)$ rounds. We refer the reader to their paper~\cite{BFFKRW} for a precise description.


Our algorithm starts with $\ell=1$ and runs $K=\tilde{O}(\sqrt{n})$ walks of length $\ell$ from the specified source $x$. As the test of comparison with the steady state distribution outputs FAIL (for choice of $\epsilon=1/12e$), $\ell$ is doubled. This process is repeated to identify the largest $\ell$ such that the test outputs FAIL with high probability and the smallest $\ell$ such that the test outputs PASS with high probability. These give lower and upper bounds on the required $\tau^x_{mix}$ respectively. Our resulting theorem is presented below. \\

\noindent \textbf{Proof of the Theorem \ref{thm:complexity_bound_mixing_time} (restated below)}
\begin{theorem}
Given connected $d$-regular evolving graphs with dynamic diameter $\Phi$, a node $x$ can find, in $\tilde{O}(n^{1/4}\sqrt{\Phi \tau^x(\epsilon)})$ rounds, a time
$\tilde{\tau}^x_{mix}$ such that $\tau^x_{mix}\leq \tilde{\tau}^x_{mix} \leq \tau^x(\epsilon)$, where $\epsilon = \frac{1}{6912e\sqrt{n}\log n}$.
% where $T$ is the smallest time such that $r_x(T)\leq \frac{1}{6912e\sqrt{n}\log n}$.
%This can be done in $\tilde{O}(n^{1/2} + n^{1/4}\sqrt{Dt_{mix}})$ rounds.
%
%that is w.h.p. between the $6\epsilon$-near mixing time and $\frac{\epsilon^3}{4\sqrt{n}\log n}$-near mixing time in $\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + n^{1/4}poly(\epsilon^{-1})\sqrt{Dt_{mix}})$ rounds.
%
%If the degree distribution is unknown to the nodes, a node can find an $\epsilon$-close mixing time in $\tilde{O}(n^{2/3}poly(\epsilon^{-1}) + n^{1/3}poly(\epsilon^{-1})\sqrt{Dt_{mix}})$ rounds.
\end{theorem}
\begin{proof}
Our goal is to check when the probability distribution (on the vertex set $V$) of the random walk becomes the stationary distribution, which is uniform here. If a source node knows the total number of nodes in the network (which can be learned through flooding in $O(\Phi)$ rounds), we only need
$\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ samples from a distribution to
compare it to the stationary distribution.  This can be achieved by
running {\sc MultipleRandomWalk} to obtain $K = \tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ random walks. We choose $\epsilon = 1/12e$.
To find the approximate mixing time, we try out
increasing values of $\ell$ that are powers of $2$.  Once we find the
right consecutive powers of $2$, the monotonicity property admits a
binary search to determine the exact value for the specified $\epsilon$.
%of $\epsilon$-near mixing
%time. Note that we can apply binary search as $\epsilon$-near mixing
%time is a monotonic property.

The result
in~\cite{BFFKRW} can also be adapted to compare with the steady state distribution even if the source does not know the entire distribution. As described previously, the source only needs to know the {\em count} of the number of nodes with steady state probability in given buckets. Specifically, the buckets of interest number at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$, as the count is required only for buckets from which a sample was drawn. Since each node knows its own steady state probability (determined just by its degree), the source can broadcast a specific bucket's information and recover, in $O(D)$ steps, the count of the number of nodes that fall into this bucket. Using the standard upcast technique previously described, the source can obtain the bucket count for each of these at most $\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ buckets in $\tilde{O}(n^{1/2}poly(\epsilon^{-1}) + D)$ rounds.


We have shown previously that a source node can obtain $K$ samples from $K$ independent random walks of length $\ell$ in $\tilde{O}(\sqrt{K\ell \Phi})$ rounds. Setting $K=\tilde{O}(n^{1/2}poly(\epsilon^{-1}))$ completes the proof.
\end{proof}


Suppose our estimate of $\tau^x_{mix}$ is close to the dynamic mixing time of the network defined as $\tau = \max_{x}{\tau^x_{mix}}$, then this would allow us to estimate several related quantities. Given a dynamic mixing time $\tau$, we can approximate the spectral gap ($1-\lambda$) and the conductance ($\Psi$) due to the
%following known relations. The spectral gap is the $1-\lambda_2$ where $\lambda_2$ is the second eigenvalue of the connected transition matrix. It is
known relations that $\frac{1}{1-\lambda}\leq \tau \leq \frac{\log n}{1-\lambda}$ and $\Theta(1-\lambda)\leq \Psi \leq \Theta(\sqrt{1-\lambda})$ as shown in~\cite{JS89}. %Note that the spectral gap $(1 - \lambda)$ is the smallest spectral gap of all the graphs in $\{ G_t : t \geq 1 \}$ as our second largest eigenvalue $\lambda$ is the maximum of  all second largest eigenvalue of those graphs.  


\noindent\textbf{The second approach for node transitive graph:}\\
The idea behind this technique is to observe returns of simple random walks to their starting nodes on the graph. This is an idea that was completely unexplored in past work on random walks in distributed networks. We assume here that the dynamic network $\mathcal{G}$ is node transitive, so that the mixing time of a simple random walk starting from any vertex is the same. Note that every node transitive graph is regular. Initially we start some random walks, say $c\log n$ ($c$ a small constant) from each vertex in parallel, giving $c n\log n$ walks in total. When the distribution on the vertex set $V$ becomes close to uniform, it is expected that about $c\log n$ walks in total will have returned to their starting vertices. Therefore, by counting the number of returns of walks at some step $k$, we can verify that the distribution has come close to the uniform distribution. We formalize the approach and results below.

\begin{lemma}
%\label{lem:total-return}
Let $c\log n$ random walks start in parallel from every vertex in the dynamic network. Let $R_k$ be the total number of returns of the random walks at time $k$. Suppose the distribution on the vertex set is close to uniform at time $k$. Then $R_k$ is close to $c\log n$ with high probability.  
\end{lemma}
\begin{proof}
We start $c\log n$ random walks from every vertex, so there are $c n \log n$ such walks in total. Since each walk is independent of the others, we can assign an index $i$ ($i = 1,2,\ldots, cn\log n$) to each walk. Let $X_i^k$ be the $0$-$1$ random variable which takes value $1$ when the $i$-th random walk returns to its starting node at time $k$, and takes value $0$ otherwise. At time $k$, the probability that any walk returns to its starting vertex is approximately $1/n$. Let $X = \sum_i X_i^k$, i.e., $X$ is the total number of returns of all walks at time $k$. Then clearly $E[X] = c \log n$. Now using the Chernoff bound $\Pr(|X - E[X]| \ge \delta E[X]) \le 2e^{-E[X]\delta^2/3} = \frac{2}{n^{c\delta^2/3}}$, it follows that with high probability $X$ is approximately $c\log n$ for a suitable constant $\delta$. Hence, $R_k$ is close to $c\log n$ with high probability. 
\end{proof}
Therefore we are interested in the value of $k$ at which the total number of returns of the random walks ($R_k$) is approximately $c\log n$. In other words, we are interested in finding the $k$ for which the distribution on the vertex set $V$ is close to uniform. We will show below that this $k$ is essentially the dynamic mixing time of the network. \\   

Let $P_k(x, y)$ be the probability that a simple random walk on $\mathcal{G}$ starting at $x \in V$ will be at $y \in V$ at time $k$. For node transitive graph $\mathcal{G}$, the return probability $P_k(r, r)$ is same for all node $r \in V$. Therefore $P_k(r,r) = \frac{R_k}{cn\log n}$; which means when the total number of returns of walk is close to $c\log n$ then the return probabilities of all the vertex is close to uniform. Now assume that the distribution on $V$ is close to uniform at time $k$ i.e., $|P_k(r, r) - \frac{1}{n}|$ is close to $0$ for all $r \in V$. Let $q_k = |P_k(r, r) - \frac{1}{n}|$. We will find some $k$ for which $q_k$ is close to $0$ for all $r$. Now in Lemma~1 in \cite{Bat}, it is shown that $$(1 + \frac{\ln n}{\ln q_k})(1 - q_k^{1/k}) \leq 1- \lambda_2 \leq 1 - q_k^{1/k}.$$ This bound tells us that for some constant $s>1$,  if we find $k>0$ such that $q_k< 1/n^s$ then $1 - q_k^{1/k}$ is an estimate of $1-\lambda_2$ which is within a factor of $1/(1-1/s)$ to the true value. Therefore assuming the above inequality we want to find the value of $k$ for which $q_k< 1/n^s$ for all $r \in V$. \\
We have, 
\begin{align*} 
& 1-\lambda_2 \leq 1 - q_k^{1/k} \\
\Rightarrow \mbox{ } & q_k^{1/k} \leq 1- (1-\lambda_2) \\
\Rightarrow \mbox{ }  q_k & \leq (1- (1-\lambda_2))^k \\
  & \leq \frac{1}{n^s} & \text{for $k = \frac{s\log n}{1-\lambda_2}.$}
\end{align*}
This shows that if $k = \frac{s\log n}{1-\lambda_2}$ then $q_k < 1/n^s$ for all $r$. In other words, we can say that at time $k = \frac{s\log n}{1-\lambda_2}$ (which is approximately the mixing time $\tau$ of the dynamic graph), the distribution on the vertex set $V$ becomes close to the uniform distribution. Moreover, from the above we see that by counting the total number of returns of the random walks ($R_k$) and dividing it by the total number of walks ($cn\log n$), we can certify how close the distribution on the vertex set is at time $k$. Therefore, at each step $k$, we count $R_k$; if it is close to $c\log n$ then the value $k$ is close to the dynamic mixing time of the node transitive graph.   


Our algorithm starts with length $\ell = \log n$ (since $\log n$ is the mixing time of the complete graph and hence a lower bound in general) and runs $c\log n$ random walks of length $\ell$ in parallel from each vertex. Then we count the number of returns of all random walks to their corresponding starting vertices. If the total count ($R_{\ell}$) is close to $c \log n$ then we output the value of $\ell$ as the mixing time of the network. Otherwise, we double the value of $\ell$ and retry. We use the algorithm {\sc Many-Random-Walk} to perform $c\log n$ walks from each vertex. Counting the total number of returns of random walks can be done by flooding in at most $O(\Phi)$ rounds where $\Phi$ is the dynamic diameter of the network. The round complexity of this algorithm is $\tilde{O}(n^{1/2}\sqrt{\tau \Phi})$, as stated in Theorem~\ref{thm:mixing estimate bound}. %in Section~\ref{sec:results}.  

\begin{theorem}\label{thm:mixing estimate bound}
The above algorithm estimates the dynamic mixing time $\tau$ of a node transitive graph in $\tilde{O}(n^{1/2}\sqrt{\tau \Phi})$ rounds, with high probability. 
\end{theorem}
\begin{proof}
We use the {\sc Many-Random-Walk} algorithm (cf. Algorithm~\ref{alg:many-random-walk}) to run $c n\log n$ walks in parallel for length $\ell$. Then we can count the total number of returns of the walks. This can be done efficiently by flooding in at most $O(\Phi)$ rounds.  Since {\sc Many-Random-Walk} can be done in $\tilde{O}(\sqrt{k\ell \Phi})$ rounds and the counting takes $O(\Phi)$ rounds, the total is $\tilde{O}(n^{1/2}\sqrt{\tau \Phi} + \Phi)$, which is $\tilde{O}(n^{1/2}\sqrt{\tau \Phi})$ as $\Phi \leq n$. 
\end{proof}

Our estimation of mixing time $\tau$ of the node transitive graph would allow us to estimate several related quantities. Given a mixing time $\tau$, we can approximate the spectral gap ($1-\lambda_2$) and the conductance ($\Psi$) due to the known relations that $\frac{1}{1-\lambda_2}\leq \tau \leq \frac{\log n}{1-\lambda_2}$ and $\Theta(1-\lambda_2)\leq \Psi \leq \Theta(\sqrt{1-\lambda_2})$ as shown in~\cite{JS89}. 

\fi
  




