\section{Proofs of Section~\ref{mixing_time}}
\subsection{Proof of Lemma~\ref{lem:lemma2}}
\begin{proof}
Let $X_1, X_2, \ldots, X_n$ be an orthonormal set of eigenvectors of $A_G$ with corresponding eigenvalues $\lambda_1 \geq \lambda_2 \geq \ldots \geq \lambda_n$. Since $A_G$ is a symmetric stochastic matrix, all its eigenvalues and eigenvectors are real, $\lambda_1 = 1$, and $X_1 = \frac{\textbf{1}}{\sqrt{n}}$. Clearly, 
$$ \bigl\Vert pA_G - \frac{\textbf{1}}{n} \bigr\Vert = \bigl\Vert pA_G - \frac{\textbf{1}}{n}A_G \bigr\Vert = \bigl\Vert \bigl(p - \frac{\textbf{1}}{n}\bigr) A_G \bigr\Vert, $$
where the first equality uses $\frac{\textbf{1}}{n}A_G = \frac{\textbf{1}}{n}$, since $A_G$ is doubly stochastic.
Since $X_1, X_2, \ldots, X_n$ form an orthonormal basis, we can write $p = \beta_1X_1 + \beta_2X_2 + \ldots + \beta_n X_n$, where $\beta_1, \beta_2, \ldots ,\beta_n \in \mathbb{R}$. Since $p$ is a probability distribution, $\beta_1 = p\cdot X_1^T = \frac{1}{\sqrt{n}} \sum_{i}p_i = \frac{1}{\sqrt{n}}$, so that $\beta_1 X_1 = (\frac{1}{n}, \frac{1}{n}, \ldots ,\frac{1}{n})$. Therefore, $p - \frac{\textbf{1}}{n} = \sum_{i=2}^n \beta_i X_i $. Hence, $$ \bigl\Vert p - \frac{\textbf{1}}{n} \bigr\Vert = \sqrt{\sum_{i=2}^n \beta_i^2}. $$
Furthermore, 
\begin{align*} \bigl\Vert (p - \frac{\textbf{1}}{n}) A_G \bigr\Vert & = \bigl\Vert \sum_{i=2}^n \beta_i X_i A_G \bigr\Vert  = \bigl\Vert \sum_{i=2}^n \lambda_i \beta_i X_i \bigr\Vert \\ 
& = \sqrt{\sum_{i=2}^n \lambda_i^2 \beta_i^2}  \leq \max_{i=2,\ldots,n} \vert \lambda_i \vert \cdot \sqrt{\sum_{i=2}^n \beta_i^2} \\
& = \bar{\lambda}_2\cdot \bigl\Vert p - \frac{\textbf{1}}{n} \bigr\Vert 
\end{align*}
Thus, $$\bigl\Vert pA_G - \frac{\textbf{1}}{n} \bigr\Vert \leq \bar{\lambda}_2\cdot \bigl\Vert p - \frac{\textbf{1}}{n} \bigr\Vert  $$
\end{proof}
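The contraction in Lemma~\ref{lem:lemma2} is easy to check numerically. The following minimal sketch (in Python with NumPy) builds a small $4$-regular circulant graph, an arbitrary choice made purely for illustration, and verifies the inequality for a random starting distribution $p$.
\begin{verbatim}
import numpy as np

# 4-regular circulant graph on n vertices: i is adjacent to i+/-1, i+/-2.
n, d = 20, 4
A = np.zeros((n, n))
for i in range(n):
    for o in (1, -1, 2, -2):
        A[i, (i + o) % n] = 1.0 / d   # symmetric, doubly stochastic

u = np.full(n, 1.0 / n)               # uniform (stationary) distribution
abs_eigs = np.sort(np.abs(np.linalg.eigvalsh(A)))
lam2_bar = abs_eigs[-2]               # second largest absolute eigenvalue

rng = np.random.default_rng(0)
p = rng.random(n); p /= p.sum()       # an arbitrary probability distribution
assert np.linalg.norm(p @ A - u) <= lam2_bar * np.linalg.norm(p - u) + 1e-12
\end{verbatim}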

\subsection{Proof of Corollary \ref{cor:second-eigen-bound}}\label{subsec:fact}
\begin{proof}
It suffices to show that $1 - O(\frac{1}{n^2})$ is an upper bound on the second largest eigenvalue $\bar{\lambda}_2$ of the transition matrix of an undirected connected regular graph. Let $G$ be an undirected connected $d$-regular graph on $n$ vertices and let $A_G$ be its transition matrix. Suppose $\lambda_1 \geq \lambda_2 \geq \ldots \geq \lambda_n$ are the eigenvalues of $A_G$; we show that $\lambda_i \leq 1 - O(\frac{1}{n^2})$ for $i \geq 2$. Note that $\lambda_1 = 1$ and the corresponding normalized eigenvector is $X_1 = \frac{1}{\sqrt{n}}(1, 1, \ldots , 1)$. Consider any normalized real eigenvector $X \perp X_1$ with eigenvalue $\lambda$. Then $\sum_{i=1}^n x_{i}^2 = 1$ and $ \sum_{i=1}^n x_i = 0$, where $X = (x_1, x_2, \ldots ,x_n)$. Let $x_t$ and $x_s$ be the largest and smallest coordinates of $X$, respectively. Since $\sum_{i} x_i^2 = 1$, some coordinate has absolute value at least $1/\sqrt{n}$; replacing $X$ by $-X$ if necessary, we may assume $x_t \geq 1/\sqrt{n}$, and $x_s < 0$ because $\sum_{i} x_i = 0$. Let $s=v_1 \to v_2 \to \ldots \to v_k =t$ be the vertices on a shortest path from $s$ to $t$ in $G$, and consider $X(\mathbb{I} - A_G)X^T$. Then,
\begin{align*}
1 - \lambda & = X(\mathbb{I} - A_G)X^T = \frac{1}{d} \cdot \sum_{\{i, j\} \in E(G)} (x_i - x_j)^2 \\
& \geq \frac{1}{d} \cdot \sum_{i=1}^{k-1} (x_{v_i} - x_{v_{i+1}})^2 \\
& \geq \frac{1}{d(k - 1)} \cdot \left( \sum_{i = 1}^{k-1} (x_{v_i} - x_{v_{i+1}}) \right)^2 && [\text{by the Cauchy-Schwarz inequality}] \\
& = \frac{1}{d (k-1)} \cdot (x_s - x_t)^2 \\
& \geq \frac{1}{d D n}
\end{align*}
where the last step uses $(x_s - x_t)^2 \geq x_t^2 \geq \frac{1}{n}$ and $k-1 \leq D$, the diameter of the graph. It is a standard fact that the diameter of any connected regular graph is bounded by $O(\frac{n}{d})$. Hence, $\lambda \leq 1 - O(\frac{1}{n^2})$. 
\end{proof}
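As a quick sanity check of the $1 - O(\frac{1}{n^2})$ bound, the sketch below computes the gap $1 - \lambda_2$ on $n$-cycles; the cycle is a natural near-worst case among connected regular graphs, since its diameter is $\Theta(n)$. The product $(1 - \lambda_2)\cdot n^2$ stays bounded away from $0$ (it approaches $2\pi^2$), as the corollary predicts.
\begin{verbatim}
import numpy as np

# Transition matrix of the n-cycle (2-regular): step to i-1 or i+1 w.p. 1/2.
for n in (10, 20, 40, 80):
    A = (np.roll(np.eye(n), 1, axis=1) + np.roll(np.eye(n), -1, axis=1)) / 2
    lam = np.sort(np.linalg.eigvalsh(A))
    gap = 1.0 - lam[-2]               # spectral gap 1 - lambda_2
    print(n, gap, gap * n**2)         # gap * n^2 stays bounded below
\end{verbatim}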

\subsection{Monotonicity property of the distribution vector}\label{sec:monotonicity}
Let $\pi_x(t)$ denote the probability distribution vector of a simple random walk after $t$ steps when the initial distribution places probability $1$ at node $x$, and let $\pi$ denote the stationary distribution vector. We show in the following lemma that the vector $\pi_x(t)$ gets closer to $\pi$ as $t$ increases. 
\begin{lemma} %[\textbf{Monotonicity}]
\label{lem:monotonicity}
$||\pi_x(t+1) - \pi|| \leq  ||\pi_x(t) - \pi||$.
\end{lemma}
\begin{proof}
We need to show that the definition of mixing time is consistent, i.e., that the distance to stationarity is monotonic in $t$, the length of the random walk. Let $A$ be the transition matrix of a simple random walk on a $d$-regular dynamic graph $\mathcal{G}$, which may change from round to round. The entry $a_{ij}$ of $A$ denotes the probability of transitioning from node $i$ to node $j$. Monotonicity follows from the fact that for the transition matrix $A$ of any regular graph and any probability distribution vector $p$, $$||(p - \frac{\mathbf{1}}{n}) A|| \leq ||p - \frac{\mathbf{1}}{n}||,$$ with strict inequality when $p \neq \frac{\mathbf{1}}{n}$. This follows from Lemma~\ref{lem:lemma2} together with the fact that $\bar{\lambda}_2 < 1$.

Let $\pi$ be the stationary distribution of the matrix $A$. Then $\pi = (\frac{1}{n}, \frac{1}{n}, \ldots, \frac{1}{n})$. If $t$ is an $\epsilon$-near mixing time, then $||p A^t - \pi|| \leq \epsilon$, by definition of $\epsilon$-near mixing time. Now consider $||p A^{t+1} - \pi||$. This equals $||p A^{t+1} - \pi A||$ since $\pi A = \pi$, which in turn equals $||(p A^{t} - \pi) A|| \leq ||p A^t - \pi|| \leq \epsilon$. It follows that $t+1$ is also an $\epsilon$-near mixing time and $||p A^{t+1} - \pi|| \leq ||p A^t - \pi||$.
\end{proof}
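The lemma can be illustrated numerically even when the regular graph changes every round. In the sketch below, each round applies a fresh random symmetric doubly stochastic matrix with self-loops (our own convenient stand-in for the transition matrix of one round of a regular dynamic graph), and the $\ell_2$ distance to $\pi$ never increases.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
n = 16

def random_regular_step(n, rng):
    # Symmetric doubly stochastic matrix with self-loops: a stand-in for
    # the transition matrix of one round of a regular dynamic graph.
    P = np.eye(n)[rng.permutation(n)]   # random permutation matrix
    return 0.5 * np.eye(n) + 0.25 * (P + P.T)

pi = np.full(n, 1.0 / n)
p = np.zeros(n); p[0] = 1.0             # walk starts at node x = 0
dists = []
for t in range(30):
    dists.append(np.linalg.norm(p - pi))
    p = p @ random_regular_step(n, rng)
assert all(b <= a + 1e-12 for a, b in zip(dists, dists[1:]))
\end{verbatim}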


\section{Proof of Lemma \ref{lem:lyons}}
\begin{proof}
We first prove the following result for a $d$-regular dynamic network $\mathcal{G} = G_1, G_2, \ldots$. Let $Q_i$ denote the transition probability matrix of $G_i$, with self-loop probability $\alpha > 0$, and let $c= \min{\{\pi(x) Q_i(x,y) : x \neq y \mbox{ and }Q_i(x,y)>0\}}$; note that $c > 0$. For any $t \in \mathbb{N}$, write $Q^t$ for the product $Q_1Q_2 \ldots Q_t$, the transition matrix of the dynamic graph $\mathcal{G}$ after $t$ rounds. Then for any vertex $x$ and all $\hat{k} > 0$,

$$\bigl|\frac{Q^{\hat{k}}(x,x)}{\pi(x)} - 1\bigr| \le
\min\Bigl\{\frac{1}{\alpha c \sqrt{\hat{k}+1}}, \frac{1}{2\alpha^2 c^2(\hat{k}+1)} \Bigr\}\,.$$


This is the analogue for dynamic networks of Lyons' lemma (Lemma~3.4 in \cite{Lyons}) for static networks, and our approach follows his proof. Let $G = (V, E)$ be any $d$-regular graph and let $Q$ be its transition probability matrix. Write $$c_2(x, y) := \pi(x) Q^2(x, y)$$ and note that for $(x, y) \in E$, we have 
\begin{align*} 
c_2(x, y) \ge \pi(x) \bigl[Q(x, x) Q(x, y) + Q(x, y) Q(y, y)\bigr] \ge 2\alpha c.
\end{align*}
The first inequality holds because $Q^2(x,y) \geq Q(x, x) Q(x, y) + Q(x, y) Q(y, y)$; that is, in two steps a random walk token can either remain at $x$ and then move to the neighbour $y$, or move to $y$ first and stay there. The second inequality holds because $c \leq \pi(x)Q(x, y)$ by the definition of $c$, and $Q(x, x) = Q(y, y) = \alpha$.

We write $\ell^2(V, \pi)$ for the vector space $\mathbb{R}^{V}$ equipped with the inner product defined by $$(f_1, f_2)_{\pi} := \sum_{x \in V} f_1(x) f_2(x)\pi(x).$$ We regard elements of $\mathbb{R}^{V}$ as functions from $V$ to $\mathbb{R}$, and accordingly refer to eigenvectors of the matrix $Q$ as eigenfunctions. Recall that the transition matrix $Q$ is reversible with respect to the stationary distribution $\pi$ (since $G$ is a $d$-regular graph). The reason for introducing the above inner product is the following claim.
\begin{claim}\label{lem:lavin}
Let $Q$ be a reversible transition matrix with respect to $\pi$. Then the inner product space $\langle \ell^2(V, \pi), (\cdot , \cdot)_{\pi}\rangle$ has an orthonormal basis of real-valued eigenfunctions $\{f_j \}_{j=1}^{\vert V \vert}$ corresponding to real eigenvalues $\{\lambda_j\}$. 
\end{claim}
\begin{proof}
Denote by $(\cdot , \cdot)$ the usual inner product on $\mathbb{R}^V$, given by $(f_1, f_2) := \sum_{x \in V} f_1(x) f_2(x)$. For a regular graph, $Q$ is symmetric; a proof of the more general version, where $Q$ need not be symmetric, is given in Lemma~12.2 of~\cite{Levin}. The spectral theorem for symmetric matrices guarantees that the inner product space $\langle \ell^2(V, \pi), (\cdot , \cdot) \rangle$ has an orthonormal basis $\{\varphi_j\}_{j=1}^{\vert V \vert}$ such that $\varphi_j$ is an eigenfunction with real eigenvalue $\lambda_j$. It is known that $\sqrt{\pi}$ is an eigenfunction of $Q$ corresponding to the eigenvalue $1$; we set $\varphi_1 = \sqrt{\pi}$ and $\lambda_1 = 1$. If $D_{\pi}$ denotes the diagonal matrix with diagonal entries $D_{\pi}(x, x) = \pi(x)$, then $Q = D_{\pi}^{\frac{1}{2}} Q D_{\pi}^{-\frac{1}{2}}$ (here $\pi$ is uniform, so $D_{\pi}$ is a scalar matrix). Setting $f_j := D_{\pi}^{-\frac{1}{2}} \varphi_j$, we find that $f_j$ is an eigenfunction of $Q$ with eigenvalue $\lambda_j$. In fact: 
\begin{align*} 
Q f_j = Q D_{\pi}^{-\frac{1}{2}} \varphi_j =  D_{\pi}^{-\frac{1}{2}} (D_{\pi}^{\frac{1}{2}} Q D_{\pi}^{-\frac{1}{2}}) \varphi_j = D_{\pi}^{-\frac{1}{2}} Q \varphi_j  = D_{\pi}^{-\frac{1}{2}} \lambda_j \varphi_j = \lambda_j f_j
\end{align*}
Although the eigenfunctions $\{f_j\}$ are not necessarily orthonormal with respect to the usual inner product, they are orthonormal with respect to the inner product $(\cdot, \cdot)_{\pi}$: 
$$\delta_{ij} = (\varphi_i, \varphi_j) = (D_{\pi}^{\frac{1}{2}} f_i , D_{\pi}^{\frac{1}{2}} f_j) = (f_i, f_j)_{\pi}, $$ the first equality follows since $\{\varphi_j\}$ is orthonormal with respect to the usual inner product. 
\end{proof}

Let $\ell_0^2(V, \pi)$ be the orthogonal complement of the constants in $\ell^2(V, \pi)$. Note that $\textbf{1}$ is an eigenfunction of $Q$ and that $\ell_0^2(V, \pi)$ is invariant under $Q$. We now show, via the following claim, that each $f \in \ell^2_0(V, \pi)$ takes at least one nonnegative value and at least one nonpositive value. 

\begin{claim}\label{clm:dirichlets}
Let $G$ be an undirected connected $d$-regular graph on $n$ vertices with transition matrix $Q$. Let $\lambda_1 \geq \lambda_2 \geq \ldots \geq \lambda_n$ be the eigenvalues of $Q$ and let $X_1, X_2, \ldots, X_n$ be the corresponding eigenvectors. Then each eigenvector $X_i$, $i = 2, 3, \ldots, n$ $($i.e., other than $X_1)$, has at least one negative and at least one positive entry.   
\end{claim}
\begin{proof}
It is known that $\lambda_1 = 1$ and that the normalized eigenvector corresponding to $\lambda_1$ is $X_1 = \frac{1}{\sqrt{n}}(1, 1, \ldots , 1)$. The eigenvectors $\{X_i : i = 1, 2, \ldots,n\}$ form an orthonormal basis of the eigenspace. Hence any normalized eigenvector $X \in \{X_i : i=2, 3, \ldots, n \}$ satisfies $X \perp X_1$, so $\sum_{i=1}^n x_{i}^2 = 1$ and $ \sum_{i=1}^n x_i = 0$, where $X = (x_1, x_2, \ldots ,x_n)$. Let $x_l$ and $x_s$ be the largest and smallest coordinates of $X$, respectively. Then $x_l > 0$ and $x_s < 0$, which follows from these two relations.  
\end{proof}
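Claim~\ref{clm:dirichlets} is also easy to confirm numerically; the sketch below checks, on an $11$-cycle (an arbitrary small example of a connected regular graph), that every eigenvector other than the top one has entries of both signs.
\begin{verbatim}
import numpy as np

n = 11   # the 11-cycle, a connected 2-regular graph
A = (np.roll(np.eye(n), 1, axis=1) + np.roll(np.eye(n), -1, axis=1)) / 2
w, V = np.linalg.eigh(A)                # eigenvalues in ascending order
for j in range(n - 1):                  # every eigenvector except X_1
    x = V[:, j]
    assert x.min() < 0 < x.max()        # entries of both signs
\end{verbatim}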

Let $x_0$ be a vertex where $\vert f \vert$ achieves its maximum. By Claim~\ref{clm:dirichlets}, $f$ takes a value of the opposite sign (or zero) at some other vertex; since $G$ is connected, summing $\lvert f(x) - f(y) \rvert$ along a path between these two vertices and then over all edges, it follows that
\begin{align}
\label{equ-equation1}
\lVert f \rVert_{\infty} & =  \vert f(x_0) \vert \le \frac{1}{2} \sum_{(x,y) \in E} \lvert f(x) - f(y) \rvert \le \frac{1}{2} \sum_{x, y \in V} c_2(x, y) \lvert f(x) - f(y) \rvert/(2 \alpha c)
\end{align} 
where $\lVert \cdot \rVert_{\infty}$ denotes the supremum norm and the factor $1/2$ arises from counting each pair $(x,y)$ in each order. Take $f \in \ell^2_0(V, \pi)$ and notice that $\sum_{x,y \in V} c_2(x, y) = \sum_{x \in V} \pi(x) = 1$. Thus we obtain from inequality~(\ref{equ-equation1}), using the Cauchy-Schwarz inequality, that  
\begin{align*}
(2\alpha c)^2 \lVert f \rVert_{\infty}^2 &  \le \frac{1}{2} \sum_{x, y \in V} c_2(x, y) [f(x) - f(y)]^2 \\
& = \frac{1}{2} \sum_{x, y \in V} f(x)^2 \pi(x) Q^2(x ,y) \\ & - \sum_{x, y \in V} f(x) f(y) \pi(x) Q^2(x, y) \\ & + \frac{1}{2} \sum_{x, y \in V} f(y)^2 \pi(x) Q^2(x, y)
\end{align*}
By reversibility, $\pi(x) Q^2(x, y) = \pi(y)Q^2(y, x)$, so the first and last terms above are equal to the common value (indeed, using reversibility the last term becomes $\frac{1}{2} \sum_{x, y \in V} f(y)^2 \pi(y) Q^2(y, x)$):
$$ \frac{1}{2} \sum_{x \in V} f(x)^2 \pi(x) \sum_{y \in V} Q^2(x, y) = \frac{1}{2} \sum_{x \in V} f(x)^2 \pi(x). $$ 
Therefore the above inequality becomes,
\begin{align*}
(2\alpha c)^2 \lVert f \rVert_{\infty}^2 & \le \sum_{x \in V} f(x)^2 \pi(x) - \sum_{x \in V} f(x) \bigl[ \sum_{y \in V} f(y) Q^2(x, y) \bigr] \pi(x) \\
& = (f, f)_{\pi} - (f, Q^2 f)_{\pi} \\
& = ((\mathbb{I} - Q^2)f, f)_{\pi}.
\end{align*}
Alternatively, we may apply~(\ref{equ-equation1}) to the function $\mbox{sgn}(f)f^2$. Using the elementary inequality $$\lvert \mbox{sgn}(s)s^2 - \mbox{sgn}(t)t^2 \rvert \le \lvert s - t \rvert \cdot (\lvert s \rvert + \lvert t \rvert),$$ valid for any real numbers $s$ and $t$, we obtain that  
\begin{align*}
(2\alpha c)^2 \lVert f \rVert_{\infty}^4 & \le \left(\frac{1}{2} \sum_{x, y \in V} c_2(x, y) \lvert f(x) - f(y)\rvert \cdot (\lvert f(x) \rvert + \lvert f(y) \rvert) \right)^2 \\
& \le \left(\frac{1}{2} \sum_{x, y \in V} c_2(x, y) [f(x) - f(y)]^2 \right) \cdot \left(\frac{1}{2} \sum_{x, y \in V} c_2(x, y) [\lvert f(x) \rvert + \lvert f(y) \rvert]^2 \right) \\
& = ((\mathbb{I} - Q^2)f, f)_{\pi} \cdot ((\mathbb{I} + Q^2)\lvert f \rvert, \lvert f \rvert)_{\pi}
\end{align*}
by the Cauchy-Schwarz inequality and the same algebra as above. Now if $(f,f)_{\pi} \le 1$ then $((\mathbb{I} + Q^2)\lvert f \rvert, \lvert f \rvert)_{\pi} \le 2$: the eigenvalues of $Q^2$ lie in $[0,1]$, so those of $\mathbb{I} + Q^2$ lie in $[1,2]$, whence $((\mathbb{I} + Q^2)\lvert f \rvert, \lvert f \rvert)_{\pi} \le 2(\lvert f \rvert, \lvert f \rvert)_{\pi} \le 2$. Therefore, we have $2(\alpha c)^2 \lVert f \rVert_{\infty}^4 \le ((\mathbb{I} - Q^2)f, f)_{\pi}$.
Putting both these estimates together, we get 
\begin{equation}
\label{equ-ineque2}
2(\alpha c)^2 \max \{2\lVert f \rVert_{\infty}^2, \lVert f \rVert_{\infty}^4 \} \le ((\mathbb{I} - Q^2)f, f)_{\pi}
\end{equation} 
for $(f,f)_{\pi} = 1$. Next we show a monotonicity property of the random-walk probability distribution on a regular dynamic graph; this is needed in order to apply the above inequality to dynamic graphs. But first, the sketch below gives a quick numerical check of inequality~(\ref{equ-ineque2}). 
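The sketch uses the lazy ($\alpha = 1/2$) walk on a $12$-cycle, with $f$ drawn at random from $\ell^2_0(V, \pi)$ and normalised so that $(f,f)_{\pi} = 1$; the particular graph is our own arbitrary choice, made only for concreteness.
\begin{verbatim}
import numpy as np

n, d, alpha = 12, 2, 0.5
A = alpha * np.eye(n) + ((1 - alpha) / d) * (
    np.roll(np.eye(n), 1, axis=1) + np.roll(np.eye(n), -1, axis=1))
pi = np.full(n, 1.0 / n)
c = (1 - alpha) / (n * d)             # min over x != y of pi(x) Q(x, y)

rng = np.random.default_rng(2)
f = rng.standard_normal(n)
f -= f.mean()                         # project onto l_0^2(V, pi)
f /= np.sqrt((f**2 * pi).sum())       # normalise so that (f, f)_pi = 1

fmax = np.abs(f).max()
lhs = 2 * (alpha * c)**2 * max(2 * fmax**2, fmax**4)
rhs = (((np.eye(n) - A @ A) @ f) * f * pi).sum()   # ((I - Q^2) f, f)_pi
assert lhs <= rhs + 1e-12
\end{verbatim}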

\begin{claim}\label{clm:claim-norm}
Let $\mathcal{G} = G_1, G_2, \ldots$ be a $d$-regular, connected dynamic graph with the same set $V$ of nodes. Let $Q_i$ be the transpose of the transition matrix of $G_i$, and let the column vector $f = (p_1, p_2, \ldots ,p_n)^T$ be an arbitrary probability distribution on $V$. Then
$ \lVert (Q_{i+1}Q_{i} \ldots Q_1) f \rVert_\infty \le \lVert (Q_i Q_{i-1} \ldots Q_1) f \rVert_\infty $ for all $i \geq 1$.
\end{claim}
\begin{proof}
It is known that the transition matrix of any regular graph is doubly stochastic, and if a matrix $Q$ is doubly stochastic then so is $Q^2$. Let $(Q_i Q_{i-1} \ldots Q_1) f = (p_1^i, p_2^i, \ldots, p_n^i)^T$ and $\lVert (Q_{i}Q_{i-1} \ldots Q_{1}) f  \rVert_\infty = \max \{\lvert p_l^i \rvert : l= 1,2, \ldots, n \} = \vert p_h^i \vert$ (say). Then 
\begin{align*}
(Q_{i+1}Q_i \ldots Q_{1}) f = \bigl(\sum_{j \in N(1)} q_{1j}p_j^i, \sum_{j\in N(2)} q_{2j}p_j^i, \ldots, \sum_{j\in N(n)} q_{nj}p_j^i  \bigr)^T
\end{align*} where $N(v)$ is the set of neighbors of $v$ and $q_{lj}$ is the $lj$-th entry of the matrix $Q_{i+1}$. We show that the absolute value of every coordinate of $(Q_{i+1}Q_{i}\ldots Q_{1})f$ is at most $\vert p_h^i \vert$. Indeed, for any $l$, 
\begin{align*} \bigl\lvert \sum_{j\in N(l)} q_{lj}p_j^i \bigr\rvert \leq \sum_{j\in N(l)} \lvert q_{lj}\rvert \lvert p_j^i \rvert & \leq \lvert p_h^i \rvert \sum_{j\in N(l)} q_{lj} = \lvert p_h^i \rvert,
\end{align*} where the last equality holds because each row of the doubly stochastic matrix $Q_{i+1}$ sums to $1$. 
\end{proof}
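Claim~\ref{clm:claim-norm} can be illustrated numerically as follows: products of doubly stochastic matrices (below, random Birkhoff-style convex combinations of permutation matrices, an arbitrary stand-in for the $Q_i$'s) never increase the sup norm of a distribution.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(3)
n = 10

def doubly_stochastic(n, rng):
    # Random convex combination of permutation matrices: doubly stochastic.
    w = rng.dirichlet(np.ones(4))
    return sum(wi * np.eye(n)[rng.permutation(n)] for wi in w)

f = rng.dirichlet(np.ones(n))         # an arbitrary probability distribution
norms = [np.abs(f).max()]
for _ in range(20):
    f = doubly_stochastic(n, rng) @ f
    norms.append(np.abs(f).max())
assert all(b <= a + 1e-12 for a, b in zip(norms, norms[1:]))
\end{verbatim}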

Now we apply inequality~(\ref{equ-ineque2}) to $Q^l f$ for $l= 0,1, \ldots, \hat{k}$, where $Q^l f$ denotes $(Q_{l}Q_{l-1} \ldots Q_1) f$ and $Q^0 f$ means $f$. Summing these inequalities over $l= 0,1, \ldots, \hat{k}$, we get,
\begin{align*}
& (\hat{k} +1)2(\alpha c)^2\max \{2\lVert Q^{\hat{k}} f \rVert_{\infty}^2, \lVert Q^{\hat{k}} f \rVert_{\infty}^4 \}  \le 2(\alpha c)^2 \max \{2\sum_{l=0}^{\hat{k}} \lVert Q^l f \rVert_{\infty}^2, \sum_{l=0}^{\hat{k}} \lVert Q^{l} f \rVert_{\infty}^4 \}\\ 
& \text{[by the monotonicity property, Claim~\ref{clm:claim-norm}]}\\
& \le \sum_{l=0}^{\hat{k}} ((\mathbb{I} - Q^2)Q^l f, Q^l f)_{\pi} \hspace{0.7in}\text{[by inequality~(\ref{equ-ineque2})]}\\ & = \sum_{l=0}^{\hat{k}} ((\mathbb{I} - Q^2)Q^{2l} f, f)_{\pi} \\
& = ((\mathbb{I} - Q^{2\hat{k} + 2})f, f)_{\pi} \le 1 \hspace{0.7in}\text{[telescoping sum]}
\end{align*}
for $(f,f)_{\pi} = 1$. This shows that the norm of $Q^{\hat{k}} : \ell^2_0(V, \pi) \rightarrow \ell^{\infty}(V)$ is bounded by 
$$\beta_{\hat{k}} := \min\{ [(2\alpha c)^2(\hat{k} +1)]^{-1/2}, [(\alpha c)^2(2\hat{k} + 2)]^{-1/4} \}. $$
Next we derive the norm bound from $\ell^1(V, \pi)$ to $\ell^{\infty}(V)$, as required by the lemma.
Let $T: \ell^2(V, \pi) \rightarrow \ell^2_0(V, \pi)$ be the orthogonal projection $Tf := f - (f, \textbf{1})_{\pi} \textbf{1}$. By the result above, the norm of $Q^{\hat{k}} T : \ell^2(V, \pi) \rightarrow \ell^{\infty}(V)$ is bounded by $\beta_{\hat{k}}$. By duality, the same bound holds for $TQ^{\hat{k}} : \ell^1(V, \pi) \rightarrow \ell^2(V, \pi)$. Composing these maps, we deduce that the norm of $Q^{\hat{k}} T Q^{\hat{k}} : \ell^1(V, \pi) \rightarrow \ell^{\infty}(V)$ is at most $\beta^2_{\hat{k}}$ and the norm of $Q^{\hat{k}} T Q^{\hat{k}+1} : \ell^1(V, \pi) \rightarrow \ell^{\infty}(V)$ is at most $\beta_{\hat{k}} \beta_{\hat{k}+1}$. Applying these inequalities to $f := \textbf{1}_x/\pi(x)$ gives the required bound for any $x$:
 $$\bigl|\frac{Q^{\hat{k}}(x,x)}{\pi(x)} - 1\bigr| \le \min\Bigl\{\frac{1}{\alpha c \sqrt{\hat{k}+1}}, \frac{1}{2\alpha^2 c^2(\hat{k}+1)} \Bigr\} \hspace{0.5in}\text{[since $Tf := f - (f, \textbf{1})_{\pi} \textbf{1}$]}$$  
 
Hence we have
\begin{align*}
& \bigl|\frac{Q^{\hat{k}}(x,y)}{\pi(y)} - 1\bigr| \le \frac{1}{\alpha c \sqrt{\hat{k}+1}}, \\
\text{i.e.,} \hspace{0.3in} & Q^{\hat{k}}(x,y) \le \frac{\pi(y)}{\alpha c \sqrt{\hat{k}+1}} + \pi(y) =  \frac{\pi(y)}{c} \left( \frac{1}{\alpha \sqrt{\hat{k}+1}} + c \right) \\
\text{hence} \hspace{0.3in} & Q^{\hat{k}}(x,y) \le \frac{\pi(y)}{c} \left( \frac{1}{\alpha \sqrt{\hat{k}+1}} + \frac{1}{2m} \right) \hspace{0.5in} \text{[since $c = 1/nd = 1/2m$]} \\
\text{hence} \hspace{0.3in} & Q^{\hat{k}}(x,y) \le \frac{\pi(y)}{c} \left( \frac{1}{\alpha \sqrt{\hat{k}+1}} + \frac{1}{2}\sqrt{\frac{\rho}{\hat{k}}} \right) \hspace{0.5in} \text{[by our assumption, $\hat{k} \leq \rho m^2$]} \\
\text{hence} \hspace{0.3in} & Q^{\hat{k}}(x,y) \le \frac{\pi(y)}{c} \left( \frac{1}{\alpha \sqrt{\hat{k}+1}} + \sqrt{\frac{\rho}{2(\hat{k}+1)}} \right) \hspace{0.5in} \text{[since $\sqrt{2\hat{k}} \geq \sqrt{\hat{k}+1}$ for $\hat{k} \geq 1$]} \\
\text{i.e.,} \hspace{0.3in} & Q^{\hat{k}}(x,y) \le \frac{\pi(y)}{c\sqrt{\hat{k}+1}} \left( \frac{1}{\alpha} + \sqrt{\frac{\rho}{2}} \right) \\
\text{hence} \hspace{0.3in} & Q^{\hat{k}}(x,y) \le \frac{4\pi(y)}{c\sqrt{\hat{k}+1}} \hspace{0.5in} \text{[taking self-loop probability $\alpha = 1/2$ and $\rho = 8$]}
 \end{align*}
 
Therefore, $$Q^{\hat{k}}(x,y) \le \frac{4\pi(y)}{c\sqrt{\hat{k}+1}} = \frac{4d}{\sqrt{\hat{k}+1}},$$ which completes the proof.
\end{proof}
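Finally, the return-probability bound at the start of this proof can be checked numerically in the static case, to which each round of the dynamic case reduces. The sketch below uses a lazy ($\alpha = 1/2$) walk on a $4$-regular circulant graph; the graph is our own assumption, chosen only to give a concrete example, with $c$ computed from its definition above.
\begin{verbatim}
import numpy as np

n, d, alpha = 24, 4, 0.5
A = np.zeros((n, n))
for i in range(n):
    for o in (1, -1, 2, -2):
        A[i, (i + o) % n] = (1 - alpha) / d
A += alpha * np.eye(n)                # lazy self-loops

pi = 1.0 / n                          # uniform stationary distribution
c = pi * (1 - alpha) / d              # min over x != y of pi(x) Q(x, y)
Qk = np.eye(n)
for k in range(1, 200):
    Qk = Qk @ A
    lhs = abs(Qk[0, 0] / pi - 1)
    rhs = min(1 / (alpha * c * np.sqrt(k + 1)),
              1 / (2 * alpha**2 * c**2 * (k + 1)))
    assert lhs <= rhs + 1e-12
\end{verbatim}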




