% !TEX root = ipdps-main.tex

\section{Pseudocode of Algorithm for Estimating Probability (cf. Section~\ref{sec:prob-estimate})}
\begin{algorithm}[H]
\caption{\sc EstimateProbability}
\label{alg:randomwalk}
\textbf{Input:} Starting node $s$, length $\ell$, and number of walks $K$.\\% = \Theta(n \log n/\eps)$.\\
\textbf{Output:} $\tilde p(i)$ for each node $i$, which is an estimate of $p(i)$ with explicit bound on additive error.\\
\begin{algorithmic}[1]
%\STATE Each node $t$ maintains a counter number $\eta_t$ to count the number of walks land over it. 

\STATE  Node $s$ creates $K$ tokens of random walks and performs them simultaneously for $\ell$ steps as follows. 

\FOR{each round from $1$ to $\ell$}   

\STATE A node holding random walk tokens samples a random neighbor corresponding to each token and subsequently sends the appropriate {\em count} to each neighbor. (Note that tokens do not contain any node IDs.)  
%\COMMENT{$M$ is the mixing time of the graph.}
\ENDFOR

\STATE Each node $i$ counts the number of tokens that landed on it --- let this count be $\eta_i$.   

\STATE Each node estimates the probability $\tilde p(i)$ as $\frac{\eta_i}{K}$. 


%\STATE Each node $t$ outputs $\tilde p(i)$.

\end{algorithmic}

\end{algorithm}  


\section{Proof of Lemma \ref{lem:probability-accuracy}}
\begin{lemma}
If the probability of an event $X$ occurring is $p$, then in $t = 4 n^2 \log n/\eps^2$ trials, the fraction of times the event $X$ occurs is $p \pm \frac{\eps}{n}$ with high probability. 
\end{lemma}
\begin{proof}
The proof follows from a Chernoff bound: $$ \Pr \left[\frac{1}{t} \sum_{i=1}^t X_i < (1 - \delta)p \right] < \left(\frac{e^{-\delta}}{(1-\delta)^{(1-\delta)}} \right)^{tp} < e^{-tp\delta^2/2}$$ and 
$$\Pr \left[\frac{1}{t} \sum_{i=1}^t X_i > (1 + \delta)p \right] < \left(\frac{e^{\delta}}{(1+ \delta)^{(1+ \delta)}} \right)^{tp},$$ where $X_1, X_2, \ldots, X_t$ are $t$ independent identically distributed $0-1$ random variables such that $\Pr[X_i = 1] = p$ and $\Pr[X_i = 0] = (1-p)$. The right hand side of the upper tail bound further reduces to $2^{-\delta t p}$ for $\delta > 2e -1$ and for $\delta <2e - 1$, it reduces to $e^{-tp\delta^2/4}$. 

Let us choose $t = 4n^2\log n/\eps^2$, and $\delta =  \frac{\eps}{pn}$. Consider two cases: when $pn \leq \eps$ and when $pn > \eps$. When $pn \leq \eps$, the lower tail bound automatically holds as $pn - \eps < 0$. In this case, $\delta > 1$, so we consider the weaker bound of the upper tail, which is $2^{- \delta t p}$. We get $2^{- \delta t p} = 2^{- \eps t/n} = 2^{- 4 n \log n/\eps} = \frac{1}{n^{(4n/\eps)}}$. Now consider the case when $pn > \eps$. Here, $\delta < 1$ is small and hence the lower and upper tail bounds are $e^{-tp\delta^2/2}$ and $e^{-tp\delta^2/4}$. Therefore, between these two, we go with the weaker bound of $e^{-tp\delta^2/4} = e^{- \frac{tp \eps^2}{4p^2n^2}} = e^{- \frac{1}{p}\log n} = 1/n^{\Theta(1)}$. 
\end{proof}

\section{Proof of Lemma \ref{lem:time-randomwalk}}
\begin{lemma}
Algorithm {\sc EstimateProbability} (cf. Algorithm \ref{alg:randomwalk}) finishes in $O(\ell)$ rounds, if the number of walks $K$ is at most polynomial in $n$.   
\end{lemma}
\begin{proof}
To prove this, we first show that there is no congestion in the network if we perform at most a polynomial number of random walks from $s$. This follows because, in the algorithm, each node only needs to count the number of random walk tokens that end on it. Therefore, nodes do not need to know from which source node, or from where, they receive the random walk tokens. Hence it is not necessary to send the ID of the source node with the token. Since we consider the CONGEST model, the count of a polynomial (in $n$) number of tokens (i.e., a count of value up to a polynomial in $n$) can be sent in one message through each edge without any congestion. Therefore, one round is enough to perform one step of random walk for all $K$ walks in parallel, where $K$ is at most polynomial in $n$. This implies that $K$ random walks of length $\ell$ can be performed in $O(\ell)$ rounds. Hence the lemma.
\end{proof} 

\section{Pseudocode of Algorithm for Estimating Personalized PageRank (cf. Section \ref{sec:pagerank-estimate})}
\begin{algorithm}[!ht]
\caption{\sc EstimatePageRank}
\label{alg:pr-walk}
\textbf{Input:} Source node $s$, reset probability $\alpha$, and number of walks $K$.\\% = \Theta(n \log n/\alpha)$.\\
\textbf{Output:} Approximate PageRank $\tilde{p}(v)$ of each node $v$.\\
\begin{algorithmic}[1]

\STATE Node $s$ floods the value $K = n^4\log n$, the number of random walks to be performed to all other nodes.

\STATE Source node $s$ creates $K$ random walk tokens and performs these simultaneously. All walks keep moving in parallel until they TERMINATE. 

\STATE Every node maintains a counter number $\eta_v$ for counting visits of random walks to it. 

\WHILE{there is at least one (alive) token}

\STATE This is $i$-th round. Each node $v$ holding at least one token does the following: Consider each random walk token $\mathcal{C}$ held by $v$ which is received in the $(i-1)$-th round. Generate a random number $r \in [0, 1]$.

\IF{$r< \alpha$} 
\STATE Terminate the token $\mathcal{C}$.
\ELSE
\STATE Select an outgoing neighbor uniformly at random, say $u$. Increment by one the counter $T^v_u$, where the variable $T^v_u$ indicates the number of tokens (or random walks) chosen to move to the neighbor $u$ from $v$ in the $i$-th round.    
\ENDIF

\STATE Send the token's counter number $T^v_u$ to the respective outgoing neighbor $u$. 

\STATE Every node $u$ adds the total counter number ($\sum_{v \in N(u)} T^v_u$---which is  the total number of visits of random walks to $u$ in $i$-th round) to $\eta_u$.

\ENDWHILE

\STATE Each node outputs its personalized PageRank as $\frac{\eta_v \alpha}{K}$.

\end{algorithmic}

\end{algorithm}  


\section{Proof of Lemma \ref{lem:time-pr-walk}}
\begin{lemma}
Algorithm {\sc EstimatePageRank} (cf. Algorithm \ref{alg:pr-walk}) computes personalized PageRank in $\tilde O(\frac{1}{\alpha})$ rounds with high probability, where $\alpha$ is the reset probability.   
\end{lemma}
\begin{proof}
To prove the lemma, we first show that there is no congestion in the network if the source node starts at most a polynomial (in $n$) number of random walks simultaneously. This is because nodes only send the `count' number of random walk tokens in the algorithm. The process is similar to that in Section~\ref{sec:prob-estimate}, where we estimate the landing probability distribution using the same technique. Hence the claim on the congestion part follows from the proof of Lemma \ref{lem:time-randomwalk}. 

Now it is clear that the algorithm stops when all the walks terminate. Since the termination probability is $\alpha$, in expectation a walk terminates after $1/\alpha$ steps, and with high probability (via the Chernoff bound) a walk terminates in $O(\log n/\alpha)$ rounds; by a union bound \cite{MU-book-05}, all walks (since they are only polynomially many) terminate in $O(\log n/\alpha)$ rounds with high probability as well. Since all the walks are moving in parallel and there is no congestion, all the walks in the graph terminate in $O(\log n/\alpha)$ rounds whp. 
\end{proof} 


\section{Proof of Theorem \ref{thm:pr-sparsecut}}\label{sec:pr-global} 
 \begin{theorem}
%\label{thm:pr-sparsecut}
Given any graph $G$ with conductance at most $\phi$, there is a PageRank based algorithm that computes a cut set of conductance at most $\tilde O(\sqrt{\phi})$ with high probability in $\tilde O(\frac{1}{b}(\frac{1}{\phi} + n))$ rounds, where $b$ is the balance of the cut.  
\end{theorem}
\begin{proof}
The algorithm runs in two phases for each of $O(\log n/b)$ source nodes. The first phase is for computing PageRank, and it takes $\tilde O(\frac{\log n}{\phi})$ rounds with high probability (cf. Lemma \ref{lem:time-pr-walk}). The second phase is similar to Algorithm \ref{alg:sparsecut}: computing partitions according to the PageRank, and then computing the conductances of all partitions; all this can be done in $\tilde O(n + D)$ rounds. Hence in total we have $\tilde O(1/\phi + n)$ rounds with high probability, since the diameter $D \leq n$. Therefore, over all the $O(\log n/b)$ source nodes, the running time of the PageRank based algorithm is $\tilde O(\frac{1}{b}(\frac{1}{\phi} + n))$ rounds with high probability.    
\end{proof}  



\iffalse
\section{Estimate Mixing Time}
We explain a simple approach to compute the mixing time of an undirected connected graph $G$. We present an algorithm to estimate the mixing time of a graph from a specified source node. That is, we want to find a length $\ell$ such that, after performing a random walk of length $\ell$ from a source node $x$, the probability distribution over the vertex set becomes close to the stationary distribution. The formal definitions are given later (cf. Definition \ref{def:mixing-time}). We denote the mixing time for source node $x$ by $M^x$. The idea is to perform many random walks of length $\ell$ from node $x$ in a naive way\footnote{In each step, move to a random neighbor from the current node.}. At the end, each node estimates its probability as the fraction of random walks that stop on it. Then the distribution at length $\ell$ is compared with the stationary distribution to determine if they are sufficiently close; otherwise, the length $\ell$ is doubled and the process is retried. Note that the stationary distribution of an undirected graph is a well defined quantity, namely $\bigl(\frac{\deg(v_1)}{2m}, \frac{\deg(v_2)}{2m}, \ldots, \frac{\deg(v_n)}{2m}\bigr)$, where $\deg(i)$ is the degree of node $i$. 


\begin{definition}\label{def:mixing-time} [$M^x(1/2e)$ (mixing time for source $x$), $M(1/2e)$ (mixing time of the graph)]\\
Define $M^x = \min \{t : ||\pi_x(t) - \pi||_1 < 1/2e\}$, where $\pi_x(t)$ denotes the probability distribution vector reached after $t$ steps when the initial distribution starts with probability $1$ at node $x$, and $\pi = \bigl(\frac{\deg(v_1)}{2m}, \frac{\deg(v_2)}{2m}, \ldots, \frac{\deg(v_n)}{2m}\bigr)$ is the stationary distribution vector. $||\cdot||_1$ is the $L_1$ norm. The mixing time of the graph is denoted by $M$ and is defined by $M = \max_{x \in V} M^x$. Clearly, $M^x \leq M$. 
\end{definition}


\begin{algorithm}
\caption{\sc EstimateMixingTime}
\label{alg:mixing-time}
\textbf{Input:} A graph $G = (V, E)$ and a source node $x$.\\
\textbf{Output:} $M^x$, mixing time for the source node $x$.\\
\begin{algorithmic}[1]

\STATE Node $x$ floods the value $K = 80 n^8 \log n$, the number of random walks. 

\FOR{$h =1,2, \dots$}
\STATE \label{stp:creat-token} $\ell \leftarrow 2^{h}$   

\STATE Node $x$ creates $K = 80 n^8 \log n$ random walk tokens and performs the random walks simultaneously for $\ell$ steps as follows:

%\STATE  Node $s$ creates $K$ tokens of random walks and perform random walk simultaneously for $\ell$ step.

\STATE In every round, a node $v$ holding at least one token does the following: for each token, select a neighbor uniformly at random, say $u$. Increment by one the counter $T_u^v$, which indicates the number of tokens chosen to move to $u$ from $v$ in this round. 

\STATE Send the counter number $T^v_u$ to the neighbor $u$.    

\STATE After $\ell$ rounds, each node $v$ counts the total number of tokens it holds; say it is $\zeta_v$. 

\STATE Each node $v$ sends the absolute difference value $\partial_v = |\frac{\zeta_v}{K} - \frac{d(v)}{2m}|$ to node $x$ for aggregation. 
 
\STATE Node $x$ checks locally:

\IF{ $(\sum_v \partial_v < 1/2e)$}

\STATE Output $\ell$.
\STATE BREAK;

\ELSE

\STATE continue from step \ref{stp:creat-token} (doubling the length $\ell$).

\ENDIF

\ENDFOR

\end{algorithmic}

\end{algorithm}  

\subsection{Analysis of the Algorithm}
We want to approximate $M^x$. We note that the definition of $M^x$ is consistent due to the following standard monotonicity property of distributions.
\begin{lemma}\label{lem:monotonicity}
$||\pi_x(t+1) - \pi||_1 \leq  ||\pi_x(t) - \pi||_1$.
\end{lemma}
\begin{proof}
The monotonicity follows from the fact that
$||Ap||_1 \le ||p||_1$ where $A$ is the transpose of the transition probability matrix of the graph and $p$ is any probability vector. That is, $A(i,j)$ denotes the probability of transitioning from node $j$ to node $i$. This in turn follows from the fact that the sum of entries of any column of $A$ is 1.

Now let $\pi$ be the stationary distribution of the transition matrix $A$. This implies that if $\ell$ is $\epsilon$-near mixing, then $||A^{\ell}u - \pi||_1 \leq \epsilon$, by definition of $\epsilon$-near mixing time. Now consider $||A^{\ell+1}u - \pi||_1$. This is equal to $||A^{\ell+1}u - A\pi||_1$ since $A\pi = \pi$.  However, this reduces to $||A(A^{\ell}u - \pi)||_1 \leq \epsilon$. It follows that $(\ell+1)$ is $\epsilon$-near mixing.
\end{proof}

Now we show a result which implies that our algorithm correctly estimates the mixing time for a source. Let $p^l_i$ be the probability that a random walk starting from the source node reaches node $i$ at the $l$-th step. In particular, we show that the above process (cf. Algorithm \ref{alg:mixing-time}) approximates $p^l_i$ for all $i$. Let us ignore those $p^l_i$ such that $p^l_i < 1/n^4$. There might be at most $n$ such vertices; hence the sum of these probabilities is less than $1/n^3$, which is very small. Therefore, in the following lemma we prove that by performing $K = 80 n^8 \log n$ random walks from the source node $x$, the above algorithm can estimate $p^l_i$ for all $i$ such that $p^l_i \geq 1/n^4$. 

\begin{lemma}\label{lem:play-with-chernoff}
If the probability of an event $X$ occurring is $p$ such that $p \geq 1/n^4$, then in $t = 80 n^8 \log n$ trials, the fraction of times the event $X$ occurs is $p \pm \frac{1}{n^6}$ with high probability.  
\end{lemma}    
\begin{proof}
The proof easily follows from a variant of the Chernoff bound $$ \Pr \left[\frac{1}{t} \sum_{i=1}^t X_i < (1 - \delta)p \right] < \left(\frac{e^{-\delta}}{(1-\delta)^{(1-\delta)}} \right)^{tp} < e^{-tp\delta^2/2}$$ and 
$$\Pr \left[\frac{1}{t} \sum_{i=1}^t X_i > (1 + \delta)p \right] < \left(\frac{e^{\delta}}{(1+ \delta)^{(1+ \delta)}} \right)^{tp},$$ where $X_1, X_2, \ldots, X_t$ are $t$ independent identically distributed $0 - 1$ random variables such that $\Pr[X_i = 1] = p$ and $\Pr[X_i = 0] = (1-p)$. The right hand side of the upper tail bound further reduces to $2^{-\delta t p}$ for $\delta > 2e -1$ and for $\delta <2e - 1$, it reduces to $e^{-tp\delta^2/4}$. 

Let $\delta =  \frac{1}{n^2}$. Then $\delta < 1$, so we consider the weaker of both the lower and upper tail bounds, which is $e^{-tp\delta^2/4}$. Therefore, by choosing $t = 80 n^8\log n$, we get  $e^{-tp\delta^2/4} \leq e^{- \frac{t}{4n^8}} = e^{- 20\log n} = O(1/n^{20})$. Hence the lemma follows. 
\end{proof}

Our algorithm starts with $\ell=2$ and runs $K= 80 n^8 \log n$ random walks of length $\ell$ from the specified source node $x$. When the difference (i.e., the $L_1$ distance) between the $\ell$-length walk distribution and the stationary distribution is $\geq 1/12e$, $\ell$ is doubled and the check is repeated. This process is repeated to identify the largest $\ell$ such that the difference is $\geq 1/12e$ and the smallest $\ell$ such that the difference is $< 1/12e$. The output length $\ell$ would be the exact length, since we show that in every step the approximated probability is close to the original probability for every node (this follows from the above Lemma \ref{lem:play-with-chernoff}). These give lower and upper bounds on the required $M^x$, respectively. The resulting theorem is presented below.

\begin{theorem}\label{thm:mix-time}
Given a graph with diameter $D$, a node $x$ can find $M^x(1/2e)$ in $\tilde{O}(M^x + D)$ rounds. 
 %a time $\tilde M^x$ such $M^x(1/2e) \leq \tilde M^x \leq M^x(\delta)$, where $\delta = \frac{1}{Ce \log n}$
\end{theorem}
\begin{proof}
For undirected unweighted graphs, the
stationary distribution of the random walk is known and is
$\frac{deg(i)}{2m}$ for node $i$ with degree $deg(i)$, where $m$ is
the number of edges in the graph. 
To find the approximate mixing time, we try out
increasing values of $\ell$ that are powers of $2$.  Once we find the
right consecutive powers of $2$, the monotonicity property admits a
binary search to determine the exact value. Since each node knows its own stationary probability (determined just by its
degree), it can compute the difference $\partial$ locally and send it to $x$. The node $x$ eventually wants to compute the aggregate of these $\partial$ values, which can be done in $O(D)$ rounds. Also, initially we send $K$ to all the nodes, which takes $O(D)$ rounds (this is done only once). Hence, for a particular $\ell$, it requires $O(\ell + D)$ rounds to check whether the distribution is close to the stationary distribution. Therefore, the total time required to compute the mixing time for a source node is $O(\sum_{i = 1}^{\log M^x} (\ell + D))$ rounds, which is $\tilde O(M^x + D)$ rounds as $\ell$ is at most $M^x$.  
\end{proof}
\fi

\endinput


