% !TEX root = pagerank.tex

% This is LLNCS.DOC the documentation file of
% the LaTeX2e class from Springer-Verlag
% for Lecture Notes in Computer Science, version 2.4
\documentclass{llncs}
\usepackage{llncsdoc}
%

%\documentclass[11pt]{article}
%\documentclass{sig-alternate}
\usepackage{algorithm}
\usepackage{algorithmic}

\usepackage{subfigure}
\usepackage{epsfig,amsmath,color, amsfonts}
\usepackage{epsfig,color}
\newcommand{\xxx}[1]{\textcolor{red}{#1}}
%\usepackage{fullpage}
\usepackage{framed}
%\usepackage{epsf}
%\usepackage{hyperref}

\iffalse
%\setlength{\textheight}{9.4in} \setlength{\textwidth}{6.55in}
\setlength{\textheight}{9.2in} \setlength{\textwidth}{6.55in}
%\setlength{\topmargin}{0in}

\voffset=-0.9in
\hoffset=-0.8in


\newtheorem{theorem}{Theorem}[section]
%\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
%\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
\theoremstyle{observation}\newtheorem{observation}[theorem]{Observation}
\fi

\newcommand{\comment}[1]{}
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
\newcommand{\theoremref}[1]{(\ref{#1})}
\newenvironment{proof1}{\noindent \mbox{}{\bf Proof:}}{\QED}
%\newenvironment{observation}{\mbox{}\\[-10pt]{\sc Observation.} }%
%{\mbox{}\\[5pt]}


\def\m{{\rm min}}
%\def\m{\bar{m}}
\def\eps{{\epsilon}}
\def\half{{1\over 2}}
\def\third{{1\over 3}}
\def\quarter{{1\over 4}}
\def\polylog{\operatorname{polylog}}
\newcommand{\ignore}[1]{}
\newcommand{\eat}[1]{}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}

\newcommand{\algorithmsize}[0]{}

%---------------------
%  SPACE SAVERS
%---------------------

%\usepackage{times}
%\usepackage[small,compact]{titlesec}
%\usepackage[small,it]{caption}

\newcommand{\squishlist}{
 \begin{list}{$\bullet$}
  { \setlength{\itemsep}{0pt}
     \setlength{\parsep}{3pt}
     \setlength{\topsep}{3pt}
     \setlength{\partopsep}{0pt}
     \setlength{\leftmargin}{1.5em}
     \setlength{\labelwidth}{1em}
     \setlength{\labelsep}{0.5em} } }
\newcommand{\squishend}{
  \end{list}  }

%---------------------------------
% FOR MOVING PROOFS TO APPENDIX
%\usepackage{answers}
%%\usepackage[nosolutionfiles]{answers}
%\Newassociation{movedProof}{MovedProof}{movedProofs}
%\renewenvironment{MovedProof}[1]{\begin{proof}}{\end{proof}}

\def\e{{\rm E}}
\def\var{{\rm Var}}
\def\ent{{\rm Ent}}
\def\eps{{\epsilon}}
\def\lam{{\lambda}}
\def\bone{{\bf 1}}
\newcommand{\pr}{PageRank }



\begin{document}


\title{Fast Distributed \pr Computation}

%\begin{titlepage}
\iffalse
\author{Atish {Das Sarma} \thanks{eBay Research Labs, eBay Inc., CA, USA.
\hbox{E-mail}:~{\tt atish.dassarma@gmail.com}} \and  Anisur Rahaman Molla \thanks{Division of Mathematical
Sciences, Nanyang Technological University, Singapore 637371. \hbox{E-mail}:~{\tt anisurpm@gmail.com}.} \and Gopal Pandurangan \thanks{Division of Mathematical
Sciences, Nanyang Technological University, Singapore 637371 and Department of Computer Science, Brown University, Providence, RI 02912, USA. \hbox{E-mail}:~{\tt gopalpandurangan@gmail.com}. 
Supported in part by the following research grants: Nanyang Technological University grant M58110000, Singapore Ministry of Education (MOE) Academic Research Fund (AcRF) Tier 2 grant MOE2010-T2-2-082, and a grant from the US-Israel Binational Science
Foundation (BSF).
} \and  Eli Upfal \thanks{Department of Computer Science, Brown University, Providence, RI 02912, USA. \hbox{E-mail}:~{\tt eli@cs.brown.edu}.}}
\fi
\date{}

\author{Atish {Das Sarma}\inst{1} \and Anisur Rahaman Molla\inst{2} \and Gopal Pandurangan\inst{3} \and  Eli Upfal\inst{4}}
\institute{eBay Research Labs, eBay Inc., CA, USA.~\email{atish.dassarma@gmail.com}
\and Division of Mathematical Sciences, Nanyang Technological University, Singapore 637371.~\email{anisurpm@gmail.com}
\and Division of Mathematical Sciences, Nanyang Technological University, Singapore 637371 and Department of Computer Science, Brown University, Providence, RI 02912, USA.~\email{gopalpandurangan@gmail.com} 
%\thanks{Supported in part by the following research grants: Nanyang Technological University grant M58110000, Singapore Ministry of Education (MOE) Academic Research Fund (AcRF) Tier 2 grant MOE2010-T2-2-082, and a grant from the US-Israel Binational Science
%Foundation (BSF).}
\and Department of Computer Science, Brown University, Providence, RI 02912, USA.~\email{eli@cs.brown.edu}}
%\maketitle \thispagestyle{empty}

%\vspace*{-0.4in}


\maketitle

\begin{abstract}
Over the last decade, PageRank has gained importance in a wide range of applications and domains, ever since it first proved to be  effective in determining node importance in large graphs (and was a pioneering idea behind Google's search engine). In distributed computing alone, PageRank vectors, or more generally random walk based quantities have been used for several different applications ranging from determining important nodes, load balancing, search, and identifying connectivity structures. 
Surprisingly, however, there has been little work towards designing provably efficient fully-distributed algorithms for computing PageRank. The difficulty is that traditional matrix-vector multiplication style iterative methods may not always adapt well to the distributed setting owing to communication bandwidth restrictions and convergence rates. 
%Therefore, PageRank computation using Monte Carlo method is more appropriate in a distributed model with bandwidth constraints. 

In this paper, we present fast random walk-based distributed algorithms  for computing PageRank in  general  graphs  and prove strong bounds on the round complexity.  We first present an algorithm that  takes $O(\log n/\eps)$ rounds with high probability on any graph (directed or undirected), where $n$ is the network size and $\eps$ is the reset probability used in the PageRank computation (typically $\eps$ is a fixed constant).  We then present a faster algorithm that takes
$O(\sqrt{\log n}/\eps)$ rounds in undirected graphs. %We further show distributed algorithms with improved guarantees for undirected general graphs.
Both of the above algorithms  are  scalable, as each node processes and sends only small (polylogarithmic in $n$, the network size) number of  bits per round and hence work in the {\sc CONGEST} distributed computing model. 
For directed graphs, we present an algorithm that has a running time of $O(\sqrt{\log n/\eps})$, but it requires
a polynomial number of bits to be processed and sent per node in a round.
 To the best of our knowledge, these are the first fully distributed algorithms for computing PageRank vectors with provably efficient running time. 
\end{abstract}
%\end{titlepage}
%\input{abstract}

{\bf Keywords:} PageRank, Distributed Algorithm,  Random Walk, Monte Carlo Method
\input{introduction}

\input{background}

%\input{related}

\vspace{-0.15in}
\section{A Distributed Algorithm for PageRank}\label{sec:simple-algo}
\vspace{-0.15in}
We present a Monte Carlo based distributed algorithm for computing PageRank distribution of a network \cite{mcm-avrachenkov}. The main idea of our algorithm (formal pseudocode is given in Algorithm \ref{alg:simple-pagerank-walk}) is as follows. Perform $K$ ($K$ will be fixed appropriately later) random walks starting from each node of the network in parallel. In each round, each random walk independently  goes to a random (outgoing) neighbor with probability $1-\eps$ and with the remaining probability (i.e., $\eps$) terminates in the current node. (Henceforth, we call
this random walk a {\em `PageRank random walk'}. This random walk can be shown to be equivalent to one based on the PageRank transition matrix $P$ (defined in Section 2.2) \cite{mcm-avrachenkov}.)   Since $\eps$ is the probability of termination of a walk in each round, the expected length of every walk is $1/\eps$ and the length  will be at most $O(\log n/\eps)$ with high probability.  Let every node $v$ count the number of visits (say, $\zeta_v$) of all the walks that go through it. Then, after termination of all walks in the network, each node $v$ computes (estimates) its PageRank $\pi_v$ as $\tilde \pi_v = \frac{\zeta_v \eps}{n K}$. Notice that $\frac{nK}{\eps}$ is the (expected) total number of visits over all nodes of all the $n K$ walks. The above idea of counting the number of visits is a standard technique to approximate PageRank (see e.g., \cite{mcm-avrachenkov,ppr-bahmani2010}).  

We show in the next section that the above algorithm approximates the PageRank vector $\pi$ accurately (with high probability) for an appropriate value of $K$. The main technical challenge in implementing the above method is  that performing many walks from each node in parallel can create a lot of congestion. Our algorithm uses a crucial idea to overcome the congestion. We show (cf. Lemma \ref{lem:congestion}) that there will be no congestion in the network even if we start a polynomial number of random walks from every node in parallel. The main idea is based on the Markovian (memoryless) properties of the random walks and the process that terminates the random walks. To calculate how many walks move from  node $i$ to node $j$, node $i$ only needs to know the number of walks that reached it. It does not need to know the sources of these walks or the transitions that they took before reaching node $i$.  Thus it is enough to  send the {\em count} of the number of walks that pass through a node. The algorithm runs till all the walks are terminated. It is easy to see that it finishes in $O(\log n/\eps)$ rounds with high probability (this is because the maximum length of any walk is $O(\log n/\eps)$ whp). Then every node $v$ outputs its \pr as the ratio between the number of visits (denoted by $\zeta_v$) to it and the total number of visits $(\frac{nK}{\eps})$ over all nodes of all the walks.  We show that our algorithm computes approximate PageRank accurately in $O(\log n/\eps)$ rounds with high probability (cf. Theorem \ref{thm:main-round}).  

%Gopal --- Better to change the algo name to --- "Simple-PageRank--Algorithm".
%Anisur ---Done!
\vspace{-0.2in}
\newcommand{\mindegree}[0]{\delta}
\begin{algorithm}[htb]
\caption{\sc Simple-PageRank-Algorithm}
\label{alg:simple-pagerank-walk}
\textbf{Input (for every node):} Number of walks $K = c\log n$ from each node (where $c =  \frac{2}{\delta' \eps}$ and $\delta'$ is defined in Section \ref{sec:correctness}), reset probability $\eps$.\\
\textbf{Output:} PageRank of each node.\\

\textbf{[Each node $v$ starts $c\log n$ walks. All walks keep moving in parallel until they terminate. The termination probability of each walk is $\eps$, so the expected length of each walk is $1/\eps$.]}
\begin{algorithmic}[1]
\STATE Initially, each node $v$ in $G$ creates $c\log n$ messages (called coupons) $C_1,C_2, \ldots,C_{c\log n}$. Each node also maintains a counter $\zeta_v$ (for counting visits of  random walks to it). 

\WHILE{there is at least one (alive) coupon}
%\FOR{each node $v$ holding at least one coupon}
\STATE This is the $i$-th round. Each node $v$ holding at least one coupon does the following: Consider each coupon $C$ held by $v$ which is received in the $(i-1)$-th round. 
%\FOR{each coupon $C$}
Generate a random number $r \in [0, 1]$.
\IF{$r< \eps$} 
\STATE Terminate the coupon $C$.
\ELSE
\STATE Select an outgoing neighbor uniformly at random, say $u$. Add one to the coupon counter $T^v_u$, where the variable $T^v_u$ indicates the number of coupons (or random walks)  chosen to move to the neighbor $u$ from $v$ in the $i$-th round.    
\ENDIF

%\STATE  This is the $i$-th round. Each node $v$ does the following: Consider each coupon $C$ held by $v$ which is received in the $(i-1)$-th round. Now $v$ picks a outgoing neighbor $u$ uniformly at random and forwards $C$ to $u$ 
%\ENDFOR
\STATE Send the coupon's counter number $T^v_u$ to the respective outgoing neighbors $u$. 
\STATE Every node $u$ adds the total counter number ($\sum_{v \in N(u)} T^v_u$---which is  the total number of visits of random walks to $u$ in the $i$-th round) to $\zeta_u$.
%\ENDFOR
\ENDWHILE

\STATE Each node outputs its PageRank as $\frac{\zeta_v \eps}{c n \log n}$.


%\FOR{$i=1$ to $\lambda$}

%\STATE This is the $i$-th iteration. Each node $v$ does the following: Consider each coupon $C$ held by $v$ which is received in the $(i - 1)$-th iteration. If the coupon $C$'s desired walk length is at most $i$, then $v$ keeps this coupon ($v$ is the desired destination). Else, $v$ picks a neighbor $u$ uniformly at random and forwards $C$ to $u$.

%\ENDFOR

\end{algorithmic}

\end{algorithm}


\vspace{-0.4in}
\subsection{Analysis}
Our algorithm computes the PageRank of each node $v$ as $\tilde \pi_v = \frac{\zeta_v \eps}{n K}$ and we say that $\tilde \pi_v$ approximates the original PageRank $\pi_v$. We first focus on the correctness of our approach and then analyze the running time. 
\vspace{-0.15in}
\subsection{Correctness of PageRank Approximation}\label{sec:correctness}
\vspace{-0.03in}
The correctness of the above approximation follows directly from the main result of \cite{mcm-avrachenkov} (see Algorithm $4$ and Theorem $1$) and also from \cite{ppr-bahmani2010} (Theorem $1$). In particular, it is mentioned in \cite{mcm-avrachenkov,ppr-bahmani2010} that the approximate \pr value is quite good even for $K = 1$. It is easy to see that the expected value of $\tilde \pi_v$ is $\pi_v$ (e.g., \cite{mcm-avrachenkov}). In \cite{ppr-bahmani2010} (Theorem $1$), it is shown that $\tilde \pi_v$ is sharply concentrated around $\pi_v$ using a Chernoff bound  technique \cite{MU-book-05}. They show, 
\begin{equation}\label{equ:convergence}
\Pr[\,\lvert \tilde \pi_v - \pi_v \rvert \geq \delta \pi_v\,] \leq e^{-nK\pi_v \delta'}
\end{equation}
where $\delta'$ is a constant depending on $\eps$ and $\delta$. 
%\paragraph{\bf Value of $K$.}
From the above bound (cf. Equation \ref{equ:convergence}), we see that for $K = \frac{2\log n}{\delta' n\pi_{min}}$, we get a sharp approximation of PageRank vector with high probability.  
%$\pi_v = \Omega(\ln n/n)$ which is actually slightly larger than the expected PageRank value $1/n$. For $K=O(\ln n/n\pi_{min})$, we can get a very good approximation of the full PageRank vector $\pi$.
Since the \pr of any node is at least $\eps/n$ (i.e., the minimum \pr value satisfies $\pi_{min} \geq \eps/n$), this gives $K = \frac{2\log n}{\delta' \eps}$. %\footnote{Note that $\ln n = O(\log n)$.}. 
For simplicity we set the constant $c =  \frac{2}{\delta' \eps}$. Therefore, it suffices to perform $c\log n$ \pr random walks from each node. Now we focus on the running time of our algorithm. 
\vspace{-0.2in}
\subsection{Time Complexity}\label{sec:complexity}
From the above section we see that our algorithm is able to compute the PageRank vector $\pi$ in $O(\log n/\eps)$ rounds with high probability %(this is because the length of one walk is $O(\log n/\eps)$ whp) 
if we perform $c\log n$ walks from each node in parallel without any congestion.  The lemma below guarantees that there will be no congestion even if we do a polynomial number of walks in parallel.   

%Gopal --- Actually set the correct value of K in the algorithm above and just thalk about that value. No two values of K. Is it 1 or what?
%Anisur: Fixed K to $c\log n$---done!

\begin{lemma}\label{lem:congestion}
There is no congestion in the network if every node starts at most a polynomial number of random walks in parallel. 
\end{lemma}
\begin{proof}
It follows from our algorithm that each node only needs to count the number of visits of random walks to itself. Therefore nodes do not need to know from which source node, or from where, they receive the random walk coupons. Hence there is no need to send the ID of the source node with the coupon. Recall that in our algorithm,  in each round, every node currently holding at least one random walk coupon (could be many) does the following.  For each coupon, either the walk is terminated with probability $\eps$ or, with the remaining probability $1-\eps$, an outgoing edge is chosen uniformly at random to send the coupon. Any particular outgoing edge may be chosen for more than one coupon. Instead of sending each coupon separately through that edge, the algorithm simply sends the count, i.e., the number of coupons, to the chosen outgoing neighbor. Since we consider the {\em CONGEST} model, a count of up to a polynomial (in $n$) number of coupons requires only $O(\log n)$ bits and hence can be sent in one message through each edge without any congestion.   
\qed 
\end{proof}

\begin{theorem}\label{thm:main-round}
The algorithm {\sc Simple-PageRank-Algorithm} (cf. Algorithm \ref{alg:simple-pagerank-walk}) computes PageRank in $O(\frac{\log n}{\eps})$ rounds with high probability. 
\end{theorem}
\begin{proof}
The algorithm stops when all the walks terminate. Since the termination probability is $\eps$, a walk terminates after $1/\eps$ steps in expectation, and with high probability (via the Chernoff bound) the walk terminates in $O(\log n/\eps)$ rounds; by the union bound \cite{MU-book-05}, all walks (they are only polynomially many) terminate
in $O(\log n/\eps)$ rounds whp. Since all the walks are moving in parallel and there is no congestion (cf. Lemma \ref{lem:congestion}), all the walks in the network terminate in $O(\log n/\eps)$ rounds whp. Hence the algorithm stops in $O(\log n/\eps)$ rounds whp.  The correctness of the PageRank approximation follows  from \cite{mcm-avrachenkov,ppr-bahmani2010} as discussed earlier in Section \ref{sec:correctness}.
\qed
\end{proof}

\vspace{-0.3in}
\input{undirected-algo}

\vspace{-0.3in}
\input{directed-algo}

%\begin{theorem}\label{thm:1-walk}
%The {\sc Single-Random-Walk} (cf.
%Algorithm~\ref{alg:single-random-walk}) with transition probability defined by $P_{uv} = \left(\frac{1-\beta}{n} + \frac{\beta}{d_u} \right)$ finishes in $O(\frac{\lambda \eta D \log n}{n \pi_{\min}} + \frac{\ell D}{\lambda} + \frac{\ell D}{\eta})$ rounds.
%\end{theorem}
\vspace{-0.3in}
\input{conclusion}

%\newpage

  \let\oldthebibliography=\thebibliography
  \let\endoldthebibliography=\endthebibliography
  \renewenvironment{thebibliography}[1]{%
    \begin{oldthebibliography}{#1}%
      \setlength{\parskip}{0ex}%
      \setlength{\itemsep}{0ex}%
  }%
  {%
    \end{oldthebibliography}%
  }
%{ \small
{%\tiny
\bibliographystyle{abbrv}
\bibliography{Distributed-RW}
}

\end{document}
