\documentclass[11pt]{article}
%\documentclass{sig-alternate}
\usepackage{algorithm}
\usepackage{algorithmic}

\usepackage{subfigure}
\usepackage{epsfig,amsthm,amsmath,color, amsfonts}
\usepackage{epsfig,color}
\newcommand{\xxx}[1]{\textcolor{red}{#1}}
%\usepackage{fullpage}
\usepackage{framed}
%\usepackage{epsf}
%\usepackage{hyperref}

%\setlength{\textheight}{9.4in} \setlength{\textwidth}{6.55in}
\setlength{\textheight}{9.2in} \setlength{\textwidth}{6.55in}
%\setlength{\topmargin}{0in}

\voffset=-0.9in
\hoffset=-0.8in

\newtheorem{theorem}{Theorem}[section]
%\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{claim}[theorem]{Claim}
%\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
\theoremstyle{definition}\newtheorem{example}[theorem]{Example}
\theoremstyle{definition}\newtheorem{definition}[theorem]{Definition}
% amsthm has no ``observation'' theorem style (valid styles: plain, definition, remark);
% use the remark style so the preamble compiles.
\theoremstyle{remark}\newtheorem{observation}[theorem]{Observation}

\newcommand{\comment}[1]{}
\newcommand{\QED}{\mbox{}\hfill \rule{3pt}{8pt}\vspace{10pt}\par}
%\newcommand{\eqref}[1]{(\ref{#1})}
\newcommand{\theoremref}[1]{(\ref{#1})}
\newenvironment{proof1}{\noindent \mbox{}{\bf Proof:}}{\QED}
%\newenvironment{observation}{\mbox{}\\[-10pt]{\sc Observation.} }%
%{\mbox{}\\[5pt]}

\def\m{{\rm min}}
%\def\m{\bar{m}}
\def\eps{{\epsilon}}
\def\half{{1\over 2}}
\def\third{{1\over 3}}
\def\quarter{{1\over 4}}
\def\polylog{\operatorname{polylog}}
\newcommand{\ignore}[1]{}
\newcommand{\eat}[1]{}
\newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor}
\newcommand{\ceil}[1]{\left\lceil #1 \right\rceil}

\newcommand{\algorithmsize}[0]{}

%---------------------
%  SPACE SAVERS
%---------------------

\usepackage{times}
\usepackage[small,compact]{titlesec}
\usepackage[small,it]{caption}

\newcommand{\squishlist}{
 \begin{list}{$\bullet$}
  { \setlength{\itemsep}{0pt}
     \setlength{\parsep}{3pt}
     \setlength{\topsep}{3pt}
     \setlength{\partopsep}{0pt}
     \setlength{\leftmargin}{1.5em}
     \setlength{\labelwidth}{1em}
     \setlength{\labelsep}{0.5em} } }
\newcommand{\squishend}{
  \end{list}  }

%---------------------------------
% FOR MOVING PROOFS TO APPENDIX
%\usepackage{answers}
%%\usepackage[nosolutionfiles]{answers}
%\Newassociation{movedProof}{MovedProof}{movedProofs}
%\renewenvironment{MovedProof}[1]{\begin{proof}}{\end{proof}}

\def\e{{\rm E}}
\def\var{{\rm Var}}
\def\ent{{\rm Ent}}
\def\eps{{\epsilon}}
\def\lam{{\lambda}}
\def\bone{{\bf 1}}



\begin{document}


\title{Page Rank Computation in Distributed Model}

%\begin{titlepage}


\date{}

\maketitle \thispagestyle{empty}

\vspace*{.4in}


%\maketitle % duplicate: \maketitle is already issued above


\section{Complexity bounds}
\begin{lemma}\label{lem:lemma1}
For the PageRank stationary distribution $\pi$ we have $\frac{\pi_v}{d_v} = O(\frac{1}{n})$ for every vertex $v$, where $\pi_v$ and $d_v$ are the stationary probability and the degree of vertex $v$, respectively. 
\end{lemma}
\begin{proof}
Let $P$ be the transition probability matrix of $G$. Then $P_{uv} = \frac{1-\beta}{n} + \frac{\beta}{d_u}$ if $u \sim v$, and $P_{uv} = \frac{1-\beta}{n}$ otherwise, for some probability $\beta$. Let $v^*$ be the vertex such that $\frac{\pi_{v^*}}{d_{v^*}} = \max_v \frac{\pi_v}{d_v}$. \\
\textbf{Case 1:} $\frac{\pi_{v^*}}{d_{v^*}} \leq \frac{1-\beta}{n} $. Then clearly the required bound holds. \\
\textbf{Case 2:} $\frac{\pi_{v^*}}{d_{v^*}} > \frac{1-\beta}{n} $. Then write $\frac{\pi_{v^*}}{d_{v^*}} = \frac{1-\beta}{n} + \gamma$ for some $\gamma > 0$. Therefore, 

\begin{align*}
\pi_{v^*} = (P^{T} \pi)_{v^*} & = \sum_{u}P_{uv^*}\pi_{u} \\
& =  \sum_{u} \frac{1-\beta}{n}\,\pi_u + \sum_{u \sim v^*} \frac{\beta}{d_u}\,\pi_u  \\
& = \frac{1-\beta}{n} +  \beta \sum_{u \sim v^*} \frac{\pi_u}{d_u}  \\
\Rightarrow \frac{\pi_{v^*}}{d_{v^*}} & \leq \frac{1}{d_{v^*}}\left[ \frac{1-\beta}{n} + \beta d_{v^*} \left( \frac{1- \beta}{n} + \gamma \right) \right] \\
\Rightarrow \frac{\pi_{v^*}}{d_{v^*}} & \leq \frac{1-\beta}{n d_{v^*}} + \beta \left( \frac{1- \beta}{n} + \gamma \right) \\
\Rightarrow \frac{1-\beta}{n} + \gamma & \leq \frac{1-\beta}{n d_{v^*}} + \beta \left( \frac{1- \beta}{n} + \gamma \right) \\
 \Rightarrow \gamma \left( 1 - \beta \right) & \leq  \frac{1- \beta}{n d_{v^*}} + \frac{\beta (1-\beta)}{ n} -  \frac{1- \beta}{n} \\
 \Rightarrow \gamma & \leq \frac{1} {n d_{v^*}} +  \frac{\beta}{n} -  \frac{1}{n} \\
 & \leq \frac{\beta}{n}
\end{align*} 

This implies, $$\frac{\pi_v}{d_v} \leq \frac{\pi_{v^*}}{d_{v^*}} = \frac{1-\beta}{n} + \gamma \leq \frac{1-\beta}{n} + \frac{\beta}{n} = \frac{1}{n}.$$ 
Therefore, $\frac{\pi_v}{d_v} = O(\frac{1}{n})$.
\end{proof}

\begin{lemma}\label{lem:lemma1a}
For the PageRank stationary distribution $\pi$ we have $\frac{\pi_v}{d_v} = O(\frac{1}{nd_v})$ for every vertex $v$, where $\pi_v$ and $d_v$ are the stationary probability and the degree of vertex $v$, respectively. 
\end{lemma}
\begin{proof}
Let $P$ be the transition probability matrix of $G$. Then $P_{uv} = \frac{1-\beta}{n} + \frac{\beta}{d_u}$ if $u \sim v$, and $P_{uv} = \frac{1-\beta}{n}$ otherwise, for some probability $\beta$. Let $v^*$ be the vertex such that $\frac{\pi_{v^*}}{d_{v^*}} = \max_v \frac{\pi_v}{d_v}$. Then
\begin{align*}
\pi_{v^*} = (P^{T} \pi)_{v^*} & = \sum_{u}P_{uv^*}\pi_{u} \\
& =  \sum_{u} \frac{1-\beta}{n}\,\pi_u + \sum_{u \sim v^*} \frac{\beta}{d_u}\,\pi_u  \\
& = \frac{1-\beta}{n} +  \beta \sum_{u \sim v^*} \frac{\pi_u}{d_u}  \\
\Rightarrow \frac{\pi_{v^*}}{d_{v^*}} & \leq \frac{1}{d_{v^*}}\left[ \frac{1-\beta}{n} + \beta d_{v^*} \frac{\pi_{v^*}}{d_{v^*}} \right] \\
\Rightarrow \frac{\pi_{v^*}}{d_{v^*}} & \leq \frac{1-\beta}{n d_{v^*}} + \beta \frac{\pi_{v^*}}{d_{v^*}} \\
\Rightarrow \frac{\pi_{v^*}}{d_{v^*}} (1 - \beta) & \leq \frac{1-\beta}{n d_{v^*}} \\
 \Rightarrow \frac{\pi_{v^*}}{d_{v^*}} & \leq  \frac{1}{n d_{v^*}}
\end{align*}
 This implies, $$\frac{\pi_v}{d_v} \leq \frac{\pi_{v^*}}{d_{v^*}} \leq \frac{1}{n d_{v^*}}.$$ Note, however, that the further step $\frac{1}{n d_{v^*}} \leq \frac{1}{n d_{v}}$ would require $d_{v^*} \geq d_v$, which need not hold in general; this gap must be closed before the claimed bound follows.
Therefore, $\frac{\pi_v}{d_v} = O(\frac{1}{nd_v})$.
\end{proof}

\begin{lemma}\label{lem:lemma2}
For any $\pi$, Phase 1 finishes in
$O(\frac{\lambda \eta D \log n}{n \pi_{\min}} )$ rounds with high probability.
\end{lemma}
\begin{proof}
Let $\rho = \frac{\eta}{\pi_{\min}} $. We consider the case when each node $v$ creates $\rho \pi_v \geq \eta$ messages. We show that the lemma holds even in this case. \\
Suppose for each message $M$, any $j=1, 2, ..., \lambda$, and any edge $e$, we define $X_M^j(e)$ to be a random variable having value
1 if $M$ is sent through $e$ in the $j^{th}$ iteration (i.e., when the counter on $M$ has value $j-1$). Let $X^j(e)=\sum_{M:
\text{message}} X_M^j(e)$.  We compute the expected number of messages that go through an edge, see claim below.

\begin{claim}\label{claim:first} For any edge $e$ and any $j$,
$\mathbb{E}[X^j(e)]\leq 2\frac{\rho}{n}$.
\end{claim}
\begin{proof}
Assume that each node $v$ starts with $\rho \pi_v$ messages. Each
message takes a random walk. We prove that after any given number of
steps $j$, the expected number of messages at node $v$ is still
$\rho \pi_v$.  Consider the random walk's probability transition
matrix, call it $A$. In this case $A^{T}u = u$ for the vector $u$ whose
$v$-th entry is $\pi_v$ (since $\pi$ is the stationary distribution). Now the number of messages we started
with at any node $v$ is proportional to its stationary probability;
therefore, in expectation, the number of messages at any node
remains the same.

To calculate $\mathbb{E}[X^j(e)]$, notice that edge $e$ will receive
messages from its two end points, say $x$ and $y$. The number of
messages it receives from node $x$ in expectation is $\rho \pi_x \times P_{xy} = \rho \left[ \pi_x (\frac{1-\beta}{n}) + \beta \frac{\pi_x}{d_x} \right] \leq \rho \left[ (\frac{1-\beta}{n}) + \beta \frac{\pi_x}{d_x} \right]  \leq \frac{\rho}{n}$ (using $\pi_x \leq 1$ and Lemma~\ref{lem:lemma1}). Hence the claim follows.
\end{proof}

By Chernoff's bound (e.g., in~\cite[Theorem~4.4]{MU-book-05}), for
any edge $e$ and any $j$,
$$\mathbb{P}[X^j(e)\geq 4\frac{\rho}{n} \log{n}]\leq 2^{-4\log{n}}=n^{-4}.$$
It follows that the probability that there exists an edge $e$ and an
integer $1\leq j\leq \lambda$ such that $X^j(e)\geq 4\frac{\rho}{n} \log{n}$ is
at most $|E(G)| \lambda n^{-4}\leq \frac{1}{n}$ since $|E(G)|\leq
n^2$ and $\lambda\leq \ell\leq n$ (by the way we define $\lambda$).

Now suppose that $X^j(e)\leq 4\frac{\rho}{n} \log{n}$ for every edge $e$ and
every integer $j\leq \lambda$. This implies that we can extend all
walks of length $i$ to length $i+1$ in $4D \frac{\rho}{n} \log{n}$ rounds [The term $D$ is coming as we may have to send message to someone far away, not neighbors].
Therefore, we obtain walks of length $\lambda$ in
$4D\lambda \frac{\rho}{n} \log{n} = O(\frac{\lambda \eta D \log n}{n \pi_{\min}})$
rounds as claimed. 
\end{proof}

\begin{lemma}
It can be shown that for any $\pi$, Phase~1 finishes in
$O((\frac{\lambda \eta D \log n}{ \pi_{\min}})(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}))$ rounds with high probability.
\end{lemma}
\begin{proof}
Let $\rho = \frac{\eta}{\pi_{\min}} $. We consider the case when each node $v$ creates $\rho \pi_v \geq \eta$ messages. We show that the lemma holds even in this case. \\
Suppose for each message $M$, any $j=1, 2, ..., \lambda$, and any edge $e$, we define $X_M^j(e)$ to be a random variable having value
1 if $M$ is sent through $e$ in the $j^{th}$ iteration (i.e., when the counter on $M$ has value $j-1$). Let $X^j(e)=\sum_{M:
\text{message}} X_M^j(e)$.  We compute the expected number of messages that go through an edge, see claim below.

\begin{claim}\label{claim:second} For any edge $e$ and any $j$,
$\mathbb{E}[X^j(e)]\leq 2\rho(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n})$.
\end{claim}
\begin{proof}
Assume that each node $v$ starts with $\rho \pi_v$ messages. Each
message takes a random walk. We prove that after any given number of
steps $j$, the expected number of messages at node $v$ is still
$\rho \pi_v$.  Consider the random walk's probability transition
matrix, call it $A$. In this case $A^{T}u = u$ for the vector $u$ whose
$v$-th entry is $\pi_v$ (since $\pi$ is the stationary distribution). Now the number of messages we started
with at any node $v$ is proportional to its stationary probability;
therefore, in expectation, the number of messages at any node
remains the same.

To calculate $\mathbb{E}[X^j(e)]$, notice that edge $e$ will receive
messages from its two end points, say $x$ and $y$. The number of
messages it receives from node $x$ in expectation is $\rho \pi_x \times P_{xy} = \rho \left[ \pi_x (\frac{1-\beta}{n}) + \beta \frac{\pi_x}{d_x} \right] \leq \rho \left[ \left(\frac{(1-\beta)d_x}{n^2}\right) + \frac{\beta}{n} \right]  \leq \rho\left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right)$ (using Lemma~\ref{lem:lemma1}, i.e., $\pi_x/d_x \leq 1/n$, for both terms). Hence the claim follows.
\end{proof}

By Chernoff's bound, for
any edge $e$ and any $j$,
$$\mathbb{P}[X^j(e)\geq 4 \rho \left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right) \log{n}]\leq 2^{-4\log{n}}=n^{-4}.$$
It follows that the probability that there exists an edge $e$ and an
integer $1\leq j\leq \lambda$ such that $X^j(e)\geq 4 \rho \left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right) \log{n}$ is
at most $|E(G)| \lambda n^{-4}\leq \frac{1}{n}$ since $|E(G)|\leq
n^2$ and $\lambda\leq \ell\leq n$ (by the way we define $\lambda$).

Now suppose that $X^j(e)\leq 4 \rho \left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right) \log{n}$ for every edge $e$ and
every integer $j\leq \lambda$. This implies that we can extend all
walks of length $i$ to length $i+1$ in $4D \rho \left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right) \log{n}$ rounds [The term $D$ is coming as we may have to send message to someone far away, not neighbors].
Therefore, we obtain walks of length $\lambda$ in
$4D\lambda \rho \left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right) \log{n} = O\left((\frac{\lambda \eta D \log n}{\pi_{\min}}) \left(\frac{(1-\beta)\Delta}{n^2} + \frac{\beta}{n}\right)\right)$
rounds as claimed. 
\end{proof}


\begin{lemma}\label{lem:get-more-walks}
For any $v$, {\sc Get-More-Walks($v$, $\eta$, $\lambda$)} always
finishes within $O(\lambda D)$ rounds.
\end{lemma}
\begin{proof}
Consider any node $v$ during the execution of the algorithm. If it
contains $x$ copies of the source ID, for some $x$, it has to pick
$x$ of its neighbors or non-neighbors at random, and pass the source ID to each of
these $x$ nodes. Although it might pass these messages to fewer
than $x$ nodes, it sends only the source ID and a {\em count} to
each node, where the count represents the number of copies of
source ID it wishes to send to such node. Note that there is
only one source ID as one node calls {\sc Get-More-Walks} at a time.
Therefore, there is no congestion and thus the algorithm terminates
in $O(\lambda D)$ rounds as we may have to send messages to some node far away. 
\end{proof}

\begin{lemma}\label{lem:Sample-Destination}
{\sc Sample-Destination} always finishes within $O(D)$ rounds.
\end{lemma}
\begin{proof}
Constructing a BFS tree clearly takes only $O(D)$ rounds. In the
second phase, the algorithm wishes to {\em sample} one of the many
tokens (having its ID) spread across the graph. The sampling is done
while retracing the BFS tree starting from leaf nodes, eventually
reaching the root. The main observation is that when a node receives
multiple samples from its children, it only sends one of them to its
parent. Therefore, there is no congestion. The total number of
rounds required is therefore the number of levels in the BFS tree,
$O(D)$. The third phase of the algorithm can be done by broadcasting
(using a BFS tree) which needs $O(D)$ rounds.
\end{proof}

\begin{theorem}\label{thm:1-walk}
The {\sc Single-Random-Walk} (cf.
Algorithm~\ref{alg:single-random-walk}) with transition probability defined by $P_{uv} = \left(\frac{1-\beta}{n} + \frac{\beta}{d_u} \right)$ finishes in $O(\frac{\lambda \eta D \log n}{n \pi_{\min}} + \frac{\ell D}{\lambda} + \frac{\ell D}{\eta})$ rounds.
\end{theorem}

\section{Work plan}
Theory part:
\begin{itemize}
\item Improvement of general bound. At least include $\beta$ in the Phase~1 round complexity.
\end{itemize}
Experiment part:
\begin{itemize}
\item Plot for round complexity
\item Plot for message complexity
\item Plot for showing that it converges fast 
\item accuracy: higher pagerank can have higher chance to estimate more accurately
\end{itemize}


\end{document}
