\documentclass[]{article}
\usepackage{algpseudocode}
%opening
\title{Cohen and Lewis approximation}
\author{}

\begin{document}

\maketitle

\begin{abstract}
Combining the approximate matrix multiplication method of Cohen and Lewis [CL]
with communication-avoiding parallel sparse matrix multiplication (CRoP).

\end{abstract}

\section{Summary of [CL]}
Given $n$-by-$n$ matrices $A$ and $B$, we estimate the product $AB$ by
weighted sampling of all the $n^3$ terms that contribute to some entry. (In case
of sparse matrix products, we may see this as sampling the non-zero terms of the
outer products.) That is, the term $(i,k,j)$ is sampled with probability
proportional to $A[i,k] \times B[k,j]$. After a large number of samples the number of
times a term of the form $(i,-,j)$ is sampled is proportional, in expectation, to
$(AB)[i,j]$. By scaling we get unbiased estimators for each entry. The values of
large entries will be rather precise, whereas the small (and especially zero)
entries have a larger relative error.

\section{Summary of CRoP}
Entries of the output matrix are implicitly split into $p$ groups, such that $(AB)[i,j]$ belongs to group $(h_1[i]+h_2[j]) \bmod p$, where $h_1$ and
$h_2$ are arrays with random entries in $\{0,\dots,p-1\}$. One core (or machine) is
responsible for computing each output group. This is done one outer product at a
time. A simplified description follows. 
Consider the product of a (sparse) column vector $v$ and a (sparse) row vector $w$. We can sort the non-zero entries of $v$ according to $h_1$, to get a vector $v'$. Then, for each non-zero entry $w_j$, processor number $t$ can search for non-zero entries $v'_i$ such that $(h_1[i]+h_2[j]) \bmod p = t$. These entries are placed in an interval $v'[a..b]$, the start and end points of which can be found in logarithmic time. Now the terms of the outer product of the form $w_j \times v'_i$, where $i$ is in $[a..b]$, can be processed in some way (for example, added to a \emph{SpaceSaving} data structure). For each entry in $v'$ one needs to remember the corresponding entry in $v$, so that the contribution to the outer product is correctly assigned to an output entry.

\section{Combining CL and CRoP}
The idea is that instead of traversing the interval $v'[a..b]$ we sample from it, such that $v'[i]$ is sampled with probability proportional to its value. This will improve speed if the interval is sufficiently large. For efficient sampling we may use a precomputed prefix-sum array $vs$, where $vs[i] = v'[1] + \dots + v'[i]$. Choose a random value $r$ uniformly between $vs[a-1]$ and $vs[b]$. Then binary search for the sample value $i$ which satisfies $vs[i-1] < r \leq vs[i]$. The probability that we sample a specific value $i$ is proportional to $v'[i]$, as desired. The number of things sampled from $v'[a..b]$ should be proportional to $w_j$, i.e., $C \times w_j$ for some fixed $C$, in expectation. The higher $C$ is, the more samples will be taken. In the common case where $C \times w_j$ is not an integer we may take one sample with probability equal to the fractional part of $C \times w_j$, to get the right expected number.
\end{document}
