\documentclass[]{article}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{float}
\usepackage[cm]{fullpage}
%\usepackage{algorithmic}
%\usepackage[retainorgcmds]{IEEEtrantools}
%opening
\title{Cohen and Lewis approximation}
\author{Andreas Bok Andersen}
\begin{document}
\maketitle

\begin{abstract}
Combining the approximate matrix multiplication method of Cohen and Lewis~\cite{Cohen1999} [CL]
with communication-avoiding parallel sparse matrix multiplication (CRoP).
\end{abstract}

\section{Summary of [CL]}
Given $n$-by-$n$ matrices $A$ and $B$, we estimate the product $AB$ by
weighted sampling of all the $n^3$ terms that contribute to some entry. (In case
of sparse matrix products, we may see this as sampling the non-zero terms of the
outer products.) That is, the term $(i,k,j)$ is sampled with probability
proportional to $A[i,k] \times B[k,j]$. After a large number of samples the number of
times a term of the form $(i,\cdot,j)$ is sampled is proportional, in expectation, to
$(AB)[i,j]$. By scaling we get unbiased estimators for each entry. The values of
large entries will be rather precise, whereas the small (and especially zero)
entries have a larger relative error.

\section{Summary of CRoP}
Entries of the output matrix are implicitly split into $p$ groups, such that $(AB)[i,j]$ belongs to group $(h_1[i]+h_2[j]) \bmod p$, where $h_1$ and $h_2$ are arrays with random entries in $\{0,\dots,p-1\}$. One core (or machine) is responsible for computing each output group. This is done one outer product at a time. A simplified description follows. 
Consider the product of a (sparse) column vector $v$ and a (sparse) row vector $w$. We can sort the non-zero entries of $v$ according to $h_1$, to get a vector $v'$. Then, for each non-zero entry $w_j$ processor number $t$ can search for non-zero entries $v'_i$ such that $(h_1[i]+h_2[j]) \bmod p = t$. By keeping an array of accumulated counts for $h_1[i]$ we are able to obtain the interval $v'[a..b]$ in $v'$ for entries mapped to a specific processor $t$ in constant time. This is done by solving the equation $h_1[i] = (p + t - h_2[j]) \bmod p$, where $p$ is the number of processors. 
Now the terms of the outer product of the form $w_j \times v'_i$ where \textit{i} is in $[a..b]$ can be processed in some way. 

Each entry in $v$ is saved as a tuple $(h_1(a_i), a_i, col[a_i])$ such that the contribution is assigned to the correct output entry.


% (removed stray text "sd")

\section{Combining CL and CRoP}
The idea is that instead of traversing the interval $v'[a..b]$ we sample from it, such that $v'[i]$ is sampled with probability proportional to its value. This will improve speed if the interval is sufficiently large. For efficient sampling we may use a precomputed array $vs$, where $vs[i] = v'[1] + ... + v'[i]$. Choose a random value $r$ between $vs[a-1]$ and $vs[b]$. Then binary search for the sample value $i$ which satisfies $vs[i-1] < r \le vs[i]$. The probability that we sample a specific value $i$ is proportional to $v'[i]$ as desired. The number of things sampled from $v'[a..b]$ should be proportional to $w_j$, i.e., $C \times w_j$ for some fixed $C$, in expectation. The higher $C$ is, the more samples will be taken. In the common case where $C \times w_j$ is not an integer we may take 1 sample with probability equaling the fractional part of $C \times w_j$, to get the right expected number.
The samples are saved using the SpaceSaving algorithm~\cite{Cormode2008,Manerikar2009} with a StreamSummary data structure~\cite{Demaine2002a}\footnote{The StreamSummary data structure was also proposed in~\cite{Karp2003}.}.


\section{Approach}

\subsection{Algorithms for CL and CRoP}
\begin{algorithm}
\caption{Serial Combined CL and CRoP}
\begin{algorithmic}
% Process the product one outer product (column of A times row of B) at a time.
\For{$a_k, col$ \textit{in} $enumerate(A)$} 
	\State $h1val, h2val, h1counts \gets []$
	% Removed leftover line "A,B <- [i][k], [k][j]": it overwrote the input
	% matrices and served no purpose.
	% Hash the non-zero entries of the column and count entries per hash bucket.
	\For{$a_i, w$ \textit{in} $enumerate(col)$} 
		\If{$w == 0$}
		\State $continue$
		\EndIf
		\State $hash1 = val(h1, a_i, 0)$
		\State $h1counts[hash1] += 1$
		\State $h1val.add((hash1, a_i, w))$
	\EndFor
	
	% Hash the non-zero entries of the row of B.
	\For{$b_j, u$ \textit{in} $enumerate(B[a_k])$}
		\If{$u == 0$}
		\State $continue$
		\EndIf
		\State $h2val.add((val(h2, b_j, 1), b_j, u))$ \Comment{moved outside the zero-test; it was unreachable after \textit{continue}}
	\EndFor
	\State $h1counts = cumulated\_sum(h1counts)$
	\State $h1counts.prepend(0)$
	\State $h1val.sort$
	\State $generate\_prob(h1counts, h1val)$
	\State $do\_outer\_product(h1val, h2val, h1counts)$
\EndFor
\end{algorithmic}
\end{algorithm}

\begin{algorithm}
\caption{Parallel Combined CL and CRoP - Master Node}
\begin{algorithmic}
\State $A,B \gets inputmatrices$
\State $C \gets \emptyset$ \Comment{output matrix; empty braces typeset as nothing}
\State $nodes \gets []$ \Comment{compute nodes in cluster} 
\Function{main}{}
	\State $scatter(A,B)$
	\State $MPI\_Barrier\_Sync$
\EndFunction
\Function{scatter}{$A$, $B$}
	\For{col, row in A,B}
	\State $MPI\_Broadcast(col, row, nodes)$ \Comment{Broadcast to all nodes}	
	\EndFor 	
\EndFunction

\Function{gather}{$sub\_mm$} \Comment{gather results from compute nodes}
	\For{$i,j,w$ \textit{in} $sub\_mm$} 
		\State$C[i,j] += w$ \Comment{update entry in the output matrix}
	\EndFor
\EndFunction
\end{algorithmic}
\end{algorithm}

\begin{algorithm}
\caption{Parallel Combined CL and CRoP - Slave Node}
\begin{algorithmic}
\State $p\_num \gets$ \#processors in cluster 
\State $pi \gets$ processor id
\State $SS \gets SpaceSaving$ \Comment{summary structure for the sampled contributions}
\Function{crop}{$col_a, row_b$}
	\State $h1val, h2val, h1counts \gets []$
	
	\For{$a_i, w$ \textit{in} $enumerate(col_a)$}
	\Comment{Possibly do the loop in parallel}
		\If{$w == 0$} 
		\State $continue$
		\EndIf
		\State $hash1 \gets val(h1, a_i, 0)$ \Comment{$val$ returns a hash of $a_i$}
		\State $h1counts[hash1] += 1$ \Comment{keep a count}
		\State $h1val.add((hash1, a_i, w))$ 
	\EndFor
	
	\For{$b_j, u$ \textit{in} $enumerate(row_b)$}
		\If{$u == 0$}
		\State $continue$
		\EndIf
		\State $h2val.add((val(h2, b_j, 1), b_j, u))$ \Comment{moved outside the zero-test; it was unreachable after \textit{continue}}
	\EndFor
	\State $h1counts \gets cumulated\_sum(h1counts)$ \Comment{accumulated counts}
	\State $h1val.sort$ \Comment{sort the hashed values in $col_a$}
	\State $generate\_prob(h1counts, h1val)$ 
	\State $do\_outer\_product(h1val, h2val, h1counts)$
\EndFunction
\State
\Function{$do\_outer\_product$}{$h1val,h2val,h1counts$}
	\ForAll{b in h2val}
		\State $a\_idx \gets (p\_num + pi - b[0]) \bmod p\_num$ \Comment{such that $AB[i,j]$ belongs to group $(h_1[i] + h_2[j]) \bmod p\_num$}
		\State $ab\_range \gets h1val[h1counts[a\_idx]:h1counts[a\_idx + 1]]$
		\If{$len(ab\_range) == 0$} 
			\State continue 
		\EndIf		
		\ForAll{a in ab\_range}
			\State $C[i,j] += b.value * a.value$ \Comment{This is where we save in some hashed datastructure eg. SpaceSaving}  
		\EndFor  		
	\EndFor 
\EndFunction
\State
\Function{send}{} 
	\State $send(SS)$  \Comment{Send the result to the master node}
\EndFunction
\end{algorithmic}
\end{algorithm}

\section{Analysis}
What are the error bounds on the combination of sampling~\cite{Cohen1999} and SpaceSaving? 
Is it necessary to both sample and keep the samples in a SpaceSaving structure?
How will parsing of input matrices in compressed format change performance?

We could exploit threading on each node and do a parallel loop using OpenMP.

\section{Implementation}
The current implementation is written in Python. The parallelization of the algorithm has not been implemented yet. However, setting the number of processors $> 1$ will still distribute the entries of the output matrix using $(h_1(i) + h_2(j)) \bmod p$. 
Thus the next step is to implement a version in C++ and integrate this with MPI and OpenMP.
\listofalgorithms

\bibliography{Thesis}
\bibliographystyle{plain}
\end{document}