% $Header: /home/grad2/araim1/cvs_root/beamer-example/beamer-example.tex,v 1.1.1.1 2008/03/22 18:58:58 araim1 Exp $

% Adopted from Till Tantau's template that comes packaged with Beamer

\documentclass[compress]{beamer}

\mode<presentation>

% Boadilla is a nice theme, but I don't like the big spherical bullet points. I also
% want a blue bar across the top of every slide

\usetheme{Boadilla}
\setbeamercolor*{frametitle}{parent=palette primary}
\setbeamertemplate{items}[default]
\setbeamertemplate{sections/subsections in toc}[sections numbered]

\setbeamercovered{transparent}

\usepackage{beamerinnerthemeumbcboxes}
\usepackage{listings}
\usepackage[noend]{algorithmic}
\usepackage{algorithm}

\usepackage[english]{babel}

\title{Cell SVD Project Update}

\subtitle{CMSC 691A, Spring 2008}

\author[araim1]{Andrew~M.~Raim}

\institute[UMBC]
{
  Department of Computer Science\\
  University of Maryland Baltimore County}

\date{4/14/2008}

\subject{Talks}

% If you have a file called "university-logo-filename.xxx", where xxx
% is a graphic format that can be processed by latex or pdflatex,
% resp., then you can add a logo as follows:

% \pgfdeclareimage[height=0.5cm]{university-logo}{university-logo-filename}
% \logo{\pgfuseimage{university-logo}}

\begin{document}

\begin{frame}
  \titlepage
\end{frame}

\begin{frame}{Outline}
  \tableofcontents
  % You might wish to add the option [pausesections]
\end{frame}

\section{Introduction}
\begin{frame}{Objectives}
\begin{itemize}
\item Compute the Singular Value Decomposition (SVD) on large matrices
\item Take advantage of the Cell Broadband Engine (CBE) architecture to compute quickly
\item Start with the algorithm presented in Strumpen, and make it suitable for use on Cell
\end{itemize}
\end{frame}

\section{Background}
\begin{frame}[shrink]{Algorithm1 from Strumpen paper}
\input{algorithm1}
\end{frame}

\begin{frame}[shrink]{Jacobi calculation from Strumpen paper}
\input{jacobi}
\end{frame}

\begin{frame}{Strumpen's Method}
\begin{itemize}
\item Assumes an $R \times R$ array of processors
\item Data flows between processors in the array, in a systolic manner
\item Processors perform calculations as data passes through
\item We will try a different approach for Cell, to allow the SPEs to work independently
\end{itemize}
\end{frame}


\section{Work Matrix}
\begin{frame}{Creating row blocks}
\begin{small}

Original data matrix $A$ = 
\begin{displaymath}
\left[
\begin{array}{cccc}
  a_{1,1} &   a_{1,2} &  \cdots & a_{1,n} \\
  a_{2,1} &   a_{2,2} &  \cdots & a_{2,n} \\
  \vdots  &   \vdots &   \ddots & \vdots  \\
  a_{m,1} &   a_{m,2} &  \cdots & a_{m,n} \\
\end{array}
\right]
\end{displaymath}

Group the rows of $A$ into blocks of size $R$. Call the resulting blocks $rb_0, \ldots, rb_k$.

\begin{displaymath}
\begin{array}{|c|}
  \hline
  rb_0 \textrm{: rows } 1 \textrm{ to } R \\ \hline
  rb_1 \textrm{: rows } R+1 \textrm{ to } 2R \\  \hline
  \vdots  \\ \hline
  rb_k \textrm{: rows } kR+1 \textrm{ to } M \\ \hline
\end{array}
\end{displaymath}

\end{small}
\end{frame}


\begin{frame}{The Work Matrix}
\begin{small}

When performing Algorithm1, we must perform a computation on each pair of row blocks. We can create
a matrix to show the work that must be done.

\begin{displaymath}
Work_{k,k} = 
\begin{array}{|c|c|c|c|}
  \hline
  rb_{1,1} &   rb_{1,2} &  \cdots & rb_{1,k} \\ \hline
  rb_{2,1} &   rb_{2,2} &  \cdots & rb_{2,k} \\ \hline
  \vdots   &   \vdots &    \ddots & \vdots  \\ \hline
  rb_{k,1} &   rb_{k,2} &  \cdots & rb_{k,k} \\ \hline
\end{array}
\end{displaymath}

Only the upper triangular portion of the matrix must be calculated. Computing $Work(rb_i, rb_j)$ will 
modify the rows in both $rb_i$ and $rb_j$.

\end{small}
\end{frame}

\begin{frame}[shrink]{Processing a Chunk of Work}
Here is the work required to process $Work(rb_i, rb_j)$. Let:
\begin{itemize}
\item $lb_i$: min row index of $rb_i$
\item $ub_i$: max row index of $rb_i$
\item $lb_j$: min row index of $rb_j$
\item $ub_j$: max row index of $rb_j$

\end{itemize}

\input{workchunk}

Note that the paper suggests using a systolic approach to computing this efficiently.
\end{frame}


\section{Approach on Cell}
\begin{frame}{Parallelization}
Notice that in the course of processing $Work(rb_i, rb_j)$, all rows within those 
blocks are modified. Therefore, if we are to parallelize the problem, a given row block
$rb_i$ should have at most one processor operating on it. In terms of the $Work$ matrix,
this means that no two processors can be operating within the same row or column at any
given time. If this constraint is not enforced, the results will be in an indeterminate
state (and will probably not be correct).
\end{frame}


\begin{frame}{Parallelization Idea}

Here is an example of the flow of control on a $Work$ matrix with $k = 4$. Let $t_i$ denote the $i$th time step of the
algorithm.

% First two frames of animation - shows up as upper row
\pause

\begin{displaymath}
\rightarrow
\begin{array}{|c|c|c|c|}
  \hline
  \color{red} t_1 &  &  & \\ \hline
    &   \color{red} t_1 &  & \\ \hline
    &  & \color{red} t_1 & \\ \hline
    &  &  & \color{red} t_1 \\ \hline
\end{array}
\pause
\rightarrow
\begin{array}{|c|c|c|c|}
  \hline
  t_1 & \color{red} t_2 &  & \\ \hline
    & t_1 &   \color{red} t_2 & \\ \hline
    &  & t_1 & \color{red} t_2 \\ \hline
    &  &  & t_1 \\ \hline
\end{array}
\end{displaymath}

\pause

% Last two frames of animation - shows up as lower row
\begin{displaymath}
\rightarrow
\begin{array}{|c|c|c|c|}
  \hline
  t_1 & t_2 & \color{red} t_3 & \\ \hline
    & t_1 & t_2 & \color{red} t_3 \\ \hline
    & & t_1 & t_2 \\ \hline
    & & & t_1 \\ \hline
\end{array}
\pause
\rightarrow
\begin{array}{|c|c|c|c|}
  \hline
  t_1 & t_2 & t_3 & \color{red} t_4 \\ \hline
    & t_1 & t_2 & t_3\\ \hline
    & & t_1 & t_2 \\ \hline
    & & & t_1 \\ \hline
\end{array}
\end{displaymath}

\end{frame}


\begin{frame}[shrink]{Parallelization Control}
Here is how we can orchestrate the processing units to maintain correctness of the result
\input{workcontrol}
\end{frame}

\begin{frame}[shrink]{Test Matrix}
Luke describes some of the attributes of the test matrix:

In our case, $M$ (number of terms) is $54574$, $N$ (number of docs) is
$19043$, and there are $1215837$ matrix elements that are non-zero. This
makes our matrix $0.117\%$ dense. We may need to modify our code to deal
with sparse matrices. A $54574 \times 19043$ dense matrix of unsigned chars
(I don't see any term frequency going over $256$ times in a document
since stop-words like ``a'', ``the'', ``is'' are removed) takes up $991$ MB.
The PS3s only have $256$ MB of main memory. To store a full SVD without
the original matrix $A$ ($U = M \times M$, $S = M$, $V = N \times N$), it would take $12$ GB
assuming the elements in $U$, $S$, and $V$ are $4$-byte floats. We can either
reduce the size of the corpus, or we can code for large matrices.
BLAS has ways of dealing with sparse matrices.
\end{frame}

\begin{frame}[shrink]{Sparseness}
\begin{itemize}
\item We don't want to store our matrix in full form - it's extremely wasteful
\item It is extremely expensive to unroll the sparse form of the matrix at any time during computation
\item It isn't too hard to perform the Hestenes-Jacobi rotation ``computation'' and ``application'' steps on a pair of sparsely encoded vectors at a time
\item Let's try to do that, and then perform those steps on a pair of sparsely encoded row blocks at a time
\item This should be roughly equivalent to the systolic approach in the paper
\end{itemize}

Suppose:
\begin{itemize}
\item $i$ is an index between $1, \ldots, n$
\item $v \in \mathbb{R}$
\item Sparse row vector $r_1 = [ (i_{1,1}, v_{1,1}), (i_{1,2}, v_{1,2}), \cdots, (i_{1,n}, v_{1,n}) ] $
\item Sparse row vector $r_2 = [ (i_{2,1}, v_{2,1}), (i_{2,2}, v_{2,2}), \cdots, (i_{2,n}, v_{2,n}) ] $
\item All indices in $r_1$ and $r_2$ not specified have a corresponding value of zero
\end{itemize}
\end{frame}

\begin{frame}[shrink]{Hestenes-Jacobi Rotation Computation}{Pair of sparse vectors}
\input{hj_compute_pair_vectors}
\end{frame}

\begin{frame}[shrink]{Hestenes-Jacobi Rotation Computation}{Pair of sparse row blocks}
Note: as Luke pointed out, this is exactly equivalent to a matrix-matrix multiply! TBD: Also need
to add checks for $|g| > \epsilon$ and $|g| > \delta$
\input{hj_compute_pair_blocks}
\end{frame}

\begin{frame}[shrink]{Hestenes-Jacobi Rotation Application}{Pair of sparse row vectors}
\input{hj_apply_pair_vectors}
\end{frame}

\begin{frame}[shrink]{Hestenes-Jacobi Rotation Application}{Pair of sparse row blocks}
\input{hj_apply_pair_blocks}
\end{frame}


\section{Next Steps}
\begin{frame}[shrink]{Next Steps}
\begin{itemize}
\item Set $k$ to be the number of SPEs (which for us will be 6). This should be the optimal number for $k$;
any less and we'd be wasting an SPE, any more and there would be more control overhead to get work to the SPEs
\item Would it be possible to assign SPEs to work smartly, so they can keep some data in local store between jobs?
We'll need to consider if a block can be held completely in local store, for example.
\item There's probably a nice way to arrange the work in a single job, to minimize the number of loads, saves,
and SPE cycles
\item We'll want to keep track of how much time SPEs spend waiting for work, since each time step is only as
fast as the slowest SPE. Hopefully this time should be trivial
\item We'll want to make sure that in our implementation, we only spawn off SPE threads once
\item If there's enough time, we'll want to investigate matrices that are too large to fit in memory, and optimizing
disk usage on the Cell
\end{itemize}
\end{frame}


\end{document}

