\documentclass[conference, 10pt, letter]{IEEEtran}
\input{preamble.tex}
\bibliographystyle{IEEEtranS}

\begin{document}

\title{MPI2GPU: A Framework for Porting MPI Code to the Graphics Processor}

\author{\IEEEauthorblockN{Chad Frederick and Sushil K. Prasad}
\IEEEauthorblockA{Department of Computer Science\\
Georgia State University\\
Atlanta, Georgia 30303, USA\\}
}

\maketitle

\begin{abstract}
Graphics Processing Units (GPUs) can be found in almost all areas of high performance computing, but can be difficult to program.  Message Passing Interface (MPI) is the de facto standard for programming on distributed systems.  However, there is no easy way to port MPI code to the GPU.  In this paper we introduce a framework for porting MPI code directly to the Compute Unified Device Architecture (CUDA).  We define a way to classify MPI algorithms based on their communication patterns, and we develop a performance prediction model based on these classes of algorithms.
\end{abstract}

\section{Introduction}

Graphics Processing Units are powerful, inherently parallel, and highly specialized~\cite{1103933}. Since 2003, the GPU has been available for general programming, and a variety of strategies have been developed to leverage this technology~\cite{1090126}. The specialized hardware and languages of GPU programming are a barrier to researchers wanting to port their existing parallel or distributed code to the GPU.

In this paper, we develop a framework for converting existing MPI code to the Compute Unified Device Architecture (CUDA). This framework is intended to assist researchers with existing MPI code to easily translate their code to the GPU.

In the remainder of this section, we discuss the background and relevance of our approach, which is outlined in Section~\ref{sub:approach}. Section~\ref{sec:translation} presents the detailed algorithm for converting an MPI program to CUDA. A detailed case study, the conversion of an MPI version of Cannon's Algorithm, is presented in Section~\ref{sec:case-study}. Our performance results from the case study appear in Section~\ref{sec:results}, and our plans for future work and conclusions are in Section~\ref{sec:conclusion}.

\subsection{Related Work}
\label{sub:background}
\input{secs/sub-background.tex}

\section{Our Approach}
\label{sub:approach}

\input{secs/sub-approach}
Our approach is a five-stage algorithm that produces CUDA code from an MPI program.
The first stage classifies the algorithms embodied in the code, since the communication class of an algorithm determines both the translation strategy and the expected performance.
We then consider two methods for translating the MPI communication primitives to the GPU.

\subsection{Relevance}
\label{sub:relevance}
\subsection{Limitations}
\label{sub:limitations}

\section{Translating an MPI Program (Overview)}
\label{sec:translation}
An algorithm can be ported easily, but translating an MPI program to a GPU program can be difficult.  Here we will show that this is much easier when taken in stages.  

\subsection{Classifying MPI programs using Communication Patterns}
\input{secs/sub-classify.tex}

\subsection{Translating the communication primitives}
\input{secs/sub-primitives.tex}


\subsection{Algorithm}
\input{secs/sub-alg.tex}

%\input{secs/sub-algorithm.tex}



\section{Case Study}
\label{sec:case-study}
\input{secs/sub-casestudy.tex}




\section{Conclusion}
\label{sec:conclusion}
\input{secs/sub-conclusion.tex}


\section{Future Work}
\input{secs/sub-futurework.tex}


\bibliography{mpi2cuda}
\end{document}
