\documentclass[11pt]{article}
%\usepackage{epsfig} % obsolete: graphicx (loaded below) provides \includegraphics
\usepackage{fullpage}
\usepackage{psfrag}
%\usepackage[ruled,vlined]{algorithm2e}
\usepackage[lined,boxed,commentsnumbered]{algorithm2e}
\usepackage{latexsym}
\usepackage{graphicx}
  % declare the path(s) where your graphic files are
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}

\title {Parallel Algorithm Analysis for Prefix Sums}
\author {Songgang Xu}
\begin{document}
\maketitle
  \section{Introduction}
\indent{}This report presents an analysis and comparison of the performance of MPI and OpenMP for computing prefix sums. A sequential program is designed for measuring the speedup of the parallel programs. In the first part, the algorithms are described in pseudocode, and for each algorithm the theoretical time complexity and work complexity are given. We then design several experiments to measure the actual speedup on a supercomputer. The strong and weak scaling of the speedup are measured and discussed. Finally, an approximate asymptotic bound is determined by comparing the theoretical complexity model with the actual speedup results of the experiments. 

%You will also compare them with a sequential program for computing the prefix sums that you will write.
  \section{Theoretical Analysis}
    \subsection{Sequential Algorithm}
      
    \begin{algorithm}[H]
      sum $\leftarrow$ 0 \\
      \For {$i \leftarrow 1$ \KwTo $n$}
      {
        sum $\leftarrow$  sum + A[i]\\
        prefix\_sum [i] = sum \\
      }
      \Return prefix\_sum \\
      \caption {Sequential Algorithm}
    \end{algorithm}
    This is a linear algorithm. The time complexity is $O(n)$, where $n$ is the size of the input data. Because this is a sequential algorithm, the work complexity is also $O(n)$. 

    \subsection{Parallel Algorithm in OpenMP}
    \begin{algorithm}[H]
      sum $\leftarrow 0$\\
      id $\leftarrow$ processor id \\
      p $\leftarrow$ number of processors \\
      prefix\_sum $\leftarrow$ A\\
      \For {$i \leftarrow \frac{id\times n}{p}$ \KwTo $\frac{(id+1)\times n}{p}-1$}
      {
        predecessor [i] = i - 1;
      }
      predecessor [1] = 0; % sentinel: with predecessor[1] = 1 the while-loop below never terminates for i = 1
      
      \For {$i \leftarrow \frac{id\times n}{p}$ \KwTo $\frac{(id+1)\times n}{p}-1$}
      {
        \While{\textrm {predecessor[i]}$\geq 1$}
        {
          prefix\_sum [i] += prefix\_sum[predecessor[i]]\\
          predecessor [i] = predecessor [predecessor[i]]\\
        }
      } 
      \Return prefix\_sum \\
      \caption {Parallel Algorithm in OpenMP}
    \end{algorithm}

The time complexity is $O(\frac{n \log n} { p} + \log n )$, where $n$ is the size of input data. The work is $O(n \log n )$, where $n$ is the size of input data. 


    %The complexity analysis of algorithm (time and work) of your algorithms using asymptotic notation. 

    \subsection{Parallel Algorithm in MPI}
    \begin{algorithm}[H]
      1. Compute prefix sum in each group sequentially \\
      sum [processor\_id] $\leftarrow$ 0 \\
      \For {$i \leftarrow $ processor\_id $\times \frac{n}{p} + 1 $ \KwTo $( \textrm{processor\_id} + 1) \times \frac{n}{p} $}
      {
        sum [processor\_id] = sum[processor\_id] + A[i]\\
        prefix\_sum[i] = sum [processor\_id]\\
      }
      2. Combine the partial sums among groups (gather at processor 0, then broadcast) \\
      \eIf {processor\_id $\neq 0$}
      {
        MPI\_Send partial\_sum to processor with id 0 \\
      }
      {
        sum $\leftarrow$ partial\_sum \\
        \For { $i \leftarrow 1$ \KwTo $p-1$}
        {
          MPI\_Recv partial\_sum from a processor and store in buffer \\
          sum $\leftarrow$ sum + buffer
        }
      }
      MPI\_Broadcast sum \\
      \Return sum
      \caption {Parallel Algorithm in MPI}
    \end{algorithm}
The time complexity is $O(\frac{n \log n} { p} + \log n )$, where $n$ is the size of input data. The work is $O(n \log n )$, where $n$ is the size of input data. 
  \section{Experimental Setup}
  \subsection{Machine Specification}
  The experiments are set up on the main IBM p5-575 cluster. The operating system is AIX 5.3. The cluster has 52 computing nodes and 832 processing cores. For each task, the maximum number of processing units allowed is 16. The peak performance is 6.3 TFlops. 
  \subsection{Experiments Auxiliaries}
  
  \begin{itemize}
    \item {\bf Generation of input data} The input data consist of a large number of integers. These integers are generated randomly by the C library function \emph{rand()}. The size of the input data can be specified, and the integers are generated before computing the prefix sums. The size should be larger than the machine cache size; otherwise, the results may exhibit super-linear speedup. When measuring the time of computing the prefix sums, the time spent generating the input data is excluded. 
    \item {\bf Choose the number of processors} The number of processors can be specified before processing the prefix sum. The maximum number of processors allowed on the machine is 16. In each experiment, we will choose the number of processors using power of 2, i.e., 1, 2, 4, 8, 16, to compare the performance of the algorithm. 
    \item {\bf Time Mechanism} The C library function \emph{gettimeofday()} returns the current system time. To measure the duration of an operation, we record the time $t_1$ immediately before and the time $t_2$ immediately after the operation; the difference $t_2 - t_1$ gives the elapsed time. The prefix-sum computation is timed in the same way: we read the system clock just before the computation starts and again after it finishes. Each experiment is repeated 32 times, and the time cost reported for an experiment is the average of the 32 measurements. 
  \end{itemize}

  \section{Experimental Results}
    \subsection {Results}
{\bf Experiment 1} This experiment indicates the relation between the speedup of the parallel algorithms and the number of processors. The data size is $1e7$ integers. Both the OpenMP and MPI implementations are tested. The numbers of processors used are 1, 2, 4, 8, and 16. For each number of processors, we repeat the experiment 32 times. We record the maximum, minimum and average time costs in Table \ref{table:proc:time}. The unit of time cost is microseconds (usec).  

\begin{table}[h]
\begin{center}
        \begin{tabular}{ | c | c | c | c | c | c | c |}
        \hline
        \#Processor & $\max_{OpenMP}$ & $\min_{OpenMP}$ & $\textrm{avg}_{OpenMP}$ &  $\max_{MPI}$ & $\min_{MPI}$ & $\textrm{avg}_{MPI}$ \\ \hline
        1 & 88824 & 88650 & 88714 & 95596 & 94556 & 95296 \\ \hline
        2 & 66448 & 66247 & 66335 & 44510 & 44248 & 44459 \\ \hline
        4 & 43431 & 43074 & 43201 & 22061 & 22028 & 22038 \\ \hline
        8 & 23814 & 23763 & 23793 & 11546 & 11059 & 11329 \\ \hline
        16 & 19432 & 18662 & 19101 & 7039 & 6545 & 6582 \\ \hline
        \end{tabular}
\end{center}
\caption{Data size is $1e7$ integers.  }
\label{table:proc:time}
\end{table}

\begin{figure}[h]
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex1_1.eps}
\caption{The relationship between time cost and number of processors under OpenMP}
\label{fig:openmp:1}
\end{minipage}
\hspace{0.5cm}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex1_2.eps}
\caption{The relationship between time cost and number of processors under MPI}
\label{fig:mpi:1}
\end{minipage}
\end{figure}

Fig. \ref{fig:openmp:1} shows the relationship between the time cost and the number of processors with the implementation of OpenMP. The curve could be depicted mathematically as 

\begin{equation}
T_{\textrm{\tiny {OpenMP}}}(p) = a_1 \cdot e^{-b_1p},  
\label{for:openmp:1}
\end{equation}

where $T$ is the time cost, $p$ is the number of processors, $a_1$ and $b_1$ are two coefficients to be determined.  

%%Using least square fitting, we could get the values of $a_1$ and $b_1$ as follows

Fig. \ref{fig:mpi:1} shows the relationship between the time cost and the number of processors with the implementation of MPI. Similarly, the curve could be depicted mathematically as 
\begin{equation}
T_{\textrm{\tiny {MPI}}}(p) = a_2 \cdot e^{-b_2p},  
\label{for:mpi:1}
\end{equation}
where $a_2$ and $b_2$ are also two coefficients. 
  


{\bf Experiment 2} These experiments indicate the relation between the number of processors and the speedup ratio. In each experiment, we fix the number of processors and vary the size of the input data. We measure the time cost of the sequential algorithm and that of the parallel algorithms. Table \ref{table:speedup:openmp:4} and Table \ref{table:speedup:mpi:4} show the recorded time costs of the sequential algorithm and the parallel algorithms using 4 processors. Similarly, Table \ref{table:speedup:openmp:8} and Table \ref{table:speedup:mpi:8} show the recorded time costs of the sequential algorithm and the parallel algorithms using 8 processors. The data size ranges from 1e6 to 1.6e7 integers. The number of processors ranges from 2 to 16. 

We compute different ratio of speedup with respect to different number of processors. The result of OpenMP is shown in Fig. \ref{fig:openmp:2}. The result of MPI is shown in Fig. \ref{fig:mpi:2}. 

%Showing the running times of the sequential program (not a parallel program using one processor), and the 2 versions of the parallel programs for different data sizes and a fixed number of processors. You may want a few graphs for different numbers of processors.
  
%This experiment indicates the relation between the size of input data and the time consuming of processing. 

\begin{table}[h]
\begin{center}
        \begin{tabular}{ | c | c | c | c | c | c |}
        \hline
        Data Size & $\textrm{avg}_{seq}$ & $\max_{OpenMP}$ & $\min_{OpenMP}$ & $\textrm{avg}_{OpenMP}$ & $\textrm{Speedup}_{OpenMP}$  \\ \hline
        1e6 & 6090 & 4282 & 4150 & 4221 & 1.44 \\ \hline
        2e6 & 12278 & 8527 & 8513 & 8520 & 1.44 \\ \hline
        4e6 & 24553 & 17080 & 17053 & 17071 & 1.44 \\ \hline
        8e6 & 50204 & 35333 & 34235 & 35313 & 1.42  \\ \hline
        1.6e7 & 104408 & 69982 & 69837 & 69853 & 1.49  \\ \hline
        \end{tabular}
\end{center}
\caption{Speedup For OpenMP. The number of processors is 4.  }
\label{table:speedup:openmp:4}
\end{table}

\begin{table}[h]
\begin{center}
        \begin{tabular}{ | c | c | c | c | c | c |}
        \hline
        Data Size & $\textrm{avg}_{seq}$  &  $\max_{MPI}$ & $\min_{MPI}$ & $\textrm{avg}_{MPI}$ & $\textrm{Speedup}_{MPI}$ \\ \hline
        1e6 & 6090 & 2185 & 2043 & 2099 & 2.91 \\ \hline
        2e6 & 12278 & 4404 & 4391 & 4399 & 2.79 \\ \hline
        4e6 & 24553 & 8801 & 8744 & 8770 & 2.80 \\ \hline % NOTE(review): max/min were swapped in the original (8744 < 8801)
        8e6 & 50204 & 17582 & 17519 & 17543 & 2.86 \\ \hline
        1.6e7 & 104408 & 35346 & 35320 & 35301 & 2.96\\ \hline % NOTE(review): avg (35301) is below min (35320) -- verify this measurement
        \end{tabular}
\end{center}
\caption{Speedup for MPI. The number of processors is 4.  }
\label{table:speedup:mpi:4}
\end{table}

\begin{table}[h]
\begin{center}
        \begin{tabular}{ | c | c | c | c | c | c |}
        \hline
        Data Size & $\textrm{avg}_{seq}$ & $\max_{OpenMP}$ & $\min_{OpenMP}$ & $\textrm{avg}_{OpenMP}$ & $\textrm{Speedup}_{OpenMP}$  \\ \hline
        1e6 & 6090 & 2365 & 2356 & 2359 & 2.58  \\ \hline
        2e6 & 12278 & 4691 & 4668 & 4673 & 2.62 \\ \hline
        4e6 & 24553 & 9359 & 9301 & 9331 & 2.63 \\ \hline
        8e6 & 50204 & 18946 & 18890 & 18916 & 2.65 \\ \hline
        1.6e7 & 104408 & 39026 & 38521 & 38997 & 2.67 \\ \hline
        \end{tabular}
\end{center}
\caption{Speedup For OpenMP. The number of processors is 8. }
\label{table:speedup:openmp:8}
\end{table}

\begin{table}[h]
\begin{center}
        \begin{tabular}{ | c | c | c | c | c | c |}
        \hline
        Data Size & $\textrm{avg}_{seq}$  &  $\max_{MPI}$ & $\min_{MPI}$ & $\textrm{avg}_{MPI}$ & $\textrm{Speedup}_{MPI}$ \\ \hline
        1e6 & 6090 & 1243 & 1173 & 1213 & 5.02 \\ \hline
        2e6 & 12278 & 2260 & 2252 & 2257 & 5.44 \\ \hline
        4e6 & 24553 & 4688 & 4519 & 4599 & 5.34 \\ \hline
        8e6 & 50204 & 9340 & 8957 & 9190 & 5.46 \\ \hline
        1.6e7 & 104408 & 17811 & 17710 & 17787 & 5.87 \\ \hline % NOTE(review): max/min were swapped in the original (17710 < 17811)
        \end{tabular}
\end{center}
\caption{Speedup for MPI. The number of processors is 8.  }
\label{table:speedup:mpi:8}
\end{table}

\begin{figure}[h]
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex2_1.eps}
\caption{Speedup for OpenMP}
\label{fig:openmp:2}
\end{minipage}
\hspace{0.5cm}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex2_2.eps}
\caption{Speedup for MPI}
\label{fig:mpi:2}
\end{minipage}
\end{figure}

{\bf Experiment 3} This experiment compares the result between theoretical expectation and experimental results about the performance of parallel algorithms. The definition of speedup is 
\[
\textrm{Speedup} = \frac{T^*}{T_p}
\]
Theoretically, if we have $p$ processors (computing units), the speedup is at most $p$. The sequential algorithm computes the prefix sums of 1e7 integers in 65209 microseconds. According to Fig. \ref{fig:openmp:1} and Fig. \ref{fig:mpi:1}, the speedups for strong scaling of OpenMP and MPI are shown in Fig. \ref{fig:openmp:3} and Fig. \ref{fig:mpi:3} respectively. From the graphs, we can see that there is overhead for the processors to cooperate. That is why the measured speedup is lower than the theoretical one. 

\begin{figure}[h]
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex3_1.eps}
\caption{Strong scaling for OpenMP}
\label{fig:openmp:3}
\end{minipage}
\hspace{0.5cm}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex3_2.eps}
\caption{Strong scaling for MPI}
\label{fig:mpi:3}
\end{minipage}
\end{figure}


Next, we continue with the analysis of the weak scaling. Each processor is assigned $1e6$ integers for computing the prefix sums. The ideal weak-scaling curve is a constant horizontal line. The actual weak scaling for OpenMP is shown in Fig. \ref{fig:openmp:4}, and that for MPI in Fig. \ref{fig:mpi:4}. We find that both are higher than the ideal value, because there is overhead computation for coordinating the different processors. 

\begin{figure}[h]
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex4_1.eps}
\caption{Weak scaling for OpenMP}
\label{fig:openmp:4}
\end{minipage}
\hspace{0.5cm}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[scale=1]{./gnuplot/ex4_2.eps}
\caption{Weak scaling for MPI}
\label{fig:mpi:4}
\end{minipage}
\end{figure}

%This will allow you compare your observed performance with your theoretically expected peformance (why?) and to determine the constants hidden in the asymptotic analysis (how?). You may want a few graphs for different numbers of processors (why?).


    
    \subsection{Performance Analysis}
    Theoretically, the time cost for $p$ processors is 
\[
T_p = O (\frac{n \log n}{p} + \log n).
\]
So the speedup should be 
\[
\frac{T^*}{T_p} = \frac{O(n)}{ O \left( \frac{n \log n}{p} + \log n \right)} = k \cdot \frac{n}{\frac{n \log n}{p} + \log n},
\]
where $n$ is the input data size, $k$ is a coefficient to be determined. For input data of size 1e7 and two processors, we plug them into above equation and get
\[
\frac{T^*}{T_p} = 0.086 k.
\] 
The time cost of the sequential algorithm is 65209 microseconds. With respect to Table \ref{table:proc:time}, the value of $k$ for OpenMP is $k = 1.017 / 0.086 = 11.82$; the value of $k$ for MPI is $k = 7.9$. In other words, the time cost for OpenMP is
\[
\left( \frac{n \log n}{p} + \log n \right) / 11.82 = 0.0846\cdot \left(\frac{n \log n}{p} + \log n\right);
\]
the time cost for MPI is
\[
\left(\frac{n \log n}{p} + \log n \right)/ 7.9  =  0.126 \cdot \left(\frac{n \log n}{p} + \log n \right).
\]
 
%    Using the timing graphs, decide if your measured performance matches your predicted asymptotic performance. Experimentally determine the constants hidden in your asymptotic analysis from the plots, and determine the values of n (data size) for which the asymptotic analysis holds. These values should be determined from the plots (you may wish to plot some additional functions to assist with this). Provide a discussion of your results, which includes but is not limited to:\\
  \section{Conclusion}
%%In this section you should provide some summary discussion comparing the two parallel algorithms and offering advice as to when each of them should be used. Here it would be relevant to include factors such as each of implementation and portability in your discussion.
\indent{}This report presents the analysis and comparison of the performance of the MPI and OpenMP for computing prefix sums. A sequential program is designed for measuring the speedup of parallel programs. We design several experiments to measure the actual speedup on the supercomputer `hydra'. The strong and weak scaling of speedup are measured and discussed. 

Finally we conclude that, the time cost for OpenMP is
\[
0.0846\cdot \left(\frac{n \log n}{p} + \log n\right);
\]
the time cost for MPI is
\[
0.126 \cdot \left(\frac{n \log n}{p} + \log n \right).
\]
The result shows that the MPI has higher speedup than OpenMP. Thus MPI has a higher performance. 

\end{document}
