\section{Parallel Processing in Python}

\centeredlargetext{white}{black}{
Parallel Processing in Python
}


\subsection{Multi-Processing}

\begin{frame}
\frametitle{Multi-Threading in Python}
\reference{\url{http://docs.python.org/library/threading.html}}
\begin{itemize}
  \item Python supports multi-threading of code but has an important limitation.
  \item Only one thread can execute Python code at a time.
  \item Multi-threading can still be useful in certain cases such as
  \begin{itemize}
    \item running multiple I/O-bound tasks simultaneously
    \item code that uses a lot of C-based modules (e.g.\ NumPy and SciPy)
  \end{itemize}
  \item To get parallel processing in Python it is often better to use multi-processing.
\end{itemize}
\end{frame}


\begin{frame}[fragile]
\frametitle{Multi-Processing in Python}
\reference{\url{http://docs.python.org/library/multiprocessing.html}}
\begin{itemize}
  \item The \lstpy{multiprocessing} module provides convenient ways to divide Python execution over multiple processes.
  \item Create a pool of worker processes and divide up the work.
\end{itemize}
\lstset{title=Examples/multiproc.py}
\lstlistingwithnumber{1}{10}{Examples/multiproc.py}
\end{frame}


\subsection{Heterogeneous Computing in Python with GPGPU}
\reference{\url{http://blogs.nvidia.com/tag/kepler/index.html}}
\begin{frame}
\frametitle{Modern GPGPU}

\begin{itemize}
  \item GPUs have been evolving from dedicated graphics rendering hardware to powerful, general-purpose computing devices:
\end{itemize}

\begin{center}
  \includegraphics[width=0.5\textwidth]{Diagrams/NVDA_Kepler_Architecture}\\[-0.05em]
  \footnotesize
  Modern GPU Architecture (Nvidia Kepler)
\end{center}

\end{frame}


\begin{frame}[fragile]
\frametitle{Modern GPGPU}
\reference{\url{http://blogs.nvidia.com/tag/kepler/index.html}}
\begin{itemize}
  \item Massive amount of processing cores (3072 CUDA cores in Nvidia GeForce GTX 690 single card)
  \item Each core has its own dedicated register file
  \item Each cluster of cores share L1 cache
  \item Clusters of cores share L2 cache
  \item Huge memory throughput (512-bit, 384\,GB/sec for the Nvidia GeForce GTX 690)
  \item Atomic operations
  \item 64-bit precision
  \item Error-correcting Code (ECC) memory to meet HPC requirement
  \item GPU vs.\ CPU on HPC performance
  \begin{itemize}
    \item Most CV developers: 10 to 500x speed-up depending on the application and implementation
    \item Intel: ``GPUs are ONLY up to 14 times faster than CPU.''
  \end{itemize}

\end{itemize}

\end{frame}



\begin{frame}
\frametitle{GPGPU Programming Model: Shading Language}

  \begin{columns}[c]
    \column{0.40\textwidth}
      \includegraphics[width=\linewidth]{Diagrams/shading_model}\\[-0.05em]
      \footnotesize
      Shading Language (Vertex \& Fragment Shader)
    \column{0.60\textwidth}
    \begin{itemize}
      \item Map parallel computing tasks to the OpenGL rendering pipeline
      \item Pack input data as vertices and textures, operate on the data in the vertex and fragment shaders, and output the results to the frame buffer
      \item Pros: 
        \begin{itemize}
          \item enable the GPU for general use for the first time
        \end{itemize}
      \item Cons:
        \begin{itemize}
          \item single precision
          \item no flow control and branching
          \item very limited kernel size
          \item lack of cross-kernel synchronization
        \end{itemize}
    \end{itemize}
  \end{columns}
\end{frame}


\begin{frame}
\frametitle{GPGPU Programming Model: CUDA and OpenCL}
\reference{\url{http://ixbtlabs.com/articles3/video/cuda-1-p5.html}}
  \begin{columns}[c]
    \column{0.40\textwidth}
      \includegraphics[width=\linewidth]{Diagrams/cuda_programming_model}\\[-0.05em]
      \footnotesize
      CUDA Programming Model (OpenCL has very similar model)
    \column{0.60\textwidth}
    \begin{itemize}
      \item Enable C/C++ like direct parallel thread programming
      \item Enable cross-thread synchronization
      \item Enable direct memory access
      \item Enable atomic operations
      \item Effectively remove the kernel size limit
      \item Enable full support for flow control and branching
      \item GPUs become truly general purpose
    \end{itemize}
  \end{columns}
\end{frame}


\begin{frame}
\frametitle{GPGPU in Computer Vision}
  \begin{itemize}
    \item The many hardwired graphics rendering resources in a GPU, including the texture engine and 3D transformation units, make the GPU a natural fit for computer vision and computational photography processing.
    \item OpenVIDIA: Parallel GPU Computing Vision Library developed by Nvidia
    \item OpenCV: more and more GPU Modules
    \item Nvidia CUDA Zone: An Image and Computer Vision GPU library, including segmentation, feature processing, stereo imaging, machine learning and data processing
  \end{itemize}
\end{frame}


\begin{frame}
\frametitle{Why use GPGPU in Python?}
\reference{\url{http://mathema.tician.de/software/pycuda}}
  \begin{itemize}
    \item GPUs are everything that scripting languages are not
      \begin{itemize}
        \item Highly parallel
        \item Very architecture-sensitive
        \item Built for maximum performance
      \end{itemize}
    \item GPU and Python can complement each other
      \begin{itemize}
        \item Use Python script as the brain, organizing processing module and data connection
        \item Use GPU as muscles, executing intensive, dirty inner loops
      \end{itemize}
    \item Play to the strengths of each programming environment
    \item Python + CUDA: PyCUDA
    \item Python + OpenCL: PyOpenCL
  \end{itemize}
\end{frame}

\begin{frame}
\frametitle{Native CUDA work flow}
\begin{center}
  \includegraphics[width=0.5\textwidth]{Diagrams/native_cuda_workflow}
\end{center}
\end{frame}


\begin{frame}
\frametitle{PyCUDA work flow}
\reference{\url{http://mathema.tician.de/software/pycuda}}
\begin{center}
  \includegraphics[width=0.9\textwidth]{Diagrams/pycuda_workflow}
\end{center}
\end{frame}

\begin{frame}[fragile]
\frametitle{PyCUDA Example}
\reference{\url{http://mathema.tician.de/software/pycuda}}
\begin{lstlisting}
import pycuda.driver as cuda                     # import PyCUDA module
import pycuda.autoinit, pycuda.compiler          # initialize the CUDA hardware
import numpy 

a = numpy.random.randn(4,4).astype(numpy.float32)# create a 4x4 array random numbers
a_gpu = cuda.mem_alloc(a.nbytes)                 # alloc the same sized array in GPU
cuda.memcpy_htod(a_gpu, a)                       # copy the data from host to device

mod = pycuda.compiler.SourceModule("""           # specify a CUDA kernel function
    __global__  void  twice (float *a)
    {
        int idx = threadIdx.x + threadIdx.y*4;
        a[idx] *= 2;
    }
    """)
func = mod.get_function("twice")                 # get the handle of CUDA kernel 
func(a_gpu, block=(4,4,1))           # run the CUDA function with a configuration

a_doubled = numpy.empty_like(a)                  # create an empty buffer on host
cuda.memcpy_dtoh(a_doubled, a_gpu)               # copy result from device to host
print a_doubled
print a
\end{lstlisting}
\end{frame}



\begin{frame}[fragile]
\frametitle{gpuarray: Simple Linear Algebra}
\reference{\url{http://mathema.tician.de/software/pycuda}}
\begin{itemize}
  \item Meant to look and feel just like numpy
  \item \lstpy{+}, \lstpy{-}, \lstpy{*}, \lstpy{/}, \lstpy{fill}, \lstpy{sin}, \lstpy{exp}, \lstpy{rand}, basic indexing, \lstpy{norm}, inner product, ...
  \item Mixed types (\lstpy{int32 + float32 = float64})
  \item ``\lstpy{print gpuarray}'' for debugging
  \item Allows access to raw bits 
\end{itemize}

\end{frame}

\begin{frame}[fragile]
\frametitle{gpuarray: Element-wise expressions}
\reference{\url{http://mathema.tician.de/software/pycuda}}
\begin{itemize}
  \item An example of element-wise operation in PyCUDA:
\end{itemize}
\begin{lstlisting}
from pycuda.curandom import rand as curand    # import CUDA random number module

a_gpu = curand((50,))                         # create a 1-d array with random number
b_gpu = curand((50,))                         

from pycuda.elementwise import ElementwiseKernel  # import ElementwiseKernel module

# specify the detail of element-wise operation
lin_comb = ElementwiseKernel(   
    " float a, float *x, float b, float *y, float *z",
    "z[i] = a*x[i] + b*y[i]")

from pycuda import gpuarray                   # import gpuarray module
import numpy.linalg as la                     # for computing the vector norm

c_gpu = gpuarray.empty_like(a_gpu)            # create a GPU array of same size
lin_comb(5, a_gpu, 6, b_gpu, c_gpu)           # run the ElementwiseKernel function
assert  la.norm((c_gpu -  (5*a_gpu+6*b_gpu)).get()) < 1e-5

print a_gpu
print b_gpu
print c_gpu
\end{lstlisting}
\end{frame}


\begin{frame}[fragile]
\frametitle{gpuarray: Reduction made easy}
\reference{\url{http://mathema.tician.de/software/pycuda}}
\begin{itemize}
  \item Example: A scalar product calculation
\end{itemize}
\begin{lstlisting}
from pycuda.reduction import ReductionKernel  # import ReductionKernel module
import numpy

# specify the detail of the reduction operation
dot = ReductionKernel(
        dtype_out=numpy.float32, 				
        neutral="0",
        reduce_expr="a+b", 
        map_expr="x[i]*y[i]",
        arguments="const float *x, const float *y")

from pycuda.curandom import rand as curand

x = curand((1000*1000), dtype=numpy.float32)
y = curand((1000*1000), dtype=numpy.float32)

x_dot_y = dot(x, y).get()
x_dot_y_cpu = numpy.dot(x.get(), y.get())

print x
print y
print x_dot_y
print x_dot_y_cpu
\end{lstlisting}
\end{frame}



\begin{frame}
\frametitle{PyCUDA: Vital Information}
\reference{\url{http://mathema.tician.de/software/pycuda}}
  \begin{columns}[c]
    \column{0.75\textwidth}
    \begin{itemize}
      \item \url{http://mathema.tician.de/software/pycuda}
      \item Complete documentation
      \item MIT License (no warranty, free for all use)
      \item Requires: numpy, Python 2.4+ (Win/OS X/Linux)
      \item Support via mailing list
    \end{itemize}
    \column{0.25\textwidth}
      \includegraphics[width=\linewidth]{Logo/PyCUDA}
  \end{columns}
\end{frame}


\begin{frame}
\frametitle{Introducing... OpenCL and PyOpenCL}
\reference{\url{http://mathema.tician.de/software/pyopencl}}
  \begin{columns}[c]
    \column{0.75\textwidth}
    \begin{itemize}
      \item OpenCL: the Open Computing Language
      \begin{itemize}
        \item Vendor-independence
        \item Single abstraction works well for GPUs, CPUs
        \item A JIT C compiler baked into a library
        \item Intel's future integrated GPUs will also support it (more ubiquitous, available in the cloud)
      \end{itemize}
      \item PyOpenCL: the ``PyCUDA'' for OpenCL
      \begin{itemize}
        \item Complete, mature API wrapper
        \item Has: Arrays, element-wise operations, ...
        \item \url{http://mathema.tician.de/software/pyopencl}
      \end{itemize}
    \end{itemize}
    \column{0.25\textwidth}
      \includegraphics[width=\linewidth]{Logo/opencl-logo}
  \end{columns}
\end{frame}


\begin{frame}[fragile]
\frametitle{OpenCL: Same flavor, different recipe}
\reference{\url{http://mathema.tician.de/software/pyopencl}}
\begin{lstlisting}
import pyopencl as cl , numpy     # import PyOpenCL module

a = numpy.random.rand(50000).astype(numpy.float32)  # create a buffer on host

ctx = cl.create_some_context()    # create an OpenCL context
queue = cl.CommandQueue(ctx)

# create a buffer on device and copy the data in host buffer to device buffer
a_buf = cl.Buffer(ctx , cl.mem_flags.READ_WRITE, size=a.nbytes) 
cl.enqueue_write_buffer(queue, a_buf , a)

# specify an OpenCL kernel function
prg = cl.Program(ctx, """   
      __kernel void twice(__global float *a)
      {
          int gid = get_global_id (0);
          a[gid] *= 2;
      }
      """).build()

# run an OpenCL kernel function
prg.twice(queue, a.shape, None, a_buf).wait()
\end{lstlisting}
\end{frame}



\begin{frame}
\frametitle{Metaprogramming}
\reference{\url{http://mathema.tician.de/software/pycuda}}
  \begin{columns}[c]
    \column{0.25\textwidth}
      \includegraphics[width=\linewidth]{Diagrams/metaprogramming}
    \column{0.75\textwidth}
    \begin{itemize}
      \item GPU code does not need to be a compile-time constant
      \item Code is data - it wants to be reasoned about at run time
      \item Automated tuning
      \item Data types
      \item Specialized code for given problem
      \item Constants faster than variables
      \item Loop unrolling
    \end{itemize}
  \end{columns}
\end{frame}


\begin{frame}
\frametitle{Showcase...}
\end{frame}

