<!-- HTML header for doxygen 1.8.3.1-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.4"/>
<title>CUB: Main Page</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
  // On DOM ready, preselect search filter 0 ("All") in the doxygen search box.
  $(document).ready(function() { searchBox.OnSelectItem(0); });
</script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="extra_stylesheet.css" rel="stylesheet" type="text/css"/>
<link rel="shortcut icon" href="favicon.ico" type="image/x-icon" />
<script type="text/javascript">
  // Google Analytics (legacy ga.js) asynchronous tracking snippet.
  var _gaq = _gaq || [];
  _gaq.push(['_setAccount', 'UA-38890655-1']);  // GA property for this site
  _gaq.push(['_trackPageview']);
  (function() {
    // Inject the ga.js loader asynchronously, matching the page's protocol
    // (ssl.google-analytics.com for https, www.google-analytics.com for http).
    var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
    ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
    // Insert before the first existing <script> so loading does not block rendering.
    var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
  })();
</script>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 56px;">
  <td style="padding-left: 0.5em;">
   <div id="projectname">CUB
   </div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.4 -->
<script type="text/javascript">
// Global search widget instance; the "searchBox" global is referenced by the
// inline mouse/keyboard handlers on the search UI elements later in this page.
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li class="current"><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="modules.html"><span>Modules</span></a></li>
      <li><a href="annotated.html"><span>Classes</span></a></li>
      <li>
        <div id="MSearchBox" class="MSearchBoxInactive">
        <span class="left">
          <img id="MSearchSelect" src="search/mag_sel.png"
               onmouseover="return searchBox.OnSearchSelectShow()"
               onmouseout="return searchBox.OnSearchSelectHide()"
               alt=""/>
          <input type="text" id="MSearchField" value="Search" accesskey="S"
               onfocus="searchBox.OnSearchFieldFocus(true)" 
               onblur="searchBox.OnSearchFieldFocus(false)" 
               onkeyup="searchBox.OnSearchFieldChange(event)"/>
          </span><span class="right">
            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
          </span>
        </div>
      </li>
    </ul>
  </div>
</div><!-- top -->
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Namespaces</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark">&#160;</span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(6)"><span class="SelectionMark">&#160;</span>Typedefs</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(7)"><span class="SelectionMark">&#160;</span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(8)"><span class="SelectionMark">&#160;</span>Enumerator</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(9)"><span class="SelectionMark">&#160;</span>Groups</a></div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0" 
        name="MSearchResults" id="MSearchResults">
</iframe>
</div>

<div class="header">
  <div class="headertitle">
<div class="title">CUB Documentation</div>  </div>
</div><!--header-->
<div class="contents">
<div class="toc"><h3>Table of Contents</h3>
<ul><li class="level1"><a href="#sec1">(1) What is CUB?</a><ul><li class="level2"><a href="#sec1sec1">1.1 Collective Primitives</a></li>
<li class="level2"><a href="#sec1sec2">1.2 Design Motivation</a></li>
</ul>
</li>
<li class="level1"><a href="#sec2">(2) An Example (block-sorting)</a></li>
<li class="level1"><a href="#sec3">(3) How is CUB different than Thrust?</a></li>
<li class="level1"><a href="#sec4">(4) Why do you need CUB?</a></li>
<li class="level1"><a href="#sec5">(5) How do CUB collectives work?</a><ul><li class="level2"><a href="#sec5sec1">5.1 Template Specialization</a></li>
<li class="level2"><a href="#sec5sec2">5.2 Reflective Class Interfaces</a></li>
<li class="level2"><a href="#sec5sec3">5.3 Tuning and Adaptation</a></li>
<li class="level2"><a href="#sec5sec4">5.4 Mapping data onto threads</a></li>
</ul>
</li>
<li class="level1"><a href="#sec6">(6) Recent News</a></li>
<li class="level1"><a href="#sec7">(7) Contributors</a></li>
<li class="level1"><a href="#sec8">(8) Open Source License</a></li>
</ul>
</div>
<div class="textblock"> 
<a href="http://research.nvidia.com"><img src="nvresearch.png" style="position:relative; bottom:-10px; border:0px;"/></a>
&nbsp;&nbsp;
<a href="http://research.nvidia.com"><em>NVIDIA Research</em></a>
<br>
<a href="https://github.com/NVlabs/cub"><img src="github-icon-747d8b799a48162434b2c0595ba1317e.png" style="position:relative; bottom:-10px; border:0px;"/></a>
&nbsp;&nbsp;
<a href="https://github.com/NVlabs/cub"><em>Browse or fork CUB at GitHub</em></a>
<br>
<a href="http://groups.google.com/group/cub-users"><img src="groups-icon.png" style="position:relative; bottom:-10px; border:0px;"/></a>
&nbsp;&nbsp;
<a href="http://groups.google.com/group/cub-users"><em>Join the cub-users discussion forum</em></a>
<br>
<a href="download_cub.html"><img src="download-icon.png" style="position:relative; bottom:-10px; border:0px;"/></a>
&nbsp;&nbsp;
<a href="download_cub.html"><em>Download CUB v1.0.2 (August 23, 2013)</em></a>
<h1><a class="anchor" id="sec1"></a>
(1) What is CUB?</h1>
<dl class="section user"><dt></dt><dd>CUB provides state-of-the-art, reusable software components for every layer of the CUDA programming model:<ul>
<li><a href="group___device_module.html"><b><em>Device-wide primitives</em></b></a><ul>
<li>Sort, prefix scan, reduction, histogram, etc.</li>
<li>Compatible with CUDA dynamic parallelism</li>
</ul>
</li>
<li><a href="group___block_module.html"><b><em>Block-wide "collective" primitives</em></b></a><ul>
<li>I/O, sort, prefix scan, reduction, histogram, etc.</li>
<li>Compatible with arbitrary thread block sizes and types</li>
</ul>
</li>
<li><a href="group___warp_module.html"><b><em>Warp-wide "collective" primitives</em></b></a><ul>
<li>Warp-wide prefix scan, reduction, etc.</li>
<li>Safe and architecture-specific</li>
</ul>
</li>
<li><a href="group___thread_module.html"><b><em>Thread and resource utilities</em></b></a><ul>
<li>PTX intrinsics, device reflection, texture-caching iterators, caching memory allocators, etc.</li>
</ul>
</li>
</ul>
</dd></dl>
<h2><a class="anchor" id="sec1sec1"></a>
1.1 Collective Primitives</h2>
<dl class="section user"><dt></dt><dd>As a SIMT programming model, CUDA engenders both <em><b>scalar</b></em> and <em><b>collective</b></em> software interfaces. Traditional software interfaces are <em>scalar</em> : a single thread invokes a library routine to perform some operation (which may include spawning parallel subtasks). Alternatively, a <em>collective</em> interface is entered simultaneously by a group of parallel threads to perform some cooperative operation. Collective SIMT primitives are essential for constructing performance-portable kernels for use in higher level software abstractions, libraries, domain-specific languages, etc.</dd></dl>
<dl class="section user"><dt></dt><dd><div class="image">
<img src="cub_overview.png" alt="cub_overview.png"/>
</div>
 <div class="centercaption">Orientation of <em>collective</em> primitives within the CUDA software stack</div></dd></dl>
<dl class="section user"><dt></dt><dd>CUB's collective primitives are not bound to any particular width of parallelism or to any particular data type. This allows them to be:<ul>
<li><b><em>Adaptable</em></b> to fit the needs of the enclosing kernel computation</li>
<li><b><em>Trivially tunable</em></b> to different grain sizes (threads per block, items per thread, etc.)</li>
</ul>
</dd></dl>
<dl class="section user"><dt></dt><dd>Thus CUB is <a href="index.html"><em>CUDA Unbound</em></a>.</dd></dl>
<h2><a class="anchor" id="sec1sec2"></a>
1.2 Design Motivation</h2>
<dl class="section user"><dt></dt><dd>CUB is inspired by the following goals:<ul>
<li><em><b>Absolute performance</b></em>. CUB primitives are specialized and tuned to best match the features and capabilities of each CUDA architecture.</li>
<li><em><b>Enhanced programmer productivity</b></em>. CUB primitives allow developers to quickly compose sequences of complex parallel operations in both CUDA kernel code and CUDA host code.</li>
<li><em><b>Enhanced tunability</b></em>. CUB primitives allow developers to quickly change grain sizes (threads per block, items per thread, etc.).</li>
<li><em><b>Reduced maintenance burden</b></em>. CUB provides a SIMT software abstraction layer over the diversity of CUDA hardware. With CUB, applications can enjoy performance-portability without intensive and costly rewriting or porting efforts.</li>
</ul>
</dd></dl>
<h1><a class="anchor" id="sec2"></a>
(2) An Example (block-sorting)</h1>
<dl class="section user"><dt></dt><dd>The following code snippet presents a CUDA kernel in which each block of 128 threads will collectively load, sort, and store its own segment of 2048 integer keys:</dd></dl>
<dl class="section user"><dt></dt><dd><div class="fragment"><div class="line"><span class="preprocessor">#include &lt;<a class="code" href="cub_8cuh.html">cub/cub.cuh</a>&gt;</span></div>
<div class="line"></div>
<div class="line"><span class="comment">// Block-sorting CUDA kernel</span></div>
<div class="line">__global__ <span class="keywordtype">void</span> BlockSortKernel(<span class="keywordtype">int</span> *d_in, <span class="keywordtype">int</span> *d_out)</div>
<div class="line">{</div>
<div class="line">    <span class="keyword">using namespace </span>cub;</div>
<div class="line"></div>
<div class="line">    <span class="comment">// Specialize BlockRadixSort, BlockLoad, and BlockStore for 128 threads </span></div>
<div class="line">    <span class="comment">// owning 16 integer items each</span></div>
<div class="line">    <span class="keyword">typedef</span> <a class="code" href="classcub_1_1_block_radix_sort.html" title="The cub::BlockRadixSort class provides collective methods for sorting items partitioned across a CUDA...">BlockRadixSort&lt;int, 128, 16&gt;</a>                     BlockRadixSort;</div>
<div class="line">    <span class="keyword">typedef</span> <a class="code" href="classcub_1_1_block_load.html" title="The BlockLoad class provides collective data movement methods for loading a linear segment of items f...">BlockLoad&lt;int*, 128, 16, BLOCK_LOAD_TRANSPOSE&gt;</a>   BlockLoad;</div>
<div class="line">    <span class="keyword">typedef</span> <a class="code" href="classcub_1_1_block_store.html" title="The BlockStore class provides collective data movement methods for writing a blocked arrangement of i...">BlockStore&lt;int*, 128, 16, BLOCK_STORE_TRANSPOSE&gt;</a> BlockStore;</div>
<div class="line"></div>
<div class="line">    <span class="comment">// Allocate shared memory</span></div>
<div class="line">    __shared__ <span class="keyword">union </span>{</div>
<div class="line">        <span class="keyword">typename</span> <a class="code" href="structcub_1_1_block_radix_sort_1_1_temp_storage.html" title="The operations exposed by BlockScan require a temporary memory allocation of this nested type for thr...">BlockRadixSort::TempStorage</a>  sort;</div>
<div class="line">        <span class="keyword">typename</span> <a class="code" href="structcub_1_1_block_load_1_1_temp_storage.html" title="The operations exposed by BlockLoad require a temporary memory allocation of this nested type for thr...">BlockLoad::TempStorage</a>       load; </div>
<div class="line">        <span class="keyword">typename</span> <a class="code" href="structcub_1_1_block_store_1_1_temp_storage.html" title="The operations exposed by BlockStore require a temporary memory allocation of this nested type for th...">BlockStore::TempStorage</a>      store; </div>
<div class="line">    } temp_storage; </div>
<div class="line"></div>
<div class="line">    <span class="keywordtype">int</span> block_offset = blockIdx.x * (128 * 16);   <span class="comment">// Offset for this block&#39;s segment</span></div>
<div class="line"></div>
<div class="line">    <span class="comment">// Obtain a segment of 2048 consecutive keys that are blocked across threads</span></div>
<div class="line">    <span class="keywordtype">int</span> thread_keys[16];</div>
<div class="line">    BlockLoad(temp_storage.load).Load(d_in + block_offset, thread_keys);</div>
<div class="line">    __syncthreads();</div>
<div class="line"></div>
<div class="line">    <span class="comment">// Collectively sort the keys</span></div>
<div class="line">    BlockRadixSort(temp_storage.sort).Sort(thread_keys);</div>
<div class="line">    __syncthreads();</div>
<div class="line"></div>
<div class="line">    <span class="comment">// Store the sorted segment </span></div>
<div class="line">    BlockStore(temp_storage.store).Store(d_out + block_offset, thread_keys);</div>
<div class="line">}</div>
</div><!-- fragment --></dd></dl>
<dl class="section user"><dt></dt><dd>Each thread block uses <a class="el" href="classcub_1_1_block_radix_sort.html" title="The cub::BlockRadixSort class provides collective methods for sorting items partitioned across a CUDA...">cub::BlockRadixSort</a> to collectively sort its own input segment. The class is specialized by the data type being sorted, by the number of threads per block, by the number of keys per thread, and implicitly by the targeted compilation architecture.</dd></dl>
<dl class="section user"><dt></dt><dd>The <a class="el" href="classcub_1_1_block_load.html" title="The BlockLoad class provides collective data movement methods for loading a linear segment of items f...">cub::BlockLoad</a> and <a class="el" href="classcub_1_1_block_store.html" title="The BlockStore class provides collective data movement methods for writing a blocked arrangement of i...">cub::BlockStore</a> classes are similarly specialized. Furthermore, to provide coalesced accesses to device memory, these primitives are configured to access memory using a striped access pattern (where consecutive threads simultaneously access consecutive items) and then <em>transpose</em> the keys into a <a href="index.html#sec5sec4"><em>blocked arrangement</em></a> of elements across threads.</dd></dl>
<dl class="section user"><dt></dt><dd>Once specialized, these classes expose opaque <code>TempStorage</code> member types. The thread block uses these storage types to statically allocate the union of shared memory needed by the thread block. (Alternatively these storage types could be aliased to global memory allocations).</dd></dl>
<h1><a class="anchor" id="sec3"></a>
(3) How is CUB different than Thrust?</h1>
<dl class="section user"><dt></dt><dd>CUB and <a href="http://thrust.github.com/"><b><em>Thrust</em></b></a> have some similarities in that they both provide device-wide primitives for CUDA. However, the Thrust abstractions are agnostic of any particular implementation (e.g., CUDA, TBB, OpenMP, sequential CPU, etc.). While Thrust has a "backend" for CUDA devices, Thrust interfaces themselves are not CUDA-specific and do not explicitly expose CUDA-specific details (e.g., <code>cudaStream_t</code> parameters).</dd></dl>
<dl class="section user"><dt></dt><dd>CUB, on the other hand, is slightly lower-level than Thrust. CUB is CUDA-specific and its interfaces explicitly accommodate CUDA-specific features. Furthermore, CUB is also a library of SIMT collective primitives for block-wide and warp-wide kernel programming. CUB is complementary to Thrust in that it can be used to implement portions of Thrust's CUDA backend. In fact, the CUB project arose out of a maintenance need for easier performance-portability within Thrust.</dd></dl>
<h1><a class="anchor" id="sec4"></a>
(4) Why do you need CUB?</h1>
<dl class="section user"><dt></dt><dd>Constructing, tuning, and maintaining kernel code is perhaps the most challenging, time-consuming aspect of CUDA programming. CUDA kernel software is where the complexity of parallelism is expressed. Programmers must reason about deadlock, livelock, synchronization, race conditions, shared memory layout, plurality of state, granularity, throughput, latency, memory bottlenecks, etc.</dd></dl>
<dl class="section user"><dt></dt><dd>However, with the exception of CUB, there are few (if any) software libraries of reusable kernel primitives. In the CUDA ecosystem, CUB is unique in this regard. As a <a href="http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#hardware-implementation">SIMT</a> library and software abstraction layer, CUB provides:<ol type="1">
<li><b><em>Simplicity of composition</em></b>. CUB primitives can be simply sequenced and nested in kernel code. For example, <a class="el" href="classcub_1_1_block_radix_sort.html" title="The cub::BlockRadixSort class provides collective methods for sorting items partitioned across a CUDA...">cub::BlockRadixSort</a> is constructed from <a class="el" href="classcub_1_1_block_exchange.html" title="The BlockExchange class provides collective methods for rearranging data partitioned across a CUDA th...">cub::BlockExchange</a> and cub::BlockRadixRank. The latter is composed of <a class="el" href="classcub_1_1_block_scan.html" title="The BlockScan class provides collective methods for computing a parallel prefix sum/scan of items par...">cub::BlockScan</a> which incorporates <a class="el" href="classcub_1_1_warp_scan.html" title="The WarpScan class provides collective methods for computing a parallel prefix scan of items partitio...">cub::WarpScan</a>.</li>
<li><b><em>High performance</em></b>. CUB simplifies high performance kernel development by taking care to implement the state-of-the-art in parallel algorithms. Expert code should be reused rather than reimplemented.</li>
<li><b><em>Performance portability</em></b>. CUB primitives are specialized to match the diversity of NVIDIA hardware, continuously evolving to accommodate new features and instructions. For example, CUB reductions and prefix scans employ warp-shuffle on Kepler GPUs. Code should be recompiled rather than hand-ported.</li>
<li><b><em>Simplicity of performance tuning</em></b>:<ul>
<li><em>Variant tuning</em>. Most CUB primitives support alternative algorithmic strategies. For example, <a class="el" href="classcub_1_1_block_histogram.html" title="The BlockHistogram class provides collective methods for constructing block-wide histograms from data...">cub::BlockHistogram</a> is parameterized to implement either an atomic-based approach or a sorting-based approach. (The latter provides uniform performance regardless of input distribution.)</li>
<li><em>Kernel+library co-optimization</em>. Most CUB primitives support arbitrary granularity (threads per block, items per thread, etc.). When the enclosing kernel is similarly parameterizable, a configuration can be found that optimally accommodates their combined register and shared memory pressure.</li>
</ul>
</li>
<li><b><em>Robustness and durability</em></b>. CUB just works. CUB primitives are designed to function properly for arbitrary data types and widths of parallelism (not just for the built-in C++ types or for powers-of-two threads per block).</li>
<li><b><em>A path for language evolution</em></b>. CUB primitives are designed to easily accommodate new features in the CUDA programming model, e.g., thread subgroups and named barriers, dynamic shared memory allocators, etc.</li>
</ol>
</dd></dl>
<h1><a class="anchor" id="sec5"></a>
(5) How do CUB collectives work?</h1>
<dl class="section user"><dt></dt><dd>Central to the design of CUB are two programming idioms:<ul>
<li><b><em>Generic programming</em></b>. C++ templates provide the flexibility and adaptive code generation needed for CUB primitives to be useful, reusable, and fast in arbitrary kernel settings.</li>
<li><b><em>Reflective class interfaces</em></b>. CUB collectives statically export their resource requirements (e.g., shared memory size and layout) for a given specialization, which allows compile-time tuning decisions and resource allocation.</li>
</ul>
</dd></dl>
<h2><a class="anchor" id="sec5sec1"></a>
5.1 Template Specialization</h2>
<dl class="section user"><dt></dt><dd>We use template parameters to specialize CUB primitives for the particular problem setting at hand. Until compile time, CUB primitives are not bound to any particular:<ul>
<li>Data type (int, float, double, etc.)</li>
<li>Width of parallelism (threads array size)</li>
<li>Grain size (data items per thread)</li>
<li>Underlying processor (special instructions, warp size, rules for bank conflicts, etc.)</li>
<li>Tuning configuration (e.g., latency vs. throughput, algorithm selection, etc.)</li>
</ul>
</dd></dl>
<h2><a class="anchor" id="sec5sec2"></a>
5.2 Reflective Class Interfaces</h2>
<dl class="section user"><dt></dt><dd>Unlike traditional function-oriented interfaces, CUB exposes its collective primitives as templated C++ classes. The resource requirements for a specific parameterization are reflectively advertised as members of the class. The resources can then be statically or dynamically allocated, aliased to global or shared memory, etc. The following illustrates a CUDA kernel fragment performing a collective prefix sum across the threads of a thread block:</dd></dl>
<dl class="section user"><dt></dt><dd><div class="fragment"><div class="line"><span class="preprocessor">#include &lt;<a class="code" href="cub_8cuh.html">cub/cub.cuh</a>&gt;</span></div>
<div class="line"></div>
<div class="line"><span class="comment">// Specialize BlockScan for 128 threads on integer types</span></div>
<div class="line"><span class="keyword">typedef</span> <a class="code" href="classcub_1_1_block_scan.html" title="The BlockScan class provides collective methods for computing a parallel prefix sum/scan of items par...">cub::BlockScan&lt;int, 128&gt;</a> BlockScan;</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Allocate shared memory for BlockScan</span></div>
<div class="line">__shared__ <span class="keyword">typename</span> BlockScan::TempStorage scan_storage;</div>
<div class="line"></div>
<div class="line"><span class="comment">// Obtain a segment of consecutive items that are blocked across threads</span></div>
<div class="line"><span class="keywordtype">int</span> thread_data_in[4];</div>
<div class="line"><span class="keywordtype">int</span> thread_data_out[4];</div>
<div class="line">...</div>
<div class="line"></div>
<div class="line"><span class="comment">// Perform an exclusive block-wide prefix sum</span></div>
<div class="line"><a class="code" href="classcub_1_1_block_scan.html#a982e1407d00b704c3046cd72c48acabb" title="Collective constructor for 1D thread blocks using a private static allocation of shared memory as tem...">BlockScan</a>(scan_storage).ExclusiveSum(thread_data_in, thread_data_out);</div>
</div><!-- fragment --></dd></dl>
<dl class="section user"><dt></dt><dd>Furthermore, the CUB interface is designed to separate parameter fields by concerns. CUB primitives have three distinct parameter fields:<ol type="1">
<li><b><em>Static template parameters</em></b>. These are constants that will dictate the storage layout and the unrolling of algorithmic steps (e.g., the input data type and the number of block threads), and are used to specialize the class.</li>
<li><b><em>Constructor parameters</em></b>. These are optional parameters regarding inter-thread communication (e.g., storage allocation, thread-identifier mapping, named barriers, etc.), and are orthogonal to the functions exposed by the class.</li>
<li><b><em>Formal method parameters</em></b>. These are the operational inputs/outputs for the various functions exposed by the class.</li>
</ol>
</dd></dl>
<dl class="section user"><dt></dt><dd>This allows CUB types to easily accommodate new programming model features (e.g., named barriers, memory allocators, etc.) without incurring a combinatorial growth of interface methods.</dd></dl>
<h2><a class="anchor" id="sec5sec3"></a>
5.3 Tuning and Adaptation</h2>
<dl class="section user"><dt></dt><dd>This style of flexible interface simplifies performance tuning. Most CUB primitives support alternative algorithmic strategies that can be statically targeted by a compiler-based or JIT-based autotuner. For example, <a class="el" href="classcub_1_1_block_histogram.html" title="The BlockHistogram class provides collective methods for constructing block-wide histograms from data...">cub::BlockHistogram</a> is parameterized to implement either an atomic-based approach or a sorting-based approach. Algorithms are also tunable over parameters such as thread count and grain size as well. Taken together, each of the CUB algorithms provides a fairly rich tuning space.</dd></dl>
<dl class="section user"><dt></dt><dd>Whereas conventional libraries are optimized offline and in isolation, CUB provides interesting opportunities for whole-program optimization. For example, each CUB primitive is typically parameterized by threads-per-block and items-per-thread, both of which affect the underlying algorithm's efficiency and resource requirements. When the enclosing kernel is similarly parameterized, the coupled CUB primitives adjust accordingly. This enables autotuners to search for a single configuration that maximizes the performance of the entire kernel for a given set of hardware resources.</dd></dl>
<h2><a class="anchor" id="sec5sec4"></a>
5.4 Mapping data onto threads</h2>
<dl class="section user"><dt></dt><dd>CUDA kernels are often designed such that each thread block is assigned a segment of data items for processing.</dd></dl>
<dl class="section user"><dt></dt><dd><div class="image">
<img src="tile.png" alt="tile.png"/>
</div>
 <div class="centercaption">Segment of eight ordered data items</div></dd></dl>
<dl class="section user"><dt></dt><dd>When the tile size equals the thread block size, the mapping of data onto threads is straightforward (one datum per thread). However, there are often performance advantages for processing more than one datum per thread. For these scenarios, CUB primitives will specify which of the following partitioning alternatives they accommodate:</dd></dl>
<table  border="0px" cellpadding="0px" cellspacing="0px">
<tr>
<td><dl class="section user"><dt></dt><dd><ul>
<li><b><em>Blocked arrangement</em></b>. The aggregate tile of items is partitioned evenly across threads in "blocked" fashion with thread<sub><em>i</em></sub> owning the <em>i</em><sup>th</sup> segment of consecutive elements. Blocked arrangements are often desirable for algorithmic benefits (where long sequences of items can be processed sequentially within each thread).  </li>
</ul>
</dd></dl>
</td><td><dl class="section user"><dt></dt><dd><div class="image">
<img src="blocked.png" alt="blocked.png"/>
</div>
 <div class="centercaption"><em>Blocked</em> arrangement across four threads <br/>
(emphasis on items owned by <em>thread</em><sub>0</sub>)</div>  </dd></dl>
</td></tr>
<tr>
<td><dl class="section user"><dt></dt><dd><ul>
<li><b><em>Striped arrangement</em></b>. The aggregate tile of items is partitioned across threads in "striped" fashion, i.e., the <code>ITEMS_PER_THREAD</code> items owned by each thread have logical stride <code>BLOCK_THREADS</code> between them. Striped arrangements are often desirable for data movement through global memory (where <a href="http://docs.nvidia.com/cuda/cuda-c-best-practices-guide/#coalesced-access-global-memory">read/write coalescing</a> is an important performance consideration).  </li>
</ul>
</dd></dl>
</td><td><dl class="section user"><dt></dt><dd><div class="image">
<img src="striped.png" alt="striped.png"/>
</div>
 <div class="centercaption"><em>Striped</em> arrangement across four threads <br/>
(emphasis on items owned by <em>thread</em><sub>0</sub>)</div>  </dd></dl>
</td></tr>
</table>
<dl class="section user"><dt></dt><dd>The benefits of processing multiple items per thread (a.k.a., <em>register blocking</em>, <em>granularity coarsening</em>, etc.) include:<ul>
<li>Algorithmic efficiency. Sequential work over multiple items in thread-private registers is cheaper than synchronized, cooperative work through shared memory spaces.</li>
<li>Data occupancy. The number of items that can be resident on-chip in thread-private register storage is often greater than the number of schedulable threads.</li>
<li>Instruction-level parallelism. Multiple items per thread also facilitates greater ILP for improved throughput and utilization.</li>
</ul>
</dd></dl>
<dl class="section user"><dt></dt><dd>Finally, <a class="el" href="classcub_1_1_block_exchange.html" title="The BlockExchange class provides collective methods for rearranging data partitioned across a CUDA th...">cub::BlockExchange</a> provides operations for converting between blocked and striped arrangements.</dd></dl>
<h1><a class="anchor" id="sec6"></a>
(6) Recent News</h1>
<dl class="section user"><dt></dt><dd><table class="doxtable">
<tr>
<td style="white-space: nowrap; vertical-align:text-top;">08/23/2013<br/>
 <a href="https://github.com/NVlabs/cub/archive/1.0.2.zip">CUB v1.0.2 (update)</a> </td><td style="vertical-align:text-top;"><ul>
<li>Cleaned up unnecessary and/or missing include statements from .cuh files. You can now safely include specific .cuh files (instead of the umbrella <a class="el" href="cub_8cuh.html">cub/cub.cuh</a>)</li>
<li>Documentation corrections</li>
<li>Minor bugfixes for <a class="el" href="classcub_1_1_block_histogram.html" title="The BlockHistogram class provides collective methods for constructing block-wide histograms from data...">cub::BlockHistogram</a></li>
<li>See the <a href="https://github.com/NVlabs/cub/blob/master/CHANGE_LOG.TXT">change-log</a> for further details. </li>
</ul>
<p class="endtd"></p>
</td></tr>
<tr>
<td style="white-space: nowrap; vertical-align:text-top;">08/08/2013<br/>
 <a href="https://github.com/NVlabs/cub/archive/1.0.1.zip">CUB v1.0.1 (primary)</a> </td><td style="vertical-align:text-top;"><p class="starttd">This release focuses on (1) new and updated primitives, (2) extensive API documentation, and (3) aggressive regression testing. Notable additions include:</p>
<ul>
<li><a class="el" href="structcub_1_1_device_radix_sort.html" title="DeviceRadixSort provides operations for computing a device-wide, parallel radix sort across data item...">cub::DeviceRadixSort</a>. This implementation is an overhaul of our previous efforts in the B40C and MGPU projects. It is constructed from tunable CUB block-level primitives and is up to 1.7x faster than Thrust v1.7 on GTX Titan (up to 2.15 billion 32b keys/s)</li>
<li><a class="el" href="structcub_1_1_device_scan.html" title="DeviceScan provides operations for computing a device-wide, parallel prefix scan across data items re...">cub::DeviceScan</a>. This implementation is a new 1-pass decomposition featuring <em>"adaptive look-back"</em> for mitigating the latencies of otherwise serial dependences between thread blocks. It is up to 3.2x faster than Thrust v1.7 on GTX Titan (up to 28.9 billion 32b items/s)</li>
</ul>
<p>This release also contains significant updates to CUB's suite of block-wide and warp-wide primitives, including:</p>
<ul>
<li>Improved support for the Kepler instruction set (e.g., <code>SHFL</code> is used for warp-wide exchanges of all types, including those larger than 32b)</li>
<li>An updated interface design for collective primitives (specialize::construct::invoke) that avoids a combinatorial product of library entry-points</li>
</ul>
<p>See the <a href="https://github.com/NVlabs/cub/blob/master/CHANGE_LOG.TXT">change-log</a> for further details. </p>
<p class="endtd"></p>
</td></tr>
<tr>
<td style="white-space: nowrap; vertical-align:text-top;">05/07/2013<br/>
 <a href="https://github.com/NVlabs/cub/archive/0.9.4.zip">CUB v0.9.4 (update)</a> </td><td style="vertical-align:text-top;"><ul>
<li>Compilation fixes for several primitives on older architectures.</li>
<li>Introduced several new device-wide and block-wide primitives, including 256-bin histogram.</li>
<li>Misc. cosmetic and bug fixes.</li>
<li>See the <a href="https://github.com/NVlabs/cub/blob/master/CHANGE_LOG.TXT">change-log</a> for further details. </li>
</ul>
<p class="endtd"></p>
</td></tr>
<tr>
<td style="white-space: nowrap; vertical-align:text-top;">04/04/2013<br/>
 <a href="https://github.com/NVlabs/cub/archive/0.9.2.zip">CUB v0.9.2 (update)</a> </td><td style="vertical-align:text-top;"><ul>
<li>Minor cosmetic, feature, and compilation updates.</li>
<li>See the <a href="https://github.com/NVlabs/cub/blob/master/CHANGE_LOG.TXT">change-log</a> for further details. </li>
</ul>
<p class="endtd"></p>
</td></tr>
<tr>
<td style="white-space: nowrap; vertical-align:text-top;">03/07/2013<br/>
 <a href="https://github.com/NVlabs/cub/archive/0.9.zip">CUB v0.9 (preview)</a> </td><td style="vertical-align:text-top;"><ul>
<li>CUB is the first durable, high-performance library of cooperative threadblock, warp, and thread primitives for CUDA kernel programming.  </li>
</ul>
</td></tr>
</table>
</dd></dl>
<h1><a class="anchor" id="sec7"></a>
(7) Contributors</h1>
<dl class="section user"><dt></dt><dd>CUB is developed as an open-source project by <a href="http://research.nvidia.com">NVIDIA Research</a>. The primary contributor is <a href="http://github.com/dumerrill">Duane Merrill</a>.</dd></dl>
<h1><a class="anchor" id="sec8"></a>
(8) Open Source License</h1>
<dl class="section user"><dt></dt><dd>CUB is available under the "New BSD" open-source license:</dd></dl>
<dl class="section user"><dt></dt><dd><div class="fragment"><div class="line">Copyright (c) 2011, Duane Merrill.  All rights reserved.</div>
<div class="line">Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.</div>
<div class="line"></div>
<div class="line">Redistribution and use in source and binary forms, with or without</div>
<div class="line">modification, are permitted provided that the following conditions are met:</div>
<div class="line">   Redistributions of source code must retain the above copyright</div>
<div class="line">      notice, <span class="keyword">this</span> list of conditions and the following disclaimer.</div>
<div class="line">   Redistributions in binary form must reproduce the above copyright</div>
<div class="line">      notice, <span class="keyword">this</span> list of conditions and the following disclaimer in the</div>
<div class="line">      documentation and/or other materials provided with the distribution.</div>
<div class="line">   Neither the name of the NVIDIA CORPORATION nor the</div>
<div class="line">      names of its contributors may be used to endorse or promote products</div>
<div class="line">      derived from <span class="keyword">this</span> software without specific prior written permission.</div>
<div class="line"></div>
<div class="line">THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <span class="stringliteral">&quot;AS IS&quot;</span> AND</div>
<div class="line">ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED</div>
<div class="line">WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE</div>
<div class="line">DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY</div>
<div class="line">DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES</div>
<div class="line">(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;</div>
<div class="line">LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND</div>
<div class="line">ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT</div>
<div class="line">(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS</div>
<div class="line">SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.</div>
</div><!-- fragment --> </dd></dl>
</div></div><!-- contents -->
<!-- HTML footer for doxygen 1.8.3.1-->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated on Fri Aug 23 2013 17:31:13 for CUB by &#160;<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.4
<br/>
&copy; 2013 NVIDIA Corporation
</small></address>
</body>
</html>
