<!-- HTML header for doxygen 1.8.14-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.14"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>Taskflow Handbook</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<link rel="icon" type="image/x-icon" href="favicon.ico" />
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
  $(document).ready(initResizable);
/* @license-end */</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 56px;">
  <td id="projectalign" style="padding-left: 0.5em;">
   <div id="projectname"><a href="https://taskflow.github.io/">Taskflow</a>
   &#160;<span id="projectnumber">3.0.0-Master-Branch</span>
   </div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.14 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
  initMenu('',true,false,'search.php','Search');
  $(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
  <div id="nav-tree">
    <div id="nav-tree-contents">
      <div id="nav-sync" class="sync"></div>
    </div>
  </div>
  <div id="splitbar" style="-moz-user-select:none;" 
       class="ui-resizable-handle">
  </div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(function(){initNavTree('classtf_1_1cudaFlow.html','');});
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0" 
        name="MSearchResults" id="MSearchResults">
</iframe>
</div>

<div class="header">
  <div class="summary">
<a href="#pub-methods">Public Member Functions</a> &#124;
<a href="#friends">Friends</a> &#124;
<a href="classtf_1_1cudaFlow-members.html">List of all members</a>  </div>
  <div class="headertitle">
<div class="title">tf::cudaFlow Class Reference</div>  </div>
</div><!--header-->
<div class="contents">

<p>class for building a CUDA task dependency graph  
 <a href="classtf_1_1cudaFlow.html#details">More...</a></p>

<p><code>#include &lt;<a class="el" href="cuda__flow_8hpp_source.html">cuda_flow.hpp</a>&gt;</code></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:a1926f45a038d8faa9c1b1ee43fd29a93"><td class="memItemLeft" align="right" valign="top"><a id="a1926f45a038d8faa9c1b1ee43fd29a93"></a>
bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a1926f45a038d8faa9c1b1ee43fd29a93">empty</a> () const</td></tr>
<tr class="memdesc:a1926f45a038d8faa9c1b1ee43fd29a93"><td class="mdescLeft">&#160;</td><td class="mdescRight">queries the emptiness of the graph <br /></td></tr>
<tr class="separator:a1926f45a038d8faa9c1b1ee43fd29a93"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ac29d95787db4b622e1458bb64da11264"><td class="memItemLeft" align="right" valign="top"><a id="ac29d95787db4b622e1458bb64da11264"></a>
bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#ac29d95787db4b622e1458bb64da11264">joinable</a> () const</td></tr>
<tr class="memdesc:ac29d95787db4b622e1458bb64da11264"><td class="mdescLeft">&#160;</td><td class="mdescRight">queries if the cudaflow is joinable <br /></td></tr>
<tr class="separator:ac29d95787db4b622e1458bb64da11264"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a30b2e107cb2c90a37f467b28d1b42a74"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a30b2e107cb2c90a37f467b28d1b42a74">noop</a> ()</td></tr>
<tr class="memdesc:a30b2e107cb2c90a37f467b28d1b42a74"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a no-operation task  <a href="#a30b2e107cb2c90a37f467b28d1b42a74">More...</a><br /></td></tr>
<tr class="separator:a30b2e107cb2c90a37f467b28d1b42a74"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a060e1c96111c2134ce0f896420a42cd0"><td class="memTemplParams" colspan="2">template&lt;typename C &gt; </td></tr>
<tr class="memitem:a060e1c96111c2134ce0f896420a42cd0"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a060e1c96111c2134ce0f896420a42cd0">host</a> (C &amp;&amp;callable)</td></tr>
<tr class="memdesc:a060e1c96111c2134ce0f896420a42cd0"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a host execution task  <a href="#a060e1c96111c2134ce0f896420a42cd0">More...</a><br /></td></tr>
<tr class="separator:a060e1c96111c2134ce0f896420a42cd0"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:adb731be71bdd436dfb5e36e6213a9a17"><td class="memTemplParams" colspan="2">template&lt;typename F , typename... ArgsT&gt; </td></tr>
<tr class="memitem:adb731be71bdd436dfb5e36e6213a9a17"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#adb731be71bdd436dfb5e36e6213a9a17">kernel</a> (dim3 g, dim3 b, size_t s, F &amp;&amp;f, ArgsT &amp;&amp;... args)</td></tr>
<tr class="memdesc:adb731be71bdd436dfb5e36e6213a9a17"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a kernel task  <a href="#adb731be71bdd436dfb5e36e6213a9a17">More...</a><br /></td></tr>
<tr class="separator:adb731be71bdd436dfb5e36e6213a9a17"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a4a839dbaa01237a440edfebe8faf4e5b"><td class="memTemplParams" colspan="2">template&lt;typename F , typename... ArgsT&gt; </td></tr>
<tr class="memitem:a4a839dbaa01237a440edfebe8faf4e5b"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a4a839dbaa01237a440edfebe8faf4e5b">kernel_on</a> (int d, dim3 g, dim3 b, size_t s, F &amp;&amp;f, ArgsT &amp;&amp;... args)</td></tr>
<tr class="memdesc:a4a839dbaa01237a440edfebe8faf4e5b"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a kernel task on a device  <a href="#a4a839dbaa01237a440edfebe8faf4e5b">More...</a><br /></td></tr>
<tr class="separator:a4a839dbaa01237a440edfebe8faf4e5b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a079ca65da35301e5aafd45878a19e9d2"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a079ca65da35301e5aafd45878a19e9d2">memset</a> (void *dst, int v, size_t count)</td></tr>
<tr class="memdesc:a079ca65da35301e5aafd45878a19e9d2"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a memset task  <a href="#a079ca65da35301e5aafd45878a19e9d2">More...</a><br /></td></tr>
<tr class="separator:a079ca65da35301e5aafd45878a19e9d2"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad37637606f0643f360e9eda1f9a6e559"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#ad37637606f0643f360e9eda1f9a6e559">memcpy</a> (void *tgt, const void *src, size_t bytes)</td></tr>
<tr class="memdesc:ad37637606f0643f360e9eda1f9a6e559"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a memcpy task  <a href="#ad37637606f0643f360e9eda1f9a6e559">More...</a><br /></td></tr>
<tr class="separator:ad37637606f0643f360e9eda1f9a6e559"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a40172fac4464f6d805f75921ea3c2a3b"><td class="memTemplParams" colspan="2">template&lt;typename T , std::enable_if_t&lt; is_pod_v&lt; T &gt; &amp;&amp;(sizeof(T)==1||sizeof(T)==2||sizeof(T)==4), void &gt; *  = nullptr&gt; </td></tr>
<tr class="memitem:a40172fac4464f6d805f75921ea3c2a3b"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a40172fac4464f6d805f75921ea3c2a3b">zero</a> (T *dst, size_t count)</td></tr>
<tr class="memdesc:a40172fac4464f6d805f75921ea3c2a3b"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a zero task that zeroes a typed memory block  <a href="#a40172fac4464f6d805f75921ea3c2a3b">More...</a><br /></td></tr>
<tr class="separator:a40172fac4464f6d805f75921ea3c2a3b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a21d4447bc834f4d3e1bb4772c850d090"><td class="memTemplParams" colspan="2">template&lt;typename T , std::enable_if_t&lt; is_pod_v&lt; T &gt; &amp;&amp;(sizeof(T)==1||sizeof(T)==2||sizeof(T)==4), cudaTask &gt; *  = nullptr&gt; </td></tr>
<tr class="memitem:a21d4447bc834f4d3e1bb4772c850d090"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a21d4447bc834f4d3e1bb4772c850d090">fill</a> (T *dst, T value, size_t count)</td></tr>
<tr class="memdesc:a21d4447bc834f4d3e1bb4772c850d090"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a fill task that fills a typed memory block with a value  <a href="#a21d4447bc834f4d3e1bb4772c850d090">More...</a><br /></td></tr>
<tr class="separator:a21d4447bc834f4d3e1bb4772c850d090"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:af03e04771b655f9e629eb4c22e19b19f"><td class="memTemplParams" colspan="2">template&lt;typename T , std::enable_if_t&lt;!std::is_same_v&lt; T, void &gt;, void &gt; *  = nullptr&gt; </td></tr>
<tr class="memitem:af03e04771b655f9e629eb4c22e19b19f"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#af03e04771b655f9e629eb4c22e19b19f">copy</a> (T *tgt, const T *src, size_t num)</td></tr>
<tr class="memdesc:af03e04771b655f9e629eb4c22e19b19f"><td class="mdescLeft">&#160;</td><td class="mdescRight">creates a copy task of typed data  <a href="#af03e04771b655f9e629eb4c22e19b19f">More...</a><br /></td></tr>
<tr class="separator:af03e04771b655f9e629eb4c22e19b19f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a99358da15e3bdfa1faabb3e326130e1f"><td class="memTemplParams" colspan="2">template&lt;typename P &gt; </td></tr>
<tr class="memitem:a99358da15e3bdfa1faabb3e326130e1f"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a99358da15e3bdfa1faabb3e326130e1f">offload_until</a> (P &amp;&amp;predicate)</td></tr>
<tr class="memdesc:a99358da15e3bdfa1faabb3e326130e1f"><td class="mdescLeft">&#160;</td><td class="mdescRight">offloads the cudaFlow onto a GPU and repeatedly runs it until the predicate becomes true  <a href="#a99358da15e3bdfa1faabb3e326130e1f">More...</a><br /></td></tr>
<tr class="separator:a99358da15e3bdfa1faabb3e326130e1f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ac2269fd7dc8ca04a294a718204703dad"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#ac2269fd7dc8ca04a294a718204703dad">offload_n</a> (size_t N)</td></tr>
<tr class="memdesc:ac2269fd7dc8ca04a294a718204703dad"><td class="mdescLeft">&#160;</td><td class="mdescRight">offloads the cudaFlow and executes it the given number of times  <a href="#ac2269fd7dc8ca04a294a718204703dad">More...</a><br /></td></tr>
<tr class="separator:ac2269fd7dc8ca04a294a718204703dad"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a85789ed8a1f47704cf1f1a2b98969444"><td class="memItemLeft" align="right" valign="top"><a id="a85789ed8a1f47704cf1f1a2b98969444"></a>
void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a85789ed8a1f47704cf1f1a2b98969444">offload</a> ()</td></tr>
<tr class="memdesc:a85789ed8a1f47704cf1f1a2b98969444"><td class="mdescLeft">&#160;</td><td class="mdescRight">offloads the cudaFlow and executes it once <br /></td></tr>
<tr class="separator:a85789ed8a1f47704cf1f1a2b98969444"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aea77b710bf74fb3ccc6043592d4cdbc7"><td class="memTemplParams" colspan="2">template&lt;typename P &gt; </td></tr>
<tr class="memitem:aea77b710bf74fb3ccc6043592d4cdbc7"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#aea77b710bf74fb3ccc6043592d4cdbc7">join_until</a> (P &amp;&amp;predicate)</td></tr>
<tr class="memdesc:aea77b710bf74fb3ccc6043592d4cdbc7"><td class="mdescLeft">&#160;</td><td class="mdescRight">offloads the cudaFlow with the given stop predicate and then joins the execution  <a href="#aea77b710bf74fb3ccc6043592d4cdbc7">More...</a><br /></td></tr>
<tr class="separator:aea77b710bf74fb3ccc6043592d4cdbc7"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a9b28ad99e4d3c0208422a2db094df277"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a9b28ad99e4d3c0208422a2db094df277">join_n</a> (size_t N)</td></tr>
<tr class="memdesc:a9b28ad99e4d3c0208422a2db094df277"><td class="mdescLeft">&#160;</td><td class="mdescRight">offloads the cudaFlow and executes it the given number of times, and then joins the execution  <a href="#a9b28ad99e4d3c0208422a2db094df277">More...</a><br /></td></tr>
<tr class="separator:a9b28ad99e4d3c0208422a2db094df277"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:afa479a33e555179c400ba2376b7c5f29"><td class="memItemLeft" align="right" valign="top"><a id="afa479a33e555179c400ba2376b7c5f29"></a>
void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#afa479a33e555179c400ba2376b7c5f29">join</a> ()</td></tr>
<tr class="memdesc:afa479a33e555179c400ba2376b7c5f29"><td class="mdescLeft">&#160;</td><td class="mdescRight">offloads the cudaFlow and executes it once, and then joins the execution <br /></td></tr>
<tr class="separator:afa479a33e555179c400ba2376b7c5f29"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa841b36aca935162489b5e7430dafe99"><td class="memTemplParams" colspan="2"><a id="aa841b36aca935162489b5e7430dafe99"></a>
template&lt;typename... ArgsT&gt; </td></tr>
<tr class="memitem:aa841b36aca935162489b5e7430dafe99"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#aa841b36aca935162489b5e7430dafe99">update_kernel</a> (<a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> ct, dim3 g, dim3 b, size_t shm, ArgsT &amp;&amp;... args)</td></tr>
<tr class="memdesc:aa841b36aca935162489b5e7430dafe99"><td class="mdescLeft">&#160;</td><td class="mdescRight">updates parameters of a kernel task <br /></td></tr>
<tr class="separator:aa841b36aca935162489b5e7430dafe99"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a0644814008b31c7a9bcc64dada5d0ca9"><td class="memTemplParams" colspan="2"><a id="a0644814008b31c7a9bcc64dada5d0ca9"></a>
template&lt;typename T , std::enable_if_t&lt;!std::is_same_v&lt; T, void &gt;, void &gt; *  = nullptr&gt; </td></tr>
<tr class="memitem:a0644814008b31c7a9bcc64dada5d0ca9"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a0644814008b31c7a9bcc64dada5d0ca9">update_copy</a> (<a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> ct, T *tgt, const T *src, size_t num)</td></tr>
<tr class="memdesc:a0644814008b31c7a9bcc64dada5d0ca9"><td class="mdescLeft">&#160;</td><td class="mdescRight">updates parameters of a copy task <br /></td></tr>
<tr class="separator:a0644814008b31c7a9bcc64dada5d0ca9"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a8703eba0d22464208b2581d99306d709"><td class="memItemLeft" align="right" valign="top"><a id="a8703eba0d22464208b2581d99306d709"></a>
void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a8703eba0d22464208b2581d99306d709">update_memcpy</a> (<a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> ct, void *tgt, const void *src, size_t bytes)</td></tr>
<tr class="memdesc:a8703eba0d22464208b2581d99306d709"><td class="mdescLeft">&#160;</td><td class="mdescRight">updates parameters of a memcpy task <br /></td></tr>
<tr class="separator:a8703eba0d22464208b2581d99306d709"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ab40c77439e98d51070e32762d2323de1"><td class="memItemLeft" align="right" valign="top"><a id="ab40c77439e98d51070e32762d2323de1"></a>
void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#ab40c77439e98d51070e32762d2323de1">update_memset</a> (<a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> ct, void *dst, int ch, size_t count)</td></tr>
<tr class="memdesc:ab40c77439e98d51070e32762d2323de1"><td class="mdescLeft">&#160;</td><td class="mdescRight">updates parameters of a memset task <br /></td></tr>
<tr class="separator:ab40c77439e98d51070e32762d2323de1"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a53927cca2d935fa7ab2b33e3d6b13dab"><td class="memTemplParams" colspan="2">template&lt;typename C &gt; </td></tr>
<tr class="memitem:a53927cca2d935fa7ab2b33e3d6b13dab"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a53927cca2d935fa7ab2b33e3d6b13dab">single_task</a> (C &amp;&amp;callable)</td></tr>
<tr class="memdesc:a53927cca2d935fa7ab2b33e3d6b13dab"><td class="mdescLeft">&#160;</td><td class="mdescRight">runs a callable with only a single kernel thread  <a href="#a53927cca2d935fa7ab2b33e3d6b13dab">More...</a><br /></td></tr>
<tr class="separator:a53927cca2d935fa7ab2b33e3d6b13dab"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a97c248490dbde983378f757239eaed4a"><td class="memTemplParams" colspan="2">template&lt;typename I , typename C &gt; </td></tr>
<tr class="memitem:a97c248490dbde983378f757239eaed4a"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a97c248490dbde983378f757239eaed4a">for_each</a> (I first, I last, C &amp;&amp;callable)</td></tr>
<tr class="memdesc:a97c248490dbde983378f757239eaed4a"><td class="mdescLeft">&#160;</td><td class="mdescRight">applies a callable to each dereferenced element of the data array  <a href="#a97c248490dbde983378f757239eaed4a">More...</a><br /></td></tr>
<tr class="separator:a97c248490dbde983378f757239eaed4a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ab5a7c12e383be4972844a9f29033e487"><td class="memTemplParams" colspan="2">template&lt;typename I , typename C &gt; </td></tr>
<tr class="memitem:ab5a7c12e383be4972844a9f29033e487"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#ab5a7c12e383be4972844a9f29033e487">for_each_index</a> (I first, I last, I step, C &amp;&amp;callable)</td></tr>
<tr class="memdesc:ab5a7c12e383be4972844a9f29033e487"><td class="mdescLeft">&#160;</td><td class="mdescRight">applies a callable to each index in the range with the step size  <a href="#ab5a7c12e383be4972844a9f29033e487">More...</a><br /></td></tr>
<tr class="separator:ab5a7c12e383be4972844a9f29033e487"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a552f2da29009113beee4ee90bc95ae65"><td class="memTemplParams" colspan="2">template&lt;typename I , typename C , typename... S&gt; </td></tr>
<tr class="memitem:a552f2da29009113beee4ee90bc95ae65"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a552f2da29009113beee4ee90bc95ae65">transform</a> (I first, I last, C &amp;&amp;callable, S... srcs)</td></tr>
<tr class="memdesc:a552f2da29009113beee4ee90bc95ae65"><td class="mdescLeft">&#160;</td><td class="mdescRight">applies a callable to a source range and stores the result in a target range  <a href="#a552f2da29009113beee4ee90bc95ae65">More...</a><br /></td></tr>
<tr class="separator:a552f2da29009113beee4ee90bc95ae65"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a89c389fff64a16e5dd8c60875d3b514d"><td class="memTemplParams" colspan="2">template&lt;typename C &gt; </td></tr>
<tr class="memitem:a89c389fff64a16e5dd8c60875d3b514d"><td class="memTemplItemLeft" align="right" valign="top"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a>&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classtf_1_1cudaFlow.html#a89c389fff64a16e5dd8c60875d3b514d">capture</a> (C &amp;&amp;callable)</td></tr>
<tr class="memdesc:a89c389fff64a16e5dd8c60875d3b514d"><td class="mdescLeft">&#160;</td><td class="mdescRight">constructs a subflow graph through <a class="el" href="classtf_1_1cudaFlowCapturer.html" title="class for building a CUDA task dependency graph through stream capture ">tf::cudaFlowCapturer</a>  <a href="#a89c389fff64a16e5dd8c60875d3b514d">More...</a><br /></td></tr>
<tr class="separator:a89c389fff64a16e5dd8c60875d3b514d"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="friends"></a>
Friends</h2></td></tr>
<tr class="memitem:a763b2f90bc53f92d680a635fe28e858e"><td class="memItemLeft" align="right" valign="top"><a id="a763b2f90bc53f92d680a635fe28e858e"></a>
class&#160;</td><td class="memItemRight" valign="bottom"><b>Executor</b></td></tr>
<tr class="separator:a763b2f90bc53f92d680a635fe28e858e"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><p>class for building a CUDA task dependency graph </p>
<p>A cudaFlow is a high-level interface over CUDA Graph to perform GPU operations using the task dependency graph model. The class provides a set of methods for creating and launching different tasks on one or multiple CUDA devices, for instance, kernel tasks, data transfer tasks, and memory operation tasks. The following example creates a cudaFlow of two kernel tasks, <code>task_1</code> and <code>task_2</code>, where <code>task_1</code> runs before <code>task_2</code>.</p>
<div class="fragment"><div class="line"><a class="code" href="classtf_1_1Taskflow.html">tf::Taskflow</a> taskflow;</div><div class="line"><a class="code" href="classtf_1_1Executor.html">tf::Executor</a> executor;</div><div class="line"></div><div class="line">taskflow.<a class="code" href="classtf_1_1FlowBuilder.html#a60d7a666cab71ecfa3010b2efb0d6b57">emplace</a>([&amp;](<a class="code" href="classtf_1_1cudaFlow.html">tf::cudaFlow</a>&amp; cf){</div><div class="line">  <span class="comment">// create two kernel tasks </span></div><div class="line">  <a class="code" href="classtf_1_1cudaTask.html">tf::cudaTask</a> task_1 = cf.<a class="code" href="classtf_1_1cudaFlow.html#adb731be71bdd436dfb5e36e6213a9a17">kernel</a>(grid_1, block_1, shm_size_1, kernel_1, my_args_1);</div><div class="line">  <a class="code" href="classtf_1_1cudaTask.html">tf::cudaTask</a> task_2 = cf.<a class="code" href="classtf_1_1cudaFlow.html#adb731be71bdd436dfb5e36e6213a9a17">kernel</a>(grid_2, block_2, shm_size_2, kernel_2, my_args_2);</div><div class="line">  </div><div class="line">  <span class="comment">// kernel_1 runs before kernel_2</span></div><div class="line">  task_1.<a class="code" href="classtf_1_1cudaTask.html#abdd68287ec4dff4216af34d1db44d1b4">precede</a>(task_2);</div><div class="line">});</div><div class="line"></div><div class="line">executor.<a class="code" href="classtf_1_1Executor.html#a81f35d5b0a20ac0646447eb80d97c0aa">run</a>(taskflow).wait();</div></div><!-- fragment --> </div><h2 class="groupheader">Member Function Documentation</h2>
<a id="a89c389fff64a16e5dd8c60875d3b514d"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a89c389fff64a16e5dd8c60875d3b514d">&#9670;&nbsp;</a></span>capture()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename C &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::capture </td>
          <td>(</td>
          <td class="paramtype">C &amp;&amp;&#160;</td>
          <td class="paramname"><em>callable</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>constructs a subflow graph through <a class="el" href="classtf_1_1cudaFlowCapturer.html" title="class for building a CUDA task dependency graph through stream capture ">tf::cudaFlowCapturer</a> </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">C</td><td>callable type constructible from <code>std::function&lt;void(tf::cudaFlowCapturer&amp;)&gt;</code> </td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">callable</td><td>the callable to construct a capture flow</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A captured subflow forms a sub-graph to the cudaFlow and can be used to capture custom (or third-party) kernels that cannot be directly constructed from the cudaFlow.</p>
<p>Example usage:</p>
<div class="fragment"><div class="line">taskflow.<a class="code" href="classtf_1_1FlowBuilder.html#a60d7a666cab71ecfa3010b2efb0d6b57">emplace</a>([&amp;](<a class="code" href="classtf_1_1cudaFlow.html">tf::cudaFlow</a>&amp; cf){</div><div class="line">  </div><div class="line">  <a class="code" href="classtf_1_1cudaTask.html">tf::cudaTask</a> my_kernel = cf.<a class="code" href="classtf_1_1cudaFlow.html#adb731be71bdd436dfb5e36e6213a9a17">kernel</a>(my_arguments);</div><div class="line">  </div><div class="line">  <span class="comment">// create a flow capturer to capture custom kernels</span></div><div class="line">  <a class="code" href="classtf_1_1cudaTask.html">tf::cudaTask</a> my_subflow = cf.<a class="code" href="classtf_1_1cudaFlow.html#a89c389fff64a16e5dd8c60875d3b514d">capture</a>([&amp;](<a class="code" href="classtf_1_1cudaFlowCapturer.html">tf::cudaFlowCapturer</a>&amp; capturer){</div><div class="line">    capturer.<a class="code" href="classtf_1_1cudaFlowCapturerBase.html#adf651356def71f613c589c29588398c2">on</a>([&amp;](cudaStream_t stream){</div><div class="line">      invoke_custom_kernel_with_stream(stream, custom_arguments);</div><div class="line">    }); </div><div class="line">  });</div><div class="line"></div><div class="line">  my_kernel.<a class="code" href="classtf_1_1cudaTask.html#abdd68287ec4dff4216af34d1db44d1b4">precede</a>(my_subflow);</div><div class="line">});</div></div><!-- fragment --> 
</div>
</div>
<a id="af03e04771b655f9e629eb4c22e19b19f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af03e04771b655f9e629eb4c22e19b19f">&#9670;&nbsp;</a></span>copy()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T , std::enable_if_t&lt;!std::is_same_v&lt; T, void &gt;, void &gt; * &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::copy </td>
          <td>(</td>
          <td class="paramtype">T *&#160;</td>
          <td class="paramname"><em>tgt</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const T *&#160;</td>
          <td class="paramname"><em>src</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>num</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>creates a copy task of typed data </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">T</td><td>element type (non-void)</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">tgt</td><td>pointer to the target memory block </td></tr>
    <tr><td class="paramname">src</td><td>pointer to the source memory block </td></tr>
    <tr><td class="paramname">num</td><td>number of elements to copy</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A copy task transfers <code>num*sizeof(T)</code> bytes of data from a source location to a target location. Direction can be arbitrary among CPUs and GPUs. </p>

</div>
</div>
<a id="a21d4447bc834f4d3e1bb4772c850d090"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a21d4447bc834f4d3e1bb4772c850d090">&#9670;&nbsp;</a></span>fill()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T , std::enable_if_t&lt; is_pod_v&lt; T &gt; &amp;&amp;(sizeof(T)==1||sizeof(T)==2||sizeof(T)==4), cudaTask &gt; * &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::fill </td>
          <td>(</td>
          <td class="paramtype">T *&#160;</td>
          <td class="paramname"><em>dst</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">T&#160;</td>
          <td class="paramname"><em>value</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>count</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>creates a fill task that fills a typed memory block with a value </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">T</td><td>element type (size of <code>T</code> must be either 1, 2, or 4)</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">dst</td><td>pointer to the destination device memory area </td></tr>
    <tr><td class="paramname">value</td><td>value to fill for each element of type <code>T</code> </td></tr>
    <tr><td class="paramname">count</td><td>number of elements</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A fill task fills the first <code>count</code> elements of type <code>T</code> with <code>value</code> in a device memory area pointed by <code>dst</code>. The value to fill is interpreted in type <code>T</code> rather than byte. </p>

</div>
</div>
<a id="a97c248490dbde983378f757239eaed4a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a97c248490dbde983378f757239eaed4a">&#9670;&nbsp;</a></span>for_each()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename I , typename C &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::for_each </td>
          <td>(</td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>first</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>last</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">C &amp;&amp;&#160;</td>
          <td class="paramname"><em>callable</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>applies a callable to each dereferenced element of the data array </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">I</td><td>iterator type </td></tr>
    <tr><td class="paramname">C</td><td>callable type</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">first</td><td>iterator to the beginning (inclusive) </td></tr>
    <tr><td class="paramname">last</td><td>iterator to the end (exclusive) </td></tr>
    <tr><td class="paramname">callable</td><td>a callable object to apply to the dereferenced iterator</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd><a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">cudaTask</a> handle</dd></dl>
<p>This method is equivalent to the parallel execution of the following loop on a GPU:</p>
<div class="fragment"><div class="line"><span class="keywordflow">for</span>(<span class="keyword">auto</span> itr = first; itr != last; itr++) {</div><div class="line">  callable(*itr);</div><div class="line">}</div></div><!-- fragment --> 
</div>
</div>
<a id="ab5a7c12e383be4972844a9f29033e487"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ab5a7c12e383be4972844a9f29033e487">&#9670;&nbsp;</a></span>for_each_index()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename I , typename C &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::for_each_index </td>
          <td>(</td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>first</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>last</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>step</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">C &amp;&amp;&#160;</td>
          <td class="paramname"><em>callable</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>applies a callable to each index in the range with the step size </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">I</td><td>index type </td></tr>
    <tr><td class="paramname">C</td><td>callable type</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">first</td><td>beginning index </td></tr>
    <tr><td class="paramname">last</td><td>last index </td></tr>
    <tr><td class="paramname">step</td><td>step size </td></tr>
    <tr><td class="paramname">callable</td><td>the callable to apply to each element in the data array</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd><a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">cudaTask</a> handle</dd></dl>
<p>This method is equivalent to the parallel execution of the following loop on a GPU:</p>
<div class="fragment"><div class="line"><span class="comment">// step is positive [first, last)</span></div><div class="line"><span class="keywordflow">for</span>(<span class="keyword">auto</span> i=first; i&lt;last; i+=step) {</div><div class="line">  callable(i);</div><div class="line">}</div><div class="line"></div><div class="line"><span class="comment">// step is negative [first, last)</span></div><div class="line"><span class="keywordflow">for</span>(<span class="keyword">auto</span> i=first; i&gt;last; i+=step) {</div><div class="line">  callable(i);</div><div class="line">}</div></div><!-- fragment --> 
</div>
</div>
<a id="a060e1c96111c2134ce0f896420a42cd0"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a060e1c96111c2134ce0f896420a42cd0">&#9670;&nbsp;</a></span>host()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename C &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::host </td>
          <td>(</td>
          <td class="paramtype">C &amp;&amp;&#160;</td>
          <td class="paramname"><em>callable</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>creates a host execution task </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">C</td><td>callable type</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">callable</td><td>a callable object with neither arguments nor return (i.e., constructible from std::function&lt;void()&gt;)</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A host task can only execute CPU-specific functions and cannot do any CUDA calls (e.g., cudaMalloc). </p>

</div>
</div>
<a id="a9b28ad99e4d3c0208422a2db094df277"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a9b28ad99e4d3c0208422a2db094df277">&#9670;&nbsp;</a></span>join_n()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void tf::cudaFlow::join_n </td>
          <td>(</td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>N</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>offloads the cudaFlow, executes it the given number of times, and then joins the execution </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">N</td><td>number of executions before join </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="aea77b710bf74fb3ccc6043592d4cdbc7"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aea77b710bf74fb3ccc6043592d4cdbc7">&#9670;&nbsp;</a></span>join_until()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename P &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname">void tf::cudaFlow::join_until </td>
          <td>(</td>
          <td class="paramtype">P &amp;&amp;&#160;</td>
          <td class="paramname"><em>predicate</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>offloads the cudaFlow with the given stop predicate and then joins the execution </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">P</td><td>predicate type (a binary callable)</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">predicate</td><td>a binary predicate (returns <code>true</code> for stop)</td></tr>
  </table>
  </dd>
</dl>
<p>Immediately offloads the present cudaFlow onto a GPU and repeatedly executes it until the predicate returns <code>true</code>. When execution finishes, the cudaFlow is joined. A joined cudaFlow becomes <em>invalid</em> and cannot take other operations. </p>

</div>
</div>
<a id="adb731be71bdd436dfb5e36e6213a9a17"></a>
<h2 class="memtitle"><span class="permalink"><a href="#adb731be71bdd436dfb5e36e6213a9a17">&#9670;&nbsp;</a></span>kernel()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename F , typename... ArgsT&gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::kernel </td>
          <td>(</td>
          <td class="paramtype">dim3&#160;</td>
          <td class="paramname"><em>g</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">dim3&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>s</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">F &amp;&amp;&#160;</td>
          <td class="paramname"><em>f</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">ArgsT &amp;&amp;...&#160;</td>
          <td class="paramname"><em>args</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>creates a kernel task </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">F</td><td>kernel function type </td></tr>
    <tr><td class="paramname">ArgsT</td><td>kernel function parameters type</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">g</td><td>configured grid </td></tr>
    <tr><td class="paramname">b</td><td>configured block </td></tr>
    <tr><td class="paramname">s</td><td>configured shared memory </td></tr>
    <tr><td class="paramname">f</td><td>kernel function </td></tr>
    <tr><td class="paramname">args</td><td>arguments to forward to the kernel function by copy</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle </dd></dl>

</div>
</div>
<a id="a4a839dbaa01237a440edfebe8faf4e5b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a4a839dbaa01237a440edfebe8faf4e5b">&#9670;&nbsp;</a></span>kernel_on()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename F , typename... ArgsT&gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::kernel_on </td>
          <td>(</td>
          <td class="paramtype">int&#160;</td>
          <td class="paramname"><em>d</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">dim3&#160;</td>
          <td class="paramname"><em>g</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">dim3&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>s</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">F &amp;&amp;&#160;</td>
          <td class="paramname"><em>f</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">ArgsT &amp;&amp;...&#160;</td>
          <td class="paramname"><em>args</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>creates a kernel task on a device </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">F</td><td>kernel function type </td></tr>
    <tr><td class="paramname">ArgsT</td><td>kernel function parameters type</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">d</td><td>device identifier to launch the kernel </td></tr>
    <tr><td class="paramname">g</td><td>configured grid </td></tr>
    <tr><td class="paramname">b</td><td>configured block </td></tr>
    <tr><td class="paramname">s</td><td>configured shared memory </td></tr>
    <tr><td class="paramname">f</td><td>kernel function </td></tr>
    <tr><td class="paramname">args</td><td>arguments to forward to the kernel function by copy</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle </dd></dl>

</div>
</div>
<a id="ad37637606f0643f360e9eda1f9a6e559"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad37637606f0643f360e9eda1f9a6e559">&#9670;&nbsp;</a></span>memcpy()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::memcpy </td>
          <td>(</td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>tgt</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>src</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>bytes</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>creates a memcpy task </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">tgt</td><td>pointer to the target memory block </td></tr>
    <tr><td class="paramname">src</td><td>pointer to the source memory block </td></tr>
    <tr><td class="paramname">bytes</td><td>bytes to copy</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A memcpy task transfers <code>bytes</code> of data from a source location to a target location. Direction can be arbitrary among CPUs and GPUs. </p>

</div>
</div>
<a id="a079ca65da35301e5aafd45878a19e9d2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a079ca65da35301e5aafd45878a19e9d2">&#9670;&nbsp;</a></span>memset()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::memset </td>
          <td>(</td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>dst</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int&#160;</td>
          <td class="paramname"><em>v</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>count</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>creates a memset task </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">dst</td><td>pointer to the destination device memory area </td></tr>
    <tr><td class="paramname">v</td><td>value to set for each byte of specified memory </td></tr>
    <tr><td class="paramname">count</td><td>size in bytes to set</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A memset task fills the first <code>count</code> bytes of device memory area pointed by <code>dst</code> with the byte value <code>v</code>. </p>

</div>
</div>
<a id="a30b2e107cb2c90a37f467b28d1b42a74"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a30b2e107cb2c90a37f467b28d1b42a74">&#9670;&nbsp;</a></span>noop()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::noop </td>
          <td>(</td>
          <td class="paramname"></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>creates a no-operation task </p>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>An empty node performs no operation during execution, but can be used for transitive ordering. For example, a phased execution graph with 2 groups of n nodes with a barrier between them can be represented using an empty node and 2*n dependency edges, rather than no empty node and n^2 dependency edges. </p>

</div>
</div>
<a id="ac2269fd7dc8ca04a294a718204703dad"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ac2269fd7dc8ca04a294a718204703dad">&#9670;&nbsp;</a></span>offload_n()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void tf::cudaFlow::offload_n </td>
          <td>(</td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>N</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>offloads the cudaFlow and executes it the given number of times </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">N</td><td>number of executions </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a99358da15e3bdfa1faabb3e326130e1f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a99358da15e3bdfa1faabb3e326130e1f">&#9670;&nbsp;</a></span>offload_until()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename P &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname">void tf::cudaFlow::offload_until </td>
          <td>(</td>
          <td class="paramtype">P &amp;&amp;&#160;</td>
          <td class="paramname"><em>predicate</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>offloads the cudaFlow onto a GPU and repeatedly runs it until the predicate becomes true </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">P</td><td>predicate type (a binary callable)</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">predicate</td><td>a binary predicate (returns <code>true</code> for stop)</td></tr>
  </table>
  </dd>
</dl>
<p>Immediately offloads the present cudaFlow onto a GPU and repeatedly executes it until the predicate returns <code>true</code>.</p>
<p>An offloaded cudaFlow forces the underlying graph to be instantiated. After the instantiation, you should not modify the graph topology but may update node parameters. </p>

</div>
</div>
<a id="a53927cca2d935fa7ab2b33e3d6b13dab"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a53927cca2d935fa7ab2b33e3d6b13dab">&#9670;&nbsp;</a></span>single_task()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename C &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::single_task </td>
          <td>(</td>
          <td class="paramtype">C &amp;&amp;&#160;</td>
          <td class="paramname"><em>callable</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>runs a callable with only a single kernel thread </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">C</td><td>callable type</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">callable</td><td>callable to run by a single kernel thread</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle </dd></dl>

</div>
</div>
<a id="a552f2da29009113beee4ee90bc95ae65"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a552f2da29009113beee4ee90bc95ae65">&#9670;&nbsp;</a></span>transform()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename I , typename C , typename... S&gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::transform </td>
          <td>(</td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>first</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">I&#160;</td>
          <td class="paramname"><em>last</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">C &amp;&amp;&#160;</td>
          <td class="paramname"><em>callable</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">S...&#160;</td>
          <td class="paramname"><em>srcs</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>applies a callable to a source range and stores the result in a target range </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">I</td><td>iterator type </td></tr>
    <tr><td class="paramname">C</td><td>callable type </td></tr>
    <tr><td class="paramname">S</td><td>source types</td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">first</td><td>iterator to the beginning (inclusive) </td></tr>
    <tr><td class="paramname">last</td><td>iterator to the end (exclusive) </td></tr>
    <tr><td class="paramname">callable</td><td>the callable to apply to each element in the range </td></tr>
    <tr><td class="paramname">srcs</td><td>iterators to the source ranges</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd><a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">cudaTask</a> handle</dd></dl>
<p>This method is equivalent to the parallel execution of the following loop on a GPU:</p>
<div class="fragment"><div class="line"><span class="keywordflow">while</span> (first != last) {</div><div class="line">  *first++ = callable(*src1++, *src2++, *src3++, ...);</div><div class="line">}</div></div><!-- fragment --> 
</div>
</div>
<a id="a40172fac4464f6d805f75921ea3c2a3b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a40172fac4464f6d805f75921ea3c2a3b">&#9670;&nbsp;</a></span>zero()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T , std::enable_if_t&lt; is_pod_v&lt; T &gt; &amp;&amp;(sizeof(T)==1||sizeof(T)==2||sizeof(T)==4), void &gt; * &gt; </div>
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classtf_1_1cudaTask.html">cudaTask</a> tf::cudaFlow::zero </td>
          <td>(</td>
          <td class="paramtype">T *&#160;</td>
          <td class="paramname"><em>dst</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t&#160;</td>
          <td class="paramname"><em>count</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>creates a zero task that zeroes a typed memory block </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">T</td><td>element type (size of <code>T</code> must be either 1, 2, or 4) </td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">dst</td><td>pointer to the destination device memory area </td></tr>
    <tr><td class="paramname">count</td><td>number of elements</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a <a class="el" href="classtf_1_1cudaTask.html" title="handle to a node of the internal CUDA graph ">tf::cudaTask</a> handle</dd></dl>
<p>A zero task zeroes the first <code>count</code> elements of type <code>T</code> in a device memory area pointed by <code>dst</code>. </p>

</div>
</div>
<hr/>The documentation for this class was generated from the following files:<ul>
<li><a class="el" href="cuda__flow_8hpp_source.html">cuda_flow.hpp</a></li>
<li><a class="el" href="executor_8hpp_source.html">executor.hpp</a></li>
</ul>
</div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
  <ul>
    <li class="navelem"><b>tf</b></li><li class="navelem"><a class="el" href="classtf_1_1cudaFlow.html">cudaFlow</a></li>
    <li class="footer">Generated by
    <a href="http://www.doxygen.org/index.html">
    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.14 </li>
  </ul>
</div>
</body>
</html>
