<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en-US">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=11"/>
<meta name="generator" content="Doxygen 1.12.0"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>NeuZephyr: nz::cuStrm::StreamManager&lt; T &gt; Class Template Reference</title>
<link rel="icon" href="NZ_logo2.png" type="image/png" />
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr id="projectrow">
  <td id="projectlogo"><img alt="Logo" src="NZ_logo2.png"/></td>
  <td id="projectalign">
   <div id="projectname">NeuZephyr
   </div>
   <div id="projectbrief">Simple DL Framework</div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.12.0 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
$(function() { codefold.init(0); });
/* @license-end */
</script>
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="pages.html"><span>Related&#160;Pages</span></a></li>
      <li><a href="namespaces.html"><span>Namespaces</span></a></li>
      <li class="current"><a href="annotated.html"><span>Classes</span></a></li>
      <li><a href="files.html"><span>Files</span></a></li>
    </ul>
  </div>
  <div id="navrow2" class="tabs2">
    <ul class="tablist">
      <li><a href="annotated.html"><span>Class&#160;List</span></a></li>
      <li><a href="classes.html"><span>Class&#160;Index</span></a></li>
      <li><a href="inherits.html"><span>Class&#160;Hierarchy</span></a></li>
      <li><a href="functions.html"><span>Class&#160;Members</span></a></li>
    </ul>
  </div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
$(function(){ initResizable(false); });
/* @license-end */
</script>
<div id="nav-path" class="navpath">
  <ul>
<li class="navelem"><b>nz</b></li><li class="navelem"><a class="el" href="namespacenz_1_1cu_strm.html">cuStrm</a></li><li class="navelem"><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager</a></li>  </ul>
</div>
</div><!-- top -->
<div id="doc-content">
<div class="header">
  <div class="summary">
<a href="#pub-methods">Public Member Functions</a> &#124;
<a href="#pub-static-methods">Static Public Member Functions</a> &#124;
<a href="classnz_1_1cu_strm_1_1_stream_manager-members.html">List of all members</a>  </div>
  <div class="headertitle"><div class="title">nz::cuStrm::StreamManager&lt; T &gt; Class Template Reference</div></div>
</div><!--header-->
<div class="contents">

<p>Centralized CUDA stream and resource management system with automatic dependency tracking.  
 <a href="#details">More...</a></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="pub-methods" name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:a8e79a1506664e3a235cf7327e6d1275b" id="r_a8e79a1506664e3a235cf7327e6d1275b"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a8e79a1506664e3a235cf7327e6d1275b">~StreamManager</a> ()</td></tr>
<tr class="memdesc:a8e79a1506664e3a235cf7327e6d1275b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Destructor for the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a> class.  <br /></td></tr>
<tr class="separator:a8e79a1506664e3a235cf7327e6d1275b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a97f78a2d43f6e0508c82d4f3b629de96" id="r_a97f78a2d43f6e0508c82d4f3b629de96"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a97f78a2d43f6e0508c82d4f3b629de96">malloc</a> (T **data, const size_t size)</td></tr>
<tr class="memdesc:a97f78a2d43f6e0508c82d4f3b629de96"><td class="mdescLeft">&#160;</td><td class="mdescRight">Asynchronously allocates device memory for type-specific data with stream-ordered dependency tracking.  <br /></td></tr>
<tr class="separator:a97f78a2d43f6e0508c82d4f3b629de96"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a785cf34395067f425e032d9bd5e1fa20" id="r_a785cf34395067f425e032d9bd5e1fa20"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a785cf34395067f425e032d9bd5e1fa20">free</a> (T *data)</td></tr>
<tr class="memdesc:a785cf34395067f425e032d9bd5e1fa20"><td class="mdescLeft">&#160;</td><td class="mdescRight">Frees the CUDA device memory pointed to by the given pointer.  <br /></td></tr>
<tr class="separator:a785cf34395067f425e032d9bd5e1fa20"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ab6803232b9c08d9282b16322a6c7b8a9" id="r_ab6803232b9c08d9282b16322a6c7b8a9"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ab6803232b9c08d9282b16322a6c7b8a9">freeHost</a> (T *data)</td></tr>
<tr class="memdesc:ab6803232b9c08d9282b16322a6c7b8a9"><td class="mdescLeft">&#160;</td><td class="mdescRight">Frees the pinned host memory pointed to by the given pointer.  <br /></td></tr>
<tr class="separator:ab6803232b9c08d9282b16322a6c7b8a9"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a1084057ef6f5b2871c60702209bb4469" id="r_a1084057ef6f5b2871c60702209bb4469"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a1084057ef6f5b2871c60702209bb4469">freeAsync</a> (T *data)</td></tr>
<tr class="memdesc:a1084057ef6f5b2871c60702209bb4469"><td class="mdescLeft">&#160;</td><td class="mdescRight">Asynchronously frees the CUDA device memory pointed to by the given pointer.  <br /></td></tr>
<tr class="separator:a1084057ef6f5b2871c60702209bb4469"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a71ad766cb2869d3dd6a3931966e81706" id="r_a71ad766cb2869d3dd6a3931966e81706"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a71ad766cb2869d3dd6a3931966e81706">memset</a> (T *data, const int value, const size_t count)</td></tr>
<tr class="memdesc:a71ad766cb2869d3dd6a3931966e81706"><td class="mdescLeft">&#160;</td><td class="mdescRight">Asynchronously sets a block of CUDA device memory to a specified value.  <br /></td></tr>
<tr class="separator:a71ad766cb2869d3dd6a3931966e81706"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:afa38d5c6db0e6b48c8f74ce8ad0df2bc" id="r_afa38d5c6db0e6b48c8f74ce8ad0df2bc"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#afa38d5c6db0e6b48c8f74ce8ad0df2bc">memcpy</a> (T *dst, T *src, const size_t size, const cudaMemcpyKind kind)</td></tr>
<tr class="memdesc:afa38d5c6db0e6b48c8f74ce8ad0df2bc"><td class="mdescLeft">&#160;</td><td class="mdescRight">Asynchronously copies data between CUDA device and host memory based on the specified memory copy kind.  <br /></td></tr>
<tr class="separator:afa38d5c6db0e6b48c8f74ce8ad0df2bc"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a46ce59b45de432842454aadf00b93791" id="r_a46ce59b45de432842454aadf00b93791"><td class="memTemplParams" colspan="2">template&lt;typename F , typename... Args&gt; </td></tr>
<tr class="memitem:a46ce59b45de432842454aadf00b93791"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="#a46ce59b45de432842454aadf00b93791">submit</a> (F func, dim3 grid, dim3 block, size_t shared, T *odata, T *idata, Args... args)</td></tr>
<tr class="memdesc:a46ce59b45de432842454aadf00b93791"><td class="mdescLeft">&#160;</td><td class="mdescRight">Asynchronously submits a CUDA kernel with stream-ordered dependency management.  <br /></td></tr>
<tr class="separator:a46ce59b45de432842454aadf00b93791"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa11ff4c21b003e5089f3cd65724b2193" id="r_aa11ff4c21b003e5089f3cd65724b2193"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#aa11ff4c21b003e5089f3cd65724b2193">sync</a> () const</td></tr>
<tr class="memdesc:aa11ff4c21b003e5089f3cd65724b2193"><td class="mdescLeft">&#160;</td><td class="mdescRight">Synchronizes all CUDA streams in the stream pool by blocking the host thread.  <br /></td></tr>
<tr class="separator:aa11ff4c21b003e5089f3cd65724b2193"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:abe439fa00c0bd369c0b2345b095ed5af" id="r_abe439fa00c0bd369c0b2345b095ed5af"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#abe439fa00c0bd369c0b2345b095ed5af">syncData</a> (T *data)</td></tr>
<tr class="memdesc:abe439fa00c0bd369c0b2345b095ed5af"><td class="mdescLeft">&#160;</td><td class="mdescRight">Synchronizes host thread with completion events for a specific data object.  <br /></td></tr>
<tr class="separator:abe439fa00c0bd369c0b2345b095ed5af"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a731986c2c4ecd056562eaddadef46df8" id="r_a731986c2c4ecd056562eaddadef46df8"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a731986c2c4ecd056562eaddadef46df8">randomize</a> (T *data, size_t size, size_t seed, curandRngType_t rngType)</td></tr>
<tr class="memdesc:a731986c2c4ecd056562eaddadef46df8"><td class="mdescLeft">&#160;</td><td class="mdescRight">Generates uniformly distributed random numbers on GPU using CURAND.  <br /></td></tr>
<tr class="separator:a731986c2c4ecd056562eaddadef46df8"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a1de1cf3aadea137faf90a2f9b4b7abe2" id="r_a1de1cf3aadea137faf90a2f9b4b7abe2"><td class="memItemLeft" align="right" valign="top">cudaStream_t&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a1de1cf3aadea137faf90a2f9b4b7abe2">getStream</a> ()</td></tr>
<tr class="memdesc:a1de1cf3aadea137faf90a2f9b4b7abe2"><td class="mdescLeft">&#160;</td><td class="mdescRight">Acquires CUDA stream from pool using round-robin scheduling.  <br /></td></tr>
<tr class="separator:a1de1cf3aadea137faf90a2f9b4b7abe2"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:adb1078a67c6e38932d7d58c2adb05ec0" id="r_adb1078a67c6e38932d7d58c2adb05ec0"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#adb1078a67c6e38932d7d58c2adb05ec0">streamWait</a> (T *data, cudaStream_t stream)</td></tr>
<tr class="memdesc:adb1078a67c6e38932d7d58c2adb05ec0"><td class="mdescLeft">&#160;</td><td class="mdescRight">Synchronizes CUDA stream execution until data writes complete.  <br /></td></tr>
<tr class="separator:adb1078a67c6e38932d7d58c2adb05ec0"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a1260d95d0eddf75b72700da07361a4bd" id="r_a1260d95d0eddf75b72700da07361a4bd"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a1260d95d0eddf75b72700da07361a4bd">recordData</a> (T *data, cudaStream_t stream)</td></tr>
<tr class="memdesc:a1260d95d0eddf75b72700da07361a4bd"><td class="mdescLeft">&#160;</td><td class="mdescRight">Records write completion event for asynchronous data operations.  <br /></td></tr>
<tr class="separator:a1260d95d0eddf75b72700da07361a4bd"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="pub-static-methods" name="pub-static-methods"></a>
Static Public Member Functions</h2></td></tr>
<tr class="memitem:ab4b2eb422e0e1ee44bdfdc0eb94457ce" id="r_ab4b2eb422e0e1ee44bdfdc0eb94457ce"><td class="memItemLeft" align="right" valign="top">static <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager</a> &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce">Instance</a> ()</td></tr>
<tr class="memdesc:ab4b2eb422e0e1ee44bdfdc0eb94457ce"><td class="mdescLeft">&#160;</td><td class="mdescRight">Returns a reference to the singleton instance of the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a>.  <br /></td></tr>
<tr class="separator:ab4b2eb422e0e1ee44bdfdc0eb94457ce"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><div class="compoundTemplParams">template&lt;typename T&gt;<br />
class nz::cuStrm::StreamManager&lt; T &gt;</div><p>Centralized CUDA stream and resource management system with automatic dependency tracking. </p>
<h3><a class="anchor" id="autotoc_md125"></a>
Core Functionality</h3>
<ul>
<li><b>Resource Management</b>:<ul>
<li><code>malloc/mallocAsync</code>: Stream-ordered memory allocation</li>
<li><code>free/freeAsync</code>: Type-specific deallocation with safety checks</li>
</ul>
</li>
<li><b>Kernel Submission</b>:<ul>
<li>Family of <code>submit*</code> methods supporting 1-4 outputs and mixed data types</li>
<li>Automatic dependency injection for input/output buffers</li>
</ul>
</li>
<li><b>System Control</b>:<ul>
<li><code>sync/syncData</code>: Full pipeline or data-specific synchronization</li>
<li><code>randomize</code>: Managed CURAND initialization and execution</li>
</ul>
</li>
</ul>
<h3><a class="anchor" id="autotoc_md126"></a>
Architecture Integration</h3>
<ul>
<li><b><a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html" title="Internal event management system for CUDA stream synchronization (Part of StreamManager)">EventPool</a> Collaboration</b>:<ul>
<li>Uses <a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html" title="Internal event management system for CUDA stream synchronization (Part of StreamManager)">EventPool</a> for cross-stream dependency tracking</li>
<li>Delegates event lifecycle management to specialized component</li>
</ul>
</li>
<li><b>CUDA Resource Isolation</b>:<ul>
<li>Encapsulates all CUDA API calls</li>
<li>Prevents direct stream/event access from external code</li>
</ul>
</li>
</ul>
<h3><a class="anchor" id="autotoc_md127"></a>
Usage Example:</h3>
<div class="fragment"><div class="line"><span class="comment">// Get singleton instance</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager&lt;float&gt;</a>&amp; manager = <a class="code hl_function" href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce">StreamManager&lt;float&gt;::Instance</a>();</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Allocate device memory</span></div>
<div class="line"><span class="keywordtype">float</span>* d_data;</div>
<div class="line">manager.<a class="code hl_function" href="#a97f78a2d43f6e0508c82d4f3b629de96">malloc</a>(&amp;d_data, 1024);</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Initialize memory</span></div>
<div class="line">manager.<a class="code hl_function" href="#a71ad766cb2869d3dd6a3931966e81706">memset</a>(d_data, 0, 1024);</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Launch kernel (axpy example)</span></div>
<div class="line">manager.<a class="code hl_function" href="#a46ce59b45de432842454aadf00b93791">submit</a>(</div>
<div class="line">    axpy_kernel,            <span class="comment">// Kernel function</span></div>
<div class="line">    dim3(1024/256),         <span class="comment">// Grid size</span></div>
<div class="line">    dim3(256),              <span class="comment">// Block size</span></div>
<div class="line">    0,                      <span class="comment">// Shared memory</span></div>
<div class="line">    d_data,                 <span class="comment">// Output</span></div>
<div class="line">    d_data, d_data,         <span class="comment">// Inputs</span></div>
<div class="line">    2.0f                    <span class="comment">// alpha parameter</span></div>
<div class="line">);</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Asynchronous memory release</span></div>
<div class="line">manager.<a class="code hl_function" href="#a1084057ef6f5b2871c60702209bb4469">freeAsync</a>(d_data);</div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html"><div class="ttname"><a href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a></div><div class="ttdoc">Centralized CUDA stream and resource management system with automatic dependency tracking.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00131">StreamManager.cuh:131</a></div></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_a1084057ef6f5b2871c60702209bb4469"><div class="ttname"><a href="#a1084057ef6f5b2871c60702209bb4469">nz::cuStrm::StreamManager::freeAsync</a></div><div class="ttdeci">void freeAsync(T *data)</div><div class="ttdoc">Asynchronously frees the CUDA device memory pointed to by the given pointer.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00325">StreamManager.cuh:325</a></div></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_a46ce59b45de432842454aadf00b93791"><div class="ttname"><a href="#a46ce59b45de432842454aadf00b93791">nz::cuStrm::StreamManager::submit</a></div><div class="ttdeci">void submit(F func, dim3 grid, dim3 block, size_t shared, T *odata, T *idata, Args... args)</div><div class="ttdoc">Asynchronously submits a CUDA kernel with stream-ordered dependency management.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00471">StreamManager.cuh:471</a></div></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_a71ad766cb2869d3dd6a3931966e81706"><div class="ttname"><a href="#a71ad766cb2869d3dd6a3931966e81706">nz::cuStrm::StreamManager::memset</a></div><div class="ttdeci">void memset(T *data, const int value, const size_t count)</div><div class="ttdoc">Asynchronously sets a block of CUDA device memory to a specified value.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00360">StreamManager.cuh:360</a></div></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_a97f78a2d43f6e0508c82d4f3b629de96"><div class="ttname"><a href="#a97f78a2d43f6e0508c82d4f3b629de96">nz::cuStrm::StreamManager::malloc</a></div><div class="ttdeci">void malloc(T **data, const size_t size)</div><div class="ttdoc">Asynchronously allocates device memory for type-specific data with stream-ordered dependency tracking...</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00230">StreamManager.cuh:230</a></div></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_ab4b2eb422e0e1ee44bdfdc0eb94457ce"><div class="ttname"><a href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce">nz::cuStrm::StreamManager::Instance</a></div><div class="ttdeci">static StreamManager &amp; Instance()</div><div class="ttdoc">Returns a reference to the singleton instance of the StreamManager.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00154">StreamManager.cuh:154</a></div></div>
</div><!-- fragment --><dl class="section note"><dt>Note</dt><dd><ol type="1">
<li>All public methods are thread-safe via mutex protection</li>
<li>Destruction triggers full pipeline synchronization</li>
<li>CURAND generators are created per-operation for thread safety</li>
<li>Template specialization handles half-precision math requirements</li>
</ol>
</dd></dl>
<dl class="section author"><dt>Author</dt><dd>Mgepahmge(<a href="https://github.com/Mgepahmge">https://github.com/Mgepahmge</a>)</dd></dl>
<dl class="section date"><dt>Date</dt><dd>2024/11/29</dd></dl>
<p>This singleton class implements a high-level abstraction layer for CUDA concurrency management, combining stream scheduling, event-based dependency tracking, and resource lifecycle management into a unified interface. As the core of NVIDIA GPU task scheduling infrastructure, it enforces strict execution ordering constraints while maximizing concurrent throughput.</p>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li><b>Singleton Pattern</b>: Must be accessed exclusively through <a class="el" href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager.">Instance()</a> method</li>
<li><b>Type-Specific Instantiation</b>: Template parameter T must match allocation/free types</li>
<li><b>Resource Ownership</b>: All CUDA resources managed through this class must not be externally modified</li>
</ul>
</dd></dl>
<p><b>Design</b></p>
<h3><a class="anchor" id="autotoc_md122"></a>
1. Stream Scheduling Strategy: Queue-based Least Recently Used (LRU)</h3>
<p>Implements lightweight load balancing through cyclic stream allocation:</p><ul>
<li><b>Pool Initialization</b>: Creates <code>maxStream</code> non-blocking CUDA streams at construction</li>
<li><b>Rotation Mechanism</b>: Maintains a queue of stream indices, cycling through available streams <div class="fragment"><div class="line"><span class="comment">// Acquisition pseudocode:</span></div>
<div class="line">lock();</div>
<div class="line">stream_id = queue.front();  <span class="comment">// Get least recently used</span></div>
<div class="line">queue.pop();</div>
<div class="line">queue.push(stream_id);      <span class="comment">// Cycle to end as most recently used</span></div>
<div class="line">unlock();</div>
</div><!-- fragment --></li>
<li><b>Contention Handling</b>:<ul>
<li>Fixed pool size prevents CUDA context bloat</li>
<li>Queue rotation naturally balances workload across streams</li>
<li>Mutex protection ensures thread-safe access</li>
</ul>
</li>
</ul>
<h3><a class="anchor" id="autotoc_md123"></a>
2. CUDA Operation Orchestration: Managed Execution Pipeline</h3>
<p>Standardizes all GPU operations through a four-stage protocol:</p><ol type="1">
<li><b>Stream Acquisition</b>: Obtain execution channel via LRU scheduler</li>
<li><b>Dependency Resolution</b>:<ul>
<li>Query all events associated with input/output buffers</li>
<li>Insert stream wait commands for pending operations <div class="fragment"><div class="line"><span class="comment">// streamWait implementation:</span></div>
<div class="line"><span class="keywordflow">for</span> (event in <a class="code hl_function" href="classnz_1_1cu_strm_1_1_event_pool.html#ac850289a3ad016430ab5b5a497f0eb19">EventPool::getEvents</a>(data)) {</div>
<div class="line">    cudaStreamWaitEvent(stream, event, 0);</div>
<div class="line">}</div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_event_pool_html_ac850289a3ad016430ab5b5a497f0eb19"><div class="ttname"><a href="classnz_1_1cu_strm_1_1_event_pool.html#ac850289a3ad016430ab5b5a497f0eb19">nz::cuStrm::EventPool::getEvents</a></div><div class="ttdeci">std::unordered_set&lt; cudaEvent_t &gt; getEvents(void *data)</div><div class="ttdoc">Retrieve all CUDA events associated with a given data pointer.</div><div class="ttdef"><b>Definition</b> <a href="_event_pool_8cuh_source.html#l00282">EventPool.cuh:282</a></div></div>
</div><!-- fragment --></li>
</ul>
</li>
<li><b>Operation Execution</b>:<ul>
<li>Dispatch kernels/memops with CUDA API wrappers</li>
<li>Template methods handle variable input/output configurations</li>
</ul>
</li>
<li><b>Event Recording &amp; Cleanup</b>:<ul>
<li>Attach completion event to output buffers</li>
<li>Register CUDA callback for automatic event recycling</li>
</ul>
</li>
</ol>
<h3><a class="anchor" id="autotoc_md124"></a>
3. Memory-Centric Synchronization</h3>
<p>Extends <a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html" title="Internal event management system for CUDA stream synchronization (Part of StreamManager)">EventPool</a>'s data-event binding with type-aware management:</p><ul>
<li><b>Allocation Tracking</b>: Records events for all allocated memory regions</li>
<li><b>Smart Free Operations</b>:<ul>
<li>Synchronous free: Full sync before deallocation</li>
<li>Async free: Stream-ordered deallocation with dependency enforcement</li>
</ul>
</li>
<li><b>Type Specialization</b>: Explicit handling of half-precision types </li>
</ul>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00131">131</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
</div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2>
<a id="a8e79a1506664e3a235cf7327e6d1275b" name="a8e79a1506664e3a235cf7327e6d1275b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a8e79a1506664e3a235cf7327e6d1275b">&#9670;&#160;</a></span>~StreamManager()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::~<a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager</a> </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Destructor for the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a> class. </p>
<p>This destructor is responsible for cleaning up the resources used by the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a>. It first synchronizes all the streams in the stream pool, then destroys each CUDA stream in the pool, and finally resets the event pool.</p>
<dl class="section return"><dt>Returns</dt><dd>None.</dd></dl>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>The CUDA streams in the <code>streamPool</code> are explicitly destroyed using <code>cudaStreamDestroy</code>, which releases the resources associated with these streams.</li>
<li>The <code>eventPool</code> is reset, which should release any resources held by the event pool.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This destructor does not throw exceptions. However, <code>cudaStreamDestroy</code> can return an error code indicating a failure to destroy the stream. These errors are not explicitly handled in this destructor, but it is assumed that the calling code or the CUDA runtime will handle such errors appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>This destructor depends on the <code>sync</code> function to synchronize the streams before destroying them. It also interacts with the <code>cudaStreamDestroy</code> function from the CUDA library to release the stream resources and the <code>reset</code> method of the <code>eventPool</code> object.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that all CUDA operations in the streams have completed before the destructor is called. Otherwise, destroying the streams prematurely may lead to undefined behavior. </li>
</ul>
</dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00180">180</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a8e79a1506664e3a235cf7327e6d1275b_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a8e79a1506664e3a235cf7327e6d1275b_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a8e79a1506664e3a235cf7327e6d1275b_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a8e79a1506664e3a235cf7327e6d1275b_cgraph">
<area shape="rect" title="Destructor for the StreamManager class." alt="" coords="5,5,191,48"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#aa11ff4c21b003e5089f3cd65724b2193" title="Synchronizes all CUDA streams in the stream pool by blocking the host thread." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,24,223,24,223,29,191,29"/>
</map>
</div>

</div>
</div>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="a785cf34395067f425e032d9bd5e1fa20" name="a785cf34395067f425e032d9bd5e1fa20"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a785cf34395067f425e032d9bd5e1fa20">&#9670;&#160;</a></span>free()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::free </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Frees the CUDA device memory pointed to by the given pointer. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>A pointer to the CUDA device memory to be freed (device-to-host).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None.</dd></dl>
<p>This function is responsible for releasing the CUDA device memory pointed to by <code>data</code>. Before freeing the memory, it calls the <code>syncData</code> function to ensure that any pending data synchronization operations are completed. The <code>cudaFree</code> function from the CUDA library is then used to release the device memory.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>The function ensures that the memory is synchronized before freeing it to avoid data inconsistency. After calling <code>cudaFree</code>, the memory pointed to by <code>data</code> is released and should not be accessed further.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function does not throw exceptions. However, <code>cudaFree</code> can return an error code indicating a failure to free the memory. These errors are not explicitly handled in this function, but it is assumed that the calling code or the CUDA runtime will handle such errors appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>This function depends on the <code>syncData</code> function to synchronize the data before freeing the memory. It also interacts with the <code>cudaFree</code> function from the CUDA library.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the pointer <code>data</code> points to valid CUDA device memory. Passing a null pointer or a pointer to non-CUDA device memory will lead to undefined behavior. </li>
</ul>
</dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00263">263</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a785cf34395067f425e032d9bd5e1fa20_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a785cf34395067f425e032d9bd5e1fa20_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a785cf34395067f425e032d9bd5e1fa20_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a785cf34395067f425e032d9bd5e1fa20_cgraph">
<area shape="rect" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="5,5,191,48"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,24,223,24,223,29,191,29"/>
</map>
</div>

</div>
</div>
<a id="a1084057ef6f5b2871c60702209bb4469" name="a1084057ef6f5b2871c60702209bb4469"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1084057ef6f5b2871c60702209bb4469">&#9670;&#160;</a></span>freeAsync()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::freeAsync </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Asynchronously frees the CUDA device memory pointed to by the given pointer. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>A pointer to the CUDA device memory to be freed (device-to-host).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None.</dd></dl>
<p>This function is responsible for asynchronously releasing the CUDA device memory pointed to by <code>data</code>. First, it retrieves a CUDA stream using the <code>getStream</code> function. Then, it calls <code>streamWait</code> to ensure that all operations related to the <code>data</code> in the retrieved stream are completed. Finally, it uses <code>cudaFreeAsync</code> to asynchronously free the device memory in the specified stream.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>The function ensures that all stream-related operations on the memory are finished before scheduling the memory to be freed asynchronously. After <code>cudaFreeAsync</code> is called, the memory will be released once all previous operations in the stream have completed. The memory should not be accessed after this call.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function does not throw exceptions. However, <code>getStream</code>, <code>streamWait</code>, and <code>cudaFreeAsync</code> can return error codes indicating failures. These errors are not explicitly handled in this function, and it is assumed that the calling code or the CUDA runtime will handle them appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>It depends on the <code>getStream</code> function to obtain a CUDA stream, the <code>streamWait</code> function to synchronize operations in the stream, and the <code>cudaFreeAsync</code> function from the CUDA library to perform the asynchronous memory deallocation.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the pointer <code>data</code> points to valid CUDA device memory. Passing a null pointer or a pointer to non-CUDA device memory will lead to undefined behavior. </li>
</ul>
</dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00325">325</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a1084057ef6f5b2871c60702209bb4469_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a1084057ef6f5b2871c60702209bb4469_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a1084057ef6f5b2871c60702209bb4469_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a1084057ef6f5b2871c60702209bb4469_cgraph">
<area shape="rect" title="Asynchronously frees the CUDA device memory pointed to by the given pointer." alt="" coords="5,39,191,81"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,44,223,39,223,45,192,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="239,72,424,115"/>
<area shape="poly" title=" " alt="" coords="192,71,223,75,223,81,191,76"/>
</map>
</div>

</div>
</div>
<a id="ab6803232b9c08d9282b16322a6c7b8a9" name="ab6803232b9c08d9282b16322a6c7b8a9"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ab6803232b9c08d9282b16322a6c7b8a9">&#9670;&#160;</a></span>freeHost()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::freeHost </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Frees the pinned host memory pointed to by the given pointer. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>A pointer to the pinned host memory to be freed (host-to-host).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None.</dd></dl>
<p>This function is designed to release the pinned host memory allocated by CUDA. Before freeing the memory, it invokes the <code>syncData</code> function to make sure that all data synchronization operations related to this memory are finished. Subsequently, it uses the <code>cudaFreeHost</code> function from the CUDA library to free the pinned host memory.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>The function guarantees that the data in the memory is synchronized before deallocation to prevent data loss or inconsistency. Once <code>cudaFreeHost</code> is called, the memory pointed to by <code>data</code> is released and should no longer be accessed.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function does not throw exceptions. However, <code>cudaFreeHost</code> may return an error code if it fails to free the memory. These errors are not explicitly handled within this function, and it is assumed that the calling code or the CUDA runtime will deal with such issues appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>It relies on the <code>syncData</code> function for data synchronization. Additionally, it interacts with the <code>cudaFreeHost</code> function from the CUDA library to perform the actual memory deallocation.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the pointer <code>data</code> points to valid pinned host memory allocated by CUDA. Passing a null pointer or a pointer to non-pinned host memory will result in undefined behavior.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">T* pinnedHostData;</div>
<div class="line">cudaMallocHost((<span class="keywordtype">void</span>**)&amp;pinnedHostData, <span class="keyword">sizeof</span>(T));</div>
<div class="line"><span class="comment">// Use pinnedHostData</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager&lt;T&gt;</a>&amp; manager = <a class="code hl_class" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager&lt;T&gt;</a>::Instance();</div>
<div class="line">manager.<a class="code hl_function" href="#ab6803232b9c08d9282b16322a6c7b8a9">freeHost</a>(pinnedHostData);</div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_ab6803232b9c08d9282b16322a6c7b8a9"><div class="ttname"><a href="#ab6803232b9c08d9282b16322a6c7b8a9">nz::cuStrm::StreamManager::freeHost</a></div><div class="ttdeci">void freeHost(T *data)</div><div class="ttdoc">Frees the pinned host memory pointed to by the given pointer.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00299">StreamManager.cuh:299</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00299">299</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_ab6803232b9c08d9282b16322a6c7b8a9_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_ab6803232b9c08d9282b16322a6c7b8a9_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_ab6803232b9c08d9282b16322a6c7b8a9_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_ab6803232b9c08d9282b16322a6c7b8a9_cgraph">
<area shape="rect" title="Frees the pinned host memory pointed to by the given pointer." alt="" coords="5,5,191,48"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,24,223,24,223,29,191,29"/>
</map>
</div>

</div>
</div>
<a id="a1de1cf3aadea137faf90a2f9b4b7abe2" name="a1de1cf3aadea137faf90a2f9b4b7abe2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1de1cf3aadea137faf90a2f9b4b7abe2">&#9670;&#160;</a></span>getStream()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">cudaStream_t <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::getStream </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Acquires CUDA stream from pool using round-robin scheduling. </p>
<p>This function:</p><ol type="1">
<li>Locks access to stream queue with mutex for thread safety</li>
<li>Retrieves next available stream ID from front of queue</li>
<li>Rotates queue by moving front element to end (FIFO rotation)</li>
<li>Returns corresponding CUDA stream from preallocated pool</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Thread Safety: Protected by mutex for concurrent access</li>
<li>Stream Pool: Requires pre-initialized stream pool and queue</li>
<li>Fair Scheduling: Round-robin prevents stream starvation</li>
<li>Stream Lifetime: Streams remain owned by pool manager</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Pool Capacity: Fixed stream count may lead to saturation</li>
<li>No Stream Creation: Assumes streams already initialized</li>
<li>Queue State: Undefined behavior if called before pool initialization</li>
<li>Thread Blocking: Mutex contention may impact performance at scale</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><span class="comment">// Typical usage pattern:</span></div>
<div class="line">cudaStream_t stream = manager.getStream();</div>
<div class="line">kernel&lt;&lt;&lt;blocks, threads, 0, stream&gt;&gt;&gt;(...);</div>
<div class="line">cudaStreamSynchronize(stream);</div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>cudaStreamCreate, cudaStreamSynchronize, std::lock_guard </dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00799">799</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>

</div>
</div>
<a id="ab4b2eb422e0e1ee44bdfdc0eb94457ce" name="ab4b2eb422e0e1ee44bdfdc0eb94457ce"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce">&#9670;&#160;</a></span>Instance()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">static <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager</a> &amp; <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::Instance </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span><span class="mlabel">static</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Returns a reference to the singleton instance of the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a>. </p>
<dl class="section return"><dt>Returns</dt><dd>A reference to the singleton instance of the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a>.</dd></dl>
<p>This function implements the singleton pattern for the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a> class. It ensures that only one instance of the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a> is created throughout the program's lifetime. The instance is created with initial parameters 16 and 128. Memory management of the instance is handled automatically by the static keyword, which means the instance is created on the first call to this function and destroyed when the program terminates. There is no specific exception handling mechanism for this function as it is a simple singleton accessor. It serves as a central point of access for other components to interact with the <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a> instance.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function is thread-safe in C++11 and later due to the static variable initialization being guaranteed to be thread-safe.</li>
<li>Do not attempt to create additional instances of <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">StreamManager</a> manually; always use this function to access the singleton instance.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><a class="code hl_class" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager&lt;float&gt;</a>&amp; manager = <a class="code hl_function" href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce">StreamManager&lt;float&gt;::Instance</a>();</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00154">154</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>

</div>
</div>
<a id="a97f78a2d43f6e0508c82d4f3b629de96" name="a97f78a2d43f6e0508c82d4f3b629de96"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a97f78a2d43f6e0508c82d4f3b629de96">&#9670;&#160;</a></span>malloc()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::malloc </td>
          <td>(</td>
          <td class="paramtype">T **</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const size_t</td>          <td class="paramname"><span class="paramname"><em>size</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Asynchronously allocates device memory for type-specific data with stream-ordered dependency tracking. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>Double pointer to device memory (host-to-device parameter). Receives the allocated memory address.<ul>
<li>Must be a valid pointer to device memory pointer (T**)</li>
<li>The allocated memory is accessible only after stream synchronization </li>
</ul>
</td></tr>
    <tr><td class="paramname">size</td><td>Number of elements to allocate (host-to-device parameter)<ul>
<li>Determines total allocation size as sizeof(T) * size</li>
<li>Must be &gt; 0</li>
</ul>
</td></tr>
  </table>
  </dd>
</dl>
<p>This method implements a stream-ordered memory allocation workflow:</p><ol type="1">
<li>Acquires CUDA stream using round-robin scheduling policy (see <a class="el" href="#a1de1cf3aadea137faf90a2f9b4b7abe2">getStream()</a>)</li>
<li>Executes cudaMallocAsync on the acquired stream</li>
<li>Records allocation event in <a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html" title="Internal event management system for CUDA stream synchronization (Part of StreamManager)">EventPool</a> for dependency tracking</li>
</ol>
<p>The allocation operation becomes visible to subsequent operations through:</p><ul>
<li>Implicit stream ordering within the same CUDA stream</li>
<li>Explicit event dependencies managed by <a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html" title="Internal event management system for CUDA stream synchronization (Part of StreamManager)">EventPool</a></li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">No</td><td>explicit exceptions, but CUDA errors can be checked using cudaGetLastError()</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Thread-safe through internal mutex protection</li>
<li>Allocation lifetime is managed by CUDA's async memory system</li>
<li>Subsequent operations using this memory must call <a class="el" href="#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete.">streamWait()</a> for dependency resolution</li>
<li>Requires CUDA 11.2+ for async memory APIs</li>
<li>For half-precision allocations, use the explicit half** overload</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><a class="code hl_class" href="classnz_1_1cu_strm_1_1_stream_manager.html">StreamManager&lt;float&gt;</a>&amp; manager = <a class="code hl_function" href="#ab4b2eb422e0e1ee44bdfdc0eb94457ce">StreamManager&lt;float&gt;::Instance</a>();</div>
<div class="line"><span class="keywordtype">float</span>* device_buffer = <span class="keyword">nullptr</span>;</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Allocate 1MB buffer</span></div>
<div class="line">manager.<a class="code hl_function" href="#a97f78a2d43f6e0508c82d4f3b629de96">malloc</a>(&amp;device_buffer, 1024*1024/<span class="keyword">sizeof</span>(<span class="keywordtype">float</span>));</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Check CUDA errors</span></div>
<div class="line">cudaError_t err = cudaGetLastError();</div>
<div class="line"><span class="keywordflow">if</span> (err != cudaSuccess) {</div>
<div class="line">    <span class="comment">// Handle allocation error</span></div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00230">230</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a97f78a2d43f6e0508c82d4f3b629de96_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a97f78a2d43f6e0508c82d4f3b629de96_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a97f78a2d43f6e0508c82d4f3b629de96_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a97f78a2d43f6e0508c82d4f3b629de96_cgraph">
<area shape="rect" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="5,5,191,48"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,24,223,24,223,29,191,29"/>
</map>
</div>

</div>
</div>
<a id="afa38d5c6db0e6b48c8f74ce8ad0df2bc" name="afa38d5c6db0e6b48c8f74ce8ad0df2bc"></a>
<h2 class="memtitle"><span class="permalink"><a href="#afa38d5c6db0e6b48c8f74ce8ad0df2bc">&#9670;&#160;</a></span>memcpy()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::memcpy </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>dst</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>src</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const size_t</td>          <td class="paramname"><span class="paramname"><em>size</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const cudaMemcpyKind</td>          <td class="paramname"><span class="paramname"><em>kind</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Asynchronously copies data between CUDA device and host memory based on the specified memory copy kind. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">dst</td><td>A pointer to the destination memory (memory flow depends on <code>kind</code>). </td></tr>
    <tr><td class="paramname">src</td><td>A pointer to the source memory (memory flow depends on <code>kind</code>). </td></tr>
    <tr><td class="paramname">size</td><td>The number of bytes to copy. </td></tr>
    <tr><td class="paramname">kind</td><td>The type of memory copy operation (<code>cudaMemcpyKind</code>). This determines the direction of the memory transfer (e.g., host-to-device, device-to-host, etc.).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None.</dd></dl>
<p>This function is responsible for performing an asynchronous memory copy operation. It first retrieves a CUDA stream using the <code>getStream</code> function. Then, it waits for all previous operations related to both the source (<code>src</code>) and destination (<code>dst</code>) memory in the retrieved stream to complete by calling <code>streamWait</code> twice. After that, it uses <code>cudaMemcpyAsync</code> to asynchronously copy the specified number of bytes from the source to the destination memory according to the given <code>kind</code> in the retrieved stream. Finally, it records the data operation in the <code>eventPool</code> for future reference.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>The function ensures that all previous operations on both the source and destination memory are finished before scheduling the <code>cudaMemcpyAsync</code> operation. The memory should not be accessed until the <code>cudaMemcpyAsync</code> operation has completed. The <code>eventPool</code> can be used to check the completion status.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function does not throw exceptions. However, <code>getStream</code>, <code>streamWait</code>, <code>cudaMemcpyAsync</code>, and the operations related to <code>eventPool</code> can return error codes indicating failures. These errors are not explicitly handled in this function, and it is assumed that the calling code or the CUDA runtime will handle them appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>It depends on the <code>getStream</code> function to obtain a CUDA stream, the <code>streamWait</code> function to synchronize operations in the stream, the <code>cudaMemcpyAsync</code> function from the CUDA library to perform the asynchronous memory copy, and the <code>eventPool</code> object to record the data operation.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that both <code>src</code> and <code>dst</code> pointers point to valid memory locations appropriate for the specified <code>cudaMemcpyKind</code>. Passing null pointers or pointers to incorrect memory types will lead to undefined behavior. </li>
</ul>
</dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00391">391</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_afa38d5c6db0e6b48c8f74ce8ad0df2bc_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_afa38d5c6db0e6b48c8f74ce8ad0df2bc_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_afa38d5c6db0e6b48c8f74ce8ad0df2bc_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_afa38d5c6db0e6b48c8f74ce8ad0df2bc_cgraph">
<area shape="rect" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="5,39,191,81"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,44,223,39,223,45,192,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="239,72,424,115"/>
<area shape="poly" title=" " alt="" coords="192,71,223,75,223,81,191,76"/>
</map>
</div>

</div>
</div>
<a id="a71ad766cb2869d3dd6a3931966e81706" name="a71ad766cb2869d3dd6a3931966e81706"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a71ad766cb2869d3dd6a3931966e81706">&#9670;&#160;</a></span>memset()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::memset </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int</td>          <td class="paramname"><span class="paramname"><em>value</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const size_t</td>          <td class="paramname"><span class="paramname"><em>count</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Asynchronously sets a block of CUDA device memory to a specified value. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>A pointer to the CUDA device memory to be set (device-to-host). </td></tr>
    <tr><td class="paramname">value</td><td>The value to set each byte of the memory block to. </td></tr>
    <tr><td class="paramname">count</td><td>The number of bytes to set.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None.</dd></dl>
<p>This function is designed to asynchronously initialize a block of CUDA device memory to a given value. It first retrieves a CUDA stream using the <code>getStream</code> function. Then, it calls <code>streamWait</code> to ensure that all previous operations related to the <code>data</code> in the retrieved stream are completed. After that, it uses <code>cudaMemsetAsync</code> to asynchronously set the specified number of bytes in the device memory to the given value. Finally, it records the data operation in the <code>eventPool</code> for future reference.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>The function ensures that all previous operations on the memory block are finished before scheduling the <code>cudaMemsetAsync</code> operation. The memory should not be accessed until the <code>cudaMemsetAsync</code> operation has completed. The <code>eventPool</code> can be used to check the completion status.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function does not throw exceptions. However, <code>getStream</code>, <code>streamWait</code>, <code>cudaMemsetAsync</code>, and the operations related to <code>eventPool</code> can return error codes indicating failures. These errors are not explicitly handled in this function, and it is assumed that the calling code or the CUDA runtime will handle them appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>It depends on the <code>getStream</code> function to obtain a CUDA stream, the <code>streamWait</code> function to synchronize operations in the stream, the <code>cudaMemsetAsync</code> function from the CUDA library to perform the asynchronous memory setting, and the <code>eventPool</code> object to record the data operation.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the pointer <code>data</code> points to valid CUDA device memory. Passing a null pointer or a pointer to non-CUDA device memory will lead to undefined behavior. </li>
</ul>
</dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00360">360</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a71ad766cb2869d3dd6a3931966e81706_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a71ad766cb2869d3dd6a3931966e81706_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a71ad766cb2869d3dd6a3931966e81706_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a71ad766cb2869d3dd6a3931966e81706_cgraph">
<area shape="rect" title="Asynchronously sets a block of CUDA device memory to a specified value." alt="" coords="5,39,191,81"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,44,223,39,223,45,192,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="239,72,424,115"/>
<area shape="poly" title=" " alt="" coords="192,71,223,75,223,81,191,76"/>
</map>
</div>

</div>
</div>
<a id="a731986c2c4ecd056562eaddadef46df8" name="a731986c2c4ecd056562eaddadef46df8"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a731986c2c4ecd056562eaddadef46df8">&#9670;&#160;</a></span>randomize()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::randomize </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t</td>          <td class="paramname"><span class="paramname"><em>size</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t</td>          <td class="paramname"><span class="paramname"><em>seed</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">curandRngType_t</td>          <td class="paramname"><span class="paramname"><em>rngType</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Generates uniformly distributed random numbers on GPU using CURAND. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>Device pointer to allocated memory for random numbers </td></tr>
    <tr><td class="paramname">size</td><td>Number of elements to generate </td></tr>
    <tr><td class="paramname">seed</td><td>Seed value for pseudo-random generator </td></tr>
    <tr><td class="paramname">rngType</td><td>CURAND RNG algorithm type (e.g., CURAND_RNG_PSEUDO_XORWOW)</td></tr>
  </table>
  </dd>
</dl>
<p>This function:</p><ol type="1">
<li>Acquires a CUDA stream from the pool</li>
<li>Initializes CURAND generator with specified configuration</li>
<li>Ensures prior operations on data complete via stream synchronization</li>
<li>Generates random numbers in device memory</li>
<li>Records completion event for subsequent synchronization</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Data Requirements: Memory must be pre-allocated with correct type/size</li>
<li>RNG Performance: XORWOW vs MRG32K3A have different speed/quality tradeoffs</li>
<li>Stream Isolation: Uses dedicated stream to avoid RNG sequence corruption</li>
<li>Float Generation: Produces values in [0,1) range for 32-bit float types</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Type Safety: Requires T=float for correct operation with curandGenerateUniform</li>
<li>Seed Size: Implicit cast to unsigned long long may truncate 64-bit values</li>
<li>Generator Overhead: Repeated create/destroy calls impact performance</li>
<li>Concurrent Access: Not thread-safe for same data pointer</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><span class="comment">// Initialize 1M element array with random values</span></div>
<div class="line"><span class="keywordtype">float</span>* d_data;</div>
<div class="line">cudaMalloc(&amp;d_data, (1&lt;&lt;20) * <span class="keyword">sizeof</span>(<span class="keywordtype">float</span>));</div>
<div class="line"> </div>
<div class="line">manager.randomize(d_data, 1&lt;&lt;20, 12345, CURAND_RNG_PSEUDO_XORWOW);</div>
<div class="line">manager.syncData(d_data);  <span class="comment">// Wait for completion</span></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>curandCreateGenerator, curandGenerateUniform, eventPool </dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00757">757</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a731986c2c4ecd056562eaddadef46df8_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a731986c2c4ecd056562eaddadef46df8_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a731986c2c4ecd056562eaddadef46df8_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a731986c2c4ecd056562eaddadef46df8_cgraph">
<area shape="rect" title="Generates uniformly distributed random numbers on GPU using CURAND." alt="" coords="5,39,191,81"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,44,223,39,223,45,192,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="239,72,424,115"/>
<area shape="poly" title=" " alt="" coords="192,71,223,75,223,81,191,76"/>
</map>
</div>

</div>
</div>
<a id="a1260d95d0eddf75b72700da07361a4bd" name="a1260d95d0eddf75b72700da07361a4bd"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1260d95d0eddf75b72700da07361a4bd">&#9670;&#160;</a></span>recordData()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::recordData </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">cudaStream_t</td>          <td class="paramname"><span class="paramname"><em>stream</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Records write completion event for asynchronous data operations. </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">T</td><td>Data type (inferred from pointer) </td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>Device memory pointer tracking write completion </td></tr>
    <tr><td class="paramname">stream</td><td>CUDA stream where write operation occurred</td></tr>
  </table>
  </dd>
</dl>
<p>This method:</p><ol type="1">
<li>Creates CUDA event through event pool</li>
<li>Records event on specified stream at current execution point</li>
<li>Registers automatic cleanup callback upon event completion</li>
<li>Associates event with target data for dependency tracking</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Event Lifetime: Managed by event pool with automatic recycling</li>
<li>Write Tracking: Enables cross-stream synchronization via <a class="el" href="#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete.">streamWait()</a></li>
<li>Thread Safety: Requires external synchronization for concurrent data access</li>
<li>Stream Ordering: Captures all preceding operations in the stream</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Event Pool Initialization: Must be properly initialized before use</li>
<li>Data Ownership: Pointer must match subsequent synchronization calls</li>
<li>Stream Validity: Target stream must be active during recording</li>
<li>Host Visibility: Does NOT guarantee host memory consistency</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><span class="comment">// Typical asynchronous write pattern:</span></div>
<div class="line">cudaMemcpyAsync(devPtr, hostPtr, size, cudaMemcpyHostToDevice, stream);</div>
<div class="line"><a class="code hl_function" href="#a1260d95d0eddf75b72700da07361a4bd">recordData</a>(devPtr, stream);  <span class="comment">// Bookmark write completion</span></div>
<div class="line"><a class="code hl_function" href="#adb1078a67c6e38932d7d58c2adb05ec0">streamWait</a>(devPtr, computeStream);  <span class="comment">// Enforce dependency</span></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_a1260d95d0eddf75b72700da07361a4bd"><div class="ttname"><a href="#a1260d95d0eddf75b72700da07361a4bd">nz::cuStrm::StreamManager::recordData</a></div><div class="ttdeci">void recordData(T *data, cudaStream_t stream)</div><div class="ttdoc">Records write completion event for asynchronous data operations.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00886">StreamManager.cuh:886</a></div></div>
<div class="ttc" id="aclassnz_1_1cu_strm_1_1_stream_manager_html_adb1078a67c6e38932d7d58c2adb05ec0"><div class="ttname"><a href="#adb1078a67c6e38932d7d58c2adb05ec0">nz::cuStrm::StreamManager::streamWait</a></div><div class="ttdeci">void streamWait(T *data, cudaStream_t stream)</div><div class="ttdoc">Synchronizes CUDA stream execution until data writes complete.</div><div class="ttdef"><b>Definition</b> <a href="_stream_manager_8cuh_source.html#l00840">StreamManager.cuh:840</a></div></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>cudaEventRecord, cudaStreamAddCallback, <a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html#a7cdb0eb8403303e52ad5cc5f6e2d0737" title="Record an event in a CUDA stream associated with a given data pointer.">EventPool::recordData</a> </dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00886">886</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>

</div>
</div>
<a id="adb1078a67c6e38932d7d58c2adb05ec0" name="adb1078a67c6e38932d7d58c2adb05ec0"></a>
<h2 class="memtitle"><span class="permalink"><a href="#adb1078a67c6e38932d7d58c2adb05ec0">&#9670;&#160;</a></span>streamWait()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::streamWait </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">cudaStream_t</td>          <td class="paramname"><span class="paramname"><em>stream</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Synchronizes CUDA stream execution until data writes complete. </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">T</td><td>Data type (inferred from pointer) </td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>Device memory pointer with pending write operations </td></tr>
    <tr><td class="paramname">stream</td><td>CUDA stream to apply synchronization constraints</td></tr>
  </table>
  </dd>
</dl>
<p>This function:</p><ol type="1">
<li>Retrieves all CUDA events associated with write operations on target data</li>
<li>Applies stream wait operations for each pending write event</li>
<li>Ensures subsequent stream operations execute only after data writes complete</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Event Lifetime: Events remain owned by event pool until explicitly released</li>
<li>Write Synchronization: Only affects write-type CUDA events for this data</li>
<li>Thread Safety: Requires external synchronization for concurrent data access</li>
<li>Pointer Association: Data pointer must match previous async operation records</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Event Pool Validity: Undefined behavior if event pool not initialized</li>
<li>Data Ownership: Incorrect pointers may wait on unrelated operations</li>
<li>Stream State: Target stream must be valid and not destroyed</li>
<li>Partial Completion: Does NOT guarantee host-side data readiness</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><span class="comment">// Typical producer-consumer pattern:</span></div>
<div class="line">asyncWrite(d_data, host_buffer, writeStream);  <span class="comment">// Records write event</span></div>
<div class="line"><a class="code hl_function" href="#adb1078a67c6e38932d7d58c2adb05ec0">streamWait</a>(d_data, computeStream);  <span class="comment">// Make compute wait</span></div>
<div class="line">processData&lt;&lt;&lt;..., computeStream&gt;&gt;&gt;(d_data);  <span class="comment">// Safe access</span></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>cudaStreamWaitEvent, cudaEventRecord, <a class="el" href="classnz_1_1cu_strm_1_1_event_pool.html" title="Internal event management system for CUDA stream synchronization (Part of StreamManager)">EventPool</a> </dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00840">840</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>

</div>
</div>
<a id="a46ce59b45de432842454aadf00b93791" name="a46ce59b45de432842454aadf00b93791"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a46ce59b45de432842454aadf00b93791">&#9670;&#160;</a></span>submit()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<div class="memtemplate">
template&lt;typename F , typename... Args&gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::submit </td>
          <td>(</td>
          <td class="paramtype">F</td>          <td class="paramname"><span class="paramname"><em>func</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">dim3</td>          <td class="paramname"><span class="paramname"><em>grid</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">dim3</td>          <td class="paramname"><span class="paramname"><em>block</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t</td>          <td class="paramname"><span class="paramname"><em>shared</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>odata</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>idata</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">Args...</td>          <td class="paramname"><span class="paramname"><em>args</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Asynchronously submits a CUDA kernel with stream-ordered dependency management. </p>
<dl class="tparams"><dt>Template Parameters</dt><dd>
  <table class="tparams">
    <tr><td class="paramname">F</td><td>CUDA kernel function type (automatically deduced) </td></tr>
    <tr><td class="paramname">Args</td><td>Variadic template parameter types (automatically deduced) </td></tr>
  </table>
  </dd>
</dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">func</td><td>CUDA kernel function pointer (host-to-device)<ul>
<li>Must comply with CUDA kernel calling conventions </li>
</ul>
</td></tr>
    <tr><td class="paramname">grid</td><td>Grid dimension configuration (host-to-device)<ul>
<li>Use dim3 to define computation grid structure </li>
</ul>
</td></tr>
    <tr><td class="paramname">block</td><td>Thread block dimension configuration (host-to-device)<ul>
<li>Use dim3 to define thread block structure </li>
</ul>
</td></tr>
    <tr><td class="paramname">shared</td><td>Dynamic shared memory size in bytes (host-to-device)<ul>
<li>Set to 0 if not using dynamic shared memory </li>
</ul>
</td></tr>
    <tr><td class="paramname">odata</td><td>Output data device pointer (device-to-device)<ul>
<li>First output parameter of the kernel</li>
<li>Automatically records completion event </li>
</ul>
</td></tr>
    <tr><td class="paramname">idata</td><td>Input data device pointer (device-to-device)<ul>
<li>First input parameter of the kernel</li>
<li>Automatically inserts stream wait dependencies </li>
</ul>
</td></tr>
    <tr><td class="paramname">args</td><td>Additional kernel arguments (host-to-device)<ul>
<li>Supports scalar type parameter passing</li>
<li>Must satisfy CUDA kernel parameter passing rules</li>
</ul>
</td></tr>
  </table>
  </dd>
</dl>
<p>This method implements full lifecycle management for stream-ordered kernel execution:</p><ol type="1">
<li>Acquires a CUDA stream from the pool using round-robin scheduling</li>
<li>Inserts stream wait operations for input/output data dependencies</li>
<li>Submits the configured CUDA kernel to the target stream</li>
<li>Records output data completion event</li>
</ol>
<p>Dependency management mechanisms:</p><ul>
<li>Input data dependencies: Ensures producer-consumer order through streamWait</li>
<li>Output data visibility: Guarantees subsequent operation visibility via eventPool</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This is the base implementation for single input/output, multiple overloads exist for different parameter counts</li>
<li>Kernel function signature must match parameter order (output first, input second)</li>
<li>Default stream-ordered launch, no manual synchronization required</li>
<li>Thread-safe: Internal stream acquisition protected by mutex</li>
<li>CUDA errors should be checked via cudaPeekAtLastError</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><span class="comment">// Example: Vector addition kernel</span></div>
<div class="line">__global__ <span class="keywordtype">void</span> vecAdd(<span class="keywordtype">float</span>* out, <span class="keyword">const</span> <span class="keywordtype">float</span>* a, <span class="keyword">const</span> <span class="keywordtype">float</span>* b, <span class="keywordtype">int</span> n) {</div>
<div class="line">    <span class="keywordtype">int</span> i = blockIdx.x * blockDim.x + threadIdx.x;</div>
<div class="line">    <span class="keywordflow">if</span> (i &lt; n) out[i] = a[i] + b[i];</div>
<div class="line">}</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Usage example</span></div>
<div class="line"><span class="keywordtype">float</span> *d_out, *d_a, *d_b;</div>
<div class="line">manager.malloc(&amp;d_out, N);</div>
<div class="line">manager.malloc(&amp;d_a, N);</div>
<div class="line">manager.malloc(&amp;d_b, N);</div>
<div class="line"> </div>
<div class="line">manager.submit(vecAdd,</div>
<div class="line">              dim3((N+255)/256),  <span class="comment">// grid</span></div>
<div class="line">              dim3(256),          <span class="comment">// block</span></div>
<div class="line">              0,                  <span class="comment">// shared memory</span></div>
<div class="line">              d_out,              <span class="comment">// output pointer</span></div>
<div class="line">              d_a,                <span class="comment">// input pointer</span></div>
<div class="line">              d_b, N);            <span class="comment">// additional args</span></div>
<div class="line"> </div>
<div class="line">cudaError_t err = cudaPeekAtLastError();</div>
<div class="line"><span class="keywordflow">if</span> (err != cudaSuccess) {</div>
<div class="line">    <span class="comment">// Handle kernel configuration errors</span></div>
<div class="line">}</div>
</div><!-- fragment --><dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Additional arguments must persist until kernel execution completes</li>
<li>Avoid passing host pointers in kernel arguments </li>
</ul>
</dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00471">471</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1cu_strm_1_1_stream_manager_a46ce59b45de432842454aadf00b93791_cgraph.png" border="0" usemap="#aclassnz_1_1cu_strm_1_1_stream_manager_a46ce59b45de432842454aadf00b93791_cgraph" alt=""/></div>
<map name="aclassnz_1_1cu_strm_1_1_stream_manager_a46ce59b45de432842454aadf00b93791_cgraph" id="aclassnz_1_1cu_strm_1_1_stream_manager_a46ce59b45de432842454aadf00b93791_cgraph">
<area shape="rect" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="5,39,191,81"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="239,5,424,48"/>
<area shape="poly" title=" " alt="" coords="191,44,223,39,223,45,192,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="239,72,424,115"/>
<area shape="poly" title=" " alt="" coords="192,71,223,75,223,81,191,76"/>
</map>
</div>

</div>
</div>
<a id="aa11ff4c21b003e5089f3cd65724b2193" name="aa11ff4c21b003e5089f3cd65724b2193"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa11ff4c21b003e5089f3cd65724b2193">&#9670;&#160;</a></span>sync()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::sync </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Synchronizes all CUDA streams in the stream pool by blocking the host thread. </p>
<p>This function performs a full barrier synchronization across all managed CUDA streams in the stream pool. It sequentially waits for the completion of all operations enqueued in every stream, ensuring no pending GPU work remains after return.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Heavyweight Operation: Introduces host-side wait for entire stream pool completion</li>
<li>Execution Order: Synchronizes streams regardless of their dependency relationships</li>
<li>Alternative: Use event-based synchronization for partial stream dependencies</li>
<li>Thread Safety: Safe to call concurrently if streamPool remains unmodified</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Host Blocking: Freezes calling thread until all streams complete (millisecond~second scale)</li>
<li>Error Propagation: Does not handle CUDA errors internally; check errors post-call</li>
<li>Stream Validity: Assumes all streams in streamPool are valid CUDA streams</li>
</ul>
</dd></dl>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">No</td><td>explicit exceptions thrown. CUDA runtime errors may surface through:<ul>
<li>Subsequent CUDA API calls returning error codes</li>
<li>External CUDA error handlers (if configured)</li>
</ul>
</td></tr>
  </table>
  </dd>
</dl>
<div class="fragment"><div class="line"><span class="comment">// Benchmark timing with full synchronization</span></div>
<div class="line"><span class="keyword">auto</span> start = std::chrono::high_resolution_clock::now();</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Submit multiple GPU workloads</span></div>
<div class="line">manager.submit(kernel1, ...);</div>
<div class="line">manager.submit(kernel2, ...);</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Full system synchronization</span></div>
<div class="line">manager.sync();</div>
<div class="line"> </div>
<div class="line"><span class="keyword">auto</span> end = std::chrono::high_resolution_clock::now();</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Check for asynchronous errors</span></div>
<div class="line">cudaError_t err = cudaDeviceSynchronize();</div>
<div class="line"><span class="keywordflow">if</span> (err != cudaSuccess) {</div>
<div class="line">    std::cerr &lt;&lt; <span class="stringliteral">&quot;CUDA error: &quot;</span> &lt;&lt; cudaGetErrorString(err) &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>cudaStreamSynchronize, <a class="el" href="#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream-ordered dependency management.">submit()</a> </dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00671">671</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>

</div>
</div>
<a id="abe439fa00c0bd369c0b2345b095ed5af" name="abe439fa00c0bd369c0b2345b095ed5af"></a>
<h2 class="memtitle"><span class="permalink"><a href="#abe439fa00c0bd369c0b2345b095ed5af">&#9670;&#160;</a></span>syncData()</h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename T &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void <a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html">nz::cuStrm::StreamManager</a>&lt; T &gt;::syncData </td>
          <td>(</td>
          <td class="paramtype">T *</td>          <td class="paramname"><span class="paramname"><em>data</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Synchronizes host thread with completion events for a specific data object. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>Device data pointer to synchronize (device-to-host)<ul>
<li>Must be a valid device pointer previously used in <a class="el" href="#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream-ordered dependency management.">submit()</a> calls</li>
<li>Synchronizes based on last recorded event for this data</li>
</ul>
</td></tr>
  </table>
  </dd>
</dl>
<p>This function provides targeted synchronization for a specific data object by:</p><ol type="1">
<li>Querying the event pool for the most recent completion event associated with the data</li>
<li>Blocking host execution until all operations preceding the event complete</li>
<li>Maintaining other stream operations' asynchronous execution</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Precision Synchronization: Only waits for operations affecting this specific data</li>
<li>Event Reuse: Completion event is preserved for future dependency tracking</li>
<li>Thread Safety: Safe for concurrent access if data isn't being modified</li>
<li>Lightweight Alternative: Prefer over full <a class="el" href="#aa11ff4c21b003e5089f3cd65724b2193" title="Synchronizes all CUDA streams in the stream pool by blocking the host thread.">sync()</a> for partial workflow completion</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Host Blocking: Freezes thread until target data's operations complete</li>
<li>Stale Pointers: Undefined behavior if data has been deallocated</li>
<li>Temporal Scope: Only synchronizes with events recorded since last <a class="el" href="#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream-ordered dependency management.">submit()</a></li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><span class="comment">// Producer-consumer workflow</span></div>
<div class="line">manager.submit(producerKernel, ..., output_data, ...);</div>
<div class="line">manager.syncData(output_data);  <span class="comment">// Wait for producer completion</span></div>
<div class="line">manager.submit(consumerKernel, ..., input_data=output_data, ...);</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Host data access</span></div>
<div class="line">manager.submit(computeKernel, ..., result_data, ...);</div>
<div class="line">manager.syncData(result_data);</div>
<div class="line">cudaMemcpy(host_result, result_data, ..., cudaMemcpyDeviceToHost);</div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd><a class="el" href="#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream-ordered dependency management.">submit()</a>, eventPool </dd></dl>

<p class="definition">Definition at line <a class="el" href="_stream_manager_8cuh_source.html#l00714">714</a> of file <a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a>.</p>

</div>
</div>
<hr/>The documentation for this class was generated from the following file:<ul>
<li>D:/Users/Mgepahmge/Documents/C Program/NeuZephyr/include/NeuZephyr/<a class="el" href="_stream_manager_8cuh_source.html">StreamManager.cuh</a></li>
</ul>
</div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by&#160;<a href="https://www.doxygen.org/index.html"><img class="footer" src="doxygen.svg" width="104" height="31" alt="doxygen"/></a> 1.12.0
</small></address>
</div><!-- doc-content -->
</body>
</html>
