<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en-US">
<head>
<!-- Charset via Content-Type; "text/html" is the correct MIME type here —
     the original "text/xhtml" is not a registered media type, so some
     parsers would ignore the charset declaration entirely. -->
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=11"/>
<meta name="generator" content="Doxygen 1.12.0"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>NeuZephyr: nz::data::Tensor Class Reference</title>
<!-- Favicon is a PNG file, so declare it as image/png
     (was image/x-icon, which is the ICO-format type). -->
<link rel="icon" href="NZ_logo2.png" type="image/png" />
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<!-- Project banner: logo image plus project name/brief strings
     configured in the Doxyfile (PROJECT_NAME / PROJECT_BRIEF). -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr id="projectrow">
  <td id="projectlogo"><img alt="Logo" src="NZ_logo2.png"/></td>
  <td id="projectalign">
   <div id="projectname">NeuZephyr
   </div>
   <div id="projectbrief">Simple DL Framework</div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.12.0 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
/* Initializes collapsible sections; codefold is presumably defined in
   dynsections.js loaded from <head> — NOTE(review): confirm against that file. */
$(function() { codefold.init(0); });
/* @license-end */
</script>
<!-- Primary tab bar; "current" marks the active top-level section. -->
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="pages.html"><span>Related&#160;Pages</span></a></li>
      <li><a href="namespaces.html"><span>Namespaces</span></a></li>
      <li class="current"><a href="annotated.html"><span>Classes</span></a></li>
      <li><a href="files.html"><span>Files</span></a></li>
    </ul>
  </div>
<!-- Secondary tab bar for the Classes section. -->
  <div id="navrow2" class="tabs2">
    <ul class="tablist">
      <li><a href="annotated.html"><span>Class&#160;List</span></a></li>
      <li><a href="classes.html"><span>Class&#160;Index</span></a></li>
      <li><a href="inherits.html"><span>Class&#160;Hierarchy</span></a></li>
      <li><a href="functions.html"><span>Class&#160;Members</span></a></li>
    </ul>
  </div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
/* initResizable is presumably provided by resize.js loaded from <head> —
   NOTE(review): confirm the false argument's meaning against that file. */
$(function(){ initResizable(false); });
/* @license-end */
</script>
<!-- Breadcrumb trail: nz -> data -> Tensor. -->
<div id="nav-path" class="navpath">
  <ul>
<li class="navelem"><b>nz</b></li><li class="navelem"><a class="el" href="namespacenz_1_1data.html">data</a></li><li class="navelem"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a></li>  </ul>
</div>
</div><!-- top -->
<div id="doc-content">
<!-- Page header: quick links (Friends anchor, full member list) and the page title. -->
<div class="header">
  <div class="summary">
<a href="#friends">Friends</a> &#124;
<a href="classnz_1_1data_1_1_tensor-members.html">List of all members</a>  </div>
  <div class="headertitle"><div class="title">nz::data::Tensor Class Reference</div></div>
</div><!--header-->
<div class="contents">

<p>A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.  
 <a href="#details">More...</a></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="pub-methods" name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr><td colspan="2"><div class="groupHeader">Constructors and Destructors</div></td></tr>
<tr class="memitem:ad0dda0efff93778cab46fd5aa708b983" id="r_ad0dda0efff93778cab46fd5aa708b983"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ad0dda0efff93778cab46fd5aa708b983">Tensor</a> ()</td></tr>
<tr class="memdesc:ad0dda0efff93778cab46fd5aa708b983"><td class="mdescLeft">&#160;</td><td class="mdescRight">Default constructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:ad0dda0efff93778cab46fd5aa708b983"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a6a3fc1e2d0b5154cdb4961679d0752af" id="r_a6a3fc1e2d0b5154cdb4961679d0752af"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a6a3fc1e2d0b5154cdb4961679d0752af">Tensor</a> (const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;<a class="el" href="#aade7b0c42622279888d755f4f7989aac">shape</a>, bool requires_grad=false)</td></tr>
<tr class="memdesc:a6a3fc1e2d0b5154cdb4961679d0752af"><td class="mdescLeft">&#160;</td><td class="mdescRight">Constructor that initializes a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> with the specified shape.  <br /></td></tr>
<tr class="separator:a6a3fc1e2d0b5154cdb4961679d0752af"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad65fa89fac9d72c92d34ace7e94610df" id="r_ad65fa89fac9d72c92d34ace7e94610df"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ad65fa89fac9d72c92d34ace7e94610df">Tensor</a> (const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;<a class="el" href="#aade7b0c42622279888d755f4f7989aac">shape</a>, value_type *<a class="el" href="#a38ba233ef49f34620297f96edd962c55">data</a>, bool requires_grad=false, bool host=true)</td></tr>
<tr class="memdesc:ad65fa89fac9d72c92d34ace7e94610df"><td class="mdescLeft">&#160;</td><td class="mdescRight">Constructs a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object with specified shape, data, gradient requirement, and data location.  <br /></td></tr>
<tr class="separator:ad65fa89fac9d72c92d34ace7e94610df"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a18937864a9eb48eb91a5d82ebf9c010e" id="r_a18937864a9eb48eb91a5d82ebf9c010e"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a18937864a9eb48eb91a5d82ebf9c010e">Tensor</a> (const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;<a class="el" href="#aade7b0c42622279888d755f4f7989aac">shape</a>, const std::initializer_list&lt; value_type &gt; &amp;<a class="el" href="#a38ba233ef49f34620297f96edd962c55">data</a>, bool requires_grad=false)</td></tr>
<tr class="memdesc:a18937864a9eb48eb91a5d82ebf9c010e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Constructs a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object with a specified shape, initializer list data, and gradient requirement.  <br /></td></tr>
<tr class="separator:a18937864a9eb48eb91a5d82ebf9c010e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a6184f0270420ac054f7bd372bbed1406" id="r_a6184f0270420ac054f7bd372bbed1406"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a6184f0270420ac054f7bd372bbed1406">Tensor</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other)</td></tr>
<tr class="memdesc:a6184f0270420ac054f7bd372bbed1406"><td class="mdescLeft">&#160;</td><td class="mdescRight">Copy constructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:a6184f0270420ac054f7bd372bbed1406"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:adb57f91ae907875d78d804de85dbbc73" id="r_adb57f91ae907875d78d804de85dbbc73"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#adb57f91ae907875d78d804de85dbbc73">Tensor</a> (<a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;&amp;other) noexcept(false)</td></tr>
<tr class="memdesc:adb57f91ae907875d78d804de85dbbc73"><td class="mdescLeft">&#160;</td><td class="mdescRight">Move constructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:adb57f91ae907875d78d804de85dbbc73"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:acdb68bf53d38e5a93fdd0effa4c3059a" id="r_acdb68bf53d38e5a93fdd0effa4c3059a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#acdb68bf53d38e5a93fdd0effa4c3059a">operator=</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other)</td></tr>
<tr class="memdesc:acdb68bf53d38e5a93fdd0effa4c3059a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Assignment operator for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:acdb68bf53d38e5a93fdd0effa4c3059a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a26b24cc132d14e054b3c25923516d781" id="r_a26b24cc132d14e054b3c25923516d781"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a26b24cc132d14e054b3c25923516d781">operator=</a> (<a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;&amp;other) noexcept(false)</td></tr>
<tr class="memdesc:a26b24cc132d14e054b3c25923516d781"><td class="mdescLeft">&#160;</td><td class="mdescRight">Move assignment operator for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:a26b24cc132d14e054b3c25923516d781"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a98a8b254d2b6c8b4893d7a286452a9b0" id="r_a98a8b254d2b6c8b4893d7a286452a9b0"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a98a8b254d2b6c8b4893d7a286452a9b0">~Tensor</a> () noexcept(false)</td></tr>
<tr class="memdesc:a98a8b254d2b6c8b4893d7a286452a9b0"><td class="mdescLeft">&#160;</td><td class="mdescRight">Destructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:a98a8b254d2b6c8b4893d7a286452a9b0"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr><td colspan="2"><div class="groupHeader">Getters and Setters</div></td></tr>
<tr class="memitem:a7cbc6dd248b882c95840835d0deaae1c" id="r_a7cbc6dd248b882c95840835d0deaae1c"><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a7cbc6dd248b882c95840835d0deaae1c">requiresGrad</a> () const noexcept</td></tr>
<tr class="memdesc:a7cbc6dd248b882c95840835d0deaae1c"><td class="mdescLeft">&#160;</td><td class="mdescRight">Checks whether the tensor requires gradient computation.  <br /></td></tr>
<tr class="separator:a7cbc6dd248b882c95840835d0deaae1c"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aade7b0c42622279888d755f4f7989aac" id="r_aade7b0c42622279888d755f4f7989aac"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#aade7b0c42622279888d755f4f7989aac">shape</a> () const noexcept</td></tr>
<tr class="memdesc:aade7b0c42622279888d755f4f7989aac"><td class="mdescLeft">&#160;</td><td class="mdescRight">Retrieves the shape of the tensor.  <br /></td></tr>
<tr class="separator:aade7b0c42622279888d755f4f7989aac"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a31a3aa01fa3ccb56503994a99e39e177" id="r_a31a3aa01fa3ccb56503994a99e39e177"><td class="memItemLeft" align="right" valign="top">size_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a31a3aa01fa3ccb56503994a99e39e177">size</a> () const noexcept</td></tr>
<tr class="memdesc:a31a3aa01fa3ccb56503994a99e39e177"><td class="mdescLeft">&#160;</td><td class="mdescRight">Retrieves the total number of elements in the tensor.  <br /></td></tr>
<tr class="separator:a31a3aa01fa3ccb56503994a99e39e177"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:abddb47a6dc305d289a1e4f91d01a5082" id="r_abddb47a6dc305d289a1e4f91d01a5082"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#abddb47a6dc305d289a1e4f91d01a5082">setRequiresGrad</a> (bool requires_grad)</td></tr>
<tr class="memdesc:abddb47a6dc305d289a1e4f91d01a5082"><td class="mdescLeft">&#160;</td><td class="mdescRight">Sets whether the tensor requires gradient computation.  <br /></td></tr>
<tr class="separator:abddb47a6dc305d289a1e4f91d01a5082"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a38ba233ef49f34620297f96edd962c55" id="r_a38ba233ef49f34620297f96edd962c55"><td class="memItemLeft" align="right" valign="top">value_type *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a38ba233ef49f34620297f96edd962c55">data</a> () const noexcept</td></tr>
<tr class="memdesc:a38ba233ef49f34620297f96edd962c55"><td class="mdescLeft">&#160;</td><td class="mdescRight">Retrieves a pointer to the tensor's data stored in GPU memory.  <br /></td></tr>
<tr class="separator:a38ba233ef49f34620297f96edd962c55"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a615af61999990e2edebacf5afbad0e57" id="r_a615af61999990e2edebacf5afbad0e57"><td class="memItemLeft" align="right" valign="top">std::vector&lt; value_type &gt;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a615af61999990e2edebacf5afbad0e57">hostData</a> () const noexcept</td></tr>
<tr class="memdesc:a615af61999990e2edebacf5afbad0e57"><td class="mdescLeft">&#160;</td><td class="mdescRight">Retrieves the tensor data from the device to the host and returns it as a std::vector.  <br /></td></tr>
<tr class="separator:a615af61999990e2edebacf5afbad0e57"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad6107b98beb881d0209345185d5ad145" id="r_ad6107b98beb881d0209345185d5ad145"><td class="memItemLeft" align="right" valign="top">value_type *&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ad6107b98beb881d0209345185d5ad145">grad</a> () const</td></tr>
<tr class="memdesc:ad6107b98beb881d0209345185d5ad145"><td class="mdescLeft">&#160;</td><td class="mdescRight">Retrieves a pointer to the gradient data stored in GPU memory.  <br /></td></tr>
<tr class="separator:ad6107b98beb881d0209345185d5ad145"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a7fd4badf84f9c5398e08b23a9826dfbc" id="r_a7fd4badf84f9c5398e08b23a9826dfbc"><td class="memItemLeft" align="right" valign="top">std::vector&lt; value_type &gt;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a7fd4badf84f9c5398e08b23a9826dfbc">hostGrad</a> () const</td></tr>
<tr class="memdesc:a7fd4badf84f9c5398e08b23a9826dfbc"><td class="mdescLeft">&#160;</td><td class="mdescRight">Retrieves the gradient data of the tensor from the device to the host and returns it as a std::vector.  <br /></td></tr>
<tr class="separator:a7fd4badf84f9c5398e08b23a9826dfbc"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:adf80894b8e06f260bb2695951e2f539e" id="r_adf80894b8e06f260bb2695951e2f539e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#adf80894b8e06f260bb2695951e2f539e">dataInject</a> (value_type *<a class="el" href="#a38ba233ef49f34620297f96edd962c55">data</a>, bool <a class="el" href="#ad6107b98beb881d0209345185d5ad145">grad</a>=false) const</td></tr>
<tr class="memdesc:adf80894b8e06f260bb2695951e2f539e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Injects data or gradient data into the tensor.  <br /></td></tr>
<tr class="separator:adf80894b8e06f260bb2695951e2f539e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ae3cc31f5b541809d75556ebbfda8ae57" id="r_ae3cc31f5b541809d75556ebbfda8ae57"><td class="memTemplParams" colspan="2">template&lt;typename Iterator &gt; </td></tr>
<tr class="memitem:ae3cc31f5b541809d75556ebbfda8ae57"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="#ae3cc31f5b541809d75556ebbfda8ae57">dataInject</a> (Iterator begin, Iterator end, const bool <a class="el" href="#ad6107b98beb881d0209345185d5ad145">grad</a>=false) const</td></tr>
<tr class="memdesc:ae3cc31f5b541809d75556ebbfda8ae57"><td class="mdescLeft">&#160;</td><td class="mdescRight">Injects data or gradient data into the tensor using iterators.  <br /></td></tr>
<tr class="separator:ae3cc31f5b541809d75556ebbfda8ae57"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a2c4217ad3ebcdb4a1bcf2fd38151d007" id="r_a2c4217ad3ebcdb4a1bcf2fd38151d007"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a2c4217ad3ebcdb4a1bcf2fd38151d007">dataInject</a> (const std::initializer_list&lt; value_type &gt; &amp;<a class="el" href="#a38ba233ef49f34620297f96edd962c55">data</a>, bool <a class="el" href="#ad6107b98beb881d0209345185d5ad145">grad</a>=false) const</td></tr>
<tr class="memdesc:a2c4217ad3ebcdb4a1bcf2fd38151d007"><td class="mdescLeft">&#160;</td><td class="mdescRight">Injects data or gradient data into the tensor using a std::initializer_list.  <br /></td></tr>
<tr class="separator:a2c4217ad3ebcdb4a1bcf2fd38151d007"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr><td colspan="2"><div class="groupHeader">Modifiers</div></td></tr>
<tr class="memitem:a6fed8efad540a7621dd6640b2f2466d0" id="r_a6fed8efad540a7621dd6640b2f2466d0"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a6fed8efad540a7621dd6640b2f2466d0">zeroGrad</a> () const</td></tr>
<tr class="memdesc:a6fed8efad540a7621dd6640b2f2466d0"><td class="mdescLeft">&#160;</td><td class="mdescRight">Resets the gradient data to zero.  <br /></td></tr>
<tr class="separator:a6fed8efad540a7621dd6640b2f2466d0"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a7a9f1d5fae2989181645e5f59f7666d8" id="r_a7a9f1d5fae2989181645e5f59f7666d8"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a7a9f1d5fae2989181645e5f59f7666d8">randomize</a> (unsigned long long seed=0) const</td></tr>
<tr class="memdesc:a7a9f1d5fae2989181645e5f59f7666d8"><td class="mdescLeft">&#160;</td><td class="mdescRight">Randomizes the tensor's data with a uniform distribution.  <br /></td></tr>
<tr class="separator:a7a9f1d5fae2989181645e5f59f7666d8"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:afc4e6385b97cf7ceb8bb74748b73b681" id="r_afc4e6385b97cf7ceb8bb74748b73b681"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#afc4e6385b97cf7ceb8bb74748b73b681">clear</a> () const</td></tr>
<tr class="memdesc:afc4e6385b97cf7ceb8bb74748b73b681"><td class="mdescLeft">&#160;</td><td class="mdescRight">Clears the tensor's data by setting all elements to zero.  <br /></td></tr>
<tr class="separator:afc4e6385b97cf7ceb8bb74748b73b681"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad220de56b18c404611f07f2290cd7e9d" id="r_ad220de56b18c404611f07f2290cd7e9d"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ad220de56b18c404611f07f2290cd7e9d">fill</a> (value_type value, bool isGrad=false) const</td></tr>
<tr class="memdesc:ad220de56b18c404611f07f2290cd7e9d"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills the tensor's data with a specified value.  <br /></td></tr>
<tr class="separator:ad220de56b18c404611f07f2290cd7e9d"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ae6144f6d7fa612d98538f17baf4ef574" id="r_ae6144f6d7fa612d98538f17baf4ef574"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ae6144f6d7fa612d98538f17baf4ef574">fillMatrix</a> (value_type value, size_type batch, size_type channels, bool isGrad=false)</td></tr>
<tr class="memdesc:ae6144f6d7fa612d98538f17baf4ef574"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fill a specific matrix slice within the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> with a given value.  <br /></td></tr>
<tr class="separator:ae6144f6d7fa612d98538f17baf4ef574"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a877f9f2704e39100142d81d289ddc3f2" id="r_a877f9f2704e39100142d81d289ddc3f2"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a877f9f2704e39100142d81d289ddc3f2">reshape</a> (const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;<a class="el" href="#aade7b0c42622279888d755f4f7989aac">shape</a>)</td></tr>
<tr class="memdesc:a877f9f2704e39100142d81d289ddc3f2"><td class="mdescLeft">&#160;</td><td class="mdescRight">Reshapes the tensor to the specified shape.  <br /></td></tr>
<tr class="separator:a877f9f2704e39100142d81d289ddc3f2"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a45e6f84ae74111ced9a96bdf204b2294" id="r_a45e6f84ae74111ced9a96bdf204b2294"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a45e6f84ae74111ced9a96bdf204b2294">transpose</a> ()</td></tr>
<tr class="memdesc:a45e6f84ae74111ced9a96bdf204b2294"><td class="mdescLeft">&#160;</td><td class="mdescRight">Transposes the tensor by swapping its dimensions and rearranging the data.  <br /></td></tr>
<tr class="separator:a45e6f84ae74111ced9a96bdf204b2294"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a2f9be06ac6766a5fa6de3548c722ef43" id="r_a2f9be06ac6766a5fa6de3548c722ef43"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a2f9be06ac6766a5fa6de3548c722ef43">setData</a> (const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;position, value_type value, bool isGrad=false) const</td></tr>
<tr class="memdesc:a2f9be06ac6766a5fa6de3548c722ef43"><td class="mdescLeft">&#160;</td><td class="mdescRight">Sets the value of an element in the tensor or its gradient at a specified position.  <br /></td></tr>
<tr class="separator:a2f9be06ac6766a5fa6de3548c722ef43"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr><td colspan="2"><div class="groupHeader">Math</div></td></tr>
<tr class="memitem:a36cd1679c45059de64deeca9152b0288" id="r_a36cd1679c45059de64deeca9152b0288"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a36cd1679c45059de64deeca9152b0288">operator+</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other) const</td></tr>
<tr class="memdesc:a36cd1679c45059de64deeca9152b0288"><td class="mdescLeft">&#160;</td><td class="mdescRight">Adds two tensors element-wise and returns the result.  <br /></td></tr>
<tr class="separator:a36cd1679c45059de64deeca9152b0288"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a25cc6634977413df0b67d6e7365448a2" id="r_a25cc6634977413df0b67d6e7365448a2"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a25cc6634977413df0b67d6e7365448a2">operator-</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other) const</td></tr>
<tr class="memdesc:a25cc6634977413df0b67d6e7365448a2"><td class="mdescLeft">&#160;</td><td class="mdescRight">Subtracts one tensor from another element-wise and returns the result.  <br /></td></tr>
<tr class="separator:a25cc6634977413df0b67d6e7365448a2"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aaa22ac6f3de75ee92a4307320eda7e87" id="r_aaa22ac6f3de75ee92a4307320eda7e87"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#aaa22ac6f3de75ee92a4307320eda7e87">operator*</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other) const</td></tr>
<tr class="memdesc:aaa22ac6f3de75ee92a4307320eda7e87"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs matrix multiplication of two tensors (matrices) and returns the result.  <br /></td></tr>
<tr class="separator:aaa22ac6f3de75ee92a4307320eda7e87"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad6ac34675276afe1fb2ee2f5d16af538" id="r_ad6ac34675276afe1fb2ee2f5d16af538"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ad6ac34675276afe1fb2ee2f5d16af538">operator/</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other) const</td></tr>
<tr class="memdesc:ad6ac34675276afe1fb2ee2f5d16af538"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs element-wise division between two Tensors.  <br /></td></tr>
<tr class="separator:ad6ac34675276afe1fb2ee2f5d16af538"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad66d0c0f5d9ecb375e1006bc0aecf404" id="r_ad66d0c0f5d9ecb375e1006bc0aecf404"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ad66d0c0f5d9ecb375e1006bc0aecf404">operator-</a> () const</td></tr>
<tr class="memdesc:ad66d0c0f5d9ecb375e1006bc0aecf404"><td class="mdescLeft">&#160;</td><td class="mdescRight">Negates all elements of the tensor and returns the result.  <br /></td></tr>
<tr class="separator:ad66d0c0f5d9ecb375e1006bc0aecf404"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a92c7313608326bb4123d6f08341a6d80" id="r_a92c7313608326bb4123d6f08341a6d80"><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a92c7313608326bb4123d6f08341a6d80">operator==</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other) const</td></tr>
<tr class="memdesc:a92c7313608326bb4123d6f08341a6d80"><td class="mdescLeft">&#160;</td><td class="mdescRight">Checks if two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects are equal.  <br /></td></tr>
<tr class="separator:a92c7313608326bb4123d6f08341a6d80"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aae7b7714f78f4d366e66f1664d37d36a" id="r_aae7b7714f78f4d366e66f1664d37d36a"><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#aae7b7714f78f4d366e66f1664d37d36a">operator!=</a> (const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;other) const</td></tr>
<tr class="memdesc:aae7b7714f78f4d366e66f1664d37d36a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Checks if two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects are not equal.  <br /></td></tr>
<tr class="separator:aae7b7714f78f4d366e66f1664d37d36a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a178a2240cd5d441be508490b2613fc55" id="r_a178a2240cd5d441be508490b2613fc55"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a178a2240cd5d441be508490b2613fc55">recip</a> () const</td></tr>
<tr class="memdesc:a178a2240cd5d441be508490b2613fc55"><td class="mdescLeft">&#160;</td><td class="mdescRight">Computes the reciprocal (1/x) of each element in the tensor and updates the tensor in-place.  <br /></td></tr>
<tr class="separator:a178a2240cd5d441be508490b2613fc55"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a4a657091dfa6a490d873ab8e95d9bb9e" id="r_a4a657091dfa6a490d873ab8e95d9bb9e"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a4a657091dfa6a490d873ab8e95d9bb9e">sum</a> () const</td></tr>
<tr class="memdesc:a4a657091dfa6a490d873ab8e95d9bb9e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Compute the sum of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:a4a657091dfa6a490d873ab8e95d9bb9e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a74aa515ba6b83aa1d05a7bb001b297b3" id="r_a74aa515ba6b83aa1d05a7bb001b297b3"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a74aa515ba6b83aa1d05a7bb001b297b3">sum</a> (size_type batch, size_type channel) const</td></tr>
<tr class="memdesc:a74aa515ba6b83aa1d05a7bb001b297b3"><td class="mdescLeft">&#160;</td><td class="mdescRight">Computes the sum of elements in a specific batch and channel of a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:a74aa515ba6b83aa1d05a7bb001b297b3"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a9131832f57339c0de2e7fb7955940a55" id="r_a9131832f57339c0de2e7fb7955940a55"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a9131832f57339c0de2e7fb7955940a55">max</a> () const</td></tr>
<tr class="memdesc:a9131832f57339c0de2e7fb7955940a55"><td class="mdescLeft">&#160;</td><td class="mdescRight">Finds the maximum value in the tensor.  <br /></td></tr>
<tr class="separator:a9131832f57339c0de2e7fb7955940a55"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a90f7c7cde42c58b41f77d1b941da129f" id="r_a90f7c7cde42c58b41f77d1b941da129f"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a90f7c7cde42c58b41f77d1b941da129f">max</a> (size_type batch, size_type channel) const</td></tr>
<tr class="memdesc:a90f7c7cde42c58b41f77d1b941da129f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Finds the maximum value in a specific batch and channel of the tensor.  <br /></td></tr>
<tr class="separator:a90f7c7cde42c58b41f77d1b941da129f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a70caeac6652c0008b7554db438db090c" id="r_a70caeac6652c0008b7554db438db090c"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a70caeac6652c0008b7554db438db090c">min</a> () const</td></tr>
<tr class="memdesc:a70caeac6652c0008b7554db438db090c"><td class="mdescLeft">&#160;</td><td class="mdescRight">Finds the minimum value in the entire tensor.  <br /></td></tr>
<tr class="separator:a70caeac6652c0008b7554db438db090c"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ae846233848b4cd26181205a594c083b5" id="r_ae846233848b4cd26181205a594c083b5"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ae846233848b4cd26181205a594c083b5">min</a> (size_type batch, size_type channel) const</td></tr>
<tr class="memdesc:ae846233848b4cd26181205a594c083b5"><td class="mdescLeft">&#160;</td><td class="mdescRight">Finds the minimum value in a specific batch and channel of the tensor.  <br /></td></tr>
<tr class="separator:ae846233848b4cd26181205a594c083b5"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a373a517d4a813c94a820d0a45806693e" id="r_a373a517d4a813c94a820d0a45806693e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a373a517d4a813c94a820d0a45806693e">find</a> (value_type value) const</td></tr>
<tr class="memdesc:a373a517d4a813c94a820d0a45806693e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Finds the first occurrence of a given value in the entire tensor and returns its shape indices.  <br /></td></tr>
<tr class="separator:a373a517d4a813c94a820d0a45806693e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ae6c6bc33a47e23ec62e6a62e5e25a8ed" id="r_ae6c6bc33a47e23ec62e6a62e5e25a8ed"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ae6c6bc33a47e23ec62e6a62e5e25a8ed">find</a> (value_type value, size_type batch, size_type channel) const</td></tr>
<tr class="memdesc:ae6c6bc33a47e23ec62e6a62e5e25a8ed"><td class="mdescLeft">&#160;</td><td class="mdescRight">Finds the first occurrence of a given value in a specific batch and channel of the tensor and returns its shape indices.  <br /></td></tr>
<tr class="separator:ae6c6bc33a47e23ec62e6a62e5e25a8ed"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa1818a10415337403d43aad091a5a4c7" id="r_aa1818a10415337403d43aad091a5a4c7"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#aa1818a10415337403d43aad091a5a4c7">expSum</a> () const</td></tr>
<tr class="memdesc:aa1818a10415337403d43aad091a5a4c7"><td class="mdescLeft">&#160;</td><td class="mdescRight">Compute the sum of the exponential values of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:aa1818a10415337403d43aad091a5a4c7"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ac4833838e9a704b6b8d29cbd53c6b3b1" id="r_ac4833838e9a704b6b8d29cbd53c6b3b1"><td class="memItemLeft" align="right" valign="top">value_type&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ac4833838e9a704b6b8d29cbd53c6b3b1">expSum</a> (size_t batch, size_t channel) const</td></tr>
<tr class="memdesc:ac4833838e9a704b6b8d29cbd53c6b3b1"><td class="mdescLeft">&#160;</td><td class="mdescRight">Computes the sum of exponential values of elements in a specific batch and channel of a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.  <br /></td></tr>
<tr class="separator:ac4833838e9a704b6b8d29cbd53c6b3b1"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a7aab89d371ff013c5c021a191bd7348e" id="r_a7aab89d371ff013c5c021a191bd7348e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a7aab89d371ff013c5c021a191bd7348e">syncData</a> () const</td></tr>
<tr class="memdesc:a7aab89d371ff013c5c021a191bd7348e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Synchronize the tensor data by waiting for all CUDA stream write operations to complete.  <br /></td></tr>
<tr class="separator:a7aab89d371ff013c5c021a191bd7348e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:af28425ddc9bee1f75fd923a0de68c37b" id="r_af28425ddc9bee1f75fd923a0de68c37b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#af28425ddc9bee1f75fd923a0de68c37b">syncGrad</a> () const</td></tr>
<tr class="memdesc:af28425ddc9bee1f75fd923a0de68c37b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Synchronize the gradient data of the tensor if gradient computation is required.  <br /></td></tr>
<tr class="separator:af28425ddc9bee1f75fd923a0de68c37b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a0c150b841f02921eb7826a6e03d0267e" id="r_a0c150b841f02921eb7826a6e03d0267e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a0c150b841f02921eb7826a6e03d0267e">sync</a> () const</td></tr>
<tr class="memdesc:a0c150b841f02921eb7826a6e03d0267e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Synchronize both the tensor data and its gradient data.  <br /></td></tr>
<tr class="separator:a0c150b841f02921eb7826a6e03d0267e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr><td colspan="2"><div class="groupHeader">Printer</div></td></tr>
<tr class="memitem:a4b02ed4d2afec1ce75931201af181e14" id="r_a4b02ed4d2afec1ce75931201af181e14"><td class="memItemLeft" align="right" valign="top">std::ostream &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a4b02ed4d2afec1ce75931201af181e14">printGrad</a> (std::ostream &amp;os) const</td></tr>
<tr class="memdesc:a4b02ed4d2afec1ce75931201af181e14"><td class="mdescLeft">&#160;</td><td class="mdescRight">Prints the gradient values of the tensor to an output stream.  <br /></td></tr>
<tr class="separator:a4b02ed4d2afec1ce75931201af181e14"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a2b2309d5428331f2e6f88037bb123c8f" id="r_a2b2309d5428331f2e6f88037bb123c8f"><td class="memItemLeft" align="right" valign="top">std::ostream &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a2b2309d5428331f2e6f88037bb123c8f">print</a> (std::ostream &amp;os) const</td></tr>
<tr class="memdesc:a2b2309d5428331f2e6f88037bb123c8f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Prints the tensor data to an output stream.  <br /></td></tr>
<tr class="separator:a2b2309d5428331f2e6f88037bb123c8f"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="friends" name="friends"></a>
Friends</h2></td></tr>
<tr class="memitem:ab8eaa8e06861a868b7df1a9ee0616a1a" id="r_ab8eaa8e06861a868b7df1a9ee0616a1a"><td class="memItemLeft" align="right" valign="top">DL_API std::ostream &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#ab8eaa8e06861a868b7df1a9ee0616a1a">operator&lt;&lt;</a> (std::ostream &amp;os, const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;tensor)</td></tr>
<tr class="memdesc:ab8eaa8e06861a868b7df1a9ee0616a1a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Overloads the <code>&lt;&lt;</code> operator to print the tensor's data to an output stream.  <br /></td></tr>
<tr class="separator:ab8eaa8e06861a868b7df1a9ee0616a1a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a1ae147fdd4255f7d148aef41e3e436a9" id="r_a1ae147fdd4255f7d148aef41e3e436a9"><td class="memItemLeft" align="right" valign="top">DL_API std::istream &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a1ae147fdd4255f7d148aef41e3e436a9">operator&gt;&gt;</a> (std::istream &amp;is, const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;tensor)</td></tr>
<tr class="memdesc:a1ae147fdd4255f7d148aef41e3e436a9"><td class="mdescLeft">&#160;</td><td class="mdescRight">Overloads the <code>&gt;&gt;</code> operator to read a tensor's data from an input stream.  <br /></td></tr>
<tr class="separator:a1ae147fdd4255f7d148aef41e3e436a9"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><p>A class for representing and manipulating multidimensional arrays (tensors) in GPU memory. </p>
<p>The <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class is designed for high-performance numerical computations in GPU-based environments. It provides a wide range of functionalities, including tensor creation, mathematical operations, memory management, and gradient computation for deep learning tasks.</p>
<h3><a class="anchor" id="autotoc_md128"></a>
Type Definitions:</h3>
<ul>
<li><code>size_type</code>: An alias for <code>unsigned long long</code>, used to represent the size of the tensor. Supports large tensors with up to 64-bit indices.</li>
<li><code>value_type</code>: An alias for <code>float</code>, representing the data type of the tensor elements. Suitable for most machine learning computations.</li>
<li><code>shape_type</code>: An alias for <code>std::vector&lt;int&gt;</code>, representing the shape of the tensor (e.g., <code>{2, 3}</code> for a 2x3 matrix).</li>
</ul>
<h3><a class="anchor" id="autotoc_md129"></a>
Key Features:</h3>
<ul>
<li><b>Memory Management</b>: Handles GPU memory allocation and deallocation using CUDA.</li>
<li><b>Flexible Initialization</b>: Supports initialization via shapes, data pointers, initializer lists, and iterators.</li>
<li><b>Mathematical Operations</b>: Includes overloaded operators (<code>+</code>, <code>-</code>, <code>*</code>, <code>/</code>) and activation functions (<code>ReLU</code>, <code>Sigmoid</code>, <code>Tanh</code>, etc.).</li>
<li><b>Gradient Support</b>: Tracks gradients for tensors that require gradient computation (<code>requires_grad</code>) to facilitate backpropagation in neural networks.</li>
<li><b>Shape Transformation</b>: Supports reshaping and transposing tensors.</li>
</ul>
<h3><a class="anchor" id="autotoc_md130"></a>
Usage Example:</h3>
<div class="fragment"><div class="line"><span class="keyword">using namespace </span><a class="code hl_namespace" href="namespacenz_1_1data.html">nz::data</a>;</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Create a tensor that requires gradient with shape 2x3</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3}, <span class="keyword">true</span>);</div>
<div class="line">tensor.<a class="code hl_function" href="#ad220de56b18c404611f07f2290cd7e9d">fill</a>(1.0f);     <span class="comment">// Fill the tensor with value 1.0</span></div>
<div class="line"> </div>
<div class="line"><span class="comment">// Apply element-wise ReLU activation</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> result = <a class="code hl_function" href="namespacenz_1_1data.html#a4706224f5e7c9a0cfe4c74983aaef1bd">ReLU</a>(tensor);</div>
<div class="line">std::cout &lt;&lt; <span class="stringliteral">&quot;ReLU activated tensor:&quot;</span> &lt;&lt; std::endl;</div>
<div class="line">std::cout &lt;&lt; result &lt;&lt; std::endl;        <span class="comment">// Print the result of ReLU activation</span></div>
<div class="line"> </div>
<div class="line"><span class="comment">// Perform matrix multiplication (2x3 * 3x2 = 2x2)</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor3({3, 2}, <span class="keyword">true</span>);</div>
<div class="line">tensor3.<a class="code hl_function" href="#adf80894b8e06f260bb2695951e2f539e">dataInject</a>({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}); <span class="comment">// Fill tensor3 with values</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> multiplied_result = tensor * tensor3;  <span class="comment">// Multiply tensor (2x3) by tensor3 (3x2)</span></div>
<div class="line">std::cout &lt;&lt; <span class="stringliteral">&quot;Multiplication result (2x3 * 3x2 = 2x2):&quot;</span> &lt;&lt; std::endl;</div>
<div class="line">std::cout &lt;&lt; multiplied_result &lt;&lt; std::endl;  <span class="comment">// Print the result of matrix multiplication</span></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html"><div class="ttname"><a href="classnz_1_1data_1_1_tensor.html">nz::data::Tensor</a></div><div class="ttdoc">A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cuh_source.html#l00134">Tensor.cuh:134</a></div></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_ad220de56b18c404611f07f2290cd7e9d"><div class="ttname"><a href="#ad220de56b18c404611f07f2290cd7e9d">nz::data::Tensor::fill</a></div><div class="ttdeci">void fill(value_type value, bool isGrad=false) const</div><div class="ttdoc">Fills the tensor's data with a specified value.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00306">Tensor.cu:306</a></div></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_adf80894b8e06f260bb2695951e2f539e"><div class="ttname"><a href="#adf80894b8e06f260bb2695951e2f539e">nz::data::Tensor::dataInject</a></div><div class="ttdeci">void dataInject(value_type *data, bool grad=false) const</div><div class="ttdoc">Injects data or gradient data into the tensor.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00282">Tensor.cu:282</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html"><div class="ttname"><a href="namespacenz_1_1data.html">nz::data</a></div><div class="ttdoc">Contains data structures and utilities for tensor operations in machine learning workflows.</div><div class="ttdef"><b>Definition</b> <a href="_dimension_8cuh_source.html#l00009">Dimension.cuh:9</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a4706224f5e7c9a0cfe4c74983aaef1bd"><div class="ttname"><a href="namespacenz_1_1data.html#a4706224f5e7c9a0cfe4c74983aaef1bd">nz::data::ReLU</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; ReLU(T &amp;input)</div><div class="ttdoc">Apply the Rectified Linear Unit (ReLU) activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_operations_8cuh_source.html#l00050">TensorOperations.cuh:50</a></div></div>
</div><!-- fragment --><dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure proper cleanup by calling the destructor or relying on RAII to avoid memory leaks.</li>
<li><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> size and shape must match during operations to prevent runtime errors.</li>
<li>Requires CUDA-compatible hardware and a properly configured environment.</li>
<li>Most of the methods in this class involve CUDA operations and may throw the <a class="el" href="classnz_1_1_cuda_exception.html" title="A final class that represents CUDA exceptions, inheriting from std::runtime_error.">nz::CudaException</a> in certain cases.</li>
</ul>
</dd></dl>
<dl class="section author"><dt>Author</dt><dd>Mgepahmge(<a href="https://github.com/Mgepahmge">https://github.com/Mgepahmge</a>)</dd></dl>
<dl class="section date"><dt>Date</dt><dd>2024/11/29 </dd></dl>

<p class="definition">Definition at line <a class="el" href="_tensor_8cuh_source.html#l00134">134</a> of file <a class="el" href="_tensor_8cuh_source.html">Tensor.cuh</a>.</p>
</div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2>
<a id="ad0dda0efff93778cab46fd5aa708b983" name="ad0dda0efff93778cab46fd5aa708b983"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad0dda0efff93778cab46fd5aa708b983">&#9670;&#160;</a></span>Tensor() <span class="overload">[1/6]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::Tensor </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Default constructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<p>Initializes an empty <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> with no data or shape. This constructor is primarily used as a placeholder or for initializing variables before assigning a valid tensor. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00088">88</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a6a3fc1e2d0b5154cdb4961679d0752af" name="a6a3fc1e2d0b5154cdb4961679d0752af"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a6a3fc1e2d0b5154cdb4961679d0752af">&#9670;&#160;</a></span>Tensor() <span class="overload">[2/6]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::Tensor </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>shape</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>requires_grad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">explicit</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Constructor that initializes a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> with the specified shape. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">shape</td><td>A vector representing the dimensions of the tensor. </td></tr>
    <tr><td class="paramname">requires_grad</td><td>A boolean indicating whether the tensor requires gradient computation.</td></tr>
  </table>
  </dd>
</dl>
<p>This constructor allocates GPU memory for the tensor based on the specified shape. If <code>requires_grad</code> is set to true, additional memory is allocated for storing gradients. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00092">92</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a6a3fc1e2d0b5154cdb4961679d0752af_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a6a3fc1e2d0b5154cdb4961679d0752af_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a6a3fc1e2d0b5154cdb4961679d0752af_cgraph" id="aclassnz_1_1data_1_1_tensor_a6a3fc1e2d0b5154cdb4961679d0752af_cgraph">
<area shape="rect" title="Constructor that initializes a Tensor with the specified shape." alt="" coords="5,47,168,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="216,5,401,48"/>
<area shape="poly" title=" " alt="" coords="168,45,200,40,200,46,169,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="216,72,401,115"/>
<area shape="poly" title=" " alt="" coords="169,70,200,74,200,80,168,75"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="449,72,634,115"/>
<area shape="poly" title=" " alt="" coords="401,91,433,91,433,96,401,96"/>
</map>
</div>

</div>
</div>
<a id="ad65fa89fac9d72c92d34ace7e94610df" name="ad65fa89fac9d72c92d34ace7e94610df"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad65fa89fac9d72c92d34ace7e94610df">&#9670;&#160;</a></span>Tensor() <span class="overload">[3/6]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::Tensor </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>shape</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">value_type *</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>requires_grad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>host</em></span><span class="paramdefsep"> = </span><span class="paramdefval">true</span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">explicit</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Constructs a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object with specified shape, data, gradient requirement, and data location. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">shape</td><td>A reference to the shape of the tensor (host-to-device). The shape determines the size of the tensor. </td></tr>
    <tr><td class="paramname">data</td><td>A pointer to the initial data of the tensor. The data can be either on the host or device depending on the <code>host</code> parameter. </td></tr>
    <tr><td class="paramname">requires_grad</td><td>A boolean indicating whether the tensor requires gradient computation. </td></tr>
    <tr><td class="paramname">host</td><td>A boolean indicating whether the data pointed to by <code>data</code> is on the host or device. If true, data is on the host; otherwise, it is on the device.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None. This is a constructor.</dd></dl>
<p>This constructor initializes a <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> object. It first calculates the total size of the tensor based on the provided shape. Then, it allocates device memory for the tensor's data using <code>cudaMalloc</code>.</p>
<p>Depending on the value of the <code>host</code> parameter, it copies the data from either the host or another device memory location to the newly allocated device memory using <code>cudaMemcpy</code>.</p>
<p>If the <code>requires_grad</code> parameter is <code>true</code>, it also allocates device memory for the gradient data of the tensor. Otherwise, it sets the gradient pointer <code>_grad</code> to <code>nullptr</code>.</p>
<p>For memory management, the constructor allocates device memory for the tensor's data and gradient (if required). The responsibility of freeing this memory lies with the destructor of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class.</p>
<p>In terms of exception handling, this constructor does not explicitly catch any CUDA errors. If a CUDA operation fails (e.g., <code>cudaMalloc</code> or <code>cudaMemcpy</code>), it will likely lead to undefined behavior in subsequent operations. It is the caller's responsibility to check for CUDA errors using <code>cudaGetLastError</code> or other appropriate methods.</p>
<p>This constructor is a fundamental part of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class as it initializes the object's internal state.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">None</td><td>explicitly, but CUDA operations may fail and return an error code.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the <code>data</code> pointer is valid and points to enough data to fill the tensor according to the specified shape.</li>
<li>The CUDA runtime environment should be properly initialized before calling this constructor.</li>
<li>This constructor has a time complexity of O(1) for memory allocation and O(n) for data copying, where n is the total number of elements in the tensor (<code>_size</code>).</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_dimension.html">shape_type</a> <a class="code hl_function" href="#aade7b0c42622279888d755f4f7989aac">shape</a> = {2, 3};</div>
<div class="line">value_type <a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>[] = {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code hl_function" href="#ad0dda0efff93778cab46fd5aa708b983">Tensor</a> tensor(<a class="code hl_function" href="#aade7b0c42622279888d755f4f7989aac">shape</a>, <a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>, <span class="keyword">true</span>, <span class="keyword">true</span>);</div>
<div class="ttc" id="aclassnz_1_1data_1_1_dimension_html"><div class="ttname"><a href="classnz_1_1data_1_1_dimension.html">nz::data::Dimension</a></div><div class="ttdoc">Represents a multi-dimensional shape, typically used in deep learning for tensor dimensions.</div><div class="ttdef"><b>Definition</b> <a href="_dimension_8cuh_source.html#l00057">Dimension.cuh:57</a></div></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a38ba233ef49f34620297f96edd962c55"><div class="ttname"><a href="#a38ba233ef49f34620297f96edd962c55">nz::data::Tensor::data</a></div><div class="ttdeci">value_type * data() const noexcept</div><div class="ttdoc">Retrieves a pointer to the tensor's data stored in GPU memory.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00432">Tensor.cu:432</a></div></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_aade7b0c42622279888d755f4f7989aac"><div class="ttname"><a href="#aade7b0c42622279888d755f4f7989aac">nz::data::Tensor::shape</a></div><div class="ttdeci">shape_type shape() const noexcept</div><div class="ttdoc">Retrieves the shape of the tensor.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00225">Tensor.cu:225</a></div></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_ad0dda0efff93778cab46fd5aa708b983"><div class="ttname"><a href="#ad0dda0efff93778cab46fd5aa708b983">nz::data::Tensor::Tensor</a></div><div class="ttdeci">Tensor()</div><div class="ttdoc">Default constructor for Tensor.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00088">Tensor.cu:88</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00104">104</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ad65fa89fac9d72c92d34ace7e94610df_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ad65fa89fac9d72c92d34ace7e94610df_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ad65fa89fac9d72c92d34ace7e94610df_cgraph" id="aclassnz_1_1data_1_1_tensor_ad65fa89fac9d72c92d34ace7e94610df_cgraph">
<area shape="rect" title="Constructs a Tensor object with specified shape, data, gradient requirement, and data location." alt="" coords="5,97,168,124"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="234,5,382,32"/>
<area shape="poly" title=" " alt="" coords="109,95,157,68,215,42,234,35,236,40,217,46,159,73,112,99"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="216,56,401,99"/>
<area shape="poly" title=" " alt="" coords="168,96,200,91,200,96,169,101"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="216,123,401,165"/>
<area shape="poly" title=" " alt="" coords="169,120,200,125,200,130,168,126"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="216,189,401,232"/>
<area shape="poly" title=" " alt="" coords="113,122,217,175,231,181,228,186,215,180,111,127"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="449,123,634,165"/>
<area shape="poly" title=" " alt="" coords="401,141,433,141,433,147,401,147"/>
<area shape="poly" title=" " alt="" coords="384,186,449,168,450,173,386,191"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="449,189,634,232"/>
<area shape="poly" title=" " alt="" coords="401,208,433,208,433,213,401,213"/>
</map>
</div>

</div>
</div>
<a id="a18937864a9eb48eb91a5d82ebf9c010e" name="a18937864a9eb48eb91a5d82ebf9c010e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a18937864a9eb48eb91a5d82ebf9c010e">&#9670;&#160;</a></span>Tensor() <span class="overload">[4/6]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::Tensor </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>shape</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const std::initializer_list&lt; value_type &gt; &amp;</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>requires_grad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">explicit</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Constructs a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object with a specified shape, initializer list data, and gradient requirement. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">shape</td><td>A reference to the shape of the tensor (host-to-device). The shape determines the dimensions and total size of the tensor. </td></tr>
    <tr><td class="paramname">data</td><td>A std::initializer_list containing the initial data for the tensor (host-to-device). </td></tr>
    <tr><td class="paramname">requires_grad</td><td>A boolean indicating whether the tensor requires gradient computation.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None. This is a constructor.</dd></dl>
<p>This constructor initializes a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object. First, it calculates the total size of the tensor based on the provided shape. It then checks if the size of the std::initializer_list is sufficient to fill the tensor. If not, it throws a std::invalid_argument exception.</p>
<p>For memory management, it allocates device memory for the tensor's data through the stream manager's asynchronous, stream-ordered allocation (see the call graph below). If the tensor requires gradient computation, it also allocates device memory for the gradient data; otherwise, it sets the gradient pointer to nullptr.</p>
<p>A temporary host buffer is created to hold the data from the std::initializer_list. The data is copied from the initializer list to the host buffer and then transferred from the host buffer to the device memory using an asynchronous, stream-ordered memory copy (see the call graph below). After the transfer, the temporary host buffer is deleted to prevent memory leaks.</p>
<p>Regarding exception handling, it throws a std::invalid_argument if the initializer list size is insufficient. Any CUDA errors during memory allocation or data transfer are not explicitly caught here, and it's the caller's responsibility to check for CUDA errors.</p>
<p>This constructor is an important part of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> class as it provides a convenient way to initialize a tensor with an initializer list.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>If the size of the std::initializer_list is less than the size of the tensor.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the std::initializer_list contains enough elements to fill the tensor according to the specified shape.</li>
<li>The CUDA runtime environment should be properly initialized before calling this constructor.</li>
<li>The time complexity of this constructor is O(n), where n is the total number of elements in the tensor, due to the loop that copies data from the initializer list to the host buffer.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><span class="preprocessor">#include &lt;iostream&gt;</span></div>
<div class="line"> </div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_dimension.html">shape_type</a> <a class="code hl_function" href="#aade7b0c42622279888d755f4f7989aac">shape</a> = {2, 3};</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    <a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor(<a class="code hl_function" href="#aade7b0c42622279888d755f4f7989aac">shape</a>, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}, <span class="keyword">true</span>);</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::invalid_argument&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00123">123</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a18937864a9eb48eb91a5d82ebf9c010e_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a18937864a9eb48eb91a5d82ebf9c010e_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a18937864a9eb48eb91a5d82ebf9c010e_cgraph" id="aclassnz_1_1data_1_1_tensor_a18937864a9eb48eb91a5d82ebf9c010e_cgraph">
<area shape="rect" title="Constructs a Tensor object with a specified shape, initializer list data, and gradient requirement." alt="" coords="5,97,168,124"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="234,5,382,32"/>
<area shape="poly" title=" " alt="" coords="109,95,157,68,215,42,234,35,236,40,217,46,159,73,112,99"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="216,56,401,99"/>
<area shape="poly" title=" " alt="" coords="168,96,200,91,200,96,169,101"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="216,123,401,165"/>
<area shape="poly" title=" " alt="" coords="169,120,200,125,200,130,168,126"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="216,189,401,232"/>
<area shape="poly" title=" " alt="" coords="113,122,217,175,231,181,228,186,215,180,111,127"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="449,123,634,165"/>
<area shape="poly" title=" " alt="" coords="401,141,433,141,433,147,401,147"/>
<area shape="poly" title=" " alt="" coords="384,186,449,168,450,173,386,191"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="449,189,634,232"/>
<area shape="poly" title=" " alt="" coords="401,208,433,208,433,213,401,213"/>
</map>
</div>

</div>
</div>
<a id="a6184f0270420ac054f7bd372bbed1406" name="a6184f0270420ac054f7bd372bbed1406"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a6184f0270420ac054f7bd372bbed1406">&#9670;&#160;</a></span>Tensor() <span class="overload">[5/6]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::Tensor </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Copy constructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object to copy from.</td></tr>
  </table>
  </dd>
</dl>
<p>Performs a deep copy of the tensor, including its shape, data, and gradient (if applicable). </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00146">146</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a6184f0270420ac054f7bd372bbed1406_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a6184f0270420ac054f7bd372bbed1406_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a6184f0270420ac054f7bd372bbed1406_cgraph" id="aclassnz_1_1data_1_1_tensor_a6184f0270420ac054f7bd372bbed1406_cgraph">
<area shape="rect" title="Copy constructor for Tensor." alt="" coords="5,80,168,107"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="216,5,401,48"/>
<area shape="poly" title=" " alt="" coords="133,77,220,50,221,56,135,82"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="216,72,401,115"/>
<area shape="poly" title=" " alt="" coords="168,91,200,91,200,96,168,96"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="216,139,401,181"/>
<area shape="poly" title=" " alt="" coords="135,105,221,131,220,136,133,110"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="449,72,634,115"/>
<area shape="poly" title=" " alt="" coords="401,91,433,91,433,96,401,96"/>
<area shape="poly" title=" " alt="" coords="384,136,449,117,450,122,386,141"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="449,139,634,181"/>
<area shape="poly" title=" " alt="" coords="401,157,433,157,433,163,401,163"/>
</map>
</div>

</div>
</div>
<a id="adb57f91ae907875d78d804de85dbbc73" name="adb57f91ae907875d78d804de85dbbc73"></a>
<h2 class="memtitle"><span class="permalink"><a href="#adb57f91ae907875d78d804de85dbbc73">&#9670;&#160;</a></span>Tensor() <span class="overload">[6/6]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::Tensor </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;&amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Move constructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object to move from.</td></tr>
  </table>
  </dd>
</dl>
<p>Moves the tensor data and ownership of the GPU memory to the new object. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00161">161</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a98a8b254d2b6c8b4893d7a286452a9b0" name="a98a8b254d2b6c8b4893d7a286452a9b0"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a98a8b254d2b6c8b4893d7a286452a9b0">&#9670;&#160;</a></span>~Tensor()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">nz::data::Tensor::~Tensor </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Destructor for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<p>Releases all GPU memory allocated for the tensor's data and gradient. Ensures that no memory leaks occur during the lifetime of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00210">210</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a98a8b254d2b6c8b4893d7a286452a9b0_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a98a8b254d2b6c8b4893d7a286452a9b0_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a98a8b254d2b6c8b4893d7a286452a9b0_cgraph" id="aclassnz_1_1data_1_1_tensor_a98a8b254d2b6c8b4893d7a286452a9b0_cgraph">
<area shape="rect" title="Destructor for Tensor." alt="" coords="5,72,129,115"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1084057ef6f5b2871c60702209bb4469" title="Asynchronously frees the CUDA device memory pointed to by the given pointer." alt="" coords="177,39,362,81"/>
<area shape="poly" title=" " alt="" coords="129,81,161,75,162,80,130,86"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="177,105,362,148"/>
<area shape="poly" title=" " alt="" coords="130,101,162,106,161,111,129,106"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="410,5,595,48"/>
<area shape="poly" title=" " alt="" coords="362,44,394,39,395,45,363,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="410,72,595,115"/>
<area shape="poly" title=" " alt="" coords="363,71,395,75,394,81,362,76"/>
</map>
</div>

</div>
</div>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="afc4e6385b97cf7ceb8bb74748b73b681" name="afc4e6385b97cf7ceb8bb74748b73b681"></a>
<h2 class="memtitle"><span class="permalink"><a href="#afc4e6385b97cf7ceb8bb74748b73b681">&#9670;&#160;</a></span>clear()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::clear </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Clears the tensor's data by setting all elements to zero. </p>
<p>This function resets the tensor's data to zero by filling the memory allocated for the tensor's data with zero values. It uses the stream manager's asynchronous memory-set operation (see the call graph below) to set all the values in the tensor's GPU memory to zero. This is commonly used to clear or reset the tensor before using it for new computations.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function does not deallocate the memory; it only sets the values in the tensor's data to zero.</li>
<li>The tensor's data memory is assumed to be allocated before calling this function. This is automatically managed when the tensor is created, so no additional memory allocation is needed.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});  <span class="comment">// Create a tensor with shape 2x3</span></div>
<div class="line">tensor.<a class="code hl_function" href="#afc4e6385b97cf7ceb8bb74748b73b681">clear</a>();         <span class="comment">// Clear the tensor&#39;s data by setting all elements to zero</span></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_afc4e6385b97cf7ceb8bb74748b73b681"><div class="ttname"><a href="#afc4e6385b97cf7ceb8bb74748b73b681">nz::data::Tensor::clear</a></div><div class="ttdeci">void clear() const</div><div class="ttdoc">Clears the tensor's data by setting all elements to zero.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00302">Tensor.cu:302</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00302">302</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_afc4e6385b97cf7ceb8bb74748b73b681_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_afc4e6385b97cf7ceb8bb74748b73b681_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_afc4e6385b97cf7ceb8bb74748b73b681_cgraph" id="aclassnz_1_1data_1_1_tensor_afc4e6385b97cf7ceb8bb74748b73b681_cgraph">
<area shape="rect" title="Clears the tensor&#39;s data by setting all elements to zero." alt="" coords="5,47,157,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="205,5,390,48"/>
<area shape="poly" title=" " alt="" coords="157,46,188,41,189,46,157,51"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a71ad766cb2869d3dd6a3931966e81706" title="Asynchronously sets a block of CUDA device memory to a specified value." alt="" coords="205,72,390,115"/>
<area shape="poly" title=" " alt="" coords="157,69,189,74,188,79,157,74"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="438,39,623,81"/>
<area shape="poly" title=" " alt="" coords="390,77,422,73,423,78,391,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="438,105,623,148"/>
<area shape="poly" title=" " alt="" coords="391,104,423,109,422,114,390,109"/>
</map>
</div>

</div>
</div>
<a id="a38ba233ef49f34620297f96edd962c55" name="a38ba233ef49f34620297f96edd962c55"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a38ba233ef49f34620297f96edd962c55">&#9670;&#160;</a></span>data()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type * nz::data::Tensor::data </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span><span class="mlabel">noexcept</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Retrieves a pointer to the tensor's data stored in GPU memory. </p>
<dl class="section return"><dt>Returns</dt><dd>A <code>value_type*</code> (pointer to float) pointing to the tensor's data in GPU memory.</dd></dl>
<p>This function provides direct access to the raw data of the tensor stored in GPU memory. It is useful for low-level operations or when interfacing with other libraries that require access to the tensor's memory.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The returned pointer points to GPU memory, so it cannot be directly dereferenced in CPU code.</li>
<li>Ensure that CUDA synchronization is handled properly before using this pointer in GPU operations.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});</div>
<div class="line"><span class="keyword">const</span> <span class="keywordtype">float</span>* gpu_data = tensor.<a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>(); <span class="comment">// Access raw data</span></div>
<div class="line"><span class="comment">// Use gpu_data in CUDA kernels or other GPU-based operations</span></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00432">432</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a2c4217ad3ebcdb4a1bcf2fd38151d007" name="a2c4217ad3ebcdb4a1bcf2fd38151d007"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a2c4217ad3ebcdb4a1bcf2fd38151d007">&#9670;&#160;</a></span>dataInject() <span class="overload">[1/3]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::dataInject </td>
          <td>(</td>
          <td class="paramtype">const std::initializer_list&lt; value_type &gt; &amp;</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>grad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;) const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Injects data or gradient data into the tensor using a std::initializer_list. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>A std::initializer_list containing the data to be injected (host-to-device). </td></tr>
    <tr><td class="paramname">grad</td><td>A boolean indicating whether to inject gradient data.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>void</dd></dl>
<p>This function serves as a wrapper that calls another <code>dataInject</code> function, passing the begin and end iterators of the provided <code>std::initializer_list</code>. In terms of memory management, it relies on the underlying <code>dataInject</code> function to handle memory operations for the actual data injection. Regarding exception handling, it simply propagates any exceptions thrown by the underlying <code>dataInject</code> function without additional handling. This function is closely related to the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class and the other <code>dataInject</code> functions as it leverages the existing data injection logic.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::runtime_error</td><td>If the length of the input array is less than the size of the tensor. </td></tr>
    <tr><td class="paramname"><a class="el" href="classnz_1_1_cuda_exception.html" title="A final class that represents CUDA exceptions, inheriting from std::runtime_error.">nz::CudaException</a></td><td>If the CUDA memory copy fails or if the tensor does not require gradients when trying to inject gradient data.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The <code>std::initializer_list</code> should contain enough elements to fill the tensor.</li>
<li>This function has a time complexity of O(1) for the wrapper itself, but the overall complexity depends on the underlying <code>dataInject</code> function which is O(n) where n is the size of the tensor.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({1,3});</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    tensor.dataInject({1.0f, 2.0f, 3.0f}, <span class="keyword">false</span>);</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::runtime_error&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00241">241</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a2c4217ad3ebcdb4a1bcf2fd38151d007_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a2c4217ad3ebcdb4a1bcf2fd38151d007_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a2c4217ad3ebcdb4a1bcf2fd38151d007_cgraph" id="aclassnz_1_1data_1_1_tensor_a2c4217ad3ebcdb4a1bcf2fd38151d007_cgraph">
<area shape="rect" title="Injects data or gradient data into the tensor using a std::initializer_list." alt="" coords="5,56,186,83"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="480,5,629,32"/>
<area shape="poly" title=" " alt="" coords="186,54,233,48,465,24,466,29,234,53,187,59"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#adf80894b8e06f260bb2695951e2f539e" title="Injects data or gradient data into the tensor." alt="" coords="234,107,414,133"/>
<area shape="poly" title=" " alt="" coords="160,81,246,100,245,105,159,86"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#ad6107b98beb881d0209345185d5ad145" title="Retrieves a pointer to the gradient data stored in GPU memory." alt="" coords="480,56,629,83"/>
<area shape="poly" title=" " alt="" coords="186,67,465,67,465,72,186,72"/>
<area shape="poly" title=" " alt="" coords="345,104,397,73,461,42,480,35,482,40,463,46,399,78,348,109"/>
<area shape="poly" title=" " alt="" coords="388,103,475,84,477,89,389,109"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="462,107,647,149"/>
<area shape="poly" title=" " alt="" coords="414,120,447,122,446,127,414,126"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="462,173,647,216"/>
<area shape="poly" title=" " alt="" coords="368,131,473,166,471,171,367,136"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="695,140,881,183"/>
<area shape="poly" title=" " alt="" coords="647,179,679,174,680,179,648,184"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="695,207,881,249"/>
<area shape="poly" title=" " alt="" coords="648,205,680,210,679,215,647,211"/>
</map>
</div>

</div>
</div>
<a id="ae3cc31f5b541809d75556ebbfda8ae57" name="ae3cc31f5b541809d75556ebbfda8ae57"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ae3cc31f5b541809d75556ebbfda8ae57">&#9670;&#160;</a></span>dataInject() <span class="overload">[2/3]</span></h2>

<div class="memitem">
<div class="memproto">
<div class="memtemplate">
template&lt;typename Iterator &gt; </div>
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::dataInject </td>
          <td>(</td>
          <td class="paramtype">Iterator</td>          <td class="paramname"><span class="paramname"><em>begin</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">Iterator</td>          <td class="paramname"><span class="paramname"><em>end</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const bool</td>          <td class="paramname"><span class="paramname"><em>grad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;) const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Injects data or gradient data into the tensor using iterators. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">begin</td><td>An iterator pointing to the beginning of the input data range (host-to-device). </td></tr>
    <tr><td class="paramname">end</td><td>An iterator pointing to the end of the input data range (host-to-device). </td></tr>
    <tr><td class="paramname">grad</td><td>A boolean indicating whether to inject gradient data. Defaults to false.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>void</dd></dl>
<p>This function injects data or gradient data into the tensor using the provided iterator range. First, it checks if the length of the input range (determined by <code>std::distance(begin, end)</code>) is at least as large as the size of the tensor (<code>_size</code>). If not, it throws a <code>std::runtime_error</code>.</p>
<p>For memory management, it allocates a temporary host array <code>host_data</code> of size <code>_size</code> to store the data from the iterator range. The data is then copied from the iterator range to this temporary array. After that, it calls the <code>dataInject</code> function with the temporary array and the <code>grad</code> flag.</p>
<p>In case of an exception during the call to the <code>dataInject</code> function, the temporary array is deleted to prevent memory leaks. Finally, the temporary array is deleted after the call to <code>dataInject</code> returns successfully.</p>
<p>The exception handling mechanism catches any <code><a class="el" href="classnz_1_1_cuda_exception.html" title="A final class that represents CUDA exceptions, inheriting from std::runtime_error.">nz::CudaException</a></code> or <code>std::runtime_error</code> thrown by the <code>dataInject</code> function and re-throws it after cleaning up the temporary memory.</p>
<p>This function is closely related to the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class and the other <code>dataInject</code> function as it uses the other <code>dataInject</code> function to perform the actual data injection.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::runtime_error</td><td>If the length of the input array is less than the size of the tensor. </td></tr>
    <tr><td class="paramname"><a class="el" href="classnz_1_1_cuda_exception.html" title="A final class that represents CUDA exceptions, inheriting from std::runtime_error.">nz::CudaException</a></td><td>If the CUDA memory copy fails or if the tensor does not require gradients when trying to inject gradient data.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The iterators <code>begin</code> and <code>end</code> should be valid and form a proper range.</li>
<li>The input data should be convertible to the <code>value_type</code> of the tensor.</li>
<li>The time complexity of this function is O(n), where n is the size of the tensor (<code>_size</code>), due to the loop that copies data from the iterator range to the temporary array.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><span class="preprocessor">#include &lt;vector&gt;</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({1,3});</div>
<div class="line">std::vector&lt;float&gt; <a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a> = {1.0f, 2.0f, 3.0f};</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    tensor.dataInject(<a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>.begin(), <a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>.end(), <span class="keyword">false</span>);</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::runtime_error&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cuh_source.html#l00567">567</a> of file <a class="el" href="_tensor_8cuh_source.html">Tensor.cuh</a>.</p>

</div>
</div>
<a id="adf80894b8e06f260bb2695951e2f539e" name="adf80894b8e06f260bb2695951e2f539e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#adf80894b8e06f260bb2695951e2f539e">&#9670;&#160;</a></span>dataInject() <span class="overload">[3/3]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::dataInject </td>
          <td>(</td>
          <td class="paramtype">value_type *</td>          <td class="paramname"><span class="paramname"><em>data</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>grad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;) const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Injects data or gradient data into the tensor. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">data</td><td>A pointer to the data to be injected (host-to-device). </td></tr>
    <tr><td class="paramname">grad</td><td>A boolean indicating whether to inject gradient data.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>void</dd></dl>
<p>This function is responsible for injecting data or gradient data into the tensor. For memory management, it uses <code>cudaMemcpy</code> to copy data from the host to the device. If the <code>grad</code> parameter is <code>true</code>, it tries to copy data to the gradient buffer (<code>_grad</code>). If the tensor does not require gradients (<code>_requires_grad</code> is <code>false</code>), it throws an exception. If the <code>grad</code> parameter is <code>false</code>, it copies data to the main data buffer (<code>_data</code>).</p>
<p>The exception handling mechanism is in place to catch any CUDA memory copy errors. If the <code>cudaMemcpy</code> operation fails, it throws a <code><a class="el" href="classnz_1_1_cuda_exception.html" title="A final class that represents CUDA exceptions, inheriting from std::runtime_error.">nz::CudaException</a></code> with an appropriate error message.</p>
<p>This function is closely related to the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class as it modifies the internal data of the tensor.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname"><a class="el" href="classnz_1_1_cuda_exception.html" title="A final class that represents CUDA exceptions, inheriting from std::runtime_error.">nz::CudaException</a></td><td>If the CUDA memory copy fails or if the tensor does not require gradients when trying to inject gradient data.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The input data pointer <code>data</code> should point to a valid memory location with enough data to fill the tensor.</li>
<li>Ensure that the CUDA environment is properly initialized before calling this function.</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd>This function is not safe. If the length of the input array pointed to by <code>data</code> is less than the size of the tensor, it will lead to undefined behavior and potentially cause unknown issues in the program.</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({1, 3});</div>
<div class="line"><span class="keywordtype">float</span> <a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>[] = {1.0, 2.0, 3.0};</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    tensor.dataInject(<a class="code hl_function" href="#a38ba233ef49f34620297f96edd962c55">data</a>, <span class="keyword">false</span>);</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::runtime_error&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00282">282</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_adf80894b8e06f260bb2695951e2f539e_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_adf80894b8e06f260bb2695951e2f539e_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_adf80894b8e06f260bb2695951e2f539e_cgraph" id="aclassnz_1_1data_1_1_tensor_adf80894b8e06f260bb2695951e2f539e_cgraph">
<area shape="rect" title="Injects data or gradient data into the tensor." alt="" coords="5,85,186,112"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="252,5,400,32"/>
<area shape="poly" title=" " alt="" coords="128,82,233,41,256,34,257,39,235,47,130,87"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#ad6107b98beb881d0209345185d5ad145" title="Retrieves a pointer to the gradient data stored in GPU memory." alt="" coords="252,56,400,83"/>
<area shape="poly" title=" " alt="" coords="185,85,236,78,237,83,186,90"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="234,107,419,149"/>
<area shape="poly" title=" " alt="" coords="186,107,219,112,218,117,185,113"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="234,173,419,216"/>
<area shape="poly" title=" " alt="" coords="126,110,235,159,250,165,248,170,233,164,124,115"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="467,140,652,183"/>
<area shape="poly" title=" " alt="" coords="419,179,451,174,452,179,420,184"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="467,207,652,249"/>
<area shape="poly" title=" " alt="" coords="420,205,452,210,451,215,419,211"/>
</map>
</div>

</div>
</div>
<a id="aa1818a10415337403d43aad091a5a4c7" name="aa1818a10415337403d43aad091a5a4c7"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa1818a10415337403d43aad091a5a4c7">&#9670;&#160;</a></span>expSum() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::expSum </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Compute the sum of the exponential values of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="section return"><dt>Returns</dt><dd>The sum of the exponential values of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> as a value of type <code>Tensor::value_type</code>.</dd></dl>
<p>This function calculates the sum of the exponential values of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. It first configures the CUDA block and grid dimensions. Then, it allocates device memory for intermediate results and host memory to hold the copied results from the device. The <code><a class="el" href="namespacenz_1_1krnl.html#a51a5ff3c8cc2c3051fddf32de294b467" title="Kernel function to compute the summation of exponentials of each element in the input array.">krnl::SummationExp</a></code> CUDA kernel is launched to compute the partial sums of the exponential values on the device. After the kernel execution, the partial sums are transferred from the device to the host using <code>cudaMemcpy</code>. Finally, the partial sums on the host are added together to obtain the total sum, and the allocated host and device memory are freed.</p>
<p>Memory management:</p><ul>
<li>Host memory is allocated for <code>hData</code> using <code>new[]</code> and freed using <code>delete[]</code>.</li>
<li>Device memory is allocated for <code>dData</code> using <code>cudaMalloc</code> and freed using <code>cudaFree</code>.</li>
</ul>
<p>Exception handling:</p><ul>
<li>The <code>CHECK</code> macro is used to handle CUDA API errors. If a CUDA API call fails, the <code>CHECK</code> macro will throw an exception, and the function will terminate.</li>
</ul>
<p>Relationship with other components:</p><ul>
<li>This function depends on the <code><a class="el" href="namespacenz_1_1krnl.html#a51a5ff3c8cc2c3051fddf32de294b467" title="Kernel function to compute the summation of exponentials of each element in the input array.">krnl::SummationExp</a></code> CUDA kernel to perform the partial sums of exponential values on the device.</li>
<li>It also depends on the <code>CHECK</code> macro to handle CUDA API errors.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">[Exception</td><td>type thrown by CHECK macro] If there are CUDA API errors during memory allocation, kernel execution, or memory copying.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is approximately O(n), where n is the number of elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> (<code>_size</code>). The CUDA kernel parallelizes the partial sum calculation of exponential values, and the final sum on the host is a linear operation over the number of grid blocks.</li>
<li>Ensure that the CUDA device is properly initialized before calling this function.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">nz::data::Tensor</a> tensor({2, 3}, <span class="keyword">true</span>);</div>
<div class="line"><span class="comment">// Assume tensor is filled with some values</span></div>
<div class="line">nz::data::Tensor::value_type exp_sum_result = tensor.<a class="code hl_function" href="#aa1818a10415337403d43aad091a5a4c7">expSum</a>();</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_aa1818a10415337403d43aad091a5a4c7"><div class="ttname"><a href="#aa1818a10415337403d43aad091a5a4c7">nz::data::Tensor::expSum</a></div><div class="ttdeci">value_type expSum() const</div><div class="ttdoc">Compute the sum of the exponential values of all elements in the Tensor.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00694">Tensor.cu:694</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00694">694</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_aa1818a10415337403d43aad091a5a4c7_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_aa1818a10415337403d43aad091a5a4c7_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_aa1818a10415337403d43aad091a5a4c7_cgraph" id="aclassnz_1_1data_1_1_tensor_aa1818a10415337403d43aad091a5a4c7_cgraph">
<area shape="rect" title="Compute the sum of the exponential values of all elements in the Tensor." alt="" coords="5,160,177,187"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="225,5,410,48"/>
<area shape="poly" title=" " alt="" coords="105,157,155,113,223,64,245,53,248,57,226,69,159,118,109,161"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="458,35,643,77"/>
<area shape="poly" title=" " alt="" coords="115,157,164,132,224,108,336,81,442,64,443,69,337,86,225,113,167,137,118,162"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="458,181,643,224"/>
<area shape="poly" title=" " alt="" coords="177,176,442,193,442,198,177,181"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="225,123,410,165"/>
<area shape="poly" title=" " alt="" coords="177,160,209,155,209,161,177,165"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="458,315,643,357"/>
<area shape="poly" title=" " alt="" coords="107,185,156,231,189,256,226,276,279,296,336,311,443,328,442,333,334,316,278,301,224,281,187,260,153,235,103,189"/>
<area shape="rect" href="namespacenz_1_1krnl.html#a51a5ff3c8cc2c3051fddf32de294b467" title="Kernel function to compute the summation of exponentials of each element in the input array." alt="" coords="237,240,398,267"/>
<area shape="poly" title=" " alt="" coords="131,185,226,219,262,232,261,237,224,224,130,190"/>
<area shape="poly" title=" " alt="" coords="411,36,443,40,442,45,410,41"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="691,248,877,291"/>
<area shape="poly" title=" " alt="" coords="410,136,464,136,524,140,585,150,644,167,675,181,703,198,751,235,748,240,700,202,672,185,642,172,584,155,523,146,464,141,410,141"/>
<area shape="poly" title=" " alt="" coords="627,312,691,293,693,298,628,317"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="691,315,877,357"/>
<area shape="poly" title=" " alt="" coords="644,333,676,333,676,339,644,339"/>
<area shape="poly" title=" " alt="" coords="382,237,442,224,443,229,383,242"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="458,248,643,291"/>
<area shape="poly" title=" " alt="" coords="398,256,443,259,442,265,398,261"/>
<area shape="poly" title=" " alt="" coords="644,267,676,267,676,272,644,272"/>
<area shape="poly" title=" " alt="" coords="628,289,693,307,691,312,627,294"/>
</map>
</div>

</div>
</div>
<a id="ac4833838e9a704b6b8d29cbd53c6b3b1" name="ac4833838e9a704b6b8d29cbd53c6b3b1"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ac4833838e9a704b6b8d29cbd53c6b3b1">&#9670;&#160;</a></span>expSum() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::expSum </td>
          <td>(</td>
          <td class="paramtype">size_t</td>          <td class="paramname"><span class="paramname"><em>batch</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_t</td>          <td class="paramname"><span class="paramname"><em>channel</em></span>&#160;) const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Computes the sum of exponential values of elements in a specific batch and channel of a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">batch</td><td>The batch index. Memory flow: host-to-device (used for index calculation on the host side). </td></tr>
    <tr><td class="paramname">channel</td><td>The channel index. Memory flow: host-to-device (used for index calculation on the host side).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The sum of exponential values of elements in the specified batch and channel of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.</dd></dl>
<p>This function calculates the sum of the exponential values of elements within a particular batch and channel of a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. First, it validates the provided <code>batch</code> and <code>channel</code> indices. If they are out of the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape, it throws a <code>std::invalid_argument</code> exception.</p>
<p>After validation, it computes the size of the region to be processed based on the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape. It then allocates device memory for intermediate results (<code>dData</code>) and host memory (<code>hData</code>) to receive the computed values from the device. The offset in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s data is determined according to the <code>batch</code> and <code>channel</code> indices.</p>
<p>The <code><a class="el" href="namespacenz_1_1krnl.html#a51a5ff3c8cc2c3051fddf32de294b467" title="Kernel function to compute the summation of exponentials of each element in the input array.">krnl::SummationExp</a></code> kernel is launched to compute the exponential of each element and perform partial summation on the device. The intermediate results are then copied from the device to the host. Finally, the function sums up all the intermediate results on the host, frees the allocated host and device memory, and returns the final sum.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>On the host side, an array <code>hData</code> of size <code>grid.x</code> is dynamically allocated using <code>new[]</code> and later freed using <code>delete[]</code>.</li>
<li>On the device side, memory for <code>dData</code> is allocated using <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance().malloc</code> and freed using <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance().free</code>.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>Throws a <code>std::invalid_argument</code> exception if the provided <code>batch</code> or <code>channel</code> indices are out of the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape.</li>
<li>The CUDA memory allocation, copying, and kernel launch operations may return error codes indicating failures. It is assumed that the calling code or the CUDA runtime will handle these errors appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on the <code>_shape</code> member of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class to get the shape information and strides.</li>
<li>Uses the <code><a class="el" href="namespacenz_1_1krnl.html#a51a5ff3c8cc2c3051fddf32de294b467" title="Kernel function to compute the summation of exponentials of each element in the input array.">krnl::SummationExp</a></code> kernel to perform the exponential calculation and partial summation on the device.</li>
<li>Relies on <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance()</code> for CUDA memory management (malloc, memcpy, free) operations.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>If the provided <code>batch</code> or <code>channel</code> indices are out of the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the provided <code>batch</code> and <code>channel</code> indices are within the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape to avoid exceptions.</li>
<li>Be aware of potential CUDA errors during memory allocation, copying, and kernel launch operations and handle them appropriately in the calling code.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor; <span class="comment">// Assume Tensor is properly initialized</span></div>
<div class="line"><span class="keywordtype">size_t</span> batch = 0;</div>
<div class="line"><span class="keywordtype">size_t</span> channel = 1;</div>
<div class="line">Tensor::value_type expSumResult = tensor.<a class="code hl_function" href="#aa1818a10415337403d43aad091a5a4c7">expSum</a>(batch, channel);</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00713">713</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ac4833838e9a704b6b8d29cbd53c6b3b1_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ac4833838e9a704b6b8d29cbd53c6b3b1_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ac4833838e9a704b6b8d29cbd53c6b3b1_cgraph" id="aclassnz_1_1data_1_1_tensor_ac4833838e9a704b6b8d29cbd53c6b3b1_cgraph">
<area shape="rect" title="Computes the sum of exponential values of elements in a specific batch and channel of a Tensor." alt="" coords="5,173,177,200"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="225,5,410,48"/>
<area shape="poly" title=" " alt="" coords="103,171,153,121,187,91,223,64,244,53,246,57,226,69,190,95,156,125,107,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="458,19,643,61"/>
<area shape="poly" title=" " alt="" coords="120,170,224,127,336,91,442,63,444,68,338,96,226,132,122,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="458,99,643,141"/>
<area shape="poly" title=" " alt="" coords="160,170,224,159,442,129,443,135,225,164,161,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="225,275,410,317"/>
<area shape="poly" title=" " alt="" coords="119,198,226,254,252,266,250,271,223,258,116,203"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="458,232,643,275"/>
<area shape="poly" title=" " alt="" coords="161,198,225,209,443,239,442,244,224,215,160,203"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a31a3aa01fa3ccb56503994a99e39e177" title="Retrieves the total number of elements in the tensor." alt="" coords="244,341,391,368"/>
<area shape="poly" title=" " alt="" coords="103,199,151,261,186,297,226,327,236,332,234,337,223,332,182,301,147,264,99,202"/>
<area shape="rect" href="namespacenz_1_1krnl.html#a51a5ff3c8cc2c3051fddf32de294b467" title="Kernel function to compute the summation of exponentials of each element in the input array." alt="" coords="237,173,398,200"/>
<area shape="poly" title=" " alt="" coords="177,184,221,184,221,189,177,189"/>
<area shape="poly" title=" " alt="" coords="411,29,443,31,442,36,410,35"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="691,232,877,275"/>
<area shape="poly" title=" " alt="" coords="410,296,522,294,643,284,688,276,689,281,644,289,522,299,410,301"/>
<area shape="poly" title=" " alt="" coords="644,251,676,251,676,256,644,256"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="691,165,877,208"/>
<area shape="poly" title=" " alt="" coords="627,229,691,210,693,215,628,234"/>
<area shape="poly" title=" " alt="" coords="366,170,458,144,459,149,368,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="458,165,643,208"/>
<area shape="poly" title=" " alt="" coords="398,184,443,184,443,189,398,189"/>
<area shape="poly" title=" " alt="" coords="628,206,693,225,691,230,627,211"/>
<area shape="poly" title=" " alt="" coords="644,184,676,184,676,189,644,189"/>
</map>
</div>

</div>
</div>
<a id="ad220de56b18c404611f07f2290cd7e9d" name="ad220de56b18c404611f07f2290cd7e9d"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad220de56b18c404611f07f2290cd7e9d">&#9670;&#160;</a></span>fill()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::fill </td>
          <td>(</td>
          <td class="paramtype">value_type</td>          <td class="paramname"><span class="paramname"><em>value</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>isGrad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;) const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills the tensor's data with a specified value. </p>
<p>This function sets all elements in the tensor's data to the specified value. It launches the <code>krnl::Fill</code> CUDA kernel through the stream manager to fill the GPU memory allocated for the tensor with the provided value. This is commonly used to initialize a tensor with a constant value.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">value</td><td>The value to which all elements of the tensor will be set. This value is copied to every element in the tensor's data. </td></tr>
    <tr><td class="paramname">isGrad</td><td>A boolean flag indicating whether to fill the gradients or the data. If true, gradients are filled; otherwise, data is filled (host-to-device).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function does not deallocate the memory; it only sets the values in the tensor's data to the specified value.</li>
<li>The tensor's data memory is assumed to be allocated before calling this function. This is automatically managed when the tensor is created, so no additional memory allocation is needed.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});  <span class="comment">// Create a tensor with shape 2x3</span></div>
<div class="line">tensor.<a class="code hl_function" href="#ad220de56b18c404611f07f2290cd7e9d">fill</a>(5.0f);      <span class="comment">// Fill the tensor&#39;s data with the value 5.0f</span></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00306">306</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ad220de56b18c404611f07f2290cd7e9d_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ad220de56b18c404611f07f2290cd7e9d_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ad220de56b18c404611f07f2290cd7e9d_cgraph" id="aclassnz_1_1data_1_1_tensor_ad220de56b18c404611f07f2290cd7e9d_cgraph">
<area shape="rect" title="Fills the tensor&#39;s data with a specified value." alt="" coords="5,47,142,73"/>
<area shape="rect" href="namespacenz_1_1krnl.html#ad136c8a6560a5305984ce0a31bea71bf" title="Kernel function to fill a data array with a given value." alt="" coords="190,47,279,73"/>
<area shape="poly" title=" " alt="" coords="142,57,174,57,174,63,142,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="327,5,512,48"/>
<area shape="poly" title=" " alt="" coords="279,49,311,44,312,49,280,55"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="327,72,512,115"/>
<area shape="poly" title=" " alt="" coords="280,65,312,71,311,76,279,71"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="560,39,746,81"/>
<area shape="poly" title=" " alt="" coords="512,77,544,73,545,78,513,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="560,105,746,148"/>
<area shape="poly" title=" " alt="" coords="513,104,545,109,544,114,512,109"/>
</map>
</div>

</div>
</div>
<a id="ae6144f6d7fa612d98538f17baf4ef574" name="ae6144f6d7fa612d98538f17baf4ef574"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ae6144f6d7fa612d98538f17baf4ef574">&#9670;&#160;</a></span>fillMatrix()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::fillMatrix </td>
          <td>(</td>
          <td class="paramtype">value_type</td>          <td class="paramname"><span class="paramname"><em>value</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>batch</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>channels</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>isGrad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;)</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fill a specific matrix slice within the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> with a given value. </p>
<p>This method allows users to populate a particular matrix slice of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> (specified by batch and channels) with a provided value. It also supports filling the gradient matrix if the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> requires gradient computation and the <code>isGrad</code> flag is set.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">value</td><td>The value used to fill the matrix slice. Memory flow: host-to-function, passed from the calling code. </td></tr>
    <tr><td class="paramname">batch</td><td>The index of the batch. Memory flow: host-to-function, passed from the calling code. </td></tr>
    <tr><td class="paramname">channels</td><td>The index of the channels. Memory flow: host-to-function, passed from the calling code. </td></tr>
    <tr><td class="paramname">isGrad</td><td>A boolean indicating whether to fill the gradient matrix. Memory flow: host-to-function, passed from the calling code.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None</dd></dl>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>This function does not allocate or free any additional memory. It operates on the existing <code>_data</code> or <code>_grad</code> buffer of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>Throws <code>std::invalid_argument</code> if the provided <code>batch</code> or <code>channels</code> indices are out of bounds.</li>
<li>Throws <code>std::invalid_argument</code> if <code>isGrad</code> is <code>true</code> but the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> does not require gradient computation.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on the <code>_shape</code> object to access tensor shape information and calculate offsets.</li>
<li>Relies on the <code><a class="el" href="namespacenz_1_1krnl.html#ad136c8a6560a5305984ce0a31bea71bf" title="Kernel function to fill a data array with a given value.">krnl::Fill</a></code> CUDA kernel to perform the actual filling operation.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>When <code>batch</code> or <code>channels</code> are out of bounds or when trying to fill gradients of a non-gradient-requiring <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the matrix slice (<code>_shape[2] * _shape[3]</code>).</li>
<li>Ensure that the <code><a class="el" href="namespacenz_1_1krnl.html#ad136c8a6560a5305984ce0a31bea71bf" title="Kernel function to fill a data array with a given value.">krnl::Fill</a></code> CUDA kernel is properly implemented and the CUDA environment is set up correctly.</li>
<li>Verify that the <code>_shape</code> object provides accurate shape and stride information.</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Incorrect CUDA kernel usage may lead to runtime errors or undefined behavior.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">Tensor::value_type fillValue = 2.0;</div>
<div class="line">Tensor::size_type batchIndex = 0;</div>
<div class="line">Tensor::size_type channelIndex = 1;</div>
<div class="line"><span class="keywordtype">bool</span> fillGrad = <span class="keyword">false</span>;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    tensor.fillMatrix(fillValue, batchIndex, channelIndex, fillGrad);</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::invalid_argument&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00316">316</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ae6144f6d7fa612d98538f17baf4ef574_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ae6144f6d7fa612d98538f17baf4ef574_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ae6144f6d7fa612d98538f17baf4ef574_cgraph" id="aclassnz_1_1data_1_1_tensor_ae6144f6d7fa612d98538f17baf4ef574_cgraph">
<area shape="rect" title="Fill a specific matrix slice within the Tensor with a given value." alt="" coords="5,76,178,103"/>
<area shape="rect" href="namespacenz_1_1krnl.html#ad136c8a6560a5305984ce0a31bea71bf" title="Kernel function to fill a data array with a given value." alt="" coords="250,47,340,73"/>
<area shape="poly" title=" " alt="" coords="178,74,234,66,235,71,178,80"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="226,97,364,140"/>
<area shape="poly" title=" " alt="" coords="178,99,211,104,210,109,178,104"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="412,5,597,48"/>
<area shape="poly" title=" " alt="" coords="340,50,397,41,397,46,341,56"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="412,72,597,115"/>
<area shape="poly" title=" " alt="" coords="341,64,397,74,397,79,340,70"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="645,39,831,81"/>
<area shape="poly" title=" " alt="" coords="597,77,629,73,630,78,598,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="645,105,831,148"/>
<area shape="poly" title=" " alt="" coords="598,104,630,109,629,114,597,109"/>
</map>
</div>

</div>
</div>
<a id="a373a517d4a813c94a820d0a45806693e" name="a373a517d4a813c94a820d0a45806693e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a373a517d4a813c94a820d0a45806693e">&#9670;&#160;</a></span>find() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_dimension.html">Tensor::shape_type</a> nz::data::Tensor::find </td>
          <td>(</td>
          <td class="paramtype">value_type</td>          <td class="paramname"><span class="paramname"><em>value</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Finds the first occurrence of a given value in the entire tensor and returns its shape indices. </p>
<p>This function retrieves the tensor data from the device to the host, then iterates through the data to find the first element equal to the given value. Once found, it calculates the corresponding shape indices (batch, channel, height, width) and returns them.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">value</td><td>The value to search for in the tensor. Memory location: host-side (compared against data copied from the device).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A <code>Tensor::shape_type</code> object representing the shape indices (batch, channel, height, width) of the first occurrence of the given value in the tensor. Memory flow: device-to-host.</dd></dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the tensor (<code>_size</code>), due to the linear traversal of the tensor data.</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function, as it depends on <code><a class="el" href="#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector.">hostData()</a></code>.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">Tensor::value_type targetValue = 5.0;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    <a class="code hl_class" href="classnz_1_1data_1_1_dimension.html">Tensor::shape_type</a> indices = tensor.find(targetValue);</div>
<div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;The first occurrence of &quot;</span> &lt;&lt; targetValue &lt;&lt; <span class="stringliteral">&quot; is at indices: (&quot;</span></div>
<div class="line">              &lt;&lt; indices[0] &lt;&lt; <span class="stringliteral">&quot;, &quot;</span> &lt;&lt; indices[1] &lt;&lt; <span class="stringliteral">&quot;, &quot;</span> &lt;&lt; indices[2] &lt;&lt; <span class="stringliteral">&quot;, &quot;</span> &lt;&lt; indices[3] &lt;&lt; <span class="stringliteral">&quot;)&quot;</span> &lt;&lt; std::endl;</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::exception&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00660">660</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a373a517d4a813c94a820d0a45806693e_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a373a517d4a813c94a820d0a45806693e_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a373a517d4a813c94a820d0a45806693e_cgraph" id="aclassnz_1_1data_1_1_tensor_a373a517d4a813c94a820d0a45806693e_cgraph">
<area shape="rect" title="Finds the first occurrence of a given value in the entire tensor and returns its shape indices." alt="" coords="5,47,150,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="198,47,374,73"/>
<area shape="poly" title=" " alt="" coords="150,57,182,57,182,63,150,63"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="422,47,601,73"/>
<area shape="poly" title=" " alt="" coords="374,57,406,57,406,63,374,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="649,5,835,48"/>
<area shape="poly" title=" " alt="" coords="601,44,633,40,634,45,602,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="649,72,835,115"/>
<area shape="poly" title=" " alt="" coords="602,70,634,75,633,80,601,76"/>
</map>
</div>

</div>
</div>
<a id="ae6c6bc33a47e23ec62e6a62e5e25a8ed" name="ae6c6bc33a47e23ec62e6a62e5e25a8ed"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ae6c6bc33a47e23ec62e6a62e5e25a8ed">&#9670;&#160;</a></span>find() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_dimension.html">Tensor::shape_type</a> nz::data::Tensor::find </td>
          <td>(</td>
          <td class="paramtype">value_type</td>          <td class="paramname"><span class="paramname"><em>value</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>batch</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>channel</em></span>&#160;) const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Finds the first occurrence of a given value in a specific batch and channel of the tensor and returns its shape indices. </p>
<p>This function first calculates the offset in the tensor data based on the provided batch and channel indices. It then retrieves the tensor data from the device to the host and iterates through the subset of data in the specified batch and channel to find the first element equal to the given value. Once found, it calculates the height and width indices and returns the complete shape indices (batch, channel, height, width).</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">value</td><td>The value to search for in the tensor. Memory location: host-side (compared against data copied from the device). </td></tr>
    <tr><td class="paramname">batch</td><td>The batch index. Memory location: host-side (used for offset calculation). </td></tr>
    <tr><td class="paramname">channel</td><td>The channel index. Memory location: host-side (used for offset calculation).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A <code>Tensor::shape_type</code> object representing the shape indices (batch, channel, height, width) of the first occurrence of the given value in the specified batch and channel of the tensor. Memory flow: device-to-host.</dd></dl>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>When the <code>batch</code> or <code>channel</code> index is out of bounds.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(m), where m is the number of elements in the specified batch and channel (<code>_shape[2] * _shape[3]</code>), due to the linear traversal of the subset of the tensor data.</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function, as it depends on <code><a class="el" href="#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector.">hostData()</a></code>.</li>
<li>Ensure that the <code>batch</code> and <code>channel</code> indices are within the valid range of the tensor's shape to avoid unexpected behavior.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">Tensor::value_type targetValue = 5.0;</div>
<div class="line">Tensor::size_type batch = 0;</div>
<div class="line">Tensor::size_type channel = 0;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    <a class="code hl_class" href="classnz_1_1data_1_1_dimension.html">Tensor::shape_type</a> indices = tensor.find(targetValue, batch, channel);</div>
<div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;The first occurrence of &quot;</span> &lt;&lt; targetValue &lt;&lt; <span class="stringliteral">&quot; in batch &quot;</span> &lt;&lt; batch &lt;&lt; <span class="stringliteral">&quot; and channel &quot;</span> &lt;&lt; channel</div>
<div class="line">              &lt;&lt; <span class="stringliteral">&quot; is at indices: (&quot;</span> &lt;&lt; indices[0] &lt;&lt; <span class="stringliteral">&quot;, &quot;</span> &lt;&lt; indices[1] &lt;&lt; <span class="stringliteral">&quot;, &quot;</span> &lt;&lt; indices[2] &lt;&lt; <span class="stringliteral">&quot;, &quot;</span> &lt;&lt; indices[3] &lt;&lt; <span class="stringliteral">&quot;)&quot;</span> &lt;&lt; std::endl;</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::exception&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00676">676</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ae6c6bc33a47e23ec62e6a62e5e25a8ed_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ae6c6bc33a47e23ec62e6a62e5e25a8ed_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ae6c6bc33a47e23ec62e6a62e5e25a8ed_cgraph" id="aclassnz_1_1data_1_1_tensor_ae6c6bc33a47e23ec62e6a62e5e25a8ed_cgraph">
<area shape="rect" title="Finds the first occurrence of a given value in a specific batch and channel of the tensor and returns..." alt="" coords="5,43,150,69"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="217,5,355,48"/>
<area shape="poly" title=" " alt="" coords="150,43,200,36,201,41,151,48"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="198,72,374,99"/>
<area shape="poly" title=" " alt="" coords="151,64,183,68,182,73,150,69"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="422,72,601,99"/>
<area shape="poly" title=" " alt="" coords="374,83,406,83,406,88,374,88"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="649,31,835,73"/>
<area shape="poly" title=" " alt="" coords="601,70,633,65,634,70,602,75"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="649,97,835,140"/>
<area shape="poly" title=" " alt="" coords="602,96,634,100,633,106,601,101"/>
</map>
</div>

</div>
</div>
<a id="ad6107b98beb881d0209345185d5ad145" name="ad6107b98beb881d0209345185d5ad145"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad6107b98beb881d0209345185d5ad145">&#9670;&#160;</a></span>grad()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type * nz::data::Tensor::grad </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Retrieves a pointer to the gradient data stored in GPU memory. </p>
<dl class="section return"><dt>Returns</dt><dd>A <code>value_type*</code> (pointer to float) pointing to the tensor's gradient data in GPU memory.</dd></dl>
<p>This function provides access to the gradient data of the tensor, stored in GPU memory. If the tensor does not require gradient computation (<code>requires_grad</code> is <code>false</code>), the function throws a <code>std::runtime_error</code>.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::runtime_error</td><td>If the tensor does not require gradient computation.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The returned pointer points to GPU memory and cannot be directly dereferenced in CPU code.</li>
<li>Ensure that CUDA synchronization is handled properly before using this pointer in GPU operations.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3}, <span class="keyword">true</span>); <span class="comment">// Create a tensor that requires gradients</span></div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    <span class="keyword">const</span> <span class="keywordtype">float</span>* grad_data = tensor.grad(); <span class="comment">// Access raw gradient data</span></div>
<div class="line">    <span class="comment">// Use grad_data in CUDA kernels or other GPU-based operations</span></div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::runtime_error&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl; <span class="comment">// Handle error if tensor does not require gradients</span></div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00445">445</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a615af61999990e2edebacf5afbad0e57" name="a615af61999990e2edebacf5afbad0e57"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a615af61999990e2edebacf5afbad0e57">&#9670;&#160;</a></span>hostData()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">std::vector&lt; Tensor::value_type &gt; nz::data::Tensor::hostData </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span><span class="mlabel">noexcept</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Retrieves the tensor data from the device to the host and returns it as a std::vector. </p>
<p>This member function is used to transfer the tensor data from the device memory to the host memory. It returns a <code>std::vector</code> containing the tensor data.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A <code>std::vector</code> of <code>Tensor::value_type</code> containing the tensor data. Memory flow: device-to-host.</dd></dl>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>A temporary array <code>temp</code> of size <code>_size</code> is dynamically allocated on the host using <code>new</code>.</li>
<li>After the data is copied from the device to the host using <code>cudaMemcpy</code>, a <code>std::vector</code> is constructed from the temporary array.</li>
<li>The temporary array <code>temp</code> is then deleted using <code>delete[]</code> to avoid memory leaks.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function is marked as <code>noexcept</code>, meaning it does not throw any exceptions. However, if <code>cudaMemcpy</code> fails, it may lead to undefined behavior.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on the <code><a class="el" href="#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete.">syncData()</a></code> function to synchronize the data before the transfer.</li>
<li>Relies on <code>cudaMemcpy</code> to transfer data from the device to the host.</li>
<li>The internal member variables <code>_data</code> and <code>_size</code> are used to access the device data and its size.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the tensor (<code>_size</code>).</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function.</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>If <code>cudaMemcpy</code> fails, the behavior of this function is undefined. Error checking for <code>cudaMemcpy</code> is not performed in this function.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">std::vector&lt;Tensor::value_type&gt; <a class="code hl_function" href="#a615af61999990e2edebacf5afbad0e57">hostData</a> = tensor.<a class="code hl_function" href="#a615af61999990e2edebacf5afbad0e57">hostData</a>();</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a615af61999990e2edebacf5afbad0e57"><div class="ttname"><a href="#a615af61999990e2edebacf5afbad0e57">nz::data::Tensor::hostData</a></div><div class="ttdeci">std::vector&lt; value_type &gt; hostData() const noexcept</div><div class="ttdoc">Retrieves the tensor data from the device to the host and returns it as a std::vector.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00436">Tensor.cu:436</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00436">436</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a615af61999990e2edebacf5afbad0e57_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a615af61999990e2edebacf5afbad0e57_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a615af61999990e2edebacf5afbad0e57_cgraph" id="aclassnz_1_1data_1_1_tensor_a615af61999990e2edebacf5afbad0e57_cgraph">
<area shape="rect" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="5,47,182,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="230,47,409,73"/>
<area shape="poly" title=" " alt="" coords="182,57,214,57,214,63,182,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="457,5,642,48"/>
<area shape="poly" title=" " alt="" coords="409,44,441,40,442,45,410,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="457,72,642,115"/>
<area shape="poly" title=" " alt="" coords="410,70,442,75,441,80,409,76"/>
</map>
</div>

</div>
</div>
<a id="a7fd4badf84f9c5398e08b23a9826dfbc" name="a7fd4badf84f9c5398e08b23a9826dfbc"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a7fd4badf84f9c5398e08b23a9826dfbc">&#9670;&#160;</a></span>hostGrad()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">std::vector&lt; Tensor::value_type &gt; nz::data::Tensor::hostGrad </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Retrieves the gradient data of the tensor from the device to the host and returns it as a std::vector. </p>
<p>This member function transfers the gradient data of the tensor from the device memory to the host memory. It returns a <code>std::vector</code> containing the gradient data of the tensor.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A <code>std::vector</code> of <code>Tensor::value_type</code> containing the gradient data. Memory flow: device-to-host.</dd></dl>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>A temporary array <code>temp</code> of size <code>_size</code> is dynamically allocated on the host using <code>new</code>.</li>
<li>After the gradient data is copied from the device to the host using <code>cudaMemcpy</code>, a <code>std::vector</code> is constructed from the temporary array.</li>
<li>The temporary array <code>temp</code> is then deleted using <code>delete[]</code> to avoid memory leaks.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>Throws <code>std::runtime_error</code> if the tensor does not require gradients (<code>_requires_grad</code> is <code>false</code>).</li>
<li>If <code>cudaMemcpy</code> fails, it may lead to undefined behavior as error-checking for <code>cudaMemcpy</code> is not performed in this function.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on the <code><a class="el" href="#af28425ddc9bee1f75fd923a0de68c37b" title="Synchronize the gradient data of the tensor if gradient computation is required.">syncGrad()</a></code> function to synchronize the gradient data before the transfer.</li>
<li>Relies on <code>cudaMemcpy</code> to transfer the gradient data from the device to the host.</li>
<li>The internal member variables <code>_grad</code> and <code>_size</code> are used to access the device gradient data and its size.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::runtime_error</td><td>When the tensor does not require gradients.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the tensor (<code>_size</code>).</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function.</li>
<li>Ensure that the tensor requires gradients before calling this function to avoid exceptions.</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>If <code>cudaMemcpy</code> fails, the behavior of this function is undefined.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    std::vector&lt;Tensor::value_type&gt; <a class="code hl_function" href="#a7fd4badf84f9c5398e08b23a9826dfbc">hostGrad</a> = tensor.hostGrad();</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::runtime_error&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a7fd4badf84f9c5398e08b23a9826dfbc"><div class="ttname"><a href="#a7fd4badf84f9c5398e08b23a9826dfbc">nz::data::Tensor::hostGrad</a></div><div class="ttdeci">std::vector&lt; value_type &gt; hostGrad() const</div><div class="ttdoc">Retrieves the gradient data of the tensor from the device to the host and returns it as a std::vector...</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00452">Tensor.cu:452</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00452">452</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a7fd4badf84f9c5398e08b23a9826dfbc_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a7fd4badf84f9c5398e08b23a9826dfbc_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a7fd4badf84f9c5398e08b23a9826dfbc_cgraph" id="aclassnz_1_1data_1_1_tensor_a7fd4badf84f9c5398e08b23a9826dfbc_cgraph">
<area shape="rect" title="Retrieves the gradient data of the tensor from the device to the host and returns it as a std::vector..." alt="" coords="5,47,182,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#af28425ddc9bee1f75fd923a0de68c37b" title="Synchronize the gradient data of the tensor if gradient computation is required." alt="" coords="230,47,409,73"/>
<area shape="poly" title=" " alt="" coords="182,57,214,57,214,63,182,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="457,5,642,48"/>
<area shape="poly" title=" " alt="" coords="409,44,441,40,442,45,410,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="457,72,642,115"/>
<area shape="poly" title=" " alt="" coords="410,70,442,75,441,80,409,76"/>
</map>
</div>

</div>
</div>
<a id="a9131832f57339c0de2e7fb7955940a55" name="a9131832f57339c0de2e7fb7955940a55"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a9131832f57339c0de2e7fb7955940a55">&#9670;&#160;</a></span>max() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::max </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Finds the maximum value in the tensor. </p>
<p>This member function retrieves the tensor data from the device to the host and then iterates through it to find the maximum value.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The maximum value of type <code>Tensor::value_type</code> in the tensor. Memory flow: device-to-host.</dd></dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the tensor (<code>_size</code>), due to the linear traversal of the tensor data.</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function, as it depends on <code><a class="el" href="#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector.">hostData()</a></code>.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    Tensor::value_type maxVal = tensor.max();</div>
<div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;The maximum value in the tensor is: &quot;</span> &lt;&lt; maxVal &lt;&lt; std::endl;</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::exception&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00608">608</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a9131832f57339c0de2e7fb7955940a55_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a9131832f57339c0de2e7fb7955940a55_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a9131832f57339c0de2e7fb7955940a55_cgraph" id="aclassnz_1_1data_1_1_tensor_a9131832f57339c0de2e7fb7955940a55_cgraph">
<area shape="rect" title="Finds the maximum value in the tensor." alt="" coords="5,47,154,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="202,47,378,73"/>
<area shape="poly" title=" " alt="" coords="154,57,186,57,186,63,154,63"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="426,47,605,73"/>
<area shape="poly" title=" " alt="" coords="378,57,410,57,410,63,378,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="653,5,839,48"/>
<area shape="poly" title=" " alt="" coords="605,44,637,40,638,45,606,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="653,72,839,115"/>
<area shape="poly" title=" " alt="" coords="606,70,638,75,637,80,605,76"/>
</map>
</div>

</div>
</div>
<a id="a90f7c7cde42c58b41f77d1b941da129f" name="a90f7c7cde42c58b41f77d1b941da129f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a90f7c7cde42c58b41f77d1b941da129f">&#9670;&#160;</a></span>max() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::max </td>
          <td>(</td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>batch</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>channel</em></span>&#160;) const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Finds the maximum value in a specific batch and channel of the tensor. </p>
<p>This member function first validates the provided batch and channel indices. If they are valid, it calculates the offset in the tensor data and then finds the maximum value within that subset of the tensor.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">batch</td><td>The batch index. Memory location: host-to-device (used for index calculation). </td></tr>
    <tr><td class="paramname">channel</td><td>The channel index. Memory location: host-to-device (used for index calculation).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The maximum value of type <code>Tensor::value_type</code> in the specified batch and channel of the tensor. Memory flow: device-to-host.</dd></dl>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>When the <code>batch</code> or <code>channel</code> index is out of bounds.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(m), where m is the number of elements in the specified batch and channel (<code>_shape[2] * _shape[3]</code>), due to the linear traversal of the subset of the tensor data.</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function, as it depends on <code><a class="el" href="#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector.">hostData()</a></code>.</li>
<li>Ensure that the <code>batch</code> and <code>channel</code> indices are within the valid range of the tensor's shape to avoid exceptions.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">Tensor::size_type batch = 0;</div>
<div class="line">Tensor::size_type channel = 0;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    Tensor::value_type maxVal = tensor.max(batch, channel);</div>
<div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;The maximum value in batch &quot;</span> &lt;&lt; batch &lt;&lt; <span class="stringliteral">&quot; and channel &quot;</span> &lt;&lt; channel &lt;&lt; <span class="stringliteral">&quot; is: &quot;</span> &lt;&lt; maxVal &lt;&lt; std::endl;</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::exception&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00619">619</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a90f7c7cde42c58b41f77d1b941da129f_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a90f7c7cde42c58b41f77d1b941da129f_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a90f7c7cde42c58b41f77d1b941da129f_cgraph" id="aclassnz_1_1data_1_1_tensor_a90f7c7cde42c58b41f77d1b941da129f_cgraph">
<area shape="rect" title="Finds the maximum value in a specific batch and channel of the tensor." alt="" coords="5,43,154,69"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="221,5,359,48"/>
<area shape="poly" title=" " alt="" coords="154,43,205,36,206,41,154,48"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="202,72,378,99"/>
<area shape="poly" title=" " alt="" coords="154,64,186,68,185,73,154,69"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="426,72,605,99"/>
<area shape="poly" title=" " alt="" coords="378,83,410,83,410,88,378,88"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="653,31,839,73"/>
<area shape="poly" title=" " alt="" coords="605,70,637,65,638,70,606,75"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="653,97,839,140"/>
<area shape="poly" title=" " alt="" coords="606,96,638,100,637,106,605,101"/>
</map>
</div>

</div>
</div>
<a id="a70caeac6652c0008b7554db438db090c" name="a70caeac6652c0008b7554db438db090c"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a70caeac6652c0008b7554db438db090c">&#9670;&#160;</a></span>min() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::min </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Finds the minimum value in the entire tensor. </p>
<p>This function retrieves the tensor data from the device to the host and iterates through it to determine the minimum value.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The minimum value of type <code>Tensor::value_type</code> in the tensor. Memory flow: device-to-host.</dd></dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the tensor (<code>_size</code>), due to the linear traversal of the tensor data.</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function, as it depends on <code><a class="el" href="#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector.">hostData()</a></code>.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    Tensor::value_type minVal = tensor.min();</div>
<div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;The minimum value in the tensor is: &quot;</span> &lt;&lt; minVal &lt;&lt; std::endl;</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::exception&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00634">634</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a70caeac6652c0008b7554db438db090c_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a70caeac6652c0008b7554db438db090c_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a70caeac6652c0008b7554db438db090c_cgraph" id="aclassnz_1_1data_1_1_tensor_a70caeac6652c0008b7554db438db090c_cgraph">
<area shape="rect" title="Finds the minimum value in the entire tensor." alt="" coords="5,47,150,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="198,47,374,73"/>
<area shape="poly" title=" " alt="" coords="150,57,182,57,182,63,150,63"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="422,47,601,73"/>
<area shape="poly" title=" " alt="" coords="374,57,406,57,406,63,374,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="649,5,835,48"/>
<area shape="poly" title=" " alt="" coords="601,44,633,40,634,45,602,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="649,72,835,115"/>
<area shape="poly" title=" " alt="" coords="602,70,634,75,633,80,601,76"/>
</map>
</div>

</div>
</div>
<a id="ae846233848b4cd26181205a594c083b5" name="ae846233848b4cd26181205a594c083b5"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ae846233848b4cd26181205a594c083b5">&#9670;&#160;</a></span>min() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::min </td>
          <td>(</td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>batch</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>channel</em></span>&#160;) const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Finds the minimum value in a specific batch and channel of the tensor. </p>
<p>This function first validates the provided batch and channel indices. If they are valid, it calculates the offset in the tensor data and then finds the minimum value within that subset of the tensor.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">batch</td><td>The batch index. Memory location: host-to-device (used for index calculation). </td></tr>
    <tr><td class="paramname">channel</td><td>The channel index. Memory location: host-to-device (used for index calculation).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The minimum value of type <code>Tensor::value_type</code> in the specified batch and channel of the tensor. Memory flow: device-to-host.</dd></dl>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>When the <code>batch</code> or <code>channel</code> index is out of bounds.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(m), where m is the number of elements in the specified batch and channel (<code>_shape[2] * _shape[3]</code>), due to the linear traversal of the subset of the tensor data.</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function, as it depends on <code><a class="el" href="#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector.">hostData()</a></code>.</li>
<li>Ensure that the <code>batch</code> and <code>channel</code> indices are within the valid range of the tensor's shape to avoid exceptions.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">```cpp</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">Tensor::size_type batch = 0;</div>
<div class="line">Tensor::size_type channel = 0;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    Tensor::value_type minVal = tensor.min(batch, channel);</div>
<div class="line">    std::cout &lt;&lt; <span class="stringliteral">&quot;The minimum value in batch &quot;</span> &lt;&lt; batch &lt;&lt; <span class="stringliteral">&quot; and channel &quot;</span> &lt;&lt; channel &lt;&lt; <span class="stringliteral">&quot; is: &quot;</span> &lt;&lt; minVal &lt;&lt; std::endl;</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::exception&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
<div class="line">```</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00645">645</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ae846233848b4cd26181205a594c083b5_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ae846233848b4cd26181205a594c083b5_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ae846233848b4cd26181205a594c083b5_cgraph" id="aclassnz_1_1data_1_1_tensor_ae846233848b4cd26181205a594c083b5_cgraph">
<area shape="rect" title="Finds the minimum value in a specific batch and channel of the tensor." alt="" coords="5,43,150,69"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="217,5,355,48"/>
<area shape="poly" title=" " alt="" coords="150,43,200,36,201,41,151,48"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a615af61999990e2edebacf5afbad0e57" title="Retrieves the tensor data from the device to the host and returns it as a std::vector." alt="" coords="198,72,374,99"/>
<area shape="poly" title=" " alt="" coords="151,64,183,68,182,73,150,69"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="422,72,601,99"/>
<area shape="poly" title=" " alt="" coords="374,83,406,83,406,88,374,88"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="649,31,835,73"/>
<area shape="poly" title=" " alt="" coords="601,70,633,65,634,70,602,75"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="649,97,835,140"/>
<area shape="poly" title=" " alt="" coords="602,96,634,100,633,106,601,101"/>
</map>
</div>

</div>
</div>
<a id="aae7b7714f78f4d366e66f1664d37d36a" name="aae7b7714f78f4d366e66f1664d37d36a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aae7b7714f78f4d366e66f1664d37d36a">&#9670;&#160;</a></span>operator!=()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">bool nz::data::Tensor::operator!= </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Checks if two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects are not equal. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The other <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object to compare with. Memory flow: device - to - host (the comparison in the <code>operator==</code> function may involve data transfer from device to host).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>Returns true if the two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects are not equal, false otherwise.</dd></dl>
<p>This function checks the inequality of two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects. It simply negates the result of the <code>operator==</code> function. So, it relies on the implementation of the <code>operator==</code> to determine the equality of the two Tensors.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>All memory management related to the comparison is handled by the <code>operator==</code> function. This function itself does not allocate or free any memory.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>Any exceptions that may occur during the comparison are handled by the <code>operator==</code> function. This function does not have its own exception handling mechanism.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends entirely on the <code>operator==</code> function of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is the same as that of the <code>operator==</code> function, which is O(n) where n is the number of elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor1; <span class="comment">// Assume Tensor1 is properly initialized</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor2; <span class="comment">// Assume Tensor2 is properly initialized</span></div>
<div class="line"><span class="keywordtype">bool</span> isNotEqual = tensor1 != tensor2;</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00550">550</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="aaa22ac6f3de75ee92a4307320eda7e87" name="aaa22ac6f3de75ee92a4307320eda7e87"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aaa22ac6f3de75ee92a4307320eda7e87">&#9670;&#160;</a></span>operator*()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> nz::data::Tensor::operator* </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs matrix multiplication of two tensors (matrices) and returns the result. </p>
<p>This operator performs matrix multiplication between two tensors (2D matrices) and returns a new tensor containing the result of the multiplication. The number of columns in the first tensor must match the number of rows in the second tensor for matrix multiplication to be valid.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The tensor (matrix) to multiply with the current tensor. </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A new tensor containing the result of the matrix multiplication.</dd></dl>
<p>This function checks if the dimensions of the two tensors are compatible for matrix multiplication. If the number of columns in the current tensor does not match the number of rows in the <code>other</code> tensor, it throws an <code>std::invalid_argument</code> exception. It then creates a new tensor to hold the result of the multiplication and uses a CUDA kernel (<code>GeneralMatrixMul</code>) to perform the matrix multiplication in parallel on the GPU.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>If the matrix dimensions are incompatible for multiplication.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The number of columns in the current tensor (<code>_shape[1]</code>) must match the number of rows in the <code>other</code> tensor (<code>other._shape[0]</code>) for the multiplication to be valid.</li>
<li>This operator uses a CUDA kernel to perform matrix multiplication, and the result is stored in a new tensor, which is returned.</li>
<li>The original tensors are not modified.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor1({2, 3});  <span class="comment">// Create a 2x3 matrix</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor2({3, 2});  <span class="comment">// Create a 3x2 matrix</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> result = tensor1 * tensor2;  <span class="comment">// Multiply the matrices (result will be 2x2)</span></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00343">343</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_aaa22ac6f3de75ee92a4307320eda7e87_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_aaa22ac6f3de75ee92a4307320eda7e87_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_aaa22ac6f3de75ee92a4307320eda7e87_cgraph" id="aclassnz_1_1data_1_1_tensor_aaa22ac6f3de75ee92a4307320eda7e87_cgraph">
<area shape="rect" title="Performs matrix multiplication of two tensors (matrices) and returns the result." alt="" coords="5,107,181,133"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#ae1e87c4a462dd60e02821aa27ffc7e09" title="Retrieves the value of the &#39;c&#39; dimension." alt="" coords="258,5,414,32"/>
<area shape="poly" title=" " alt="" coords="113,104,164,73,228,42,249,34,250,39,230,46,166,77,116,109"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a7eb3acc882c48e775c418d97f709240f" title="Retrieves the value of the &#39;h&#39; dimension." alt="" coords="258,56,414,83"/>
<area shape="poly" title=" " alt="" coords="160,103,253,84,254,89,161,109"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#acc472e84b4c44f649f34b6fbb0eeacf7" title="Retrieves the value of the &#39;n&#39; dimension." alt="" coords="258,107,414,133"/>
<area shape="poly" title=" " alt="" coords="181,117,242,117,242,123,181,123"/>
<area shape="rect" href="namespacenz_1_1data.html#a5a166a472b887c45fde9e5815f072234" title="Performs general matrix multiplication on tensors with broadcast compatibility." alt="" coords="229,157,443,184"/>
<area shape="poly" title=" " alt="" coords="161,131,254,151,253,156,160,137"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a65773c675476dfea3f06b30f21ebbedd" title="Retrieves the value of the &#39;w&#39; dimension." alt="" coords="256,208,416,235"/>
<area shape="poly" title=" " alt="" coords="116,131,166,163,230,194,250,201,249,206,228,198,164,167,113,136"/>
</map>
</div>

</div>
</div>
<a id="a36cd1679c45059de64deeca9152b0288" name="a36cd1679c45059de64deeca9152b0288"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a36cd1679c45059de64deeca9152b0288">&#9670;&#160;</a></span>operator+()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> nz::data::Tensor::operator+ </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Adds two tensors element-wise and returns the result. </p>
<p>This operator performs element-wise addition of two tensors and returns a new tensor containing the sum of the corresponding elements from the two input tensors.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The tensor to be added to the current tensor. </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A new tensor containing the element-wise sum of the two tensors.</dd></dl>
<p>This function checks if the shapes of the two tensors match. If they do not, it throws an <code>std::invalid_argument</code> exception. The function then creates a new tensor to hold the result of the addition and uses a CUDA kernel (<code>MatrixAddKernel</code>) to compute the sum of the tensors' elements in parallel on the GPU.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>If the shapes of the two tensors do not match.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The tensors must have the same shape. If they do not, an exception is thrown.</li>
<li>This operator uses a CUDA kernel to perform the element-wise addition, and the result is stored in a new tensor, which is returned.</li>
<li>The original tensors are not modified.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor1({2, 3});</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor2({2, 3});</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> result = tensor1 + tensor2;  <span class="comment">// Adds the two tensors element-wise</span></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00331">331</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a36cd1679c45059de64deeca9152b0288_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a36cd1679c45059de64deeca9152b0288_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a36cd1679c45059de64deeca9152b0288_cgraph" id="aclassnz_1_1data_1_1_tensor_a36cd1679c45059de64deeca9152b0288_cgraph">
<area shape="rect" title="Adds two tensors element&#45;wise and returns the result." alt="" coords="5,43,184,69"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#ab4f9f0cec97b8e579b62ccb37975de3c" title="Performs broadcasting between two Dimension objects and returns the resulting Dimension." alt="" coords="248,5,386,48"/>
<area shape="poly" title=" " alt="" coords="184,42,232,35,233,40,184,47"/>
<area shape="rect" href="namespacenz_1_1data.html#a8cf4ac2437dd67698684169bebb225d4" title="Performs matrix addition operation on tensors with broadcast compatibility." alt="" coords="232,72,403,99"/>
<area shape="poly" title=" " alt="" coords="184,65,217,69,216,75,184,70"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4133f0142396fc574d750b30c5c6ea10" title="Retrieves the dimensions of the Dimension object as a std::vector." alt="" coords="663,5,802,48"/>
<area shape="poly" title=" " alt="" coords="387,22,451,20,615,20,648,21,648,26,615,25,451,25,387,27"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a478ed242c20f8f99f9dffcd8eb9b3f52" title="Checks if the current Dimension object is broadcast compatible with another Dimension object." alt="" coords="451,35,615,77"/>
<area shape="poly" title=" " alt="" coords="387,33,436,40,435,45,386,39"/>
<area shape="poly" title=" " alt="" coords="615,41,647,36,648,42,616,47"/>
</map>
</div>

</div>
</div>
<a id="ad66d0c0f5d9ecb375e1006bc0aecf404" name="ad66d0c0f5d9ecb375e1006bc0aecf404"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad66d0c0f5d9ecb375e1006bc0aecf404">&#9670;&#160;</a></span>operator-() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> nz::data::Tensor::operator- </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Negates all elements of the tensor and returns the result. </p>
<p>This operator performs element-wise negation of the tensor, returning a new tensor that contains the negated values of the current tensor. Each element in the tensor is multiplied by <code>-1</code> to compute its negation.</p>
<dl class="section return"><dt>Returns</dt><dd>A new tensor containing the element-wise negation of the current tensor.</dd></dl>
<p>This function uses a CUDA kernel (<code>Negation</code>) to perform the negation of each element in the tensor in parallel on the GPU. The result is stored in a new tensor, which is returned.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This operator does not modify the original tensor; it returns a new tensor with the negated values.</li>
<li>The operation is performed element-wise, meaning each individual element is negated.</li>
<li>The operation utilizes GPU parallelization for efficiency.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> result = -tensor;  <span class="comment">// Negates all elements of the tensor</span></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00498">498</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ad66d0c0f5d9ecb375e1006bc0aecf404_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ad66d0c0f5d9ecb375e1006bc0aecf404_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ad66d0c0f5d9ecb375e1006bc0aecf404_cgraph" id="aclassnz_1_1data_1_1_tensor_ad66d0c0f5d9ecb375e1006bc0aecf404_cgraph">
<area shape="rect" title="Negates all elements of the tensor and returns the result." alt="" coords="5,47,180,73"/>
<area shape="rect" href="namespacenz_1_1krnl.html#af7069a420e81babb49b1bc009333d053" title="Kernel function to negate each element of a matrix on the GPU." alt="" coords="228,47,352,73"/>
<area shape="poly" title=" " alt="" coords="180,57,212,57,212,63,180,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="400,5,585,48"/>
<area shape="poly" title=" " alt="" coords="352,47,384,42,385,47,353,52"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="400,72,585,115"/>
<area shape="poly" title=" " alt="" coords="353,68,385,73,384,78,352,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="633,39,819,81"/>
<area shape="poly" title=" " alt="" coords="585,77,617,73,618,78,586,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="633,105,819,148"/>
<area shape="poly" title=" " alt="" coords="586,104,618,109,617,114,585,109"/>
</map>
</div>

</div>
</div>
<a id="a25cc6634977413df0b67d6e7365448a2" name="a25cc6634977413df0b67d6e7365448a2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a25cc6634977413df0b67d6e7365448a2">&#9670;&#160;</a></span>operator-() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> nz::data::Tensor::operator- </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Subtracts one tensor from another element-wise and returns the result. </p>
<p>This operator performs element-wise subtraction of two tensors and returns a new tensor containing the result of subtracting the corresponding elements of the two input tensors.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The tensor to be subtracted from the current tensor. </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A new tensor containing the element-wise difference of the two tensors.</dd></dl>
<p>This function checks if the shapes of the two tensors match. If they do not, it throws an <code>std::invalid_argument</code> exception. The function then creates a new tensor to hold the result of the subtraction and uses a CUDA kernel (<code>MatrixSub</code>) to compute the element-wise subtraction in parallel on the GPU.</p>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>If the shapes of the two tensors do not match.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The tensors must have the same shape. If they do not, an exception is thrown.</li>
<li>This operator uses a CUDA kernel to perform the element-wise subtraction, and the result is stored in a new tensor, which is returned.</li>
<li>The original tensors are not modified.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor1({2, 3});</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor2({2, 3});</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> result = tensor1 - tensor2;  <span class="comment">// Subtracts tensor2 from tensor1 element-wise</span></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00337">337</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a25cc6634977413df0b67d6e7365448a2_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a25cc6634977413df0b67d6e7365448a2_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a25cc6634977413df0b67d6e7365448a2_cgraph" id="aclassnz_1_1data_1_1_tensor_a25cc6634977413df0b67d6e7365448a2_cgraph">
<area shape="rect" title="Subtracts one tensor from another element&#45;wise and returns the result." alt="" coords="5,43,180,69"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#ab4f9f0cec97b8e579b62ccb37975de3c" title="Performs broadcasting between two Dimension objects and returns the resulting Dimension." alt="" coords="244,5,382,48"/>
<area shape="poly" title=" " alt="" coords="180,42,228,35,229,41,180,47"/>
<area shape="rect" href="namespacenz_1_1data.html#a7503b6894e8052ed54eb169550d135c0" title="Performs matrix subtraction operation on tensors with broadcast compatibility." alt="" coords="228,72,399,99"/>
<area shape="poly" title=" " alt="" coords="180,65,212,69,212,75,180,70"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4133f0142396fc574d750b30c5c6ea10" title="Retrieves the dimensions of the Dimension object as a std::vector." alt="" coords="659,5,798,48"/>
<area shape="poly" title=" " alt="" coords="383,22,447,20,611,20,644,21,644,26,611,25,447,25,383,27"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a478ed242c20f8f99f9dffcd8eb9b3f52" title="Checks if the current Dimension object is broadcast compatible with another Dimension object." alt="" coords="447,35,611,77"/>
<area shape="poly" title=" " alt="" coords="383,33,432,40,431,45,382,39"/>
<area shape="poly" title=" " alt="" coords="611,41,643,36,644,42,612,47"/>
</map>
</div>

</div>
</div>
<a id="ad6ac34675276afe1fb2ee2f5d16af538" name="ad6ac34675276afe1fb2ee2f5d16af538"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad6ac34675276afe1fb2ee2f5d16af538">&#9670;&#160;</a></span>operator/()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> nz::data::Tensor::operator/ </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs element-wise division between two Tensors. </p>
<p>This function overloads the division operator to perform element-wise division between the current <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> and another <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. It broadcasts the shapes of the two Tensors if necessary and creates a new <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> to store the result.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> to divide the current <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> by. Memory flow: host-to-function, as the object is passed from the calling code to the function.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>A new <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> containing the result of the element-wise division. Memory flow: function-to-host, as the result is returned from the function to the calling code.</dd></dl>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>A new <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object <code>result</code> is created within the function to store the result of the division. The memory for this <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> is managed automatically by its constructor and destructor.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>This function does not explicitly throw exceptions. However, the <code>_shape.Broadcast</code> method or the <code>tensorElementwiseDivide</code> function may throw exceptions if there are issues with shape broadcasting or the division operation.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>This function depends on the <code>_shape.Broadcast</code> method to handle shape broadcasting between the two Tensors.</li>
<li>It also relies on the <code>tensorElementwiseDivide</code> function to perform the actual element-wise division operation.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the number of elements in the resulting <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> after broadcasting. This is because the <code>tensorElementwiseDivide</code> function needs to process each element.</li>
<li>Ensure that the <code>_shape.Broadcast</code> method and the <code>tensorElementwiseDivide</code> function are correctly implemented.</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>Division by zero may occur if the <code>other</code> <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> contains zero elements, which can lead to undefined behavior.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor1;</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor2;</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> result = tensor1 / tensor2;</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00352">352</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_ad6ac34675276afe1fb2ee2f5d16af538_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_ad6ac34675276afe1fb2ee2f5d16af538_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_ad6ac34675276afe1fb2ee2f5d16af538_cgraph" id="aclassnz_1_1data_1_1_tensor_ad6ac34675276afe1fb2ee2f5d16af538_cgraph">
<area shape="rect" title="Performs element&#45;wise division between two Tensors." alt="" coords="5,47,180,73"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#ab4f9f0cec97b8e579b62ccb37975de3c" title="Performs broadcasting between two Dimension objects and returns the resulting Dimension." alt="" coords="252,5,390,48"/>
<area shape="poly" title=" " alt="" coords="180,45,236,36,237,42,180,50"/>
<area shape="rect" href="namespacenz_1_1data.html#a1da5cd018533919ed5a750b14c7d6d71" title="Performs element &#45; wise division operation on tensors with broadcast compatibility." alt="" coords="228,72,415,115"/>
<area shape="poly" title=" " alt="" coords="180,70,213,75,212,80,180,75"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4133f0142396fc574d750b30c5c6ea10" title="Retrieves the dimensions of the Dimension object as a std::vector." alt="" coords="675,5,814,48"/>
<area shape="poly" title=" " alt="" coords="391,22,463,20,627,20,660,21,660,26,627,25,463,25,391,27"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a478ed242c20f8f99f9dffcd8eb9b3f52" title="Checks if the current Dimension object is broadcast compatible with another Dimension object." alt="" coords="463,35,627,77"/>
<area shape="poly" title=" " alt="" coords="391,33,448,41,447,46,390,38"/>
<area shape="poly" title=" " alt="" coords="627,41,659,36,660,42,628,47"/>
</map>
</div>

</div>
</div>
<a id="acdb68bf53d38e5a93fdd0effa4c3059a" name="acdb68bf53d38e5a93fdd0effa4c3059a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#acdb68bf53d38e5a93fdd0effa4c3059a">&#9670;&#160;</a></span>operator=() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp; nz::data::Tensor::operator= </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Assignment operator for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object to assign from.</td></tr>
  </table>
  </dd>
</dl>
<p>Performs a deep copy of the tensor, including its shape, data, and gradient (if applicable).</p>
<dl class="section return"><dt>Returns</dt><dd>A reference to the assigned <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object. </dd></dl>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00173">173</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_acdb68bf53d38e5a93fdd0effa4c3059a_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_acdb68bf53d38e5a93fdd0effa4c3059a_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_acdb68bf53d38e5a93fdd0effa4c3059a_cgraph" id="aclassnz_1_1data_1_1_tensor_acdb68bf53d38e5a93fdd0effa4c3059a_cgraph">
<area shape="rect" title="Assignment operator for Tensor." alt="" coords="5,113,184,140"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="232,5,417,48"/>
<area shape="poly" title=" " alt="" coords="121,110,231,58,245,52,247,56,233,62,123,115"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="232,72,417,115"/>
<area shape="poly" title=" " alt="" coords="184,111,216,106,216,112,185,116"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="232,139,417,181"/>
<area shape="poly" title=" " alt="" coords="185,137,216,142,216,147,184,142"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="232,205,417,248"/>
<area shape="poly" title=" " alt="" coords="123,138,233,191,247,197,245,202,231,196,121,143"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="465,5,650,48"/>
<area shape="poly" title=" " alt="" coords="417,24,449,24,449,29,417,29"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="465,139,650,181"/>
<area shape="poly" title=" " alt="" coords="417,157,449,157,449,163,417,163"/>
<area shape="poly" title=" " alt="" coords="400,202,465,184,466,189,402,207"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="465,205,650,248"/>
<area shape="poly" title=" " alt="" coords="417,224,449,224,449,229,417,229"/>
</map>
</div>

</div>
</div>
<a id="a26b24cc132d14e054b3c25923516d781" name="a26b24cc132d14e054b3c25923516d781"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a26b24cc132d14e054b3c25923516d781">&#9670;&#160;</a></span>operator=() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp; nz::data::Tensor::operator= </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;&amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Move assignment operator for <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object to move from.</td></tr>
  </table>
  </dd>
</dl>
<p>Moves the tensor data and ownership of the GPU memory to the new object.</p>
<dl class="section return"><dt>Returns</dt><dd>A reference to the assigned <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object. </dd></dl>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00192">192</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a26b24cc132d14e054b3c25923516d781_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a26b24cc132d14e054b3c25923516d781_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a26b24cc132d14e054b3c25923516d781_cgraph" id="aclassnz_1_1data_1_1_tensor_a26b24cc132d14e054b3c25923516d781_cgraph">
<area shape="rect" title="Move assignment operator for Tensor." alt="" coords="5,47,184,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="232,5,417,48"/>
<area shape="poly" title=" " alt="" coords="184,44,216,40,216,45,185,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="232,72,417,115"/>
<area shape="poly" title=" " alt="" coords="185,70,216,75,216,80,184,76"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="465,5,650,48"/>
<area shape="poly" title=" " alt="" coords="417,24,449,24,449,29,417,29"/>
</map>
</div>

</div>
</div>
<a id="a92c7313608326bb4123d6f08341a6d80" name="a92c7313608326bb4123d6f08341a6d80"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a92c7313608326bb4123d6f08341a6d80">&#9670;&#160;</a></span>operator==()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">bool nz::data::Tensor::operator== </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>other</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Checks if two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects are equal. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">other</td><td>The other <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> object to compare with. Memory flow: device-to-host (data is copied from device to host for comparison).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>Returns true if the two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects are equal, false otherwise.</dd></dl>
<p>This function compares two <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> objects for equality. First, it checks if the <code>_requires_grad</code> flags of the two Tensors are the same. If they differ, the function immediately returns false. Then, it compares the shapes of the two Tensors. If the shapes are not equal, the function also returns false.</p>
<p>After that, it allocates host memory for temporary storage of the data from the device memory of both Tensors. It copies the data from the device to the host and compares each element one by one. If any element in the data differs, it frees the allocated host memory and returns false.</p>
<p>If the <code>_requires_grad</code> flag is set to true, it repeats the same process for the gradients of the Tensors. If any element in the gradients differs, it frees the allocated host memory and returns false.</p>
<p>Finally, if all comparisons pass, it frees the allocated host memory and returns true.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>Two arrays <code>temp</code> and <code>temp_other</code> of size <code>_size</code> are dynamically allocated on the host using <code>new[]</code>. They are freed using <code>delete[]</code> either when a difference is found or at the end of the function.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>The CUDA memory copy operations (<code>cudaMemcpy</code>) may return error codes indicating failures. It is assumed that the calling code or the CUDA runtime will handle these errors appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on the <code>_requires_grad</code>, <code>_shape</code>, <code>_size</code>, <code>_data</code>, and <code>_grad</code> members of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class.</li>
<li>Uses CUDA memory copy operations (<code>cudaMemcpy</code>) to transfer data from device to host.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Be aware of potential CUDA errors during memory copy operations and handle them appropriately in the calling code.</li>
<li>The function has a time complexity of O(n), where n is the number of elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>, due to the element-by-element comparison.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor1; <span class="comment">// Assume Tensor1 is properly initialized</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor2; <span class="comment">// Assume Tensor2 is properly initialized</span></div>
<div class="line"><span class="keywordtype">bool</span> isEqual = tensor1 == tensor2;</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00506">506</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a92c7313608326bb4123d6f08341a6d80_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a92c7313608326bb4123d6f08341a6d80_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a92c7313608326bb4123d6f08341a6d80_cgraph" id="aclassnz_1_1data_1_1_tensor_a92c7313608326bb4123d6f08341a6d80_cgraph">
<area shape="rect" title="Checks if two Tensor objects are equal." alt="" coords="5,47,192,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a0c150b841f02921eb7826a6e03d0267e" title="Synchronize both the tensor data and its gradient data." alt="" coords="240,47,391,73"/>
<area shape="poly" title=" " alt="" coords="192,57,224,57,224,63,192,63"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="439,21,618,48"/>
<area shape="poly" title=" " alt="" coords="391,48,423,45,424,50,392,54"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#af28425ddc9bee1f75fd923a0de68c37b" title="Synchronize the gradient data of the tensor if gradient computation is required." alt="" coords="439,72,618,99"/>
<area shape="poly" title=" " alt="" coords="392,66,424,70,423,75,391,72"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="666,5,852,48"/>
<area shape="poly" title=" " alt="" coords="619,29,651,28,651,33,619,34"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="666,72,852,115"/>
<area shape="poly" title=" " alt="" coords="585,46,658,65,657,70,583,51"/>
<area shape="poly" title=" " alt="" coords="583,69,657,50,658,55,585,74"/>
<area shape="poly" title=" " alt="" coords="619,86,651,87,651,92,619,91"/>
</map>
</div>

</div>
</div>
<a id="a2b2309d5428331f2e6f88037bb123c8f" name="a2b2309d5428331f2e6f88037bb123c8f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a2b2309d5428331f2e6f88037bb123c8f">&#9670;&#160;</a></span>print()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">std::ostream &amp; nz::data::Tensor::print </td>
          <td>(</td>
          <td class="paramtype">std::ostream &amp;</td>          <td class="paramname"><span class="paramname"><em>os</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Prints the tensor data to an output stream. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">os</td><td>The output stream to which the tensor data will be written (host-to-host).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The output stream after the tensor data has been written.</dd></dl>
<p>This function copies the tensor data from device memory to host memory using <code>cudaMemcpy</code>. It then allocates memory on the host using <code>malloc</code> to hold the copied data. After printing the data to the output stream, it frees the allocated host memory using <code>free</code>. The function does not throw any exceptions under normal circumstances. If <code>cudaMemcpy</code> fails, the behavior depends on the <code>CHECK</code> macro, which is assumed to handle errors appropriately.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n), where n is the total number of elements in the tensor.</li>
<li>Ensure that the CUDA environment is properly initialized before calling this function.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">std::ostringstream oss;</div>
<div class="line">tensor.<a class="code hl_function" href="#a2b2309d5428331f2e6f88037bb123c8f">print</a>(oss);</div>
<div class="line">std::cout &lt;&lt; oss.str();</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a2b2309d5428331f2e6f88037bb123c8f"><div class="ttname"><a href="#a2b2309d5428331f2e6f88037bb123c8f">nz::data::Tensor::print</a></div><div class="ttdeci">std::ostream &amp; print(std::ostream &amp;os) const</div><div class="ttdoc">Prints the tensor data to an output stream.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00252">Tensor.cu:252</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00252">252</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a2b2309d5428331f2e6f88037bb123c8f_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a2b2309d5428331f2e6f88037bb123c8f_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a2b2309d5428331f2e6f88037bb123c8f_cgraph" id="aclassnz_1_1data_1_1_tensor_a2b2309d5428331f2e6f88037bb123c8f_cgraph">
<area shape="rect" title="Prints the tensor data to an output stream." alt="" coords="5,232,154,259"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#ae1e87c4a462dd60e02821aa27ffc7e09" title="Retrieves the value of the &#39;c&#39; dimension." alt="" coords="216,5,372,32"/>
<area shape="poly" title=" " alt="" coords="83,231,96,193,120,142,155,88,176,63,200,42,207,37,210,42,203,46,180,67,159,91,125,145,101,195,88,233"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="220,56,368,83"/>
<area shape="poly" title=" " alt="" coords="86,230,128,164,161,126,200,92,209,87,212,92,203,97,165,129,132,167,90,233"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="225,107,364,149"/>
<area shape="poly" title=" " alt="" coords="96,229,142,195,200,159,212,154,214,158,203,164,145,200,99,234"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a7eb3acc882c48e775c418d97f709240f" title="Retrieves the value of the &#39;h&#39; dimension." alt="" coords="216,173,372,200"/>
<area shape="poly" title=" " alt="" coords="130,229,227,202,229,207,132,234"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="202,224,387,267"/>
<area shape="poly" title=" " alt="" coords="154,243,186,243,186,248,154,248"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="202,291,387,333"/>
<area shape="poly" title=" " alt="" coords="126,257,209,283,208,288,125,262"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#acc472e84b4c44f649f34b6fbb0eeacf7" title="Retrieves the value of the &#39;n&#39; dimension." alt="" coords="216,357,372,384"/>
<area shape="poly" title=" " alt="" coords="95,257,140,300,170,323,203,343,216,349,214,354,200,348,167,327,137,304,92,261"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="202,408,387,451"/>
<area shape="poly" title=" " alt="" coords="91,258,133,322,165,361,203,394,210,398,207,403,200,398,162,364,129,326,86,261"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a65773c675476dfea3f06b30f21ebbedd" title="Retrieves the value of the &#39;w&#39; dimension." alt="" coords="215,475,374,501"/>
<area shape="poly" title=" " alt="" coords="87,258,100,298,123,353,157,411,179,438,203,461,209,465,206,469,200,465,175,441,153,414,118,355,95,300,82,260"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="435,257,620,300"/>
<area shape="poly" title=" " alt="" coords="387,296,419,291,420,297,388,301"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="435,324,620,367"/>
<area shape="poly" title=" " alt="" coords="388,323,420,327,419,333,387,328"/>
</map>
</div>

</div>
</div>
<a id="a4b02ed4d2afec1ce75931201af181e14" name="a4b02ed4d2afec1ce75931201af181e14"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a4b02ed4d2afec1ce75931201af181e14">&#9670;&#160;</a></span>printGrad()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">std::ostream &amp; nz::data::Tensor::printGrad </td>
          <td>(</td>
          <td class="paramtype">std::ostream &amp;</td>          <td class="paramname"><span class="paramname"><em>os</em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Prints the gradient values of the tensor to an output stream. </p>
<p>This function prints the gradient of the tensor (<code>_grad</code>) to the provided output stream (<code>os</code>). The gradient data is first copied from GPU memory to host memory, and then it is printed in a 2D matrix format where each row represents one dimension of the gradient. Each element in the gradient is printed, separated by a space.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">os</td><td>The output stream to which the gradient will be printed. </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The same output stream (<code>os</code>), allowing for chaining of stream operations.</dd></dl>
<p>This function performs the following steps:</p><ol type="1">
<li>It allocates memory on the host and copies the gradient data from the device to the host.</li>
<li>It uses <code>std::copy</code> to print the gradient values in a matrix format (row by row).</li>
<li>The function prints each row of the gradient, with each value separated by a space.</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function assumes that the gradient data has already been allocated and is valid.</li>
<li>The gradient is copied from device (GPU) memory to host (CPU) memory for printing, which can be inefficient for large tensors.</li>
<li>The function prints each row of the gradient tensor, enclosed in square brackets, with the elements separated by spaces.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3}, <span class="keyword">true</span>);  <span class="comment">// Create a tensor with gradient support</span></div>
<div class="line">std::cout &lt;&lt; <span class="stringliteral">&quot;Gradient: &quot;</span> &lt;&lt; std::endl;</div>
<div class="line">tensor.<a class="code hl_function" href="#a4b02ed4d2afec1ce75931201af181e14">printGrad</a>(std::cout);  <span class="comment">// Print the gradient of the tensor</span></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a4b02ed4d2afec1ce75931201af181e14"><div class="ttname"><a href="#a4b02ed4d2afec1ce75931201af181e14">nz::data::Tensor::printGrad</a></div><div class="ttdeci">std::ostream &amp; printGrad(std::ostream &amp;os) const</div><div class="ttdoc">Prints the gradient values of the tensor to an output stream.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00464">Tensor.cu:464</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00464">464</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a4b02ed4d2afec1ce75931201af181e14_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a4b02ed4d2afec1ce75931201af181e14_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a4b02ed4d2afec1ce75931201af181e14_cgraph" id="aclassnz_1_1data_1_1_tensor_a4b02ed4d2afec1ce75931201af181e14_cgraph">
<area shape="rect" title="Prints the gradient values of the tensor to an output stream." alt="" coords="5,232,182,259"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#ae1e87c4a462dd60e02821aa27ffc7e09" title="Retrieves the value of the &#39;c&#39; dimension." alt="" coords="244,5,400,32"/>
<area shape="poly" title=" " alt="" coords="98,231,114,193,142,142,180,88,203,63,228,42,236,37,239,41,231,46,206,67,184,91,146,145,119,195,102,233"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="248,56,396,83"/>
<area shape="poly" title=" " alt="" coords="101,230,149,164,186,125,228,92,239,87,241,91,231,97,189,129,153,168,105,233"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="253,107,392,149"/>
<area shape="poly" title=" " alt="" coords="113,229,164,195,228,159,241,153,243,158,231,164,167,200,116,234"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a7eb3acc882c48e775c418d97f709240f" title="Retrieves the value of the &#39;h&#39; dimension." alt="" coords="244,173,400,200"/>
<area shape="poly" title=" " alt="" coords="148,229,252,202,253,207,149,234"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="230,224,415,267"/>
<area shape="poly" title=" " alt="" coords="182,243,214,243,214,248,182,248"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="230,291,415,333"/>
<area shape="poly" title=" " alt="" coords="143,257,233,283,231,288,141,262"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#acc472e84b4c44f649f34b6fbb0eeacf7" title="Retrieves the value of the &#39;n&#39; dimension." alt="" coords="244,357,400,384"/>
<area shape="poly" title=" " alt="" coords="111,257,162,299,195,323,231,343,246,349,243,354,228,348,192,327,159,304,108,261"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="230,408,415,451"/>
<area shape="poly" title=" " alt="" coords="106,257,154,322,190,360,231,394,238,398,236,403,228,398,187,364,150,326,101,261"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a65773c675476dfea3f06b30f21ebbedd" title="Retrieves the value of the &#39;w&#39; dimension." alt="" coords="243,475,402,501"/>
<area shape="poly" title=" " alt="" coords="102,258,117,298,144,353,182,411,205,438,231,461,238,465,235,470,228,465,201,441,178,414,139,356,112,300,97,260"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="463,257,648,300"/>
<area shape="poly" title=" " alt="" coords="415,296,447,291,448,297,416,301"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="463,324,648,367"/>
<area shape="poly" title=" " alt="" coords="416,323,448,327,447,333,415,328"/>
</map>
</div>

</div>
</div>
<a id="a7a9f1d5fae2989181645e5f59f7666d8" name="a7a9f1d5fae2989181645e5f59f7666d8"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a7a9f1d5fae2989181645e5f59f7666d8">&#9670;&#160;</a></span>randomize()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::randomize </td>
          <td>(</td>
          <td class="paramtype">unsigned long long</td>          <td class="paramname"><span class="paramname"><em>seed</em></span><span class="paramdefsep"> = </span><span class="paramdefval">0</span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Randomizes the tensor's data with a uniform distribution. </p>
<p>This function fills the tensor's data with random values sampled from a uniform distribution in the range [0, 1). The random number generator is initialized using the specified seed to ensure reproducibility. The function uses the <code>curand</code> library to generate random numbers on the GPU.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">seed</td><td>A <code>unsigned long long</code> value used to initialize the random number generator. The same seed will produce the same sequence of random numbers, ensuring reproducibility.</td></tr>
  </table>
  </dd>
</dl>
<p>This function performs the following steps:</p><ol type="1">
<li>It creates a random number generator using <code>curandCreateGenerator</code>.</li>
<li>It sets the seed for the random number generator using <code>curandSetPseudoRandomGeneratorSeed</code>.</li>
<li>It generates uniform random numbers in the range [0, 1) and fills the tensor's data with these values.</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The generated random numbers are uniformly distributed in the range [0, 1).</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});  <span class="comment">// Create a tensor with shape 2x3</span></div>
<div class="line">tensor.<a class="code hl_function" href="#a7a9f1d5fae2989181645e5f59f7666d8">randomize</a>(12345);  <span class="comment">// Randomize tensor&#39;s data with a seed of 12345</span></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a7a9f1d5fae2989181645e5f59f7666d8"><div class="ttname"><a href="#a7a9f1d5fae2989181645e5f59f7666d8">nz::data::Tensor::randomize</a></div><div class="ttdeci">void randomize(unsigned long long seed=0) const</div><div class="ttdoc">Randomizes the tensor's data with a uniform distribution.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00298">Tensor.cu:298</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00298">298</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a7a9f1d5fae2989181645e5f59f7666d8_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a7a9f1d5fae2989181645e5f59f7666d8_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a7a9f1d5fae2989181645e5f59f7666d8_cgraph" id="aclassnz_1_1data_1_1_tensor_a7a9f1d5fae2989181645e5f59f7666d8_cgraph">
<area shape="rect" title="Randomizes the tensor&#39;s data with a uniform distribution." alt="" coords="5,47,189,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="237,5,422,48"/>
<area shape="poly" title=" " alt="" coords="189,44,221,40,221,45,189,49"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a731986c2c4ecd056562eaddadef46df8" title="Generates uniformly distributed random numbers on GPU using CURAND." alt="" coords="237,72,422,115"/>
<area shape="poly" title=" " alt="" coords="189,71,221,75,221,80,189,76"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="470,39,655,81"/>
<area shape="poly" title=" " alt="" coords="422,77,454,73,455,78,423,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="470,105,655,148"/>
<area shape="poly" title=" " alt="" coords="423,104,455,109,454,114,422,109"/>
</map>
</div>

</div>
</div>
<a id="a178a2240cd5d441be508490b2613fc55" name="a178a2240cd5d441be508490b2613fc55"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a178a2240cd5d441be508490b2613fc55">&#9670;&#160;</a></span>recip()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::recip </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Computes the reciprocal (1/x) of each element in the tensor and updates the tensor in-place. </p>
<p>This function computes the reciprocal (1/x) of each element in the tensor and stores the results back into the original tensor. The operation is performed element-wise, where each element of the tensor is replaced by its reciprocal.</p>
<p>The function utilizes a temporary buffer allocated on the GPU to store the intermediate reciprocal values. After the computation, the updated data is copied back to the original tensor in GPU memory.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This operation is performed element-wise on the tensor's data.</li>
<li>The original tensor is updated with the computed reciprocal values.</li>
<li>The function uses GPU memory for efficient parallel computation.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});</div>
<div class="line">tensor.<a class="code hl_function" href="#a178a2240cd5d441be508490b2613fc55">recip</a>();  <span class="comment">// Computes the reciprocal of each element in the tensor</span></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a178a2240cd5d441be508490b2613fc55"><div class="ttname"><a href="#a178a2240cd5d441be508490b2613fc55">nz::data::Tensor::recip</a></div><div class="ttdeci">void recip() const</div><div class="ttdoc">Computes the reciprocal (1/x) of each element in the tensor and updates the tensor in-place.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00554">Tensor.cu:554</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00554">554</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a178a2240cd5d441be508490b2613fc55_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a178a2240cd5d441be508490b2613fc55_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a178a2240cd5d441be508490b2613fc55_cgraph" id="aclassnz_1_1data_1_1_tensor_a178a2240cd5d441be508490b2613fc55_cgraph">
<area shape="rect" title="Computes the reciprocal (1/x) of each element in the tensor and updates the tensor in&#45;place." alt="" coords="5,160,157,187"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="223,5,372,32"/>
<area shape="poly" title=" " alt="" coords="89,158,134,102,166,69,203,42,214,36,217,41,206,46,170,73,138,105,93,161"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="205,56,390,99"/>
<area shape="poly" title=" " alt="" coords="106,157,204,108,217,102,220,107,206,113,108,162"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="438,181,623,224"/>
<area shape="poly" title=" " alt="" coords="157,176,423,193,423,198,157,181"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="205,123,390,165"/>
<area shape="poly" title=" " alt="" coords="157,160,189,156,189,161,157,166"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="438,315,623,357"/>
<area shape="poly" title=" " alt="" coords="96,185,141,231,172,256,206,276,259,297,315,312,423,329,422,334,314,317,257,302,203,281,169,260,137,235,92,189"/>
<area shape="rect" href="namespacenz_1_1krnl.html#adc047e65307dbc711235f637227b7d10" title="Kernel function to compute the reciprocal of each element of a matrix on the GPU." alt="" coords="244,240,350,267"/>
<area shape="poly" title=" " alt="" coords="118,185,206,219,242,232,240,237,204,224,116,190"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="438,56,623,99"/>
<area shape="poly" title=" " alt="" coords="390,75,422,75,422,80,390,80"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="671,248,857,291"/>
<area shape="poly" title=" " alt="" coords="390,136,444,136,504,140,565,150,624,167,655,181,683,198,731,235,728,240,680,202,652,185,622,172,564,155,503,146,444,141,390,141"/>
<area shape="poly" title=" " alt="" coords="607,312,671,293,673,298,608,317"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="671,315,857,357"/>
<area shape="poly" title=" " alt="" coords="624,333,656,333,656,339,624,339"/>
<area shape="poly" title=" " alt="" coords="350,239,422,224,423,229,352,244"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="438,248,623,291"/>
<area shape="poly" title=" " alt="" coords="351,254,422,259,422,265,351,260"/>
<area shape="poly" title=" " alt="" coords="624,267,656,267,656,272,624,272"/>
<area shape="poly" title=" " alt="" coords="608,289,673,307,671,312,607,294"/>
</map>
</div>

</div>
</div>
<a id="a7cbc6dd248b882c95840835d0deaae1c" name="a7cbc6dd248b882c95840835d0deaae1c"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a7cbc6dd248b882c95840835d0deaae1c">&#9670;&#160;</a></span>requiresGrad()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">bool nz::data::Tensor::requiresGrad </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span><span class="mlabel">noexcept</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Checks whether the tensor requires gradient computation. </p>
<dl class="section return"><dt>Returns</dt><dd><code>true</code> if the tensor requires gradient computation, <code>false</code> otherwise.</dd></dl>
<p>This function allows you to query whether the tensor is marked for gradient tracking, which is essential for backpropagation in neural networks. By default, tensors do not require gradients unless explicitly specified during construction or via <code>setRequiresGrad</code>. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00224">224</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a877f9f2704e39100142d81d289ddc3f2" name="a877f9f2704e39100142d81d289ddc3f2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a877f9f2704e39100142d81d289ddc3f2">&#9670;&#160;</a></span>reshape()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::reshape </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>shape</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Reshapes the tensor to the specified shape. </p>
<p>This function changes the shape of the tensor, adjusting the layout of the data in memory. If the new shape has more elements than the current shape, the extra elements will be initialized to zero. If the new shape has fewer elements, the excess elements will be discarded.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">shape</td><td>A <code>shape_type</code> (alias for <code>std::vector&lt;int&gt;</code>) representing the new dimensions of the tensor. The total number of elements in the new shape can be larger or smaller than the current shape.</td></tr>
  </table>
  </dd>
</dl>
<p>This function performs the following steps:</p><ol type="1">
<li>It updates the tensor's shape to the new dimensions.</li>
<li>If the new shape requires more elements than the original shape, the new elements are initialized to zero.</li>
<li>If the new shape requires fewer elements, the excess data is discarded.</li>
</ol>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function reallocates the tensor&#39;s device memory when the total number of elements changes, copying the existing data into the new buffer.</li>
<li>If the new shape has more elements than the current tensor, the excess elements will be initialized to zero.</li>
<li>If the new shape has fewer elements, data beyond the new size will be discarded.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});  <span class="comment">// Create a tensor with shape 2x3</span></div>
<div class="line">tensor.<a class="code hl_function" href="#a877f9f2704e39100142d81d289ddc3f2">reshape</a>(std::vector&lt;int&gt;({3, 2}));  <span class="comment">// Reshape the tensor to shape 3x2, unused elements will be filled with zeros</span></div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a877f9f2704e39100142d81d289ddc3f2"><div class="ttname"><a href="#a877f9f2704e39100142d81d289ddc3f2">nz::data::Tensor::reshape</a></div><div class="ttdeci">void reshape(const shape_type &amp;shape)</div><div class="ttdoc">Reshapes the tensor to the specified shape.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00358">Tensor.cu:358</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00358">358</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a877f9f2704e39100142d81d289ddc3f2_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a877f9f2704e39100142d81d289ddc3f2_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a877f9f2704e39100142d81d289ddc3f2_cgraph" id="aclassnz_1_1data_1_1_tensor_a877f9f2704e39100142d81d289ddc3f2_cgraph">
<area shape="rect" title="Reshapes the tensor to the specified shape." alt="" coords="5,247,175,273"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="223,5,408,48"/>
<area shape="poly" title=" " alt="" coords="94,245,111,208,138,158,175,105,221,58,226,54,229,59,224,62,179,108,142,161,116,211,99,247"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="223,72,408,115"/>
<area shape="poly" title=" " alt="" coords="99,245,148,187,182,154,221,124,229,120,232,124,224,129,186,158,152,191,103,248"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="223,139,408,181"/>
<area shape="poly" title=" " alt="" coords="115,244,222,191,235,185,238,190,224,196,118,248"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="223,205,408,248"/>
<area shape="poly" title=" " alt="" coords="174,245,207,240,207,245,175,250"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a71ad766cb2869d3dd6a3931966e81706" title="Asynchronously sets a block of CUDA device memory to a specified value." alt="" coords="223,272,408,315"/>
<area shape="poly" title=" " alt="" coords="175,270,207,275,207,280,174,275"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#aade7b0c42622279888d755f4f7989aac" title="Retrieves the shape of the tensor." alt="" coords="236,339,394,365"/>
<area shape="poly" title=" " alt="" coords="116,271,165,298,224,324,243,331,241,336,222,329,163,302,114,276"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a073622bb031999163987ccf77f8edfb2" title="Calculates the total number of elements in the Dimension object." alt="" coords="246,389,384,432"/>
<area shape="poly" title=" " alt="" coords="105,272,154,321,188,350,224,375,233,380,231,385,221,380,185,354,151,325,102,276"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a31a3aa01fa3ccb56503994a99e39e177" title="Retrieves the total number of elements in the tensor." alt="" coords="242,456,389,483"/>
<area shape="poly" title=" " alt="" coords="99,272,116,307,143,353,179,401,224,442,232,447,229,451,221,446,175,404,139,356,111,309,95,275"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="456,5,641,48"/>
<area shape="poly" title=" " alt="" coords="408,24,440,24,440,29,408,29"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="456,205,641,248"/>
<area shape="poly" title=" " alt="" coords="393,179,457,198,456,203,391,184"/>
<area shape="poly" title=" " alt="" coords="408,224,440,224,440,229,408,229"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="456,272,641,315"/>
<area shape="poly" title=" " alt="" coords="393,246,457,265,456,270,391,251"/>
<area shape="poly" title=" " alt="" coords="391,269,456,250,457,255,393,274"/>
<area shape="poly" title=" " alt="" coords="408,291,440,291,440,296,408,296"/>
</map>
</div>

</div>
</div>
<a id="a2f9be06ac6766a5fa6de3548c722ef43" name="a2f9be06ac6766a5fa6de3548c722ef43"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a2f9be06ac6766a5fa6de3548c722ef43">&#9670;&#160;</a></span>setData()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::setData </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_dimension.html">shape_type</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>position</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">value_type</td>          <td class="paramname"><span class="paramname"><em>value</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>isGrad</em></span><span class="paramdefsep"> = </span><span class="paramdefval">false</span>&#160;) const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Sets the value of an element in the tensor or its gradient at a specified position. </p>
<p>This member function allows you to set the value of a specific element in the tensor or its gradient. It first validates the position and the gradient setting based on the tensor's requirements.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">position</td><td>The position in the tensor where the value will be set. Memory location: host-to-device. </td></tr>
    <tr><td class="paramname">value</td><td>The value to be set at the specified position. Memory location: host-to-device. </td></tr>
    <tr><td class="paramname">isGrad</td><td>A boolean indicating whether to set the value in the gradient or the tensor data. Memory location: host-to-device.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None</dd></dl>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>A temporary array <code>data</code> of size <code>_size</code> is allocated on the host using <code>malloc</code>.</li>
<li>The data from the device (either tensor data or gradient) is copied to the host using <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance().memcpy</code>.</li>
<li>After the value is set at the specified position in the host-side data, the updated data is copied back to the device.</li>
<li>The temporary array <code>data</code> is freed using <code>free</code> to avoid memory leaks.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>Throws <code>std::invalid_argument</code> if the <code>position</code> is out of bounds of the tensor's shape.</li>
<li>Throws <code>std::invalid_argument</code> if <code>isGrad</code> is <code>true</code> but the tensor does not require gradients.</li>
<li>If any of the <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a></code> operations fail, it may lead to undefined behavior as error-checking is not explicitly done in this function.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance()</code> for memory copying and data synchronization operations.</li>
<li>Relies on the <code>_shape</code> member variable to validate the position and calculate the index in the data array.</li>
<li>Uses the <code>_data</code> and <code>_grad</code> member variables to access the tensor data and its gradient.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>When the position is out of bounds or when trying to set the gradient of a tensor that does not require gradients.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is O(n) due to the memory copying operations, where n is the number of elements in the tensor (<code>_size</code>).</li>
<li>Ensure that the CUDA runtime environment is properly initialized and the device memory is valid before calling this function.</li>
<li>Ensure that the <code>position</code> is within the valid range of the tensor's shape to avoid exceptions.</li>
<li>If setting the gradient, ensure that the tensor requires gradients.</li>
</ul>
</dd></dl>
<dl class="section warning"><dt>Warning</dt><dd><ul>
<li>If any of the <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a></code> operations fail, the behavior of this function is undefined.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_dimension.html">Tensor::shape_type</a> position = {0, 0, 0, 0};</div>
<div class="line">Tensor::value_type value = 1.0;</div>
<div class="line"><span class="keywordtype">bool</span> isGrad = <span class="keyword">false</span>;</div>
<div class="line"><span class="keywordflow">try</span> {</div>
<div class="line">    tensor.setData(position, value, isGrad);</div>
<div class="line">} <span class="keywordflow">catch</span> (<span class="keyword">const</span> std::invalid_argument&amp; e) {</div>
<div class="line">    std::cerr &lt;&lt; e.what() &lt;&lt; std::endl;</div>
<div class="line">}</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00410">410</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a2f9be06ac6766a5fa6de3548c722ef43_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a2f9be06ac6766a5fa6de3548c722ef43_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a2f9be06ac6766a5fa6de3548c722ef43_cgraph" id="aclassnz_1_1data_1_1_tensor_a2f9be06ac6766a5fa6de3548c722ef43_cgraph">
<area shape="rect" title="Sets the value of an element in the tensor or its gradient at a specified position." alt="" coords="5,131,175,157"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a38ba233ef49f34620297f96edd962c55" title="Retrieves a pointer to the tensor&#39;s data stored in GPU memory." alt="" coords="241,5,390,32"/>
<area shape="poly" title=" " alt="" coords="104,128,153,86,186,62,221,42,236,35,238,40,224,46,189,67,157,90,107,132"/>
<area shape="rect" href="classnz_1_1data_1_1_dimension.html#a4831fea5aaf7dbad3578d3fa8e55aef1" title="Retrieves the stride value at a specified index within the Dimension object." alt="" coords="246,56,384,99"/>
<area shape="poly" title=" " alt="" coords="137,127,230,100,232,105,139,133"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="223,123,408,165"/>
<area shape="poly" title=" " alt="" coords="175,141,207,141,207,147,175,147"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="223,189,408,232"/>
<area shape="poly" title=" " alt="" coords="139,155,227,182,225,187,137,161"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="223,256,408,299"/>
<area shape="poly" title=" " alt="" coords="108,156,158,197,224,242,234,247,232,252,221,246,155,202,105,160"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="456,156,641,199"/>
<area shape="poly" title=" " alt="" coords="408,195,440,190,441,195,409,200"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="456,223,641,265"/>
<area shape="poly" title=" " alt="" coords="409,221,441,226,440,231,408,227"/>
</map>
</div>

</div>
</div>
<a id="abddb47a6dc305d289a1e4f91d01a5082" name="abddb47a6dc305d289a1e4f91d01a5082"></a>
<h2 class="memtitle"><span class="permalink"><a href="#abddb47a6dc305d289a1e4f91d01a5082">&#9670;&#160;</a></span>setRequiresGrad()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::setRequiresGrad </td>
          <td>(</td>
          <td class="paramtype">bool</td>          <td class="paramname"><span class="paramname"><em>requires_grad</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Sets whether the tensor requires gradient computation. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">requires_grad</td><td>A boolean indicating whether gradient computation is required.</td></tr>
  </table>
  </dd>
</dl>
<p>This function allows you to enable or disable gradient tracking for the tensor. If gradient computation is enabled, additional memory may be allocated for storing gradients.</p>
<dl class="section note"><dt>Note</dt><dd>Modifying this setting does not affect any existing gradient data stored in the tensor. </dd></dl>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00229">229</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_abddb47a6dc305d289a1e4f91d01a5082_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_abddb47a6dc305d289a1e4f91d01a5082_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_abddb47a6dc305d289a1e4f91d01a5082_cgraph" id="aclassnz_1_1data_1_1_tensor_abddb47a6dc305d289a1e4f91d01a5082_cgraph">
<area shape="rect" title="Sets whether the tensor requires gradient computation." alt="" coords="5,80,227,107"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="275,5,460,48"/>
<area shape="poly" title=" " alt="" coords="169,77,269,50,270,55,170,82"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="275,72,460,115"/>
<area shape="poly" title=" " alt="" coords="227,91,259,91,259,96,227,96"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="275,139,460,181"/>
<area shape="poly" title=" " alt="" coords="170,105,270,132,269,137,169,110"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="508,5,693,48"/>
<area shape="poly" title=" " alt="" coords="460,24,492,24,492,29,460,29"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="508,139,693,181"/>
<area shape="poly" title=" " alt="" coords="460,157,492,157,492,163,460,163"/>
</map>
</div>

</div>
</div>
<a id="aade7b0c42622279888d755f4f7989aac" name="aade7b0c42622279888d755f4f7989aac"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aade7b0c42622279888d755f4f7989aac">&#9670;&#160;</a></span>shape()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname"><a class="el" href="classnz_1_1data_1_1_dimension.html">Tensor::shape_type</a> nz::data::Tensor::shape </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span><span class="mlabel">noexcept</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Retrieves the shape of the tensor. </p>
<dl class="section return"><dt>Returns</dt><dd>A <code>shape_type</code> (alias for <code>std::vector&lt;int&gt;</code>) representing the dimensions of the tensor.</dd></dl>
<p>The shape provides information about the size of each dimension in the tensor. For example, a tensor with shape <code>{2, 3}</code> represents a 2x3 matrix. The shape is defined during construction or reshaping of the tensor. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00225">225</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a31a3aa01fa3ccb56503994a99e39e177" name="a31a3aa01fa3ccb56503994a99e39e177"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a31a3aa01fa3ccb56503994a99e39e177">&#9670;&#160;</a></span>size()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::size_type nz::data::Tensor::size </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span><span class="mlabel">noexcept</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Retrieves the total number of elements in the tensor. </p>
<dl class="section return"><dt>Returns</dt><dd>A <code>size_type</code> (alias for <code>unsigned long long</code>) representing the total number of elements.</dd></dl>
<p>This function calculates the product of the dimensions in the tensor's shape. For example, a tensor with shape <code>{2, 3}</code> will have a size of 6. This value is useful for memory allocation and tensor operations. </p>

<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00226">226</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a4a657091dfa6a490d873ab8e95d9bb9e" name="a4a657091dfa6a490d873ab8e95d9bb9e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a4a657091dfa6a490d873ab8e95d9bb9e">&#9670;&#160;</a></span>sum() <span class="overload">[1/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::sum </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Compute the sum of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="section return"><dt>Returns</dt><dd>The sum of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> as a value of type <code>Tensor::value_type</code>.</dd></dl>
<p>This function calculates the sum of all elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> using CUDA parallel processing. It first determines the block and grid dimensions for the CUDA kernel. Then, it allocates device memory for intermediate results and host memory to store the results copied from the device. The <code><a class="el" href="namespacenz_1_1krnl.html#a1ae846a65c2f5b83cd1b9fc61b877854" title="Kernel function to perform element-wise summation of two arrays.">krnl::Summation</a></code> CUDA kernel is launched to perform partial sums on the device. After the kernel execution, the partial sums are copied from the device to the host using <code>cudaMemcpy</code>. Finally, the partial sums on the host are added together to obtain the total sum, and the allocated host and device memory are freed.</p>
<p>Memory management:</p><ul>
<li>Host memory is allocated for <code>hData</code> using <code>new[]</code> and freed using <code>delete[]</code>.</li>
<li>Device memory is allocated for <code>dData</code> using <code>cudaMalloc</code> and freed using <code>cudaFree</code>.</li>
</ul>
<p>Exception handling:</p><ul>
<li>The <code>CHECK</code> macro is used to handle CUDA API errors. If a CUDA API call fails, the <code>CHECK</code> macro will throw an exception, and the function will terminate.</li>
</ul>
<p>Relationship with other components:</p><ul>
<li>This function depends on the <code><a class="el" href="namespacenz_1_1krnl.html#a1ae846a65c2f5b83cd1b9fc61b877854" title="Kernel function to perform element-wise summation of two arrays.">krnl::Summation</a></code> CUDA kernel to perform partial sums on the device.</li>
<li>It also depends on the <code>CHECK</code> macro to handle CUDA API errors.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">Exception</td><td>(type thrown by the <code>CHECK</code> macro) If there are CUDA API errors during memory allocation, kernel execution, or memory copying.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function is approximately O(n), where n is the number of elements in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a> (<code>_size</code>). The CUDA kernel parallelizes the partial sum calculation, and the final sum on the host is a linear operation over the number of grid blocks.</li>
<li>Ensure that the CUDA device is properly initialized before calling this function.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">nz::data::Tensor</a> tensor({2, 3}, <span class="keyword">true</span>);</div>
<div class="line"><span class="comment">// Assume tensor is filled with some values</span></div>
<div class="line">nz::data::Tensor::value_type sum_result = tensor.<a class="code hl_function" href="#a4a657091dfa6a490d873ab8e95d9bb9e">sum</a>();</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a4a657091dfa6a490d873ab8e95d9bb9e"><div class="ttname"><a href="#a4a657091dfa6a490d873ab8e95d9bb9e">nz::data::Tensor::sum</a></div><div class="ttdeci">value_type sum() const</div><div class="ttdoc">Compute the sum of all elements in the Tensor.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00565">Tensor.cu:565</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00565">565</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a4a657091dfa6a490d873ab8e95d9bb9e_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a4a657091dfa6a490d873ab8e95d9bb9e_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a4a657091dfa6a490d873ab8e95d9bb9e_cgraph" id="aclassnz_1_1data_1_1_tensor_a4a657091dfa6a490d873ab8e95d9bb9e_cgraph">
<area shape="rect" title="Compute the sum of all elements in the Tensor." alt="" coords="5,160,154,187"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="202,5,387,48"/>
<area shape="poly" title=" " alt="" coords="92,157,138,113,200,64,221,53,223,57,203,69,141,117,96,161"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="435,35,620,77"/>
<area shape="poly" title=" " alt="" coords="101,157,146,132,201,108,256,92,312,80,419,63,420,68,313,85,257,97,203,113,148,137,104,162"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="435,181,620,224"/>
<area shape="poly" title=" " alt="" coords="154,175,420,193,420,198,154,181"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="202,123,387,165"/>
<area shape="poly" title=" " alt="" coords="153,161,186,156,187,161,154,166"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="435,315,620,357"/>
<area shape="poly" title=" " alt="" coords="94,185,139,231,169,256,203,276,256,297,312,312,420,329,419,334,311,318,254,302,200,281,166,260,135,235,90,189"/>
<area shape="rect" href="namespacenz_1_1krnl.html#a1ae846a65c2f5b83cd1b9fc61b877854" title="Kernel function to perform element&#45;wise summation of two arrays." alt="" coords="225,240,364,267"/>
<area shape="poly" title=" " alt="" coords="116,185,203,219,239,232,237,237,201,224,114,190"/>
<area shape="poly" title=" " alt="" coords="388,36,420,40,419,45,387,41"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="668,248,854,291"/>
<area shape="poly" title=" " alt="" coords="387,136,441,136,501,140,562,150,621,167,652,181,680,198,728,235,725,240,677,202,649,185,619,172,561,155,500,146,441,141,387,141"/>
<area shape="poly" title=" " alt="" coords="604,312,668,293,670,298,605,317"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="668,315,854,357"/>
<area shape="poly" title=" " alt="" coords="621,333,653,333,653,339,621,339"/>
<area shape="poly" title=" " alt="" coords="359,237,419,224,420,229,360,242"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="435,248,620,291"/>
<area shape="poly" title=" " alt="" coords="364,255,420,259,419,265,363,261"/>
<area shape="poly" title=" " alt="" coords="621,267,653,267,653,272,621,272"/>
<area shape="poly" title=" " alt="" coords="605,289,670,307,668,312,604,294"/>
</map>
</div>

</div>
</div>
<a id="a74aa515ba6b83aa1d05a7bb001b297b3" name="a74aa515ba6b83aa1d05a7bb001b297b3"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a74aa515ba6b83aa1d05a7bb001b297b3">&#9670;&#160;</a></span>sum() <span class="overload">[2/2]</span></h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">Tensor::value_type nz::data::Tensor::sum </td>
          <td>(</td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>batch</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">size_type</td>          <td class="paramname"><span class="paramname"><em>channel</em></span>&#160;) const</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">nodiscard</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Computes the sum of elements in a specific batch and channel of a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">batch</td><td>The batch index. This value should be within the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s batch dimension. Memory flow: host-to-device (used for index calculation on the host side). </td></tr>
    <tr><td class="paramname">channel</td><td>The channel index. This value should be within the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s channel dimension. Memory flow: host-to-device (used for index calculation on the host side).</td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The sum of elements in the specified batch and channel of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>.</dd></dl>
<p>This function calculates the sum of elements in a particular batch and channel of a <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>. First, it checks if the provided <code>batch</code> and <code>channel</code> indices are valid. If not, it throws a <code>std::invalid_argument</code> exception. Then, it calculates the size of the region to be summed based on the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape. It allocates device memory for intermediate results and host memory to receive the intermediate results from the device. It determines the offset in the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s data based on the <code>batch</code> and <code>channel</code> indices. The <code><a class="el" href="namespacenz_1_1krnl.html#a1ae846a65c2f5b83cd1b9fc61b877854" title="Kernel function to perform element-wise summation of two arrays.">krnl::Summation</a></code> kernel is then launched to perform the partial summation on the device. After that, the intermediate results are copied from the device to the host. Finally, the function sums up all the intermediate results on the host, frees the allocated host and device memory, and returns the final sum.</p>
<p><b>Memory Management Strategy</b>:</p><ul>
<li>On the host side, an array <code>hData</code> of size <code>grid.x</code> is dynamically allocated using <code>new[]</code> and later freed using <code>delete[]</code>.</li>
<li>On the device side, memory for <code>dData</code> is allocated using <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance().malloc</code> and freed using <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance().free</code>.</li>
</ul>
<p><b>Exception Handling Mechanism</b>:</p><ul>
<li>Throws a <code>std::invalid_argument</code> exception if the provided <code>batch</code> or <code>channel</code> indices are out of the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape.</li>
<li>The CUDA memory allocation, copying, and kernel launch operations may return error codes indicating failures. It is assumed that the calling code or the CUDA runtime will handle these errors appropriately.</li>
</ul>
<p><b>Relationship with Other Components</b>:</p><ul>
<li>Depends on the <code>_shape</code> member of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class to get the shape information and strides.</li>
<li>Uses the <code><a class="el" href="namespacenz_1_1krnl.html#a1ae846a65c2f5b83cd1b9fc61b877854" title="Kernel function to perform element-wise summation of two arrays.">krnl::Summation</a></code> kernel to perform the partial summation on the device.</li>
<li>Relies on <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a>&lt;value_type&gt;::Instance()</code> for CUDA memory management (malloc, memcpy, free) operations.</li>
</ul>
<dl class="exception"><dt>Exceptions</dt><dd>
  <table class="exception">
    <tr><td class="paramname">std::invalid_argument</td><td>If the provided <code>batch</code> or <code>channel</code> indices are out of the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape.</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>Ensure that the provided <code>batch</code> and <code>channel</code> indices are within the valid range of the <a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a>'s shape to avoid exceptions.</li>
<li>The CUDA operations such as memory allocation, copying, and kernel launch have their own error handling mechanisms. The calling code should be prepared to handle potential CUDA errors.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor; <span class="comment">// Assume Tensor is properly initialized</span></div>
<div class="line">Tensor::size_type batch = 0;</div>
<div class="line">Tensor::size_type channel = 1;</div>
<div class="line">Tensor::value_type sumResult = tensor.<a class="code hl_function" href="#a4a657091dfa6a490d873ab8e95d9bb9e">sum</a>(batch, channel);</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00584">584</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a74aa515ba6b83aa1d05a7bb001b297b3_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a74aa515ba6b83aa1d05a7bb001b297b3_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a74aa515ba6b83aa1d05a7bb001b297b3_cgraph" id="aclassnz_1_1data_1_1_tensor_a74aa515ba6b83aa1d05a7bb001b297b3_cgraph">
<area shape="rect" title="Computes the sum of elements in a specific batch and channel of a Tensor." alt="" coords="5,173,154,200"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a785cf34395067f425e032d9bd5e1fa20" title="Frees the CUDA device memory pointed to by the given pointer." alt="" coords="202,5,387,48"/>
<area shape="poly" title=" " alt="" coords="90,171,135,121,166,91,200,64,219,53,222,58,203,69,170,95,139,124,94,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="435,19,620,61"/>
<area shape="poly" title=" " alt="" coords="106,170,201,127,313,90,419,62,420,67,314,95,203,132,108,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="435,99,620,141"/>
<area shape="poly" title=" " alt="" coords="142,170,201,159,419,129,420,134,202,164,143,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="202,275,387,317"/>
<area shape="poly" title=" " alt="" coords="104,198,203,254,228,265,225,270,200,258,102,203"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#afa38d5c6db0e6b48c8f74ce8ad0df2bc" title="Asynchronously copies data between CUDA device and host memory based on the specified memory copy kin..." alt="" coords="435,232,620,275"/>
<area shape="poly" title=" " alt="" coords="143,198,202,209,420,239,419,245,201,215,142,203"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a31a3aa01fa3ccb56503994a99e39e177" title="Retrieves the total number of elements in the tensor." alt="" coords="221,341,368,368"/>
<area shape="poly" title=" " alt="" coords="91,199,133,261,165,297,203,327,213,332,210,337,200,332,162,301,129,264,86,202"/>
<area shape="rect" href="namespacenz_1_1krnl.html#a1ae846a65c2f5b83cd1b9fc61b877854" title="Kernel function to perform element&#45;wise summation of two arrays." alt="" coords="225,173,364,200"/>
<area shape="poly" title=" " alt="" coords="154,184,210,184,210,189,154,189"/>
<area shape="poly" title=" " alt="" coords="388,29,420,31,419,36,387,35"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="668,232,854,275"/>
<area shape="poly" title=" " alt="" coords="387,296,499,294,620,284,665,276,666,281,621,289,499,299,387,301"/>
<area shape="poly" title=" " alt="" coords="621,251,653,251,653,256,621,256"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="668,165,854,208"/>
<area shape="poly" title=" " alt="" coords="604,229,668,210,670,215,605,234"/>
<area shape="poly" title=" " alt="" coords="343,170,435,144,436,149,345,175"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="435,165,620,208"/>
<area shape="poly" title=" " alt="" coords="364,184,419,184,419,189,364,189"/>
<area shape="poly" title=" " alt="" coords="605,206,670,225,668,230,604,211"/>
<area shape="poly" title=" " alt="" coords="621,184,653,184,653,189,621,189"/>
</map>
</div>

</div>
</div>
<a id="a0c150b841f02921eb7826a6e03d0267e" name="a0c150b841f02921eb7826a6e03d0267e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a0c150b841f02921eb7826a6e03d0267e">&#9670;&#160;</a></span>sync()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::sync </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Synchronize both the tensor data and its gradient data. </p>
<p>This function calls the <code>syncData</code> method to synchronize the tensor data and then calls the <code>syncGrad</code> method to synchronize the gradient data if gradient computation is required. It ensures that all CUDA stream write operations on the data and gradient (if applicable) are completed.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None</dd></dl>
<p>Memory management for the data and gradient is assumed to be handled by the <code>syncData</code> and <code>syncGrad</code> methods respectively. There is no additional memory allocation or deallocation within this function. This function does not have an explicit exception-handling mechanism. It relies on the exception handling of the <code>syncData</code> and <code>syncGrad</code> methods to manage any errors that may occur during the synchronization process.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function depends on the time complexity of the <code>syncData</code> and <code>syncGrad</code> methods. In the worst-case scenario, if both operations involve long-running CUDA stream write operations, it may take a significant amount of time.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><span class="comment">// Assume Tensor is defined and an instance is created</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">tensor.<a class="code hl_function" href="#a0c150b841f02921eb7826a6e03d0267e">sync</a>();</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a0c150b841f02921eb7826a6e03d0267e"><div class="ttname"><a href="#a0c150b841f02921eb7826a6e03d0267e">nz::data::Tensor::sync</a></div><div class="ttdeci">void sync() const</div><div class="ttdoc">Synchronize both the tensor data and its gradient data.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00747">Tensor.cu:747</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00747">747</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a0c150b841f02921eb7826a6e03d0267e_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a0c150b841f02921eb7826a6e03d0267e_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a0c150b841f02921eb7826a6e03d0267e_cgraph" id="aclassnz_1_1data_1_1_tensor_a0c150b841f02921eb7826a6e03d0267e_cgraph">
<area shape="rect" title="Synchronize both the tensor data and its gradient data." alt="" coords="5,47,157,73"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#a7aab89d371ff013c5c021a191bd7348e" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="205,21,384,48"/>
<area shape="poly" title=" " alt="" coords="157,48,189,45,189,50,158,54"/>
<area shape="rect" href="classnz_1_1data_1_1_tensor.html#af28425ddc9bee1f75fd923a0de68c37b" title="Synchronize the gradient data of the tensor if gradient computation is required." alt="" coords="205,72,384,99"/>
<area shape="poly" title=" " alt="" coords="158,66,189,70,189,75,157,72"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="432,5,617,48"/>
<area shape="poly" title=" " alt="" coords="384,29,416,28,417,33,384,34"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="432,72,617,115"/>
<area shape="poly" title=" " alt="" coords="350,46,424,65,423,70,349,51"/>
<area shape="poly" title=" " alt="" coords="349,69,423,50,424,55,350,74"/>
<area shape="poly" title=" " alt="" coords="384,86,417,87,416,92,384,91"/>
</map>
</div>

</div>
</div>
<a id="a7aab89d371ff013c5c021a191bd7348e" name="a7aab89d371ff013c5c021a191bd7348e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a7aab89d371ff013c5c021a191bd7348e">&#9670;&#160;</a></span>syncData()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::syncData </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Synchronize the tensor data by waiting for all CUDA stream write operations to complete. </p>
<p>This function accesses the singleton instance of <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a></code> specialized for the <code>value_type</code> of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class. It then calls the <code>syncData</code> method of this instance, passing the <code>_data</code> member of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> object. This operation blocks the host until all CUDA stream write operations on the <code>_data</code> are finished.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None</dd></dl>
<p>Memory management for the <code>_data</code> is assumed to be handled elsewhere in the codebase. There is no memory allocation or deallocation within this function. This function does not have an explicit exception-handling mechanism. It relies on the <code>syncData</code> method of the <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a></code> instance to manage any errors that may occur during the synchronization process.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function depends on the time required for the CUDA stream write operations on <code>_data</code> to complete. In the worst-case scenario, if there are long-running write operations, it may take a significant amount of time.</li>
</ul>
</dd></dl>
<div class="fragment">
<div class="line"><span class="comment">// Assume Tensor is defined and an instance is created</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">tensor.<a class="code hl_function" href="#a7aab89d371ff013c5c021a191bd7348e">syncData</a>();</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a7aab89d371ff013c5c021a191bd7348e"><div class="ttname"><a href="#a7aab89d371ff013c5c021a191bd7348e">nz::data::Tensor::syncData</a></div><div class="ttdeci">void syncData() const</div><div class="ttdoc">Synchronize the tensor data by waiting for all CUDA stream write operations to complete.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00737">Tensor.cu:737</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00737">737</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a7aab89d371ff013c5c021a191bd7348e_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a7aab89d371ff013c5c021a191bd7348e_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a7aab89d371ff013c5c021a191bd7348e_cgraph" id="aclassnz_1_1data_1_1_tensor_a7aab89d371ff013c5c021a191bd7348e_cgraph">
<area shape="rect" title="Synchronize the tensor data by waiting for all CUDA stream write operations to complete." alt="" coords="5,47,185,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="233,5,418,48"/>
<area shape="poly" title=" " alt="" coords="185,44,217,40,217,45,185,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="233,72,418,115"/>
<area shape="poly" title=" " alt="" coords="185,70,217,75,217,80,185,76"/>
</map>
</div>

</div>
</div>
<a id="af28425ddc9bee1f75fd923a0de68c37b" name="af28425ddc9bee1f75fd923a0de68c37b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af28425ddc9bee1f75fd923a0de68c37b">&#9670;&#160;</a></span>syncGrad()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::syncGrad </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Synchronize the gradient data of the tensor if gradient computation is required. </p>
<p>This function first checks the <code>_requires_grad</code> flag of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> object. If the flag is set to <code>true</code>, it accesses the singleton instance of <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a></code> specialized for the <code>value_type</code> of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class. Then it calls the <code>syncData</code> method of this instance, passing the <code>_grad</code> member of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> object. This operation blocks the host until all CUDA stream write operations on the <code>_grad</code> are completed.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">None</td><td></td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>None</dd></dl>
<p>Memory management for the <code>_grad</code> is assumed to be handled elsewhere in the codebase. There is no memory allocation or deallocation within this function. This function does not have an explicit exception-handling mechanism. It relies on the <code>syncData</code> method of the <code><a class="el" href="classnz_1_1cu_strm_1_1_stream_manager.html" title="Centralized CUDA stream and resource management system with automatic dependency tracking.">cuStrm::StreamManager</a></code> instance to manage any errors that may occur during the synchronization process.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The time complexity of this function depends on whether the <code>_requires_grad</code> flag is <code>true</code> and the time required for the CUDA stream write operations on <code>_grad</code> to complete. If <code>_requires_grad</code> is <code>false</code>, the function has a constant time complexity O(1). Otherwise, in the worst-case scenario with long-running write operations, it may take a significant amount of time.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">```cpp</div>
<div class="line"><span class="comment">// Assume Tensor is defined and an instance is created</span></div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor;</div>
<div class="line">tensor.<a class="code hl_function" href="#af28425ddc9bee1f75fd923a0de68c37b">syncGrad</a>();</div>
<div class="line">```</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_af28425ddc9bee1f75fd923a0de68c37b"><div class="ttname"><a href="#af28425ddc9bee1f75fd923a0de68c37b">nz::data::Tensor::syncGrad</a></div><div class="ttdeci">void syncGrad() const</div><div class="ttdoc">Synchronize the gradient data of the tensor if gradient computation is required.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00741">Tensor.cu:741</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00741">741</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_af28425ddc9bee1f75fd923a0de68c37b_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_af28425ddc9bee1f75fd923a0de68c37b_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_af28425ddc9bee1f75fd923a0de68c37b_cgraph" id="aclassnz_1_1data_1_1_tensor_af28425ddc9bee1f75fd923a0de68c37b_cgraph">
<area shape="rect" title="Synchronize the gradient data of the tensor if gradient computation is required." alt="" coords="5,47,185,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="233,5,418,48"/>
<area shape="poly" title=" " alt="" coords="185,44,217,40,217,45,185,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#abe439fa00c0bd369c0b2345b095ed5af" title="Synchronizes host thread with completion events for a specific data object." alt="" coords="233,72,418,115"/>
<area shape="poly" title=" " alt="" coords="185,70,217,75,217,80,185,76"/>
</map>
</div>

</div>
</div>
<a id="a45e6f84ae74111ced9a96bdf204b2294" name="a45e6f84ae74111ced9a96bdf204b2294"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a45e6f84ae74111ced9a96bdf204b2294">&#9670;&#160;</a></span>transpose()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::transpose </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Transposes the tensor by swapping its dimensions and rearranging the data. </p>
<p>This function performs a transpose on the tensor by swapping its rows and columns. For a 2D tensor (matrix), it swaps the first and second dimensions, effectively turning the rows into columns and vice versa. The tensor's data is rearranged using a temporary buffer, and the shape is updated accordingly. The data is first copied to a temporary memory space, then a CUDA kernel is used to perform the transposition.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function involves memory allocation and data copying. It creates a temporary tensor in GPU memory to hold the transposed data.</li>
<li>After the transposition, the tensor's shape is updated, and the temporary buffer is freed.</li>
<li>The function physically rearranges the tensor&#39;s data into the transposed layout via a temporary buffer, rather than merely reinterpreting the existing data with a new shape.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">```cpp</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});  <span class="comment">// Create a tensor with shape 2x3</span></div>
<div class="line">tensor.<a class="code hl_function" href="#a45e6f84ae74111ced9a96bdf204b2294">transpose</a>();     <span class="comment">// Transpose the tensor to shape 3x2</span></div>
<div class="line">```</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a45e6f84ae74111ced9a96bdf204b2294"><div class="ttname"><a href="#a45e6f84ae74111ced9a96bdf204b2294">nz::data::Tensor::transpose</a></div><div class="ttdeci">void transpose()</div><div class="ttdoc">Transposes the tensor by swapping its dimensions and rearranging the data.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00385">Tensor.cu:385</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00385">385</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a45e6f84ae74111ced9a96bdf204b2294_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a45e6f84ae74111ced9a96bdf204b2294_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a45e6f84ae74111ced9a96bdf204b2294_cgraph" id="aclassnz_1_1data_1_1_tensor_a45e6f84ae74111ced9a96bdf204b2294_cgraph">
<area shape="rect" title="Transposes the tensor by swapping its dimensions and rearranging the data." alt="" coords="5,109,186,136"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1084057ef6f5b2871c60702209bb4469" title="Asynchronously frees the CUDA device memory pointed to by the given pointer." alt="" coords="467,60,652,103"/>
<area shape="poly" title=" " alt="" coords="186,112,451,88,452,94,186,117"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="467,193,652,236"/>
<area shape="poly" title=" " alt="" coords="129,134,178,153,234,171,346,192,452,205,451,210,345,197,233,176,176,158,127,139"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a97f78a2d43f6e0508c82d4f3b629de96" title="Asynchronously allocates device memory for type&#45;specific data with stream&#45;ordered dependency tracking..." alt="" coords="234,5,419,48"/>
<area shape="poly" title=" " alt="" coords="129,106,258,52,260,57,131,111"/>
<area shape="rect" href="namespacenz_1_1krnl.html#afe3f38f788c735b7eb718443eb0fd094" title="Kernel function to transpose a matrix on the GPU." alt="" coords="260,135,393,161"/>
<area shape="poly" title=" " alt="" coords="186,130,245,136,244,142,185,135"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="700,60,886,103"/>
<area shape="poly" title=" " alt="" coords="653,79,685,79,685,84,653,84"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="700,127,886,169"/>
<area shape="poly" title=" " alt="" coords="637,101,702,119,700,124,636,106"/>
<area shape="poly" title=" " alt="" coords="420,26,531,32,653,45,696,53,695,59,652,51,531,37,419,31"/>
<area shape="poly" title=" " alt="" coords="377,159,468,186,467,191,375,165"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="467,127,652,169"/>
<area shape="poly" title=" " alt="" coords="393,145,451,145,451,151,393,151"/>
<area shape="poly" title=" " alt="" coords="636,124,700,105,702,110,637,129"/>
<area shape="poly" title=" " alt="" coords="653,145,685,145,685,151,653,151"/>
</map>
</div>

</div>
</div>
<a id="a6fed8efad540a7621dd6640b2f2466d0" name="a6fed8efad540a7621dd6640b2f2466d0"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a6fed8efad540a7621dd6640b2f2466d0">&#9670;&#160;</a></span>zeroGrad()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void nz::data::Tensor::zeroGrad </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td> const</td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Resets the gradient data to zero. </p>
<p>This function sets the gradient data of the tensor to zero. It is typically used during training in neural networks to clear the gradients before the next backpropagation pass. The gradient memory will remain allocated, but its contents will be zeroed out.</p>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This function does not deallocate the gradient memory; it only resets the stored gradient values.</li>
<li>The tensor must have been created with <code>requires_grad</code> set to <code>true</code>, otherwise the function does nothing.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">```cpp</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3}, <span class="keyword">true</span>);  <span class="comment">// Create a tensor with gradient support</span></div>
<div class="line">tensor.<a class="code hl_function" href="#a6fed8efad540a7621dd6640b2f2466d0">zeroGrad</a>();  <span class="comment">// Reset the gradients to zero</span></div>
<div class="line">```</div>
<div class="ttc" id="aclassnz_1_1data_1_1_tensor_html_a6fed8efad540a7621dd6640b2f2466d0"><div class="ttname"><a href="#a6fed8efad540a7621dd6640b2f2466d0">nz::data::Tensor::zeroGrad</a></div><div class="ttdeci">void zeroGrad() const</div><div class="ttdoc">Resets the gradient data to zero.</div><div class="ttdef"><b>Definition</b> <a href="_tensor_8cu_source.html#l00246">Tensor.cu:246</a></div></div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00246">246</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1data_1_1_tensor_a6fed8efad540a7621dd6640b2f2466d0_cgraph.png" border="0" usemap="#aclassnz_1_1data_1_1_tensor_a6fed8efad540a7621dd6640b2f2466d0_cgraph" alt=""/></div>
<map name="aclassnz_1_1data_1_1_tensor_a6fed8efad540a7621dd6640b2f2466d0_cgraph" id="aclassnz_1_1data_1_1_tensor_a6fed8efad540a7621dd6640b2f2466d0_cgraph">
<area shape="rect" title="Resets the gradient data to zero." alt="" coords="5,47,182,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="230,5,415,48"/>
<area shape="poly" title=" " alt="" coords="182,44,214,40,215,45,183,50"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a71ad766cb2869d3dd6a3931966e81706" title="Asynchronously sets a block of CUDA device memory to a specified value." alt="" coords="230,72,415,115"/>
<area shape="poly" title=" " alt="" coords="183,70,215,75,214,80,182,76"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="463,39,648,81"/>
<area shape="poly" title=" " alt="" coords="415,77,447,73,448,78,416,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="463,105,648,148"/>
<area shape="poly" title=" " alt="" coords="416,104,448,109,447,114,415,109"/>
</map>
</div>

</div>
</div>
<h2 class="groupheader">Friends And Related Symbol Documentation</h2>
<a id="ab8eaa8e06861a868b7df1a9ee0616a1a" name="ab8eaa8e06861a868b7df1a9ee0616a1a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ab8eaa8e06861a868b7df1a9ee0616a1a">&#9670;&#160;</a></span>operator&lt;&lt;</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">DL_API std::ostream &amp; operator&lt;&lt; </td>
          <td>(</td>
          <td class="paramtype">std::ostream &amp;</td>          <td class="paramname"><span class="paramname"><em>os</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>tensor</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">friend</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Overloads the <code>&lt;&lt;</code> operator to print the tensor's data to an output stream. </p>
<p>This function is a friend of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class and provides an overloaded version of the output stream operator (<code>&lt;&lt;</code>) to print the contents of a tensor to the specified output stream (e.g., <code>std::cout</code> or a file stream).</p>
<p>The tensor's data is first copied from GPU memory to host memory for printing, and then the data is printed in a 2D matrix format. Each row of the tensor is printed on a new line, and each element in a row is separated by a space. Each row is enclosed in square brackets.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">os</td><td>The output stream to which the tensor will be printed. </td></tr>
    <tr><td class="paramname">tensor</td><td>The tensor whose contents will be printed. </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The output stream (<code>os</code>) after the tensor has been printed, allowing for chaining of operations.</dd></dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This operator works by accessing the tensor's private data members (e.g., <code>_data</code>) directly.</li>
<li>The tensor's data is assumed to be in a valid state (i.e., properly allocated in GPU memory) before printing.</li>
<li>The function copies the tensor's data from device (GPU) memory to host (CPU) memory using <code>cudaMemcpy</code>, which may introduce performance overhead for large tensors.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">```cpp</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});</div>
<div class="line">tensor.<a class="code hl_function" href="#ad220de56b18c404611f07f2290cd7e9d">fill</a>(1.0f);  <span class="comment">// Fill the tensor with 1.0f</span></div>
<div class="line">std::cout &lt;&lt; tensor &lt;&lt; std::endl;  <span class="comment">// Prints the tensor to standard output in matrix format</span></div>
<div class="line">```</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00039">39</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<a id="a1ae147fdd4255f7d148aef41e3e436a9" name="a1ae147fdd4255f7d148aef41e3e436a9"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1ae147fdd4255f7d148aef41e3e436a9">&#9670;&#160;</a></span>operator&gt;&gt;</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">DL_API std::istream &amp; operator&gt;&gt; </td>
          <td>(</td>
          <td class="paramtype">std::istream &amp;</td>          <td class="paramname"><span class="paramname"><em>is</em></span>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="classnz_1_1data_1_1_tensor.html">Tensor</a> &amp;</td>          <td class="paramname"><span class="paramname"><em>tensor</em></span>&#160;)</td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">friend</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Overloads the <code>&gt;&gt;</code> operator to read a tensor's data from an input stream. </p>
<p>This function is a friend of the <code><a class="el" href="classnz_1_1data_1_1_tensor.html" title="A class for representing and manipulating multidimensional arrays (tensors) in GPU memory.">Tensor</a></code> class and provides an overloaded version of the input stream operator (<code>&gt;&gt;</code>) to read the contents of a tensor from the specified input stream (e.g., <code>std::cin</code> or a file stream).</p>
<p>The function reads the tensor's data element by element from the input stream and stores the values in a temporary buffer. Once all the data has been read, it is copied from the host memory back into the tensor's GPU memory using <code>cudaMemcpy</code>.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">is</td><td>The input stream from which the tensor's data will be read. </td></tr>
    <tr><td class="paramname">tensor</td><td>The tensor to which the data will be read. </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The input stream (<code>is</code>) after reading the tensor's data, allowing for chaining of operations.</dd></dl>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This operator works by reading data from the input stream and storing it in a temporary buffer on the host.</li>
<li>The function assumes that the input data matches the size of the tensor. If the data is malformed or does not match, the behavior may be undefined.</li>
<li>After reading, the data is copied from host memory back into the tensor's GPU memory.</li>
</ul>
</dd></dl>
<div class="fragment"><div class="line">```cpp</div>
<div class="line"><a class="code hl_class" href="classnz_1_1data_1_1_tensor.html">Tensor</a> tensor({2, 3});</div>
<div class="line">std::cin &gt;&gt; tensor;  <span class="comment">// Reads the tensor&#39;s data from standard input</span></div>
<div class="line">```</div>
</div><!-- fragment --> 
<p class="definition">Definition at line <a class="el" href="_tensor_8cu_source.html#l00076">76</a> of file <a class="el" href="_tensor_8cu_source.html">Tensor.cu</a>.</p>

</div>
</div>
<hr/>The documentation for this class was generated from the following files:<ul>
<li>D:/Users/Mgepahmge/Documents/C Program/NeuZephyr/include/NeuZephyr/<a class="el" href="_tensor_8cuh_source.html">Tensor.cuh</a></li>
<li>D:/Users/Mgepahmge/Documents/C Program/NeuZephyr/src/<a class="el" href="_tensor_8cu_source.html">Tensor.cu</a></li>
</ul>
</div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by&#160;<a href="https://www.doxygen.org/index.html"><img class="footer" src="doxygen.svg" width="104" height="31" alt="doxygen"/></a> 1.12.0
</small></address>
</div><!-- doc-content -->
</body>
</html>
