<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en-US">
<!-- Doxygen-generated source-listing page header (generator: Doxygen 1.12.0, see meta below).
     XHTML 1.0 Transitional conventions (self-closing void elements, explicit type attributes)
     are kept deliberately to match the doctype and the rest of the generated site. -->
<head>
<!-- FIX: "text/xhtml" is not a registered MIME type; XHTML served as HTML uses text/html.
     The charset declaration is preserved so local-file viewing still decodes as UTF-8. -->
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=11"/>
<meta name="generator" content="Doxygen 1.12.0"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>NeuZephyr: D:/Users/Mgepahmge/Documents/C Program/NeuZephyr/include/NeuZephyr/TensorOperations.cuh Source File</title>
<!-- FIX: the favicon asset is a PNG, so advertise image/png rather than image/x-icon
     (x-icon is the type for .ico files). -->
<link rel="icon" href="NZ_logo2.png" type="image/png" />
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- Site banner: project logo + name + brief. The ids below (top, titlearea,
     projectrow, projectlogo, projectalign, projectname, projectbrief) are
     styling/scripting hooks used by doxygen.css and the navtree/resize scripts;
     do not rename them. -->
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr id="projectrow">
  <td id="projectlogo"><img alt="Logo" src="NZ_logo2.png"/></td>
  <td id="projectalign">
   <div id="projectname">NeuZephyr
   </div>
   <div id="projectbrief">Simple DL Framework</div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.12.0 -->
<!-- Bootstraps code folding for the source fragment further down the page.
     NOTE(review): codefold is presumably provided by dynsections.js (loaded in
     <head>) — confirm against the shipped script. The @license/@license-end
     pair is a LibreJS annotation and must be kept intact. -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
$(function() { codefold.init(0); });
/* @license-end */
</script>
  <!-- Primary tab bar; "Files" is marked current because this is a file page. -->
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="pages.html"><span>Related&#160;Pages</span></a></li>
      <li><a href="namespaces.html"><span>Namespaces</span></a></li>
      <li><a href="annotated.html"><span>Classes</span></a></li>
      <li class="current"><a href="files.html"><span>Files</span></a></li>
    </ul>
  </div>
  <!-- Secondary tab bar for the Files section. -->
  <div id="navrow2" class="tabs2">
    <ul class="tablist">
      <li><a href="files.html"><span>File&#160;List</span></a></li>
    </ul>
  </div>
<!-- Initializes the resizable side/content split.
     NOTE(review): initResizable is presumably defined in resize.js (loaded in
     <head>) — confirm against the shipped script. LibreJS @license markers
     must be kept intact. -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
$(function(){ initResizable(false); });
/* @license-end */
</script>
<!-- Breadcrumb trail mirroring the on-disk directory path of the documented
     header; each dir_*.html target is a Doxygen-generated directory page. -->
<div id="nav-path" class="navpath">
  <ul>
<li class="navelem"><a class="el" href="dir_d522931ffa1371640980b621734a4381.html">Users</a></li><li class="navelem"><a class="el" href="dir_a7e6ee1ae3f772c9504a0b543f2027e2.html">Mgepahmge</a></li><li class="navelem"><a class="el" href="dir_e03f57e346cc4845a4c354a35630b169.html">Documents</a></li><li class="navelem"><a class="el" href="dir_231a0482af2b83c895f27ba7fe745141.html">C Program</a></li><li class="navelem"><a class="el" href="dir_0fa7fc3a0dfd304dbfc9dce9f6facfa2.html">NeuZephyr</a></li><li class="navelem"><a class="el" href="dir_e7295b03dab2e9cdf32139bd8ec2e607.html">include</a></li><li class="navelem"><a class="el" href="dir_657344ecc65cfc28732701509f8d8421.html">NeuZephyr</a></li>  </ul>
</div>
</div><!-- top -->
<div id="doc-content">
<div class="header">
  <div class="headertitle"><div class="title">TensorOperations.cuh</div></div>
</div><!--header-->
<div class="contents">
<div class="fragment"><div class="line"><a id="l00001" name="l00001"></a><span class="lineno">    1</span><span class="preprocessor">#ifndef TENSOROPERATIONS_CUH</span></div>
<div class="line"><a id="l00002" name="l00002"></a><span class="lineno">    2</span><span class="preprocessor">#define TENSOROPERATIONS_CUH</span></div>
<div class="line"><a id="l00003" name="l00003"></a><span class="lineno">    3</span><span class="preprocessor">#include &quot;dl_export.cuh&quot;</span></div>
<div class="line"><a id="l00004" name="l00004"></a><span class="lineno">    4</span><span class="preprocessor">#include &quot;<a class="code" href="_tensor_8cuh.html">Tensor.cuh</a>&quot;</span></div>
<div class="line"><a id="l00005" name="l00005"></a><span class="lineno">    5</span><span class="preprocessor">#include &quot;MappedTensor.cuh&quot;</span></div>
<div class="line"><a id="l00006" name="l00006"></a><span class="lineno">    6</span><span class="preprocessor">#include &quot;<a class="code" href="_operation_kernels_8cuh.html">OperationKernels.cuh</a>&quot;</span></div>
<div class="line"><a id="l00007" name="l00007"></a><span class="lineno">    7</span><span class="preprocessor">#include &quot;utils.cuh&quot;</span></div>
<div class="line"><a id="l00008" name="l00008"></a><span class="lineno">    8</span><span class="preprocessor">#define BLOCKSIZE 512</span></div>
<div class="line"><a id="l00009" name="l00009"></a><span class="lineno">    9</span> </div>
<div class="line"><a id="l00010" name="l00010"></a><span class="lineno">   10</span><span class="keyword">namespace </span><a class="code hl_namespace" href="namespacenz_1_1data.html">nz::data</a> {</div>
<div class="line"><a id="l00011" name="l00011"></a><span class="lineno">   11</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00012" name="l00012"></a><span class="lineno">   12</span>    <span class="keyword">struct </span>is_valid_tensor_type : std::disjunction&lt;</div>
<div class="line"><a id="l00013" name="l00013"></a><span class="lineno">   13</span>            std::is_same&lt;T, Tensor&gt;,</div>
<div class="line"><a id="l00014" name="l00014"></a><span class="lineno">   14</span>            std::is_same&lt;T, MappedTensor&gt;</div>
<div class="line"><a id="l00015" name="l00015"></a><span class="lineno">   15</span>        &gt; {</div>
<div class="line"><a id="l00016" name="l00016"></a><span class="lineno">   16</span>    };</div>
<div class="line"><a id="l00017" name="l00017"></a><span class="lineno">   17</span> </div>
<div class="line"><a id="l00018" name="l00018"></a><span class="lineno">   18</span> </div>
<div class="line"><a id="l00019" name="l00019"></a><span class="lineno">   19</span>    DL_API <span class="keywordtype">void</span> iRELU(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00020" name="l00020"></a><span class="lineno">   20</span> </div>
<div class="line"><a id="l00048" name="l00048"></a><span class="lineno">   48</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00049" name="l00049"></a><span class="lineno">   49</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00050" data-start="{" data-end="}">
<div class="line"><a id="l00050" name="l00050"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a4706224f5e7c9a0cfe4c74983aaef1bd">   50</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a4706224f5e7c9a0cfe4c74983aaef1bd">ReLU</a>(T&amp; input) {</div>
<div class="line"><a id="l00051" name="l00051"></a><span class="lineno">   51</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00052" name="l00052"></a><span class="lineno">   52</span>        iRELU(result.data(), input.data(), input.size());</div>
<div class="line"><a id="l00053" name="l00053"></a><span class="lineno">   53</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00054" name="l00054"></a><span class="lineno">   54</span>    }</div>
</div>
<div class="line"><a id="l00055" name="l00055"></a><span class="lineno">   55</span> </div>
<div class="line"><a id="l00056" name="l00056"></a><span class="lineno">   56</span>    DL_API <span class="keywordtype">void</span> iSigmoid(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00057" name="l00057"></a><span class="lineno">   57</span> </div>
<div class="line"><a id="l00086" name="l00086"></a><span class="lineno">   86</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00087" name="l00087"></a><span class="lineno">   87</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00088" data-start="{" data-end="}">
<div class="line"><a id="l00088" name="l00088"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#aa9a6da30ae0d71faa4ac32efb9dd1f2f">   88</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#aa9a6da30ae0d71faa4ac32efb9dd1f2f">Sigmoid</a>(T&amp; input) {</div>
<div class="line"><a id="l00089" name="l00089"></a><span class="lineno">   89</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00090" name="l00090"></a><span class="lineno">   90</span>        iSigmoid(result.data(), input.data(), input.size());</div>
<div class="line"><a id="l00091" name="l00091"></a><span class="lineno">   91</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00092" name="l00092"></a><span class="lineno">   92</span>    }</div>
</div>
<div class="line"><a id="l00093" name="l00093"></a><span class="lineno">   93</span> </div>
<div class="line"><a id="l00094" name="l00094"></a><span class="lineno">   94</span>    DL_API <span class="keywordtype">void</span> iTanh(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00095" name="l00095"></a><span class="lineno">   95</span> </div>
<div class="line"><a id="l00124" name="l00124"></a><span class="lineno">  124</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00125" name="l00125"></a><span class="lineno">  125</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00126" data-start="{" data-end="}">
<div class="line"><a id="l00126" name="l00126"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#aed71109d5ed6ecdb7181afc751fa2aa1">  126</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#aed71109d5ed6ecdb7181afc751fa2aa1">Tanh</a>(T&amp; input) {</div>
<div class="line"><a id="l00127" name="l00127"></a><span class="lineno">  127</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00128" name="l00128"></a><span class="lineno">  128</span>        iTanh(result.data(), input.data(), input.size());</div>
<div class="line"><a id="l00129" name="l00129"></a><span class="lineno">  129</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00130" name="l00130"></a><span class="lineno">  130</span>    }</div>
</div>
<div class="line"><a id="l00131" name="l00131"></a><span class="lineno">  131</span> </div>
<div class="line"><a id="l00132" name="l00132"></a><span class="lineno">  132</span>    DL_API <span class="keywordtype">void</span> iLeakyReLU(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size, <span class="keywordtype">float</span> alpha);</div>
<div class="line"><a id="l00133" name="l00133"></a><span class="lineno">  133</span> </div>
<div class="line"><a id="l00163" name="l00163"></a><span class="lineno">  163</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00164" name="l00164"></a><span class="lineno">  164</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00165" data-start="{" data-end="}">
<div class="line"><a id="l00165" name="l00165"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#ae8fb3052fdc2304fbb68c8dbad90e4ed">  165</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#ae8fb3052fdc2304fbb68c8dbad90e4ed">LeakyReLU</a>(T&amp; input, <span class="keyword">const</span> <span class="keywordtype">float</span> alpha = 0.01f) {</div>
<div class="line"><a id="l00166" name="l00166"></a><span class="lineno">  166</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00167" name="l00167"></a><span class="lineno">  167</span>        iLeakyReLU(result.data(), input.data(), input.size(), alpha);</div>
<div class="line"><a id="l00168" name="l00168"></a><span class="lineno">  168</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00169" name="l00169"></a><span class="lineno">  169</span>    }</div>
</div>
<div class="line"><a id="l00170" name="l00170"></a><span class="lineno">  170</span> </div>
<div class="line"><a id="l00171" name="l00171"></a><span class="lineno">  171</span>    DL_API <span class="keywordtype">void</span> iSwish(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00172" name="l00172"></a><span class="lineno">  172</span> </div>
<div class="line"><a id="l00200" name="l00200"></a><span class="lineno">  200</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00201" name="l00201"></a><span class="lineno">  201</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00202" data-start="{" data-end="}">
<div class="line"><a id="l00202" name="l00202"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#ae563f53512549e2e54f066f7bf06622e">  202</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#ae563f53512549e2e54f066f7bf06622e">Swish</a>(T&amp; input) {</div>
<div class="line"><a id="l00203" name="l00203"></a><span class="lineno">  203</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00204" name="l00204"></a><span class="lineno">  204</span>        iSwish(result.data(), input.data(), input.size());</div>
<div class="line"><a id="l00205" name="l00205"></a><span class="lineno">  205</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00206" name="l00206"></a><span class="lineno">  206</span>    }</div>
</div>
<div class="line"><a id="l00207" name="l00207"></a><span class="lineno">  207</span> </div>
<div class="line"><a id="l00208" name="l00208"></a><span class="lineno">  208</span>    DL_API <span class="keywordtype">void</span> iELU(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size, <span class="keywordtype">float</span> alpha);</div>
<div class="line"><a id="l00209" name="l00209"></a><span class="lineno">  209</span> </div>
<div class="line"><a id="l00239" name="l00239"></a><span class="lineno">  239</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00240" name="l00240"></a><span class="lineno">  240</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00241" data-start="{" data-end="}">
<div class="line"><a id="l00241" name="l00241"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#adae3ca94a8c203f1e444751a1cba0d6d">  241</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#adae3ca94a8c203f1e444751a1cba0d6d">ELU</a>(T&amp; input, <span class="keyword">const</span> <span class="keywordtype">float</span> alpha = 1.0f) {</div>
<div class="line"><a id="l00242" name="l00242"></a><span class="lineno">  242</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00243" name="l00243"></a><span class="lineno">  243</span>        iELU(result.data(), input.data(), input.size(), alpha);</div>
<div class="line"><a id="l00244" name="l00244"></a><span class="lineno">  244</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00245" name="l00245"></a><span class="lineno">  245</span>    }</div>
</div>
<div class="line"><a id="l00246" name="l00246"></a><span class="lineno">  246</span> </div>
<div class="line"><a id="l00247" name="l00247"></a><span class="lineno">  247</span>    DL_API <span class="keywordtype">void</span> iHardSigmoid(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size, <span class="keywordtype">float</span> alpha, <span class="keywordtype">float</span> beta);</div>
<div class="line"><a id="l00248" name="l00248"></a><span class="lineno">  248</span> </div>
<div class="line"><a id="l00279" name="l00279"></a><span class="lineno">  279</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00280" name="l00280"></a><span class="lineno">  280</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00281" data-start="{" data-end="}">
<div class="line"><a id="l00281" name="l00281"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a241d72367c091d0724b524f55289b2f0">  281</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a241d72367c091d0724b524f55289b2f0">HardSigmoid</a>(T&amp; input, <span class="keyword">const</span> <span class="keywordtype">float</span> alpha = 0.2f, <span class="keyword">const</span> <span class="keywordtype">float</span> beta = 0.5f) {</div>
<div class="line"><a id="l00282" name="l00282"></a><span class="lineno">  282</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00283" name="l00283"></a><span class="lineno">  283</span>        iHardSigmoid(result.data(), input.data(), input.size(), alpha, beta);</div>
<div class="line"><a id="l00284" name="l00284"></a><span class="lineno">  284</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00285" name="l00285"></a><span class="lineno">  285</span>    }</div>
</div>
<div class="line"><a id="l00286" name="l00286"></a><span class="lineno">  286</span> </div>
<div class="line"><a id="l00287" name="l00287"></a><span class="lineno">  287</span>    DL_API <span class="keywordtype">void</span> iHardSwish(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size, <span class="keywordtype">float</span> alpha, <span class="keywordtype">float</span> beta);</div>
<div class="line"><a id="l00288" name="l00288"></a><span class="lineno">  288</span> </div>
<div class="line"><a id="l00319" name="l00319"></a><span class="lineno">  319</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00320" name="l00320"></a><span class="lineno">  320</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00321" data-start="{" data-end="}">
<div class="line"><a id="l00321" name="l00321"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#ac716ac93e673f4706963d194e8ea523e">  321</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#ac716ac93e673f4706963d194e8ea523e">HardSwish</a>(T&amp; input, <span class="keyword">const</span> <span class="keywordtype">float</span> alpha = 0.5f, <span class="keyword">const</span> <span class="keywordtype">float</span> beta = 0.5f) {</div>
<div class="line"><a id="l00322" name="l00322"></a><span class="lineno">  322</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00323" name="l00323"></a><span class="lineno">  323</span>        iHardSwish(result.data(), input.data(), input.size(), alpha, beta);</div>
<div class="line"><a id="l00324" name="l00324"></a><span class="lineno">  324</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00325" name="l00325"></a><span class="lineno">  325</span>    }</div>
</div>
<div class="line"><a id="l00326" name="l00326"></a><span class="lineno">  326</span> </div>
<div class="line"><a id="l00327" name="l00327"></a><span class="lineno">  327</span>    DL_API <span class="keywordtype">void</span> iSoftmax(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keyword">const</span> std::vector&lt;float&gt;&amp; sum, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size,</div>
<div class="line"><a id="l00328" name="l00328"></a><span class="lineno">  328</span>                         <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset);</div>
<div class="line"><a id="l00329" name="l00329"></a><span class="lineno">  329</span> </div>
<div class="line"><a id="l00362" name="l00362"></a><span class="lineno">  362</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00363" name="l00363"></a><span class="lineno">  363</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00364" data-start="{" data-end="}">
<div class="line"><a id="l00364" name="l00364"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a55e8a3fae0d75e214cd714fde8811543">  364</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a55e8a3fae0d75e214cd714fde8811543">Softmax</a>(T&amp; input) {</div>
<div class="line"><a id="l00365" name="l00365"></a><span class="lineno">  365</span>        T result(input.shape(), input.requiresGrad());</div>
<div class="line"><a id="l00366" name="l00366"></a><span class="lineno">  366</span>        <span class="keyword">auto</span> size = input.shape()[2] * input.shape()[3];</div>
<div class="line"><a id="l00367" name="l00367"></a><span class="lineno">  367</span>        std::vector&lt;size_t&gt; offset;</div>
<div class="line"><a id="l00368" name="l00368"></a><span class="lineno">  368</span>        std::vector&lt;float&gt; sum;</div>
<div class="line"><a id="l00369" name="l00369"></a><span class="lineno">  369</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; input.shape()[0]; i++) {</div>
<div class="line"><a id="l00370" name="l00370"></a><span class="lineno">  370</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; input.shape()[1]; j++) {</div>
<div class="line"><a id="l00371" name="l00371"></a><span class="lineno">  371</span>                offset.push_back(i * input.shape().getStride(0) + j * input.shape().getStride(1));</div>
<div class="line"><a id="l00372" name="l00372"></a><span class="lineno">  372</span>                sum.push_back(input.expSum(i, j));</div>
<div class="line"><a id="l00373" name="l00373"></a><span class="lineno">  373</span>            }</div>
<div class="line"><a id="l00374" name="l00374"></a><span class="lineno">  374</span>        }</div>
<div class="line"><a id="l00375" name="l00375"></a><span class="lineno">  375</span>        iSoftmax(result.data(), input.data(), sum, size, offset);</div>
<div class="line"><a id="l00376" name="l00376"></a><span class="lineno">  376</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00377" name="l00377"></a><span class="lineno">  377</span>    }</div>
</div>
<div class="line"><a id="l00378" name="l00378"></a><span class="lineno">  378</span> </div>
<div class="line"><a id="l00379" name="l00379"></a><span class="lineno">  379</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00380" name="l00380"></a><span class="lineno">  380</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, <span class="keywordtype">void</span>&gt;</div>
<div class="line"><a id="l00381" name="l00381"></a><span class="lineno">  381</span>    <a class="code hl_function" href="namespacenz_1_1data.html#a55e8a3fae0d75e214cd714fde8811543">Softmax</a>(T&amp; output, T&amp; input) {</div>
<div class="line"><a id="l00382" name="l00382"></a><span class="lineno">  382</span>        <span class="keyword">auto</span> size = input.shape()[2] * input.shape()[3];</div>
<div class="line"><a id="l00383" name="l00383"></a><span class="lineno">  383</span>        std::vector&lt;size_t&gt; offset;</div>
<div class="line"><a id="l00384" name="l00384"></a><span class="lineno">  384</span>        std::vector&lt;float&gt; sum;</div>
<div class="line"><a id="l00385" name="l00385"></a><span class="lineno">  385</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; input.shape()[0]; i++) {</div>
<div class="line"><a id="l00386" name="l00386"></a><span class="lineno">  386</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; input.shape()[1]; j++) {</div>
<div class="line"><a id="l00387" name="l00387"></a><span class="lineno">  387</span>                offset.push_back(i * input.shape().getStride(0) + j * input.shape().getStride(1));</div>
<div class="line"><a id="l00388" name="l00388"></a><span class="lineno">  388</span>                sum.push_back(input.expSum(i, j));</div>
<div class="line"><a id="l00389" name="l00389"></a><span class="lineno">  389</span>            }</div>
<div class="line"><a id="l00390" name="l00390"></a><span class="lineno">  390</span>        }</div>
<div class="line"><a id="l00391" name="l00391"></a><span class="lineno">  391</span>        iSoftmax(output.data(), input.data(), sum, size, offset);</div>
<div class="line"><a id="l00392" name="l00392"></a><span class="lineno">  392</span>    }</div>
<div class="line"><a id="l00393" name="l00393"></a><span class="lineno">  393</span> </div>
<div class="line"><a id="l00394" name="l00394"></a><span class="lineno">  394</span>    DL_API <span class="keywordtype">void</span> iScalarAdd(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">float</span> scalar, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00395" name="l00395"></a><span class="lineno">  395</span> </div>
<div class="line"><a id="l00396" name="l00396"></a><span class="lineno">  396</span>    DL_API <span class="keywordtype">void</span> iScalarDiv(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">float</span> scalar, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00397" name="l00397"></a><span class="lineno">  397</span> </div>
<div class="line"><a id="l00398" name="l00398"></a><span class="lineno">  398</span>    DL_API <span class="keywordtype">void</span> iScalarMul(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* input, <span class="keywordtype">float</span> scalar, <span class="keywordtype">unsigned</span> <span class="keywordtype">long</span> <span class="keywordtype">long</span> size);</div>
<div class="line"><a id="l00399" name="l00399"></a><span class="lineno">  399</span> </div>
<div class="line"><a id="l00434" name="l00434"></a><span class="lineno">  434</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00435" name="l00435"></a><span class="lineno">  435</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00436" data-start="{" data-end="}">
<div class="line"><a id="l00436" name="l00436"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#ab99b7c0a7c96a6de43f5b3f25af7f918">  436</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#ab99b7c0a7c96a6de43f5b3f25af7f918">operator+</a>(T&amp; lhs, <span class="keyword">const</span> <span class="keywordtype">float</span> rhs) {</div>
<div class="line"><a id="l00437" name="l00437"></a><span class="lineno">  437</span>        T result(lhs.shape(), lhs.requiresGrad());</div>
<div class="line"><a id="l00438" name="l00438"></a><span class="lineno">  438</span>        iScalarAdd(result.data(), lhs.data(), rhs, lhs.size());</div>
<div class="line"><a id="l00439" name="l00439"></a><span class="lineno">  439</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00440" name="l00440"></a><span class="lineno">  440</span>    }</div>
</div>
<div class="line"><a id="l00441" name="l00441"></a><span class="lineno">  441</span> </div>
<div class="line"><a id="l00476" name="l00476"></a><span class="lineno">  476</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00477" name="l00477"></a><span class="lineno">  477</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00478" data-start="{" data-end="}">
<div class="line"><a id="l00478" name="l00478"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a5cbc31234b126e3ce84c273e0cc8714a">  478</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#ab99b7c0a7c96a6de43f5b3f25af7f918">operator+</a>(<span class="keyword">const</span> <span class="keywordtype">float</span> lhs, T&amp; rhs) {</div>
<div class="line"><a id="l00479" name="l00479"></a><span class="lineno">  479</span>        T result(rhs.shape(), rhs.requiresGrad());</div>
<div class="line"><a id="l00480" name="l00480"></a><span class="lineno">  480</span>        iScalarAdd(result.data(), rhs.data(), lhs, rhs.size());</div>
<div class="line"><a id="l00481" name="l00481"></a><span class="lineno">  481</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00482" name="l00482"></a><span class="lineno">  482</span>    }</div>
</div>
<div class="line"><a id="l00483" name="l00483"></a><span class="lineno">  483</span> </div>
<div class="line"><a id="l00518" name="l00518"></a><span class="lineno">  518</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00519" name="l00519"></a><span class="lineno">  519</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00520" data-start="{" data-end="}">
<div class="line"><a id="l00520" name="l00520"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#acc650ae262aba5f1b0fa9cca8cae311e">  520</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#acc650ae262aba5f1b0fa9cca8cae311e">operator-</a>(T&amp; lhs, <span class="keyword">const</span> <span class="keywordtype">float</span> rhs) {</div>
<div class="line"><a id="l00521" name="l00521"></a><span class="lineno">  521</span>        T result(lhs.shape(), lhs.requiresGrad());</div>
<div class="line"><a id="l00522" name="l00522"></a><span class="lineno">  522</span>        iScalarAdd(result.data(), lhs.data(), -rhs, lhs.size());</div>
<div class="line"><a id="l00523" name="l00523"></a><span class="lineno">  523</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00524" name="l00524"></a><span class="lineno">  524</span>    }</div>
</div>
<div class="line"><a id="l00525" name="l00525"></a><span class="lineno">  525</span> </div>
<div class="line"><a id="l00560" name="l00560"></a><span class="lineno">  560</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00561" name="l00561"></a><span class="lineno">  561</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00562" data-start="{" data-end="}">
<div class="line"><a id="l00562" name="l00562"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a5ecefd608c1f6b3ce4e9d752dd05c0e7">  562</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#acc650ae262aba5f1b0fa9cca8cae311e">operator-</a>(<span class="keyword">const</span> <span class="keywordtype">float</span> lhs, T&amp; rhs) {</div>
<div class="line"><a id="l00563" name="l00563"></a><span class="lineno">  563</span>        T result = -rhs;</div>
<div class="line"><a id="l00564" name="l00564"></a><span class="lineno">  564</span>        iScalarAdd(result.data(), result.data(), lhs, result.size());</div>
<div class="line"><a id="l00565" name="l00565"></a><span class="lineno">  565</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00566" name="l00566"></a><span class="lineno">  566</span>    }</div>
</div>
<div class="line"><a id="l00567" name="l00567"></a><span class="lineno">  567</span> </div>
<div class="line"><a id="l00602" name="l00602"></a><span class="lineno">  602</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00603" name="l00603"></a><span class="lineno">  603</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00604" data-start="{" data-end="}">
<div class="line"><a id="l00604" name="l00604"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a8730252e35a8e59aacb429efb0d6b828">  604</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a8730252e35a8e59aacb429efb0d6b828">operator*</a>(T&amp; lhs, <span class="keyword">const</span> <span class="keywordtype">float</span> rhs) {</div>
<div class="line"><a id="l00605" name="l00605"></a><span class="lineno">  605</span>        T result(lhs.shape(), lhs.requiresGrad());</div>
<div class="line"><a id="l00606" name="l00606"></a><span class="lineno">  606</span>        iScalarMul(result.data(), lhs.data(), rhs, lhs.size());</div>
<div class="line"><a id="l00607" name="l00607"></a><span class="lineno">  607</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00608" name="l00608"></a><span class="lineno">  608</span>    }</div>
</div>
<div class="line"><a id="l00609" name="l00609"></a><span class="lineno">  609</span> </div>
<div class="line"><a id="l00644" name="l00644"></a><span class="lineno">  644</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00645" name="l00645"></a><span class="lineno">  645</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00646" data-start="{" data-end="}">
<div class="line"><a id="l00646" name="l00646"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a6f0029a210088048368560c6e4c4d8a6">  646</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a8730252e35a8e59aacb429efb0d6b828">operator*</a>(<span class="keyword">const</span> <span class="keywordtype">float</span> lhs, T&amp; rhs) {</div>
<div class="line"><a id="l00647" name="l00647"></a><span class="lineno">  647</span>        T result(rhs.shape(), rhs.requiresGrad());</div>
<div class="line"><a id="l00648" name="l00648"></a><span class="lineno">  648</span>        iScalarMul(result.data(), rhs.data(), lhs, rhs.size());</div>
<div class="line"><a id="l00649" name="l00649"></a><span class="lineno">  649</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00650" name="l00650"></a><span class="lineno">  650</span>    }</div>
</div>
<div class="line"><a id="l00651" name="l00651"></a><span class="lineno">  651</span> </div>
<div class="line"><a id="l00687" name="l00687"></a><span class="lineno">  687</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00688" name="l00688"></a><span class="lineno">  688</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00689" data-start="{" data-end="}">
<div class="line"><a id="l00689" name="l00689"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a771a257e9dd839ce330e9b40fd1dda56">  689</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a771a257e9dd839ce330e9b40fd1dda56">operator/</a>(T&amp; lhs, <span class="keyword">const</span> <span class="keywordtype">float</span> rhs) {</div>
<div class="line"><a id="l00690" name="l00690"></a><span class="lineno">  690</span>        T result(lhs.shape(), lhs.requiresGrad());</div>
<div class="line"><a id="l00691" name="l00691"></a><span class="lineno">  691</span>        iScalarDiv(result.data(), lhs.data(), rhs, lhs.size());</div>
<div class="line"><a id="l00692" name="l00692"></a><span class="lineno">  692</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00693" name="l00693"></a><span class="lineno">  693</span>    }</div>
</div>
<div class="line"><a id="l00694" name="l00694"></a><span class="lineno">  694</span> </div>
<div class="line"><a id="l00730" name="l00730"></a><span class="lineno">  730</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00731" name="l00731"></a><span class="lineno">  731</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen00732" data-start="{" data-end="}">
<div class="line"><a id="l00732" name="l00732"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a275956a1088d701845f4599da84cdc84">  732</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a771a257e9dd839ce330e9b40fd1dda56">operator/</a>(<span class="keyword">const</span> <span class="keywordtype">float</span> lhs, T&amp; rhs) {</div>
<div class="line"><a id="l00733" name="l00733"></a><span class="lineno">  733</span>        T result = rhs;</div>
<div class="line"><a id="l00734" name="l00734"></a><span class="lineno">  734</span>        result.recip();</div>
<div class="line"><a id="l00735" name="l00735"></a><span class="lineno">  735</span>        iScalarMul(result.data(), result.data(), lhs, result.size());</div>
<div class="line"><a id="l00736" name="l00736"></a><span class="lineno">  736</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l00737" name="l00737"></a><span class="lineno">  737</span>    }</div>
</div>
<div class="line"><a id="l00738" name="l00738"></a><span class="lineno">  738</span> </div>
<div class="line"><a id="l00739" name="l00739"></a><span class="lineno">  739</span>    DL_API <span class="keywordtype">void</span> iMatrixAdd(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in1, <span class="keywordtype">float</span>* in2, <span class="keywordtype">size_t</span> n, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_o,</div>
<div class="line"><a id="l00740" name="l00740"></a><span class="lineno">  740</span>                           <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i1, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i2);</div>
<div class="line"><a id="l00741" name="l00741"></a><span class="lineno">  741</span> </div>
<div class="line"><a id="l00785" name="l00785"></a><span class="lineno">  785</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00786" name="l00786"></a><span class="lineno">  786</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, <span class="keywordtype">void</span>&gt;</div>
<div class="foldopen" id="foldopen00787" data-start="{" data-end="}">
<div class="line"><a id="l00787" name="l00787"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a8cf4ac2437dd67698684169bebb225d4">  787</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a8cf4ac2437dd67698684169bebb225d4">tensorMatrixAdd</a>(T&amp; out, <span class="keyword">const</span> T&amp; lhs, <span class="keyword">const</span> T&amp; rhs) {</div>
<div class="line"><a id="l00788" name="l00788"></a><span class="lineno">  788</span>        <span class="keywordflow">if</span> (!lhs.shape().isBroadcastCompatible(rhs.shape()) || lhs.shape().H() != rhs.shape().H() || lhs.shape().W() !=</div>
<div class="line"><a id="l00789" name="l00789"></a><span class="lineno">  789</span>            rhs.shape().</div>
<div class="line"><a id="l00790" name="l00790"></a><span class="lineno">  790</span>                W()) {</div>
<div class="line"><a id="l00791" name="l00791"></a><span class="lineno">  791</span>            <span class="keywordflow">throw</span> std::invalid_argument(<span class="stringliteral">&quot;Shapes are not broadcast compatible.&quot;</span>);</div>
<div class="line"><a id="l00792" name="l00792"></a><span class="lineno">  792</span>        }</div>
<div class="line"><a id="l00793" name="l00793"></a><span class="lineno">  793</span>        std::vector&lt;size_t&gt; offsetC;</div>
<div class="line"><a id="l00794" name="l00794"></a><span class="lineno">  794</span>        std::vector&lt;size_t&gt; offsetA;</div>
<div class="line"><a id="l00795" name="l00795"></a><span class="lineno">  795</span>        std::vector&lt;size_t&gt; offsetB;</div>
<div class="line"><a id="l00796" name="l00796"></a><span class="lineno">  796</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> n = lhs.shape().H() * lhs.shape().W();</div>
<div class="line"><a id="l00797" name="l00797"></a><span class="lineno">  797</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; out.shape()[0]; i++) {</div>
<div class="line"><a id="l00798" name="l00798"></a><span class="lineno">  798</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; out.shape()[1]; j++) {</div>
<div class="line"><a id="l00799" name="l00799"></a><span class="lineno">  799</span>                offsetC.push_back(i * out.shape().getStride(0) + j * out.shape().getStride(1));</div>
<div class="line"><a id="l00800" name="l00800"></a><span class="lineno">  800</span>                offsetA.push_back(i * (lhs.shape().N() &gt; 1 ? lhs.shape().getStride(0) : 0) + j * (lhs.shape().C() &gt; 1</div>
<div class="line"><a id="l00801" name="l00801"></a><span class="lineno">  801</span>                    ? lhs.shape().getStride(1)</div>
<div class="line"><a id="l00802" name="l00802"></a><span class="lineno">  802</span>                    : 0));</div>
<div class="line"><a id="l00803" name="l00803"></a><span class="lineno">  803</span>                offsetB.push_back(i * (rhs.shape().N() &gt; 1 ? rhs.shape().getStride(0) : 0) + j * (</div>
<div class="line"><a id="l00804" name="l00804"></a><span class="lineno">  804</span>                    rhs.shape().C() &gt; 1 ? rhs.shape().getStride(1) : 0));</div>
<div class="line"><a id="l00805" name="l00805"></a><span class="lineno">  805</span>            }</div>
<div class="line"><a id="l00806" name="l00806"></a><span class="lineno">  806</span>        }</div>
<div class="line"><a id="l00807" name="l00807"></a><span class="lineno">  807</span>        iMatrixAdd(out.data(), lhs.data(), rhs.data(), n, offsetC, offsetA, offsetB);</div>
<div class="line"><a id="l00808" name="l00808"></a><span class="lineno">  808</span>    }</div>
</div>
<div class="line"><a id="l00809" name="l00809"></a><span class="lineno">  809</span> </div>
<div class="line"><a id="l00810" name="l00810"></a><span class="lineno">  810</span>    DL_API <span class="keywordtype">void</span> iMatrixSub(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in1, <span class="keywordtype">float</span>* in2, <span class="keywordtype">size_t</span> n, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_o,</div>
<div class="line"><a id="l00811" name="l00811"></a><span class="lineno">  811</span>                           <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i1, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i2);</div>
<div class="line"><a id="l00812" name="l00812"></a><span class="lineno">  812</span> </div>
<div class="line"><a id="l00856" name="l00856"></a><span class="lineno">  856</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00857" name="l00857"></a><span class="lineno">  857</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, <span class="keywordtype">void</span>&gt;</div>
<div class="foldopen" id="foldopen00858" data-start="{" data-end="}">
<div class="line"><a id="l00858" name="l00858"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a7503b6894e8052ed54eb169550d135c0">  858</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a7503b6894e8052ed54eb169550d135c0">tensorMatrixSub</a>(T&amp; out, <span class="keyword">const</span> T&amp; lhs, <span class="keyword">const</span> T&amp; rhs) {</div>
<div class="line"><a id="l00859" name="l00859"></a><span class="lineno">  859</span>        <span class="keywordflow">if</span> (!lhs.shape().isBroadcastCompatible(rhs.shape()) || lhs.shape().H() != rhs.shape().H() || lhs.shape().W() !=</div>
<div class="line"><a id="l00860" name="l00860"></a><span class="lineno">  860</span>            rhs.shape().</div>
<div class="line"><a id="l00861" name="l00861"></a><span class="lineno">  861</span>                W()) {</div>
<div class="line"><a id="l00862" name="l00862"></a><span class="lineno">  862</span>            <span class="keywordflow">throw</span> std::invalid_argument(<span class="stringliteral">&quot;Shapes are not broadcast compatible.&quot;</span>);</div>
<div class="line"><a id="l00863" name="l00863"></a><span class="lineno">  863</span>        }</div>
<div class="line"><a id="l00864" name="l00864"></a><span class="lineno">  864</span>        std::vector&lt;size_t&gt; offsetC;</div>
<div class="line"><a id="l00865" name="l00865"></a><span class="lineno">  865</span>        std::vector&lt;size_t&gt; offsetA;</div>
<div class="line"><a id="l00866" name="l00866"></a><span class="lineno">  866</span>        std::vector&lt;size_t&gt; offsetB;</div>
<div class="line"><a id="l00867" name="l00867"></a><span class="lineno">  867</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> n = lhs.shape().H() * lhs.shape().W();</div>
<div class="line"><a id="l00868" name="l00868"></a><span class="lineno">  868</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; out.shape()[0]; i++) {</div>
<div class="line"><a id="l00869" name="l00869"></a><span class="lineno">  869</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; out.shape()[1]; j++) {</div>
<div class="line"><a id="l00870" name="l00870"></a><span class="lineno">  870</span>                offsetC.push_back(i * out.shape().getStride(0) + j * out.shape().getStride(1));</div>
<div class="line"><a id="l00871" name="l00871"></a><span class="lineno">  871</span>                offsetA.push_back(i * (lhs.shape().N() &gt; 1 ? lhs.shape().getStride(0) : 0) + j * (lhs.shape().C() &gt; 1</div>
<div class="line"><a id="l00872" name="l00872"></a><span class="lineno">  872</span>                    ? lhs.shape().getStride(1)</div>
<div class="line"><a id="l00873" name="l00873"></a><span class="lineno">  873</span>                    : 0));</div>
<div class="line"><a id="l00874" name="l00874"></a><span class="lineno">  874</span>                offsetB.push_back(i * (rhs.shape().N() &gt; 1 ? rhs.shape().getStride(0) : 0) + j * (</div>
<div class="line"><a id="l00875" name="l00875"></a><span class="lineno">  875</span>                    rhs.shape().C() &gt; 1 ? rhs.shape().getStride(1) : 0));</div>
<div class="line"><a id="l00876" name="l00876"></a><span class="lineno">  876</span>            }</div>
<div class="line"><a id="l00877" name="l00877"></a><span class="lineno">  877</span>        }</div>
<div class="line"><a id="l00878" name="l00878"></a><span class="lineno">  878</span>        iMatrixSub(out.data(), lhs.data(), rhs.data(), n, offsetC, offsetA, offsetB);</div>
<div class="line"><a id="l00879" name="l00879"></a><span class="lineno">  879</span>    }</div>
</div>
<div class="line"><a id="l00880" name="l00880"></a><span class="lineno">  880</span> </div>
<div class="line"><a id="l00881" name="l00881"></a><span class="lineno">  881</span>    DL_API <span class="keywordtype">void</span> iElementwiseDivide(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in1, <span class="keywordtype">float</span>* in2, <span class="keywordtype">size_t</span> n, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_o,</div>
<div class="line"><a id="l00882" name="l00882"></a><span class="lineno">  882</span>                                   <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i1, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i2);</div>
<div class="line"><a id="l00883" name="l00883"></a><span class="lineno">  883</span> </div>
<div class="line"><a id="l00926" name="l00926"></a><span class="lineno">  926</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00927" name="l00927"></a><span class="lineno">  927</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, <span class="keywordtype">void</span>&gt;</div>
<div class="foldopen" id="foldopen00928" data-start="{" data-end="}">
<div class="line"><a id="l00928" name="l00928"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a1da5cd018533919ed5a750b14c7d6d71">  928</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a1da5cd018533919ed5a750b14c7d6d71">tensorElementwiseDivide</a>(T&amp; out, <span class="keyword">const</span> T&amp; lhs, <span class="keyword">const</span> T&amp; rhs) {</div>
<div class="line"><a id="l00929" name="l00929"></a><span class="lineno">  929</span>        <span class="keywordflow">if</span> (!lhs.shape().isBroadcastCompatible(rhs.shape()) || lhs.shape().H() != rhs.shape().H() || lhs.shape().W() !=</div>
<div class="line"><a id="l00930" name="l00930"></a><span class="lineno">  930</span>            rhs.shape().</div>
<div class="line"><a id="l00931" name="l00931"></a><span class="lineno">  931</span>                W()) {</div>
<div class="line"><a id="l00932" name="l00932"></a><span class="lineno">  932</span>            <span class="keywordflow">throw</span> std::invalid_argument(<span class="stringliteral">&quot;Shapes are not broadcast compatible.&quot;</span>);</div>
<div class="line"><a id="l00933" name="l00933"></a><span class="lineno">  933</span>        }</div>
<div class="line"><a id="l00934" name="l00934"></a><span class="lineno">  934</span>        std::vector&lt;size_t&gt; offsetC;</div>
<div class="line"><a id="l00935" name="l00935"></a><span class="lineno">  935</span>        std::vector&lt;size_t&gt; offsetA;</div>
<div class="line"><a id="l00936" name="l00936"></a><span class="lineno">  936</span>        std::vector&lt;size_t&gt; offsetB;</div>
<div class="line"><a id="l00937" name="l00937"></a><span class="lineno">  937</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> n = lhs.shape().H() * lhs.shape().W();</div>
<div class="line"><a id="l00938" name="l00938"></a><span class="lineno">  938</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; out.shape()[0]; i++) {</div>
<div class="line"><a id="l00939" name="l00939"></a><span class="lineno">  939</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; out.shape()[1]; j++) {</div>
<div class="line"><a id="l00940" name="l00940"></a><span class="lineno">  940</span>                offsetC.push_back(i * out.shape().getStride(0) + j * out.shape().getStride(1));</div>
<div class="line"><a id="l00941" name="l00941"></a><span class="lineno">  941</span>                offsetA.push_back(i * (lhs.shape().N() &gt; 1 ? lhs.shape().getStride(0) : 0) + j * (lhs.shape().C() &gt; 1</div>
<div class="line"><a id="l00942" name="l00942"></a><span class="lineno">  942</span>                    ? lhs.shape().getStride(1)</div>
<div class="line"><a id="l00943" name="l00943"></a><span class="lineno">  943</span>                    : 0));</div>
<div class="line"><a id="l00944" name="l00944"></a><span class="lineno">  944</span>                offsetB.push_back(i * (rhs.shape().N() &gt; 1 ? rhs.shape().getStride(0) : 0) + j * (</div>
<div class="line"><a id="l00945" name="l00945"></a><span class="lineno">  945</span>                    rhs.shape().C() &gt; 1 ? rhs.shape().getStride(1) : 0));</div>
<div class="line"><a id="l00946" name="l00946"></a><span class="lineno">  946</span>            }</div>
<div class="line"><a id="l00947" name="l00947"></a><span class="lineno">  947</span>        }</div>
<div class="line"><a id="l00948" name="l00948"></a><span class="lineno">  948</span>        iElementwiseDivide(out.data(), lhs.data(), rhs.data(), n, offsetC, offsetA, offsetB);</div>
<div class="line"><a id="l00949" name="l00949"></a><span class="lineno">  949</span>    }</div>
</div>
<div class="line"><a id="l00950" name="l00950"></a><span class="lineno">  950</span> </div>
<div class="line"><a id="l00951" name="l00951"></a><span class="lineno">  951</span>    DL_API <span class="keywordtype">void</span> iGeneralMatrixMul(<span class="keywordtype">float</span>* A, <span class="keywordtype">float</span>* B, <span class="keywordtype">float</span>* C, <span class="keywordtype">size_t</span> M, <span class="keywordtype">size_t</span> N, <span class="keywordtype">size_t</span> K,</div>
<div class="line"><a id="l00952" name="l00952"></a><span class="lineno">  952</span>                                  <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offsetC, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offsetA,</div>
<div class="line"><a id="l00953" name="l00953"></a><span class="lineno">  953</span>                                  <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offsetB);</div>
<div class="line"><a id="l00954" name="l00954"></a><span class="lineno">  954</span> </div>
<div class="line"><a id="l00998" name="l00998"></a><span class="lineno">  998</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l00999" name="l00999"></a><span class="lineno">  999</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, <span class="keywordtype">void</span>&gt;</div>
<div class="foldopen" id="foldopen01000" data-start="{" data-end="}">
<div class="line"><a id="l01000" name="l01000"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#a5a166a472b887c45fde9e5815f072234"> 1000</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#a5a166a472b887c45fde9e5815f072234">tensorGeneralMatrixMul</a>(T&amp; out, <span class="keyword">const</span> T&amp; lhs, <span class="keyword">const</span> T&amp; rhs) {</div>
<div class="line"><a id="l01001" name="l01001"></a><span class="lineno"> 1001</span>        <span class="keywordflow">if</span> (!lhs.shape().isBroadcastCompatible(rhs.shape()) || lhs.shape().W() != rhs.shape().H()) {</div>
<div class="line"><a id="l01002" name="l01002"></a><span class="lineno"> 1002</span>            <span class="keywordflow">throw</span> std::invalid_argument(<span class="stringliteral">&quot;Shapes are not broadcast compatible.&quot;</span>);</div>
<div class="line"><a id="l01003" name="l01003"></a><span class="lineno"> 1003</span>        }</div>
<div class="line"><a id="l01004" name="l01004"></a><span class="lineno"> 1004</span>        std::vector&lt;size_t&gt; offsetC;</div>
<div class="line"><a id="l01005" name="l01005"></a><span class="lineno"> 1005</span>        std::vector&lt;size_t&gt; offsetA;</div>
<div class="line"><a id="l01006" name="l01006"></a><span class="lineno"> 1006</span>        std::vector&lt;size_t&gt; offsetB;</div>
<div class="line"><a id="l01007" name="l01007"></a><span class="lineno"> 1007</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; out.shape()[0]; i++) {</div>
<div class="line"><a id="l01008" name="l01008"></a><span class="lineno"> 1008</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; out.shape()[1]; j++) {</div>
<div class="line"><a id="l01009" name="l01009"></a><span class="lineno"> 1009</span>                offsetC.push_back(i * out.shape().getStride(0) + j * out.shape().getStride(1));</div>
<div class="line"><a id="l01010" name="l01010"></a><span class="lineno"> 1010</span>                offsetA.push_back(i * (lhs.shape().N() &gt; 1 ? lhs.shape().getStride(0) : 0) + j * (lhs.shape().C() &gt; 1</div>
<div class="line"><a id="l01011" name="l01011"></a><span class="lineno"> 1011</span>                    ? lhs.shape().getStride(1)</div>
<div class="line"><a id="l01012" name="l01012"></a><span class="lineno"> 1012</span>                    : 0));</div>
<div class="line"><a id="l01013" name="l01013"></a><span class="lineno"> 1013</span>                offsetB.push_back(i * (rhs.shape().N() &gt; 1 ? rhs.shape().getStride(0) : 0) + j * (</div>
<div class="line"><a id="l01014" name="l01014"></a><span class="lineno"> 1014</span>                    rhs.shape().C() &gt; 1 ? rhs.shape().getStride(1) : 0));</div>
<div class="line"><a id="l01015" name="l01015"></a><span class="lineno"> 1015</span>            }</div>
<div class="line"><a id="l01016" name="l01016"></a><span class="lineno"> 1016</span>        }</div>
<div class="line"><a id="l01017" name="l01017"></a><span class="lineno"> 1017</span>        iGeneralMatrixMul(lhs.data(), rhs.data(), out.data(), lhs.shape().H(), rhs.shape().W(), lhs.shape().W(),</div>
<div class="line"><a id="l01018" name="l01018"></a><span class="lineno"> 1018</span>                          offsetC, offsetA, offsetB);</div>
<div class="line"><a id="l01019" name="l01019"></a><span class="lineno"> 1019</span>    }</div>
</div>
<div class="line"><a id="l01020" name="l01020"></a><span class="lineno"> 1020</span> </div>
<div class="line"><a id="l01021" name="l01021"></a><span class="lineno"> 1021</span>    DL_API <span class="keywordtype">void</span> iTensorCoreGEMM(<span class="keywordtype">float</span>* A, <span class="keywordtype">float</span>* B, <span class="keywordtype">float</span>* C, <span class="keyword">const</span> Dimension&amp; shapeA, <span class="keyword">const</span> Dimension&amp; shapeB,</div>
<div class="line"><a id="l01022" name="l01022"></a><span class="lineno"> 1022</span>                                <span class="keyword">const</span> Dimension&amp; shapeC);</div>
<div class="line"><a id="l01023" name="l01023"></a><span class="lineno"> 1023</span> </div>
<div class="line"><a id="l01024" name="l01024"></a><span class="lineno"> 1024</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l01025" name="l01025"></a><span class="lineno"> 1025</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, <span class="keywordtype">void</span>&gt;</div>
<div class="line"><a id="l01026" name="l01026"></a><span class="lineno"> 1026</span>    GEMMTensorCore(T&amp; out, <span class="keyword">const</span> T&amp; lhs, <span class="keyword">const</span> T&amp; rhs) {</div>
<div class="line"><a id="l01027" name="l01027"></a><span class="lineno"> 1027</span>        iTensorCoreGEMM(lhs.data(), rhs.data(), out.data(), lhs.shape(), rhs.shape(), out.shape());</div>
<div class="line"><a id="l01028" name="l01028"></a><span class="lineno"> 1028</span>    }</div>
<div class="line"><a id="l01029" name="l01029"></a><span class="lineno"> 1029</span> </div>
<div class="line"><a id="l01030" name="l01030"></a><span class="lineno"> 1030</span>    DL_API <span class="keywordtype">void</span> iGEMMBackward(<span class="keywordtype">float</span>* A, <span class="keywordtype">float</span>* B, <span class="keywordtype">float</span>* C, <span class="keyword">const</span> Dimension&amp; shapeA, <span class="keyword">const</span> Dimension&amp; shapeB,</div>
<div class="line"><a id="l01031" name="l01031"></a><span class="lineno"> 1031</span>                              <span class="keyword">const</span> Dimension&amp; shapeC);</div>
<div class="line"><a id="l01032" name="l01032"></a><span class="lineno"> 1032</span> </div>
<div class="line"><a id="l01033" name="l01033"></a><span class="lineno"> 1033</span>    DL_API <span class="keywordtype">void</span> iTranspose(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in, <span class="keywordtype">size_t</span> rows, <span class="keywordtype">size_t</span> cols, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset);</div>
<div class="line"><a id="l01034" name="l01034"></a><span class="lineno"> 1034</span> </div>
<div class="line"><a id="l01071" name="l01071"></a><span class="lineno"> 1071</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l01072" name="l01072"></a><span class="lineno"> 1072</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="foldopen" id="foldopen01073" data-start="{" data-end="}">
<div class="line"><a id="l01073" name="l01073"></a><span class="lineno"><a class="line" href="namespacenz_1_1data.html#ac8d64dd271e9a2e50682e733bd14ec19"> 1073</a></span>    <a class="code hl_function" href="namespacenz_1_1data.html#ac8d64dd271e9a2e50682e733bd14ec19">transpose</a>(<span class="keyword">const</span> T&amp; in) {</div>
<div class="line"><a id="l01074" name="l01074"></a><span class="lineno"> 1074</span>        T result({in.shape()[0], in.shape()[1], in.shape()[3], in.shape()[2]}, in.requiresGrad());</div>
<div class="line"><a id="l01075" name="l01075"></a><span class="lineno"> 1075</span>        std::vector&lt;size_t&gt; offset;</div>
<div class="line"><a id="l01076" name="l01076"></a><span class="lineno"> 1076</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; in.shape()[0]; i += 1) {</div>
<div class="line"><a id="l01077" name="l01077"></a><span class="lineno"> 1077</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; in.shape()[1]; j += 1) {</div>
<div class="line"><a id="l01078" name="l01078"></a><span class="lineno"> 1078</span>                offset.push_back(i * in.shape().getStride(0) + j * in.shape().getStride(1));</div>
<div class="line"><a id="l01079" name="l01079"></a><span class="lineno"> 1079</span>            }</div>
<div class="line"><a id="l01080" name="l01080"></a><span class="lineno"> 1080</span>        }</div>
<div class="line"><a id="l01081" name="l01081"></a><span class="lineno"> 1081</span>        iTranspose(result.data(), in.data(), in.shape()[2], in.shape()[3], offset);</div>
<div class="line"><a id="l01082" name="l01082"></a><span class="lineno"> 1082</span>        <span class="keywordflow">if</span> (in.requiresGrad()) {</div>
<div class="line"><a id="l01083" name="l01083"></a><span class="lineno"> 1083</span>            iTranspose(result.grad(), in.grad(), in.shape()[2], in.shape()[3], offset);</div>
<div class="line"><a id="l01084" name="l01084"></a><span class="lineno"> 1084</span>        }</div>
<div class="line"><a id="l01085" name="l01085"></a><span class="lineno"> 1085</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l01086" name="l01086"></a><span class="lineno"> 1086</span>    }</div>
</div>
<div class="line"><a id="l01087" name="l01087"></a><span class="lineno"> 1087</span> </div>
<div class="line"><a id="l01088" name="l01088"></a><span class="lineno"> 1088</span>    DL_API <span class="keywordtype">void</span> iSoftmaxJacobian(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in, <span class="keywordtype">size_t</span> n, <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_o,</div>
<div class="line"><a id="l01089" name="l01089"></a><span class="lineno"> 1089</span>                                 <span class="keyword">const</span> std::vector&lt;size_t&gt;&amp; offset_i);</div>
<div class="line"><a id="l01090" name="l01090"></a><span class="lineno"> 1090</span> </div>
<div class="line"><a id="l01091" name="l01091"></a><span class="lineno"> 1091</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l01092" name="l01092"></a><span class="lineno"> 1092</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="line"><a id="l01093" name="l01093"></a><span class="lineno"> 1093</span>    softmaxJacobian(<span class="keyword">const</span> T&amp; in) {</div>
<div class="line"><a id="l01094" name="l01094"></a><span class="lineno"> 1094</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> n = std::max(in.shape()[2], in.shape()[3]);</div>
<div class="line"><a id="l01095" name="l01095"></a><span class="lineno"> 1095</span>        T result({in.shape()[0], in.shape()[1], n, n});</div>
<div class="line"><a id="l01096" name="l01096"></a><span class="lineno"> 1096</span>        std::vector&lt;size_t&gt; offset_o;</div>
<div class="line"><a id="l01097" name="l01097"></a><span class="lineno"> 1097</span>        std::vector&lt;size_t&gt; offset_i;</div>
<div class="line"><a id="l01098" name="l01098"></a><span class="lineno"> 1098</span>        <span class="keywordflow">for</span> (<span class="keyword">auto</span> i = 0; i &lt; in.shape()[0]; i++) {</div>
<div class="line"><a id="l01099" name="l01099"></a><span class="lineno"> 1099</span>            <span class="keywordflow">for</span> (<span class="keyword">auto</span> j = 0; j &lt; in.shape()[1]; j++) {</div>
<div class="line"><a id="l01100" name="l01100"></a><span class="lineno"> 1100</span>                offset_o.push_back(i * result.shape().getStride(0) + j * result.shape().getStride(1));</div>
<div class="line"><a id="l01101" name="l01101"></a><span class="lineno"> 1101</span>                offset_i.push_back(i * in.shape().getStride(0) + j * in.shape().getStride(1));</div>
<div class="line"><a id="l01102" name="l01102"></a><span class="lineno"> 1102</span>            }</div>
<div class="line"><a id="l01103" name="l01103"></a><span class="lineno"> 1103</span>        }</div>
<div class="line"><a id="l01104" name="l01104"></a><span class="lineno"> 1104</span>        iSoftmaxJacobian(result.data(), in.data(), n, offset_o, offset_i);</div>
<div class="line"><a id="l01105" name="l01105"></a><span class="lineno"> 1105</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l01106" name="l01106"></a><span class="lineno"> 1106</span>    }</div>
<div class="line"><a id="l01107" name="l01107"></a><span class="lineno"> 1107</span> </div>
<div class="line"><a id="l01108" name="l01108"></a><span class="lineno"> 1108</span>    DL_API <span class="keywordtype">void</span> iImg2col(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_out,</div>
<div class="line"><a id="l01109" name="l01109"></a><span class="lineno"> 1109</span>                         <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_out, <span class="keyword">const</span> <span class="keywordtype">size_t</span> C, <span class="keyword">const</span> <span class="keywordtype">size_t</span> K_h, <span class="keyword">const</span> <span class="keywordtype">size_t</span> K_w, <span class="keyword">const</span> <span class="keywordtype">size_t</span> stride,</div>
<div class="line"><a id="l01110" name="l01110"></a><span class="lineno"> 1110</span>                         <span class="keyword">const</span> <span class="keywordtype">size_t</span> pad, <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> batch);</div>
<div class="line"><a id="l01111" name="l01111"></a><span class="lineno"> 1111</span> </div>
<div class="line"><a id="l01112" name="l01112"></a><span class="lineno"> 1112</span>    DL_API <span class="keywordtype">void</span> iImg2colBackward(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_out,</div>
<div class="line"><a id="l01113" name="l01113"></a><span class="lineno"> 1113</span>              <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_out, <span class="keyword">const</span> <span class="keywordtype">size_t</span> C, <span class="keyword">const</span> <span class="keywordtype">size_t</span> K_h, <span class="keyword">const</span> <span class="keywordtype">size_t</span> K_w, <span class="keyword">const</span> <span class="keywordtype">size_t</span> stride,</div>
<div class="line"><a id="l01114" name="l01114"></a><span class="lineno"> 1114</span>              <span class="keyword">const</span> <span class="keywordtype">size_t</span> pad, <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> batch);</div>
<div class="line"><a id="l01115" name="l01115"></a><span class="lineno"> 1115</span> </div>
<div class="line"><a id="l01116" name="l01116"></a><span class="lineno"> 1116</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l01117" name="l01117"></a><span class="lineno"> 1117</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="line"><a id="l01118" name="l01118"></a><span class="lineno"> 1118</span>    tensorImg2col(<span class="keyword">const</span> T&amp; in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> K_h, <span class="keyword">const</span> <span class="keywordtype">size_t</span> K_w, <span class="keyword">const</span> <span class="keywordtype">size_t</span> stride,</div>
<div class="line"><a id="l01119" name="l01119"></a><span class="lineno"> 1119</span>                  <span class="keyword">const</span> <span class="keywordtype">size_t</span> pad) {</div>
<div class="line"><a id="l01120" name="l01120"></a><span class="lineno"> 1120</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_out = (in.shape().H() + 2 * pad - K_h) / stride + 1;</div>
<div class="line"><a id="l01121" name="l01121"></a><span class="lineno"> 1121</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_out = (in.shape().W() + 2 * pad - K_w) / stride + 1;</div>
<div class="line"><a id="l01122" name="l01122"></a><span class="lineno"> 1122</span>        T result({in.shape()[0], 1, H_out * W_out, in.shape().C() * K_h * K_w}, in.requiresGrad());</div>
<div class="line"><a id="l01123" name="l01123"></a><span class="lineno"> 1123</span>        iImg2col(result.data(), in.data(), H_out, W_out, in.shape().C(), K_h, K_w, stride, pad,</div>
<div class="line"><a id="l01124" name="l01124"></a><span class="lineno"> 1124</span>                 in.shape().H(), in.shape().W(), in.shape()[0]);</div>
<div class="line"><a id="l01125" name="l01125"></a><span class="lineno"> 1125</span>        <span class="keywordflow">if</span> (in.requiresGrad()) {</div>
<div class="line"><a id="l01126" name="l01126"></a><span class="lineno"> 1126</span>            iImg2col(result.grad(), in.grad(), H_out, W_out, in.shape().C(), K_h, K_w, stride, pad,</div>
<div class="line"><a id="l01127" name="l01127"></a><span class="lineno"> 1127</span>                     in.shape().H(), in.shape().W(), in.shape()[0]);</div>
<div class="line"><a id="l01128" name="l01128"></a><span class="lineno"> 1128</span>        }</div>
<div class="line"><a id="l01129" name="l01129"></a><span class="lineno"> 1129</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l01130" name="l01130"></a><span class="lineno"> 1130</span>    }</div>
<div class="line"><a id="l01131" name="l01131"></a><span class="lineno"> 1131</span> </div>
<div class="line"><a id="l01132" name="l01132"></a><span class="lineno"> 1132</span>    DL_API <span class="keywordtype">void</span> iCol2img(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in, <span class="keywordtype">size_t</span> H_out,</div>
<div class="line"><a id="l01133" name="l01133"></a><span class="lineno"> 1133</span>                         <span class="keywordtype">size_t</span> W_out, <span class="keywordtype">size_t</span> C_out, <span class="keywordtype">size_t</span> batches);</div>
<div class="line"><a id="l01134" name="l01134"></a><span class="lineno"> 1134</span> </div>
<div class="line"><a id="l01135" name="l01135"></a><span class="lineno"> 1135</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l01136" name="l01136"></a><span class="lineno"> 1136</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="line"><a id="l01137" name="l01137"></a><span class="lineno"> 1137</span>    tensorCol2img(<span class="keyword">const</span> T&amp; in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_out, <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_out) {</div>
<div class="line"><a id="l01138" name="l01138"></a><span class="lineno"> 1138</span>        T result({in.shape()[0], in.shape()[3], H_out, W_out}, in.requiresGrad());</div>
<div class="line"><a id="l01139" name="l01139"></a><span class="lineno"> 1139</span>        iCol2img(result.data(), in.data(), H_out, W_out, in.shape()[3], in.shape()[0]);</div>
<div class="line"><a id="l01140" name="l01140"></a><span class="lineno"> 1140</span>        <span class="keywordflow">if</span> (in.requiresGrad()) {</div>
<div class="line"><a id="l01141" name="l01141"></a><span class="lineno"> 1141</span>            iCol2img(result.grad(), in.grad(), H_out, W_out, in.shape()[3], in.shape()[0]);</div>
<div class="line"><a id="l01142" name="l01142"></a><span class="lineno"> 1142</span>        }</div>
<div class="line"><a id="l01143" name="l01143"></a><span class="lineno"> 1143</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l01144" name="l01144"></a><span class="lineno"> 1144</span>    }</div>
<div class="line"><a id="l01145" name="l01145"></a><span class="lineno"> 1145</span> </div>
<div class="line"><a id="l01146" name="l01146"></a><span class="lineno"> 1146</span>    DL_API <span class="keywordtype">void</span> iCol2imgBackward(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in, <span class="keywordtype">size_t</span> H_out, <span class="keywordtype">size_t</span> W_out, <span class="keywordtype">size_t</span> C_out, <span class="keywordtype">size_t</span> batches);</div>
<div class="line"><a id="l01147" name="l01147"></a><span class="lineno"> 1147</span> </div>
<div class="line"><a id="l01148" name="l01148"></a><span class="lineno"> 1148</span>    DL_API <span class="keywordtype">void</span> iAveragePooling(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in,</div>
<div class="line"><a id="l01149" name="l01149"></a><span class="lineno"> 1149</span>                                <span class="keywordtype">size_t</span> pool_size, <span class="keywordtype">size_t</span> stride, <span class="keywordtype">size_t</span> padding,</div>
<div class="line"><a id="l01150" name="l01150"></a><span class="lineno"> 1150</span>                                <span class="keywordtype">size_t</span> batches, <span class="keywordtype">size_t</span> channels, <span class="keywordtype">size_t</span> H_in, <span class="keywordtype">size_t</span> W_in,</div>
<div class="line"><a id="l01151" name="l01151"></a><span class="lineno"> 1151</span>                                <span class="keywordtype">size_t</span> H_out, <span class="keywordtype">size_t</span> W_out);</div>
<div class="line"><a id="l01152" name="l01152"></a><span class="lineno"> 1152</span> </div>
<div class="line"><a id="l01153" name="l01153"></a><span class="lineno"> 1153</span>    <span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a id="l01154" name="l01154"></a><span class="lineno"> 1154</span>    std::enable_if_t&lt;is_valid_tensor_type&lt;T&gt;::value, T&gt;</div>
<div class="line"><a id="l01155" name="l01155"></a><span class="lineno"> 1155</span>    tensorAveragePooling(<span class="keyword">const</span> T&amp; in, <span class="keyword">const</span> <span class="keywordtype">size_t</span> pool_size, <span class="keyword">const</span> <span class="keywordtype">size_t</span> stride,</div>
<div class="line"><a id="l01156" name="l01156"></a><span class="lineno"> 1156</span>                          <span class="keyword">const</span> <span class="keywordtype">size_t</span> padding) {</div>
<div class="line"><a id="l01157" name="l01157"></a><span class="lineno"> 1157</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> H_out = OUTPUT_DIM(in.shape().H(), pool_size, stride, padding);</div>
<div class="line"><a id="l01158" name="l01158"></a><span class="lineno"> 1158</span>        <span class="keyword">const</span> <span class="keywordtype">size_t</span> W_out = OUTPUT_DIM(in.shape().W(), pool_size, stride, padding);</div>
<div class="line"><a id="l01159" name="l01159"></a><span class="lineno"> 1159</span>        T result({in.shape()[0], in.shape()[1], H_out, W_out}, in.requiresGrad());</div>
<div class="line"><a id="l01160" name="l01160"></a><span class="lineno"> 1160</span>        iAveragePooling(result.data(), in.data(), pool_size, stride, padding,</div>
<div class="line"><a id="l01161" name="l01161"></a><span class="lineno"> 1161</span>                        in.shape()[0], in.shape()[1], in.shape().H(), in.shape().W(),</div>
<div class="line"><a id="l01162" name="l01162"></a><span class="lineno"> 1162</span>                        H_out, W_out);</div>
<div class="line"><a id="l01163" name="l01163"></a><span class="lineno"> 1163</span>        <span class="keywordflow">if</span> (in.requiresGrad()) {</div>
<div class="line"><a id="l01164" name="l01164"></a><span class="lineno"> 1164</span>            iAveragePooling(result.grad(), in.grad(), pool_size, stride, padding,</div>
<div class="line"><a id="l01165" name="l01165"></a><span class="lineno"> 1165</span>                            in.shape()[0], in.shape()[1], in.shape().H(), in.shape().W(),</div>
<div class="line"><a id="l01166" name="l01166"></a><span class="lineno"> 1166</span>                            H_out, W_out);</div>
<div class="line"><a id="l01167" name="l01167"></a><span class="lineno"> 1167</span>        }</div>
<div class="line"><a id="l01168" name="l01168"></a><span class="lineno"> 1168</span>        <span class="keywordflow">return</span> result;</div>
<div class="line"><a id="l01169" name="l01169"></a><span class="lineno"> 1169</span>    }</div>
<div class="line"><a id="l01170" name="l01170"></a><span class="lineno"> 1170</span> </div>
<div class="line"><a id="l01171" name="l01171"></a><span class="lineno"> 1171</span>    DL_API <span class="keywordtype">void</span> iAveragePoolingBackward(<span class="keywordtype">float</span>* out, <span class="keywordtype">float</span>* in,</div>
<div class="line"><a id="l01172" name="l01172"></a><span class="lineno"> 1172</span>                                <span class="keywordtype">size_t</span> pool_size, <span class="keywordtype">size_t</span> stride, <span class="keywordtype">size_t</span> padding,</div>
<div class="line"><a id="l01173" name="l01173"></a><span class="lineno"> 1173</span>                                <span class="keywordtype">size_t</span> batches, <span class="keywordtype">size_t</span> channels, <span class="keywordtype">size_t</span> H_in, <span class="keywordtype">size_t</span> W_in,</div>
<div class="line"><a id="l01174" name="l01174"></a><span class="lineno"> 1174</span>                                <span class="keywordtype">size_t</span> H_out, <span class="keywordtype">size_t</span> W_out);</div>
<div class="line"><a id="l01175" name="l01175"></a><span class="lineno"> 1175</span> </div>
<div class="line"><a id="l01176" name="l01176"></a><span class="lineno"> 1176</span>    DL_API <span class="keywordtype">void</span> iGlobalAvgPoolBackward(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* in,</div>
<div class="line"><a id="l01177" name="l01177"></a><span class="lineno"> 1177</span>                                       <span class="keywordtype">size_t</span> batches, <span class="keywordtype">size_t</span> channels, <span class="keywordtype">size_t</span> height, <span class="keywordtype">size_t</span> width);</div>
<div class="line"><a id="l01178" name="l01178"></a><span class="lineno"> 1178</span> </div>
<div class="line"><a id="l01179" name="l01179"></a><span class="lineno"> 1179</span>    DL_API <span class="keywordtype">void</span> iMaxPooling(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* position, <span class="keywordtype">float</span>* input,</div>
<div class="line"><a id="l01180" name="l01180"></a><span class="lineno"> 1180</span>                            <span class="keywordtype">size_t</span> pool_size, <span class="keywordtype">size_t</span> stride, <span class="keywordtype">size_t</span> padding,</div>
<div class="line"><a id="l01181" name="l01181"></a><span class="lineno"> 1181</span>                            <span class="keywordtype">size_t</span> batches, <span class="keywordtype">size_t</span> channels, <span class="keywordtype">size_t</span> H_in, <span class="keywordtype">size_t</span> W_in,</div>
<div class="line"><a id="l01182" name="l01182"></a><span class="lineno"> 1182</span>                            <span class="keywordtype">size_t</span> H_out, <span class="keywordtype">size_t</span> W_out);</div>
<div class="line"><a id="l01183" name="l01183"></a><span class="lineno"> 1183</span> </div>
<div class="line"><a id="l01184" name="l01184"></a><span class="lineno"> 1184</span>    DL_API <span class="keywordtype">void</span> iMaxPoolingBackward(<span class="keywordtype">float</span>* output, <span class="keywordtype">float</span>* position, <span class="keywordtype">float</span>* input,</div>
<div class="line"><a id="l01185" name="l01185"></a><span class="lineno"> 1185</span>                            <span class="keywordtype">size_t</span> pool_size, <span class="keywordtype">size_t</span> stride, <span class="keywordtype">size_t</span> padding,</div>
<div class="line"><a id="l01186" name="l01186"></a><span class="lineno"> 1186</span>                            <span class="keywordtype">size_t</span> batches, <span class="keywordtype">size_t</span> channels, <span class="keywordtype">size_t</span> H_in, <span class="keywordtype">size_t</span> W_in,</div>
<div class="line"><a id="l01187" name="l01187"></a><span class="lineno"> 1187</span>                            <span class="keywordtype">size_t</span> H_out, <span class="keywordtype">size_t</span> W_out);</div>
<div class="line"><a id="l01188" name="l01188"></a><span class="lineno"> 1188</span>}</div>
<div class="line"><a id="l01189" name="l01189"></a><span class="lineno"> 1189</span><span class="preprocessor">#endif </span><span class="comment">//TENSOROPERATIONS_CUH</span></div>
<div class="ttc" id="a_operation_kernels_8cuh_html"><div class="ttname"><a href="_operation_kernels_8cuh.html">OperationKernels.cuh</a></div><div class="ttdoc">CUDA Kernel Definitions for High-Performance Tensor Operations.</div></div>
<div class="ttc" id="a_tensor_8cuh_html"><div class="ttname"><a href="_tensor_8cuh.html">Tensor.cuh</a></div><div class="ttdoc">Definition of the Tensor class for GPU-based tensor operations.</div></div>
<div class="ttc" id="anamespacenz_1_1data_html"><div class="ttname"><a href="namespacenz_1_1data.html">nz::data</a></div><div class="ttdoc">Contains data structures and utilities for tensor operations in machine learning workflows.</div><div class="ttdef"><b>Definition</b> <a href="_dimension_8cuh_source.html#l00009">Dimension.cuh:9</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a1da5cd018533919ed5a750b14c7d6d71"><div class="ttname"><a href="namespacenz_1_1data.html#a1da5cd018533919ed5a750b14c7d6d71">nz::data::tensorElementwiseDivide</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, void &gt; tensorElementwiseDivide(T &amp;out, const T &amp;lhs, const T &amp;rhs)</div><div class="ttdoc">Performs element-wise division operation on tensors with broadcast compatibility.</div><div class="ttdef"><b>Definition</b> <a href="#l00928">TensorOperations.cuh:928</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a241d72367c091d0724b524f55289b2f0"><div class="ttname"><a href="namespacenz_1_1data.html#a241d72367c091d0724b524f55289b2f0">nz::data::HardSigmoid</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; HardSigmoid(T &amp;input, const float alpha=0.2f, const float beta=0.5f)</div><div class="ttdoc">Apply the Hard Sigmoid activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00281">TensorOperations.cuh:281</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a4706224f5e7c9a0cfe4c74983aaef1bd"><div class="ttname"><a href="namespacenz_1_1data.html#a4706224f5e7c9a0cfe4c74983aaef1bd">nz::data::ReLU</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; ReLU(T &amp;input)</div><div class="ttdoc">Apply the Rectified Linear Unit (ReLU) activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00050">TensorOperations.cuh:50</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a55e8a3fae0d75e214cd714fde8811543"><div class="ttname"><a href="namespacenz_1_1data.html#a55e8a3fae0d75e214cd714fde8811543">nz::data::Softmax</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; Softmax(T &amp;input)</div><div class="ttdoc">Compute the softmax function for a given input of type T.</div><div class="ttdef"><b>Definition</b> <a href="#l00364">TensorOperations.cuh:364</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a5a166a472b887c45fde9e5815f072234"><div class="ttname"><a href="namespacenz_1_1data.html#a5a166a472b887c45fde9e5815f072234">nz::data::tensorGeneralMatrixMul</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, void &gt; tensorGeneralMatrixMul(T &amp;out, const T &amp;lhs, const T &amp;rhs)</div><div class="ttdoc">Performs general matrix multiplication on tensors with broadcast compatibility.</div><div class="ttdef"><b>Definition</b> <a href="#l01000">TensorOperations.cuh:1000</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a7503b6894e8052ed54eb169550d135c0"><div class="ttname"><a href="namespacenz_1_1data.html#a7503b6894e8052ed54eb169550d135c0">nz::data::tensorMatrixSub</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, void &gt; tensorMatrixSub(T &amp;out, const T &amp;lhs, const T &amp;rhs)</div><div class="ttdoc">Performs matrix subtraction operation on tensors with broadcast compatibility.</div><div class="ttdef"><b>Definition</b> <a href="#l00858">TensorOperations.cuh:858</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a771a257e9dd839ce330e9b40fd1dda56"><div class="ttname"><a href="namespacenz_1_1data.html#a771a257e9dd839ce330e9b40fd1dda56">nz::data::operator/</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; operator/(T &amp;lhs, const float rhs)</div><div class="ttdoc">Overload the division operator to divide a tensor of type T by a scalar float.</div><div class="ttdef"><b>Definition</b> <a href="#l00689">TensorOperations.cuh:689</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a8730252e35a8e59aacb429efb0d6b828"><div class="ttname"><a href="namespacenz_1_1data.html#a8730252e35a8e59aacb429efb0d6b828">nz::data::operator*</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; operator*(T &amp;lhs, const float rhs)</div><div class="ttdoc">Overload the multiplication operator to multiply a tensor of type T by a scalar float.</div><div class="ttdef"><b>Definition</b> <a href="#l00604">TensorOperations.cuh:604</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_a8cf4ac2437dd67698684169bebb225d4"><div class="ttname"><a href="namespacenz_1_1data.html#a8cf4ac2437dd67698684169bebb225d4">nz::data::tensorMatrixAdd</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, void &gt; tensorMatrixAdd(T &amp;out, const T &amp;lhs, const T &amp;rhs)</div><div class="ttdoc">Performs matrix addition operation on tensors with broadcast compatibility.</div><div class="ttdef"><b>Definition</b> <a href="#l00787">TensorOperations.cuh:787</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_aa9a6da30ae0d71faa4ac32efb9dd1f2f"><div class="ttname"><a href="namespacenz_1_1data.html#aa9a6da30ae0d71faa4ac32efb9dd1f2f">nz::data::Sigmoid</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; Sigmoid(T &amp;input)</div><div class="ttdoc">Apply the sigmoid activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00088">TensorOperations.cuh:88</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_ab99b7c0a7c96a6de43f5b3f25af7f918"><div class="ttname"><a href="namespacenz_1_1data.html#ab99b7c0a7c96a6de43f5b3f25af7f918">nz::data::operator+</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; operator+(T &amp;lhs, const float rhs)</div><div class="ttdoc">Overload the addition operator to add a scalar float to a tensor of type T.</div><div class="ttdef"><b>Definition</b> <a href="#l00436">TensorOperations.cuh:436</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_ac716ac93e673f4706963d194e8ea523e"><div class="ttname"><a href="namespacenz_1_1data.html#ac716ac93e673f4706963d194e8ea523e">nz::data::HardSwish</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; HardSwish(T &amp;input, const float alpha=0.5f, const float beta=0.5f)</div><div class="ttdoc">Apply the Hard Swish activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00321">TensorOperations.cuh:321</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_ac8d64dd271e9a2e50682e733bd14ec19"><div class="ttname"><a href="namespacenz_1_1data.html#ac8d64dd271e9a2e50682e733bd14ec19">nz::data::transpose</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; transpose(const T &amp;in)</div><div class="ttdoc">Transposes a tensor with a valid tensor type.</div><div class="ttdef"><b>Definition</b> <a href="#l01073">TensorOperations.cuh:1073</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_acc650ae262aba5f1b0fa9cca8cae311e"><div class="ttname"><a href="namespacenz_1_1data.html#acc650ae262aba5f1b0fa9cca8cae311e">nz::data::operator-</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; operator-(T &amp;lhs, const float rhs)</div><div class="ttdoc">Overload the subtraction operator to subtract a scalar float from a tensor of type T.</div><div class="ttdef"><b>Definition</b> <a href="#l00520">TensorOperations.cuh:520</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_adae3ca94a8c203f1e444751a1cba0d6d"><div class="ttname"><a href="namespacenz_1_1data.html#adae3ca94a8c203f1e444751a1cba0d6d">nz::data::ELU</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; ELU(T &amp;input, const float alpha=1.0f)</div><div class="ttdoc">Apply the Exponential Linear Unit (ELU) activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00241">TensorOperations.cuh:241</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_ae563f53512549e2e54f066f7bf06622e"><div class="ttname"><a href="namespacenz_1_1data.html#ae563f53512549e2e54f066f7bf06622e">nz::data::Swish</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; Swish(T &amp;input)</div><div class="ttdoc">Apply the Swish activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00202">TensorOperations.cuh:202</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_ae8fb3052fdc2304fbb68c8dbad90e4ed"><div class="ttname"><a href="namespacenz_1_1data.html#ae8fb3052fdc2304fbb68c8dbad90e4ed">nz::data::LeakyReLU</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; LeakyReLU(T &amp;input, const float alpha=0.01f)</div><div class="ttdoc">Apply the Leaky Rectified Linear Unit (Leaky ReLU) activation function element-wise to an input tenso...</div><div class="ttdef"><b>Definition</b> <a href="#l00165">TensorOperations.cuh:165</a></div></div>
<div class="ttc" id="anamespacenz_1_1data_html_aed71109d5ed6ecdb7181afc751fa2aa1"><div class="ttname"><a href="namespacenz_1_1data.html#aed71109d5ed6ecdb7181afc751fa2aa1">nz::data::Tanh</a></div><div class="ttdeci">std::enable_if_t&lt; is_valid_tensor_type&lt; T &gt;::value, T &gt; Tanh(T &amp;input)</div><div class="ttdoc">Apply the hyperbolic tangent (tanh) activation function element-wise to an input tensor.</div><div class="ttdef"><b>Definition</b> <a href="#l00126">TensorOperations.cuh:126</a></div></div>
</div><!-- fragment --></div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by&#160;<a href="https://www.doxygen.org/index.html"><img class="footer" src="doxygen.svg" width="104" height="31" alt="doxygen"/></a> 1.12.0
</small></address>
</div><!-- doc-content -->
</body>
</html>
