<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en-US">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=11"/>
<meta name="generator" content="Doxygen 1.12.0"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>NeuZephyr: nz::nodes::calc::SoftmaxNode Class Reference</title>
<link rel="icon" href="NZ_logo2.png" type="image/png" />
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr id="projectrow">
  <td id="projectlogo"><img alt="Logo" src="NZ_logo2.png"/></td>
  <td id="projectalign">
   <div id="projectname">NeuZephyr
   </div>
   <div id="projectbrief">Simple DL Framework</div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.12.0 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
/* On DOM ready, initialize Doxygen's collapsible code-section (folding) behavior. */
$(function() { codefold.init(0); });
/* @license-end */
</script>
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="pages.html"><span>Related&#160;Pages</span></a></li>
      <li><a href="namespaces.html"><span>Namespaces</span></a></li>
      <li class="current"><a href="annotated.html"><span>Classes</span></a></li>
      <li><a href="files.html"><span>Files</span></a></li>
    </ul>
  </div>
  <div id="navrow2" class="tabs2">
    <ul class="tablist">
      <li><a href="annotated.html"><span>Class&#160;List</span></a></li>
      <li><a href="classes.html"><span>Class&#160;Index</span></a></li>
      <li><a href="inherits.html"><span>Class&#160;Hierarchy</span></a></li>
      <li><a href="functions.html"><span>Class&#160;Members</span></a></li>
    </ul>
  </div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&amp;dn=expat.txt MIT */
/* On DOM ready, set up the resizable panel layout handler provided by resize.js. */
$(function(){ initResizable(false); });
/* @license-end */
</script>
<div id="nav-path" class="navpath">
  <ul>
<li class="navelem"><b>nz</b></li><li class="navelem"><a class="el" href="namespacenz_1_1nodes.html">nodes</a></li><li class="navelem"><a class="el" href="namespacenz_1_1nodes_1_1calc.html">calc</a></li><li class="navelem"><a class="el" href="classnz_1_1nodes_1_1calc_1_1_softmax_node.html">SoftmaxNode</a></li>  </ul>
</div>
</div><!-- top -->
<div id="doc-content">
<div class="header">
  <div class="summary">
<a href="#pub-methods">Public Member Functions</a> &#124;
<a href="classnz_1_1nodes_1_1calc_1_1_softmax_node-members.html">List of all members</a>  </div>
  <div class="headertitle"><div class="title">nz::nodes::calc::SoftmaxNode Class Reference</div></div>
</div><!--header-->
<div class="contents">

<p>Implements the Softmax activation function as a node in a neural network computational graph.  
 <a href="#details">More...</a></p>
<div class="dynheader">
Inheritance diagram for nz::nodes::calc::SoftmaxNode:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1nodes_1_1calc_1_1_softmax_node__inherit__graph.png" border="0" usemap="#anz_1_1nodes_1_1calc_1_1_softmax_node_inherit__map" alt="Inheritance graph"/></div>
<map name="anz_1_1nodes_1_1calc_1_1_softmax_node_inherit__map" id="anz_1_1nodes_1_1calc_1_1_softmax_node_inherit__map">
<area shape="rect" title="Implements the Softmax activation function as a node in a neural network computational graph." alt="" coords="5,80,204,107"/>
<area shape="rect" href="classnz_1_1nodes_1_1_node.html" title="Base class for nodes in a neural network or computational graph." alt="" coords="46,5,163,32"/>
<area shape="poly" title=" " alt="" coords="107,48,107,80,102,80,102,48"/>
</map>
<center><span class="legend">[<a href="graph_legend.html">legend</a>]</span></center></div>
<div class="dynheader">
Collaboration diagram for nz::nodes::calc::SoftmaxNode:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1nodes_1_1calc_1_1_softmax_node__coll__graph.png" border="0" usemap="#anz_1_1nodes_1_1calc_1_1_softmax_node_coll__map" alt="Collaboration graph"/></div>
<map name="anz_1_1nodes_1_1calc_1_1_softmax_node_coll__map" id="anz_1_1nodes_1_1calc_1_1_softmax_node_coll__map">
<area shape="rect" title="Implements the Softmax activation function as a node in a neural network computational graph." alt="" coords="5,80,204,107"/>
<area shape="rect" href="classnz_1_1nodes_1_1_node.html" title="Base class for nodes in a neural network or computational graph." alt="" coords="46,5,163,32"/>
<area shape="poly" title=" " alt="" coords="107,48,107,80,102,80,102,48"/>
</map>
<center><span class="legend">[<a href="graph_legend.html">legend</a>]</span></center></div>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="pub-methods" name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:a6bd70cb3436435bac2055e86dfdb078b" id="r_a6bd70cb3436435bac2055e86dfdb078b"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a6bd70cb3436435bac2055e86dfdb078b">SoftmaxNode</a> (<a class="el" href="classnz_1_1nodes_1_1_node.html">Node</a> *input)</td></tr>
<tr class="memdesc:a6bd70cb3436435bac2055e86dfdb078b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Constructor to initialize a <code><a class="el" href="classnz_1_1nodes_1_1calc_1_1_softmax_node.html" title="Implements the Softmax activation function as a node in a neural network computational graph.">SoftmaxNode</a></code> for applying the Softmax activation function.  <br /></td></tr>
<tr class="separator:a6bd70cb3436435bac2055e86dfdb078b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a93f7d936ff487db8e7dceb6ee0cdc38e" id="r_a93f7d936ff487db8e7dceb6ee0cdc38e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e">forward</a> () override</td></tr>
<tr class="memdesc:a93f7d936ff487db8e7dceb6ee0cdc38e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs the forward pass of the Softmax operation.  <br /></td></tr>
<tr class="separator:a93f7d936ff487db8e7dceb6ee0cdc38e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa991e3bde7a3a5edbee62fab1cabba23" id="r_aa991e3bde7a3a5edbee62fab1cabba23"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23">backward</a> () override</td></tr>
<tr class="memdesc:aa991e3bde7a3a5edbee62fab1cabba23"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs the backward pass of the Softmax operation.  <br /></td></tr>
<tr class="separator:aa991e3bde7a3a5edbee62fab1cabba23"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="inherit_header pub_methods_classnz_1_1nodes_1_1_node"><td colspan="2" onclick="javascript:dynsection.toggleInherit('pub_methods_classnz_1_1nodes_1_1_node')"><img src="closed.png" alt="-"/>&#160;Public Member Functions inherited from <a class="el" href="classnz_1_1nodes_1_1_node.html">nz::nodes::Node</a></td></tr>
<tr class="memitem:a687ee9c34eb61f8f28caa201ca42696e inherit pub_methods_classnz_1_1nodes_1_1_node" id="r_a687ee9c34eb61f8f28caa201ca42696e"><td class="memItemLeft" align="right" valign="top">virtual void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classnz_1_1nodes_1_1_node.html#a687ee9c34eb61f8f28caa201ca42696e">print</a> (std::ostream &amp;os) const</td></tr>
<tr class="memdesc:a687ee9c34eb61f8f28caa201ca42696e inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="mdescLeft">&#160;</td><td class="mdescRight">Prints the type, data, and gradient of the node.  <br /></td></tr>
<tr class="separator:a687ee9c34eb61f8f28caa201ca42696e inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a9b85913e12422bb4ac2fff483427bb47 inherit pub_methods_classnz_1_1nodes_1_1_node" id="r_a9b85913e12422bb4ac2fff483427bb47"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classnz_1_1nodes_1_1_node.html#a9b85913e12422bb4ac2fff483427bb47">dataInject</a> (Tensor::value_type *data, bool grad=false) const</td></tr>
<tr class="memdesc:a9b85913e12422bb4ac2fff483427bb47 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="mdescLeft">&#160;</td><td class="mdescRight">Injects data into a relevant tensor object, optionally setting its gradient requirement.  <br /></td></tr>
<tr class="separator:a9b85913e12422bb4ac2fff483427bb47 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a609f1730085dd1d31e0ddcbbae48a065 inherit pub_methods_classnz_1_1nodes_1_1_node" id="r_a609f1730085dd1d31e0ddcbbae48a065"><td class="memTemplParams" colspan="2">template&lt;typename Iterator &gt; </td></tr>
<tr class="memitem:a609f1730085dd1d31e0ddcbbae48a065 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="memTemplItemLeft" align="right" valign="top">void&#160;</td><td class="memTemplItemRight" valign="bottom"><a class="el" href="classnz_1_1nodes_1_1_node.html#a609f1730085dd1d31e0ddcbbae48a065">dataInject</a> (Iterator begin, Iterator end, const bool grad=false) const</td></tr>
<tr class="memdesc:a609f1730085dd1d31e0ddcbbae48a065 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="mdescLeft">&#160;</td><td class="mdescRight">Injects data from an iterator range into the output tensor of the InputNode, optionally setting its gradient requirement.  <br /></td></tr>
<tr class="separator:a609f1730085dd1d31e0ddcbbae48a065 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:af8b4bab3271df92ca1f0914f7a97b1e8 inherit pub_methods_classnz_1_1nodes_1_1_node" id="r_af8b4bab3271df92ca1f0914f7a97b1e8"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classnz_1_1nodes_1_1_node.html#af8b4bab3271df92ca1f0914f7a97b1e8">dataInject</a> (const std::initializer_list&lt; Tensor::value_type &gt; &amp;data, bool grad=false) const</td></tr>
<tr class="memdesc:af8b4bab3271df92ca1f0914f7a97b1e8 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="mdescLeft">&#160;</td><td class="mdescRight">Injects data from a std::initializer_list into the output tensor of the <a class="el" href="classnz_1_1nodes_1_1_node.html" title="Base class for nodes in a neural network or computational graph.">Node</a>, optionally setting its gradient requirement.  <br /></td></tr>
<tr class="separator:af8b4bab3271df92ca1f0914f7a97b1e8 inherit pub_methods_classnz_1_1nodes_1_1_node"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><p>Implements the Softmax activation function as a node in a neural network computational graph. </p>
<p>The <code><a class="el" href="classnz_1_1nodes_1_1calc_1_1_softmax_node.html" title="Implements the Softmax activation function as a node in a neural network computational graph.">SoftmaxNode</a></code> class applies the Softmax activation function to the input tensor, transforming it into a probability distribution. This node is commonly used as the final layer in classification networks to convert raw scores into probabilities.</p>
<p>The Softmax function is defined as: </p><div class="fragment"><div class="line"><a class="code hl_function" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3">Softmax</a>(x_i) = exp(x_i) / sum(exp(x_j))</div>
<div class="ttc" id="anamespacenz_1_1krnl_html_adbafc409d57fa0a9d78ecac5bf7b10a3"><div class="ttname"><a href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3">nz::krnl::Softmax</a></div><div class="ttdeci">void Softmax(dim3 gridDim, dim3 blockDim, float *out, float *in, float exp_sum_of_input, unsigned long long n, size_t offset=0)</div><div class="ttdoc">Kernel function to apply the Softmax function on the GPU.</div><div class="ttdef"><b>Definition</b> <a href="_operation_kernels_8cu_source.html#l00525">OperationKernels.cu:525</a></div></div>
</div><!-- fragment --><p> where x_i is the i-th element of the input vector and the sum is over all elements j.</p>
<p>Key features and characteristics:</p><ul>
<li><b>Probability Output</b>: Transforms input into a probability distribution where all elements sum to 1.</li>
<li><b>Numerical Behavior</b>: Aims to limit overflow during the exponential computation; see the limitations below regarding extreme input values.</li>
<li><b>Shape Preservation</b>: The output tensor maintains the same shape as the input tensor.</li>
<li><b>GPU Acceleration</b>: Utilizes CUDA for efficient parallel computation on GPU.</li>
<li><b>Gradient Computation</b>: Supports backward pass for gradient calculation in neural network training.</li>
<li><b>Exponential-Sum Member</b>: Maintains an exponential-sum member (initialized in the constructor, computed during the forward pass) used to normalize the output.</li>
</ul>
<p>Implementation details:</p><ul>
<li>The constructor initializes the exponential-sum member that the forward pass later fills in.</li>
<li>The forward pass computes the sum of exponentials and then applies the Softmax function using that sum.</li>
<li>The backward pass computes the full Jacobian matrix for accurate gradient calculation.</li>
<li>CUDA kernels are used for parallel computation in both forward and backward passes.</li>
</ul>
<p>Use cases:</p><ul>
<li>Output layer of multi-class classification networks.</li>
<li>Attention mechanisms in sequence-to-sequence models.</li>
<li>Any scenario requiring normalization of a vector into a probability distribution.</li>
</ul>
<p>Limitations and considerations:</p><ul>
<li>May suffer from underflow or overflow for extreme input values.</li>
<li>The full Jacobian computation in backward pass can be memory-intensive for large outputs.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This implementation assumes the input is a 1D or 2D tensor. For higher dimensions, consider using a dimension-specific Softmax implementation.</li>
<li>The node automatically handles gradient tracking based on the input tensor's requirements.</li>
<li>For very large inputs, consider using LogSoftmax for improved numerical stability.</li>
</ul>
</dd></dl>
<dl class="section see"><dt>See also</dt><dd><a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e" title="Performs the forward pass of the Softmax operation.">forward()</a> for the <a class="el" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3" title="Kernel function to apply the Softmax function on the GPU.">Softmax</a> computation in the <a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e" title="Performs the forward pass of the Softmax operation.">forward</a> pass. </dd>
<dd>
<a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23" title="Performs the backward pass of the Softmax operation.">backward()</a> for gradient computation in the <a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23" title="Performs the backward pass of the Softmax operation.">backward</a> pass.</dd></dl>
<h3><a class="anchor" id="autotoc_md104"></a>
Usage Example:</h3>
<div class="fragment"><div class="line"><span class="comment">// Creating a Softmax node in a neural network</span></div>
<div class="line">InputNode input({1, 1, 1, 5}, <span class="keyword">true</span>);  <span class="comment">// Input node with shape {1, 1, 1, 5}</span></div>
<div class="line">std::vector&lt;float&gt; logits{2.0f, 1.0f, 0.1f, 3.0f, -1.0f};</div>
<div class="line">input.output-&gt;dataInject(logits.begin(), logits.end());</div>
<div class="line"> </div>
<div class="line"><a class="code hl_function" href="#a6bd70cb3436435bac2055e86dfdb078b">SoftmaxNode</a> softmax(&amp;input);</div>
<div class="line">softmax.forward();</div>
<div class="line"> </div>
<div class="line"><span class="comment">// The output tensor now contains the probability distribution</span></div>
<div class="line">std::cout &lt;&lt; <span class="stringliteral">&quot;Probabilities: &quot;</span> &lt;&lt; *softmax.output &lt;&lt; std::endl;</div>
<div class="line"> </div>
<div class="line"><span class="comment">// Backward pass for gradient computation</span></div>
<div class="line">softmax.backward();</div>
<div class="ttc" id="aclassnz_1_1nodes_1_1calc_1_1_softmax_node_html_a6bd70cb3436435bac2055e86dfdb078b"><div class="ttname"><a href="#a6bd70cb3436435bac2055e86dfdb078b">nz::nodes::calc::SoftmaxNode::SoftmaxNode</a></div><div class="ttdeci">SoftmaxNode(Node *input)</div><div class="ttdoc">Constructor to initialize a SoftmaxNode for applying the Softmax activation function.</div><div class="ttdef"><b>Definition</b> <a href="_nodes_8cu_source.html#l00524">Nodes.cu:524</a></div></div>
</div><!-- fragment --><dl class="section author"><dt>Author</dt><dd>Mgepahmge (<a href="https://github.com/Mgepahmge">https://github.com/Mgepahmge</a>)</dd></dl>
<dl class="section date"><dt>Date</dt><dd>2024/12/5 </dd></dl>

<p class="definition">Definition at line <a class="el" href="_nodes_8cuh_source.html#l03152">3152</a> of file <a class="el" href="_nodes_8cuh_source.html">Nodes.cuh</a>.</p>
</div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2>
<a id="a6bd70cb3436435bac2055e86dfdb078b" name="a6bd70cb3436435bac2055e86dfdb078b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a6bd70cb3436435bac2055e86dfdb078b">&#9670;&#160;</a></span>SoftmaxNode()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">nz::nodes::calc::SoftmaxNode::SoftmaxNode </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="classnz_1_1nodes_1_1_node.html">Node</a> *</td>          <td class="paramname"><span class="paramname"><em>input</em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">explicit</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Constructor to initialize a <code><a class="el" href="classnz_1_1nodes_1_1calc_1_1_softmax_node.html" title="Implements the Softmax activation function as a node in a neural network computational graph.">SoftmaxNode</a></code> for applying the Softmax activation function. </p>
<p>The constructor initializes a <code><a class="el" href="classnz_1_1nodes_1_1calc_1_1_softmax_node.html" title="Implements the Softmax activation function as a node in a neural network computational graph.">SoftmaxNode</a></code>, which applies the Softmax activation function to an input tensor. It establishes a connection to the input node, initializes the output tensor, and sets up the node for Softmax computation.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">input</td><td>A pointer to the input node. Its <code>output</code> tensor will have the Softmax activation applied.</td></tr>
  </table>
  </dd>
</dl>
<p>The Softmax activation function is defined as: </p><div class="fragment"><div class="line"><a class="code hl_function" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3">Softmax</a>(x_i) = exp(x_i) / sum(exp(x_j))</div>
</div><!-- fragment --><p> where x_i is the i-th element of the input vector and the sum is over all elements j.</p>
<p>Key operations performed by the constructor:</p><ul>
<li>Initializes the <code>sum</code> member variable to 0, which may be used in future computations.</li>
<li>Adds the input node to the <code>inputs</code> vector, establishing the connection in the computational graph.</li>
<li>Determines if gradient tracking is required based on the input tensor's <code>requiresGrad</code> property.</li>
<li>Initializes the <code>output</code> tensor with the same shape as the input tensor and appropriate gradient tracking.</li>
<li>Sets the node type to "Softmax" for identification in the computational graph.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>The Softmax function normalizes the input to a probability distribution over predicted output classes.</li>
<li>This constructor only sets up the node structure; the actual Softmax computation is performed in the forward pass.</li>
<li>Gradient tracking for the output tensor is automatically set based on the input tensor's requirements.</li>
<li>The <code>sum</code> variable initialized here may be used for optimizations in the forward or backward passes.</li>
</ul>
</dd></dl>
<dl class="section see"><dt>See also</dt><dd><a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e" title="Performs the forward pass of the Softmax operation.">forward()</a> for the implementation of the <a class="el" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3" title="Kernel function to apply the Softmax function on the GPU.">Softmax</a> computation in the <a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e" title="Performs the forward pass of the Softmax operation.">forward</a> pass. </dd>
<dd>
<a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23" title="Performs the backward pass of the Softmax operation.">backward()</a> for the gradient computation in the <a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23" title="Performs the backward pass of the Softmax operation.">backward</a> pass.</dd></dl>
<dl class="section author"><dt>Author</dt><dd>Mgepahmge (<a href="https://github.com/Mgepahmge">https://github.com/Mgepahmge</a>)</dd></dl>
<dl class="section date"><dt>Date</dt><dd>2023/12/06 </dd></dl>

<p class="definition">Definition at line <a class="el" href="_nodes_8cu_source.html#l00524">524</a> of file <a class="el" href="_nodes_8cu_source.html">Nodes.cu</a>.</p>

</div>
</div>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="aa991e3bde7a3a5edbee62fab1cabba23" name="aa991e3bde7a3a5edbee62fab1cabba23"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa991e3bde7a3a5edbee62fab1cabba23">&#9670;&#160;</a></span>backward()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void nz::nodes::calc::SoftmaxNode::backward </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">override</span><span class="mlabel">virtual</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Performs the backward pass of the Softmax operation. </p>
<p>This method implements the gradient computation for the Softmax activation function. It calculates the Jacobian matrix of the Softmax function and then uses it to compute the gradient with respect to the input.</p>
<p>The backward pass is implemented in two main steps:</p><ol type="1">
<li>Calculation of the Softmax Jacobian:<ul>
<li>Computes the Jacobian matrix for the Softmax function using CUDA parallelization.</li>
</ul>
</li>
<li>Gradient computation:<ul>
<li>Performs matrix multiplication between the Jacobian and the output gradient to obtain the input gradient.</li>
</ul>
</li>
</ol>
<p>The Jacobian of the Softmax function is defined as: </p><div class="fragment"><div class="line">J_ij = softmax_i * (δ_ij - softmax_j)</div>
</div><!-- fragment --><p> where δ_ij is the Kronecker delta.</p>
<p>Key operations:</p><ul>
<li>Initialization of the Jacobian tensor.</li>
<li>CUDA kernel setup for parallel computation of the Jacobian.</li>
<li>Execution of the SoftmaxJacobian CUDA kernel to compute the Jacobian matrix.</li>
<li>CUDA kernel setup for matrix multiplication.</li>
<li>Execution of the GeneralMatrixMul CUDA kernel to compute the final gradient.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This implementation utilizes CUDA for efficient parallel computation on GPU.</li>
<li>The Jacobian computation and matrix multiplication are performed entirely on the GPU.</li>
<li>The method assumes that the output gradient (output-&gt;grad()) has already been set.</li>
<li>The computed gradient is stored in the input node's gradient (inputs[0]-&gt;output-&gt;grad()).</li>
</ul>
</dd></dl>
<dl class="section see"><dt>See also</dt><dd><a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e" title="Performs the forward pass of the Softmax operation.">forward()</a> for the corresponding <a class="el" href="#a93f7d936ff487db8e7dceb6ee0cdc38e" title="Performs the forward pass of the Softmax operation.">forward</a> pass implementation.</dd></dl>
<dl class="section author"><dt>Author</dt><dd>Mgepahmge (<a href="https://github.com/Mgepahmge">https://github.com/Mgepahmge</a>)</dd></dl>
<dl class="section date"><dt>Date</dt><dd>2023/12/06 </dd></dl>

<p>Implements <a class="el" href="classnz_1_1nodes_1_1_node.html#a0a9ecbaa3d790ba38e8218aca7837fd0">nz::nodes::Node</a>.</p>

<p class="definition">Definition at line <a class="el" href="_nodes_8cu_source.html#l00538">538</a> of file <a class="el" href="_nodes_8cu_source.html">Nodes.cu</a>.</p>

</div>
</div>
<a id="a93f7d936ff487db8e7dceb6ee0cdc38e" name="a93f7d936ff487db8e7dceb6ee0cdc38e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a93f7d936ff487db8e7dceb6ee0cdc38e">&#9670;&#160;</a></span>forward()</h2>

<div class="memitem">
<div class="memproto">
<table class="mlabels">
  <tr>
  <td class="mlabels-left">
      <table class="memname">
        <tr>
          <td class="memname">void nz::nodes::calc::SoftmaxNode::forward </td>
          <td>(</td>
          <td class="paramname"><span class="paramname"><em></em></span></td><td>)</td>
          <td></td>
        </tr>
      </table>
  </td>
  <td class="mlabels-right">
<span class="mlabels"><span class="mlabel">override</span><span class="mlabel">virtual</span></span>  </td>
  </tr>
</table>
</div><div class="memdoc">

<p>Performs the forward pass of the Softmax operation. </p>
<p>This method implements the forward computation for the Softmax activation function. It calculates the exponential sum of the input elements and then applies the Softmax function to each element.</p>
<p>The forward pass is implemented in two main steps:</p><ol type="1">
<li>Calculation of the sum of exponentials:<ul>
<li>Uses CUDA parallelization to compute exp(x) for each input element.</li>
<li>Accumulates these exponentials to get the sum for normalization.</li>
</ul>
</li>
<li>Application of the Softmax function:<ul>
<li>Computes exp(x_i) / sum(exp(x_j)) for each element using CUDA.</li>
</ul>
</li>
</ol>
<p>The Softmax function is defined as: </p><div class="fragment"><div class="line"><a class="code hl_function" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3">Softmax</a>(x_i) = exp(x_i) / sum(exp(x_j))</div>
</div><!-- fragment --><p> where x_i is the i-th element of the input vector and the sum is over all elements j.</p>
<p>Key operations:</p><ul>
<li>CUDA kernel setup for parallel computation.</li>
<li>Memory allocation and management for intermediate results.</li>
<li>Execution of the SummationExp CUDA kernel for exponential sum calculation.</li>
<li>Data transfer between GPU and CPU for sum accumulation.</li>
<li>Execution of the Softmax CUDA kernel for final output computation.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><ul>
<li>This implementation utilizes CUDA for efficient parallel computation on GPU.</li>
<li>The method handles both the exponential sum calculation and the final Softmax normalization.</li>
<li>Temporary memory is allocated and freed for intermediate calculations.</li>
<li>The final output is stored in the node's output tensor.</li>
</ul>
</dd></dl>
<dl class="section see"><dt>See also</dt><dd><a class="el" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3" title="Kernel function to apply the Softmax function on the GPU.">Softmax</a> CUDA kernel for the implementation of the final <a class="el" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3" title="Kernel function to apply the Softmax function on the GPU.">Softmax</a> computation. </dd>
<dd>
<a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23" title="Performs the backward pass of the Softmax operation.">backward()</a> for the corresponding <a class="el" href="#aa991e3bde7a3a5edbee62fab1cabba23" title="Performs the backward pass of the Softmax operation.">backward</a> pass implementation.</dd></dl>
<dl class="section author"><dt>Author</dt><dd>Mgepahmge (<a href="https://github.com/Mgepahmge">https://github.com/Mgepahmge</a>)</dd></dl>
<dl class="section date"><dt>Date</dt><dd>2023/12/06 </dd></dl>

<p>Implements <a class="el" href="classnz_1_1nodes_1_1_node.html#a8a828c2e91a4aa2a9ab7b94554e4685b">nz::nodes::Node</a>.</p>

<p class="definition">Definition at line <a class="el" href="_nodes_8cu_source.html#l00534">534</a> of file <a class="el" href="_nodes_8cu_source.html">Nodes.cu</a>.</p>
<div class="dynheader">
Here is the call graph for this function:</div>
<div class="dyncontent">
<div class="center"><img src="classnz_1_1nodes_1_1calc_1_1_softmax_node_a93f7d936ff487db8e7dceb6ee0cdc38e_cgraph.png" border="0" usemap="#aclassnz_1_1nodes_1_1calc_1_1_softmax_node_a93f7d936ff487db8e7dceb6ee0cdc38e_cgraph" alt=""/></div>
<map name="aclassnz_1_1nodes_1_1calc_1_1_softmax_node_a93f7d936ff487db8e7dceb6ee0cdc38e_cgraph" id="aclassnz_1_1nodes_1_1calc_1_1_softmax_node_a93f7d936ff487db8e7dceb6ee0cdc38e_cgraph">
<area shape="rect" title="Performs the forward pass of the Softmax operation." alt="" coords="5,39,173,81"/>
<area shape="rect" href="namespacenz_1_1krnl.html#adbafc409d57fa0a9d78ecac5bf7b10a3" title="Kernel function to apply the Softmax function on the GPU." alt="" coords="221,47,342,73"/>
<area shape="poly" title=" " alt="" coords="173,57,205,57,205,63,173,63"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#ab4b2eb422e0e1ee44bdfdc0eb94457ce" title="Returns a reference to the singleton instance of the StreamManager." alt="" coords="390,5,575,48"/>
<area shape="poly" title=" " alt="" coords="342,47,374,42,375,47,343,53"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a46ce59b45de432842454aadf00b93791" title="Asynchronously submits a CUDA kernel with stream&#45;ordered dependency management." alt="" coords="390,72,575,115"/>
<area shape="poly" title=" " alt="" coords="343,67,375,73,374,78,342,73"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#a1de1cf3aadea137faf90a2f9b4b7abe2" title="Acquires CUDA stream from pool using round&#45;robin scheduling." alt="" coords="623,39,809,81"/>
<area shape="poly" title=" " alt="" coords="575,77,607,73,608,78,576,83"/>
<area shape="rect" href="classnz_1_1cu_strm_1_1_stream_manager.html#adb1078a67c6e38932d7d58c2adb05ec0" title="Synchronizes CUDA stream execution until data writes complete." alt="" coords="623,105,809,148"/>
<area shape="poly" title=" " alt="" coords="576,104,608,109,607,114,575,109"/>
</map>
</div>

</div>
</div>
<hr/>The documentation for this class was generated from the following files:<ul>
<li>D:/Users/Mgepahmge/Documents/C Program/NeuZephyr/include/NeuZephyr/<a class="el" href="_nodes_8cuh_source.html">Nodes.cuh</a></li>
<li>D:/Users/Mgepahmge/Documents/C Program/NeuZephyr/src/<a class="el" href="_nodes_8cu_source.html">Nodes.cu</a></li>
</ul>
</div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by&#160;<a href="https://www.doxygen.org/index.html"><img class="footer" src="doxygen.svg" width="104" height="31" alt="doxygen"/></a> 1.12.0
</small></address>
</div><!-- doc-content -->
</body>
</html>
