<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="application/xhtml+xml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.9.1"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>AIfES 2: aimath_f32_default.h File Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
  // Configuration for MathJax v2 (loaded below from the jsdelivr CDN):
  // enable the tex2jax preprocessor, take TeX input, render via the
  // HTML-CSS output jax. Parsed by MathJax itself, not the browser.
  MathJax.Hub.Config({
    extensions: ["tex2jax.js"],
    jax: ["input/TeX","output/HTML-CSS"],
});
</script>
<script type="text/javascript" async="async" src="https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 56px;">
  <td id="projectlogo"><img alt="Logo" src="AIfES_logo_small.png"/></td>
  <td id="projectalign" style="padding-left: 0.5em;">
   <div id="projectname">AIfES 2
   &#160;<span id="projectnumber">2.0.0</span>
   </div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.9.1 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
// Global search widget (defined in search/search.js); this instance is
// referenced by the MSearchSelectWindow event handlers further down
// (OnSearchSelectShow/OnSearchSelectHide/OnSearchSelectKey).
var searchBox = new SearchBox("searchBox", "search",false,'Search','.html');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
// On DOM ready: build the top menu bar (menu.js/menudata.js) and then
// initialise the search index (search/search.js).
// NOTE(review): the inner $(document).ready() is redundant inside a
// $(function(){...}) handler, but harmless — generated by Doxygen.
$(function() {
  initMenu('',true,false,'search.php','Search');
  $(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
  <div id="nav-tree">
    <div id="nav-tree-contents">
      <div id="nav-sync" class="sync"></div>
    </div>
  </div>
  <div id="splitbar" style="-moz-user-select:none;" 
       class="ui-resizable-handle">
  </div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
// On DOM ready: select/expand this page's entry in the side navigation
// tree (navtree.js) and enable the draggable splitter between the tree
// and the content pane (resize.js).
$(document).ready(function(){initNavTree('aimath__f32__default_8h.html',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
        name="MSearchResults" id="MSearchResults"
        title="Search results">
</iframe>
</div>

<div class="header">
  <div class="summary">
<a href="#func-members">Functions</a>  </div>
  <div class="headertitle">
<div class="title">aimath_f32_default.h File Reference</div>  </div>
</div><!--header-->
<div class="contents">

<p>Math functions for <a class="el" href="aimath__f32_8h.html">F32 </a> data type, default implementation.  
<a href="#details">More...</a></p>

<p><a href="aimath__f32__default_8h_source.html">Go to the source code of this file.</a></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="func-members"></a>
Functions</h2></td></tr>
<tr class="memitem:a1ce664be9a6c513d1fb7cae44d1d7e17"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17">aimath_f32_default_linear</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, const <a class="el" href="structaitensor.html">aitensor_t</a> *c, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a1ce664be9a6c513d1fb7cae44d1d7e17"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b and adds a vector c to each row.  <a href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17">More...</a><br /></td></tr>
<tr class="separator:a1ce664be9a6c513d1fb7cae44d1d7e17"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a6fd225be785420477239f8e72e26a77f"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a6fd225be785420477239f8e72e26a77f">aimath_f32_default_linear_at</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, const <a class="el" href="structaitensor.html">aitensor_t</a> *c, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a6fd225be785420477239f8e72e26a77f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b and adds a vector c to each row.  <a href="aimath__f32__default_8h.html#a6fd225be785420477239f8e72e26a77f">More...</a><br /></td></tr>
<tr class="separator:a6fd225be785420477239f8e72e26a77f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a234496e739207585fd533a33be9f1292"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a234496e739207585fd533a33be9f1292">aimath_f32_default_linear_bt</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, const <a class="el" href="structaitensor.html">aitensor_t</a> *c, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a234496e739207585fd533a33be9f1292"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b (transposed) and adds a vector c to each row.  <a href="aimath__f32__default_8h.html#a234496e739207585fd533a33be9f1292">More...</a><br /></td></tr>
<tr class="separator:a234496e739207585fd533a33be9f1292"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:adc591f099e035057257fa4e64a6efbb7"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#adc591f099e035057257fa4e64a6efbb7">aimath_f32_default_linear_atrt</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, const <a class="el" href="structaitensor.html">aitensor_t</a> *c, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:adc591f099e035057257fa4e64a6efbb7"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication with transposed result of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b and adds a vector c to each row.  <a href="aimath__f32__default_8h.html#adc591f099e035057257fa4e64a6efbb7">More...</a><br /></td></tr>
<tr class="separator:adc591f099e035057257fa4e64a6efbb7"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a76f4b7504be8560f3b8ce8fd651402a7"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7">aimath_f32_default_mat_mul</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a76f4b7504be8560f3b8ce8fd651402a7"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b.  <a href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7">More...</a><br /></td></tr>
<tr class="separator:a76f4b7504be8560f3b8ce8fd651402a7"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ade94370f491c7e34a95c892d36611548"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#ade94370f491c7e34a95c892d36611548">aimath_f32_default_mat_mul_at</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:ade94370f491c7e34a95c892d36611548"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b.  <a href="aimath__f32__default_8h.html#ade94370f491c7e34a95c892d36611548">More...</a><br /></td></tr>
<tr class="separator:ade94370f491c7e34a95c892d36611548"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a04af5314c51e75495016026343dec02e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a04af5314c51e75495016026343dec02e">aimath_f32_default_mat_mul_bt</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a04af5314c51e75495016026343dec02e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b (transposed).  <a href="aimath__f32__default_8h.html#a04af5314c51e75495016026343dec02e">More...</a><br /></td></tr>
<tr class="separator:a04af5314c51e75495016026343dec02e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a3a3abf9e2a6a93a2097d08b6e8c31891"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a3a3abf9e2a6a93a2097d08b6e8c31891">aimath_f32_default_mat_mul_atrt</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a3a3abf9e2a6a93a2097d08b6e8c31891"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a matrix multiplication with transposed result of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b.  <a href="aimath__f32__default_8h.html#a3a3abf9e2a6a93a2097d08b6e8c31891">More...</a><br /></td></tr>
<tr class="separator:a3a3abf9e2a6a93a2097d08b6e8c31891"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:abf4023f330ca3318b600967710aa4de1"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#abf4023f330ca3318b600967710aa4de1">aimath_f32_default_multiply</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:abf4023f330ca3318b600967710aa4de1"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs an element wise multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b (Hadamard product).  <a href="aimath__f32__default_8h.html#abf4023f330ca3318b600967710aa4de1">More...</a><br /></td></tr>
<tr class="separator:abf4023f330ca3318b600967710aa4de1"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a572de6eb62cc767cb2654dba7c5a41c2"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a572de6eb62cc767cb2654dba7c5a41c2">aimath_f32_default_divide</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a572de6eb62cc767cb2654dba7c5a41c2"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs an element wise division of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b (Hadamard division).  <a href="aimath__f32__default_8h.html#a572de6eb62cc767cb2654dba7c5a41c2">More...</a><br /></td></tr>
<tr class="separator:a572de6eb62cc767cb2654dba7c5a41c2"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a62d9322b1fe696d91a6bb1d2f265e7ed"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a62d9322b1fe696d91a6bb1d2f265e7ed">aimath_f32_default_scalar_mul</a> (const void *scalar, const <a class="el" href="structaitensor.html">aitensor_t</a> *a, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a62d9322b1fe696d91a6bb1d2f265e7ed"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a scalar multiplication (scaling) of <a class="el" href="aimath__f32_8h.html">F32 </a> tensor a and a scalar.  <a href="aimath__f32__default_8h.html#a62d9322b1fe696d91a6bb1d2f265e7ed">More...</a><br /></td></tr>
<tr class="separator:a62d9322b1fe696d91a6bb1d2f265e7ed"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:abddd24f14f793be28cb73d4273a7f827"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#abddd24f14f793be28cb73d4273a7f827">aimath_f32_default_scalar_add</a> (const void *scalar, const <a class="el" href="structaitensor.html">aitensor_t</a> *a, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:abddd24f14f793be28cb73d4273a7f827"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs an element wise addition of a scalar to a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#abddd24f14f793be28cb73d4273a7f827">More...</a><br /></td></tr>
<tr class="separator:abddd24f14f793be28cb73d4273a7f827"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a235398f06dc3faf34d12712f2d3ee887"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a235398f06dc3faf34d12712f2d3ee887">aimath_f32_default_tensor_add</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a235398f06dc3faf34d12712f2d3ee887"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs an element wise addition of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b.  <a href="aimath__f32__default_8h.html#a235398f06dc3faf34d12712f2d3ee887">More...</a><br /></td></tr>
<tr class="separator:a235398f06dc3faf34d12712f2d3ee887"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:af1409f9886a9614f83cb0a5d5360be60"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#af1409f9886a9614f83cb0a5d5360be60">aimath_f32_default_tensor_sub</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:af1409f9886a9614f83cb0a5d5360be60"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs an element wise subtraction of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b.  <a href="aimath__f32__default_8h.html#af1409f9886a9614f83cb0a5d5360be60">More...</a><br /></td></tr>
<tr class="separator:af1409f9886a9614f83cb0a5d5360be60"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:acbf32eb2340529176399d24fa2ceaae2"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#acbf32eb2340529176399d24fa2ceaae2">aimath_f32_default_tensor_sub_sparse8</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, const <a class="el" href="structaitensor.html">aitensor_t</a> *b, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:acbf32eb2340529176399d24fa2ceaae2"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs a subtraction between a <a class="el" href="aimath__f32_8h.html">F32 </a> matrix a and a <a class="el" href="aimath__u8_8h.html">U8 </a> sparse matrix b.  <a href="aimath__f32__default_8h.html#acbf32eb2340529176399d24fa2ceaae2">More...</a><br /></td></tr>
<tr class="separator:acbf32eb2340529176399d24fa2ceaae2"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa0056570df0d8bd0b7e3221f5abfe61a"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#aa0056570df0d8bd0b7e3221f5abfe61a">aimath_f32_default_copy_tensor</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *from, <a class="el" href="structaitensor.html">aitensor_t</a> *to)</td></tr>
<tr class="memdesc:aa0056570df0d8bd0b7e3221f5abfe61a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Performs an element wise copy of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors.  <a href="aimath__f32__default_8h.html#aa0056570df0d8bd0b7e3221f5abfe61a">More...</a><br /></td></tr>
<tr class="separator:aa0056570df0d8bd0b7e3221f5abfe61a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a750f616bbc38cd336a0d75581a811101"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a750f616bbc38cd336a0d75581a811101">aimath_f32_default_transpose_vector</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *vector)</td></tr>
<tr class="memdesc:a750f616bbc38cd336a0d75581a811101"><td class="mdescLeft">&#160;</td><td class="mdescRight">Transposes a <a class="el" href="aimath__f32_8h.html">F32 </a> vector.  <a href="aimath__f32__default_8h.html#a750f616bbc38cd336a0d75581a811101">More...</a><br /></td></tr>
<tr class="separator:a750f616bbc38cd336a0d75581a811101"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a458efd0b4b295af5f84c15123158770d"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a458efd0b4b295af5f84c15123158770d">aimath_f32_default_transpose_matrix</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *x)</td></tr>
<tr class="memdesc:a458efd0b4b295af5f84c15123158770d"><td class="mdescLeft">&#160;</td><td class="mdescRight">Transpose a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a458efd0b4b295af5f84c15123158770d">More...</a><br /></td></tr>
<tr class="separator:a458efd0b4b295af5f84c15123158770d"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a2276eb0146a4ef4f106f0aaf7cc7003f"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a2276eb0146a4ef4f106f0aaf7cc7003f">aimath_f32_default_norm_squared</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, void *result)</td></tr>
<tr class="memdesc:a2276eb0146a4ef4f106f0aaf7cc7003f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the squared sum of all elements in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a2276eb0146a4ef4f106f0aaf7cc7003f">More...</a><br /></td></tr>
<tr class="separator:a2276eb0146a4ef4f106f0aaf7cc7003f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a1c4d0fe9ee55e7b4677eff2143ed2328"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a1c4d0fe9ee55e7b4677eff2143ed2328">aimath_f32_default_sum</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, void *result)</td></tr>
<tr class="memdesc:a1c4d0fe9ee55e7b4677eff2143ed2328"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the sum of all elements in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a1c4d0fe9ee55e7b4677eff2143ed2328">More...</a><br /></td></tr>
<tr class="separator:a1c4d0fe9ee55e7b4677eff2143ed2328"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a29b2f81f252312acbac15df0d415d791"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a29b2f81f252312acbac15df0d415d791">aimath_f32_default_min</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, void *result)</td></tr>
<tr class="memdesc:a29b2f81f252312acbac15df0d415d791"><td class="mdescLeft">&#160;</td><td class="mdescRight">Identifies the minimum value in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a29b2f81f252312acbac15df0d415d791">More...</a><br /></td></tr>
<tr class="separator:a29b2f81f252312acbac15df0d415d791"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a2cca0f45a41c48a7730664f43933c066"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a2cca0f45a41c48a7730664f43933c066">aimath_f32_default_max</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, void *result)</td></tr>
<tr class="memdesc:a2cca0f45a41c48a7730664f43933c066"><td class="mdescLeft">&#160;</td><td class="mdescRight">Identifies the maximum value in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a2cca0f45a41c48a7730664f43933c066">More...</a><br /></td></tr>
<tr class="separator:a2cca0f45a41c48a7730664f43933c066"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a9c2af96bf91c4443ab75036f585fdba3"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a9c2af96bf91c4443ab75036f585fdba3">aimath_f32_default_sigmoid</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a9c2af96bf91c4443ab75036f585fdba3"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the sigmoid of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a9c2af96bf91c4443ab75036f585fdba3">More...</a><br /></td></tr>
<tr class="separator:a9c2af96bf91c4443ab75036f585fdba3"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a36fce20e46763185dfc0dc15e9d37a7c"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a36fce20e46763185dfc0dc15e9d37a7c">aimath_f32_default_d_sigmoid</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *sigmoid_x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a36fce20e46763185dfc0dc15e9d37a7c"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the derivative sigmoid of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a36fce20e46763185dfc0dc15e9d37a7c">More...</a><br /></td></tr>
<tr class="separator:a36fce20e46763185dfc0dc15e9d37a7c"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:abd86f478417339f44905ede947a5e089"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#abd86f478417339f44905ede947a5e089">aimath_f32_default_tanh</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:abd86f478417339f44905ede947a5e089"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the tanh of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#abd86f478417339f44905ede947a5e089">More...</a><br /></td></tr>
<tr class="separator:abd86f478417339f44905ede947a5e089"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a56415d86de146515ace7247a05f1f152"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a56415d86de146515ace7247a05f1f152">aimath_f32_default_d_tanh</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *tanh_x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a56415d86de146515ace7247a05f1f152"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the tanh derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a56415d86de146515ace7247a05f1f152">More...</a><br /></td></tr>
<tr class="separator:a56415d86de146515ace7247a05f1f152"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a69ea48225650ecaf3493d137f3e91c4e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a69ea48225650ecaf3493d137f3e91c4e">aimath_f32_default_relu</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a69ea48225650ecaf3493d137f3e91c4e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the rectifier (ReLU) value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a69ea48225650ecaf3493d137f3e91c4e">More...</a><br /></td></tr>
<tr class="separator:a69ea48225650ecaf3493d137f3e91c4e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ab40f960fd9c4ac7835c3ba61e11d5293"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#ab40f960fd9c4ac7835c3ba61e11d5293">aimath_f32_default_d_relu</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:ab40f960fd9c4ac7835c3ba61e11d5293"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the rectifier (ReLU) derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#ab40f960fd9c4ac7835c3ba61e11d5293">More...</a><br /></td></tr>
<tr class="separator:ab40f960fd9c4ac7835c3ba61e11d5293"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:afaafd34ad1adc476a7c120dce8f39498"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#afaafd34ad1adc476a7c120dce8f39498">aimath_f32_default_leaky_relu</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, const void *alpha, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:afaafd34ad1adc476a7c120dce8f39498"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the leaky rectifier (leaky ReLU) value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#afaafd34ad1adc476a7c120dce8f39498">More...</a><br /></td></tr>
<tr class="separator:afaafd34ad1adc476a7c120dce8f39498"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a99a65ffaa0b6636cbc26267695b7118d"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a99a65ffaa0b6636cbc26267695b7118d">aimath_f32_default_d_leaky_relu</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, const void *alpha, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a99a65ffaa0b6636cbc26267695b7118d"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the leaky rectifier (leaky ReLU) derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a99a65ffaa0b6636cbc26267695b7118d">More...</a><br /></td></tr>
<tr class="separator:a99a65ffaa0b6636cbc26267695b7118d"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a76692ffb6acf0f7a291d6b4c86311edf"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a76692ffb6acf0f7a291d6b4c86311edf">aimath_f32_default_elu</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, const void *alpha, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a76692ffb6acf0f7a291d6b4c86311edf"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the exponential rectifier (ELU) value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a76692ffb6acf0f7a291d6b4c86311edf">More...</a><br /></td></tr>
<tr class="separator:a76692ffb6acf0f7a291d6b4c86311edf"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a29f6be81cd62775d6824866e5427c9b8"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a29f6be81cd62775d6824866e5427c9b8">aimath_f32_default_d_elu</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, const void *alpha, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a29f6be81cd62775d6824866e5427c9b8"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the exponential rectifier (ELU) derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a29f6be81cd62775d6824866e5427c9b8">More...</a><br /></td></tr>
<tr class="separator:a29f6be81cd62775d6824866e5427c9b8"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ac14896a86a6600a4be48b84c9977acaf"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#ac14896a86a6600a4be48b84c9977acaf">aimath_f32_default_softmax</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:ac14896a86a6600a4be48b84c9977acaf"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the softmax value of each row of a <a class="el" href="aimath__f32_8h.html">F32 </a> matrix.  <a href="aimath__f32__default_8h.html#ac14896a86a6600a4be48b84c9977acaf">More...</a><br /></td></tr>
<tr class="separator:ac14896a86a6600a4be48b84c9977acaf"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a6f36c52ff560b8172098f69aab5389c9"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a6f36c52ff560b8172098f69aab5389c9">aimath_f32_default_softsign</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a6f36c52ff560b8172098f69aab5389c9"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the softsign value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a6f36c52ff560b8172098f69aab5389c9">More...</a><br /></td></tr>
<tr class="separator:a6f36c52ff560b8172098f69aab5389c9"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a0e1b0cb1f84a73659b5b61b59045207c"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a0e1b0cb1f84a73659b5b61b59045207c">aimath_f32_default_d_softsign</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a0e1b0cb1f84a73659b5b61b59045207c"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the softsign derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a0e1b0cb1f84a73659b5b61b59045207c">More...</a><br /></td></tr>
<tr class="separator:a0e1b0cb1f84a73659b5b61b59045207c"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a018ddfb520239b37f6924dce444e1d57"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a018ddfb520239b37f6924dce444e1d57">aimath_f32_default_binary_crossentropy_sum</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted_data, const <a class="el" href="structaitensor.html">aitensor_t</a> *target_data, void *result)</td></tr>
<tr class="memdesc:a018ddfb520239b37f6924dce444e1d57"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the binary cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction.  <a href="aimath__f32__default_8h.html#a018ddfb520239b37f6924dce444e1d57">More...</a><br /></td></tr>
<tr class="separator:a018ddfb520239b37f6924dce444e1d57"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:af0c6e0860a56b1dad01f2da30ea85e8b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#af0c6e0860a56b1dad01f2da30ea85e8b">aimath_f32_default_binary_crossentropy_mean</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted_data, const <a class="el" href="structaitensor.html">aitensor_t</a> *target_data, void *result)</td></tr>
<tr class="memdesc:af0c6e0860a56b1dad01f2da30ea85e8b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the binary cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction.  <a href="aimath__f32__default_8h.html#af0c6e0860a56b1dad01f2da30ea85e8b">More...</a><br /></td></tr>
<tr class="separator:af0c6e0860a56b1dad01f2da30ea85e8b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ab64d737f8edfb64854254e8c9efff55b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#ab64d737f8edfb64854254e8c9efff55b">aimath_f32_default_categorical_crossentropy_sum</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted_data, const <a class="el" href="structaitensor.html">aitensor_t</a> *target_data, void *result)</td></tr>
<tr class="memdesc:ab64d737f8edfb64854254e8c9efff55b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the categorical cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction.  <a href="aimath__f32__default_8h.html#ab64d737f8edfb64854254e8c9efff55b">More...</a><br /></td></tr>
<tr class="separator:ab64d737f8edfb64854254e8c9efff55b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a50c88d7354f3dbae71ad79897fd489f7"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a50c88d7354f3dbae71ad79897fd489f7">aimath_f32_default_categorical_crossentropy_mean</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted_data, const <a class="el" href="structaitensor.html">aitensor_t</a> *target_data, void *result)</td></tr>
<tr class="memdesc:a50c88d7354f3dbae71ad79897fd489f7"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the categorical cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction.  <a href="aimath__f32__default_8h.html#a50c88d7354f3dbae71ad79897fd489f7">More...</a><br /></td></tr>
<tr class="separator:a50c88d7354f3dbae71ad79897fd489f7"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa9d04eb0036b9b2f0640d72d2952026b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#aa9d04eb0036b9b2f0640d72d2952026b">aimath_f32_default_categorical_crossentropy_sum_sparse8</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted_data, const <a class="el" href="structaitensor.html">aitensor_t</a> *target_data, void *result)</td></tr>
<tr class="memdesc:aa9d04eb0036b9b2f0640d72d2952026b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the categorical Cross-Entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted data and the <a class="el" href="aimath__u8_8h.html">U8 </a> target data in sparse representation using a sum reduction.  <a href="aimath__f32__default_8h.html#aa9d04eb0036b9b2f0640d72d2952026b">More...</a><br /></td></tr>
<tr class="separator:aa9d04eb0036b9b2f0640d72d2952026b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a89770c6699211e25f45e55ac04355eb4"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a89770c6699211e25f45e55ac04355eb4">aimath_f32_default_categorical_crossentropy_mean_sparse8</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted_data, const <a class="el" href="structaitensor.html">aitensor_t</a> *target_data, void *result)</td></tr>
<tr class="memdesc:a89770c6699211e25f45e55ac04355eb4"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the categorical Cross-Entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted data and the <a class="el" href="aimath__u8_8h.html">U8 </a> target data in sparse representation using a mean reduction.  <a href="aimath__f32__default_8h.html#a89770c6699211e25f45e55ac04355eb4">More...</a><br /></td></tr>
<tr class="separator:a89770c6699211e25f45e55ac04355eb4"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a3bf3e26a0873244ef65ed20775360312"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a3bf3e26a0873244ef65ed20775360312">aimath_f32_default_sqrt</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a3bf3e26a0873244ef65ed20775360312"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the element wise square root of a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor.  <a href="aimath__f32__default_8h.html#a3bf3e26a0873244ef65ed20775360312">More...</a><br /></td></tr>
<tr class="separator:a3bf3e26a0873244ef65ed20775360312"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a072ffbef0c60343957e4621f01551e85"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a072ffbef0c60343957e4621f01551e85">aimath_f32_default_zero_tensor</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor)</td></tr>
<tr class="memdesc:a072ffbef0c60343957e4621f01551e85"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with zeros.  <a href="aimath__f32__default_8h.html#a072ffbef0c60343957e4621f01551e85">More...</a><br /></td></tr>
<tr class="separator:a072ffbef0c60343957e4621f01551e85"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a760163e6cee0f2a5e341069ad20bfcde"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a760163e6cee0f2a5e341069ad20bfcde">aimath_f32_default_init_zeros</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor)</td></tr>
<tr class="memdesc:a760163e6cee0f2a5e341069ad20bfcde"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with zeros.  <a href="aimath__f32__default_8h.html#a760163e6cee0f2a5e341069ad20bfcde">More...</a><br /></td></tr>
<tr class="separator:a760163e6cee0f2a5e341069ad20bfcde"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a2209ddbfefea6e2a3b0c23141ba731ba"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a2209ddbfefea6e2a3b0c23141ba731ba">aimath_f32_default_init_ones</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor)</td></tr>
<tr class="memdesc:a2209ddbfefea6e2a3b0c23141ba731ba"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with ones.  <a href="aimath__f32__default_8h.html#a2209ddbfefea6e2a3b0c23141ba731ba">More...</a><br /></td></tr>
<tr class="separator:a2209ddbfefea6e2a3b0c23141ba731ba"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a580a19afaf81fa085160be7fead0ac2f"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a580a19afaf81fa085160be7fead0ac2f">aimath_f32_default_tensor_init_uniform</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor, float from, float to)</td></tr>
<tr class="memdesc:a580a19afaf81fa085160be7fead0ac2f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with random numbers created from a uniform distribution within given range.  <a href="aimath__f32__default_8h.html#a580a19afaf81fa085160be7fead0ac2f">More...</a><br /></td></tr>
<tr class="separator:a580a19afaf81fa085160be7fead0ac2f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aae8d8c59a70df08142d376e134944317"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#aae8d8c59a70df08142d376e134944317">aimath_f32_default_init_glorot_uniform</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor)</td></tr>
<tr class="memdesc:aae8d8c59a70df08142d376e134944317"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with random numbers uniformly within given range, according to Glorot et al.  <a href="aimath__f32__default_8h.html#aae8d8c59a70df08142d376e134944317">More...</a><br /></td></tr>
<tr class="separator:aae8d8c59a70df08142d376e134944317"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a9a8744295c9182d50c6da19d54c4e75a"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a9a8744295c9182d50c6da19d54c4e75a">aimath_f32_default_init_glorot_uniform_cdim</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor, int8_t cin_axis, int8_t cout_axis)</td></tr>
<tr class="memdesc:a9a8744295c9182d50c6da19d54c4e75a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with random numbers uniformly within given range, according to Glorot et al.  <a href="aimath__f32__default_8h.html#a9a8744295c9182d50c6da19d54c4e75a">More...</a><br /></td></tr>
<tr class="separator:a9a8744295c9182d50c6da19d54c4e75a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a11d31daf4d011186446fd77a253b9f7f"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a11d31daf4d011186446fd77a253b9f7f">aimath_f32_default_init_he_uniform</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor)</td></tr>
<tr class="memdesc:a11d31daf4d011186446fd77a253b9f7f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with uniformly drawn random numbers within given range, according to He et al.  <a href="aimath__f32__default_8h.html#a11d31daf4d011186446fd77a253b9f7f">More...</a><br /></td></tr>
<tr class="separator:a11d31daf4d011186446fd77a253b9f7f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:afe89616689db127803144f477557a10a"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#afe89616689db127803144f477557a10a">aimath_f32_default_init_he_uniform_cdim</a> (<a class="el" href="structaitensor.html">aitensor_t</a> *tensor, int8_t cout_axis)</td></tr>
<tr class="memdesc:afe89616689db127803144f477557a10a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with uniformly drawn random numbers within given range, according to He et al.  <a href="aimath__f32__default_8h.html#afe89616689db127803144f477557a10a">More...</a><br /></td></tr>
<tr class="separator:afe89616689db127803144f477557a10a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a99c2dd3ad66544c1648ff6113f059a76"><td class="memItemLeft" align="right" valign="top">float&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a99c2dd3ad66544c1648ff6113f059a76">aimath_f32_default_expf_fast</a> (const float x)</td></tr>
<tr class="memdesc:a99c2dd3ad66544c1648ff6113f059a76"><td class="mdescLeft">&#160;</td><td class="mdescRight">Fast approximation of the exponential function.  <a href="aimath__f32__default_8h.html#a99c2dd3ad66544c1648ff6113f059a76">More...</a><br /></td></tr>
<tr class="separator:a99c2dd3ad66544c1648ff6113f059a76"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a0bf115cae42c704411916294aa391169"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a0bf115cae42c704411916294aa391169">aimath_f32_default_mean_channelwise</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, int8_t channel_axis, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a0bf115cae42c704411916294aa391169"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the channel-wise mean values of the <a class="el" href="aimath__f32_8h.html">F32 </a> tensor x.  <a href="aimath__f32__default_8h.html#a0bf115cae42c704411916294aa391169">More...</a><br /></td></tr>
<tr class="separator:a0bf115cae42c704411916294aa391169"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:afd8b40e9cb1cfb0e1b2a6f700d21a086"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#afd8b40e9cb1cfb0e1b2a6f700d21a086">aimath_f32_default_variance_channelwise</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *x, int8_t channel_axis, const <a class="el" href="structaitensor.html">aitensor_t</a> *means, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:afd8b40e9cb1cfb0e1b2a6f700d21a086"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculate the channel-wise variances of the <a class="el" href="aimath__f32_8h.html">F32 </a> tensor x.  <a href="aimath__f32__default_8h.html#afd8b40e9cb1cfb0e1b2a6f700d21a086">More...</a><br /></td></tr>
<tr class="separator:afd8b40e9cb1cfb0e1b2a6f700d21a086"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:af5ee421317c740d6765ba1fbf413492a"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#af5ee421317c740d6765ba1fbf413492a">aimath_f32_default_exponential_moving_average</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *new_data, const void *momentum, <a class="el" href="structaitensor.html">aitensor_t</a> *average)</td></tr>
<tr class="memdesc:af5ee421317c740d6765ba1fbf413492a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Perform an exponential moving average.  <a href="aimath__f32__default_8h.html#af5ee421317c740d6765ba1fbf413492a">More...</a><br /></td></tr>
<tr class="separator:af5ee421317c740d6765ba1fbf413492a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a44b61123d9b51b3d5abf710fdefa6695"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a44b61123d9b51b3d5abf710fdefa6695">aimath_f32_default_mse_gradients_sum</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted, const <a class="el" href="structaitensor.html">aitensor_t</a> *target, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a44b61123d9b51b3d5abf710fdefa6695"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the gradients of the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction.  <a href="aimath__f32__default_8h.html#a44b61123d9b51b3d5abf710fdefa6695">More...</a><br /></td></tr>
<tr class="separator:a44b61123d9b51b3d5abf710fdefa6695"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a7e43d536e6b3bf837d6003a879e6c62b"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a7e43d536e6b3bf837d6003a879e6c62b">aimath_f32_default_mse_gradients_mean</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted, const <a class="el" href="structaitensor.html">aitensor_t</a> *target, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a7e43d536e6b3bf837d6003a879e6c62b"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the gradients of the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction.  <a href="aimath__f32__default_8h.html#a7e43d536e6b3bf837d6003a879e6c62b">More...</a><br /></td></tr>
<tr class="separator:a7e43d536e6b3bf837d6003a879e6c62b"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a875479b966264b5e6d395514f0ca7e25"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a875479b966264b5e6d395514f0ca7e25">aimath_f32_default_mse_loss_sum</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted, const <a class="el" href="structaitensor.html">aitensor_t</a> *target, void *result)</td></tr>
<tr class="memdesc:a875479b966264b5e6d395514f0ca7e25"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction.  <a href="aimath__f32__default_8h.html#a875479b966264b5e6d395514f0ca7e25">More...</a><br /></td></tr>
<tr class="separator:a875479b966264b5e6d395514f0ca7e25"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a8daa94f78e201dd90a3fcb91ec83845c"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a8daa94f78e201dd90a3fcb91ec83845c">aimath_f32_default_mse_loss_mean</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *predicted, const <a class="el" href="structaitensor.html">aitensor_t</a> *target, void *result)</td></tr>
<tr class="memdesc:a8daa94f78e201dd90a3fcb91ec83845c"><td class="mdescLeft">&#160;</td><td class="mdescRight">Calculates the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction.  <a href="aimath__f32__default_8h.html#a8daa94f78e201dd90a3fcb91ec83845c">More...</a><br /></td></tr>
<tr class="separator:a8daa94f78e201dd90a3fcb91ec83845c"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a81128f005039e7eb349fdb0af24fdd2c"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="aimath__f32__default_8h.html#a81128f005039e7eb349fdb0af24fdd2c">aimath_f32_default_scale_by_batch_size</a> (const <a class="el" href="structaitensor.html">aitensor_t</a> *a, <a class="el" href="structaitensor.html">aitensor_t</a> *result)</td></tr>
<tr class="memdesc:a81128f005039e7eb349fdb0af24fdd2c"><td class="mdescLeft">&#160;</td><td class="mdescRight">Scales a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor by batch size (size of first dimension)  <a href="aimath__f32__default_8h.html#a81128f005039e7eb349fdb0af24fdd2c">More...</a><br /></td></tr>
<tr class="separator:a81128f005039e7eb349fdb0af24fdd2c"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><p>Math functions for <a class="el" href="aimath__f32_8h.html">F32 </a> data type, default implementation. </p>
<dl class="section version"><dt>Version</dt><dd>2.2.0 </dd></dl>
<dl class="section copyright"><dt>Copyright</dt><dd>Copyright (C) 2020-2023 Fraunhofer Institute for Microelectronic Circuits and Systems. All rights reserved.<br  />
<br  />
 AIfES is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.<br  />
<br  />
 This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.<br  />
<br  />
 You should have received a copy of the GNU Affero General Public License along with this program. If not, see <a href="https://www.gnu.org/licenses/">https://www.gnu.org/licenses/</a>.</dd></dl>
<p>These functions can be used when no hardware specific implementation is available. </p>
</div><h2 class="groupheader">Function Documentation</h2>
<a id="af0c6e0860a56b1dad01f2da30ea85e8b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af0c6e0860a56b1dad01f2da30ea85e8b">&#9670;&nbsp;</a></span>aimath_f32_default_binary_crossentropy_mean()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_binary_crossentropy_mean </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the binary cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction. </p>
<p class="formulaDsp">
\[ result = - \frac{1}{N} \sum_{i=1}^{N} (target_i \cdot \log(predicted_i) + (1 - target_i) \cdot \log(1 - predicted_i)) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t p_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> p_data[2*3] = {0.8f, 0.1f, 0.7f,</div>
<div class="line">                     0.2f, 0.3f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> p = AITENSOR_2D_F32(p_shape, p_data);</div>
<div class="line"> </div>
<div class="line">uint16_t t_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> t_data[2*3] = {1.0f, 0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> t = AITENSOR_2D_F32(t_shape, t_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#af0c6e0860a56b1dad01f2da30ea85e8b">aimath_f32_default_binary_crossentropy_mean</a>(&amp;p, &amp;t, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__basic_8h_html_aaa9ca757028820849ef3dde13cc46565"><div class="ttname"><a href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a></div><div class="ttdeci">void print_aiscalar(const void *scalar, const aimath_dtype_t *dtype)</div><div class="ttdoc">Printing a scalar to console.</div></div>
<div class="ttc" id="aaimath__f32_8h_html_a06eea7384624233f57daab2648d8ce37"><div class="ttname"><a href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a></div><div class="ttdeci">const aimath_dtype_t * aif32</div><div class="ttdoc">The F32 data-type indicator.</div></div>
<div class="ttc" id="aaimath__f32__default_8h_html_af0c6e0860a56b1dad01f2da30ea85e8b"><div class="ttname"><a href="aimath__f32__default_8h.html#af0c6e0860a56b1dad01f2da30ea85e8b">aimath_f32_default_binary_crossentropy_mean</a></div><div class="ttdeci">void aimath_f32_default_binary_crossentropy_mean(const aitensor_t *predicted_data, const aitensor_t *target_data, void *result)</div><div class="ttdoc">Calculates the binary cross entropy between the F32  predicted and the target data using a mean reduc...</div></div>
<div class="ttc" id="astructaitensor_html"><div class="ttname"><a href="structaitensor.html">aitensor</a></div><div class="ttdoc">A tensor in AIfES.</div><div class="ttdef"><b>Definition:</b> aifes_math.h:89</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted_data</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target_data</td><td>F32 matrix with the target data / true values / labels (2D tensor with binary values 0 or 1) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 scalar with the mean loss value </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a018ddfb520239b37f6924dce444e1d57"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a018ddfb520239b37f6924dce444e1d57">&#9670;&nbsp;</a></span>aimath_f32_default_binary_crossentropy_sum()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_binary_crossentropy_sum </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the binary cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction. </p>
<p class="formulaDsp">
\[ result = - \sum_i (target_i \cdot \log(predicted_i) + (1 - target_i) \cdot \log(1 - predicted_i)) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t p_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> p_data[2*3] = {0.8f, 0.1f, 0.7f,</div>
<div class="line">                     0.2f, 0.3f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> p = AITENSOR_2D_F32(p_shape, p_data);</div>
<div class="line"> </div>
<div class="line">uint16_t t_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> t_data[2*3] = {1.0f, 0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> t = AITENSOR_2D_F32(t_shape, t_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a018ddfb520239b37f6924dce444e1d57">aimath_f32_default_binary_crossentropy_sum</a>(&amp;p, &amp;t, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a018ddfb520239b37f6924dce444e1d57"><div class="ttname"><a href="aimath__f32__default_8h.html#a018ddfb520239b37f6924dce444e1d57">aimath_f32_default_binary_crossentropy_sum</a></div><div class="ttdeci">void aimath_f32_default_binary_crossentropy_sum(const aitensor_t *predicted_data, const aitensor_t *target_data, void *result)</div><div class="ttdoc">Calculates the binary cross entropy between the F32  predicted and the target data using a sum reduct...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted_data</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target_data</td><td>F32 matrix with the target data / true values / labels (2D tensor with binary values 0 or 1) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 scalar (float value, passed via void pointer) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a50c88d7354f3dbae71ad79897fd489f7"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a50c88d7354f3dbae71ad79897fd489f7">&#9670;&nbsp;</a></span>aimath_f32_default_categorical_crossentropy_mean()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_categorical_crossentropy_mean </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the categorical cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction. </p>
<p class="formulaDsp">
\[ result = - \frac{1}{N} \sum_i target_i \cdot \log(predicted_i) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t p_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> p_data[2*3] = {0.2f, 0.1f, 0.7f,</div>
<div class="line">                     0.9f, 0.1f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> p = AITENSOR_2D_F32(p_shape, p_data);</div>
<div class="line"> </div>
<div class="line">uint16_t t_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> t_data[2*3] = {0.0f, 0.0f, 1.0f,</div>
<div class="line">                     1.0f, 0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> t = AITENSOR_2D_F32(t_shape, t_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a50c88d7354f3dbae71ad79897fd489f7">aimath_f32_default_categorical_crossentropy_mean</a>(&amp;p, &amp;t, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a50c88d7354f3dbae71ad79897fd489f7"><div class="ttname"><a href="aimath__f32__default_8h.html#a50c88d7354f3dbae71ad79897fd489f7">aimath_f32_default_categorical_crossentropy_mean</a></div><div class="ttdeci">void aimath_f32_default_categorical_crossentropy_mean(const aitensor_t *predicted_data, const aitensor_t *target_data, void *result)</div><div class="ttdoc">Calculates the categorical cross entropy between the F32  predicted and the target data using a mean ...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted_data</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target_data</td><td>F32 matrix with the target data / true values / labels (2D tensor, rows one-hot encoded) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 scalar (float value, passed via void pointer) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a89770c6699211e25f45e55ac04355eb4"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a89770c6699211e25f45e55ac04355eb4">&#9670;&nbsp;</a></span>aimath_f32_default_categorical_crossentropy_mean_sparse8()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_categorical_crossentropy_mean_sparse8 </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the categorical Cross-Entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted data and the <a class="el" href="aimath__u8_8h.html">U8 </a> target data in sparse representation using a mean reduction. </p>
<p>This function can calculate the cross entropy between a row wise one-hot encoded matrix in sparse representation (just the integer index of the 1 is stored) and a normal F32 matrix.</p>
<p>For example the matrix </p><p class="formulaDsp">
\[ \left( \begin{array}{cccc} 0 &amp; 0 &amp; 0 &amp; 1 \\ 1 &amp; 0 &amp; 0 &amp; 0 \\ 0 &amp; 0 &amp; 1 &amp; 0 \end{array}\right) \]
</p>
<p> in sparse representation is </p><p class="formulaDsp">
\[ \left( \begin{array}{c} 3 \\ 0 \\ 2 \end{array}\right) \]
</p>
<p>The result is then calculated from the one-hot encoded target matrix: </p><p class="formulaDsp">
\[ result = - \frac{1}{N} \sum_i target_{\mathrm{one-hot},i} \cdot \log(predicted_i) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t p_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> p_data[2*3] = {0.2f, 0.1f, 0.7f,</div>
<div class="line">                     0.9f, 0.1f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> p = AITENSOR_2D_F32(p_shape, p_data);</div>
<div class="line"> </div>
<div class="line">uint16_t t_shape[2] = {2, 1};</div>
<div class="line">uint8_t t_data[2*1] = {2,</div>
<div class="line">                       0};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> t = AITENSOR_2D_U8(t_shape, t_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a89770c6699211e25f45e55ac04355eb4">aimath_f32_default_categorical_crossentropy_mean_sparse8</a>(&amp;p, &amp;t, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a89770c6699211e25f45e55ac04355eb4"><div class="ttname"><a href="aimath__f32__default_8h.html#a89770c6699211e25f45e55ac04355eb4">aimath_f32_default_categorical_crossentropy_mean_sparse8</a></div><div class="ttdeci">void aimath_f32_default_categorical_crossentropy_mean_sparse8(const aitensor_t *predicted_data, const aitensor_t *target_data, void *result)</div><div class="ttdoc">Calculates the categorical Cross-Entropy between the F32  predicted data and the U8  target data in s...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted_data</td><td>F32 matrix with the predicted or calculated values (2D tensor of shape [N x M]) </td></tr>
    <tr><td class="paramname">*target_data</td><td>U8 matrix with the target data / true values / labels (2D tensor of shape [N x 1] with true class indices) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 scalar (float value, passed via void pointer) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="ab64d737f8edfb64854254e8c9efff55b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ab64d737f8edfb64854254e8c9efff55b">&#9670;&nbsp;</a></span>aimath_f32_default_categorical_crossentropy_sum()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_categorical_crossentropy_sum </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the categorical cross entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction. </p>
<p class="formulaDsp">
\[ result = - \sum_i target_i \cdot \log(predicted_i) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t p_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> p_data[2*3] = {0.2f, 0.1f, 0.7f,</div>
<div class="line">                     0.9f, 0.1f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> p = AITENSOR_2D_F32(p_shape, p_data);</div>
<div class="line"> </div>
<div class="line">uint16_t t_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> t_data[2*3] = {0.0f, 0.0f, 1.0f,</div>
<div class="line">                     1.0f, 0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> t = AITENSOR_2D_F32(t_shape, t_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#ab64d737f8edfb64854254e8c9efff55b">aimath_f32_default_categorical_crossentropy_sum</a>(&amp;p, &amp;t, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_ab64d737f8edfb64854254e8c9efff55b"><div class="ttname"><a href="aimath__f32__default_8h.html#ab64d737f8edfb64854254e8c9efff55b">aimath_f32_default_categorical_crossentropy_sum</a></div><div class="ttdeci">void aimath_f32_default_categorical_crossentropy_sum(const aitensor_t *predicted_data, const aitensor_t *target_data, void *result)</div><div class="ttdoc">Calculates the categorical cross entropy between the F32  predicted and the target data using a sum r...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted_data</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target_data</td><td>F32 matrix with the target data / true values / labels (2D tensor, rows one-hot encoded) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 scalar (float value, passed via void pointer) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="aa9d04eb0036b9b2f0640d72d2952026b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa9d04eb0036b9b2f0640d72d2952026b">&#9670;&nbsp;</a></span>aimath_f32_default_categorical_crossentropy_sum_sparse8()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_categorical_crossentropy_sum_sparse8 </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the categorical Cross-Entropy between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted data and the <a class="el" href="aimath__u8_8h.html">U8 </a> target data in sparse representation using a sum reduction. </p>
<p>This function can calculate the cross entropy between a row wise one-hot encoded matrix in sparse representation (just the integer index of the 1 is stored) and a normal F32 matrix.</p>
<p>For example the matrix </p><p class="formulaDsp">
\[ \left( \begin{array}{cccc} 0 &amp; 0 &amp; 0 &amp; 1 \\ 1 &amp; 0 &amp; 0 &amp; 0 \\ 0 &amp; 0 &amp; 1 &amp; 0 \end{array}\right) \]
</p>
<p> in sparse representation is </p><p class="formulaDsp">
\[ \left( \begin{array}{c} 3 \\ 0 \\ 2 \end{array}\right) \]
</p>
<p>The result is then calculated from the one-hot encoded target matrix: </p><p class="formulaDsp">
\[ result = - \sum_i target_{\mathrm{one-hot},i} \cdot \log(predicted_i) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t p_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> p_data[2*3] = {0.2f, 0.1f, 0.7f,</div>
<div class="line">                     0.9f, 0.1f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> p = AITENSOR_2D_F32(p_shape, p_data);</div>
<div class="line"> </div>
<div class="line">uint16_t t_shape[2] = {2, 1};</div>
<div class="line">uint8_t t_data[2*1] = {2,</div>
<div class="line">                       0};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> t = AITENSOR_2D_U8(t_shape, t_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#aa9d04eb0036b9b2f0640d72d2952026b">aimath_f32_default_categorical_crossentropy_sum_sparse8</a>(&amp;p, &amp;t, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_aa9d04eb0036b9b2f0640d72d2952026b"><div class="ttname"><a href="aimath__f32__default_8h.html#aa9d04eb0036b9b2f0640d72d2952026b">aimath_f32_default_categorical_crossentropy_sum_sparse8</a></div><div class="ttdeci">void aimath_f32_default_categorical_crossentropy_sum_sparse8(const aitensor_t *predicted_data, const aitensor_t *target_data, void *result)</div><div class="ttdoc">Calculates the categorical Cross-Entropy between the F32  predicted data and the U8  target data in s...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted_data</td><td>F32 matrix with the predicted or calculated values (2D tensor of shape [N x M]) </td></tr>
    <tr><td class="paramname">*target_data</td><td>U8 matrix with the target data / true values / labels (2D tensor of shape [N x 1] with true class indices) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 scalar (float value, passed via void pointer) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="aa0056570df0d8bd0b7e3221f5abfe61a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa0056570df0d8bd0b7e3221f5abfe61a">&#9670;&nbsp;</a></span>aimath_f32_default_copy_tensor()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_copy_tensor </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>from</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>to</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an element wise copy of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors. </p>
<p class="formulaDsp">
\[ to \leftarrow from \]
</p>
<p>Dimension and shape of from and to tensors have to be the same.</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t from_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> from_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                        4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> from = AITENSOR_2D_F32(from_shape, from_data);</div>
<div class="line"> </div>
<div class="line">uint16_t to_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> to_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> to = AITENSOR_2D_F32(to_shape, to_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#aa0056570df0d8bd0b7e3221f5abfe61a">aimath_f32_default_copy_tensor</a>(&amp;from, &amp;to);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;to);</div>
<div class="ttc" id="aaimath__basic_8h_html_ab10c8d06990943806f0be8fcc6af03fc"><div class="ttname"><a href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a></div><div class="ttdeci">void print_aitensor(const aitensor_t *tensor)</div><div class="ttdoc">Printing a tensor to console.</div></div>
<div class="ttc" id="aaimath__f32__default_8h_html_aa0056570df0d8bd0b7e3221f5abfe61a"><div class="ttname"><a href="aimath__f32__default_8h.html#aa0056570df0d8bd0b7e3221f5abfe61a">aimath_f32_default_copy_tensor</a></div><div class="ttdeci">void aimath_f32_default_copy_tensor(const aitensor_t *from, aitensor_t *to)</div><div class="ttdoc">Performs an element wise copy of F32  tensors.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*from</td><td>F32 tensor to copy from (N-D tensor) </td></tr>
    <tr><td class="paramname">*to</td><td>F32 tensor to copy to (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a29f6be81cd62775d6824866e5427c9b8"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a29f6be81cd62775d6824866e5427c9b8">&#9670;&nbsp;</a></span>aimath_f32_default_d_elu()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_d_elu </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>alpha</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the exponential rectifier (ELU) derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \begin{cases} \alpha \cdot e^{x_i} &amp; \text{if } x_i &lt; 0\\ 1 &amp; \text{if } x_i \geq 0 \end{cases} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> alpha = 1.0f;</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a29f6be81cd62775d6824866e5427c9b8">aimath_f32_default_d_elu</a>(&amp;x, &amp;alpha, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a29f6be81cd62775d6824866e5427c9b8"><div class="ttname"><a href="aimath__f32__default_8h.html#a29f6be81cd62775d6824866e5427c9b8">aimath_f32_default_d_elu</a></div><div class="ttdeci">void aimath_f32_default_d_elu(const aitensor_t *x, const void *alpha, aitensor_t *result)</div><div class="ttdoc">Calculates the exponential rectifier (ELU) derivative of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the ELU derivative from (N-D tensor) </td></tr>
    <tr><td class="paramname">*alpha</td><td>Scalar \( \alpha \) (type aiscalar_f32_t / float) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a99a65ffaa0b6636cbc26267695b7118d"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a99a65ffaa0b6636cbc26267695b7118d">&#9670;&nbsp;</a></span>aimath_f32_default_d_leaky_relu()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_d_leaky_relu </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>alpha</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the leaky rectifier (leaky ReLU) derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \begin{cases} \alpha &amp; \text{if } x_i &lt; 0\\ 1 &amp; \text{if } x_i \geq 0 \end{cases} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> alpha = 0.01f;</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a99a65ffaa0b6636cbc26267695b7118d">aimath_f32_default_d_leaky_relu</a>(&amp;x, &amp;alpha, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a99a65ffaa0b6636cbc26267695b7118d"><div class="ttname"><a href="aimath__f32__default_8h.html#a99a65ffaa0b6636cbc26267695b7118d">aimath_f32_default_d_leaky_relu</a></div><div class="ttdeci">void aimath_f32_default_d_leaky_relu(const aitensor_t *x, const void *alpha, aitensor_t *result)</div><div class="ttdoc">Calculates the leaky rectifier (leaky ReLU) derivative of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the leaky ReLU derivative from (N-D tensor) </td></tr>
    <tr><td class="paramname">*alpha</td><td>Scalar \( \alpha \) (type aiscalar_f32_t / float) for the leakage </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="ab40f960fd9c4ac7835c3ba61e11d5293"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ab40f960fd9c4ac7835c3ba61e11d5293">&#9670;&nbsp;</a></span>aimath_f32_default_d_relu()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_d_relu </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the rectifier (ReLU) derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \begin{cases} 0 &amp; \text{if } x_i &lt; 0\\ 1 &amp; \text{if } x_i \geq 0 \end{cases} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#ab40f960fd9c4ac7835c3ba61e11d5293">aimath_f32_default_d_relu</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_ab40f960fd9c4ac7835c3ba61e11d5293"><div class="ttname"><a href="aimath__f32__default_8h.html#ab40f960fd9c4ac7835c3ba61e11d5293">aimath_f32_default_d_relu</a></div><div class="ttdeci">void aimath_f32_default_d_relu(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the rectifier (ReLU) derivative of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the ReLU derivative from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a36fce20e46763185dfc0dc15e9d37a7c"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a36fce20e46763185dfc0dc15e9d37a7c">&#9670;&nbsp;</a></span>aimath_f32_default_d_sigmoid()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_d_sigmoid </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>sigmoid_x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the sigmoid derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \sigma&#39;(x_{i}) = \sigma(x_{i}) \cdot (1 - \sigma(x_{i})) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a9c2af96bf91c4443ab75036f585fdba3">aimath_f32_default_sigmoid</a>(&amp;x, &amp;result);</div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a36fce20e46763185dfc0dc15e9d37a7c">aimath_f32_default_d_sigmoid</a>(&amp;result, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a36fce20e46763185dfc0dc15e9d37a7c"><div class="ttname"><a href="aimath__f32__default_8h.html#a36fce20e46763185dfc0dc15e9d37a7c">aimath_f32_default_d_sigmoid</a></div><div class="ttdeci">void aimath_f32_default_d_sigmoid(const aitensor_t *sigmoid_x, aitensor_t *result)</div><div class="ttdoc">Calculates the sigmoid derivative of each element in a F32  tensor.</div></div>
<div class="ttc" id="aaimath__f32__default_8h_html_a9c2af96bf91c4443ab75036f585fdba3"><div class="ttname"><a href="aimath__f32__default_8h.html#a9c2af96bf91c4443ab75036f585fdba3">aimath_f32_default_sigmoid</a></div><div class="ttdeci">void aimath_f32_default_sigmoid(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the sigmoid of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*sigmoid_x</td><td>F32 tensor with the sigmoid values \( \sigma(x_{i}) \) (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a0e1b0cb1f84a73659b5b61b59045207c"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a0e1b0cb1f84a73659b5b61b59045207c">&#9670;&nbsp;</a></span>aimath_f32_default_d_softsign()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_d_softsign </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the softsign derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \frac {1} {(1 + |x_i|)^2} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a0e1b0cb1f84a73659b5b61b59045207c">aimath_f32_default_d_softsign</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a0e1b0cb1f84a73659b5b61b59045207c"><div class="ttname"><a href="aimath__f32__default_8h.html#a0e1b0cb1f84a73659b5b61b59045207c">aimath_f32_default_d_softsign</a></div><div class="ttdeci">void aimath_f32_default_d_softsign(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the softsign derivative of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the softsign derivative from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a56415d86de146515ace7247a05f1f152"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a56415d86de146515ace7247a05f1f152">&#9670;&nbsp;</a></span>aimath_f32_default_d_tanh()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_d_tanh </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tanh_x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the tanh derivative of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = tanh&#39;(x_{i}) = 1 - tanh(x_{i})^2 \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#abd86f478417339f44905ede947a5e089">aimath_f32_default_tanh</a>(&amp;x, &amp;result);</div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a56415d86de146515ace7247a05f1f152">aimath_f32_default_d_tanh</a>(&amp;result, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a56415d86de146515ace7247a05f1f152"><div class="ttname"><a href="aimath__f32__default_8h.html#a56415d86de146515ace7247a05f1f152">aimath_f32_default_d_tanh</a></div><div class="ttdeci">void aimath_f32_default_d_tanh(const aitensor_t *tanh_x, aitensor_t *result)</div><div class="ttdoc">Calculates the tanh derivative of each element in a F32  tensor.</div></div>
<div class="ttc" id="aaimath__f32__default_8h_html_abd86f478417339f44905ede947a5e089"><div class="ttname"><a href="aimath__f32__default_8h.html#abd86f478417339f44905ede947a5e089">aimath_f32_default_tanh</a></div><div class="ttdeci">void aimath_f32_default_tanh(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the tanh of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tanh_x</td><td>F32 tensor with the tanh values \( \tanh(x_{i}) \) (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a572de6eb62cc767cb2654dba7c5a41c2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a572de6eb62cc767cb2654dba7c5a41c2">&#9670;&nbsp;</a></span>aimath_f32_default_divide()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_divide </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an element wise division of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b (Hadamard division) </p>
<p class="formulaDsp">
\[ result = a \oslash b \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> b_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a572de6eb62cc767cb2654dba7c5a41c2">aimath_f32_default_divide</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a572de6eb62cc767cb2654dba7c5a41c2"><div class="ttname"><a href="aimath__f32__default_8h.html#a572de6eb62cc767cb2654dba7c5a41c2">aimath_f32_default_divide</a></div><div class="ttdeci">void aimath_f32_default_divide(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs an element wise division of F32  tensors a and b (Hadamard division)</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 tensor a (N-D tensor) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 tensor b (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the element wise division (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a76692ffb6acf0f7a291d6b4c86311edf"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a76692ffb6acf0f7a291d6b4c86311edf">&#9670;&nbsp;</a></span>aimath_f32_default_elu()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_elu </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>alpha</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the exponential rectifier (ELU) value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \begin{cases} \alpha \cdot (e^{x_i} - 1) &amp; \text{if } x_i &lt; 0 \\ x_i &amp; \text{if } x_i \geq 0 \end{cases} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> alpha = 1.0f;</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a76692ffb6acf0f7a291d6b4c86311edf">aimath_f32_default_elu</a>(&amp;x, &amp;alpha, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a76692ffb6acf0f7a291d6b4c86311edf"><div class="ttname"><a href="aimath__f32__default_8h.html#a76692ffb6acf0f7a291d6b4c86311edf">aimath_f32_default_elu</a></div><div class="ttdeci">void aimath_f32_default_elu(const aitensor_t *x, const void *alpha, aitensor_t *result)</div><div class="ttdoc">Calculates the exponential rectifier (ELU) value of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the ELU from (N-D tensor) </td></tr>
    <tr><td class="paramname">*alpha</td><td>Scalar \( \alpha \) (type aiscalar_f32_t / float) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a99c2dd3ad66544c1648ff6113f059a76"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a99c2dd3ad66544c1648ff6113f059a76">&#9670;&nbsp;</a></span>aimath_f32_default_expf_fast()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">float aimath_f32_default_expf_fast </td>
          <td>(</td>
          <td class="paramtype">const float&#160;</td>
          <td class="paramname"><em>x</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fast approximation of the exponential function. </p>
<dl class="section see"><dt>See also</dt><dd><a href="http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.4508&amp;rep=rep1&amp;type=pdf">http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.4508&amp;rep=rep1&amp;type=pdf</a></dd></dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">x</td><td>Input of the exponential function </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="af5ee421317c740d6765ba1fbf413492a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af5ee421317c740d6765ba1fbf413492a">&#9670;&nbsp;</a></span>aimath_f32_default_exponential_moving_average()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_exponential_moving_average </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>new_data</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>momentum</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>average</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an exponential moving average. </p>
<p>Updates the moving average with a new data point:<br  />
 </p><p class="formulaDsp">
\[ average \leftarrow momentum \cdot average + (1 - momentum) \cdot newdata \]
</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">new_data</td><td>Input tensor (N-D) with the new data point. </td></tr>
    <tr><td class="paramname">momentum</td><td>aiscalar_t (float value) which controls the momentum of the average (range [0, 1]). </td></tr>
    <tr><td class="paramname">average</td><td>The average tensor (N-D) that is modified (input and output value). </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="aae8d8c59a70df08142d376e134944317"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aae8d8c59a70df08142d376e134944317">&#9670;&nbsp;</a></span>aimath_f32_default_init_glorot_uniform()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_init_glorot_uniform </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with random numbers uniformly within given range, according to Glorot et al. </p>
<p>Same functionality as <a class="el" href="aimath__f32__default_8h.html#a9a8744295c9182d50c6da19d54c4e75a" title="Fills a F32  tensor with random numbers uniformly within given range, according to Glorot et al.">aimath_f32_default_init_glorot_uniform_cdim()</a> with cin_axis = 0 and cout_axis = 1 (channels last data format).</p>
<p class="formulaDsp">
\[ fan_{avg} = \frac{fan_{in} + fan_{out}}{2} \]
</p>
 <p class="formulaDsp">
\[ r = \sqrt{\frac{3}{fan_{avg}}} \]
</p>
 <p class="formulaDsp">
\[ tensor_i \in \mathcal{U}(-r, r) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#aae8d8c59a70df08142d376e134944317">aimath_f32_default_init_glorot_uniform</a>(&amp;tensor);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_aae8d8c59a70df08142d376e134944317"><div class="ttname"><a href="aimath__f32__default_8h.html#aae8d8c59a70df08142d376e134944317">aimath_f32_default_init_glorot_uniform</a></div><div class="ttdeci">void aimath_f32_default_init_glorot_uniform(aitensor_t *tensor)</div><div class="ttdoc">Fills a F32  tensor with random numbers uniformly within given range, according to Glorot et al.</div></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>Glorot et al., 2010 ( <a href="http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf">http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf</a> )</dd></dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to initialize with random numbers (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a9a8744295c9182d50c6da19d54c4e75a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a9a8744295c9182d50c6da19d54c4e75a">&#9670;&nbsp;</a></span>aimath_f32_default_init_glorot_uniform_cdim()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_init_glorot_uniform_cdim </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int8_t&#160;</td>
          <td class="paramname"><em>cin_axis</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int8_t&#160;</td>
          <td class="paramname"><em>cout_axis</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with random numbers uniformly within given range, according to Glorot et al. </p>
<p class="formulaDsp">
\[ fan_{avg} = \frac{fan_{in} + fan_{out}}{2} \]
</p>
 <p class="formulaDsp">
\[ r = \sqrt{\frac{3}{fan_{avg}}} \]
</p>
 <p class="formulaDsp">
\[ tensor_i \in \mathcal{U}(-r, r) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a9a8744295c9182d50c6da19d54c4e75a">aimath_f32_default_init_glorot_uniform_cdim</a>(&amp;tensor, 0, 1);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a9a8744295c9182d50c6da19d54c4e75a"><div class="ttname"><a href="aimath__f32__default_8h.html#a9a8744295c9182d50c6da19d54c4e75a">aimath_f32_default_init_glorot_uniform_cdim</a></div><div class="ttdeci">void aimath_f32_default_init_glorot_uniform_cdim(aitensor_t *tensor, int8_t cin_axis, int8_t cout_axis)</div><div class="ttdoc">Fills a F32  tensor with random numbers uniformly within given range, according to Glorot et al.</div></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>Glorot et al., 2010 ( <a href="http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf">http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf</a> )</dd></dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to initialize with random numbers (N-D tensor) </td></tr>
    <tr><td class="paramname">cin_axis</td><td>Axis of the input channels (negative number means indexing from the end) </td></tr>
    <tr><td class="paramname">cout_axis</td><td>Axis of the output channels (negative number means indexing from the end) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a11d31daf4d011186446fd77a253b9f7f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a11d31daf4d011186446fd77a253b9f7f">&#9670;&nbsp;</a></span>aimath_f32_default_init_he_uniform()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_init_he_uniform </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with uniformly drawn random numbers within given range, according to He et al. </p>
<p>Same functionality as <a class="el" href="aimath__f32__default_8h.html#afe89616689db127803144f477557a10a" title="Fills a F32  tensor with uniformly drawn random numbers within given range, according to He et al.">aimath_f32_default_init_he_uniform_cdim()</a> with cout_axis = 1 (channels last data format).</p>
<p class="formulaDsp">
\[ fan_{avg} = \frac{fan_{in}}{2} \]
</p>
 <p class="formulaDsp">
\[ r = \sqrt{\frac{3}{fan_{avg}}} \]
</p>
 <p class="formulaDsp">
\[ tensor_i \in \mathcal{U}(-r, r) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a11d31daf4d011186446fd77a253b9f7f">aimath_f32_default_init_he_uniform</a>(&amp;tensor);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a11d31daf4d011186446fd77a253b9f7f"><div class="ttname"><a href="aimath__f32__default_8h.html#a11d31daf4d011186446fd77a253b9f7f">aimath_f32_default_init_he_uniform</a></div><div class="ttdeci">void aimath_f32_default_init_he_uniform(aitensor_t *tensor)</div><div class="ttdoc">Fills a F32  tensor with uniformly drawn random numbers within given range, according to He et al.</div></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>He et al., 2015 ( <a href="https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html">https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html</a> )</dd></dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to initialize with random numbers (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="afe89616689db127803144f477557a10a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#afe89616689db127803144f477557a10a">&#9670;&nbsp;</a></span>aimath_f32_default_init_he_uniform_cdim()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_init_he_uniform_cdim </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int8_t&#160;</td>
          <td class="paramname"><em>cout_axis</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with uniformly drawn random numbers within given range, according to He et al. </p>
<p class="formulaDsp">
\[ fan_{avg} = \frac{fan_{in}}{2} \]
</p>
 <p class="formulaDsp">
\[ r = \sqrt{\frac{3}{fan_{avg}}} \]
</p>
 <p class="formulaDsp">
\[ tensor_i \in \mathcal{U}(-r, r) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#afe89616689db127803144f477557a10a">aimath_f32_default_init_he_uniform_cdim</a>(&amp;tensor, 1);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_afe89616689db127803144f477557a10a"><div class="ttname"><a href="aimath__f32__default_8h.html#afe89616689db127803144f477557a10a">aimath_f32_default_init_he_uniform_cdim</a></div><div class="ttdeci">void aimath_f32_default_init_he_uniform_cdim(aitensor_t *tensor, int8_t cout_axis)</div><div class="ttdoc">Fills a F32  tensor with uniformly drawn random numbers within given range, according to He et al.</div></div>
</div><!-- fragment --><dl class="section see"><dt>See also</dt><dd>He et al., 2015 ( <a href="https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html">https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html</a> )</dd></dl>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to initialize with random numbers (N-D tensor) </td></tr>
    <tr><td class="paramname">cout_axis</td><td>Axis of the output channels (negative number means indexing from the end) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a2209ddbfefea6e2a3b0c23141ba731ba"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a2209ddbfefea6e2a3b0c23141ba731ba">&#9670;&nbsp;</a></span>aimath_f32_default_init_ones()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_init_ones </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with ones. </p>
<p class="formulaDsp">
\[ tensor_{i} = 1 \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a2209ddbfefea6e2a3b0c23141ba731ba">aimath_f32_default_init_ones</a>(&amp;tensor);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a2209ddbfefea6e2a3b0c23141ba731ba"><div class="ttname"><a href="aimath__f32__default_8h.html#a2209ddbfefea6e2a3b0c23141ba731ba">aimath_f32_default_init_ones</a></div><div class="ttdeci">void aimath_f32_default_init_ones(aitensor_t *tensor)</div><div class="ttdoc">Fills a F32  tensor with ones.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to set to ones (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a760163e6cee0f2a5e341069ad20bfcde"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a760163e6cee0f2a5e341069ad20bfcde">&#9670;&nbsp;</a></span>aimath_f32_default_init_zeros()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_init_zeros </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with zeros. </p>
<p class="formulaDsp">
\[ tensor_{i} = 0 \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a760163e6cee0f2a5e341069ad20bfcde">aimath_f32_default_init_zeros</a>(&amp;tensor);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a760163e6cee0f2a5e341069ad20bfcde"><div class="ttname"><a href="aimath__f32__default_8h.html#a760163e6cee0f2a5e341069ad20bfcde">aimath_f32_default_init_zeros</a></div><div class="ttdeci">void aimath_f32_default_init_zeros(aitensor_t *tensor)</div><div class="ttdoc">Fills a F32  tensor with zeros.</div></div>
</div><!-- fragment --><p>In the F32 implementation of this function, there is no difference between <a class="el" href="aimath__f32__default_8h.html#a072ffbef0c60343957e4621f01551e85" title="Fills a F32  tensor with zeros.">aimath_f32_default_zero_tensor()</a> and <a class="el" href="aimath__f32__default_8h.html#a760163e6cee0f2a5e341069ad20bfcde" title="Fills a F32  tensor with zeros.">aimath_f32_default_init_zeros()</a>.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to set to zero (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="afaafd34ad1adc476a7c120dce8f39498"></a>
<h2 class="memtitle"><span class="permalink"><a href="#afaafd34ad1adc476a7c120dce8f39498">&#9670;&nbsp;</a></span>aimath_f32_default_leaky_relu()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_leaky_relu </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>alpha</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the leaky rectifier (leaky ReLU) value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \begin{cases} \alpha \cdot x_i &amp; \text{if } x_i &lt; 0 \\ x_i &amp; \text{if } x_i \geq 0 \end{cases} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> alpha = 0.01f;</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#afaafd34ad1adc476a7c120dce8f39498">aimath_f32_default_leaky_relu</a>(&amp;x, &amp;alpha, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_afaafd34ad1adc476a7c120dce8f39498"><div class="ttname"><a href="aimath__f32__default_8h.html#afaafd34ad1adc476a7c120dce8f39498">aimath_f32_default_leaky_relu</a></div><div class="ttdeci">void aimath_f32_default_leaky_relu(const aitensor_t *x, const void *alpha, aitensor_t *result)</div><div class="ttdoc">Calculates the leaky rectifier (leaky ReLU) value of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the leaky ReLU from (N-D tensor) </td></tr>
    <tr><td class="paramname">*alpha</td><td>Scalar \( \alpha \) (type aiscalar_f32_t / float) for the leakage </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a1ce664be9a6c513d1fb7cae44d1d7e17"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1ce664be9a6c513d1fb7cae44d1d7e17">&#9670;&nbsp;</a></span>aimath_f32_default_linear()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_linear </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>c</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b and adds a vector c to each row. </p>
<p>The addition of the horizontal vector c is performed via broadcast, i.e. element-wise in each column. Mathematically this broadcast is equal to multiplying c with a vertical vector of ones (with one element per row of the result) and adding the result to \( a \cdot b \).</p>
<p class="formulaDsp">
\[ result = a \cdot b + \left( \begin{array}{c} 1 \\ 1 \\ \vdots \\ 1 \\ \end{array}\right) \cdot c \]
</p>
<p>Example: </p><p class="formulaDsp">
\[ a = \left( \begin{array}{rrr} 1 &amp; 2 &amp; 3 \\ 4 &amp; 5 &amp; 6 \\ 7 &amp; 8 &amp; 9 \end{array}\right) \]
</p>
<p class="formulaDsp">
\[ b = \left( \begin{array}{rr} 1 &amp; 0 \\ 0 &amp; 1 \\ 0 &amp; 0 \end{array}\right) \]
</p>
<p class="formulaDsp">
\[ c = \left( \begin{array}{rr} 2 &amp; 5 \end{array}\right) \]
</p>
<p class="formulaDsp">
\[ result = a \cdot b + \left( \begin{array}{r} 1 \\ 1 \\ 1 \\ \end{array}\right) \cdot c \]
</p>
<p class="formulaDsp">
\[ = \left( \begin{array}{rr} 1 &amp; 2 \\ 4 &amp; 5 \\ 7 &amp; 8 \end{array}\right) + \left( \begin{array}{rr} 2 &amp; 5 \\ 2 &amp; 5 \\ 2 &amp; 5 \end{array}\right) \]
</p>
<p class="formulaDsp">
\[ = \left( \begin{array}{rr} 3 &amp; 7 \\ 6 &amp; 10 \\ 9 &amp; 13 \end{array}\right) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {3, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[3*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f,</div>
<div class="line">                     7.0f, 8.0f, 9.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> b_data[3*2] = {1.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t c_shape[2] = {1, 2};</div>
<div class="line"><span class="keywordtype">float</span> c_data[1*2] = {2.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> c = AITENSOR_2D_F32(c_shape, c_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> result_data[3*2];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17">aimath_f32_default_linear</a>(&amp;a, &amp;b, &amp;c, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a1ce664be9a6c513d1fb7cae44d1d7e17"><div class="ttname"><a href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17">aimath_f32_default_linear</a></div><div class="ttdeci">void aimath_f32_default_linear(const aitensor_t *a, const aitensor_t *b, const aitensor_t *c, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication of F32  matrices a and b and adds a vector c to each row.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [N x K]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [K x M]) </td></tr>
    <tr><td class="paramname">*c</td><td>F32 vector c (2D tensor of shape [1 x M] or 1D tensor of shape [M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a6fd225be785420477239f8e72e26a77f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a6fd225be785420477239f8e72e26a77f">&#9670;&nbsp;</a></span>aimath_f32_default_linear_at()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_linear_at </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>c</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b and adds a vector c to each row. </p>
<p>Same operation as <a class="el" href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17" title="Performs a matrix multiplication of F32  matrices a and b and adds a vector c to each row.">aimath_f32_default_linear()</a> but with a transposed a matrix.</p>
<p>The addition of the horizontal vector c is performed via broadcast, i.e. element-wise in each column. Mathematically this broadcast is equal to multiplying c with a vertical vector of ones (with one element per row of the result) and adding the result to \( a^T \cdot b \).</p>
<p class="formulaDsp">
\[ result = a^T \cdot b + \left( \begin{array}{c} 1 \\ 1 \\ \vdots \\ 1 \\ \end{array}\right) \cdot c \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {3, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[3*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f,</div>
<div class="line">                     7.0f, 8.0f, 9.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> b_data[3*2] = {1.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t c_shape[2] = {1, 2};</div>
<div class="line"><span class="keywordtype">float</span> c_data[1*2] = {2.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> c = AITENSOR_2D_F32(c_shape, c_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> result_data[3*2];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a6fd225be785420477239f8e72e26a77f">aimath_f32_default_linear_at</a>(&amp;a, &amp;b, &amp;c, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a6fd225be785420477239f8e72e26a77f"><div class="ttname"><a href="aimath__f32__default_8h.html#a6fd225be785420477239f8e72e26a77f">aimath_f32_default_linear_at</a></div><div class="ttdeci">void aimath_f32_default_linear_at(const aitensor_t *a, const aitensor_t *b, const aitensor_t *c, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication of F32  matrices a (transposed) and b and adds a vector c to each ro...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [K x N]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [K x M]) </td></tr>
    <tr><td class="paramname">*c</td><td>F32 vector c (2D tensor of shape [1 x M] or 1D tensor of shape [M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="adc591f099e035057257fa4e64a6efbb7"></a>
<h2 class="memtitle"><span class="permalink"><a href="#adc591f099e035057257fa4e64a6efbb7">&#9670;&nbsp;</a></span>aimath_f32_default_linear_atrt()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_linear_atrt </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>c</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication with transposed result of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b and adds a vector c to each row. </p>
<p>Same operation as <a class="el" href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17" title="Performs a matrix multiplication of F32  matrices a and b and adds a vector c to each row.">aimath_f32_default_linear()</a> but with a transposed a matrix and transposed result.</p>
<p>The addition of the horizontal vector c is performed via broadcast, i.e. element-wise in each column. Mathematically this broadcast is equal to multiplying c with a vertical vector of ones (with one element per row of the intermediate result) and adding the result to \( a^T \cdot b \).</p>
<p class="formulaDsp">
\[ result = \left( a^T \cdot b + \left( \begin{array}{c} 1 \\ 1 \\ \vdots \\ 1 \\ \end{array}\right) \cdot c \right)^T \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {3, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[3*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f,</div>
<div class="line">                     7.0f, 8.0f, 9.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> b_data[3*2] = {1.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t c_shape[2] = {1, 2};</div>
<div class="line"><span class="keywordtype">float</span> c_data[1*2] = {2.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> c = AITENSOR_2D_F32(c_shape, c_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#adc591f099e035057257fa4e64a6efbb7">aimath_f32_default_linear_atrt</a>(&amp;a, &amp;b, &amp;c, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_adc591f099e035057257fa4e64a6efbb7"><div class="ttname"><a href="aimath__f32__default_8h.html#adc591f099e035057257fa4e64a6efbb7">aimath_f32_default_linear_atrt</a></div><div class="ttdeci">void aimath_f32_default_linear_atrt(const aitensor_t *a, const aitensor_t *b, const aitensor_t *c, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication with transposed result of F32  matrices a (transposed) and b and add...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [K x N]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [K x M]) </td></tr>
    <tr><td class="paramname">*c</td><td>F32 vector c (2D tensor of shape [1 x M] or 1D tensor of shape [M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix (2D tensor of shape [M x N]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a234496e739207585fd533a33be9f1292"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a234496e739207585fd533a33be9f1292">&#9670;&nbsp;</a></span>aimath_f32_default_linear_bt()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_linear_bt </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>c</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b (transposed) and adds a vector c to each row. </p>
<p>Same operation as <a class="el" href="aimath__f32__default_8h.html#a1ce664be9a6c513d1fb7cae44d1d7e17" title="Performs a matrix multiplication of F32  matrices a and b and adds a vector c to each row.">aimath_f32_default_linear()</a> but with a transposed b matrix.</p>
<p>The addition of the horizontal vector c is performed via broadcast, i.e. element-wise in each column. Mathematically this broadcast is equal to multiplying c with a vertical vector of ones (with one element per row of the result) and adding the result to \( a \cdot b^T \).</p>
<p class="formulaDsp">
\[ result = a \cdot b^T + \left( \begin{array}{c} 1 \\ 1 \\ \vdots \\ 1 \\ \end{array}\right) \cdot c \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {3, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[3*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f,</div>
<div class="line">                     7.0f, 8.0f, 9.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> b_data[2*3] = {1.0f, 0.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t c_shape[2] = {1, 2};</div>
<div class="line"><span class="keywordtype">float</span> c_data[1*2] = {2.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> c = AITENSOR_2D_F32(c_shape, c_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> result_data[3*2];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a234496e739207585fd533a33be9f1292">aimath_f32_default_linear_bt</a>(&amp;a, &amp;b, &amp;c, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a234496e739207585fd533a33be9f1292"><div class="ttname"><a href="aimath__f32__default_8h.html#a234496e739207585fd533a33be9f1292">aimath_f32_default_linear_bt</a></div><div class="ttdeci">void aimath_f32_default_linear_bt(const aitensor_t *a, const aitensor_t *b, const aitensor_t *c, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication of F32  matrices a and b (transposed) and adds a vector c to each ro...</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [N x K]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [M x K]) </td></tr>
    <tr><td class="paramname">*c</td><td>F32 vector c (2D tensor of shape [1 x M] or 1D tensor of shape [M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a76f4b7504be8560f3b8ce8fd651402a7"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a76f4b7504be8560f3b8ce8fd651402a7">&#9670;&nbsp;</a></span>aimath_f32_default_mat_mul()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mat_mul </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b. </p>
<p class="formulaDsp">
\[ result = a \cdot b \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> b_data[3*2] = {1.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 2};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*2];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7">aimath_f32_default_mat_mul</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a76f4b7504be8560f3b8ce8fd651402a7"><div class="ttname"><a href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7">aimath_f32_default_mat_mul</a></div><div class="ttdeci">void aimath_f32_default_mat_mul(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication of F32  matrices a and b.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [N x K]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [K x M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix of the multiplication (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="ade94370f491c7e34a95c892d36611548"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ade94370f491c7e34a95c892d36611548">&#9670;&nbsp;</a></span>aimath_f32_default_mat_mul_at()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mat_mul_at </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b. </p>
<p>Same operation as <a class="el" href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7" title="Performs a matrix multiplication of F32  matrices a and b.">aimath_f32_default_mat_mul()</a> but with a transposed a matrix.</p>
<p class="formulaDsp">
\[ result = a^T \cdot b \]
</p>
<p>Example: </p><div class="fragment"><div class="line"> uint16_t a_shape[2] = {3, 2};</div>
<div class="line"> <span class="keywordtype">float</span> a_data[3*2] = {1.0f, 2.0f,</div>
<div class="line">                      4.0f, 5.0f,</div>
<div class="line">                      7.0f, 8.0f,};</div>
<div class="line"> <a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {3, 2};</div>
<div class="line"><span class="keywordtype">float</span> b_data[3*2] = {1.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f,</div>
<div class="line">                     0.0f, 0.0f};</div>
<div class="line"> <a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line"> uint16_t result_shape[2] = {2, 2};</div>
<div class="line"> <span class="keywordtype">float</span> result_data[2*2];</div>
<div class="line"> <a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"> <a class="code" href="aimath__f32__default_8h.html#ade94370f491c7e34a95c892d36611548">aimath_f32_default_mat_mul_at</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"> <a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_ade94370f491c7e34a95c892d36611548"><div class="ttname"><a href="aimath__f32__default_8h.html#ade94370f491c7e34a95c892d36611548">aimath_f32_default_mat_mul_at</a></div><div class="ttdeci">void aimath_f32_default_mat_mul_at(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication of F32  matrices a (transposed) and b.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [K x N]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [K x M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix of the multiplication (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a3a3abf9e2a6a93a2097d08b6e8c31891"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a3a3abf9e2a6a93a2097d08b6e8c31891">&#9670;&nbsp;</a></span>aimath_f32_default_mat_mul_atrt()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mat_mul_atrt </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication with transposed result of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a (transposed) and b. </p>
<p>Same operation as <a class="el" href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7" title="Performs a matrix multiplication of F32  matrices a and b.">aimath_f32_default_mat_mul()</a> but with a transposed a matrix and a transposed result.</p>
<p class="formulaDsp">
\[ result = (a^T \cdot b)^T \]
</p>
<p>Example: </p><div class="fragment"><div class="line"> uint16_t a_shape[2] = {3, 2};</div>
<div class="line"> <span class="keywordtype">float</span> a_data[3*2] = {1.0f, 2.0f,</div>
<div class="line">                      4.0f, 5.0f,</div>
<div class="line">                      7.0f, 8.0f,};</div>
<div class="line"> <a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {3, 1};</div>
<div class="line"><span class="keywordtype">float</span> b_data[3*1] = {1.0f,</div>
<div class="line">                     0.0f,</div>
<div class="line">                     0.0f};</div>
<div class="line"> <a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line"> uint16_t result_shape[2] = {1, 2};</div>
<div class="line"> <span class="keywordtype">float</span> result_data[1*2];</div>
<div class="line"> <a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"> <a class="code" href="aimath__f32__default_8h.html#a3a3abf9e2a6a93a2097d08b6e8c31891">aimath_f32_default_mat_mul_atrt</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"> <a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a3a3abf9e2a6a93a2097d08b6e8c31891"><div class="ttname"><a href="aimath__f32__default_8h.html#a3a3abf9e2a6a93a2097d08b6e8c31891">aimath_f32_default_mat_mul_atrt</a></div><div class="ttdeci">void aimath_f32_default_mat_mul_atrt(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication with transposed result of F32  matrices a (transposed) and b.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [K x N]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [K x M]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix of the multiplication (2D tensor of shape [M x N]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a04af5314c51e75495016026343dec02e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a04af5314c51e75495016026343dec02e">&#9670;&nbsp;</a></span>aimath_f32_default_mat_mul_bt()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mat_mul_bt </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a matrix multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> matrices a and b (transposed) </p>
<p>Same operation as <a class="el" href="aimath__f32__default_8h.html#a76f4b7504be8560f3b8ce8fd651402a7" title="Performs a matrix multiplication of F32  matrices a and b.">aimath_f32_default_mat_mul()</a> but with a transposed b matrix.</p>
<p class="formulaDsp">
\[ result = a \cdot b^T \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> b_data[2*3] = {1.0f, 0.0f, 0.0f,</div>
<div class="line">                     0.0f, 1.0f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 2};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*2];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a04af5314c51e75495016026343dec02e">aimath_f32_default_mat_mul_bt</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a04af5314c51e75495016026343dec02e"><div class="ttname"><a href="aimath__f32__default_8h.html#a04af5314c51e75495016026343dec02e">aimath_f32_default_mat_mul_bt</a></div><div class="ttdeci">void aimath_f32_default_mat_mul_bt(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs a matrix multiplication of F32  matrices a and b (transposed)</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [N x K]) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 matrix b (2D tensor of shape [M x K]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix of the multiplication (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a2cca0f45a41c48a7730664f43933c066"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a2cca0f45a41c48a7730664f43933c066">&#9670;&nbsp;</a></span>aimath_f32_default_max()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_max </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Identifies the maximum value in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result = max(x) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = {0.0f, 1.0f, 2.0f,</div>
<div class="line">                     3.0f, 4.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a2cca0f45a41c48a7730664f43933c066">aimath_f32_default_max</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a2cca0f45a41c48a7730664f43933c066"><div class="ttname"><a href="aimath__f32__default_8h.html#a2cca0f45a41c48a7730664f43933c066">aimath_f32_default_max</a></div><div class="ttdeci">void aimath_f32_default_max(const aitensor_t *x, void *result)</div><div class="ttdoc">Identifies the maximum value in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor x to get the maximum value of (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Scalar result (type aiscalar_f32_t / float) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a0bf115cae42c704411916294aa391169"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a0bf115cae42c704411916294aa391169">&#9670;&nbsp;</a></span>aimath_f32_default_mean_channelwise()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mean_channelwise </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int8_t&#160;</td>
          <td class="paramname"><em>channel_axis</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the channel-wise mean values of the <a class="el" href="aimath__f32_8h.html">F32 </a> tensor x. </p>
<p>Calculates the empirical mean for each channel of the given axis. The result tensor is 1D: </p><p class="formulaDsp">
\[ means_i = \frac{1}{m} \sum_{j=1}^{m} x_{i,j} \]
</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">x</td><td>F32 input tensor (N-D) </td></tr>
    <tr><td class="paramname">channel_axis</td><td>Index of the channel axis (negative values mean indexing from the end) </td></tr>
    <tr><td class="paramname">result</td><td>F32 result vector (1D) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a29b2f81f252312acbac15df0d415d791"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a29b2f81f252312acbac15df0d415d791">&#9670;&nbsp;</a></span>aimath_f32_default_min()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_min </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Identifies the minimum value in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result = min(x) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = {0.0f, 1.0f, 2.0f,</div>
<div class="line">                     3.0f, 4.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a29b2f81f252312acbac15df0d415d791">aimath_f32_default_min</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a29b2f81f252312acbac15df0d415d791"><div class="ttname"><a href="aimath__f32__default_8h.html#a29b2f81f252312acbac15df0d415d791">aimath_f32_default_min</a></div><div class="ttdeci">void aimath_f32_default_min(const aitensor_t *x, void *result)</div><div class="ttdoc">Identifies the minimum value in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor x to get the minimum value of (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Scalar result (type aiscalar_f32_t / float) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a7e43d536e6b3bf837d6003a879e6c62b"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a7e43d536e6b3bf837d6003a879e6c62b">&#9670;&nbsp;</a></span>aimath_f32_default_mse_gradients_mean()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mse_gradients_mean </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the gradients of the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target</td><td>F32 matrix with the target data / true values / labels (2D tensor, rows one-hot encoded) </td></tr>
    <tr><td class="paramname">*result</td><td>F32 tensor containing the gradients </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a44b61123d9b51b3d5abf710fdefa6695"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a44b61123d9b51b3d5abf710fdefa6695">&#9670;&nbsp;</a></span>aimath_f32_default_mse_gradients_sum()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mse_gradients_sum </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the gradients of the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target</td><td>F32 matrix with the target data / true values / labels (2D tensor, rows one-hot encoded) </td></tr>
    <tr><td class="paramname">*result</td><td>F32 tensor containing the gradients </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a8daa94f78e201dd90a3fcb91ec83845c"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a8daa94f78e201dd90a3fcb91ec83845c">&#9670;&nbsp;</a></span>aimath_f32_default_mse_loss_mean()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mse_loss_mean </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a mean reduction. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target</td><td>F32 matrix with the target data / true values / labels (2D tensor, rows one-hot encoded) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting loss (float) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a875479b966264b5e6d395514f0ca7e25"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a875479b966264b5e6d395514f0ca7e25">&#9670;&nbsp;</a></span>aimath_f32_default_mse_loss_sum()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_mse_loss_sum </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>predicted</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>target</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the mean squared error between the <a class="el" href="aimath__f32_8h.html">F32 </a> predicted and the target data using a sum reduction. </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*predicted</td><td>F32 matrix with the predicted or calculated values (2D tensor) </td></tr>
    <tr><td class="paramname">*target</td><td>F32 matrix with the target data / true values / labels (2D tensor, rows one-hot encoded) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting loss (float) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="abf4023f330ca3318b600967710aa4de1"></a>
<h2 class="memtitle"><span class="permalink"><a href="#abf4023f330ca3318b600967710aa4de1">&#9670;&nbsp;</a></span>aimath_f32_default_multiply()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_multiply </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an element wise multiplication of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b (Hadamard product) </p>
<p class="formulaDsp">
\[ result = a \circ b \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> b_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#abf4023f330ca3318b600967710aa4de1">aimath_f32_default_multiply</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_abf4023f330ca3318b600967710aa4de1"><div class="ttname"><a href="aimath__f32__default_8h.html#abf4023f330ca3318b600967710aa4de1">aimath_f32_default_multiply</a></div><div class="ttdeci">void aimath_f32_default_multiply(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs an element wise multiplication of F32  tensors a and b (Hadamard product)</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 tensor a (N-D tensor) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 tensor b (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the element wise multiplication (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a2276eb0146a4ef4f106f0aaf7cc7003f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a2276eb0146a4ef4f106f0aaf7cc7003f">&#9670;&nbsp;</a></span>aimath_f32_default_norm_squared()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_norm_squared </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the sum of the squared elements (squared L2 norm) of a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result = \sum_i x_{i}^2 \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = {0.0f, 1.0f, 2.0f,</div>
<div class="line">                     3.0f, 4.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a2276eb0146a4ef4f106f0aaf7cc7003f">aimath_f32_default_norm_squared</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a2276eb0146a4ef4f106f0aaf7cc7003f"><div class="ttname"><a href="aimath__f32__default_8h.html#a2276eb0146a4ef4f106f0aaf7cc7003f">aimath_f32_default_norm_squared</a></div><div class="ttdeci">void aimath_f32_default_norm_squared(const aitensor_t *x, void *result)</div><div class="ttdoc">Calculates the squared sum of all elements in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor x (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Scalar result (type aiscalar_f32_t / float) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a69ea48225650ecaf3493d137f3e91c4e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a69ea48225650ecaf3493d137f3e91c4e">&#9670;&nbsp;</a></span>aimath_f32_default_relu()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_relu </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the rectifier (ReLU) value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = max(0, x_{i}) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a69ea48225650ecaf3493d137f3e91c4e">aimath_f32_default_relu</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a69ea48225650ecaf3493d137f3e91c4e"><div class="ttname"><a href="aimath__f32__default_8h.html#a69ea48225650ecaf3493d137f3e91c4e">aimath_f32_default_relu</a></div><div class="ttdeci">void aimath_f32_default_relu(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the rectifier (ReLU) value of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the ReLU from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="abddd24f14f793be28cb73d4273a7f827"></a>
<h2 class="memtitle"><span class="permalink"><a href="#abddd24f14f793be28cb73d4273a7f827">&#9670;&nbsp;</a></span>aimath_f32_default_scalar_add()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_scalar_add </td>
          <td>(</td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>scalar</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an element wise addition of a scalar to a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result = a + \left( \begin{array}{ccc} 1 &amp; \ldots &amp; 1 \\ \vdots &amp; \ddots &amp; \vdots \\ 1 &amp; \ldots &amp; 1 \end{array}\right) \cdot scalar \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {0.0f, 1.0f, 2.0f,</div>
<div class="line">                     3.0f, 4.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> scalar = 0.1f;</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#abddd24f14f793be28cb73d4273a7f827">aimath_f32_default_scalar_add</a>(&amp;scalar, &amp;a, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_abddd24f14f793be28cb73d4273a7f827"><div class="ttname"><a href="aimath__f32__default_8h.html#abddd24f14f793be28cb73d4273a7f827">aimath_f32_default_scalar_add</a></div><div class="ttdeci">void aimath_f32_default_scalar_add(const void *scalar, const aitensor_t *a, aitensor_t *result)</div><div class="ttdoc">Performs an element wise addition of a scalar to a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*scalar</td><td>Scalar (type aiscalar_f32_t / float) </td></tr>
    <tr><td class="paramname">*a</td><td>F32 tensor a (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the element wise scalar addition (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a62d9322b1fe696d91a6bb1d2f265e7ed"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a62d9322b1fe696d91a6bb1d2f265e7ed">&#9670;&nbsp;</a></span>aimath_f32_default_scalar_mul()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_scalar_mul </td>
          <td>(</td>
          <td class="paramtype">const void *&#160;</td>
          <td class="paramname"><em>scalar</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a scalar multiplication (scaling) of <a class="el" href="aimath__f32_8h.html">F32 </a> tensor a and a scalar. </p>
<p class="formulaDsp">
\[ result = scalar \cdot a \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {0.0f, 1.0f, 2.0f,</div>
<div class="line">                     3.0f, 4.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> scalar = 0.1f;</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a62d9322b1fe696d91a6bb1d2f265e7ed">aimath_f32_default_scalar_mul</a>(&amp;scalar, &amp;a, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a62d9322b1fe696d91a6bb1d2f265e7ed"><div class="ttname"><a href="aimath__f32__default_8h.html#a62d9322b1fe696d91a6bb1d2f265e7ed">aimath_f32_default_scalar_mul</a></div><div class="ttdeci">void aimath_f32_default_scalar_mul(const void *scalar, const aitensor_t *a, aitensor_t *result)</div><div class="ttdoc">Performs a scalar multiplication (scaling) of F32  tensor a and a scalar.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*scalar</td><td>Scalar (type aiscalar_f32_t / float) </td></tr>
    <tr><td class="paramname">*a</td><td>F32 tensor a (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the scalar multiplication (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a81128f005039e7eb349fdb0af24fdd2c"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a81128f005039e7eb349fdb0af24fdd2c">&#9670;&nbsp;</a></span>aimath_f32_default_scale_by_batch_size()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_scale_by_batch_size </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Scales a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor by batch size (size of first dimension). </p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 tensor that is going to be scaled by its batch size </td></tr>
    <tr><td class="paramname">*result</td><td>Scaled F32 tensor </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a9c2af96bf91c4443ab75036f585fdba3"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a9c2af96bf91c4443ab75036f585fdba3">&#9670;&nbsp;</a></span>aimath_f32_default_sigmoid()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_sigmoid </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the sigmoid of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \sigma(x_{i}) = \frac{1}{1 + e^{-x_{i}}} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a9c2af96bf91c4443ab75036f585fdba3">aimath_f32_default_sigmoid</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the sigmoid from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="ac14896a86a6600a4be48b84c9977acaf"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ac14896a86a6600a4be48b84c9977acaf">&#9670;&nbsp;</a></span>aimath_f32_default_softmax()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_softmax </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the softmax value of each row of a <a class="el" href="aimath__f32_8h.html">F32 </a> matrix. </p>
<p class="formulaDsp">
\[ result_{i} = \frac{e^{x_i}}{\sum_{j=1}^{K} e^{x_j}} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#ac14896a86a6600a4be48b84c9977acaf">aimath_f32_default_softmax</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_ac14896a86a6600a4be48b84c9977acaf"><div class="ttname"><a href="aimath__f32__default_8h.html#ac14896a86a6600a4be48b84c9977acaf">aimath_f32_default_softmax</a></div><div class="ttdeci">void aimath_f32_default_softmax(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the softmax value of each row of a F32  matrix.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 matrix to calculate the softmax from (2D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 matrix (2D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a6f36c52ff560b8172098f69aab5389c9"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a6f36c52ff560b8172098f69aab5389c9">&#9670;&nbsp;</a></span>aimath_f32_default_softsign()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_softsign </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the softsign value of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \frac {x_i} {1 + |x_i|} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a6f36c52ff560b8172098f69aab5389c9">aimath_f32_default_softsign</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a6f36c52ff560b8172098f69aab5389c9"><div class="ttname"><a href="aimath__f32__default_8h.html#a6f36c52ff560b8172098f69aab5389c9">aimath_f32_default_softsign</a></div><div class="ttdeci">void aimath_f32_default_softsign(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the softsign value of each element in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the softsign from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a3bf3e26a0873244ef65ed20775360312"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a3bf3e26a0873244ef65ed20775360312">&#9670;&nbsp;</a></span>aimath_f32_default_sqrt()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_sqrt </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the element wise square root of a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \sqrt{x_{i}} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a3bf3e26a0873244ef65ed20775360312">aimath_f32_default_sqrt</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a3bf3e26a0873244ef65ed20775360312"><div class="ttname"><a href="aimath__f32__default_8h.html#a3bf3e26a0873244ef65ed20775360312">aimath_f32_default_sqrt</a></div><div class="ttdeci">void aimath_f32_default_sqrt(const aitensor_t *x, aitensor_t *result)</div><div class="ttdoc">Calculates the element wise square root of a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the square root from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a1c4d0fe9ee55e7b4677eff2143ed2328"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1c4d0fe9ee55e7b4677eff2143ed2328">&#9670;&nbsp;</a></span>aimath_f32_default_sum()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_sum </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the sum of all elements in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result = \sum_i x_{i} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = {0.0f, 1.0f, 2.0f,</div>
<div class="line">                     3.0f, 4.0f, 5.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><span class="keywordtype">float</span> result;</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a1c4d0fe9ee55e7b4677eff2143ed2328">aimath_f32_default_sum</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#aaa9ca757028820849ef3dde13cc46565">print_aiscalar</a>(&amp;result, <a class="code" href="aimath__f32_8h.html#a06eea7384624233f57daab2648d8ce37">aif32</a>);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a1c4d0fe9ee55e7b4677eff2143ed2328"><div class="ttname"><a href="aimath__f32__default_8h.html#a1c4d0fe9ee55e7b4677eff2143ed2328">aimath_f32_default_sum</a></div><div class="ttdeci">void aimath_f32_default_sum(const aitensor_t *x, void *result)</div><div class="ttdoc">Calculates the sum of all elements in a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor x (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Scalar result (type aiscalar_f32_t / float) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="abd86f478417339f44905ede947a5e089"></a>
<h2 class="memtitle"><span class="permalink"><a href="#abd86f478417339f44905ede947a5e089">&#9670;&nbsp;</a></span>aimath_f32_default_tanh()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_tanh </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculates the tanh of each element in a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ result_{i} = \tanh(x_{i}) = \frac{e^{x_i} - e^{-x_i}}{e^{x_i} + e^{-x_i}} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = { 1.0f, -2.0f,  3.0f,</div>
<div class="line">                     -4.0f,  5.0f, -6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#abd86f478417339f44905ede947a5e089">aimath_f32_default_tanh</a>(&amp;x, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to calculate the tanh from (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a235398f06dc3faf34d12712f2d3ee887"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a235398f06dc3faf34d12712f2d3ee887">&#9670;&nbsp;</a></span>aimath_f32_default_tensor_add()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_tensor_add </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an element wise addition of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b. </p>
<p class="formulaDsp">
\[ result = a + b \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> b_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a235398f06dc3faf34d12712f2d3ee887">aimath_f32_default_tensor_add</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a235398f06dc3faf34d12712f2d3ee887"><div class="ttname"><a href="aimath__f32__default_8h.html#a235398f06dc3faf34d12712f2d3ee887">aimath_f32_default_tensor_add</a></div><div class="ttdeci">void aimath_f32_default_tensor_add(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs an element wise addition of F32  tensors a and b.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 tensor a (N-D tensor) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 tensor b (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the element wise addition (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a580a19afaf81fa085160be7fead0ac2f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a580a19afaf81fa085160be7fead0ac2f">&#9670;&nbsp;</a></span>aimath_f32_default_tensor_init_uniform()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_tensor_init_uniform </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">float&#160;</td>
          <td class="paramname"><em>from</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">float&#160;</td>
          <td class="paramname"><em>to</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with random numbers created from a uniform distribution within a given range. </p>
<p class="formulaDsp">
\[ tensor_i \in \mathcal{U}(from, to) \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a580a19afaf81fa085160be7fead0ac2f">aimath_f32_default_tensor_init_uniform</a>(&amp;tensor, -1.5f, 1.5f);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a580a19afaf81fa085160be7fead0ac2f"><div class="ttname"><a href="aimath__f32__default_8h.html#a580a19afaf81fa085160be7fead0ac2f">aimath_f32_default_tensor_init_uniform</a></div><div class="ttdeci">void aimath_f32_default_tensor_init_uniform(aitensor_t *tensor, float from, float to)</div><div class="ttdoc">Fills a F32  tensor with random numbers created from a uniform distribution within a given range.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to initialize with random numbers (N-D tensor) </td></tr>
    <tr><td class="paramname">from</td><td>Minimum value of the uniform distribution </td></tr>
    <tr><td class="paramname">to</td><td>Maximum value of the uniform distribution </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="af1409f9886a9614f83cb0a5d5360be60"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af1409f9886a9614f83cb0a5d5360be60">&#9670;&nbsp;</a></span>aimath_f32_default_tensor_sub()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_tensor_sub </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs an element wise subtraction of <a class="el" href="aimath__f32_8h.html">F32 </a> tensors a and b. </p>
<p class="formulaDsp">
\[ result = a - b \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> b_data[2*3] = {1.0f, 2.0f, 3.0f,</div>
<div class="line">                     4.0f, 5.0f, 6.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_F32(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#af1409f9886a9614f83cb0a5d5360be60">aimath_f32_default_tensor_sub</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_af1409f9886a9614f83cb0a5d5360be60"><div class="ttname"><a href="aimath__f32__default_8h.html#af1409f9886a9614f83cb0a5d5360be60">aimath_f32_default_tensor_sub</a></div><div class="ttdeci">void aimath_f32_default_tensor_sub(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs an element wise subtraction of F32  tensors a and b.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 tensor a (N-D tensor) </td></tr>
    <tr><td class="paramname">*b</td><td>F32 tensor b (N-D tensor) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the element wise subtraction (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="acbf32eb2340529176399d24fa2ceaae2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#acbf32eb2340529176399d24fa2ceaae2">&#9670;&nbsp;</a></span>aimath_f32_default_tensor_sub_sparse8()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_tensor_sub_sparse8 </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>a</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>b</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Performs a subtraction between a <a class="el" href="aimath__f32_8h.html">F32 </a> matrix a and a <a class="el" href="aimath__u8_8h.html">U8 </a> sparse matrix b. </p>
<p>This function can subtract a row wise one-hot encoded matrix in sparse representation (just the integer index of the 1 is stored) from a normal F32 matrix a.</p>
<p>For example the matrix </p><p class="formulaDsp">
\[ \left( \begin{array}{cccc} 0 &amp; 0 &amp; 0 &amp; 1 \\ 1 &amp; 0 &amp; 0 &amp; 0 \\ 0 &amp; 0 &amp; 1 &amp; 0 \end{array}\right) \]
</p>
<p> in sparse representation is </p><p class="formulaDsp">
\[ \left( \begin{array}{c} 3 \\ 0 \\ 2 \end{array}\right) \]
</p>
<p>The result is then calculated as </p><p class="formulaDsp">
\[ result_{ij} = \begin{cases} a_{ij} - 1 &amp; \text{if } j = b_i\\ a_{ij} &amp; \text{if } j \neq b_i \end{cases} \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t a_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> a_data[2*3] = {0.2f, 0.1f, 0.7f,</div>
<div class="line">                     0.9f, 0.1f, 0.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> a = AITENSOR_2D_F32(a_shape, a_data);</div>
<div class="line"> </div>
<div class="line">uint16_t b_shape[2] = {2, 1};</div>
<div class="line">uint8_t b_data[2*1] = {2,</div>
<div class="line">                       0};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> b = AITENSOR_2D_U8(b_shape, b_data);</div>
<div class="line"> </div>
<div class="line">uint16_t result_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> result_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> result = AITENSOR_2D_F32(result_shape, result_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#acbf32eb2340529176399d24fa2ceaae2">aimath_f32_default_tensor_sub_sparse8</a>(&amp;a, &amp;b, &amp;result);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;result);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_acbf32eb2340529176399d24fa2ceaae2"><div class="ttname"><a href="aimath__f32__default_8h.html#acbf32eb2340529176399d24fa2ceaae2">aimath_f32_default_tensor_sub_sparse8</a></div><div class="ttdeci">void aimath_f32_default_tensor_sub_sparse8(const aitensor_t *a, const aitensor_t *b, aitensor_t *result)</div><div class="ttdoc">Performs a subtraction between a F32  matrix a and a U8  sparse matrix b.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*a</td><td>F32 matrix a (2D tensor of shape [N x M]) </td></tr>
    <tr><td class="paramname">*b</td><td>U8 sparse matrix b (2D tensor of shape [N x 1]) </td></tr>
    <tr><td class="paramname">*result</td><td>Resulting F32 tensor of the subtraction (2D tensor of shape [N x M]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a458efd0b4b295af5f84c15123158770d"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a458efd0b4b295af5f84c15123158770d">&#9670;&nbsp;</a></span>aimath_f32_default_transpose_matrix()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_transpose_matrix </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Transpose a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor. </p>
<p class="formulaDsp">
\[ x \leftarrow x^T \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t x_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> x_data[2*3] = {  2.0f, -4.0f,   6.0f,</div>
<div class="line">                      -8.0f, 10.0f, -12.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> x = AITENSOR_2D_F32(x_shape, x_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a458efd0b4b295af5f84c15123158770d">aimath_f32_default_transpose_matrix</a>(&amp;x);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;x);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a458efd0b4b295af5f84c15123158770d"><div class="ttname"><a href="aimath__f32__default_8h.html#a458efd0b4b295af5f84c15123158770d">aimath_f32_default_transpose_matrix</a></div><div class="ttdeci">void aimath_f32_default_transpose_matrix(aitensor_t *x)</div><div class="ttdoc">Transpose a F32  tensor.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*x</td><td>F32 tensor to be transposed (2D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a750f616bbc38cd336a0d75581a811101"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a750f616bbc38cd336a0d75581a811101">&#9670;&nbsp;</a></span>aimath_f32_default_transpose_vector()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_transpose_vector </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>vector</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Transposes a <a class="el" href="aimath__f32_8h.html">F32 </a> vector. </p>
<p>The given tensor must be a vector (2D tensor of shape [1 x N] or [N x 1]).</p>
<p class="formulaDsp">
\[ vector \leftarrow vector^T \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t vector_shape[2] = {1, 3};</div>
<div class="line"><span class="keywordtype">float</span> vector_data[1*3] = {1.0f, 2.0f, 3.0f};</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> vector = AITENSOR_2D_F32(vector_shape, vector_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a750f616bbc38cd336a0d75581a811101">aimath_f32_default_transpose_vector</a>(&amp;vector);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;vector);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a750f616bbc38cd336a0d75581a811101"><div class="ttname"><a href="aimath__f32__default_8h.html#a750f616bbc38cd336a0d75581a811101">aimath_f32_default_transpose_vector</a></div><div class="ttdeci">void aimath_f32_default_transpose_vector(aitensor_t *vector)</div><div class="ttdoc">Transposes a F32  vector.</div></div>
</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*vector</td><td>F32 vector (2D tensor of shape [1 x N] or [N x 1]) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="afd8b40e9cb1cfb0e1b2a6f700d21a086"></a>
<h2 class="memtitle"><span class="permalink"><a href="#afd8b40e9cb1cfb0e1b2a6f700d21a086">&#9670;&nbsp;</a></span>aimath_f32_default_variance_channelwise()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_variance_channelwise </td>
          <td>(</td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>x</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int8_t&#160;</td>
          <td class="paramname"><em>channel_axis</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>means</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>result</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Calculate the channel-wise variances of the <a class="el" href="aimath__f32_8h.html">F32 </a> tensor x. </p>
<p>Calculates the empirical variance for each channel of the given axis:<br  />
 </p><p class="formulaDsp">
\[ variances_i = \frac{1}{m} \sum_{j=1}^{m} (x_{i,j} - \mu_i)^2 \]
</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">x</td><td>F32 input tensor (N-D) </td></tr>
    <tr><td class="paramname">channel_axis</td><td>Index of the channel axis (negative values mean indexing from the end) </td></tr>
    <tr><td class="paramname">means</td><td>F32 mean vector (1D) for variance calculation </td></tr>
    <tr><td class="paramname">result</td><td>F32 result vector (1D) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
<a id="a072ffbef0c60343957e4621f01551e85"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a072ffbef0c60343957e4621f01551e85">&#9670;&nbsp;</a></span>aimath_f32_default_zero_tensor()</h2>

<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void aimath_f32_default_zero_tensor </td>
          <td>(</td>
          <td class="paramtype"><a class="el" href="structaitensor.html">aitensor_t</a> *&#160;</td>
          <td class="paramname"><em>tensor</em></td><td>)</td>
          <td></td>
        </tr>
      </table>
</div><div class="memdoc">

<p>Fills a <a class="el" href="aimath__f32_8h.html">F32 </a> tensor with zeros. </p>
<p class="formulaDsp">
\[ tensor_{i} = 0 \]
</p>
<p>Example: </p><div class="fragment"><div class="line">uint16_t tensor_shape[2] = {2, 3};</div>
<div class="line"><span class="keywordtype">float</span> tensor_data[2*3];</div>
<div class="line"><a class="code" href="structaitensor.html">aitensor_t</a> tensor = AITENSOR_2D_F32(tensor_shape, tensor_data);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__f32__default_8h.html#a072ffbef0c60343957e4621f01551e85">aimath_f32_default_zero_tensor</a>(&amp;tensor);</div>
<div class="line"> </div>
<div class="line"><a class="code" href="aimath__basic_8h.html#ab10c8d06990943806f0be8fcc6af03fc">print_aitensor</a>(&amp;tensor);</div>
<div class="ttc" id="aaimath__f32__default_8h_html_a072ffbef0c60343957e4621f01551e85"><div class="ttname"><a href="aimath__f32__default_8h.html#a072ffbef0c60343957e4621f01551e85">aimath_f32_default_zero_tensor</a></div><div class="ttdeci">void aimath_f32_default_zero_tensor(aitensor_t *tensor)</div><div class="ttdoc">Fills a F32  tensor with zeros.</div></div>
</div><!-- fragment --><p>In the F32 implementation of this function, there is no difference between <a class="el" href="aimath__f32__default_8h.html#a072ffbef0c60343957e4621f01551e85" title="Fills a F32  tensor with zeros.">aimath_f32_default_zero_tensor()</a> and <a class="el" href="aimath__f32__default_8h.html#a760163e6cee0f2a5e341069ad20bfcde" title="Fills a F32  tensor with zeros.">aimath_f32_default_init_zeros()</a>.</p>
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramname">*tensor</td><td>F32 tensor to set to zero (N-D tensor) </td></tr>
  </table>
  </dd>
</dl>

</div>
</div>
</div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
  <ul>
    <li class="navelem"><a class="el" href="dir_d44c64559bbebec7f509842c48db8b23.html">include</a></li><li class="navelem"><a class="el" href="dir_1e5d3661ed79af157d57e64a38265d09.html">basic</a></li><li class="navelem"><a class="el" href="dir_6f3c54947e40ccd50db54894d07fbfc0.html">default</a></li><li class="navelem"><a class="el" href="dir_973bc2385ef3e651973e652a47ef087c.html">aimath</a></li><li class="navelem"><a class="el" href="aimath__f32__default_8h.html">aimath_f32_default.h</a></li>
    <li class="footer">Generated by <a href="https://www.doxygen.org/index.html"><img class="footer" src="doxygen.svg" width="104" height="31" alt="doxygen"/></a> 1.9.1 </li>
  </ul>
</div>
</body>
</html>
