<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<title>CMSIS-NN: Softmax Functions</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<link href="cmsis.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<script type="text/javascript" src="printComponentTabs.js"></script>
<script type="text/javascript" src="cmsis_footer.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<script type="text/javascript">
  $(document).ready(initResizable);
  $(window).load(resizeHeight);
</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
  $(document).ready(function() { searchBox.OnSelectItem(0); });
</script>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
 <tbody>
 <tr style="height: 46px;">
  <td id="projectlogo"><img alt="Logo" src="CMSIS_Logo_Final.png"/></td>
  <td style="padding-left: 0.5em;">
   <div id="projectname">CMSIS-NN
   &#160;<span id="projectnumber">Version 3.1.0</span>
   </div>
   <div id="projectbrief">CMSIS NN Software Library</div>
  </td>
 </tr>
 </tbody>
</table>
</div>
<!-- end header part -->
<div id="CMSISnav" class="tabs1">
    <ul class="tablist">
      <script type="text/javascript">
		<!--
		writeComponentTabs.call(this);
		//-->
      </script>
	  </ul>
</div>
<!-- Generated by Doxygen 1.8.6 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
  <div id="navrow1" class="tabs">
    <ul class="tablist">
      <li><a href="index.html"><span>Main&#160;Page</span></a></li>
      <li><a href="pages.html"><span>Usage&#160;and&#160;Description</span></a></li>
      <li><a href="modules.html"><span>Reference</span></a></li>
      <li>
        <div id="MSearchBox" class="MSearchBoxInactive">
        <span class="left">
          <img id="MSearchSelect" src="search/mag_sel.png"
               onmouseover="return searchBox.OnSearchSelectShow()"
               onmouseout="return searchBox.OnSearchSelectHide()"
               alt=""/>
          <input type="text" id="MSearchField" value="Search" accesskey="S"
               onfocus="searchBox.OnSearchFieldFocus(true)" 
               onblur="searchBox.OnSearchFieldFocus(false)" 
               onkeyup="searchBox.OnSearchFieldChange(event)"/>
          </span><span class="right">
            <a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
          </span>
        </div>
      </li>
    </ul>
  </div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
  <div id="nav-tree">
    <div id="nav-tree-contents">
      <div id="nav-sync" class="sync"></div>
    </div>
  </div>
  <div id="splitbar" style="-moz-user-select:none;" 
       class="ui-resizable-handle">
  </div>
</div>
<script type="text/javascript">
$(document).ready(function(){initNavTree('group__Softmax.html','');});
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
     onmouseover="return searchBox.OnSearchSelectShow()"
     onmouseout="return searchBox.OnSearchSelectHide()"
     onkeydown="return searchBox.OnSearchSelectKey(event)">
<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark">&#160;</span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark">&#160;</span>Data Structures</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark">&#160;</span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark">&#160;</span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark">&#160;</span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark">&#160;</span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(6)"><span class="SelectionMark">&#160;</span>Enumerator</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(7)"><span class="SelectionMark">&#160;</span>Macros</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(8)"><span class="SelectionMark">&#160;</span>Groups</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(9)"><span class="SelectionMark">&#160;</span>Pages</a></div>

<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0" 
        name="MSearchResults" id="MSearchResults">
</iframe>
</div>

<div class="header">
  <div class="summary">
<a href="#func-members">Functions</a>  </div>
  <div class="headertitle">
<div class="title">Softmax Functions<div class="ingroups"><a class="el" href="group__groupNN.html">Neural Network Functions</a></div></div>  </div>
</div><!--header-->
<div class="contents">
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="func-members"></a>
Functions</h2></td></tr>
<tr class="memitem:gaa5632ba67b623b5dff0c4a2d8e2a9a3a"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#gaa5632ba67b623b5dff0c4a2d8e2a9a3a">arm_nn_softmax_common_s8</a> (const int8_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const int32_t diff_min, const bool int16_output, void *output)</td></tr>
<tr class="memdesc:gaa5632ba67b623b5dff0c4a2d8e2a9a3a"><td class="mdescLeft">&#160;</td><td class="mdescRight">Common softmax function for s8 input and s8 or s16 output.  <a href="#gaa5632ba67b623b5dff0c4a2d8e2a9a3a">More...</a><br/></td></tr>
<tr class="separator:gaa5632ba67b623b5dff0c4a2d8e2a9a3a"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ga1cacd8b84b8363079311987d0016ebe5"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#ga1cacd8b84b8363079311987d0016ebe5">arm_softmax_q15</a> (const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out)</td></tr>
<tr class="memdesc:ga1cacd8b84b8363079311987d0016ebe5"><td class="mdescLeft">&#160;</td><td class="mdescRight">Q15 softmax function.  <a href="#ga1cacd8b84b8363079311987d0016ebe5">More...</a><br/></td></tr>
<tr class="separator:ga1cacd8b84b8363079311987d0016ebe5"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ga89aff212a97a3cf32d9d7ddf11a8f43e"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#ga89aff212a97a3cf32d9d7ddf11a8f43e">arm_softmax_q7</a> (const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out)</td></tr>
<tr class="memdesc:ga89aff212a97a3cf32d9d7ddf11a8f43e"><td class="mdescLeft">&#160;</td><td class="mdescRight">Q7 softmax function.  <a href="#ga89aff212a97a3cf32d9d7ddf11a8f43e">More...</a><br/></td></tr>
<tr class="separator:ga89aff212a97a3cf32d9d7ddf11a8f43e"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ga3bc3ad13727a8a9d2cf7d0fba1209879"><td class="memItemLeft" align="right" valign="top">arm_status&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#ga3bc3ad13727a8a9d2cf7d0fba1209879">arm_softmax_s16</a> (const int16_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const <a class="el" href="structcmsis__nn__softmax__lut__s16.html">cmsis_nn_softmax_lut_s16</a> *softmax_params, int16_t *output)</td></tr>
<tr class="memdesc:ga3bc3ad13727a8a9d2cf7d0fba1209879"><td class="mdescLeft">&#160;</td><td class="mdescRight">S16 softmax function.  <a href="#ga3bc3ad13727a8a9d2cf7d0fba1209879">More...</a><br/></td></tr>
<tr class="separator:ga3bc3ad13727a8a9d2cf7d0fba1209879"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:gaf309cdd53978a85a39c9bfdc476aea17"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#gaf309cdd53978a85a39c9bfdc476aea17">arm_softmax_s8</a> (const int8_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const int32_t diff_min, int8_t *output)</td></tr>
<tr class="memdesc:gaf309cdd53978a85a39c9bfdc476aea17"><td class="mdescLeft">&#160;</td><td class="mdescRight">S8 softmax function.  <a href="#gaf309cdd53978a85a39c9bfdc476aea17">More...</a><br/></td></tr>
<tr class="separator:gaf309cdd53978a85a39c9bfdc476aea17"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ga4c00979132b735e75525296bb5fa830f"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#ga4c00979132b735e75525296bb5fa830f">arm_softmax_s8_s16</a> (const int8_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const int32_t diff_min, int16_t *output)</td></tr>
<tr class="memdesc:ga4c00979132b735e75525296bb5fa830f"><td class="mdescLeft">&#160;</td><td class="mdescRight">S8 to s16 softmax function.  <a href="#ga4c00979132b735e75525296bb5fa830f">More...</a><br/></td></tr>
<tr class="separator:ga4c00979132b735e75525296bb5fa830f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:gaa1627ed96bd597a8046d00689f077dce"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#gaa1627ed96bd597a8046d00689f077dce">arm_softmax_u8</a> (const uint8_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const int32_t diff_min, uint8_t *output)</td></tr>
<tr class="memdesc:gaa1627ed96bd597a8046d00689f077dce"><td class="mdescLeft">&#160;</td><td class="mdescRight">U8 softmax function.  <a href="#gaa1627ed96bd597a8046d00689f077dce">More...</a><br/></td></tr>
<tr class="separator:gaa1627ed96bd597a8046d00689f077dce"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ga894cfd80c260b946702755b5754e520f"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="group__Softmax.html#ga894cfd80c260b946702755b5754e520f">arm_softmax_with_batch_q7</a> (const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out)</td></tr>
<tr class="memdesc:ga894cfd80c260b946702755b5754e520f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Q7 softmax function with batch parameter.  <a href="#ga894cfd80c260b946702755b5754e520f">More...</a><br/></td></tr>
<tr class="separator:ga894cfd80c260b946702755b5754e520f"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Description</h2>
<p>EXP(2) based softmax functions. </p>
<h2 class="groupheader">Function Documentation</h2>
<a class="anchor" id="gaa5632ba67b623b5dff0c4a2d8e2a9a3a"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_nn_softmax_common_s8 </td>
          <td>(</td>
          <td class="paramtype">const int8_t *&#160;</td>
          <td class="paramname"><em>input</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>num_rows</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>row_size</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>mult</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>shift</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>diff_min</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const bool&#160;</td>
          <td class="paramname"><em>int16_output</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">void *&#160;</td>
          <td class="paramname"><em>output</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Pointer to the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">num_rows</td><td>Number of rows in the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">row_size</td><td>Number of elements in each input row </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">mult</td><td>Input quantization multiplier </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">shift</td><td>Input quantization shift within the range [0, 31] </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">diff_min</td><td>Minimum difference with max in row. Used to check if the quantized exponential operation can be performed </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">int16_output</td><td>Indicating s8 output if 0 else s16 output </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">output</td><td>Pointer to the output tensor</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd>Supported framework: TensorFlow Lite micro (bit-accurate) </dd></dl>

<p>References <a class="el" href="arm__nn__softmax__common__s8_8c.html#a401e2dfaf6a8f0ef34f15295e026fd79">ACCUM_BITS</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#abe05f0e80d965ae31dec16ba4063f48a">CLAMP</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a7ce5ee6d8839bf541fb4bbdf4ef80eb1">DIV_POW2</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a97055bb1e8a21ead129caecdfb24cfb1">EXP_ON_NEG</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#ad935f1ff1a50822e317bdb321ce991ad">MAX</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a518f7e0db18bea6b61a2b88f266aef20">MUL_SAT</a>, <a class="el" href="arm__nn__math__types_8h.html#a30fe8775d9c28c72fb7c336ab4751269">NN_Q15_MAX</a>, <a class="el" href="arm__nn__math__types_8h.html#a51e8177e289decc5ccf94be9c160064d">NN_Q15_MIN</a>, <a class="el" href="arm__nn__math__types_8h.html#a3fc66c904b4e92c8e06803dac0ee54a1">NN_Q7_MAX</a>, <a class="el" href="arm__nn__math__types_8h.html#a342199b16a4abf4da4a7607a3a552b4f">NN_Q7_MIN</a>, and <a class="el" href="arm__nnsupportfunctions_8h.html#a82ac477c930f5b05e8f71f6f61e405a8">ONE_OVER1</a>.</p>

<p>Referenced by <a class="el" href="group__Softmax.html#gaf309cdd53978a85a39c9bfdc476aea17">arm_softmax_s8()</a>, and <a class="el" href="group__Softmax.html#ga4c00979132b735e75525296bb5fa830f">arm_softmax_s8_s16()</a>.</p>

</div>
</div>
<a class="anchor" id="ga1cacd8b84b8363079311987d0016ebe5"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_softmax_q15 </td>
          <td>(</td>
          <td class="paramtype">const q15_t *&#160;</td>
          <td class="paramname"><em>vec_in</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const uint16_t&#160;</td>
          <td class="paramname"><em>dim_vec</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">q15_t *&#160;</td>
          <td class="paramname"><em>p_out</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">vec_in</td><td>pointer to input vector </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">dim_vec</td><td>input vector dimension </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">p_out</td><td>pointer to output vector</td></tr>
  </table>
  </dd>
</dl>
<p>Here, instead of the typical e-based softmax, we use a 2-based softmax, i.e.:</p>
<p>y_i = 2^(x_i) / sum(2^x_j)</p>
<p>The relative output will be different here. But mathematically, the gradient will be the same with a log(2) scaling factor. </p>

</div>
</div>
<a class="anchor" id="ga89aff212a97a3cf32d9d7ddf11a8f43e"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_softmax_q7 </td>
          <td>(</td>
          <td class="paramtype">const q7_t *&#160;</td>
          <td class="paramname"><em>vec_in</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const uint16_t&#160;</td>
          <td class="paramname"><em>dim_vec</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">q7_t *&#160;</td>
          <td class="paramname"><em>p_out</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">vec_in</td><td>pointer to input vector </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">dim_vec</td><td>input vector dimension </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">p_out</td><td>pointer to output vector</td></tr>
  </table>
  </dd>
</dl>
<p>Here, instead of the typical natural-logarithm (e) based softmax, we use a 2-based softmax, i.e.:</p>
<p>y_i = 2^(x_i) / sum(2^x_j)</p>
<p>The relative output will be different here. But mathematically, the gradient will be the same with a log(2) scaling factor. </p>

<p>Referenced by <a class="el" href="group__Softmax.html#ga894cfd80c260b946702755b5754e520f">arm_softmax_with_batch_q7()</a>.</p>

</div>
</div>
<a class="anchor" id="ga3bc3ad13727a8a9d2cf7d0fba1209879"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">arm_status arm_softmax_s16 </td>
          <td>(</td>
          <td class="paramtype">const int16_t *&#160;</td>
          <td class="paramname"><em>input</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>num_rows</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>row_size</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>mult</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>shift</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const <a class="el" href="structcmsis__nn__softmax__lut__s16.html">cmsis_nn_softmax_lut_s16</a> *&#160;</td>
          <td class="paramname"><em>softmax_params</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int16_t *&#160;</td>
          <td class="paramname"><em>output</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Pointer to the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">num_rows</td><td>Number of rows in the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">row_size</td><td>Number of elements in each input row </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">mult</td><td>Input quantization multiplier </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">shift</td><td>Input quantization shift within the range [0, 31] </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">softmax_params</td><td>Softmax s16 layer parameters with two pointers to LUTs specified below. For indexing, the high 9 bits are used and the remaining 7 bits are used for interpolation. That means 512 entries for the 9-bit indexing and 1 extra for interpolation, i.e. 513 values for each LUT.<ul>
<li>Lookup table for exp(x), where x is uniformly distributed between [-10.0 , 0.0]</li>
<li>Lookup table for 1 / (1 + x), where x is uniformly distributed between [0.0 , 1.0] </li>
</ul>
</td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">output</td><td>Pointer to the output tensor </td></tr>
  </table>
  </dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>The function returns <code>ARM_MATH_ARGUMENT_ERROR</code> if LUTs are NULL <code>ARM_MATH_SUCCESS</code> - Successful operation</dd></dl>
<dl class="section note"><dt>Note</dt><dd>Supported framework: TensorFlow Lite micro (bit-accurate) </dd></dl>

<p>References <a class="el" href="arm__nnsupportfunctions_8h.html#a6a13b7a567485da5fc7f0d311318886d">arm_nn_requantize()</a>, <a class="el" href="structcmsis__nn__softmax__lut__s16.html#a0efa8f93962010b589115d62bc8b1492">cmsis_nn_softmax_lut_s16::exp_lut</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#ad935f1ff1a50822e317bdb321ce991ad">MAX</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#adcd021ac91d43a62b2cdecf9a5b971a7">MIN</a>, <a class="el" href="arm__nn__math__types_8h.html#a30fe8775d9c28c72fb7c336ab4751269">NN_Q15_MAX</a>, <a class="el" href="arm__nn__math__types_8h.html#a51e8177e289decc5ccf94be9c160064d">NN_Q15_MIN</a>, and <a class="el" href="structcmsis__nn__softmax__lut__s16.html#a7bbf8c62fe03de10c44fb29ee97934c6">cmsis_nn_softmax_lut_s16::one_by_one_lut</a>.</p>

</div>
</div>
<a class="anchor" id="gaf309cdd53978a85a39c9bfdc476aea17"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_softmax_s8 </td>
          <td>(</td>
          <td class="paramtype">const int8_t *&#160;</td>
          <td class="paramname"><em>input</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>num_rows</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>row_size</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>mult</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>shift</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>diff_min</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int8_t *&#160;</td>
          <td class="paramname"><em>output</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Pointer to the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">num_rows</td><td>Number of rows in the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">row_size</td><td>Number of elements in each input row </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">mult</td><td>Input quantization multiplier </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">shift</td><td>Input quantization shift within the range [0, 31] </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">diff_min</td><td>Minimum difference with max in row. Used to check if the quantized exponential operation can be performed </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">output</td><td>Pointer to the output tensor</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd>Supported framework: TensorFlow Lite micro (bit-accurate) </dd></dl>

<p>References <a class="el" href="arm__softmax__s8_8c.html#a401e2dfaf6a8f0ef34f15295e026fd79">ACCUM_BITS</a>, <a class="el" href="group__Softmax.html#gaa5632ba67b623b5dff0c4a2d8e2a9a3a">arm_nn_softmax_common_s8()</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#abe05f0e80d965ae31dec16ba4063f48a">CLAMP</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a7ce5ee6d8839bf541fb4bbdf4ef80eb1">DIV_POW2</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a0e4379e9eef514ce88d02b5dfbff256d">DIV_POW2_MVE</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a97055bb1e8a21ead129caecdfb24cfb1">EXP_ON_NEG</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a518f7e0db18bea6b61a2b88f266aef20">MUL_SAT</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a6349818fec8167dff87c3fb7ca81fc1c">MUL_SAT_MVE</a>, <a class="el" href="arm__nn__math__types_8h.html#a342199b16a4abf4da4a7607a3a552b4f">NN_Q7_MIN</a>, and <a class="el" href="arm__nnsupportfunctions_8h.html#a82ac477c930f5b05e8f71f6f61e405a8">ONE_OVER1</a>.</p>

</div>
</div>
<a class="anchor" id="ga4c00979132b735e75525296bb5fa830f"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_softmax_s8_s16 </td>
          <td>(</td>
          <td class="paramtype">const int8_t *&#160;</td>
          <td class="paramname"><em>input</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>num_rows</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>row_size</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>mult</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>shift</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>diff_min</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">int16_t *&#160;</td>
          <td class="paramname"><em>output</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Pointer to the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">num_rows</td><td>Number of rows in the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">row_size</td><td>Number of elements in each input row </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">mult</td><td>Input quantization multiplier </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">shift</td><td>Input quantization shift within the range [0, 31] </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">diff_min</td><td>Minimum difference with max in row. Used to check if the quantized exponential operation can be performed </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">output</td><td>Pointer to the output tensor</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd>Supported framework: TensorFlow Lite micro (bit-accurate) </dd></dl>

<p>References <a class="el" href="group__Softmax.html#gaa5632ba67b623b5dff0c4a2d8e2a9a3a">arm_nn_softmax_common_s8()</a>.</p>

</div>
</div>
<a class="anchor" id="gaa1627ed96bd597a8046d00689f077dce"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_softmax_u8 </td>
          <td>(</td>
          <td class="paramtype">const uint8_t *&#160;</td>
          <td class="paramname"><em>input</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>num_rows</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>row_size</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>mult</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>shift</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const int32_t&#160;</td>
          <td class="paramname"><em>diff_min</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">uint8_t *&#160;</td>
          <td class="paramname"><em>output</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Pointer to the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">num_rows</td><td>Number of rows in the input tensor </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">row_size</td><td>Number of elements in each input row </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">mult</td><td>Input quantization multiplier </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">shift</td><td>Input quantization shift within the range [0, 31] </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">diff_min</td><td>Minimum difference with max in row. Used to check if the quantized exponential operation can be performed </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">output</td><td>Pointer to the output tensor</td></tr>
  </table>
  </dd>
</dl>
<dl class="section note"><dt>Note</dt><dd>Supported framework: TensorFlow Lite micro (bit-accurate) </dd></dl>

<p>References <a class="el" href="arm__softmax__u8_8c.html#a401e2dfaf6a8f0ef34f15295e026fd79">ACCUM_BITS</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#abe05f0e80d965ae31dec16ba4063f48a">CLAMP</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a7ce5ee6d8839bf541fb4bbdf4ef80eb1">DIV_POW2</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a97055bb1e8a21ead129caecdfb24cfb1">EXP_ON_NEG</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#ad935f1ff1a50822e317bdb321ce991ad">MAX</a>, <a class="el" href="arm__nnsupportfunctions_8h.html#a518f7e0db18bea6b61a2b88f266aef20">MUL_SAT</a>, and <a class="el" href="arm__nnsupportfunctions_8h.html#a82ac477c930f5b05e8f71f6f61e405a8">ONE_OVER1</a>.</p>

</div>
</div>
<a class="anchor" id="ga894cfd80c260b946702755b5754e520f"></a>
<div class="memitem">
<div class="memproto">
      <table class="memname">
        <tr>
          <td class="memname">void arm_softmax_with_batch_q7 </td>
          <td>(</td>
          <td class="paramtype">const q7_t *&#160;</td>
          <td class="paramname"><em>vec_in</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const uint16_t&#160;</td>
          <td class="paramname"><em>nb_batches</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">const uint16_t&#160;</td>
          <td class="paramname"><em>dim_vec</em>, </td>
        </tr>
        <tr>
          <td class="paramkey"></td>
          <td></td>
          <td class="paramtype">q7_t *&#160;</td>
          <td class="paramname"><em>p_out</em>&#160;</td>
        </tr>
        <tr>
          <td></td>
          <td>)</td>
          <td></td><td></td>
        </tr>
      </table>
</div><div class="memdoc">
<dl class="params"><dt>Parameters</dt><dd>
  <table class="params">
    <tr><td class="paramdir">[in]</td><td class="paramname">vec_in</td><td>pointer to input vector </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">nb_batches</td><td>number of batches </td></tr>
    <tr><td class="paramdir">[in]</td><td class="paramname">dim_vec</td><td>input vector dimension </td></tr>
    <tr><td class="paramdir">[out]</td><td class="paramname">p_out</td><td>pointer to output vector</td></tr>
  </table>
  </dd>
</dl>
<p>Here, instead of the typical natural-logarithm (base-e) softmax, we use a base-2 softmax, i.e.:</p>
<p>y_i = 2^(x_i) / sum(2^x_j)</p>
<p>The relative output will be different here, but mathematically the gradient will be the same, up to a log(2) scaling factor. </p>

<p>References <a class="el" href="group__Softmax.html#ga89aff212a97a3cf32d9d7ddf11a8f43e">arm_softmax_q7()</a>.</p>

</div>
</div>
</div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
  <ul>
    <li class="footer">
      <script type="text/javascript">
        <!--
        writeFooter.call(this);
        //-->
      </script>    
    </li>
  </ul>
</div>
</body>
</html>
