

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" />

  <meta name="viewport" content="width=device-width, initial-scale=1.0" />

  <title>Encoders &mdash; Openspeech v0.3.0 documentation</title>



  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />










  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->


      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/language_data.js"></script>

    <script type="text/javascript" src="../_static/js/theme.js"></script>


    <link rel="index" title="Index" href="../genindex.html" />
    <link rel="search" title="Search" href="../search.html" />
    <link rel="next" title="Modules" href="Modules.html" />
    <link rel="prev" title="Decoders" href="Decoders.html" />
</head>

<body class="wy-body-for-nav">


  <div class="wy-grid-for-nav">

    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >



            <a href="../index.html" class="icon icon-home"> Openspeech



          </a>







<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>


        </div>


        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">






              <p class="caption"><span class="caption-text">GETTING STARTED</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../notes/intro.html">Introduction</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/hydra_configs.html">Openspeech’s Hydra configuration</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/configs.html">Openspeech’s configurations</a></li>
</ul>
<p class="caption"><span class="caption-text">OPENSPEECH MODELS</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../models/Openspeech Model.html">Openspeech Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../models/Openspeech CTC Model.html">Openspeech CTC Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../models/Openspeech Encoder Decoder Model.html">Openspeech Encoder Decoder Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../models/Openspeech Transducer Model.html">Openspeech Transducer Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../models/Openspeech Language Model.html">Openspeech Language Model</a></li>
</ul>
<p class="caption"><span class="caption-text">MODEL ARCHITECTURES</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../architectures/Conformer.html">Conformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/ContextNet.html">ContextNet</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/DeepSpeech2.html">DeepSpeech2</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/Jasper.html">Jasper</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/Listen Attend Spell.html">Listen Attend Spell Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/LSTM LM.html">LSTM Language Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/QuartzNet.html">QuartzNet Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/RNN Transducer.html">RNN Transducer Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/Transformer.html">Transformer Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/Transformer LM.html">Transformer Language Model</a></li>
<li class="toctree-l1"><a class="reference internal" href="../architectures/Transformer Transducer.html">Transformer Transducer Model</a></li>
</ul>
<p class="caption"><span class="caption-text">CORPUS</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../corpus/AISHELL-1.html">AISHELL</a></li>
<li class="toctree-l1"><a class="reference internal" href="../corpus/KsponSpeech.html">KsponSpeech</a></li>
<li class="toctree-l1"><a class="reference internal" href="../corpus/LibriSpeech.html">LibriSpeech</a></li>
</ul>
<p class="caption"><span class="caption-text">LIBRARY REFERENCE</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="Callback.html">Callback</a></li>
<li class="toctree-l1"><a class="reference internal" href="Criterion.html">Criterion</a></li>
<li class="toctree-l1"><a class="reference internal" href="Data Augment.html">Data Augment</a></li>
<li class="toctree-l1"><a class="reference internal" href="Feature Transform.html">Feature Transform</a></li>
<li class="toctree-l1"><a class="reference internal" href="Datasets.html">Datasets</a></li>
<li class="toctree-l1"><a class="reference internal" href="Data Loaders.html">Data Loaders</a></li>
<li class="toctree-l1"><a class="reference internal" href="Decoders.html">Decoders</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Encoders</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.openspeech_encoder">Openspeech Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.conformer_encoder">Conformer Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.contextnet_encoder">ContextNet Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.convolutional_lstm_encoder">Convolutional LSTM Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.convolutional_transformer_encoder">Convolutional Transformer Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.deepspeech2">DeepSpeech2</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.jasper">Jasper</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.lstm_encoder">LSTM Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.quartznet">QuartzNet</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.rnn_transducer_encoder">RNN Transducer Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.transformer_encoder">Transformer Encoder</a></li>
<li class="toctree-l2"><a class="reference internal" href="#module-openspeech.encoders.transformer_transducer_encoder">Transformer Transducer Encoder</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="Modules.html">Modules</a></li>
<li class="toctree-l1"><a class="reference internal" href="Optim.html">Optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="Search.html">Search</a></li>
<li class="toctree-l1"><a class="reference internal" href="Tokenizers.html">Tokenizers</a></li>
<li class="toctree-l1"><a class="reference internal" href="Metric.html">Metric</a></li>
</ul>



        </div>

      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">


      <nav class="wy-nav-top" aria-label="top navigation">

          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">Openspeech</a>

      </nav>


      <div class="wy-nav-content">

        <div class="rst-content">



















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">

      <li><a href="../index.html" class="icon icon-home"></a> &raquo;</li>

      <li>Encoders</li>


      <li class="wy-breadcrumbs-aside">


            <a href="../_sources/modules/Encoders.rst.txt" rel="nofollow"> View page source</a>


      </li>

  </ul>


  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">

  <div class="section" id="encoders">
<h1>Encoders<a class="headerlink" href="#encoders" title="Permalink to this headline">¶</a></h1>
<div class="section" id="module-openspeech.encoders.openspeech_encoder">
<span id="openspeech-encoder"></span><h2>Openspeech Encoder<a class="headerlink" href="#module-openspeech.encoders.openspeech_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.openspeech_encoder.OpenspeechEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.openspeech_encoder.</code><code class="sig-name descname">OpenspeechEncoder</code><a class="reference internal" href="../_modules/openspeech/encoders/openspeech_encoder.html#OpenspeechEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.openspeech_encoder.OpenspeechEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>Base Interface of Openspeech Encoder.</p>
<dl class="simple">
<dt>Inputs:</dt><dd><p>inputs (torch.FloatTensor): An input sequence passed to encoders. Typically for inputs this will be a padded <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.
input_lengths (torch.LongTensor): The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.openspeech_encoder.OpenspeechEncoder.count_parameters">
<code class="sig-name descname">count_parameters</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; <a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a><a class="reference internal" href="../_modules/openspeech/encoders/openspeech_encoder.html#OpenspeechEncoder.count_parameters"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.openspeech_encoder.OpenspeechEncoder.count_parameters" title="Permalink to this definition">¶</a></dt>
<dd><p>Count parameters of encoders</p>
</dd></dl>

<dl class="py method">
<dt id="openspeech.encoders.openspeech_encoder.OpenspeechEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/openspeech_encoder.html#OpenspeechEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.openspeech_encoder.OpenspeechEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate for encoders training.</p>
<dl class="simple">
<dt>Inputs:</dt><dd><p>inputs (torch.FloatTensor): An input sequence passed to encoders. Typically for inputs this will be a padded <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.
input_lengths (torch.LongTensor): The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
</dl>
</dd></dl>

<dl class="py method">
<dt id="openspeech.encoders.openspeech_encoder.OpenspeechEncoder.update_dropout">
<code class="sig-name descname">update_dropout</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span></em><span class="sig-paren">)</span> &#x2192; <a class="reference external" href="https://docs.python.org/3/library/constants.html#None" title="(in Python v3.9)">None</a><a class="reference internal" href="../_modules/openspeech/encoders/openspeech_encoder.html#OpenspeechEncoder.update_dropout"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.openspeech_encoder.OpenspeechEncoder.update_dropout" title="Permalink to this definition">¶</a></dt>
<dd><p>Update dropout probability of encoders</p>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.conformer_encoder">
<span id="conformer-encoder"></span><h2>Conformer Encoder<a class="headerlink" href="#module-openspeech.encoders.conformer_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.conformer_encoder.ConformerEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.conformer_encoder.</code><code class="sig-name descname">ConformerEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">80</span></em>, <em class="sig-param"><span class="n">encoder_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">17</span></em>, <em class="sig-param"><span class="n">num_attention_heads</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">8</span></em>, <em class="sig-param"><span class="n">feed_forward_expansion_factor</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">4</span></em>, <em class="sig-param"><span 
class="n">conv_expansion_factor</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">2</span></em>, <em class="sig-param"><span class="n">input_dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em>, <em class="sig-param"><span class="n">feed_forward_dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em>, <em class="sig-param"><span class="n">attention_dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em>, <em class="sig-param"><span class="n">conv_dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em>, <em class="sig-param"><span class="n">conv_kernel_size</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">31</span></em>, <em class="sig-param"><span class="n">half_step_residual</span><span class="p">:</span> <span class="n"><a class="reference external" 
href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">True</span></em>, <em class="sig-param"><span class="n">joint_ctc_attention</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">True</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/conformer_encoder.html#ConformerEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.conformer_encoder.ConformerEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>Transformer models are good at capturing content-based global interactions, while CNNs exploit local features
effectively.  Conformer achieves the best of both worlds by studying how to combine convolution neural
networks and transformers to model both local and global dependencies of an audio sequence
in a parameter-efficient way.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – Number of classification</p></li>
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Dimension of input vector</p></li>
<li><p><strong>encoder_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Dimension of conformer encoders</p></li>
<li><p><strong>num_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Number of conformer blocks</p></li>
<li><p><strong>num_attention_heads</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Number of attention heads</p></li>
<li><p><strong>feed_forward_expansion_factor</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Expansion factor of feed forward module</p></li>
<li><p><strong>conv_expansion_factor</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Expansion factor of conformer convolution module</p></li>
<li><p><strong>feed_forward_dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – Probability of feed forward module dropout</p></li>
<li><p><strong>attention_dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – Probability of attention module dropout</p></li>
<li><p><strong>conv_dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – Probability of conformer convolution module dropout</p></li>
<li><p><strong>conv_kernel_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em> or </em><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#tuple" title="(in Python v3.9)"><em>tuple</em></a><em>, </em><em>optional</em>) – Size of the convolving kernel</p></li>
<li><p><strong>half_step_residual</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a>) – Flag indicating whether to use half step residual or not</p></li>
<li><p><strong>joint_ctc_attention</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – Flag indicating whether to use joint CTC attention or not</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths</dt><dd><ul class="simple">
<li><p><strong>inputs</strong> (batch, time, dim): Tensor containing input vector</p></li>
<li><p><strong>input_lengths</strong> (batch): list of sequence input lengths</p></li>
</ul>
</dd>
<dt>Returns: outputs, output_lengths</dt><dd><ul class="simple">
<li><p><strong>outputs</strong> (batch, out_channels, time): Tensor produced by conformer encoders.</p></li>
<li><p><strong>output_lengths</strong> (batch): list of sequence output lengths</p></li>
</ul>
</dd>
<dt>Reference:</dt><dd><p>Anmol Gulati et al: Conformer: Convolution-augmented Transformer for Speech Recognition
<a class="reference external" href="https://arxiv.org/abs/2005.08100">https://arxiv.org/abs/2005.08100</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.conformer_encoder.ConformerEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/conformer_encoder.html#ConformerEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.conformer_encoder.ConformerEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoders training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to encoders. Typically for inputs this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><p>(Tensor, Tensor, Tensor)</p>
<ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p>output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.contextnet_encoder">
<span id="contextnet-encoder"></span><h2>ContextNet Encoder<a class="headerlink" href="#module-openspeech.encoders.contextnet_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.contextnet_encoder.ContextNetEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.contextnet_encoder.</code><code class="sig-name descname">ContextNetEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">model_size</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'medium'</span></em>, <em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">80</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">5</span></em>, <em class="sig-param"><span class="n">kernel_size</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">5</span></em>, <em class="sig-param"><span class="n">num_channels</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">256</span></em>, <em class="sig-param"><span class="n">output_dim</span><span class="p">:</span> 
<span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">640</span></em>, <em class="sig-param"><span class="n">joint_ctc_attention</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">False</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/contextnet_encoder.html#ContextNetEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.contextnet_encoder.ContextNetEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>ContextNetEncoder goes through 23 convolution blocks to convert to higher feature values.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – Number of classification</p></li>
<li><p><strong>model_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a><em>, </em><em>optional</em>) – Size of the model [‘small’, ‘medium’, ‘large’] (default : ‘medium’)</p></li>
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Dimension of input vector (default : 80)</p></li>
<li><p><strong>num_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – The number of convolutional layers (default : 5)</p></li>
<li><p><strong>kernel_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Value of convolution kernel size (default : 5)</p></li>
<li><p><strong>num_channels</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – The number of channels in the convolution filter (default: 256)</p></li>
<li><p><strong>output_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – Dimension of encoder output vector (default: 640)</p></li>
<li><p><strong>joint_ctc_attention</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – Flag indicating whether to use joint CTC attention or not</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: Parsed audio of batch size number <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><p><strong>input_lengths</strong>: Tensor representing the sequence length of the input <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt>Returns: output, output_lengths</dt><dd><ul class="simple">
<li><dl class="simple">
<dt><strong>output</strong>: Tensor of encoder output <cite>FloatTensor</cite> of size</dt><dd><p><code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt><strong>encoder_logits</strong>: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p><strong>output_lengths</strong>: Tensor representing the length of the encoder output <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.contextnet_encoder.ContextNetEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/contextnet_encoder.html#ContextNetEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.contextnet_encoder.ContextNetEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> through the audio encoder.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>**inputs**</strong> (<em>torch.FloatTensor</em>) – Parsed audio of batch size number <cite>FloatTensor</cite> of size
<code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><p><strong>**input_lengths**</strong> (<em>torch.LongTensor</em>) – Tensor representing the sequence length of the input
<cite>LongTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><dl class="simple">
<dt>Tensor of encoder output <cite>FloatTensor</cite> of size</dt><dd><p><code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p>
</dd>
<dt><strong>encoder_logits</strong> (torch.FloatTensor): Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
<dt><strong>output_lengths</strong> (torch.LongTensor): Tensor representing the length of the encoder output</dt><dd><p><cite>LongTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
</dl>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p><strong>output</strong> (torch.FloatTensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.convolutional_lstm_encoder">
<span id="convolutional-lstm-encoder"></span><h2>Convolutional LSTM Encoder<a class="headerlink" href="#module-openspeech.encoders.convolutional_lstm_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.convolutional_lstm_encoder.ConvolutionalLSTMEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.convolutional_lstm_encoder.</code><code class="sig-name descname">ConvolutionalLSTMEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">None</span></em>, <em class="sig-param"><span class="n">hidden_state_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.3</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">3</span></em>, <em class="sig-param"><span class="n">bidirectional</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">True</span></em>, <em class="sig-param"><span 
class="n">rnn_type</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'lstm'</span></em>, <em class="sig-param"><span class="n">extractor</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'vgg'</span></em>, <em class="sig-param"><span class="n">conv_activation</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'hardtanh'</span></em>, <em class="sig-param"><span class="n">joint_ctc_attention</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">False</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/convolutional_lstm_encoder.html#ConvolutionalLSTMEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.convolutional_lstm_encoder.ConvolutionalLSTMEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>Converts low level speech signals into higher level features with convolutional extractor.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – dimension of input vector</p></li>
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – number of classes for classification</p></li>
<li><p><strong>hidden_state_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the encoders hidden state <cite>h</cite></p></li>
<li><p><strong>num_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – number of recurrent layers (default: 3)</p></li>
<li><p><strong>bidirectional</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – if True, becomes a bidirectional encoder (default: True)</p></li>
<li><p><strong>extractor</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a>) – type of CNN extractor (default: vgg)</p></li>
<li><p><strong>conv_activation</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a>) – activation function of convolutional extractor (default: hardtanh)</p></li>
<li><p><strong>rnn_type</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a><em>, </em><em>optional</em>) – type of RNN cell (default: lstm)</p></li>
<li><p><strong>dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – dropout probability of encoders (default: 0.3)</p></li>
<li><p><strong>joint_ctc_attention</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – flag indicating whether to use joint CTC attention</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: list of sequences, whose length is the batch size and within which each sequence is list of tokens</p></li>
<li><p><strong>input_lengths</strong>: list of sequence lengths</p></li>
</ul>
</dd>
<dt>Returns: encoder_outputs, encoder_log_probs, output_lengths</dt><dd><ul class="simple">
<li><p><strong>encoder_outputs</strong>: tensor containing the encoded features of the input sequence</p></li>
<li><p><strong>encoder_log_probs</strong>: tensor containing log probability for encoder_only loss</p></li>
<li><p><strong>output_lengths</strong>: list of sequence lengths produced by Listener</p></li>
</ul>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.convolutional_lstm_encoder.ConvolutionalLSTMEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span>Optional<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/convolutional_lstm_encoder.html#ConvolutionalLSTMEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.convolutional_lstm_encoder.ConvolutionalLSTMEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoder training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – A input sequence passed to encoders. Typically for inputs this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p>encoder_output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.convolutional_transformer_encoder">
<span id="convolutional-transformer-encoder"></span><h2>Convolutional Transformer Encoder<a class="headerlink" href="#module-openspeech.encoders.convolutional_transformer_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.convolutional_transformer_encoder.ConvolutionalTransformerEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.convolutional_transformer_encoder.</code><code class="sig-name descname">ConvolutionalTransformerEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">extractor</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'vgg'</span></em>, <em class="sig-param"><span class="n">d_model</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">d_ff</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">2048</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">6</span></em>, <em class="sig-param"><span class="n">num_heads</span><span class="p">:</span> <span class="n"><a class="reference external" 
href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">8</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.3</span></em>, <em class="sig-param"><span class="n">conv_activation</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'relu'</span></em>, <em class="sig-param"><span class="n">joint_ctc_attention</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">False</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/convolutional_transformer_encoder.html#ConvolutionalTransformerEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.convolutional_transformer_encoder.ConvolutionalTransformerEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>The TransformerEncoder is composed of a stack of N identical layers.
Each layer has two sub-layers. The first is a multi-head self-attention mechanism,
and the second is a simple, position-wise fully connected feed-forward network.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_dim</strong> – dimension of feature vector</p></li>
<li><p><strong>extractor</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a>) – convolutional extractor</p></li>
<li><p><strong>d_model</strong> – dimension of model (default: 512)</p></li>
<li><p><strong>d_ff</strong> – dimension of feed forward network (default: 2048)</p></li>
<li><p><strong>num_layers</strong> – number of encoders layers (default: 6)</p></li>
<li><p><strong>num_heads</strong> – number of attention heads (default: 8)</p></li>
<li><p><strong>dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – probability of dropout (default: 0.3)</p></li>
<li><p><strong>conv_activation</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a><em>, </em><em>optional</em>) – activation function of convolutional extractor (default: relu)</p></li>
<li><p><strong>joint_ctc_attention</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – flag indicating whether to use joint CTC attention (default: False)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs:</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: list of sequences, whose length is the batch size and within which each sequence is list of tokens</p></li>
<li><p><strong>input_lengths</strong>: list of sequence lengths</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p>output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.convolutional_transformer_encoder.ConvolutionalTransformerEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/convolutional_transformer_encoder.html#ConvolutionalTransformerEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.convolutional_transformer_encoder.ConvolutionalTransformerEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoder training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – A input sequence passed to encoders. Typically for inputs this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p>output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.deepspeech2">
<span id="deepspeech2"></span><h2>DeepSpeech2<a class="headerlink" href="#module-openspeech.encoders.deepspeech2" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.deepspeech2.DeepSpeech2">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.deepspeech2.</code><code class="sig-name descname">DeepSpeech2</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">rnn_type</span><span class="o">=</span><span class="default_value">'gru'</span></em>, <em class="sig-param"><span class="n">num_rnn_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">5</span></em>, <em class="sig-param"><span class="n">rnn_hidden_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em>, <em class="sig-param"><span class="n">bidirectional</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">True</span></em>, <em 
class="sig-param"><span class="n">activation</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'hardtanh'</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/deepspeech2.html#DeepSpeech2"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.deepspeech2.DeepSpeech2" title="Permalink to this definition">¶</a></dt>
<dd><p>DeepSpeech2 is a set of speech recognition models based on Baidu DeepSpeech2. DeepSpeech2 is trained with CTC loss.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – dimension of input vector</p></li>
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – number of classes for classification</p></li>
<li><p><strong>rnn_type</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a><em>, </em><em>optional</em>) – type of RNN cell (default: gru)</p></li>
<li><p><strong>num_rnn_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – number of recurrent layers (default: 5)</p></li>
<li><p><strong>rnn_hidden_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the hidden state <cite>h</cite></p></li>
<li><p><strong>dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – dropout probability (default: 0.1)</p></li>
<li><p><strong>bidirectional</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – if True, becomes a bidirectional encoder (default: True)</p></li>
<li><p><strong>activation</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a>) – type of activation function (default: hardtanh)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: list of sequences, whose length is the batch size and within which each sequence is list of tokens</p></li>
<li><p><strong>input_lengths</strong>: list of sequence lengths</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p>predicted_log_probs (torch.FloatTensor): Log probability of model predictions.</p></li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>(Tensor, Tensor)</p>
</dd>
</dl>
<dl class="simple">
<dt>Reference:</dt><dd><p>Dario Amodei et al.: Deep Speech 2: End-to-End Speech Recognition in English and Mandarin
<a class="reference external" href="https://arxiv.org/abs/1512.02595">https://arxiv.org/abs/1512.02595</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.deepspeech2.DeepSpeech2.count_parameters">
<code class="sig-name descname">count_parameters</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; <a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a><a class="reference internal" href="../_modules/openspeech/encoders/deepspeech2.html#DeepSpeech2.count_parameters"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.deepspeech2.DeepSpeech2.count_parameters" title="Permalink to this definition">¶</a></dt>
<dd><p>Count parameters of encoders</p>
</dd></dl>

<dl class="py method">
<dt id="openspeech.encoders.deepspeech2.DeepSpeech2.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/deepspeech2.html#DeepSpeech2.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.deepspeech2.DeepSpeech2.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoder_only training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to encoders. Typically for inputs this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>predicted_log_prob (torch.FloatTensor): Log probability of model predictions.</p></li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

<dl class="py method">
<dt id="openspeech.encoders.deepspeech2.DeepSpeech2.update_dropout">
<code class="sig-name descname">update_dropout</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span></em><span class="sig-paren">)</span> &#x2192; <a class="reference external" href="https://docs.python.org/3/library/constants.html#None" title="(in Python v3.9)">None</a><a class="reference internal" href="../_modules/openspeech/encoders/deepspeech2.html#DeepSpeech2.update_dropout"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.deepspeech2.DeepSpeech2.update_dropout" title="Permalink to this definition">¶</a></dt>
<dd><p>Update dropout probability of encoders</p>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.jasper">
<span id="jasper"></span><h2>Jasper<a class="headerlink" href="#module-openspeech.encoders.jasper" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.jasper.Jasper">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.jasper.</code><code class="sig-name descname">Jasper</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">configs</span><span class="p">:</span> <span class="n">omegaconf.dictconfig.DictConfig</span></em>, <em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/jasper.html#Jasper"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.jasper.Jasper" title="Permalink to this definition">¶</a></dt>
<dd><p>Jasper (Just Another Speech Recognizer), an ASR model comprised of 54 layers proposed by NVIDIA.
Jasper achieved sub 3 percent word error rate (WER) on the LibriSpeech dataset.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – number of classification</p></li>
<li><p><strong>version</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a>) – version of jasper. Marked as BxR: B - number of blocks, R - number of sub-blocks</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths, residual</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: tensor contains input sequence vector</p></li>
<li><p><strong>input_lengths</strong>: tensor contains sequence lengths</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p>outputs (torch.FloatTensor): Log probability of model predictions.  <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">num_classes)</span></code></p></li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>(Tensor, Tensor)</p>
</dd>
</dl>
<dl class="simple">
<dt>Reference:</dt><dd><p>Jason Li. et al.: Jasper: An End-to-End Convolutional Neural Acoustic Model
<a class="reference external" href="https://arxiv.org/pdf/1904.03288.pdf">https://arxiv.org/pdf/1904.03288.pdf</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.jasper.Jasper.count_parameters">
<code class="sig-name descname">count_parameters</code><span class="sig-paren">(</span><span class="sig-paren">)</span> &#x2192; <a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a><a class="reference internal" href="../_modules/openspeech/encoders/jasper.html#Jasper.count_parameters"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.jasper.Jasper.count_parameters" title="Permalink to this definition">¶</a></dt>
<dd><p>Count parameters of model</p>
</dd></dl>

<dl class="py method">
<dt id="openspeech.encoders.jasper.Jasper.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/jasper.html#Jasper.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.jasper.Jasper.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoder_only training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to encoders. Typically for inputs this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs (torch.FloatTensor): Log probability of model predictions.  <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">num_classes)</span></code></p></li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

<dl class="py method">
<dt id="openspeech.encoders.jasper.Jasper.update_dropout">
<code class="sig-name descname">update_dropout</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span></em><span class="sig-paren">)</span> &#x2192; <a class="reference external" href="https://docs.python.org/3/library/constants.html#None" title="(in Python v3.9)">None</a><a class="reference internal" href="../_modules/openspeech/encoders/jasper.html#Jasper.update_dropout"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.jasper.Jasper.update_dropout" title="Permalink to this definition">¶</a></dt>
<dd><p>Update dropout probability of model</p>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.lstm_encoder">
<span id="lstm-encoder"></span><h2>LSTM Encoder<a class="headerlink" href="#module-openspeech.encoders.lstm_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.lstm_encoder.LSTMEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.lstm_encoder.</code><code class="sig-name descname">LSTMEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">None</span></em>, <em class="sig-param"><span class="n">hidden_state_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.3</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">3</span></em>, <em class="sig-param"><span class="n">bidirectional</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">True</span></em>, <em class="sig-param"><span class="n">rnn_type</span><span class="p">:</span> 
<span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'lstm'</span></em>, <em class="sig-param"><span class="n">joint_ctc_attention</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">False</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/lstm_encoder.html#LSTMEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.lstm_encoder.LSTMEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>Converts low level speech signals into higher level features</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – dimension of input vector</p></li>
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – number of classification</p></li>
<li><p><strong>hidden_state_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the encoders hidden state <cite>h</cite></p></li>
<li><p><strong>num_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – number of recurrent layers (default: 3)</p></li>
<li><p><strong>bidirectional</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – if True, becomes a bidirectional encoders (default: True)</p></li>
<li><p><strong>rnn_type</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a><em>, </em><em>optional</em>) – type of RNN cell (default: lstm)</p></li>
<li><p><strong>dropout_p</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a><em>, </em><em>optional</em>) – dropout probability of encoders (default: 0.2)</p></li>
<li><p><strong>joint_ctc_attention</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – flag indicating whether to use joint CTC attention</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: list of sequences, whose length is the batch size and within which each sequence is list of tokens</p></li>
<li><p><strong>input_lengths</strong>: list of sequence lengths</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p>encoder_output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.lstm_encoder.LSTMEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span>Optional<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/lstm_encoder.html#LSTMEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.lstm_encoder.LSTMEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoders training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to encoders. Typically for inputs this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.</p>
</dd>
</dl>
</li>
<li><p>encoder_output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.quartznet">
<span id="quartznet"></span><h2>QuartzNet<a class="headerlink" href="#module-openspeech.encoders.quartznet" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.quartznet.QuartzNet">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.quartznet.</code><code class="sig-name descname">QuartzNet</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">configs</span><span class="p">:</span> <span class="n">omegaconf.dictconfig.DictConfig</span></em>, <em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/quartznet.html#QuartzNet"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.quartznet.QuartzNet" title="Permalink to this definition">¶</a></dt>
<dd><p>QuartzNet is fully convolutional automatic speech recognition model.  The model is composed of multiple
blocks with residual connections between them. Each block consists of one or more modules with
1D time-channel separable convolutional layers, batch normalization, and ReLU layers.
It is trained with CTC loss.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>configs</strong> (<em>DictConfig</em>) – hydra configuration set.</p></li>
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – dimension of input.</p></li>
<li><p><strong>num_classes</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – number of classification.</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs:</dt><dd><p>inputs (torch.FloatTensor): An input sequence passed to encoders. Typically for inputs this will be a padded <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.
input_lengths (torch.LongTensor): The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p>outputs (torch.FloatTensor): Log probability of model predictions.  <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">num_classes)</span></code></p></li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>(Tensor, Tensor)</p>
</dd>
</dl>
<dl class="simple">
<dt>Reference:</dt><dd><p>Samuel Kriman et al.: QUARTZNET: DEEP AUTOMATIC SPEECH RECOGNITION WITH 1D TIME-CHANNEL SEPARABLE CONVOLUTIONS.
<a class="reference external" href="https://arxiv.org/abs/1910.10261.pdf">https://arxiv.org/abs/1910.10261.pdf</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.quartznet.QuartzNet.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/quartznet.html#QuartzNet.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.quartznet.QuartzNet.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for encoder_only training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to encoders. Typically for inputs this will be a padded <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs (torch.FloatTensor): Log probability of model predictions.  <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">num_classes)</span></code></p></li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.rnn_transducer_encoder">
<span id="rnn-transducer-encoder"></span><h2>RNN Transducer Encoder<a class="headerlink" href="#module-openspeech.encoders.rnn_transducer_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.rnn_transducer_encoder.RNNTransducerEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.rnn_transducer_encoder.</code><code class="sig-name descname">RNNTransducerEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">hidden_state_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">output_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">rnn_type</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)">str</a></span> <span class="o">=</span> <span class="default_value">'lstm'</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.2</span></em>, <em class="sig-param"><span class="n">bidirectional</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> 
<span class="default_value">True</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/rnn_transducer_encoder.html#RNNTransducerEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.rnn_transducer_encoder.RNNTransducerEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>Encoder of RNN-Transducer.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – dimension of input vector</p></li>
<li><p><strong>hidden_state_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – hidden state dimension of encoders (default: 320)</p></li>
<li><p><strong>output_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – output dimension of encoders and decoders (default: 512)</p></li>
<li><p><strong>num_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a><em>, </em><em>optional</em>) – number of encoders layers (default: 4)</p></li>
<li><p><strong>rnn_type</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.9)"><em>str</em></a><em>, </em><em>optional</em>) – type of rnn cell (default: lstm)</p></li>
<li><p><strong>bidirectional</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – if True, becomes a bidirectional encoders (default: True)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, input_lengths</dt><dd><p>inputs (torch.FloatTensor): An input sequence passed to the encoders. Typically this will be a padded <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.
input_lengths (torch.LongTensor): The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><p>(Tensor, Tensor)</p>
<ul class="simple">
<li><dl class="simple">
<dt>outputs (torch.FloatTensor): An output sequence of the encoders. <cite>FloatTensor</cite> of size</dt><dd><p><code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt>hidden_states (torch.FloatTensor): A hidden state of encoders. <cite>FloatTensor</cite> of size</dt><dd><p><code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p>
</dd>
</dl>
</li>
</ul>
</p>
</dd>
</dl>
<dl class="simple">
<dt>Reference:</dt><dd><p>A Graves: Sequence Transduction with Recurrent Neural Networks
<a class="reference external" href="https://arxiv.org/abs/1211.3711">https://arxiv.org/abs/1211.3711</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.rnn_transducer_encoder.RNNTransducerEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/rnn_transducer_encoder.html#RNNTransducerEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.rnn_transducer_encoder.RNNTransducerEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagates <cite>inputs</cite> for encoder training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to the encoders. Typically this will be a padded <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><p>(Tensor, Tensor)</p>
<ul class="simple">
<li><dl class="simple">
<dt>outputs (torch.FloatTensor): An output sequence of the encoders. <cite>FloatTensor</cite> of size</dt><dd><p><code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p>
</dd>
</dl>
</li>
<li><p>output_lengths (torch.LongTensor): The length of output tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.transformer_encoder">
<span id="transformer-encoder"></span><h2>Transformer Encoder<a class="headerlink" href="#module-openspeech.encoders.transformer_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.transformer_encoder.TransformerEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.transformer_encoder.</code><code class="sig-name descname">TransformerEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">num_classes</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span></em>, <em class="sig-param"><span class="n">input_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">80</span></em>, <em class="sig-param"><span class="n">d_model</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">d_ff</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">2048</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">6</span></em>, <em class="sig-param"><span class="n">num_heads</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">8</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span 
class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.3</span></em>, <em class="sig-param"><span class="n">joint_ctc_attention</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)">bool</a></span> <span class="o">=</span> <span class="default_value">False</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_encoder.html#TransformerEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_encoder.TransformerEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>The TransformerEncoder is composed of a stack of N identical layers.
Each layer has two sub-layers. The first is a multi-head self-attention mechanism,
and the second is a simple, position-wise fully connected feed-forward network.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_dim</strong> – dimension of feature vector</p></li>
<li><p><strong>d_model</strong> – dimension of model (default: 512)</p></li>
<li><p><strong>d_ff</strong> – dimension of feed forward network (default: 2048)</p></li>
<li><p><strong>num_layers</strong> – number of encoders layers (default: 6)</p></li>
<li><p><strong>num_heads</strong> – number of attention heads (default: 8)</p></li>
<li><p><strong>dropout_p</strong> – probability of dropout (default: 0.3)</p></li>
<li><p><strong>joint_ctc_attention</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.9)"><em>bool</em></a><em>, </em><em>optional</em>) – flag indicating whether to use joint CTC attention (default: False)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs:</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: list of sequences, whose length is the batch size and within which each sequence is list of tokens</p></li>
<li><p><strong>input_lengths</strong>: list of sequence lengths</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None.  <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">num_classes)</span></code></p>
</dd>
</dl>
</li>
<li><p>output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
<dl class="simple">
<dt>Reference:</dt><dd><p>Ashish Vaswani et al.: Attention Is All You Need
<a class="reference external" href="https://arxiv.org/abs/1706.03762">https://arxiv.org/abs/1706.03762</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.transformer_encoder.TransformerEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_encoder.html#TransformerEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_encoder.TransformerEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagates <cite>inputs</cite> for encoder training.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – An input sequence passed to the encoders. Typically this will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs: A output sequence of encoders. <cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><dl class="simple">
<dt>encoder_logits: Log probability of encoders outputs will be passed to CTC Loss.</dt><dd><p>If joint_ctc_attention is False, return None. <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">num_classes)</span></code></p>
</dd>
</dl>
</li>
<li><p>output_lengths: The length of encoders outputs. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>(Tensor, Tensor, Tensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

<dl class="py class">
<dt id="openspeech.encoders.transformer_encoder.TransformerEncoderLayer">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.transformer_encoder.</code><code class="sig-name descname">TransformerEncoderLayer</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">d_model</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">num_heads</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">8</span></em>, <em class="sig-param"><span class="n">d_ff</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">2048</span></em>, <em class="sig-param"><span class="n">dropout_p</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.3</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_encoder.html#TransformerEncoderLayer"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_encoder.TransformerEncoderLayer" title="Permalink to this definition">¶</a></dt>
<dd><p>EncoderLayer is made up of self-attention and feedforward network.
This standard encoders layer is based on the paper “Attention Is All You Need”.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>d_model</strong> – dimension of model (default: 512)</p></li>
<li><p><strong>num_heads</strong> – number of attention heads (default: 8)</p></li>
<li><p><strong>d_ff</strong> – dimension of feed forward network (default: 2048)</p></li>
<li><p><strong>dropout_p</strong> – probability of dropout (default: 0.3)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs:</dt><dd><p>inputs (torch.FloatTensor): input sequence of transformer encoder layer
self_attn_mask (torch.BoolTensor): mask of self attention</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><p>(Tensor, Tensor)</p>
<ul class="simple">
<li><p>outputs (torch.FloatTensor): output of transformer encoder layer</p></li>
<li><p>attn (torch.FloatTensor): attention of transformer encoder layer</p></li>
</ul>
</p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.transformer_encoder.TransformerEncoderLayer.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">self_attn_mask</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span> <span class="o">=</span> <span class="default_value">None</span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_encoder.html#TransformerEncoderLayer.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_encoder.TransformerEncoderLayer.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagation of the transformer encoder layer.</p>
<dl class="simple">
<dt>Inputs:</dt><dd><p>inputs (torch.FloatTensor): input sequence of transformer encoder layer
self_attn_mask (torch.BoolTensor): mask of self attention</p>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p>output of transformer encoder layer
attn (torch.FloatTensor): attention of transformer encoder layer</p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p>outputs (torch.FloatTensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="module-openspeech.encoders.transformer_transducer_encoder">
<span id="transformer-transducer-encoder"></span><h2>Transformer Transducer Encoder<a class="headerlink" href="#module-openspeech.encoders.transformer_transducer_encoder" title="Permalink to this headline">¶</a></h2>
<dl class="py class">
<dt id="openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoder">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.transformer_transducer_encoder.</code><code class="sig-name descname">TransformerTransducerEncoder</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">input_size</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">80</span></em>, <em class="sig-param"><span class="n">model_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">d_ff</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">2048</span></em>, <em class="sig-param"><span class="n">num_layers</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">18</span></em>, <em class="sig-param"><span class="n">num_heads</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">8</span></em>, <em class="sig-param"><span class="n">dropout</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em>, <em 
class="sig-param"><span class="n">max_positional_length</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">5000</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_transducer_encoder.html#TransformerTransducerEncoder"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoder" title="Permalink to this definition">¶</a></dt>
<dd><p>Converts the audio signal to higher feature values</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input_size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – dimension of input vector (default : 80)</p></li>
<li><p><strong>model_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the audio encoder (default : 512)</p></li>
<li><p><strong>d_ff</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the feed forward layers (default : 2048)</p></li>
<li><p><strong>num_layers</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of audio encoder layers (default: 18)</p></li>
<li><p><strong>num_heads</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of heads in the multi-head attention (default: 8)</p></li>
<li><p><strong>dropout</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a>) – dropout probability of audio encoder (default: 0.1)</p></li>
<li><p><strong>max_positional_length</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – Maximum length to use for positional encoding (default : 5000)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, inputs_lens</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: Parsed audio of batch size number</p></li>
<li><p><strong>inputs_lens</strong>: Tensor of sequence lengths</p></li>
</ul>
</dd>
</dl>
<dl class="field-list simple">
<dt class="field-odd">Returns</dt>
<dd class="field-odd"><p><strong>outputs</strong> (torch.FloatTensor): <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>
<strong>input_lengths</strong> (torch.LongTensor): <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
<dt class="field-even">Return type</dt>
<dd class="field-even"><p><ul class="simple">
<li><p>outputs (torch.FloatTensor)</p></li>
</ul>
</p>
</dd>
</dl>
<dl class="simple">
<dt>Reference:</dt><dd><p>Qian Zhang et al.: Transformer Transducer: A Streamable Speech Recognition Model with Transformer Encoders and RNN-T Loss
<a class="reference external" href="https://arxiv.org/abs/2002.02562">https://arxiv.org/abs/2002.02562</a></p>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoder.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">input_lengths</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_transducer_encoder.html#TransformerTransducerEncoder.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoder.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagates <cite>inputs</cite> for the audio encoder.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> (<em>torch.FloatTensor</em>) – A input sequence passed to audio encoder. Typically inputs will be a padded
<cite>FloatTensor</cite> of size <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>.</p></li>
<li><p><strong>input_lengths</strong> (<em>torch.LongTensor</em>) – The length of input tensor. <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p><strong>outputs</strong> (Tensor): <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code>
<strong>input_lengths</strong> (Tensor): <code class="docutils literal notranslate"><span class="pre">(batch)</span></code></p>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p><strong>outputs</strong> (Tensor)</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

<dl class="py class">
<dt id="openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoderLayer">
<em class="property">class </em><code class="sig-prename descclassname">openspeech.encoders.transformer_transducer_encoder.</code><code class="sig-name descname">TransformerTransducerEncoderLayer</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">model_dim</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">512</span></em>, <em class="sig-param"><span class="n">d_ff</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">2048</span></em>, <em class="sig-param"><span class="n">num_heads</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)">int</a></span> <span class="o">=</span> <span class="default_value">8</span></em>, <em class="sig-param"><span class="n">dropout</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)">float</a></span> <span class="o">=</span> <span class="default_value">0.1</span></em><span class="sig-paren">)</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_transducer_encoder.html#TransformerTransducerEncoderLayer"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoderLayer" title="Permalink to this definition">¶</a></dt>
<dd><p>Repeated layers common to audio encoders and label encoders</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>model_dim</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the encoder (default : 512)</p></li>
<li><p><strong>d_ff</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of features in the feed forward layers (default : 2048)</p></li>
<li><p><strong>num_heads</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.9)"><em>int</em></a>) – the number of heads in the multi-head attention (default: 8)</p></li>
<li><p><strong>dropout</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.9)"><em>float</em></a>) – dropout probability of encoder layer (default: 0.1)</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Inputs: inputs, self_attn_mask</dt><dd><ul class="simple">
<li><p><strong>inputs</strong>: Audio feature or label feature</p></li>
<li><p><strong>self_attn_mask</strong>: Self attention mask to use in multi-head attention</p></li>
</ul>
</dd>
<dt>Returns: outputs, attn_distribution</dt><dd><p>(Tensor, Tensor)</p>
<ul class="simple">
<li><p>outputs (torch.FloatTensor): Tensor containing higher (audio, label) feature values</p></li>
<li><p>attn_distribution (torch.FloatTensor): Attention distribution in multi-head attention</p></li>
</ul>
</dd>
</dl>
<dl class="py method">
<dt id="openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoderLayer.forward">
<code class="sig-name descname">forward</code><span class="sig-paren">(</span><em class="sig-param"><span class="n">inputs</span><span class="p">:</span> <span class="n"><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a></span></em>, <em class="sig-param"><span class="n">self_attn_mask</span><span class="p">:</span> <span class="n">Optional<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span></span> <span class="o">=</span> <span class="default_value">None</span></em><span class="sig-paren">)</span> &#x2192; Tuple<span class="p">[</span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">, </span><a class="reference external" href="https://pytorch.org/docs/master/tensors.html#torch.Tensor" title="(in PyTorch vmaster (1.10.0a0+git2bfbfd8 ))">torch.Tensor</a><span class="p">]</span><a class="reference internal" href="../_modules/openspeech/encoders/transformer_transducer_encoder.html#TransformerTransducerEncoderLayer.forward"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#openspeech.encoders.transformer_transducer_encoder.TransformerTransducerEncoderLayer.forward" title="Permalink to this definition">¶</a></dt>
<dd><p>Forward propagate <cite>inputs</cite> for the label encoder.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>inputs</strong> – An input sequence passed to the encoder layer. <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><p><strong>self_attn_mask</strong> – Self-attention mask used to mask out padding positions <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">seq_length)</span></code></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><ul class="simple">
<li><p><strong>outputs</strong> (Tensor): <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">dimension)</span></code></p></li>
<li><p><strong>attn_distribution</strong> (Tensor): <code class="docutils literal notranslate"><span class="pre">(batch,</span> <span class="pre">seq_length,</span> <span class="pre">seq_length)</span></code></p></li>
</ul>
</dd>
<dt class="field-odd">Return type</dt>
<dd class="field-odd"><p>Tuple[Tensor, Tensor]</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
</div>


           </div>

          </div>
          <footer>
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
        <a href="Modules.html" class="btn btn-neutral float-right" title="Modules" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
        <a href="Decoders.html" class="btn btn-neutral float-left" title="Decoders" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
    </div>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, Kim, Soohwan and Ha, Sangchun and Cho, Soyoung.

    </p>
  </div>



    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a

    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>

    provided by <a href="https://readthedocs.org">Read the Docs</a>.

</footer>
        </div>
      </div>

    </section>

  </div>


  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>






</body>
</html>