

<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Reading Comprehension &mdash; NLP Architect by Intel® AI Lab 0.5.2 documentation</title>
  

  
  
  
  

  
  <script type="text/javascript" src="../_static/js/modernizr.min.js"></script>
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script type="text/javascript" src="../_static/jquery.js"></script>
        <script type="text/javascript" src="../_static/underscore.js"></script>
        <script type="text/javascript" src="../_static/doctools.js"></script>
        <script type="text/javascript" src="../_static/language_data.js"></script>
        <script type="text/javascript" src="../_static/install.js"></script>
        <script async="async" type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    

  
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../_static/nlp_arch_theme.css" type="text/css" />
  <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto+Mono" type="text/css" />
  <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Open+Sans:100,900" type="text/css" />
    <link rel="index" title="Index" href="../genindex.html" />
    <link rel="search" title="Search" href="../search.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html">
          

          
            
            <img src="../_static/logo.png" class="logo" alt="Logo"/>
          
          </a>

          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <ul>
<li class="toctree-l1"><a class="reference internal" href="../quick_start.html">Quick start</a></li>
<li class="toctree-l1"><a class="reference internal" href="../installation.html">Installation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../publications.html">Publications</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials.html">Jupyter Tutorials</a></li>
<li class="toctree-l1"><a class="reference internal" href="../model_zoo.html">Model Zoo</a></li>
</ul>
<p class="caption"><span class="caption-text">NLP/NLU Models</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../tagging/sequence_tagging.html">Sequence Tagging</a></li>
<li class="toctree-l1"><a class="reference internal" href="../sentiment.html">Sentiment Analysis</a></li>
<li class="toctree-l1"><a class="reference internal" href="../bist_parser.html">Dependency Parsing</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intent.html">Intent Extraction</a></li>
<li class="toctree-l1"><a class="reference internal" href="../lm.html">Language Models</a></li>
<li class="toctree-l1"><a class="reference internal" href="../information_extraction.html">Information Extraction</a></li>
<li class="toctree-l1"><a class="reference internal" href="../transformers.html">Transformers</a></li>
<li class="toctree-l1"><a class="reference internal" href="additional.html">Additional Models</a></li>
</ul>
<p class="caption"><span class="caption-text">Optimized Models</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../quantized_bert.html">Quantized BERT</a></li>
<li class="toctree-l1"><a class="reference internal" href="../transformers_distillation.html">Transformers Distillation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../sparse_gnmt.html">Sparse Neural Machine Translation</a></li>
</ul>
<p class="caption"><span class="caption-text">Solutions</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../absa_solution.html">Aspect Based Sentiment Analysis</a></li>
<li class="toctree-l1"><a class="reference internal" href="../term_set_expansion.html">Set Expansion</a></li>
<li class="toctree-l1"><a class="reference internal" href="../trend_analysis.html">Trend Analysis</a></li>
</ul>
<p class="caption"><span class="caption-text">For Developers</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../generated_api/nlp_architect_api_index.html">nlp_architect API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../developer_guide.html">Developer Guide</a></li>
</ul>

            
          
        </div>
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">NLP Architect by Intel® AI Lab</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html">Docs</a> &raquo;</li>
        
      <li>Reading Comprehension</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <div class="section" id="reading-comprehension">
<h1>Reading Comprehension<a class="headerlink" href="#reading-comprehension" title="Permalink to this headline">¶</a></h1>
<div class="section" id="overview">
<h2>Overview<a class="headerlink" href="#overview" title="Permalink to this headline">¶</a></h2>
<p>This directory contains an implementation of the boundary model (b in the Figure) Match LSTM and
Answer Pointer network for Machine Reading Comprehension. The idea behind this
method is to build a question aware representation of the passage and use this representation as an
input to the pointer network which identifies the start and end indices of the answer.</p>
<div class="section" id="model-architecture">
<h3>Model Architecture<a class="headerlink" href="#model-architecture" title="Permalink to this headline">¶</a></h3>
<img alt="MatchLSTM and Answer Pointer model architecture diagram" src="archived/../../../examples/reading_comprehension/MatchLSTM_Model.png" />
</div>
</div>
<div class="section" id="files">
<h2>Files<a class="headerlink" href="#files" title="Permalink to this headline">¶</a></h2>
<ul class="simple">
<li><strong>examples/reading_comprehension/train.py</strong> - Implements the end to end model along with the training commands</li>
<li><strong>examples/reading_comprehension/utils.py</strong> - Implements different utility functions to set up the data loader and for evaluation.</li>
<li><strong>examples/reading_comprehension/prepare_data.py</strong> - Implements the pipeline to preprocess the dataset</li>
<li><strong>examples/reading_comprehension/matchlstm_ansptr.py</strong> - Defines the end to end MatchLSTM and <code class="xref py py-class docutils literal notranslate"><span class="pre">Answer_Pointer</span></code> network for Reading Comprehension</li>
</ul>
</div>
<div class="section" id="dataset">
<h2>Dataset<a class="headerlink" href="#dataset" title="Permalink to this headline">¶</a></h2>
<p>This repository uses the SQuAD dataset. The preprocessing steps required prior to training are listed below:</p>
<ol class="arabic simple">
<li><code class="docutils literal notranslate"><span class="pre">cd</span> <span class="pre">examples/reading_comprehension/;</span> <span class="pre">mkdir</span> <span class="pre">data</span></code></li>
</ol>
<p>2. Download the official SQuAD-v1.1 training (train-v1.1.json) and development (dev-v1.1.json) datasets from <a class="reference external" href="https://worksheets.codalab.org/worksheets/0x62eefc3e64e04430a1a24785a9293fff/">here</a> and place the extracted json files in the <code class="docutils literal notranslate"><span class="pre">data</span></code> directory. For more information about SQuAD, please visit <a class="reference external" href="https://rajpurkar.github.io/SQuAD-explorer/">https://rajpurkar.github.io/SQuAD-explorer/</a>.
The terms and conditions of the data set license apply. Intel does not grant any rights to the data files.
3. Download the GloVe pretrained embeddings from <a class="reference external" href="http://nlp.stanford.edu/data/glove.6B.zip">http://nlp.stanford.edu/data/glove.6B.zip</a> and copy <code class="docutils literal notranslate"><span class="pre">glove.6B.300d.txt</span></code> file into the <code class="docutils literal notranslate"><span class="pre">data</span></code> directory. For more information about GloVe please visit <a class="reference external" href="https://nlp.stanford.edu/projects/glove/">https://nlp.stanford.edu/projects/glove/</a>. The terms and conditions of the data set license apply. Intel does not grant any rights to the data files.
4. Preprocess the data set using the following command:</p>
<div class="code python highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">python</span> <span class="n">examples</span><span class="o">/</span><span class="n">reading_comprehension</span><span class="o">/</span><span class="n">prepare_data</span><span class="o">.</span><span class="n">py</span> <span class="o">--</span><span class="n">data_path</span> <span class="n">data</span><span class="o">/</span>
</pre></div>
</div>
</div>
<div class="section" id="running-modalities">
<h2>Running Modalities<a class="headerlink" href="#running-modalities" title="Permalink to this headline">¶</a></h2>
<div class="section" id="training-inference">
<h3>Training &amp; Inference<a class="headerlink" href="#training-inference" title="Permalink to this headline">¶</a></h3>
<p>Train the model using the following command:</p>
<div class="code python highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">python</span> <span class="n">examples</span><span class="o">/</span><span class="n">reading_comprehension</span><span class="o">/</span><span class="n">train</span><span class="o">.</span><span class="n">py</span> <span class="o">--</span><span class="n">data_path</span> <span class="n">data</span><span class="o">/</span>
</pre></div>
</div>
<p>To visualize predicted answers for paragraphs and questions in the validation dataset (i.e., run inference with batch_size=1) use the following command:</p>
<div class="code python highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">python</span> <span class="n">train</span><span class="o">.</span><span class="n">py</span> <span class="o">--</span><span class="n">restore_model</span><span class="o">=</span><span class="kc">True</span> <span class="o">--</span><span class="n">inference_mode</span><span class="o">=</span><span class="kc">True</span> <span class="o">--</span><span class="n">data_path</span><span class="o">=</span><span class="n">data</span><span class="o">/</span> <span class="o">--</span><span class="n">model_dir</span><span class="o">=/</span><span class="n">path</span><span class="o">/</span><span class="n">to</span><span class="o">/</span><span class="n">trained_model</span><span class="o">/</span> <span class="o">--</span><span class="n">batch_size</span><span class="o">=</span><span class="mi">1</span> <span class="o">--</span><span class="n">num_examples</span><span class="o">=</span><span class="mi">50</span>
</pre></div>
</div>
<p>The command line options available are:</p>
<table class="docutils option-list" frame="void" rules="none">
<col class="option" />
<col class="description" />
<tbody valign="top">
<tr><td class="option-group">
<kbd><span class="option">--data_path</span></kbd></td>
<td>enter the path to the preprocessed dataset</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--max_para_req</span></kbd></td>
<td>enter the max length of the paragraph to truncate the dataset. Default is 300.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--epochs</span></kbd></td>
<td>enter number of epochs to start training. Default is 15.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--gpu_id</span></kbd></td>
<td>select the gpu id to train the model. Default is 0.</td></tr>
<tr><td class="option-group" colspan="2">
<kbd><span class="option">--train_set_size</span></kbd></td>
</tr>
<tr><td>&#160;</td><td>enter the size of the training set. Default takes in all examples for training.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--batch_size</span></kbd></td>
<td>enter the batch size. Default is 64.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--hidden_size</span></kbd></td>
<td>enter the number of hidden units. Default is 150.</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--model_dir</span></kbd></td>
<td>enter the path to save/load model.</td></tr>
<tr><td class="option-group" colspan="2">
<kbd><span class="option">--select_device</span></kbd></td>
</tr>
<tr><td>&#160;</td><td>select the device to run training (CPU, GPU etc)</td></tr>
<tr><td class="option-group" colspan="2">
<kbd><span class="option">--restore_model</span></kbd></td>
</tr>
<tr><td>&#160;</td><td>choose whether to restore training from a previously saved model. Default is False.</td></tr>
<tr><td class="option-group" colspan="2">
<kbd><span class="option">--inference_mode</span></kbd></td>
</tr>
<tr><td>&#160;</td><td>choose whether to run inference only</td></tr>
<tr><td class="option-group">
<kbd><span class="option">--num_examples</span></kbd></td>
<td>enter the number of examples to run inference. Default is 50.</td></tr>
</tbody>
</table>
</div>
<div class="section" id="results">
<h3>Results<a class="headerlink" href="#results" title="Permalink to this headline">¶</a></h3>
<p>After training starts, you will see outputs similar to this:</p>
<div class="code python highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">Loading</span> <span class="n">Embeddings</span>
<span class="n">creating</span> <span class="n">training</span> <span class="ow">and</span> <span class="n">development</span> <span class="n">sets</span>
<span class="n">Match</span> <span class="n">LSTM</span> <span class="n">Pass</span>
<span class="n">Answer</span> <span class="n">Pointer</span> <span class="n">Pass</span>
<span class="n">Setting</span> <span class="n">up</span> <span class="n">Loss</span>
<span class="n">Set</span> <span class="n">up</span> <span class="n">optimizer</span>
<span class="n">Begin</span> <span class="n">Training</span>
<span class="n">Epoch</span> <span class="n">Number</span><span class="p">:</span>  <span class="mi">0</span>
<span class="n">iteration</span> <span class="o">=</span> <span class="mi">1</span><span class="p">,</span> <span class="n">train</span> <span class="n">loss</span> <span class="o">=</span> <span class="mf">13.156427383422852</span>
<span class="n">F1_Score</span> <span class="ow">and</span> <span class="n">EM_score</span> <span class="n">are</span> <span class="mf">0.0</span> <span class="mf">0.0</span>
<span class="n">iteration</span> <span class="o">=</span> <span class="mi">21</span><span class="p">,</span> <span class="n">train</span> <span class="n">loss</span> <span class="o">=</span> <span class="mf">12.441322326660156</span>
<span class="n">F1_Score</span> <span class="ow">and</span> <span class="n">EM_score</span> <span class="n">are</span> <span class="mf">8.333333333333332</span> <span class="mf">0.0</span>
<span class="n">iteration</span> <span class="o">=</span> <span class="mi">41</span><span class="p">,</span> <span class="n">train</span> <span class="n">loss</span> <span class="o">=</span> <span class="mf">10.773386001586914</span>
<span class="n">F1_Score</span> <span class="ow">and</span> <span class="n">EM_score</span> <span class="n">are</span> <span class="mf">6.25</span> <span class="mf">6.25</span>
<span class="n">iteration</span> <span class="o">=</span> <span class="mi">61</span><span class="p">,</span> <span class="n">train</span> <span class="n">loss</span> <span class="o">=</span> <span class="mf">11.69123649597168</span>
<span class="n">F1_Score</span> <span class="ow">and</span> <span class="n">EM_score</span> <span class="n">are</span> <span class="mf">6.25</span> <span class="mf">6.25</span>
</pre></div>
</div>
<p>Please note that after each epoch you will see the validation F1 and EM scores being printed out.
These numbers are a result of a much stricter evaluation and are lower than the official evaluation numbers.</p>
<p>Considering the default setting, which has a training set of 85387 examples and a development set of 10130 examples,
after 15 epochs you should expect to see F1 and EM scores on the development set similar to this:</p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
<tr class="field-odd field"><th class="field-name">F1 Score:</th><td class="field-body">~62%</td>
</tr>
<tr class="field-even field"><th class="field-name">EM Score:</th><td class="field-body">~48%</td>
</tr>
</tbody>
</table>
</div>
</div>
<div class="section" id="references">
<h2>References<a class="headerlink" href="#references" title="Permalink to this headline">¶</a></h2>
<table class="docutils footnote" frame="void" id="id1" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label">[1]</td><td>SQuAD: 100,000+ Questions for Machine Comprehension of Text. Authors: Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, Percy Liang.
Subjects: Computation and Language (cs.CL). arXiv:1606.05250 [cs.CL][https://arxiv.org/abs/1606.05250]. License: https://creativecommons.org/licenses/by-sa/4.0/legalcode</td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="id2" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label">[2]</td><td>Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014 <a class="reference external" href="https://nlp.stanford.edu/pubs/glove.pdf">https://nlp.stanford.edu/pubs/glove.pdf</a>. License: <a class="reference external" href="http://www.opendatacommons.org/licenses/pddl/1.0/">http://www.opendatacommons.org/licenses/pddl/1.0/</a></td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="id3" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label">[3]</td><td>Wang, S., &amp; Jiang, J. (2016). Machine comprehension using match-lstm and answer pointer. arXiv preprint arXiv:1608.07905. [https://arxiv.org/abs/1608.07905]</td></tr>
</tbody>
</table>
</div>
</div>


           </div>
           
          </div>
          <footer>
  

  <hr/>

  <div role="contentinfo">
    <p>

    </p>
  </div>
  Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  


  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>