

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8">
  <meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/" />

  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>README &mdash; pytorch_tabnet  documentation</title>
  

  
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../_static/graphviz.css" type="text/css" />
  <link rel="stylesheet" href="../_static/./default.css" type="text/css" />

  
  
  
  

  
  <!--[if lt IE 9]>
    <script src="../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
        <script src="../_static/jquery.js"></script>
        <script src="../_static/underscore.js"></script>
        <script src="../_static/doctools.js"></script>
        <script src="../_static/language_data.js"></script>
    
    <script type="text/javascript" src="../_static/js/theme.js"></script>

    
    <link rel="index" title="Index" href="../genindex.html" />
    <link rel="search" title="Search" href="../search.html" />
    <link rel="next" title="pytorch_tabnet package" href="pytorch_tabnet.html" />
    <link rel="prev" title="Welcome to pytorch_tabnet’s documentation!" href="../index.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../index.html" class="icon icon-home" alt="Documentation Home"> pytorch_tabnet
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p><span class="caption-text">Contents:</span></p>
<ul class="current">
<li class="toctree-l1 current"><a class="current reference internal" href="#">README</a></li>
<li class="toctree-l1"><a class="reference internal" href="#tabnet-attentive-interpretable-tabular-learning">TabNet : Attentive Interpretable Tabular Learning</a></li>
<li class="toctree-l1"><a class="reference internal" href="#installation">Installation</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#easy-installation">Easy installation</a></li>
<li class="toctree-l2"><a class="reference internal" href="#source-code">Source code</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#cpu-only">CPU only</a></li>
<li class="toctree-l3"><a class="reference internal" href="#gpu">GPU</a></li>
</ul>
</li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="#what-is-new">What is new ?</a></li>
<li class="toctree-l1"><a class="reference internal" href="#contributing">Contributing</a></li>
<li class="toctree-l1"><a class="reference internal" href="#what-problems-does-pytorch-tabnet-handle">What problems does pytorch-tabnet handle?</a></li>
<li class="toctree-l1"><a class="reference internal" href="#how-to-use-it">How to use it?</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#default-eval-metric">Default eval_metric</a></li>
<li class="toctree-l2"><a class="reference internal" href="#custom-evaluation-metrics">Custom evaluation metrics</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="#semi-supervised-pre-training">Semi-supervised pre-training</a></li>
<li class="toctree-l1"><a class="reference internal" href="#data-augmentation-on-the-fly">Data augmentation on the fly</a></li>
<li class="toctree-l1"><a class="reference internal" href="#easy-saving-and-loading">Easy saving and loading</a></li>
<li class="toctree-l1"><a class="reference internal" href="#useful-links">Useful links</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#model-parameters">Model parameters</a></li>
<li class="toctree-l2"><a class="reference internal" href="#fit-parameters">Fit parameters</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="pytorch_tabnet.html">pytorch_tabnet package</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../index.html">pytorch_tabnet</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../index.html" class="icon icon-home"></a> &raquo;</li>
        
      <li>README</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
            
            <a href="../_sources/generated_docs/README.md.txt" rel="nofollow"> View page source</a>
          
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <section id="readme">
<h1>README<a class="headerlink" href="#readme" title="Permalink to this headline">¶</a></h1>
</section>
<section id="tabnet-attentive-interpretable-tabular-learning">
<h1>TabNet : Attentive Interpretable Tabular Learning<a class="headerlink" href="#tabnet-attentive-interpretable-tabular-learning" title="Permalink to this headline">¶</a></h1>
<p>This is a pyTorch implementation of Tabnet (Arik, S. O., &amp; Pfister, T. (2019). TabNet: Attentive Interpretable Tabular Learning. arXiv preprint arXiv:1908.07442.) https://arxiv.org/pdf/1908.07442.pdf. Please note that some different choices have been made over time to improve the library, which can differ from the original paper.</p>
<!--- BADGES: START ---><p><a class="reference external" href="https://circleci.com/gh/dreamquark-ai/tabnet"><img alt="CircleCI" src="https://circleci.com/gh/dreamquark-ai/tabnet.svg?style=svg" /></a></p>
<p><a class="reference external" href="https://badge.fury.io/py/pytorch-tabnet"><img alt="PyPI version" src="https://badge.fury.io/py/pytorch-tabnet.svg" /></a></p>
<p><img alt="PyPI - Downloads" src="https://img.shields.io/pypi/dm/pytorch-tabnet" /></p>
<p><a class="reference external" href="https://pypi.org/project/pytorch-tabnet/"><img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/pytorch-tabnet?logo=pypi&amp;style=flat&amp;color=blue" /></a></p>
<p><a class="reference external" href="https://anaconda.org/conda-forge/pytorch-tabnet"><img alt="Conda - Platform" src="https://img.shields.io/conda/pn/conda-forge/pytorch-tabnet?logo=anaconda&amp;style=flat" /></a></p>
<p><a class="reference external" href="https://anaconda.org/conda-forge/pytorch-tabnet"><img alt="Conda (channel only)" src="https://img.shields.io/conda/vn/conda-forge/pytorch-tabnet?logo=anaconda&amp;style=flat&amp;color=orange" /></a></p>
<p><a class="reference external" href="https://github.com/dreamquark-ai/tabnet/blob/main/LICENSE"><img alt="GitHub - License" src="https://img.shields.io/github/license/dreamquark-ai/tabnet?logo=github&amp;style=flat&amp;color=green" /></a></p>
<!--- BADGES: END ---><p>Any questions ? Want to contribute ? To talk with us ? You can join us on <a class="reference external" href="https://join.slack.com/t/mltooling/shared_invite/zt-fxaj0qk7-SWy2_~EWyhj4x9SD6gbRvg">Slack</a></p>
</section>
<section id="installation">
<h1>Installation<a class="headerlink" href="#installation" title="Permalink to this headline">¶</a></h1>
<section id="easy-installation">
<h2>Easy installation<a class="headerlink" href="#easy-installation" title="Permalink to this headline">¶</a></h2>
<p>You can install using <code class="docutils literal notranslate"><span class="pre">pip</span></code> or <code class="docutils literal notranslate"><span class="pre">conda</span></code> as follows.</p>
<p><strong>with pip</strong></p>
<div class="highlight-sh notranslate"><div class="highlight"><pre><span></span>pip<span class="w"> </span>install<span class="w"> </span>pytorch-tabnet
</pre></div>
</div>
<p><strong>with conda</strong></p>
<div class="highlight-sh notranslate"><div class="highlight"><pre><span></span>conda<span class="w"> </span>install<span class="w"> </span>-c<span class="w"> </span>conda-forge<span class="w"> </span>pytorch-tabnet
</pre></div>
</div>
</section>
<section id="source-code">
<h2>Source code<a class="headerlink" href="#source-code" title="Permalink to this headline">¶</a></h2>
<p>If you want to use it locally within a docker container:</p>
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">git</span> <span class="pre">clone</span> <span class="pre">git&#64;github.com:dreamquark-ai/tabnet.git</span></code></p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">cd</span> <span class="pre">tabnet</span></code> to get inside the repository</p></li>
</ul>
<hr class="docutils" />
<section id="cpu-only">
<h3>CPU only<a class="headerlink" href="#cpu-only" title="Permalink to this headline">¶</a></h3>
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">make</span> <span class="pre">start</span></code> to build and get inside the container</p></li>
</ul>
</section>
<section id="gpu">
<h3>GPU<a class="headerlink" href="#gpu" title="Permalink to this headline">¶</a></h3>
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">make</span> <span class="pre">start-gpu</span></code> to build and get inside the GPU container</p></li>
</ul>
<hr class="docutils" />
<ul class="simple">
<li><p><code class="docutils literal notranslate"><span class="pre">poetry</span> <span class="pre">install</span></code> to install all the dependencies, including jupyter</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">make</span> <span class="pre">notebook</span></code> inside the same terminal. You can then follow the link to a jupyter notebook with tabnet installed.</p></li>
</ul>
</section>
</section>
</section>
<section id="what-is-new">
<h1>What is new ?<a class="headerlink" href="#what-is-new" title="Permalink to this headline">¶</a></h1>
<ul class="simple">
<li><p>from version <strong>&gt; 4.0</strong> attention is now embedding aware. This aims to maintain a good attention mechanism even with a large number of embeddings. It is also now possible to specify attention groups (using <code class="docutils literal notranslate"><span class="pre">grouped_features</span></code>). Attention is now done at the group level and not feature level. This is especially useful if a dataset has a lot of columns coming from one single source of data (example: a text column transformed using TF-IDF).</p></li>
</ul>
</section>
<section id="contributing">
<h1>Contributing<a class="headerlink" href="#contributing" title="Permalink to this headline">¶</a></h1>
<p>When contributing to the TabNet repository, please make sure to first discuss the change you wish to make via a new or already existing issue.</p>
<p>Our commits follow the rules presented <a class="reference external" href="https://www.conventionalcommits.org/en/v1.0.0/">here</a>.</p>
</section>
<section id="what-problems-does-pytorch-tabnet-handle">
<h1>What problems does pytorch-tabnet handle?<a class="headerlink" href="#what-problems-does-pytorch-tabnet-handle" title="Permalink to this headline">¶</a></h1>
<ul class="simple">
<li><p>TabNetClassifier : binary classification and multi-class classification problems</p></li>
<li><p>TabNetRegressor : simple and multi-task regression problems</p></li>
<li><p>TabNetMultiTaskClassifier:  multi-task multi-classification problems</p></li>
</ul>
</section>
<section id="how-to-use-it">
<h1>How to use it?<a class="headerlink" href="#how-to-use-it" title="Permalink to this headline">¶</a></h1>
<p>TabNet is now scikit-compatible, training a TabNetClassifier or TabNetRegressor is really easy.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">pytorch_tabnet.tab_model</span> <span class="kn">import</span> <span class="n">TabNetClassifier</span><span class="p">,</span> <span class="n">TabNetRegressor</span>

<span class="n">clf</span> <span class="o">=</span> <span class="n">TabNetClassifier</span><span class="p">()</span>  <span class="c1">#TabNetRegressor()</span>
<span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span>
  <span class="n">X_train</span><span class="p">,</span> <span class="n">Y_train</span><span class="p">,</span>
  <span class="n">eval_set</span><span class="o">=</span><span class="p">[(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)]</span>
<span class="p">)</span>
<span class="n">preds</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">X_test</span><span class="p">)</span>
</pre></div>
</div>
<p>or for TabNetMultiTaskClassifier :</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">pytorch_tabnet.multitask</span> <span class="kn">import</span> <span class="n">TabNetMultiTaskClassifier</span>
<span class="n">clf</span> <span class="o">=</span> <span class="n">TabNetMultiTaskClassifier</span><span class="p">()</span>
<span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span>
  <span class="n">X_train</span><span class="p">,</span> <span class="n">Y_train</span><span class="p">,</span>
  <span class="n">eval_set</span><span class="o">=</span><span class="p">[(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)]</span>
<span class="p">)</span>
<span class="n">preds</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">predict</span><span class="p">(</span><span class="n">X_test</span><span class="p">)</span>
</pre></div>
</div>
<p>The targets on <code class="docutils literal notranslate"><span class="pre">y_train/y_valid</span></code> should contain a unique type (e.g. they must all be strings or integers).</p>
<section id="default-eval-metric">
<h2>Default eval_metric<a class="headerlink" href="#default-eval-metric" title="Permalink to this headline">¶</a></h2>
<p>A few classic evaluation metrics are implemented (see further below for custom ones):</p>
<ul class="simple">
<li><p>binary classification metrics : ‘auc’, ‘accuracy’, ‘balanced_accuracy’, ‘logloss’</p></li>
<li><p>multiclass classification : ‘accuracy’, ‘balanced_accuracy’, ‘logloss’</p></li>
<li><p>regression: ‘mse’, ‘mae’, ‘rmse’, ‘rmsle’</p></li>
</ul>
<p>Important Note : ‘rmsle’ will automatically clip negative predictions to 0, because the model can predict negative values.
In order to match the given scores, you need to use <code class="docutils literal notranslate"><span class="pre">np.clip(clf.predict(X_predict),</span> <span class="pre">a_min=0,</span> <span class="pre">a_max=None)</span></code> when doing predictions.</p>
</section>
<section id="custom-evaluation-metrics">
<h2>Custom evaluation metrics<a class="headerlink" href="#custom-evaluation-metrics" title="Permalink to this headline">¶</a></h2>
<p>You can create a metric for your specific need. Here is an example for gini score (note that you need to specify whether this metric should be maximized or not):</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">pytorch_tabnet.metrics</span> <span class="kn">import</span> <span class="n">Metric</span>
<span class="kn">from</span> <span class="nn">sklearn.metrics</span> <span class="kn">import</span> <span class="n">roc_auc_score</span>

<span class="k">class</span> <span class="nc">Gini</span><span class="p">(</span><span class="n">Metric</span><span class="p">):</span>
    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_name</span> <span class="o">=</span> <span class="s2">&quot;gini&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_maximize</span> <span class="o">=</span> <span class="kc">True</span>

    <span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">y_true</span><span class="p">,</span> <span class="n">y_score</span><span class="p">):</span>
        <span class="n">auc</span> <span class="o">=</span> <span class="n">roc_auc_score</span><span class="p">(</span><span class="n">y_true</span><span class="p">,</span> <span class="n">y_score</span><span class="p">[:,</span> <span class="mi">1</span><span class="p">])</span>
        <span class="k">return</span> <span class="nb">max</span><span class="p">(</span><span class="mi">2</span><span class="o">*</span><span class="n">auc</span> <span class="o">-</span> <span class="mi">1</span><span class="p">,</span> <span class="mf">0.</span><span class="p">)</span>

<span class="n">clf</span> <span class="o">=</span> <span class="n">TabNetClassifier</span><span class="p">()</span>
<span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span>
  <span class="n">X_train</span><span class="p">,</span> <span class="n">Y_train</span><span class="p">,</span>
  <span class="n">eval_set</span><span class="o">=</span><span class="p">[(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)],</span>
  <span class="n">eval_metric</span><span class="o">=</span><span class="p">[</span><span class="n">Gini</span><span class="p">]</span>
<span class="p">)</span>
</pre></div>
</div>
<p>A specific customization example notebook is available here : https://github.com/dreamquark-ai/tabnet/blob/develop/customizing_example.ipynb</p>
</section>
</section>
<section id="semi-supervised-pre-training">
<h1>Semi-supervised pre-training<a class="headerlink" href="#semi-supervised-pre-training" title="Permalink to this headline">¶</a></h1>
<p>Added later to TabNet’s original paper, semi-supervised pre-training is now available via the class <code class="docutils literal notranslate"><span class="pre">TabNetPretrainer</span></code>:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="c1"># TabNetPretrainer</span>
<span class="n">unsupervised_model</span> <span class="o">=</span> <span class="n">TabNetPretrainer</span><span class="p">(</span>
    <span class="n">optimizer_fn</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">,</span>
    <span class="n">optimizer_params</span><span class="o">=</span><span class="nb">dict</span><span class="p">(</span><span class="n">lr</span><span class="o">=</span><span class="mf">2e-2</span><span class="p">),</span>
    <span class="n">mask_type</span><span class="o">=</span><span class="s1">&#39;entmax&#39;</span> <span class="c1"># &quot;sparsemax&quot;</span>
<span class="p">)</span>

<span class="n">unsupervised_model</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span>
    <span class="n">X_train</span><span class="o">=</span><span class="n">X_train</span><span class="p">,</span>
    <span class="n">eval_set</span><span class="o">=</span><span class="p">[</span><span class="n">X_valid</span><span class="p">],</span>
    <span class="n">pretraining_ratio</span><span class="o">=</span><span class="mf">0.8</span><span class="p">,</span>
<span class="p">)</span>

<span class="n">clf</span> <span class="o">=</span> <span class="n">TabNetClassifier</span><span class="p">(</span>
    <span class="n">optimizer_fn</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">Adam</span><span class="p">,</span>
    <span class="n">optimizer_params</span><span class="o">=</span><span class="nb">dict</span><span class="p">(</span><span class="n">lr</span><span class="o">=</span><span class="mf">2e-2</span><span class="p">),</span>
    <span class="n">scheduler_params</span><span class="o">=</span><span class="p">{</span><span class="s2">&quot;step_size&quot;</span><span class="p">:</span><span class="mi">10</span><span class="p">,</span> <span class="c1"># how to use learning rate scheduler</span>
                      <span class="s2">&quot;gamma&quot;</span><span class="p">:</span><span class="mf">0.9</span><span class="p">},</span>
    <span class="n">scheduler_fn</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">optim</span><span class="o">.</span><span class="n">lr_scheduler</span><span class="o">.</span><span class="n">StepLR</span><span class="p">,</span>
    <span class="n">mask_type</span><span class="o">=</span><span class="s1">&#39;sparsemax&#39;</span> <span class="c1"># This will be overwritten if using pretrain model</span>
<span class="p">)</span>

<span class="n">clf</span><span class="o">.</span><span class="n">fit</span><span class="p">(</span>
    <span class="n">X_train</span><span class="o">=</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="o">=</span><span class="n">y_train</span><span class="p">,</span>
    <span class="n">eval_set</span><span class="o">=</span><span class="p">[(</span><span class="n">X_train</span><span class="p">,</span> <span class="n">y_train</span><span class="p">),</span> <span class="p">(</span><span class="n">X_valid</span><span class="p">,</span> <span class="n">y_valid</span><span class="p">)],</span>
    <span class="n">eval_name</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;train&#39;</span><span class="p">,</span> <span class="s1">&#39;valid&#39;</span><span class="p">],</span>
    <span class="n">eval_metric</span><span class="o">=</span><span class="p">[</span><span class="s1">&#39;auc&#39;</span><span class="p">],</span>
    <span class="n">from_unsupervised</span><span class="o">=</span><span class="n">unsupervised_model</span>
<span class="p">)</span>
</pre></div>
</div>
<p>The loss function has been normalized to be independent of <code class="docutils literal notranslate"><span class="pre">pretraining_ratio</span></code>, <code class="docutils literal notranslate"><span class="pre">batch_size</span></code> and the number of features in the problem.
A self-supervised loss greater than 1 means that your model is reconstructing worse than predicting the mean for each feature, a loss below 1 means that the model is doing better than predicting the mean.</p>
<p>A complete example can be found within the notebook <code class="docutils literal notranslate"><span class="pre">pretraining_example.ipynb</span></code>.</p>
<p>/!\ : current implementation is trying to reconstruct the original inputs, but Batch Normalization applies a random transformation that can’t be deduced by a single line, making the reconstruction harder. Lowering the <code class="docutils literal notranslate"><span class="pre">batch_size</span></code> might make the pretraining easier.</p>
</section>
<section id="data-augmentation-on-the-fly">
<h1>Data augmentation on the fly<a class="headerlink" href="#data-augmentation-on-the-fly" title="Permalink to this headline">¶</a></h1>
<p>It is now possible to apply custom data augmentation pipeline during training.
Templates for ClassificationSMOTE and RegressionSMOTE have been added in <code class="docutils literal notranslate"><span class="pre">pytorch-tabnet/augmentations.py</span></code> and can be used as is.</p>
</section>
<section id="easy-saving-and-loading">
<h1>Easy saving and loading<a class="headerlink" href="#easy-saving-and-loading" title="Permalink to this headline">¶</a></h1>
<p>It’s really easy to save and re-load a trained model, this makes TabNet production ready.</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="c1"># save tabnet model</span>
<span class="n">saving_path_name</span> <span class="o">=</span> <span class="s2">&quot;./tabnet_model_test_1&quot;</span>
<span class="n">saved_filepath</span> <span class="o">=</span> <span class="n">clf</span><span class="o">.</span><span class="n">save_model</span><span class="p">(</span><span class="n">saving_path_name</span><span class="p">)</span>

<span class="c1"># define new model with basic parameters and load state dict weights</span>
<span class="n">loaded_clf</span> <span class="o">=</span> <span class="n">TabNetClassifier</span><span class="p">()</span>
<span class="n">loaded_clf</span><span class="o">.</span><span class="n">load_model</span><span class="p">(</span><span class="n">saved_filepath</span><span class="p">)</span>
</pre></div>
</div>
</section>
<section id="useful-links">
<h1>Useful links<a class="headerlink" href="#useful-links" title="Permalink to this headline">¶</a></h1>
<ul class="simple">
<li><p><a class="reference external" href="https://youtu.be/ysBaZO8YmX8">explanatory video</a></p></li>
<li><p><a class="reference external" href="https://github.com/dreamquark-ai/tabnet/blob/develop/census_example.ipynb">binary classification examples</a></p></li>
<li><p><a class="reference external" href="https://github.com/dreamquark-ai/tabnet/blob/develop/forest_example.ipynb">multi-class classification examples</a></p></li>
<li><p><a class="reference external" href="https://github.com/dreamquark-ai/tabnet/blob/develop/regression_example.ipynb">regression examples</a></p></li>
<li><p><a class="reference external" href="https://github.com/dreamquark-ai/tabnet/blob/develop/multi_regression_example.ipynb">multi-task regression examples</a></p></li>
<li><p><a class="reference external" href="https://www.kaggle.com/optimo/tabnetmultitaskclassifier">multi-task multi-class classification examples</a></p></li>
<li><p><a class="reference external" href="https://www.kaggle.com/c/lish-moa/discussion/201510">kaggle moa 1st place solution using tabnet</a></p></li>
</ul>
<section id="model-parameters">
<h2>Model parameters<a class="headerlink" href="#model-parameters" title="Permalink to this headline">¶</a></h2>
<ul>
<li><p><code class="docutils literal notranslate"><span class="pre">n_d</span></code> : int (default=8)</p>
<p>Width of the decision prediction layer. Bigger values gives more capacity to the model with the risk of overfitting.
Values typically range from 8 to 64.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">n_a</span></code>: int (default=8)</p>
<p>Width of the attention embedding for each mask.
According to the paper n_d=n_a is usually a good choice. (default=8)</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">n_steps</span></code> : int (default=3)</p>
<p>Number of steps in the architecture (usually between 3 and 10)</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">gamma</span></code> : float  (default=1.3)</p>
<p>This is the coefficient for feature reusage in the masks.
A value close to 1 will make mask selection less correlated between layers.
Values range from 1.0 to 2.0.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">cat_idxs</span></code> : list of int (default=[] - Mandatory for embeddings)</p>
<p>List of categorical features indices.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">cat_dims</span></code> : list of int (default=[] - Mandatory for embeddings)</p>
<p>List of categorical features number of modalities (number of unique values for a categorical feature)
/!\ no new modalities can be predicted</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">cat_emb_dim</span></code> : list of int (optional)</p>
<p>List of embeddings size for each categorical features. (default =1)</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">n_independent</span></code> : int  (default=2)</p>
<p>Number of independent Gated Linear Units layers at each step.
Usual values range from 1 to 5.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">n_shared</span></code> : int (default=2)</p>
<p>Number of shared Gated Linear Units at each step
Usual values range from 1 to 5</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">epsilon</span></code> : float  (default 1e-15)</p>
<p>Should be left untouched.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">seed</span></code> : int (default=0)</p>
<p>Random seed for reproducibility</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">momentum</span></code> : float</p>
<p>Momentum for batch normalization, typically ranges from 0.01 to 0.4 (default=0.02)</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">clip_value</span></code> : float (default None)</p>
<p>If a float is given this will clip the gradient at clip_value.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">lambda_sparse</span></code> : float (default = 1e-3)</p>
<p>This is the extra sparsity loss coefficient as proposed in the original paper. The bigger this coefficient is, the sparser your model will be in terms of feature selection. Depending on the difficulty of your problem, reducing this value could help.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">optimizer_fn</span></code> : torch.optim (default=torch.optim.Adam)</p>
<p>Pytorch optimizer function</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">optimizer_params</span></code>: dict (default=dict(lr=2e-2))</p>
<p>Parameters compatible with optimizer_fn used to initialize the optimizer. Since we have Adam as our default optimizer, we use this to define the initial learning rate used for training. As mentioned in the original paper, a large initial learning rate of <code class="docutils literal notranslate"><span class="pre">0.02</span> </code>  with decay is a good option.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">scheduler_fn</span></code> : torch.optim.lr_scheduler (default=None)</p>
<p>Pytorch Scheduler to change learning rates during training.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">scheduler_params</span></code> : dict</p>
<p>Dictionary of parameters to apply to the scheduler_fn. Ex : {”gamma”: 0.95, “step_size”: 10}</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">model_name</span></code> : str (default = ‘DreamQuarkTabNet’)</p>
<p>Name of the model used for saving on disk, you can customize this to easily retrieve and reuse your trained models.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">verbose</span></code> : int (default=1)</p>
<p>Verbosity for notebook plots; set to 1 to see every epoch, 0 to disable.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">device_name</span></code> : str (default=’auto’)
‘cpu’ for cpu training, ‘gpu’ for gpu training, ‘auto’ to automatically detect gpu.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">mask_type:</span> <span class="pre">str</span></code> (default=’sparsemax’)
Either “sparsemax” or “entmax” : this is the masking function to use for selecting features.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">grouped_features:</span> <span class="pre">list</span> <span class="pre">of</span> <span class="pre">list</span> <span class="pre">of</span> <span class="pre">ints</span></code> (default=None)
This allows the model to share its attention across features inside the same group.
This can be especially useful when your preprocessing generates correlated or dependent features: for example, if you use a TF-IDF or a PCA on a text column.
Note that feature importance will be exactly the same between features in the same group.
Please also note that embeddings generated for a categorical variable are always inside the same group.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">n_shared_decoder</span></code> : int (default=1)</p>
<p>Number of shared GLU block in decoder, this is only useful for <code class="docutils literal notranslate"><span class="pre">TabNetPretrainer</span></code>.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">n_indep_decoder</span></code> : int (default=1)</p>
<p>Number of independent GLU block in decoder, this is only useful for <code class="docutils literal notranslate"><span class="pre">TabNetPretrainer</span></code>.</p>
</li>
</ul>
</section>
<section id="fit-parameters">
<h2>Fit parameters<a class="headerlink" href="#fit-parameters" title="Permalink to this headline">¶</a></h2>
<ul>
<li><p><code class="docutils literal notranslate"><span class="pre">X_train</span></code> : np.array or scipy.sparse.csr_matrix</p>
<p>Training features</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">y_train</span></code> : np.array</p>
<p>Training targets</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">eval_set</span></code>: list of tuple</p>
<p>List of eval tuple set (X, y).<br />The last one is used for early stopping</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">eval_name</span></code>: list of str<br />List of eval set names.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">eval_metric</span></code> : list of str<br />List of evaluation metrics.<br />The last metric is used for early stopping.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">max_epochs</span></code> : int (default = 200)</p>
<p>Maximum number of epochs for training.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">patience</span></code> : int (default = 10)</p>
<p>Number of consecutive epochs without improvement before performing early stopping.</p>
<p>If patience is set to 0, then no early stopping will be performed.</p>
<p>Note that if patience is enabled, then best weights from best epoch will automatically be loaded at the end of <code class="docutils literal notranslate"><span class="pre">fit</span></code>.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">weights</span></code> : int or dict (default=0)</p>
<p>/!\ Only for TabNetClassifier
Sampling parameter
0 : no sampling
1 : automated sampling with inverse class occurrences
dict : keys are classes, values are weights for each class</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">loss_fn</span></code> : torch.loss or list of torch.loss</p>
<p>Loss function for training (defaults to MSE for regression and cross entropy for classification).
When using TabNetMultiTaskClassifier you can set a list of same length as number of tasks,
each task will be assigned its own loss function</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">batch_size</span></code> : int (default=1024)</p>
<p>Number of examples per batch. Large batch sizes are recommended.</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">virtual_batch_size</span></code> : int (default=128)</p>
<p>Size of the mini batches used for “Ghost Batch Normalization”.
/!\ <code class="docutils literal notranslate"><span class="pre">virtual_batch_size</span></code> should divide <code class="docutils literal notranslate"><span class="pre">batch_size</span></code></p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">num_workers</span></code> : int (default=0)</p>
<p>Number of workers used in torch.utils.data.DataLoader</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">drop_last</span></code> : bool (default=False)</p>
<p>Whether to drop the last batch if it is not complete during training</p>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">callbacks</span></code> : list of callback function<br />List of custom callbacks</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">pretraining_ratio</span></code> : float</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span>  /!\ TabNetPretrainer Only : Percentage of input features to mask during pretraining.

  Should be between 0 and 1. The bigger the harder the reconstruction task is.
</pre></div>
</div>
</li>
<li><p><code class="docutils literal notranslate"><span class="pre">warm_start</span></code> : bool (default=False)
In order to match scikit-learn API, this is set to False.
It allows fitting the same model twice, starting from a warm start.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">compute_importance</span></code> : bool (default=True)</p>
<p>Whether to compute feature importance</p>
</li>
</ul>
</section>
</section>


           </div>
           
          </div>
          <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="pytorch_tabnet.html" class="btn btn-neutral float-right" title="pytorch_tabnet package" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right"></span></a>
      
      
        <a href="../index.html" class="btn btn-neutral float-left" title="Welcome to pytorch_tabnet’s documentation!" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left"></span> Previous</a>
      
    </div>
  

  <hr/>

  <div role="contentinfo">
    <p>
        
        &copy; Copyright 2019, Dreamquark

    </p>
  </div>
    
    
    
    Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>

        </div>
      </div>

    </section>

  </div>
  

  <script>
      // Activate the Read the Docs theme's sidebar navigation once the DOM is ready.
      jQuery(document).ready(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>