<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <title>LCOV - code analysis - include/caffe/filler.hpp</title>
  <link rel="stylesheet" type="text/css" href="../../gcov.css">
</head>

<body>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <tr><td class="ruler"><img src="../../glass.png" width=3 height=3 alt=""></td></tr>

    <tr>
      <td width="100%">
        <table cellpadding=1 border=0 width="100%">
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../index.html">top level</a> - <a href="index.html">include/caffe</a> - filler.hpp<span style="font-size: 80%;"> (source / <a href="filler.hpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">32</td>
            <td class="headerCovTableEntry">109</td>
            <td class="headerCovTableEntryLo">29.4 %</td>
          </tr>
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:25:26</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">5</td>
            <td class="headerCovTableEntry">44</td>
            <td class="headerCovTableEntryLo">11.4 %</td>
          </tr>
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">            Lines:
            <span class="coverLegendCov">hit</span>
            <span class="coverLegendNoCov">not hit</span>
</td>
            <td></td>
          </tr>
          <tr><td><img src="../../glass.png" width=3 height=3 alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../glass.png" width=3 height=3 alt=""></td></tr>
  </table>

  <table cellpadding=0 cellspacing=0 border=0>
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : // Fillers are random number generators that fills a blob using the specified</a>
<span class="lineNum">       2 </span>            : // algorithm. The expectation is that they are only going to be used during
<span class="lineNum">       3 </span>            : // initialization time and will not involve any GPUs.
<span class="lineNum">       4 </span>            : 
<span class="lineNum">       5 </span>            : #ifndef CAFFE_FILLER_HPP
<span class="lineNum">       6 </span>            : #define CAFFE_FILLER_HPP
<span class="lineNum">       7 </span>            : 
<span class="lineNum">       8 </span>            : #include &lt;string&gt;
<span class="lineNum">       9 </span>            : 
<span class="lineNum">      10 </span>            : #include &quot;caffe/blob.hpp&quot;
<span class="lineNum">      11 </span>            : #include &quot;caffe/proto/caffe.pb.h&quot;
<span class="lineNum">      12 </span>            : #include &quot;caffe/syncedmem.hpp&quot;
<span class="lineNum">      13 </span>            : #include &quot;caffe/util/math_functions.hpp&quot;
<span class="lineNum">      14 </span>            : 
<span class="lineNum">      15 </span>            : namespace caffe {
<span class="lineNum">      16 </span>            : 
<span class="lineNum">      17 </span>            : /// @brief Fills a Blob with constant or randomly-generated data.
<span class="lineNum">      18 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      19 </span>            : class Filler {
<span class="lineNum">      20 </span>            :  public:
<span class="lineNum">      21 </span><span class="lineCov">          8 :   explicit Filler(const FillerParameter&amp; param) : filler_param_(param) {}</span>
<span class="lineNum">      22 </span><span class="lineCov">          8 :   virtual ~Filler() {}</span>
<span class="lineNum">      23 </span>            :   virtual void Fill(Blob&lt;Dtype&gt;* blob) = 0;
<span class="lineNum">      24 </span>            :  protected:
<span class="lineNum">      25 </span>            :   FillerParameter filler_param_;
<span class="lineNum">      26 </span>            : };  // class Filler
<span class="lineNum">      27 </span>            : 
<span class="lineNum">      28 </span>            : 
<a name="29"><span class="lineNum">      29 </span>            : /// @brief Fills a Blob with constant values @f$ x = 0 @f$.</a>
<span class="lineNum">      30 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      31 </span><span class="lineCov">          8 : class ConstantFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">      32 </span>            :  public:
<a name="33"><span class="lineNum">      33 </span>            :   explicit ConstantFiller(const FillerParameter&amp; param)</a>
<span class="lineNum">      34 </span><span class="lineCov">          4 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">      35 </span><span class="lineCov">          4 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">      36 </span><span class="lineCov">          4 :     Dtype* data = blob-&gt;mutable_cpu_data();</span>
<span class="lineNum">      37 </span>            :     const int count = blob-&gt;count();
<span class="lineNum">      38 </span><span class="lineNoCov">          0 :     const Dtype value = this-&gt;filler_param_.value();</span>
<span class="lineNum">      39 </span><span class="lineCov">          8 :     CHECK(count);</span>
<span class="lineNum">      40 </span><span class="lineCov">       1164 :     for (int i = 0; i &lt; count; ++i) {</span>
<span class="lineNum">      41 </span><span class="lineCov">        580 :       data[i] = value;</span>
<span class="lineNum">      42 </span>            :     }
<span class="lineNum">      43 </span><span class="lineCov">          4 :     CHECK_EQ(this-&gt;filler_param_.sparse(), -1)</span>
<span class="lineNum">      44 </span>            :          &lt;&lt; &quot;Sparsity not supported by this Filler.&quot;;
<span class="lineNum">      45 </span><span class="lineCov">          4 :   }</span>
<span class="lineNum">      46 </span>            : };
<span class="lineNum">      47 </span>            : 
<a name="48"><span class="lineNum">      48 </span>            : /// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$.</a>
<span class="lineNum">      49 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      50 </span><span class="lineNoCov">          0 : class UniformFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">      51 </span>            :  public:
<a name="52"><span class="lineNum">      52 </span>            :   explicit UniformFiller(const FillerParameter&amp; param)</a>
<span class="lineNum">      53 </span><span class="lineNoCov">          0 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">      54 </span><span class="lineNoCov">          0 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">      55 </span><span class="lineNoCov">          0 :     CHECK(blob-&gt;count());</span>
<span class="lineNum">      56 </span><span class="lineNoCov">          0 :     caffe_rng_uniform&lt;Dtype&gt;(blob-&gt;count(), Dtype(this-&gt;filler_param_.min()),</span>
<span class="lineNum">      57 </span>            :         Dtype(this-&gt;filler_param_.max()), blob-&gt;mutable_cpu_data());
<span class="lineNum">      58 </span><span class="lineNoCov">          0 :     CHECK_EQ(this-&gt;filler_param_.sparse(), -1)</span>
<span class="lineNum">      59 </span>            :          &lt;&lt; &quot;Sparsity not supported by this Filler.&quot;;
<span class="lineNum">      60 </span><span class="lineNoCov">          0 :   }</span>
<span class="lineNum">      61 </span>            : };
<span class="lineNum">      62 </span>            : 
<a name="63"><span class="lineNum">      63 </span>            : /// @brief Fills a Blob with Gaussian-distributed values @f$ x \sim N(\mu, \sigma^2) @f$.</a>
<span class="lineNum">      64 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      65 </span><span class="lineNoCov">          0 : class GaussianFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">      66 </span>            :  public:
<span class="lineNum">      67 </span>            :   explicit GaussianFiller(const FillerParameter&amp; param)
<span class="lineNum">      68 </span><span class="lineNoCov">          0 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">      69 </span><span class="lineNoCov">          0 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">      70 </span><span class="lineNoCov">          0 :     Dtype* data = blob-&gt;mutable_cpu_data();</span>
<span class="lineNum">      71 </span><span class="lineNoCov">          0 :     CHECK(blob-&gt;count());</span>
<span class="lineNum">      72 </span><span class="lineNoCov">          0 :     caffe_rng_gaussian&lt;Dtype&gt;(blob-&gt;count(), Dtype(this-&gt;filler_param_.mean()),</span>
<span class="lineNum">      73 </span>            :         Dtype(this-&gt;filler_param_.std()), blob-&gt;mutable_cpu_data());
<span class="lineNum">      74 </span>            :     int sparse = this-&gt;filler_param_.sparse();
<span class="lineNum">      75 </span><span class="lineNoCov">          0 :     CHECK_GE(sparse, -1);</span>
<span class="lineNum">      76 </span><span class="lineNoCov">          0 :     if (sparse &gt;= 0) {</span>
<span class="lineNum">      77 </span>            :       // Sparse initialization is implemented for &quot;weight&quot; blobs; i.e. matrices.
<span class="lineNum">      78 </span>            :       // These have num == channels == 1; width is number of inputs; height is
<span class="lineNum">      79 </span>            :       // number of outputs.  The 'sparse' variable specifies the mean number
<span class="lineNum">      80 </span>            :       // of non-zero input weights for a given output.
<span class="lineNum">      81 </span><span class="lineNoCov">          0 :       CHECK_GE(blob-&gt;num_axes(), 1);</span>
<span class="lineNum">      82 </span>            :       const int num_outputs = blob-&gt;shape(0);
<span class="lineNum">      83 </span><span class="lineNoCov">          0 :       Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs);</span>
<span class="lineNum">      84 </span><span class="lineNoCov">          0 :       rand_vec_.reset(new SyncedMemory(blob-&gt;count() * sizeof(int)));</span>
<span class="lineNum">      85 </span><span class="lineNoCov">          0 :       int* mask = reinterpret_cast&lt;int*&gt;(rand_vec_-&gt;mutable_cpu_data());</span>
<span class="lineNum">      86 </span><span class="lineNoCov">          0 :       caffe_rng_bernoulli(blob-&gt;count(), non_zero_probability, mask);</span>
<span class="lineNum">      87 </span><span class="lineNoCov">          0 :       for (int i = 0; i &lt; blob-&gt;count(); ++i) {</span>
<span class="lineNum">      88 </span><span class="lineNoCov">          0 :         data[i] *= mask[i];</span>
<span class="lineNum">      89 </span>            :       }
<span class="lineNum">      90 </span>            :     }
<span class="lineNum">      91 </span><span class="lineNoCov">          0 :   }</span>
<span class="lineNum">      92 </span>            : 
<span class="lineNum">      93 </span>            :  protected:
<span class="lineNum">      94 </span>            :   shared_ptr&lt;SyncedMemory&gt; rand_vec_;
<span class="lineNum">      95 </span>            : };
<span class="lineNum">      96 </span>            : 
<span class="lineNum">      97 </span>            : /** @brief Fills a Blob with values @f$ x \in [0, 1] @f$
<span class="lineNum">      98 </span>            :  *         such that @f$ \forall i \sum_j x_{ij} = 1 @f$.
<a name="99"><span class="lineNum">      99 </span>            :  */</a>
<span class="lineNum">     100 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     101 </span><span class="lineNoCov">          0 : class PositiveUnitballFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">     102 </span>            :  public:
<span class="lineNum">     103 </span>            :   explicit PositiveUnitballFiller(const FillerParameter&amp; param)
<span class="lineNum">     104 </span><span class="lineNoCov">          0 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">     105 </span><span class="lineNoCov">          0 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">     106 </span><span class="lineNoCov">          0 :     Dtype* data = blob-&gt;mutable_cpu_data();</span>
<span class="lineNum">     107 </span>            :     DCHECK(blob-&gt;count());
<span class="lineNum">     108 </span><span class="lineNoCov">          0 :     caffe_rng_uniform&lt;Dtype&gt;(blob-&gt;count(), 0, 1, blob-&gt;mutable_cpu_data());</span>
<span class="lineNum">     109 </span>            :     // We expect the filler to not be called very frequently, so we will
<span class="lineNum">     110 </span>            :     // just use a simple implementation
<span class="lineNum">     111 </span><span class="lineNoCov">          0 :     int dim = blob-&gt;count() / blob-&gt;shape(0);</span>
<span class="lineNum">     112 </span><span class="lineNoCov">          0 :     CHECK(dim);</span>
<span class="lineNum">     113 </span><span class="lineNoCov">          0 :     for (int i = 0; i &lt; blob-&gt;shape(0); ++i) {</span>
<span class="lineNum">     114 </span>            :       Dtype sum = 0;
<span class="lineNum">     115 </span><span class="lineNoCov">          0 :       for (int j = 0; j &lt; dim; ++j) {</span>
<span class="lineNum">     116 </span><span class="lineNoCov">          0 :         sum += data[i * dim + j];</span>
<span class="lineNum">     117 </span>            :       }
<span class="lineNum">     118 </span><span class="lineNoCov">          0 :       for (int j = 0; j &lt; dim; ++j) {</span>
<span class="lineNum">     119 </span><span class="lineNoCov">          0 :         data[i * dim + j] /= sum;</span>
<span class="lineNum">     120 </span>            :       }
<span class="lineNum">     121 </span>            :     }
<span class="lineNum">     122 </span><span class="lineNoCov">          0 :     CHECK_EQ(this-&gt;filler_param_.sparse(), -1)</span>
<span class="lineNum">     123 </span>            :          &lt;&lt; &quot;Sparsity not supported by this Filler.&quot;;
<span class="lineNum">     124 </span><span class="lineNoCov">          0 :   }</span>
<span class="lineNum">     125 </span>            : };
<span class="lineNum">     126 </span>            : 
<span class="lineNum">     127 </span>            : /**
<span class="lineNum">     128 </span>            :  * @brief Fills a Blob with values @f$ x \sim U(-a, +a) @f$ where @f$ a @f$ is
<span class="lineNum">     129 </span>            :  *        set inversely proportional to number of incoming nodes, outgoing
<span class="lineNum">     130 </span>            :  *        nodes, or their average.
<span class="lineNum">     131 </span>            :  *
<span class="lineNum">     132 </span>            :  * A Filler based on the paper [Bengio and Glorot 2010]: Understanding
<span class="lineNum">     133 </span>            :  * the difficulty of training deep feedforward neural networks.
<span class="lineNum">     134 </span>            :  *
<span class="lineNum">     135 </span>            :  * It fills the incoming matrix by randomly sampling uniform data from [-scale,
<span class="lineNum">     136 </span>            :  * scale] where scale = sqrt(3 / n) where n is the fan_in, fan_out, or their
<span class="lineNum">     137 </span>            :  * average, depending on the variance_norm option. You should make sure the
<span class="lineNum">     138 </span>            :  * input blob has shape (num, a, b, c) where a * b * c = fan_in and num * b * c
<span class="lineNum">     139 </span>            :  * = fan_out. Note that this is currently not the case for inner product layers.
<span class="lineNum">     140 </span>            :  *
<span class="lineNum">     141 </span>            :  * TODO(dox): make notation in above comment consistent with rest &amp; use LaTeX.
<a name="142"><span class="lineNum">     142 </span>            :  */</a>
<span class="lineNum">     143 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     144 </span><span class="lineCov">          8 : class XavierFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">     145 </span>            :  public:
<span class="lineNum">     146 </span>            :   explicit XavierFiller(const FillerParameter&amp; param)
<span class="lineNum">     147 </span><span class="lineCov">          4 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">     148 </span><span class="lineCov">          4 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">     149 </span><span class="lineCov">          8 :     CHECK(blob-&gt;count());</span>
<span class="lineNum">     150 </span><span class="lineCov">          4 :     int fan_in = blob-&gt;count() / blob-&gt;shape(0);</span>
<span class="lineNum">     151 </span>            :     // Compatibility with ND blobs
<span class="lineNum">     152 </span>            :     int fan_out = blob-&gt;num_axes() &gt; 1 ?
<span class="lineNum">     153 </span>            :                   blob-&gt;count() / blob-&gt;shape(1) :
<span class="lineNum">     154 </span><span class="lineCov">          8 :                   blob-&gt;count();</span>
<span class="lineNum">     155 </span><span class="lineCov">          4 :     Dtype n = fan_in;  // default to fan_in</span>
<span class="lineNum">     156 </span><span class="lineCov">          4 :     if (this-&gt;filler_param_.variance_norm() ==</span>
<span class="lineNum">     157 </span>            :         FillerParameter_VarianceNorm_AVERAGE) {
<span class="lineNum">     158 </span><span class="lineNoCov">          0 :       n = (fan_in + fan_out) / Dtype(2);</span>
<span class="lineNum">     159 </span><span class="lineCov">          4 :     } else if (this-&gt;filler_param_.variance_norm() ==</span>
<span class="lineNum">     160 </span>            :         FillerParameter_VarianceNorm_FAN_OUT) {
<span class="lineNum">     161 </span><span class="lineNoCov">          0 :       n = fan_out;</span>
<span class="lineNum">     162 </span>            :     }
<span class="lineNum">     163 </span><span class="lineCov">          4 :     Dtype scale = sqrt(Dtype(3) / n);</span>
<span class="lineNum">     164 </span><span class="lineCov">          4 :     caffe_rng_uniform&lt;Dtype&gt;(blob-&gt;count(), -scale, scale,</span>
<span class="lineNum">     165 </span>            :         blob-&gt;mutable_cpu_data());
<span class="lineNum">     166 </span><span class="lineCov">          4 :     CHECK_EQ(this-&gt;filler_param_.sparse(), -1)</span>
<span class="lineNum">     167 </span>            :          &lt;&lt; &quot;Sparsity not supported by this Filler.&quot;;
<span class="lineNum">     168 </span><span class="lineCov">          4 :   }</span>
<span class="lineNum">     169 </span>            : };
<span class="lineNum">     170 </span>            : 
<span class="lineNum">     171 </span>            : /**
<span class="lineNum">     172 </span>            :  * @brief Fills a Blob with values @f$ x \sim N(0, \sigma^2) @f$ where
<span class="lineNum">     173 </span>            :  *        @f$ \sigma^2 @f$ is set inversely proportional to number of incoming
<span class="lineNum">     174 </span>            :  *        nodes, outgoing nodes, or their average.
<span class="lineNum">     175 </span>            :  *
<span class="lineNum">     176 </span>            :  * A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically
<span class="lineNum">     177 </span>            :  * accounts for ReLU nonlinearities.
<span class="lineNum">     178 </span>            :  *
<span class="lineNum">     179 </span>            :  * Aside: for another perspective on the scaling factor, see the derivation of
<span class="lineNum">     180 </span>            :  * [Saxe, McClelland, and Ganguli 2013 (v3)].
<span class="lineNum">     181 </span>            :  *
<span class="lineNum">     182 </span>            :  * It fills the incoming matrix by randomly sampling Gaussian data with std =
<span class="lineNum">     183 </span>            :  * sqrt(2 / n) where n is the fan_in, fan_out, or their average, depending on
<span class="lineNum">     184 </span>            :  * the variance_norm option. You should make sure the input blob has shape (num,
<span class="lineNum">     185 </span>            :  * a, b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this
<span class="lineNum">     186 </span>            :  * is currently not the case for inner product layers.
<a name="187"><span class="lineNum">     187 </span>            :  */</a>
<span class="lineNum">     188 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     189 </span><span class="lineNoCov">          0 : class MSRAFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">     190 </span>            :  public:
<span class="lineNum">     191 </span>            :   explicit MSRAFiller(const FillerParameter&amp; param)
<span class="lineNum">     192 </span><span class="lineNoCov">          0 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">     193 </span><span class="lineNoCov">          0 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">     194 </span><span class="lineNoCov">          0 :     CHECK(blob-&gt;count());</span>
<span class="lineNum">     195 </span><span class="lineNoCov">          0 :     int fan_in = blob-&gt;count() / blob-&gt;shape(0);</span>
<span class="lineNum">     196 </span>            :     // Compatibility with ND blobs
<span class="lineNum">     197 </span>            :     int fan_out = blob-&gt;num_axes() &gt; 1 ?
<span class="lineNum">     198 </span>            :                   blob-&gt;count() / blob-&gt;shape(1) :
<span class="lineNum">     199 </span><span class="lineNoCov">          0 :                   blob-&gt;count();</span>
<span class="lineNum">     200 </span><span class="lineNoCov">          0 :     Dtype n = fan_in;  // default to fan_in</span>
<span class="lineNum">     201 </span><span class="lineNoCov">          0 :     if (this-&gt;filler_param_.variance_norm() ==</span>
<span class="lineNum">     202 </span>            :         FillerParameter_VarianceNorm_AVERAGE) {
<span class="lineNum">     203 </span><span class="lineNoCov">          0 :       n = (fan_in + fan_out) / Dtype(2);</span>
<span class="lineNum">     204 </span><span class="lineNoCov">          0 :     } else if (this-&gt;filler_param_.variance_norm() ==</span>
<span class="lineNum">     205 </span>            :         FillerParameter_VarianceNorm_FAN_OUT) {
<span class="lineNum">     206 </span><span class="lineNoCov">          0 :       n = fan_out;</span>
<span class="lineNum">     207 </span>            :     }
<span class="lineNum">     208 </span><span class="lineNoCov">          0 :     Dtype std = sqrt(Dtype(2) / n);</span>
<span class="lineNum">     209 </span><span class="lineNoCov">          0 :     caffe_rng_gaussian&lt;Dtype&gt;(blob-&gt;count(), Dtype(0), std,</span>
<span class="lineNum">     210 </span>            :         blob-&gt;mutable_cpu_data());
<span class="lineNum">     211 </span><span class="lineNoCov">          0 :     CHECK_EQ(this-&gt;filler_param_.sparse(), -1)</span>
<span class="lineNum">     212 </span>            :          &lt;&lt; &quot;Sparsity not supported by this Filler.&quot;;
<span class="lineNum">     213 </span><span class="lineNoCov">          0 :   }</span>
<span class="lineNum">     214 </span>            : };
<span class="lineNum">     215 </span>            : 
<span class="lineNum">     216 </span>            : /*!
<span class="lineNum">     217 </span>            : @brief Fills a Blob with coefficients for bilinear interpolation.
<span class="lineNum">     218 </span>            : 
<span class="lineNum">     219 </span>            : A common use case is with the DeconvolutionLayer acting as upsampling.
<span class="lineNum">     220 </span>            : You can upsample a feature map with shape of (B, C, H, W) by any integer factor
<span class="lineNum">     221 </span>            : using the following proto.
<span class="lineNum">     222 </span>            : \code
<span class="lineNum">     223 </span>            : layer {
<span class="lineNum">     224 </span>            :   name: &quot;upsample&quot;, type: &quot;Deconvolution&quot;
<span class="lineNum">     225 </span>            :   bottom: &quot;{{bottom_name}}&quot; top: &quot;{{top_name}}&quot;
<span class="lineNum">     226 </span>            :   convolution_param {
<span class="lineNum">     227 </span>            :     kernel_size: {{2 * factor - factor % 2}} stride: {{factor}}
<span class="lineNum">     228 </span>            :     num_output: {{C}} group: {{C}}
<span class="lineNum">     229 </span>            :     pad: {{ceil((factor - 1) / 2.)}}
<span class="lineNum">     230 </span>            :     weight_filler: { type: &quot;bilinear&quot; } bias_term: false
<span class="lineNum">     231 </span>            :   }
<span class="lineNum">     232 </span>            :   param { lr_mult: 0 decay_mult: 0 }
<span class="lineNum">     233 </span>            : }
<span class="lineNum">     234 </span>            : \endcode
<span class="lineNum">     235 </span>            : Please use this by replacing `{{}}` with your values. By specifying
<span class="lineNum">     236 </span>            : `num_output: {{C}} group: {{C}}`, it behaves as
<span class="lineNum">     237 </span>            : channel-wise convolution. The filter shape of this deconvolution layer will be
<span class="lineNum">     238 </span>            : (C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K)
<span class="lineNum">     239 </span>            : interpolation kernel for every channel of the filter identically. The resulting
<span class="lineNum">     240 </span>            : shape of the top feature map will be (B, C, factor * H, factor * W).
<span class="lineNum">     241 </span>            : Note that the learning rate and the
<span class="lineNum">     242 </span>            : weight decay are set to 0 in order to keep coefficient values of bilinear
<span class="lineNum">     243 </span>            : interpolation unchanged during training. If you apply this to an image, this
<span class="lineNum">     244 </span>            : operation is equivalent to the following call in Python with Scikit.Image.
<span class="lineNum">     245 </span>            : \code{.py}
<span class="lineNum">     246 </span>            : out = skimage.transform.rescale(img, factor, mode='constant', cval=0)
<span class="lineNum">     247 </span>            : \endcode
<a name="248"><span class="lineNum">     248 </span>            :  */</a>
<span class="lineNum">     249 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     250 </span><span class="lineNoCov">          0 : class BilinearFiller : public Filler&lt;Dtype&gt; {</span>
<span class="lineNum">     251 </span>            :  public:
<span class="lineNum">     252 </span>            :   explicit BilinearFiller(const FillerParameter&amp; param)
<span class="lineNum">     253 </span><span class="lineNoCov">          0 :       : Filler&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">     254 </span><span class="lineNoCov">          0 :   virtual void Fill(Blob&lt;Dtype&gt;* blob) {</span>
<span class="lineNum">     255 </span><span class="lineNoCov">          0 :     CHECK_EQ(blob-&gt;num_axes(), 4) &lt;&lt; &quot;Blob must be 4 dim.&quot;;</span>
<span class="lineNum">     256 </span><span class="lineNoCov">          0 :     CHECK_EQ(blob-&gt;width(), blob-&gt;height()) &lt;&lt; &quot;Filter must be square&quot;;</span>
<span class="lineNum">     257 </span><span class="lineNoCov">          0 :     Dtype* data = blob-&gt;mutable_cpu_data();</span>
<span class="lineNum">     258 </span><span class="lineNoCov">          0 :     int f = ceil(blob-&gt;width() / 2.);</span>
<span class="lineNum">     259 </span><span class="lineNoCov">          0 :     Dtype c = (blob-&gt;width() - 1) / (2. * f);</span>
<span class="lineNum">     260 </span><span class="lineNoCov">          0 :     for (int i = 0; i &lt; blob-&gt;count(); ++i) {</span>
<span class="lineNum">     261 </span><span class="lineNoCov">          0 :       Dtype x = i % blob-&gt;width();</span>
<span class="lineNum">     262 </span><span class="lineNoCov">          0 :       Dtype y = (i / blob-&gt;width()) % blob-&gt;height();</span>
<span class="lineNum">     263 </span><span class="lineNoCov">          0 :       data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c));</span>
<span class="lineNum">     264 </span>            :     }
<span class="lineNum">     265 </span><span class="lineNoCov">          0 :     CHECK_EQ(this-&gt;filler_param_.sparse(), -1)</span>
<span class="lineNum">     266 </span>            :          &lt;&lt; &quot;Sparsity not supported by this Filler.&quot;;
<span class="lineNum">     267 </span><span class="lineNoCov">          0 :   }</span>
<span class="lineNum">     268 </span>            : };
<span class="lineNum">     269 </span>            : 
<span class="lineNum">     270 </span>            : /**
<span class="lineNum">     271 </span>            :  * @brief Get a specific filler from the specification given in FillerParameter.
<span class="lineNum">     272 </span>            :  *
<span class="lineNum">     273 </span>            :  * Ideally this would be replaced by a factory pattern, but we will leave it
<span class="lineNum">     274 </span>            :  * this way for now.
<a name="275"><span class="lineNum">     275 </span>            :  */</a>
<span class="lineNum">     276 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     277 </span><span class="lineCov">          8 : Filler&lt;Dtype&gt;* GetFiller(const FillerParameter&amp; param) {</span>
<span class="lineNum">     278 </span>            :   const std::string&amp; type = param.type();
<span class="lineNum">     279 </span><span class="lineCov">          8 :   if (type == &quot;constant&quot;) {</span>
<span class="lineNum">     280 </span><span class="lineCov">          8 :     return new ConstantFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     281 </span><span class="lineCov">          4 :   } else if (type == &quot;gaussian&quot;) {</span>
<span class="lineNum">     282 </span><span class="lineNoCov">          0 :     return new GaussianFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     283 </span><span class="lineCov">          4 :   } else if (type == &quot;positive_unitball&quot;) {</span>
<span class="lineNum">     284 </span><span class="lineNoCov">          0 :     return new PositiveUnitballFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     285 </span><span class="lineCov">          4 :   } else if (type == &quot;uniform&quot;) {</span>
<span class="lineNum">     286 </span><span class="lineNoCov">          0 :     return new UniformFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     287 </span><span class="lineCov">          4 :   } else if (type == &quot;xavier&quot;) {</span>
<span class="lineNum">     288 </span><span class="lineCov">          8 :     return new XavierFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     289 </span><span class="lineNoCov">          0 :   } else if (type == &quot;msra&quot;) {</span>
<span class="lineNum">     290 </span><span class="lineNoCov">          0 :     return new MSRAFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     291 </span><span class="lineNoCov">          0 :   } else if (type == &quot;bilinear&quot;) {</span>
<span class="lineNum">     292 </span><span class="lineNoCov">          0 :     return new BilinearFiller&lt;Dtype&gt;(param);</span>
<span class="lineNum">     293 </span>            :   } else {
<span class="lineNum">     294 </span><span class="lineNoCov">          0 :     CHECK(false) &lt;&lt; &quot;Unknown filler name: &quot; &lt;&lt; param.type();</span>
<span class="lineNum">     295 </span>            :   }
<span class="lineNum">     296 </span>            :   return (Filler&lt;Dtype&gt;*)(NULL);
<span class="lineNum">     297 </span>            : }
<span class="lineNum">     298 </span>            : 
<span class="lineNum">     299 </span>            : }  // namespace caffe
<span class="lineNum">     300 </span>            : 
<span class="lineNum">     301 </span>            : #endif  // CAFFE_FILLER_HPP
</pre>
      </td>
    </tr>
  </table>
  <br>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="ruler"><img src="../../glass.png" width=3 height=3 alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="http://ltp.sourceforge.net/coverage/lcov.php" target="_parent">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
