<!-- Auto-generated coverage report produced by LCOV 1.12 (see the page footer).
     Hand edits will be lost the next time the report is regenerated. -->
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <title>LCOV - code analysis - src/caffe/layers/batch_norm_layer.cpp</title>
  <link rel="stylesheet" type="text/css" href="../../../gcov.css">
</head>

<body>

  <!-- Page header: report title, breadcrumb navigation, and the coverage
       summary (line/function hit counts with percentages) plus the legend.
       Class names come from ../../../gcov.css and must not change. -->
  <table width="100%" border="0" cellspacing="0" cellpadding="0">
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>

    <tr>
      <td width="100%">
        <table cellpadding="1" border="0" width="100%">
          <!-- Breadcrumb row doubling as the summary column headings -->
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../../index.html">top level</a> - <a href="index.html">src/caffe/layers</a> - batch_norm_layer.cpp<span style="font-size: 80%;"> (source / <a href="batch_norm_layer.cpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <!-- Line coverage: 2 of 110 instrumented lines hit -->
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">2</td>
            <td class="headerCovTableEntry">110</td>
            <td class="headerCovTableEntryLo">1.8 %</td>
          </tr>
          <!-- Function coverage: 2 of 16 functions hit -->
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:25:26</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">2</td>
            <td class="headerCovTableEntry">16</td>
            <td class="headerCovTableEntryLo">12.5 %</td>
          </tr>
          <!-- Legend for the hit / not-hit highlighting in the listing -->
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">Lines:
              <span class="coverLegendCov">hit</span>
              <span class="coverLegendNoCov">not hit</span>
            </td>
            <td></td>
          </tr>
          <tr><td><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
  </table>

  <!-- Annotated source listing. Each line of the <pre> below carries the
       source line number, the execution count (blank where the line was not
       instrumented), and the HTML-escaped source text; lineCov/lineNoCov
       spans mark hit/unhit lines. This data is generated by LCOV from the
       gcov results — do not hand-edit it. -->
  <table cellpadding=0 cellspacing=0 border=0>
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : #include &lt;algorithm&gt;</a>
<span class="lineNum">       2 </span>            : #include &lt;vector&gt;
<span class="lineNum">       3 </span>            : 
<span class="lineNum">       4 </span>            : #include &quot;caffe/layers/batch_norm_layer.hpp&quot;
<span class="lineNum">       5 </span>            : #include &quot;caffe/util/math_functions.hpp&quot;
<span class="lineNum">       6 </span>            : 
<span class="lineNum">       7 </span>            : namespace caffe {
<span class="lineNum">       8 </span>            : 
<span class="lineNum">       9 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      10 </span><span class="lineNoCov">          0 : void BatchNormLayer&lt;Dtype&gt;::LayerSetUp(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,</span>
<span class="lineNum">      11 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top) {
<span class="lineNum">      12 </span><span class="lineNoCov">          0 :   BatchNormParameter param = this-&gt;layer_param_.batch_norm_param();</span>
<span class="lineNum">      13 </span><span class="lineNoCov">          0 :   moving_average_fraction_ = param.moving_average_fraction();</span>
<span class="lineNum">      14 </span><span class="lineNoCov">          0 :   use_global_stats_ = this-&gt;phase_ == TEST;</span>
<span class="lineNum">      15 </span><span class="lineNoCov">          0 :   if (param.has_use_global_stats())</span>
<span class="lineNum">      16 </span><span class="lineNoCov">          0 :     use_global_stats_ = param.use_global_stats();</span>
<span class="lineNum">      17 </span><span class="lineNoCov">          0 :   if (bottom[0]-&gt;num_axes() == 1)</span>
<span class="lineNum">      18 </span><span class="lineNoCov">          0 :     channels_ = 1;</span>
<span class="lineNum">      19 </span>            :   else
<span class="lineNum">      20 </span><span class="lineNoCov">          0 :     channels_ = bottom[0]-&gt;shape(1);</span>
<span class="lineNum">      21 </span><span class="lineNoCov">          0 :   eps_ = param.eps();</span>
<span class="lineNum">      22 </span><span class="lineNoCov">          0 :   if (this-&gt;blobs_.size() &gt; 0) {</span>
<span class="lineNum">      23 </span><span class="lineNoCov">          0 :     LOG(INFO) &lt;&lt; &quot;Skipping parameter initialization&quot;;</span>
<span class="lineNum">      24 </span>            :   } else {
<span class="lineNum">      25 </span><span class="lineNoCov">          0 :     this-&gt;blobs_.resize(3);</span>
<span class="lineNum">      26 </span>            :     vector&lt;int&gt; sz;
<span class="lineNum">      27 </span><span class="lineNoCov">          0 :     sz.push_back(channels_);</span>
<span class="lineNum">      28 </span><span class="lineNoCov">          0 :     this-&gt;blobs_[0].reset(new Blob&lt;Dtype&gt;(sz));</span>
<span class="lineNum">      29 </span><span class="lineNoCov">          0 :     this-&gt;blobs_[1].reset(new Blob&lt;Dtype&gt;(sz));</span>
<span class="lineNum">      30 </span><span class="lineNoCov">          0 :     sz[0] = 1;</span>
<span class="lineNum">      31 </span><span class="lineNoCov">          0 :     this-&gt;blobs_[2].reset(new Blob&lt;Dtype&gt;(sz));</span>
<span class="lineNum">      32 </span><span class="lineNoCov">          0 :     for (int i = 0; i &lt; 3; ++i) {</span>
<span class="lineNum">      33 </span><span class="lineNoCov">          0 :       caffe_set(this-&gt;blobs_[i]-&gt;count(), Dtype(0),</span>
<span class="lineNum">      34 </span><span class="lineNoCov">          0 :                 this-&gt;blobs_[i]-&gt;mutable_cpu_data());</span>
<span class="lineNum">      35 </span>            :     }
<span class="lineNum">      36 </span>            :   }
<span class="lineNum">      37 </span>            :   // Mask statistics from optimization by setting local learning rates
<span class="lineNum">      38 </span>            :   // for mean, variance, and the bias correction to zero.
<span class="lineNum">      39 </span><span class="lineNoCov">          0 :   for (int i = 0; i &lt; this-&gt;blobs_.size(); ++i) {</span>
<span class="lineNum">      40 </span><span class="lineNoCov">          0 :     if (this-&gt;layer_param_.param_size() == i) {</span>
<span class="lineNum">      41 </span>            :       ParamSpec* fixed_param_spec = this-&gt;layer_param_.add_param();
<span class="lineNum">      42 </span>            :       fixed_param_spec-&gt;set_lr_mult(0.f);
<span class="lineNum">      43 </span>            :     } else {
<span class="lineNum">      44 </span><span class="lineNoCov">          0 :       CHECK_EQ(this-&gt;layer_param_.param(i).lr_mult(), 0.f)</span>
<span class="lineNum">      45 </span>            :           &lt;&lt; &quot;Cannot configure batch normalization statistics as layer &quot;
<span class="lineNum">      46 </span>            :           &lt;&lt; &quot;parameters.&quot;;
<span class="lineNum">      47 </span>            :     }
<span class="lineNum">      48 </span>            :   }
<span class="lineNum">      49 </span><span class="lineNoCov">          0 : }</span>
<span class="lineNum">      50 </span>            : 
<span class="lineNum">      51 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      52 </span><span class="lineNoCov">          0 : void BatchNormLayer&lt;Dtype&gt;::Reshape(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,</span>
<span class="lineNum">      53 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top) {
<span class="lineNum">      54 </span><span class="lineNoCov">          0 :   if (bottom[0]-&gt;num_axes() &gt;= 1)</span>
<span class="lineNum">      55 </span><span class="lineNoCov">          0 :     CHECK_EQ(bottom[0]-&gt;shape(1), channels_);</span>
<span class="lineNum">      56 </span><span class="lineNoCov">          0 :   top[0]-&gt;ReshapeLike(*bottom[0]);</span>
<span class="lineNum">      57 </span>            : 
<span class="lineNum">      58 </span>            :   vector&lt;int&gt; sz;
<span class="lineNum">      59 </span><span class="lineNoCov">          0 :   sz.push_back(channels_);</span>
<span class="lineNum">      60 </span><span class="lineNoCov">          0 :   mean_.Reshape(sz);</span>
<span class="lineNum">      61 </span><span class="lineNoCov">          0 :   variance_.Reshape(sz);</span>
<span class="lineNum">      62 </span><span class="lineNoCov">          0 :   temp_.ReshapeLike(*bottom[0]);</span>
<span class="lineNum">      63 </span><span class="lineNoCov">          0 :   x_norm_.ReshapeLike(*bottom[0]);</span>
<span class="lineNum">      64 </span><span class="lineNoCov">          0 :   sz[0] = bottom[0]-&gt;shape(0);</span>
<span class="lineNum">      65 </span><span class="lineNoCov">          0 :   batch_sum_multiplier_.Reshape(sz);</span>
<span class="lineNum">      66 </span>            : 
<span class="lineNum">      67 </span><span class="lineNoCov">          0 :   int spatial_dim = bottom[0]-&gt;count()/(channels_*bottom[0]-&gt;shape(0));</span>
<span class="lineNum">      68 </span><span class="lineNoCov">          0 :   if (spatial_sum_multiplier_.num_axes() == 0 ||</span>
<span class="lineNum">      69 </span><span class="lineNoCov">          0 :       spatial_sum_multiplier_.shape(0) != spatial_dim) {</span>
<span class="lineNum">      70 </span><span class="lineNoCov">          0 :     sz[0] = spatial_dim;</span>
<span class="lineNum">      71 </span><span class="lineNoCov">          0 :     spatial_sum_multiplier_.Reshape(sz);</span>
<span class="lineNum">      72 </span><span class="lineNoCov">          0 :     Dtype* multiplier_data = spatial_sum_multiplier_.mutable_cpu_data();</span>
<span class="lineNum">      73 </span><span class="lineNoCov">          0 :     caffe_set(spatial_sum_multiplier_.count(), Dtype(1), multiplier_data);</span>
<span class="lineNum">      74 </span>            :   }
<span class="lineNum">      75 </span>            : 
<span class="lineNum">      76 </span><span class="lineNoCov">          0 :   int numbychans = channels_*bottom[0]-&gt;shape(0);</span>
<span class="lineNum">      77 </span><span class="lineNoCov">          0 :   if (num_by_chans_.num_axes() == 0 ||</span>
<span class="lineNum">      78 </span><span class="lineNoCov">          0 :       num_by_chans_.shape(0) != numbychans) {</span>
<span class="lineNum">      79 </span><span class="lineNoCov">          0 :     sz[0] = numbychans;</span>
<span class="lineNum">      80 </span><span class="lineNoCov">          0 :     num_by_chans_.Reshape(sz);</span>
<span class="lineNum">      81 </span><span class="lineNoCov">          0 :     caffe_set(batch_sum_multiplier_.count(), Dtype(1),</span>
<span class="lineNum">      82 </span>            :         batch_sum_multiplier_.mutable_cpu_data());
<span class="lineNum">      83 </span>            :   }
<span class="lineNum">      84 </span><span class="lineNoCov">          0 : }</span>
<span class="lineNum">      85 </span>            : 
<span class="lineNum">      86 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      87 </span><span class="lineNoCov">          0 : void BatchNormLayer&lt;Dtype&gt;::Forward_cpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,</span>
<span class="lineNum">      88 </span>            :     const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top) {
<span class="lineNum">      89 </span><span class="lineNoCov">          0 :   const Dtype* bottom_data = bottom[0]-&gt;cpu_data();</span>
<span class="lineNum">      90 </span><span class="lineNoCov">          0 :   Dtype* top_data = top[0]-&gt;mutable_cpu_data();</span>
<span class="lineNum">      91 </span><span class="lineNoCov">          0 :   int num = bottom[0]-&gt;shape(0);</span>
<span class="lineNum">      92 </span><span class="lineNoCov">          0 :   int spatial_dim = bottom[0]-&gt;count()/(bottom[0]-&gt;shape(0)*channels_);</span>
<span class="lineNum">      93 </span>            : 
<span class="lineNum">      94 </span><span class="lineNoCov">          0 :   if (bottom[0] != top[0]) {</span>
<span class="lineNum">      95 </span><span class="lineNoCov">          0 :     caffe_copy(bottom[0]-&gt;count(), bottom_data, top_data);</span>
<span class="lineNum">      96 </span>            :   }
<span class="lineNum">      97 </span>            : 
<span class="lineNum">      98 </span><span class="lineNoCov">          0 :   if (use_global_stats_) {</span>
<span class="lineNum">      99 </span>            :     // use the stored mean/variance estimates.
<span class="lineNum">     100 </span><span class="lineNoCov">          0 :     const Dtype scale_factor = this-&gt;blobs_[2]-&gt;cpu_data()[0] == 0 ?</span>
<span class="lineNum">     101 </span><span class="lineNoCov">          0 :         0 : 1 / this-&gt;blobs_[2]-&gt;cpu_data()[0];</span>
<span class="lineNum">     102 </span><span class="lineNoCov">          0 :     caffe_cpu_scale(variance_.count(), scale_factor,</span>
<span class="lineNum">     103 </span>            :         this-&gt;blobs_[0]-&gt;cpu_data(), mean_.mutable_cpu_data());
<span class="lineNum">     104 </span><span class="lineNoCov">          0 :     caffe_cpu_scale(variance_.count(), scale_factor,</span>
<span class="lineNum">     105 </span>            :         this-&gt;blobs_[1]-&gt;cpu_data(), variance_.mutable_cpu_data());
<span class="lineNum">     106 </span>            :   } else {
<span class="lineNum">     107 </span>            :     // compute mean
<span class="lineNum">     108 </span><span class="lineNoCov">          0 :     caffe_cpu_gemv&lt;Dtype&gt;(CblasNoTrans, channels_ * num, spatial_dim,</span>
<span class="lineNum">     109 </span>            :         1. / (num * spatial_dim), bottom_data,
<span class="lineNum">     110 </span>            :         spatial_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     111 </span>            :         num_by_chans_.mutable_cpu_data());
<span class="lineNum">     112 </span><span class="lineNoCov">          0 :     caffe_cpu_gemv&lt;Dtype&gt;(CblasTrans, num, channels_, 1.,</span>
<span class="lineNum">     113 </span>            :         num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     114 </span>            :         mean_.mutable_cpu_data());
<span class="lineNum">     115 </span>            :   }
<span class="lineNum">     116 </span>            : 
<span class="lineNum">     117 </span>            :   // subtract mean
<span class="lineNum">     118 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,</span>
<span class="lineNum">     119 </span>            :       batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
<span class="lineNum">     120 </span>            :       num_by_chans_.mutable_cpu_data());
<span class="lineNum">     121 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, channels_ * num,</span>
<span class="lineNum">     122 </span>            :       spatial_dim, 1, -1, num_by_chans_.cpu_data(),
<span class="lineNum">     123 </span>            :       spatial_sum_multiplier_.cpu_data(), 1., top_data);
<span class="lineNum">     124 </span>            : 
<span class="lineNum">     125 </span><span class="lineNoCov">          0 :   if (!use_global_stats_) {</span>
<span class="lineNum">     126 </span>            :     // compute variance using var(X) = E((X-EX)^2)
<span class="lineNum">     127 </span><span class="lineNoCov">          0 :     caffe_sqr&lt;Dtype&gt;(top[0]-&gt;count(), top_data,</span>
<span class="lineNum">     128 </span>            :                      temp_.mutable_cpu_data());  // (X-EX)^2
<span class="lineNum">     129 </span><span class="lineNoCov">          0 :     caffe_cpu_gemv&lt;Dtype&gt;(CblasNoTrans, channels_ * num, spatial_dim,</span>
<span class="lineNum">     130 </span>            :         1. / (num * spatial_dim), temp_.cpu_data(),
<span class="lineNum">     131 </span>            :         spatial_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     132 </span>            :         num_by_chans_.mutable_cpu_data());
<span class="lineNum">     133 </span><span class="lineNoCov">          0 :     caffe_cpu_gemv&lt;Dtype&gt;(CblasTrans, num, channels_, 1.,</span>
<span class="lineNum">     134 </span>            :         num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     135 </span>            :         variance_.mutable_cpu_data());  // E((X_EX)^2)
<span class="lineNum">     136 </span>            : 
<span class="lineNum">     137 </span>            :     // compute and save moving average
<span class="lineNum">     138 </span><span class="lineNoCov">          0 :     this-&gt;blobs_[2]-&gt;mutable_cpu_data()[0] *= moving_average_fraction_;</span>
<span class="lineNum">     139 </span><span class="lineNoCov">          0 :     this-&gt;blobs_[2]-&gt;mutable_cpu_data()[0] += 1;</span>
<span class="lineNum">     140 </span><span class="lineNoCov">          0 :     caffe_cpu_axpby(mean_.count(), Dtype(1), mean_.cpu_data(),</span>
<span class="lineNum">     141 </span>            :         moving_average_fraction_, this-&gt;blobs_[0]-&gt;mutable_cpu_data());
<span class="lineNum">     142 </span><span class="lineNoCov">          0 :     int m = bottom[0]-&gt;count()/channels_;</span>
<span class="lineNum">     143 </span><span class="lineNoCov">          0 :     Dtype bias_correction_factor = m &gt; 1 ? Dtype(m)/(m-1) : 1;</span>
<span class="lineNum">     144 </span><span class="lineNoCov">          0 :     caffe_cpu_axpby(variance_.count(), bias_correction_factor,</span>
<span class="lineNum">     145 </span>            :         variance_.cpu_data(), moving_average_fraction_,
<span class="lineNum">     146 </span>            :         this-&gt;blobs_[1]-&gt;mutable_cpu_data());
<span class="lineNum">     147 </span>            :   }
<span class="lineNum">     148 </span>            : 
<span class="lineNum">     149 </span>            :   // normalize variance
<span class="lineNum">     150 </span><span class="lineNoCov">          0 :   caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data());</span>
<span class="lineNum">     151 </span><span class="lineNoCov">          0 :   caffe_sqrt(variance_.count(), variance_.cpu_data(),</span>
<span class="lineNum">     152 </span>            :              variance_.mutable_cpu_data());
<span class="lineNum">     153 </span>            : 
<span class="lineNum">     154 </span>            :   // replicate variance to input size
<span class="lineNum">     155 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,</span>
<span class="lineNum">     156 </span>            :       batch_sum_multiplier_.cpu_data(), variance_.cpu_data(), 0.,
<span class="lineNum">     157 </span>            :       num_by_chans_.mutable_cpu_data());
<span class="lineNum">     158 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, channels_ * num,</span>
<span class="lineNum">     159 </span>            :       spatial_dim, 1, 1., num_by_chans_.cpu_data(),
<span class="lineNum">     160 </span>            :       spatial_sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data());
<span class="lineNum">     161 </span><span class="lineNoCov">          0 :   caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data);</span>
<span class="lineNum">     162 </span>            :   // TODO(cdoersch): The caching is only needed because later in-place layers
<span class="lineNum">     163 </span>            :   //                 might clobber the data.  Can we skip this if they won't?
<span class="lineNum">     164 </span><span class="lineNoCov">          0 :   caffe_copy(x_norm_.count(), top_data,</span>
<span class="lineNum">     165 </span>            :       x_norm_.mutable_cpu_data());
<span class="lineNum">     166 </span><span class="lineNoCov">          0 : }</span>
<span class="lineNum">     167 </span>            : 
<span class="lineNum">     168 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     169 </span><span class="lineNoCov">          0 : void BatchNormLayer&lt;Dtype&gt;::Backward_cpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top,</span>
<span class="lineNum">     170 </span>            :     const vector&lt;bool&gt;&amp; propagate_down,
<span class="lineNum">     171 </span>            :     const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom) {
<span class="lineNum">     172 </span>            :   const Dtype* top_diff;
<span class="lineNum">     173 </span><span class="lineNoCov">          0 :   if (bottom[0] != top[0]) {</span>
<span class="lineNum">     174 </span><span class="lineNoCov">          0 :     top_diff = top[0]-&gt;cpu_diff();</span>
<span class="lineNum">     175 </span>            :   } else {
<span class="lineNum">     176 </span><span class="lineNoCov">          0 :     caffe_copy(x_norm_.count(), top[0]-&gt;cpu_diff(), x_norm_.mutable_cpu_diff());</span>
<span class="lineNum">     177 </span><span class="lineNoCov">          0 :     top_diff = x_norm_.cpu_diff();</span>
<span class="lineNum">     178 </span>            :   }
<span class="lineNum">     179 </span><span class="lineNoCov">          0 :   Dtype* bottom_diff = bottom[0]-&gt;mutable_cpu_diff();</span>
<span class="lineNum">     180 </span><span class="lineNoCov">          0 :   if (use_global_stats_) {</span>
<span class="lineNum">     181 </span><span class="lineNoCov">          0 :     caffe_div(temp_.count(), top_diff, temp_.cpu_data(), bottom_diff);</span>
<span class="lineNum">     182 </span><span class="lineNoCov">          0 :     return;</span>
<span class="lineNum">     183 </span>            :   }
<span class="lineNum">     184 </span><span class="lineNoCov">          0 :   const Dtype* top_data = x_norm_.cpu_data();</span>
<span class="lineNum">     185 </span><span class="lineNoCov">          0 :   int num = bottom[0]-&gt;shape()[0];</span>
<span class="lineNum">     186 </span><span class="lineNoCov">          0 :   int spatial_dim = bottom[0]-&gt;count()/(bottom[0]-&gt;shape(0)*channels_);</span>
<span class="lineNum">     187 </span>            :   // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
<span class="lineNum">     188 </span>            :   //
<span class="lineNum">     189 </span>            :   // dE(Y)/dX =
<span class="lineNum">     190 </span>            :   //   (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
<span class="lineNum">     191 </span>            :   //     ./ sqrt(var(X) + eps)
<span class="lineNum">     192 </span>            :   //
<span class="lineNum">     193 </span>            :   // where \cdot and ./ are hadamard product and elementwise division,
<span class="lineNum">     194 </span>            :   // respectively, dE/dY is the top diff, and mean/var/sum are all computed
<span class="lineNum">     195 </span>            :   // along all dimensions except the channels dimension.  In the above
<span class="lineNum">     196 </span>            :   // equation, the operations allow for expansion (i.e. broadcast) along all
<span class="lineNum">     197 </span>            :   // dimensions except the channels dimension where required.
<span class="lineNum">     198 </span>            : 
<span class="lineNum">     199 </span>            :   // sum(dE/dY \cdot Y)
<span class="lineNum">     200 </span><span class="lineNoCov">          0 :   caffe_mul(temp_.count(), top_data, top_diff, bottom_diff);</span>
<span class="lineNum">     201 </span><span class="lineNoCov">          0 :   caffe_cpu_gemv&lt;Dtype&gt;(CblasNoTrans, channels_ * num, spatial_dim, 1.,</span>
<span class="lineNum">     202 </span>            :       bottom_diff, spatial_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     203 </span>            :       num_by_chans_.mutable_cpu_data());
<span class="lineNum">     204 </span><span class="lineNoCov">          0 :   caffe_cpu_gemv&lt;Dtype&gt;(CblasTrans, num, channels_, 1.,</span>
<span class="lineNum">     205 </span>            :       num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     206 </span>            :       mean_.mutable_cpu_data());
<span class="lineNum">     207 </span>            : 
<span class="lineNum">     208 </span>            :   // reshape (broadcast) the above
<span class="lineNum">     209 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,</span>
<span class="lineNum">     210 </span>            :       batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
<span class="lineNum">     211 </span>            :       num_by_chans_.mutable_cpu_data());
<span class="lineNum">     212 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, channels_ * num,</span>
<span class="lineNum">     213 </span>            :       spatial_dim, 1, 1., num_by_chans_.cpu_data(),
<span class="lineNum">     214 </span>            :       spatial_sum_multiplier_.cpu_data(), 0., bottom_diff);
<span class="lineNum">     215 </span>            : 
<span class="lineNum">     216 </span>            :   // sum(dE/dY \cdot Y) \cdot Y
<span class="lineNum">     217 </span><span class="lineNoCov">          0 :   caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff);</span>
<span class="lineNum">     218 </span>            : 
<span class="lineNum">     219 </span>            :   // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
<span class="lineNum">     220 </span><span class="lineNoCov">          0 :   caffe_cpu_gemv&lt;Dtype&gt;(CblasNoTrans, channels_ * num, spatial_dim, 1.,</span>
<span class="lineNum">     221 </span>            :       top_diff, spatial_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     222 </span>            :       num_by_chans_.mutable_cpu_data());
<span class="lineNum">     223 </span><span class="lineNoCov">          0 :   caffe_cpu_gemv&lt;Dtype&gt;(CblasTrans, num, channels_, 1.,</span>
<span class="lineNum">     224 </span>            :       num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
<span class="lineNum">     225 </span>            :       mean_.mutable_cpu_data());
<span class="lineNum">     226 </span>            :   // reshape (broadcast) the above to make
<span class="lineNum">     227 </span>            :   // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
<span class="lineNum">     228 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,</span>
<span class="lineNum">     229 </span>            :       batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
<span class="lineNum">     230 </span>            :       num_by_chans_.mutable_cpu_data());
<span class="lineNum">     231 </span><span class="lineNoCov">          0 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num * channels_,</span>
<span class="lineNum">     232 </span>            :       spatial_dim, 1, 1., num_by_chans_.cpu_data(),
<span class="lineNum">     233 </span>            :       spatial_sum_multiplier_.cpu_data(), 1., bottom_diff);
<span class="lineNum">     234 </span>            : 
<span class="lineNum">     235 </span>            :   // dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
<span class="lineNum">     236 </span><span class="lineNoCov">          0 :   caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff,</span>
<span class="lineNum">     237 </span>            :       Dtype(-1. / (num * spatial_dim)), bottom_diff);
<span class="lineNum">     238 </span>            : 
<span class="lineNum">     239 </span>            :   // note: temp_ still contains sqrt(var(X)+eps), computed during the forward
<span class="lineNum">     240 </span>            :   // pass.
<span class="lineNum">     241 </span><span class="lineNoCov">          0 :   caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff);</span>
<span class="lineNum">     242 </span>            : }
<span class="lineNum">     243 </span>            : 
<a name="244"><span class="lineNum">     244 </span>            : </a>
<span class="lineNum">     245 </span>            : #ifdef CPU_ONLY
<span class="lineNum">     246 </span><span class="lineNoCov">          0 : STUB_GPU(BatchNormLayer);</span>
<span class="lineNum">     247 </span>            : #endif
<a name="248"><span class="lineNum">     248 </span>            : </a>
<a name="249"><span class="lineNum">     249 </span>            : INSTANTIATE_CLASS(BatchNormLayer);</a>
<span class="lineNum">     250 </span><span class="lineCov">          3 : REGISTER_LAYER_CLASS(BatchNorm);</span>
<span class="lineNum">     251 </span><span class="lineCov">          3 : }  // namespace caffe</span>
</pre>
      </td>
    </tr>
  </table>
  <br>

  <!-- Report footer: generator attribution.
       Fixed: use https instead of plain http for the external LCOV project
       link, quote all attribute values, and add rel="noopener" since the
       link targets a different browsing context. -->
  <table width="100%" border="0" cellspacing="0" cellpadding="0">
    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="https://ltp.sourceforge.net/coverage/lcov.php" target="_parent" rel="noopener">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
