<!doctype html>

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <title>LCOV - code analysis - src/caffe/layers/base_conv_layer.cpp</title>
  <link rel="stylesheet" type="text/css" href="../../../gcov.css">
</head>

<body>

  <table width="100%" border="0" cellspacing="0" cellpadding="0">
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>

    <tr>
      <td width="100%">
        <table cellpadding="1" border="0" width="100%">
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../../index.html">top level</a> - <a href="index.html">src/caffe/layers</a> - base_conv_layer.cpp<span style="font-size: 80%;"> (source / <a href="base_conv_layer.cpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">148</td>
            <td class="headerCovTableEntry">192</td>
            <td class="headerCovTableEntryMed">77.1 %</td>
          </tr>
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:50:33</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">8</td>
            <td class="headerCovTableEntry">15</td>
            <td class="headerCovTableEntryLo">53.3 %</td>
          </tr>
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">Lines:
              <span class="coverLegendCov">hit</span>
              <span class="coverLegendNoCov">not hit</span>
            </td>
            <td></td>
          </tr>
          <tr><td><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
  </table>

  <table cellpadding="0" cellspacing="0" border="0">
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : #include &lt;algorithm&gt;</a>
<span class="lineNum">       2 </span>            : #include &lt;vector&gt;
<span class="lineNum">       3 </span>            : 
<span class="lineNum">       4 </span>            : #include &quot;caffe/filler.hpp&quot;
<span class="lineNum">       5 </span>            : #include &quot;caffe/layers/base_conv_layer.hpp&quot;
<span class="lineNum">       6 </span>            : #include &quot;caffe/util/im2col.hpp&quot;
<span class="lineNum">       7 </span>            : #include &quot;caffe/util/math_functions.hpp&quot;
<span class="lineNum">       8 </span>            : 
<span class="lineNum">       9 </span>            : namespace caffe {
<span class="lineNum">      10 </span>            : 
<span class="lineNum">      11 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      12 </span><span class="lineCov">          4 : void BaseConvolutionLayer&lt;Dtype&gt;::LayerSetUp(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,</span>
<span class="lineNum">      13 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top) {
<span class="lineNum">      14 </span>            :   // Configure the kernel size, padding, stride, and inputs.
<span class="lineNum">      15 </span><span class="lineCov">          8 :   ConvolutionParameter conv_param = this-&gt;layer_param_.convolution_param();</span>
<span class="lineNum">      16 </span><span class="lineCov">          4 :   force_nd_im2col_ = conv_param.force_nd_im2col();</span>
<span class="lineNum">      17 </span><span class="lineCov">          4 :   channel_axis_ = bottom[0]-&gt;CanonicalAxisIndex(conv_param.axis());</span>
<span class="lineNum">      18 </span><span class="lineCov">          4 :   const int first_spatial_axis = channel_axis_ + 1;</span>
<span class="lineNum">      19 </span><span class="lineCov">          4 :   const int num_axes = bottom[0]-&gt;num_axes();</span>
<span class="lineNum">      20 </span><span class="lineCov">          4 :   num_spatial_axes_ = num_axes - first_spatial_axis;</span>
<span class="lineNum">      21 </span><span class="lineCov">          4 :   CHECK_GE(num_spatial_axes_, 0);</span>
<span class="lineNum">      22 </span><span class="lineCov">          8 :   vector&lt;int&gt; spatial_dim_blob_shape(1, std::max(num_spatial_axes_, 1));</span>
<span class="lineNum">      23 </span>            :   // Setup filter kernel dimensions (kernel_shape_).
<span class="lineNum">      24 </span><span class="lineCov">          4 :   kernel_shape_.Reshape(spatial_dim_blob_shape);</span>
<span class="lineNum">      25 </span><span class="lineCov">          4 :   int* kernel_shape_data = kernel_shape_.mutable_cpu_data();</span>
<span class="lineNum">      26 </span><span class="lineCov">          8 :   if (conv_param.has_kernel_h() || conv_param.has_kernel_w()) {</span>
<span class="lineNum">      27 </span><span class="lineNoCov">          0 :     CHECK_EQ(num_spatial_axes_, 2)</span>
<span class="lineNum">      28 </span>            :         &lt;&lt; &quot;kernel_h &amp; kernel_w can only be used for 2D convolution.&quot;;
<span class="lineNum">      29 </span><span class="lineNoCov">          0 :     CHECK_EQ(0, conv_param.kernel_size_size())</span>
<span class="lineNum">      30 </span>            :         &lt;&lt; &quot;Either kernel_size or kernel_h/w should be specified; not both.&quot;;
<span class="lineNum">      31 </span><span class="lineNoCov">          0 :     kernel_shape_data[0] = conv_param.kernel_h();</span>
<span class="lineNum">      32 </span><span class="lineNoCov">          0 :     kernel_shape_data[1] = conv_param.kernel_w();</span>
<span class="lineNum">      33 </span>            :   } else {
<span class="lineNum">      34 </span>            :     const int num_kernel_dims = conv_param.kernel_size_size();
<span class="lineNum">      35 </span><span class="lineCov">          8 :     CHECK(num_kernel_dims == 1 || num_kernel_dims == num_spatial_axes_)</span>
<span class="lineNum">      36 </span>            :         &lt;&lt; &quot;kernel_size must be specified once, or once per spatial dimension &quot;
<span class="lineNum">      37 </span><span class="lineNoCov">          0 :         &lt;&lt; &quot;(kernel_size specified &quot; &lt;&lt; num_kernel_dims &lt;&lt; &quot; times; &quot;</span>
<span class="lineNum">      38 </span><span class="lineNoCov">          0 :         &lt;&lt; num_spatial_axes_ &lt;&lt; &quot; spatial dims).&quot;;</span>
<span class="lineNum">      39 </span><span class="lineCov">         20 :       for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">      40 </span><span class="lineCov">         16 :         kernel_shape_data[i] =</span>
<span class="lineNum">      41 </span><span class="lineCov">          8 :             conv_param.kernel_size((num_kernel_dims == 1) ? 0 : i);</span>
<span class="lineNum">      42 </span>            :       }
<span class="lineNum">      43 </span>            :   }
<span class="lineNum">      44 </span><span class="lineCov">         20 :   for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">      45 </span><span class="lineCov">         16 :     CHECK_GT(kernel_shape_data[i], 0) &lt;&lt; &quot;Filter dimensions must be nonzero.&quot;;</span>
<span class="lineNum">      46 </span>            :   }
<span class="lineNum">      47 </span>            :   // Setup stride dimensions (stride_).
<span class="lineNum">      48 </span><span class="lineCov">          4 :   stride_.Reshape(spatial_dim_blob_shape);</span>
<span class="lineNum">      49 </span><span class="lineCov">          4 :   int* stride_data = stride_.mutable_cpu_data();</span>
<span class="lineNum">      50 </span><span class="lineCov">          8 :   if (conv_param.has_stride_h() || conv_param.has_stride_w()) {</span>
<span class="lineNum">      51 </span><span class="lineNoCov">          0 :     CHECK_EQ(num_spatial_axes_, 2)</span>
<span class="lineNum">      52 </span>            :         &lt;&lt; &quot;stride_h &amp; stride_w can only be used for 2D convolution.&quot;;
<span class="lineNum">      53 </span><span class="lineNoCov">          0 :     CHECK_EQ(0, conv_param.stride_size())</span>
<span class="lineNum">      54 </span>            :         &lt;&lt; &quot;Either stride or stride_h/w should be specified; not both.&quot;;
<span class="lineNum">      55 </span><span class="lineNoCov">          0 :     stride_data[0] = conv_param.stride_h();</span>
<span class="lineNum">      56 </span><span class="lineNoCov">          0 :     stride_data[1] = conv_param.stride_w();</span>
<span class="lineNum">      57 </span>            :   } else {
<span class="lineNum">      58 </span>            :     const int num_stride_dims = conv_param.stride_size();
<span class="lineNum">      59 </span><span class="lineCov">          8 :     CHECK(num_stride_dims == 0 || num_stride_dims == 1 ||</span>
<span class="lineNum">      60 </span>            :           num_stride_dims == num_spatial_axes_)
<span class="lineNum">      61 </span>            :         &lt;&lt; &quot;stride must be specified once, or once per spatial dimension &quot;
<span class="lineNum">      62 </span><span class="lineNoCov">          0 :         &lt;&lt; &quot;(stride specified &quot; &lt;&lt; num_stride_dims &lt;&lt; &quot; times; &quot;</span>
<span class="lineNum">      63 </span><span class="lineNoCov">          0 :         &lt;&lt; num_spatial_axes_ &lt;&lt; &quot; spatial dims).&quot;;</span>
<span class="lineNum">      64 </span>            :     const int kDefaultStride = 1;
<span class="lineNum">      65 </span><span class="lineCov">         20 :     for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">      66 </span><span class="lineCov">         16 :       stride_data[i] = (num_stride_dims == 0) ? kDefaultStride :</span>
<span class="lineNum">      67 </span><span class="lineCov">          8 :           conv_param.stride((num_stride_dims == 1) ? 0 : i);</span>
<span class="lineNum">      68 </span><span class="lineCov">          8 :       CHECK_GT(stride_data[i], 0) &lt;&lt; &quot;Stride dimensions must be nonzero.&quot;;</span>
<span class="lineNum">      69 </span>            :     }
<span class="lineNum">      70 </span>            :   }
<span class="lineNum">      71 </span>            :   // Setup pad dimensions (pad_).
<span class="lineNum">      72 </span><span class="lineCov">          4 :   pad_.Reshape(spatial_dim_blob_shape);</span>
<span class="lineNum">      73 </span><span class="lineCov">          4 :   int* pad_data = pad_.mutable_cpu_data();</span>
<span class="lineNum">      74 </span><span class="lineCov">          8 :   if (conv_param.has_pad_h() || conv_param.has_pad_w()) {</span>
<span class="lineNum">      75 </span><span class="lineNoCov">          0 :     CHECK_EQ(num_spatial_axes_, 2)</span>
<span class="lineNum">      76 </span>            :         &lt;&lt; &quot;pad_h &amp; pad_w can only be used for 2D convolution.&quot;;
<span class="lineNum">      77 </span><span class="lineNoCov">          0 :     CHECK_EQ(0, conv_param.pad_size())</span>
<span class="lineNum">      78 </span>            :         &lt;&lt; &quot;Either pad or pad_h/w should be specified; not both.&quot;;
<span class="lineNum">      79 </span><span class="lineNoCov">          0 :     pad_data[0] = conv_param.pad_h();</span>
<span class="lineNum">      80 </span><span class="lineNoCov">          0 :     pad_data[1] = conv_param.pad_w();</span>
<span class="lineNum">      81 </span>            :   } else {
<span class="lineNum">      82 </span>            :     const int num_pad_dims = conv_param.pad_size();
<span class="lineNum">      83 </span><span class="lineCov">          8 :     CHECK(num_pad_dims == 0 || num_pad_dims == 1 ||</span>
<span class="lineNum">      84 </span>            :           num_pad_dims == num_spatial_axes_)
<span class="lineNum">      85 </span>            :         &lt;&lt; &quot;pad must be specified once, or once per spatial dimension &quot;
<span class="lineNum">      86 </span><span class="lineNoCov">          0 :         &lt;&lt; &quot;(pad specified &quot; &lt;&lt; num_pad_dims &lt;&lt; &quot; times; &quot;</span>
<span class="lineNum">      87 </span><span class="lineNoCov">          0 :         &lt;&lt; num_spatial_axes_ &lt;&lt; &quot; spatial dims).&quot;;</span>
<span class="lineNum">      88 </span>            :     const int kDefaultPad = 0;
<span class="lineNum">      89 </span><span class="lineCov">         20 :     for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">      90 </span><span class="lineCov">          8 :       pad_data[i] = (num_pad_dims == 0) ? kDefaultPad :</span>
<span class="lineNum">      91 </span><span class="lineNoCov">          0 :           conv_param.pad((num_pad_dims == 1) ? 0 : i);</span>
<span class="lineNum">      92 </span>            :     }
<span class="lineNum">      93 </span>            :   }
<span class="lineNum">      94 </span>            :   // Setup dilation dimensions (dilation_).
<span class="lineNum">      95 </span><span class="lineCov">          4 :   dilation_.Reshape(spatial_dim_blob_shape);</span>
<span class="lineNum">      96 </span><span class="lineCov">          4 :   int* dilation_data = dilation_.mutable_cpu_data();</span>
<span class="lineNum">      97 </span>            :   const int num_dilation_dims = conv_param.dilation_size();
<span class="lineNum">      98 </span><span class="lineCov">          8 :   CHECK(num_dilation_dims == 0 || num_dilation_dims == 1 ||</span>
<span class="lineNum">      99 </span>            :         num_dilation_dims == num_spatial_axes_)
<span class="lineNum">     100 </span>            :       &lt;&lt; &quot;dilation must be specified once, or once per spatial dimension &quot;
<span class="lineNum">     101 </span><span class="lineNoCov">          0 :       &lt;&lt; &quot;(dilation specified &quot; &lt;&lt; num_dilation_dims &lt;&lt; &quot; times; &quot;</span>
<span class="lineNum">     102 </span><span class="lineNoCov">          0 :       &lt;&lt; num_spatial_axes_ &lt;&lt; &quot; spatial dims).&quot;;</span>
<span class="lineNum">     103 </span>            :   const int kDefaultDilation = 1;
<span class="lineNum">     104 </span><span class="lineCov">         20 :   for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">     105 </span><span class="lineCov">          8 :     dilation_data[i] = (num_dilation_dims == 0) ? kDefaultDilation :</span>
<span class="lineNum">     106 </span><span class="lineNoCov">          0 :                        conv_param.dilation((num_dilation_dims == 1) ? 0 : i);</span>
<span class="lineNum">     107 </span>            :   }
<span class="lineNum">     108 </span>            :   // Special case: im2col is the identity for 1x1 convolution with stride 1
<span class="lineNum">     109 </span>            :   // and no padding, so flag for skipping the buffer and transformation.
<span class="lineNum">     110 </span><span class="lineCov">          4 :   is_1x1_ = true;</span>
<span class="lineNum">     111 </span><span class="lineCov">          4 :   for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">     112 </span><span class="lineCov">          8 :     is_1x1_ &amp;=</span>
<span class="lineNum">     113 </span><span class="lineCov">          4 :         kernel_shape_data[i] == 1 &amp;&amp; stride_data[i] == 1 &amp;&amp; pad_data[i] == 0;</span>
<span class="lineNum">     114 </span><span class="lineCov">          4 :     if (!is_1x1_) { break; }</span>
<span class="lineNum">     115 </span>            :   }
<span class="lineNum">     116 </span>            :   // Configure output channels and groups.
<span class="lineNum">     117 </span><span class="lineCov">          8 :   channels_ = bottom[0]-&gt;shape(channel_axis_);</span>
<span class="lineNum">     118 </span><span class="lineCov">          4 :   num_output_ = this-&gt;layer_param_.convolution_param().num_output();</span>
<span class="lineNum">     119 </span><span class="lineCov">          4 :   CHECK_GT(num_output_, 0);</span>
<span class="lineNum">     120 </span><span class="lineCov">          4 :   group_ = this-&gt;layer_param_.convolution_param().group();</span>
<span class="lineNum">     121 </span><span class="lineCov">          8 :   CHECK_EQ(channels_ % group_, 0);</span>
<span class="lineNum">     122 </span><span class="lineCov">          8 :   CHECK_EQ(num_output_ % group_, 0)</span>
<span class="lineNum">     123 </span>            :       &lt;&lt; &quot;Number of output should be multiples of group.&quot;;
<span class="lineNum">     124 </span><span class="lineCov">          4 :   if (reverse_dimensions()) {</span>
<span class="lineNum">     125 </span><span class="lineNoCov">          0 :     conv_out_channels_ = channels_;</span>
<span class="lineNum">     126 </span><span class="lineNoCov">          0 :     conv_in_channels_ = num_output_;</span>
<span class="lineNum">     127 </span>            :   } else {
<span class="lineNum">     128 </span><span class="lineCov">          4 :     conv_out_channels_ = num_output_;</span>
<span class="lineNum">     129 </span><span class="lineCov">          4 :     conv_in_channels_ = channels_;</span>
<span class="lineNum">     130 </span>            :   }
<span class="lineNum">     131 </span>            :   // Handle the parameters: weights and biases.
<span class="lineNum">     132 </span>            :   // - blobs_[0] holds the filter weights
<span class="lineNum">     133 </span>            :   // - blobs_[1] holds the biases (optional)
<span class="lineNum">     134 </span><span class="lineCov">          4 :   vector&lt;int&gt; weight_shape(2);</span>
<span class="lineNum">     135 </span><span class="lineCov">          4 :   weight_shape[0] = conv_out_channels_;</span>
<span class="lineNum">     136 </span><span class="lineCov">          4 :   weight_shape[1] = conv_in_channels_ / group_;</span>
<span class="lineNum">     137 </span><span class="lineCov">         20 :   for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">     138 </span><span class="lineCov">          8 :     weight_shape.push_back(kernel_shape_data[i]);</span>
<span class="lineNum">     139 </span>            :   }
<span class="lineNum">     140 </span><span class="lineCov">          4 :   bias_term_ = this-&gt;layer_param_.convolution_param().bias_term();</span>
<span class="lineNum">     141 </span><span class="lineCov">          4 :   vector&lt;int&gt; bias_shape(bias_term_, num_output_);</span>
<span class="lineNum">     142 </span><span class="lineCov">          4 :   if (this-&gt;blobs_.size() &gt; 0) {</span>
<span class="lineNum">     143 </span><span class="lineNoCov">          0 :     CHECK_EQ(1 + bias_term_, this-&gt;blobs_.size())</span>
<span class="lineNum">     144 </span>            :         &lt;&lt; &quot;Incorrect number of weight blobs.&quot;;
<span class="lineNum">     145 </span><span class="lineNoCov">          0 :     if (weight_shape != this-&gt;blobs_[0]-&gt;shape()) {</span>
<span class="lineNum">     146 </span><span class="lineNoCov">          0 :       Blob&lt;Dtype&gt; weight_shaped_blob(weight_shape);</span>
<span class="lineNum">     147 </span><span class="lineNoCov">          0 :       LOG(FATAL) &lt;&lt; &quot;Incorrect weight shape: expected shape &quot;</span>
<span class="lineNum">     148 </span><span class="lineNoCov">          0 :           &lt;&lt; weight_shaped_blob.shape_string() &lt;&lt; &quot;; instead, shape was &quot;</span>
<span class="lineNum">     149 </span><span class="lineNoCov">          0 :           &lt;&lt; this-&gt;blobs_[0]-&gt;shape_string();</span>
<span class="lineNum">     150 </span>            :     }
<span class="lineNum">     151 </span><span class="lineNoCov">          0 :     if (bias_term_ &amp;&amp; bias_shape != this-&gt;blobs_[1]-&gt;shape()) {</span>
<span class="lineNum">     152 </span><span class="lineNoCov">          0 :       Blob&lt;Dtype&gt; bias_shaped_blob(bias_shape);</span>
<span class="lineNum">     153 </span><span class="lineNoCov">          0 :       LOG(FATAL) &lt;&lt; &quot;Incorrect bias shape: expected shape &quot;</span>
<span class="lineNum">     154 </span><span class="lineNoCov">          0 :           &lt;&lt; bias_shaped_blob.shape_string() &lt;&lt; &quot;; instead, shape was &quot;</span>
<span class="lineNum">     155 </span><span class="lineNoCov">          0 :           &lt;&lt; this-&gt;blobs_[1]-&gt;shape_string();</span>
<span class="lineNum">     156 </span>            :     }
<span class="lineNum">     157 </span><span class="lineNoCov">          0 :     LOG(INFO) &lt;&lt; &quot;Skipping parameter initialization&quot;;</span>
<span class="lineNum">     158 </span>            :   } else {
<span class="lineNum">     159 </span><span class="lineCov">          4 :     if (bias_term_) {</span>
<span class="lineNum">     160 </span><span class="lineCov">          8 :       this-&gt;blobs_.resize(2);</span>
<span class="lineNum">     161 </span>            :     } else {
<span class="lineNum">     162 </span><span class="lineNoCov">          0 :       this-&gt;blobs_.resize(1);</span>
<span class="lineNum">     163 </span>            :     }
<span class="lineNum">     164 </span>            :     // Initialize and fill the weights:
<span class="lineNum">     165 </span>            :     // output channels x input channels per-group x kernel height x kernel width
<span class="lineNum">     166 </span><span class="lineCov">          4 :     this-&gt;blobs_[0].reset(new Blob&lt;Dtype&gt;(weight_shape));</span>
<span class="lineNum">     167 </span>            :     shared_ptr&lt;Filler&lt;Dtype&gt; &gt; weight_filler(GetFiller&lt;Dtype&gt;(
<span class="lineNum">     168 </span><span class="lineCov">          4 :         this-&gt;layer_param_.convolution_param().weight_filler()));</span>
<span class="lineNum">     169 </span><span class="lineCov">          4 :     weight_filler-&gt;Fill(this-&gt;blobs_[0].get());</span>
<span class="lineNum">     170 </span>            :     // If necessary, initialize and fill the biases.
<span class="lineNum">     171 </span><span class="lineCov">          4 :     if (bias_term_) {</span>
<span class="lineNum">     172 </span><span class="lineCov">          8 :       this-&gt;blobs_[1].reset(new Blob&lt;Dtype&gt;(bias_shape));</span>
<span class="lineNum">     173 </span>            :       shared_ptr&lt;Filler&lt;Dtype&gt; &gt; bias_filler(GetFiller&lt;Dtype&gt;(
<span class="lineNum">     174 </span><span class="lineCov">          4 :           this-&gt;layer_param_.convolution_param().bias_filler()));</span>
<span class="lineNum">     175 </span><span class="lineCov">          4 :       bias_filler-&gt;Fill(this-&gt;blobs_[1].get());</span>
<span class="lineNum">     176 </span>            :     }
<span class="lineNum">     177 </span>            :   }
<span class="lineNum">     178 </span><span class="lineCov">          4 :   kernel_dim_ = this-&gt;blobs_[0]-&gt;count(1);</span>
<span class="lineNum">     179 </span><span class="lineCov">          4 :   weight_offset_ = conv_out_channels_ * kernel_dim_ / group_;</span>
<span class="lineNum">     180 </span>            :   // Propagate gradients to the parameters (as directed by backward pass).
<span class="lineNum">     181 </span><span class="lineCov">          4 :   this-&gt;param_propagate_down_.resize(this-&gt;blobs_.size(), true);</span>
<span class="lineNum">     182 </span><span class="lineCov">          4 : }</span>
<span class="lineNum">     183 </span>            : 
<span class="lineNum">     184 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     185 </span><span class="lineCov">      24206 : void BaseConvolutionLayer&lt;Dtype&gt;::Reshape(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,</span>
<span class="lineNum">     186 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top) {
<span class="lineNum">     187 </span><span class="lineCov">      24206 :   const int first_spatial_axis = channel_axis_ + 1;</span>
<span class="lineNum">     188 </span><span class="lineCov">      48412 :   CHECK_EQ(bottom[0]-&gt;num_axes(), first_spatial_axis + num_spatial_axes_)</span>
<span class="lineNum">     189 </span>            :       &lt;&lt; &quot;bottom num_axes may not change.&quot;;
<span class="lineNum">     190 </span><span class="lineCov">      24206 :   num_ = bottom[0]-&gt;count(0, channel_axis_);</span>
<span class="lineNum">     191 </span><span class="lineCov">      48412 :   CHECK_EQ(bottom[0]-&gt;shape(channel_axis_), channels_)</span>
<span class="lineNum">     192 </span>            :       &lt;&lt; &quot;Input size incompatible with convolution kernel.&quot;;
<span class="lineNum">     193 </span>            :   // TODO: generalize to handle inputs of different shapes.
<span class="lineNum">     194 </span><span class="lineCov">      48412 :   for (int bottom_id = 1; bottom_id &lt; bottom.size(); ++bottom_id) {</span>
<span class="lineNum">     195 </span><span class="lineNoCov">          0 :     CHECK(bottom[0]-&gt;shape() == bottom[bottom_id]-&gt;shape())</span>
<span class="lineNum">     196 </span><span class="lineNoCov">          0 :         &lt;&lt; &quot;shape mismatch - bottom[0]: &quot; &lt;&lt; bottom[0]-&gt;shape_string()</span>
<span class="lineNum">     197 </span><span class="lineNoCov">          0 :         &lt;&lt; &quot; vs. bottom[&quot; &lt;&lt; bottom_id &lt;&lt; &quot;]: &quot;</span>
<span class="lineNum">     198 </span><span class="lineNoCov">          0 :         &lt;&lt; bottom[bottom_id]-&gt;shape_string();</span>
<span class="lineNum">     199 </span>            :   }
<span class="lineNum">     200 </span>            :   // Shape the tops.
<span class="lineNum">     201 </span><span class="lineCov">      48412 :   bottom_shape_ = &amp;bottom[0]-&gt;shape();</span>
<span class="lineNum">     202 </span><span class="lineCov">      24206 :   compute_output_shape();</span>
<span class="lineNum">     203 </span>            :   vector&lt;int&gt; top_shape(bottom[0]-&gt;shape().begin(),
<span class="lineNum">     204 </span><span class="lineCov">      48412 :       bottom[0]-&gt;shape().begin() + channel_axis_);</span>
<span class="lineNum">     205 </span><span class="lineCov">      24206 :   top_shape.push_back(num_output_);</span>
<span class="lineNum">     206 </span><span class="lineCov">     121030 :   for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">     207 </span><span class="lineCov">      96824 :     top_shape.push_back(output_shape_[i]);</span>
<span class="lineNum">     208 </span>            :   }
<span class="lineNum">     209 </span><span class="lineCov">     121030 :   for (int top_id = 0; top_id &lt; top.size(); ++top_id) {</span>
<span class="lineNum">     210 </span><span class="lineCov">      24206 :     top[top_id]-&gt;Reshape(top_shape);</span>
<span class="lineNum">     211 </span>            :   }
<span class="lineNum">     212 </span><span class="lineCov">      24206 :   if (reverse_dimensions()) {</span>
<span class="lineNum">     213 </span><span class="lineNoCov">          0 :     conv_out_spatial_dim_ = bottom[0]-&gt;count(first_spatial_axis);</span>
<span class="lineNum">     214 </span>            :   } else {
<span class="lineNum">     215 </span><span class="lineCov">      48412 :     conv_out_spatial_dim_ = top[0]-&gt;count(first_spatial_axis);</span>
<span class="lineNum">     216 </span>            :   }
<span class="lineNum">     217 </span><span class="lineCov">      24206 :   col_offset_ = kernel_dim_ * conv_out_spatial_dim_;</span>
<span class="lineNum">     218 </span><span class="lineCov">      24206 :   output_offset_ = conv_out_channels_ * conv_out_spatial_dim_ / group_;</span>
<span class="lineNum">     219 </span>            :   // Setup input dimensions (conv_input_shape_).
<span class="lineNum">     220 </span><span class="lineCov">      24206 :   vector&lt;int&gt; bottom_dim_blob_shape(1, num_spatial_axes_ + 1);</span>
<span class="lineNum">     221 </span><span class="lineCov">      24206 :   conv_input_shape_.Reshape(bottom_dim_blob_shape);</span>
<span class="lineNum">     222 </span><span class="lineCov">      24206 :   int* conv_input_shape_data = conv_input_shape_.mutable_cpu_data();</span>
<span class="lineNum">     223 </span><span class="lineCov">     169442 :   for (int i = 0; i &lt; num_spatial_axes_ + 1; ++i) {</span>
<span class="lineNum">     224 </span><span class="lineCov">      72618 :     if (reverse_dimensions()) {</span>
<span class="lineNum">     225 </span><span class="lineNoCov">          0 :       conv_input_shape_data[i] = top[0]-&gt;shape(channel_axis_ + i);</span>
<span class="lineNum">     226 </span>            :     } else {
<span class="lineNum">     227 </span><span class="lineCov">     145236 :       conv_input_shape_data[i] = bottom[0]-&gt;shape(channel_axis_ + i);</span>
<span class="lineNum">     228 </span>            :     }
<span class="lineNum">     229 </span>            :   }
<span class="lineNum">     230 </span>            :   // The im2col result buffer will only hold one image at a time to avoid
<span class="lineNum">     231 </span>            :   // overly large memory usage. In the special case of 1x1 convolution
<span class="lineNum">     232 </span>            :   // it goes lazily unused to save memory.
<span class="lineNum">     233 </span>            :   col_buffer_shape_.clear();
<span class="lineNum">     234 </span><span class="lineCov">      24206 :   col_buffer_shape_.push_back(kernel_dim_ * group_);</span>
<span class="lineNum">     235 </span><span class="lineCov">     121030 :   for (int i = 0; i &lt; num_spatial_axes_; ++i) {</span>
<span class="lineNum">     236 </span><span class="lineCov">      48412 :     if (reverse_dimensions()) {</span>
<span class="lineNum">     237 </span><span class="lineNoCov">          0 :       col_buffer_shape_.push_back(input_shape(i + 1));</span>
<span class="lineNum">     238 </span>            :     } else {
<span class="lineNum">     239 </span><span class="lineCov">      96824 :       col_buffer_shape_.push_back(output_shape_[i]);</span>
<span class="lineNum">     240 </span>            :     }
<span class="lineNum">     241 </span>            :   }
<span class="lineNum">     242 </span><span class="lineCov">      24206 :   col_buffer_.Reshape(col_buffer_shape_);</span>
<span class="lineNum">     243 </span><span class="lineCov">      48412 :   bottom_dim_ = bottom[0]-&gt;count(channel_axis_);</span>
<span class="lineNum">     244 </span><span class="lineCov">      48412 :   top_dim_ = top[0]-&gt;count(channel_axis_);</span>
<span class="lineNum">     245 </span><span class="lineCov">      24206 :   num_kernels_im2col_ = conv_in_channels_ * conv_out_spatial_dim_;</span>
<span class="lineNum">     246 </span><span class="lineCov">      24206 :   num_kernels_col2im_ = reverse_dimensions() ? top_dim_ : bottom_dim_;</span>
<span class="lineNum">     247 </span>            :   // Set up the all ones &quot;bias multiplier&quot; for adding biases by BLAS
<span class="lineNum">     248 </span><span class="lineCov">      48412 :   out_spatial_dim_ = top[0]-&gt;count(first_spatial_axis);</span>
<span class="lineNum">     249 </span><span class="lineCov">      24206 :   if (bias_term_) {</span>
<span class="lineNum">     250 </span><span class="lineCov">      24206 :     vector&lt;int&gt; bias_multiplier_shape(1, out_spatial_dim_);</span>
<span class="lineNum">     251 </span><span class="lineCov">      24206 :     bias_multiplier_.Reshape(bias_multiplier_shape);</span>
<span class="lineNum">     252 </span><span class="lineCov">      24206 :     caffe_set(bias_multiplier_.count(), Dtype(1),</span>
<span class="lineNum">     253 </span>            :         bias_multiplier_.mutable_cpu_data());
<span class="lineNum">     254 </span>            :   }
<span class="lineNum">     255 </span><span class="lineCov">      24206 : }</span>
<a name="256"><span class="lineNum">     256 </span>            : </a>
<span class="lineNum">     257 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     258 </span><span class="lineCov">    1700128 : void BaseConvolutionLayer&lt;Dtype&gt;::forward_cpu_gemm(const Dtype* input,</span>
<span class="lineNum">     259 </span>            :     const Dtype* weights, Dtype* output, bool skip_im2col) {
<span class="lineNum">     260 </span>            :   const Dtype* col_buff = input;
<span class="lineNum">     261 </span><span class="lineCov">    1700128 :   if (!is_1x1_) {</span>
<span class="lineNum">     262 </span><span class="lineCov">    1700128 :     if (!skip_im2col) {</span>
<span class="lineNum">     263 </span><span class="lineCov">    1700128 :       conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());</span>
<span class="lineNum">     264 </span>            :     }
<span class="lineNum">     265 </span><span class="lineCov">    1700128 :     col_buff = col_buffer_.cpu_data();</span>
<span class="lineNum">     266 </span>            :   }
<span class="lineNum">     267 </span><span class="lineCov">    5100384 :   for (int g = 0; g &lt; group_; ++g) {</span>
<span class="lineNum">     268 </span><span class="lineCov">    5100384 :     caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, conv_out_channels_ /</span>
<span class="lineNum">     269 </span>            :         group_, conv_out_spatial_dim_, kernel_dim_,
<span class="lineNum">     270 </span><span class="lineCov">    3400256 :         (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,</span>
<span class="lineNum">     271 </span><span class="lineCov">    1700128 :         (Dtype)0., output + output_offset_ * g);</span>
<span class="lineNum">     272 </span>            :   }
<span class="lineNum">     273 </span><span class="lineCov">    1700128 : }</span>
<a name="274"><span class="lineNum">     274 </span>            : </a>
<span class="lineNum">     275 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     276 </span><span class="lineCov">    1700128 : void BaseConvolutionLayer&lt;Dtype&gt;::forward_cpu_bias(Dtype* output,</span>
<span class="lineNum">     277 </span>            :     const Dtype* bias) {
<span class="lineNum">     278 </span><span class="lineCov">    1700128 :   caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num_output_,</span>
<span class="lineNum">     279 </span>            :       out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.cpu_data(),
<span class="lineNum">     280 </span>            :       (Dtype)1., output);
<span class="lineNum">     281 </span><span class="lineCov">    1700128 : }</span>
<a name="282"><span class="lineNum">     282 </span>            : </a>
<span class="lineNum">     283 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     284 </span><span class="lineCov">     640000 : void BaseConvolutionLayer&lt;Dtype&gt;::backward_cpu_gemm(const Dtype* output,</span>
<span class="lineNum">     285 </span>            :     const Dtype* weights, Dtype* input) {
<span class="lineNum">     286 </span><span class="lineCov">     640000 :   Dtype* col_buff = col_buffer_.mutable_cpu_data();</span>
<span class="lineNum">     287 </span><span class="lineCov">     640000 :   if (is_1x1_) {</span>
<span class="lineNum">     288 </span>            :     col_buff = input;
<span class="lineNum">     289 </span>            :   }
<span class="lineNum">     290 </span><span class="lineCov">    1920000 :   for (int g = 0; g &lt; group_; ++g) {</span>
<span class="lineNum">     291 </span><span class="lineCov">    2560000 :     caffe_cpu_gemm&lt;Dtype&gt;(CblasTrans, CblasNoTrans, kernel_dim_,</span>
<span class="lineNum">     292 </span><span class="lineCov">     640000 :         conv_out_spatial_dim_, conv_out_channels_ / group_,</span>
<span class="lineNum">     293 </span><span class="lineCov">    1280000 :         (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,</span>
<span class="lineNum">     294 </span><span class="lineCov">     640000 :         (Dtype)0., col_buff + col_offset_ * g);</span>
<span class="lineNum">     295 </span>            :   }
<span class="lineNum">     296 </span><span class="lineCov">     640000 :   if (!is_1x1_) {</span>
<span class="lineNum">     297 </span><span class="lineCov">     640000 :     conv_col2im_cpu(col_buff, input);</span>
<span class="lineNum">     298 </span>            :   }
<span class="lineNum">     299 </span><span class="lineCov">     640000 : }</span>
<a name="300"><span class="lineNum">     300 </span>            : </a>
<span class="lineNum">     301 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     302 </span><span class="lineCov">    1280000 : void BaseConvolutionLayer&lt;Dtype&gt;::weight_cpu_gemm(const Dtype* input,</span>
<span class="lineNum">     303 </span>            :     const Dtype* output, Dtype* weights) {
<span class="lineNum">     304 </span>            :   const Dtype* col_buff = input;
<span class="lineNum">     305 </span><span class="lineCov">    1280000 :   if (!is_1x1_) {</span>
<span class="lineNum">     306 </span><span class="lineCov">    1280000 :     conv_im2col_cpu(input, col_buffer_.mutable_cpu_data());</span>
<span class="lineNum">     307 </span><span class="lineCov">    1280000 :     col_buff = col_buffer_.cpu_data();</span>
<span class="lineNum">     308 </span>            :   }
<span class="lineNum">     309 </span><span class="lineCov">    3840000 :   for (int g = 0; g &lt; group_; ++g) {</span>
<span class="lineNum">     310 </span><span class="lineCov">    3840000 :     caffe_cpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,</span>
<span class="lineNum">     311 </span>            :         kernel_dim_, conv_out_spatial_dim_,
<span class="lineNum">     312 </span><span class="lineCov">    2560000 :         (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g,</span>
<span class="lineNum">     313 </span><span class="lineCov">    1280000 :         (Dtype)1., weights + weight_offset_ * g);</span>
<span class="lineNum">     314 </span>            :   }
<span class="lineNum">     315 </span><span class="lineCov">    1280000 : }</span>
<a name="316"><span class="lineNum">     316 </span>            : </a>
<span class="lineNum">     317 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     318 </span><span class="lineCov">    1280000 : void BaseConvolutionLayer&lt;Dtype&gt;::backward_cpu_bias(Dtype* bias,</span>
<span class="lineNum">     319 </span>            :     const Dtype* input) {
<span class="lineNum">     320 </span><span class="lineCov">    1280000 :   caffe_cpu_gemv&lt;Dtype&gt;(CblasNoTrans, num_output_, out_spatial_dim_, 1.,</span>
<span class="lineNum">     321 </span>            :       input, bias_multiplier_.cpu_data(), 1., bias);
<span class="lineNum">     322 </span><span class="lineCov">    1280000 : }</span>
<span class="lineNum">     323 </span>            : 
<span class="lineNum">     324 </span>            : #ifndef CPU_ONLY
<span class="lineNum">     325 </span>            : 
<span class="lineNum">     326 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     327 </span>            : void BaseConvolutionLayer&lt;Dtype&gt;::forward_gpu_gemm(const Dtype* input,
<span class="lineNum">     328 </span>            :     const Dtype* weights, Dtype* output, bool skip_im2col) {
<span class="lineNum">     329 </span>            :   const Dtype* col_buff = input;
<span class="lineNum">     330 </span>            :   if (!is_1x1_) {
<span class="lineNum">     331 </span>            :     if (!skip_im2col) {
<span class="lineNum">     332 </span>            :       conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
<span class="lineNum">     333 </span>            :     }
<span class="lineNum">     334 </span>            :     col_buff = col_buffer_.gpu_data();
<span class="lineNum">     335 </span>            :   }
<span class="lineNum">     336 </span>            :   for (int g = 0; g &lt; group_; ++g) {
<span class="lineNum">     337 </span>            :     caffe_gpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, conv_out_channels_ /
<span class="lineNum">     338 </span>            :         group_, conv_out_spatial_dim_, kernel_dim_,
<span class="lineNum">     339 </span>            :         (Dtype)1., weights + weight_offset_ * g, col_buff + col_offset_ * g,
<span class="lineNum">     340 </span>            :         (Dtype)0., output + output_offset_ * g);
<span class="lineNum">     341 </span>            :   }
<span class="lineNum">     342 </span>            : }
<span class="lineNum">     343 </span>            : 
<span class="lineNum">     344 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     345 </span>            : void BaseConvolutionLayer&lt;Dtype&gt;::forward_gpu_bias(Dtype* output,
<span class="lineNum">     346 </span>            :     const Dtype* bias) {
<span class="lineNum">     347 </span>            :   caffe_gpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasNoTrans, num_output_,
<span class="lineNum">     348 </span>            :       out_spatial_dim_, 1, (Dtype)1., bias, bias_multiplier_.gpu_data(),
<span class="lineNum">     349 </span>            :       (Dtype)1., output);
<span class="lineNum">     350 </span>            : }
<span class="lineNum">     351 </span>            : 
<span class="lineNum">     352 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     353 </span>            : void BaseConvolutionLayer&lt;Dtype&gt;::backward_gpu_gemm(const Dtype* output,
<span class="lineNum">     354 </span>            :     const Dtype* weights, Dtype* input) {
<span class="lineNum">     355 </span>            :   Dtype* col_buff = col_buffer_.mutable_gpu_data();
<span class="lineNum">     356 </span>            :   if (is_1x1_) {
<span class="lineNum">     357 </span>            :     col_buff = input;
<span class="lineNum">     358 </span>            :   }
<span class="lineNum">     359 </span>            :   for (int g = 0; g &lt; group_; ++g) {
<span class="lineNum">     360 </span>            :     caffe_gpu_gemm&lt;Dtype&gt;(CblasTrans, CblasNoTrans, kernel_dim_,
<span class="lineNum">     361 </span>            :         conv_out_spatial_dim_, conv_out_channels_ / group_,
<span class="lineNum">     362 </span>            :         (Dtype)1., weights + weight_offset_ * g, output + output_offset_ * g,
<span class="lineNum">     363 </span>            :         (Dtype)0., col_buff + col_offset_ * g);
<span class="lineNum">     364 </span>            :   }
<span class="lineNum">     365 </span>            :   if (!is_1x1_) {
<span class="lineNum">     366 </span>            :     conv_col2im_gpu(col_buff, input);
<span class="lineNum">     367 </span>            :   }
<span class="lineNum">     368 </span>            : }
<span class="lineNum">     369 </span>            : 
<span class="lineNum">     370 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     371 </span>            : void BaseConvolutionLayer&lt;Dtype&gt;::weight_gpu_gemm(const Dtype* input,
<span class="lineNum">     372 </span>            :     const Dtype* output, Dtype* weights) {
<span class="lineNum">     373 </span>            :   const Dtype* col_buff = input;
<span class="lineNum">     374 </span>            :   if (!is_1x1_) {
<span class="lineNum">     375 </span>            :     conv_im2col_gpu(input, col_buffer_.mutable_gpu_data());
<span class="lineNum">     376 </span>            :     col_buff = col_buffer_.gpu_data();
<span class="lineNum">     377 </span>            :   }
<span class="lineNum">     378 </span>            :   for (int g = 0; g &lt; group_; ++g) {
<span class="lineNum">     379 </span>            :     caffe_gpu_gemm&lt;Dtype&gt;(CblasNoTrans, CblasTrans, conv_out_channels_ / group_,
<span class="lineNum">     380 </span>            :         kernel_dim_, conv_out_spatial_dim_,
<span class="lineNum">     381 </span>            :         (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g,
<span class="lineNum">     382 </span>            :         (Dtype)1., weights + weight_offset_ * g);
<span class="lineNum">     383 </span>            :   }
<span class="lineNum">     384 </span>            : }
<span class="lineNum">     385 </span>            : 
<span class="lineNum">     386 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">     387 </span>            : void BaseConvolutionLayer&lt;Dtype&gt;::backward_gpu_bias(Dtype* bias,
<span class="lineNum">     388 </span>            :     const Dtype* input) {
<span class="lineNum">     389 </span>            :   caffe_gpu_gemv&lt;Dtype&gt;(CblasNoTrans, num_output_, out_spatial_dim_, 1.,
<span class="lineNum">     390 </span>            :       input, bias_multiplier_.gpu_data(), 1., bias);
<span class="lineNum">     391 </span>            : }
<span class="lineNum">     392 </span>            : 
<span class="lineNum">     393 </span>            : #endif  // !CPU_ONLY
<span class="lineNum">     394 </span>            : 
<a name="395"><span class="lineNum">     395 </span>            : INSTANTIATE_CLASS(BaseConvolutionLayer);</a>
<span class="lineNum">     396 </span>            : 
<span class="lineNum">     397 </span><span class="lineCov">          2 : }  // namespace caffe</span>
</pre>
      </td>
    </tr>
  </table>
  <br>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="http://ltp.sourceforge.net/coverage/lcov.php" target="_parent">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
