<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<!-- Auto-generated LCOV coverage-report page (generator attribution in the page
     footer: LCOV version 1.12). Do not hand-edit; regenerate with genhtml. -->
<!-- NOTE(review): this HTML 4.01 doctype has no system identifier, which puts
     browsers into quirks mode. The fixed-pixel table/ruler-image layout of LCOV
     pages may depend on quirks rendering, so confirm before modernizing the
     doctype to plain "doctype html" (standards mode). -->

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <!-- Page title names the tool, the test label ("code analysis"), and the covered file. -->
  <title>LCOV - code analysis - include/caffe/layers/base_conv_layer.hpp</title>
  <!-- Shared stylesheet at the report root; all class names below (title, ruler,
       headerItem, lineCov, lineNoCov, ...) are defined there. -->
  <link rel="stylesheet" type="text/css" href="../../../gcov.css">
</head>

<body>

  <!-- Report banner plus the coverage-summary block shown above the listing. -->
  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <!-- "ruler" rows use a stretched 3x3 transparent image as a horizontal line. -->
    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>

    <tr>
      <td width="100%">
        <!-- 7-column summary grid: breadcrumb / value / spacer / label / Hit / Total / Coverage. -->
        <table cellpadding=1 border=0 width="100%">
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../../index.html">top level</a> - <a href="index.html">include/caffe/layers</a> - base_conv_layer.hpp<span style="font-size: 80%;"> (source / <a href="base_conv_layer.hpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <!-- Line coverage: 16 of 28 instrumented lines hit. The "...Lo" class
               presumably marks below-threshold ("low") coverage per gcov.css —
               thresholds are a generator/stylesheet concern, confirm there. -->
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">16</td>
            <td class="headerCovTableEntry">28</td>
            <td class="headerCovTableEntryLo">57.1 %</td>
          </tr>
          <!-- Function coverage: 6 of 18 instrumented functions hit. -->
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:25:26</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">6</td>
            <td class="headerCovTableEntry">18</td>
            <td class="headerCovTableEntryLo">33.3 %</td>
          </tr>
          <!-- Legend row: maps the hit / not-hit colors used in the listing below. -->
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">            Lines:
            <span class="coverLegendCov">hit</span>
            <span class="coverLegendNoCov">not hit</span>
</td>
            <td></td>
          </tr>
          <!-- Spacer row (single cell in a 7-column table; sits in the first column). -->
          <tr><td><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
  </table>

  <table cellpadding=0 cellspacing=0 border=0>
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<!-- Annotated source listing. The pre blocks hold HTML-escaped report DATA
     (the covered C++ header), not live markup: each row is the source line
     number, the recorded execution count, then the escaped source text.
     Spans classed "lineCov" were executed (count shown), "lineNoCov" were
     instrumented but never executed (count 0); lines with no span apparently
     carry no instrumentation. The anchors (a name="N") allow deep links to
     line N. No comments are placed inside the pre elements because any
     whitespace added there would be rendered verbatim. -->
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : #ifndef CAFFE_BASE_CONVOLUTION_LAYER_HPP_</a>
<span class="lineNum">       2 </span>            : #define CAFFE_BASE_CONVOLUTION_LAYER_HPP_
<span class="lineNum">       3 </span>            : 
<span class="lineNum">       4 </span>            : #include &lt;vector&gt;
<span class="lineNum">       5 </span>            : 
<span class="lineNum">       6 </span>            : #include &quot;caffe/blob.hpp&quot;
<span class="lineNum">       7 </span>            : #include &quot;caffe/layer.hpp&quot;
<span class="lineNum">       8 </span>            : #include &quot;caffe/proto/caffe.pb.h&quot;
<span class="lineNum">       9 </span>            : #include &quot;caffe/util/im2col.hpp&quot;
<span class="lineNum">      10 </span>            : 
<span class="lineNum">      11 </span>            : namespace caffe {
<span class="lineNum">      12 </span>            : 
<span class="lineNum">      13 </span>            : /**
<span class="lineNum">      14 </span>            :  * @brief Abstract base class that factors out the BLAS code common to
<span class="lineNum">      15 </span>            :  *        ConvolutionLayer and DeconvolutionLayer.
<a name="16"><span class="lineNum">      16 </span>            :  */</a>
<span class="lineNum">      17 </span>            : template &lt;typename Dtype&gt;
<a name="18"><span class="lineNum">      18 </span><span class="lineCov">          4 : class BaseConvolutionLayer : public Layer&lt;Dtype&gt; {</span></a>
<span class="lineNum">      19 </span>            :  public:
<span class="lineNum">      20 </span><span class="lineCov">          2 :   explicit BaseConvolutionLayer(const LayerParameter&amp; param)</span>
<span class="lineNum">      21 </span><span class="lineCov">          4 :       : Layer&lt;Dtype&gt;(param) {}</span>
<span class="lineNum">      22 </span>            :   virtual void LayerSetUp(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,
<span class="lineNum">      23 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top);
<span class="lineNum">      24 </span>            :   virtual void Reshape(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,
<a name="25"><span class="lineNum">      25 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top);</a>
<a name="26"><span class="lineNum">      26 </span>            : </a>
<a name="27"><span class="lineNum">      27 </span><span class="lineCov">          4 :   virtual inline int MinBottomBlobs() const { return 1; }</span></a>
<span class="lineNum">      28 </span><span class="lineCov">          4 :   virtual inline int MinTopBlobs() const { return 1; }</span>
<span class="lineNum">      29 </span><span class="lineCov">          2 :   virtual inline bool EqualNumBottomTopBlobs() const { return true; }</span>
<span class="lineNum">      30 </span>            : 
<span class="lineNum">      31 </span>            :  protected:
<span class="lineNum">      32 </span>            :   // Helper functions that abstract away the column buffer and gemm arguments.
<span class="lineNum">      33 </span>            :   // The last argument in forward_cpu_gemm is so that we can skip the im2col if
<span class="lineNum">      34 </span>            :   // we just called weight_cpu_gemm with the same input.
<span class="lineNum">      35 </span>            :   void forward_cpu_gemm(const Dtype* input, const Dtype* weights,
<span class="lineNum">      36 </span>            :       Dtype* output, bool skip_im2col = false);
<span class="lineNum">      37 </span>            :   void forward_cpu_bias(Dtype* output, const Dtype* bias);
<span class="lineNum">      38 </span>            :   void backward_cpu_gemm(const Dtype* input, const Dtype* weights,
<span class="lineNum">      39 </span>            :       Dtype* output);
<span class="lineNum">      40 </span>            :   void weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype*
<span class="lineNum">      41 </span>            :       weights);
<span class="lineNum">      42 </span>            :   void backward_cpu_bias(Dtype* bias, const Dtype* input);
<span class="lineNum">      43 </span>            : 
<span class="lineNum">      44 </span>            : #ifndef CPU_ONLY
<span class="lineNum">      45 </span>            :   void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights,
<span class="lineNum">      46 </span>            :       Dtype* output, bool skip_im2col = false);
<span class="lineNum">      47 </span>            :   void forward_gpu_bias(Dtype* output, const Dtype* bias);
<span class="lineNum">      48 </span>            :   void backward_gpu_gemm(const Dtype* input, const Dtype* weights,
<span class="lineNum">      49 </span>            :       Dtype* col_output);
<span class="lineNum">      50 </span>            :   void weight_gpu_gemm(const Dtype* col_input, const Dtype* output, Dtype*
<span class="lineNum">      51 </span>            :       weights);
<span class="lineNum">      52 </span>            :   void backward_gpu_bias(Dtype* bias, const Dtype* input);
<span class="lineNum">      53 </span>            : #endif
<span class="lineNum">      54 </span>            : 
<span class="lineNum">      55 </span>            :   /// @brief The spatial dimensions of the input.
<span class="lineNum">      56 </span><span class="lineNoCov">          0 :   inline int input_shape(int i) {</span>
<span class="lineNum">      57 </span><span class="lineCov">        808 :     return (*bottom_shape_)[channel_axis_ + i];</span>
<span class="lineNum">      58 </span>            :   }
<span class="lineNum">      59 </span>            :   // reverse_dimensions should return true iff we are implementing deconv, so
<span class="lineNum">      60 </span>            :   // that conv helpers know which dimensions are which.
<span class="lineNum">      61 </span>            :   virtual bool reverse_dimensions() = 0;
<span class="lineNum">      62 </span>            :   // Compute height_out_ and width_out_ from other parameters.
<span class="lineNum">      63 </span>            :   virtual void compute_output_shape() = 0;
<span class="lineNum">      64 </span>            : 
<span class="lineNum">      65 </span>            :   /// @brief The spatial dimensions of a filter kernel.
<span class="lineNum">      66 </span>            :   Blob&lt;int&gt; kernel_shape_;
<span class="lineNum">      67 </span>            :   /// @brief The spatial dimensions of the stride.
<span class="lineNum">      68 </span>            :   Blob&lt;int&gt; stride_;
<span class="lineNum">      69 </span>            :   /// @brief The spatial dimensions of the padding.
<span class="lineNum">      70 </span>            :   Blob&lt;int&gt; pad_;
<span class="lineNum">      71 </span>            :   /// @brief The spatial dimensions of the dilation.
<span class="lineNum">      72 </span>            :   Blob&lt;int&gt; dilation_;
<span class="lineNum">      73 </span>            :   /// @brief The spatial dimensions of the convolution input.
<span class="lineNum">      74 </span>            :   Blob&lt;int&gt; conv_input_shape_;
<span class="lineNum">      75 </span>            :   /// @brief The spatial dimensions of the col_buffer.
<span class="lineNum">      76 </span>            :   vector&lt;int&gt; col_buffer_shape_;
<span class="lineNum">      77 </span>            :   /// @brief The spatial dimensions of the output.
<span class="lineNum">      78 </span>            :   vector&lt;int&gt; output_shape_;
<span class="lineNum">      79 </span>            :   const vector&lt;int&gt;* bottom_shape_;
<span class="lineNum">      80 </span>            : 
<span class="lineNum">      81 </span>            :   int num_spatial_axes_;
<span class="lineNum">      82 </span>            :   int bottom_dim_;
<span class="lineNum">      83 </span>            :   int top_dim_;
<span class="lineNum">      84 </span>            : 
<span class="lineNum">      85 </span>            :   int channel_axis_;
<span class="lineNum">      86 </span>            :   int num_;
<span class="lineNum">      87 </span>            :   int channels_;
<span class="lineNum">      88 </span>            :   int group_;
<span class="lineNum">      89 </span>            :   int out_spatial_dim_;
<span class="lineNum">      90 </span>            :   int weight_offset_;
<span class="lineNum">      91 </span>            :   int num_output_;
<span class="lineNum">      92 </span>            :   bool bias_term_;
<span class="lineNum">      93 </span>            :   bool is_1x1_;
<span class="lineNum">      94 </span>            :   bool force_nd_im2col_;
<span class="lineNum">      95 </span>            : 
<a name="96"><span class="lineNum">      96 </span>            :  private:</a>
<span class="lineNum">      97 </span>            :   // wrap im2col/col2im so we don't have to remember the (long) argument lists
<span class="lineNum">      98 </span><span class="lineCov">      20000 :   inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) {</span>
<span class="lineNum">      99 </span><span class="lineCov">      20000 :     if (!force_nd_im2col_ &amp;&amp; num_spatial_axes_ == 2) {</span>
<span class="lineNum">     100 </span><span class="lineCov">     200000 :       im2col_cpu(data, conv_in_channels_,</span>
<span class="lineNum">     101 </span><span class="lineCov">      40000 :           conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],</span>
<span class="lineNum">     102 </span><span class="lineCov">      40000 :           kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],</span>
<span class="lineNum">     103 </span><span class="lineCov">      40000 :           pad_.cpu_data()[0], pad_.cpu_data()[1],</span>
<span class="lineNum">     104 </span><span class="lineCov">      40000 :           stride_.cpu_data()[0], stride_.cpu_data()[1],</span>
<span class="lineNum">     105 </span><span class="lineCov">      40000 :           dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff);</span>
<span class="lineNum">     106 </span>            :     } else {
<span class="lineNum">     107 </span><span class="lineNoCov">          0 :       im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(),</span>
<span class="lineNum">     108 </span>            :           col_buffer_shape_.data(), kernel_shape_.cpu_data(),
<span class="lineNum">     109 </span>            :           pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), col_buff);
<a name="110"><span class="lineNum">     110 </span>            :     }</a>
<span class="lineNum">     111 </span><span class="lineCov">      20000 :   }</span>
<span class="lineNum">     112 </span><span class="lineNoCov">          0 :   inline void conv_col2im_cpu(const Dtype* col_buff, Dtype* data) {</span>
<span class="lineNum">     113 </span><span class="lineNoCov">          0 :     if (!force_nd_im2col_ &amp;&amp; num_spatial_axes_ == 2) {</span>
<span class="lineNum">     114 </span><span class="lineNoCov">          0 :       col2im_cpu(col_buff, conv_in_channels_,</span>
<span class="lineNum">     115 </span><span class="lineNoCov">          0 :           conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],</span>
<span class="lineNum">     116 </span><span class="lineNoCov">          0 :           kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],</span>
<span class="lineNum">     117 </span><span class="lineNoCov">          0 :           pad_.cpu_data()[0], pad_.cpu_data()[1],</span>
<span class="lineNum">     118 </span><span class="lineNoCov">          0 :           stride_.cpu_data()[0], stride_.cpu_data()[1],</span>
<span class="lineNum">     119 </span><span class="lineNoCov">          0 :           dilation_.cpu_data()[0], dilation_.cpu_data()[1], data);</span>
<span class="lineNum">     120 </span>            :     } else {
<span class="lineNum">     121 </span><span class="lineNoCov">          0 :       col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(),</span>
<span class="lineNum">     122 </span>            :           col_buffer_shape_.data(), kernel_shape_.cpu_data(),
<span class="lineNum">     123 </span>            :           pad_.cpu_data(), stride_.cpu_data(), dilation_.cpu_data(), data);
<span class="lineNum">     124 </span>            :     }
<span class="lineNum">     125 </span><span class="lineNoCov">          0 :   }</span>
<span class="lineNum">     126 </span>            : #ifndef CPU_ONLY
<span class="lineNum">     127 </span>            :   inline void conv_im2col_gpu(const Dtype* data, Dtype* col_buff) {
<span class="lineNum">     128 </span>            :     if (!force_nd_im2col_ &amp;&amp; num_spatial_axes_ == 2) {
<span class="lineNum">     129 </span>            :       im2col_gpu(data, conv_in_channels_,
<span class="lineNum">     130 </span>            :           conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
<span class="lineNum">     131 </span>            :           kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
<span class="lineNum">     132 </span>            :           pad_.cpu_data()[0], pad_.cpu_data()[1],
<span class="lineNum">     133 </span>            :           stride_.cpu_data()[0], stride_.cpu_data()[1],
<span class="lineNum">     134 </span>            :           dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff);
<span class="lineNum">     135 </span>            :     } else {
<span class="lineNum">     136 </span>            :       im2col_nd_gpu(data, num_spatial_axes_, num_kernels_im2col_,
<span class="lineNum">     137 </span>            :           conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
<span class="lineNum">     138 </span>            :           kernel_shape_.gpu_data(), pad_.gpu_data(),
<span class="lineNum">     139 </span>            :           stride_.gpu_data(), dilation_.gpu_data(), col_buff);
<span class="lineNum">     140 </span>            :     }
<span class="lineNum">     141 </span>            :   }
<span class="lineNum">     142 </span>            :   inline void conv_col2im_gpu(const Dtype* col_buff, Dtype* data) {
<span class="lineNum">     143 </span>            :     if (!force_nd_im2col_ &amp;&amp; num_spatial_axes_ == 2) {
<span class="lineNum">     144 </span>            :       col2im_gpu(col_buff, conv_in_channels_,
<span class="lineNum">     145 </span>            :           conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2],
<span class="lineNum">     146 </span>            :           kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1],
<span class="lineNum">     147 </span>            :           pad_.cpu_data()[0], pad_.cpu_data()[1],
<span class="lineNum">     148 </span>            :           stride_.cpu_data()[0], stride_.cpu_data()[1],
<span class="lineNum">     149 </span>            :           dilation_.cpu_data()[0], dilation_.cpu_data()[1], data);
<span class="lineNum">     150 </span>            :     } else {
<span class="lineNum">     151 </span>            :       col2im_nd_gpu(col_buff, num_spatial_axes_, num_kernels_col2im_,
<span class="lineNum">     152 </span>            :           conv_input_shape_.gpu_data(), col_buffer_.gpu_shape(),
<span class="lineNum">     153 </span>            :           kernel_shape_.gpu_data(), pad_.gpu_data(), stride_.gpu_data(),
<span class="lineNum">     154 </span>            :           dilation_.gpu_data(), data);
<span class="lineNum">     155 </span>            :     }
<span class="lineNum">     156 </span>            :   }
<span class="lineNum">     157 </span>            : #endif
<span class="lineNum">     158 </span>            : 
<span class="lineNum">     159 </span>            :   int num_kernels_im2col_;
<span class="lineNum">     160 </span>            :   int num_kernels_col2im_;
<span class="lineNum">     161 </span>            :   int conv_out_channels_;
<span class="lineNum">     162 </span>            :   int conv_in_channels_;
<span class="lineNum">     163 </span>            :   int conv_out_spatial_dim_;
<span class="lineNum">     164 </span>            :   int kernel_dim_;
<span class="lineNum">     165 </span>            :   int col_offset_;
<span class="lineNum">     166 </span>            :   int output_offset_;
<span class="lineNum">     167 </span>            : 
<span class="lineNum">     168 </span>            :   Blob&lt;Dtype&gt; col_buffer_;
<span class="lineNum">     169 </span>            :   Blob&lt;Dtype&gt; bias_multiplier_;
<span class="lineNum">     170 </span>            : };
<span class="lineNum">     171 </span>            : 
<span class="lineNum">     172 </span>            : }  // namespace caffe
<span class="lineNum">     173 </span>            : 
<span class="lineNum">     174 </span>            : #endif  // CAFFE_BASE_CONVOLUTION_LAYER_HPP_
</pre>
      </td>
    </tr>
  </table>
  <br>

  <!-- Footer: closing ruler plus generator attribution (LCOV 1.12). -->
  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="http://ltp.sourceforge.net/coverage/lcov.php" target="_parent">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
