<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <title>LCOV - code analysis - include/caffe/layers/conv_layer.hpp</title>
  <link rel="stylesheet" type="text/css" href="../../../gcov.css">
</head>

<body>

  <table width="100%" border="0" cellspacing="0" cellpadding="0">
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>

    <tr>
      <td width="100%">
        <table cellpadding="1" border="0" width="100%">
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../../index.html">top level</a> - <a href="index.html">include/caffe/layers</a> - conv_layer.hpp<span style="font-size: 80%;"> (source / <a href="conv_layer.hpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">3</td>
            <td class="headerCovTableEntry">5</td>
            <td class="headerCovTableEntryLo">60.0 %</td>
          </tr>
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:25:26</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">2</td>
            <td class="headerCovTableEntry">10</td>
            <td class="headerCovTableEntryLo">20.0 %</td>
          </tr>
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">            Lines:
            <span class="coverLegendCov">hit</span>
            <span class="coverLegendNoCov">not hit</span>
</td>
            <td></td>
          </tr>
          <tr><td><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
  </table>

  <table cellpadding="0" cellspacing="0" border="0">
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : #ifndef CAFFE_CONV_LAYER_HPP_</a>
<span class="lineNum">       2 </span>            : #define CAFFE_CONV_LAYER_HPP_
<span class="lineNum">       3 </span>            : 
<span class="lineNum">       4 </span>            : #include &lt;vector&gt;
<span class="lineNum">       5 </span>            : 
<span class="lineNum">       6 </span>            : #include &quot;caffe/blob.hpp&quot;
<span class="lineNum">       7 </span>            : #include &quot;caffe/layer.hpp&quot;
<span class="lineNum">       8 </span>            : #include &quot;caffe/proto/caffe.pb.h&quot;
<span class="lineNum">       9 </span>            : 
<span class="lineNum">      10 </span>            : #include &quot;caffe/layers/base_conv_layer.hpp&quot;
<span class="lineNum">      11 </span>            : 
<span class="lineNum">      12 </span>            : namespace caffe {
<span class="lineNum">      13 </span>            : 
<span class="lineNum">      14 </span>            : /**
<span class="lineNum">      15 </span>            :  * @brief Convolves the input image with a bank of learned filters,
<span class="lineNum">      16 </span>            :  *        and (optionally) adds biases.
<span class="lineNum">      17 </span>            :  *
<span class="lineNum">      18 </span>            :  *   Caffe convolves by reduction to matrix multiplication. This achieves
<span class="lineNum">      19 </span>            :  *   high-throughput and generality of input and filter dimensions but comes at
<span class="lineNum">      20 </span>            :  *   the cost of memory for matrices. This makes use of efficiency in BLAS.
<span class="lineNum">      21 </span>            :  *
<span class="lineNum">      22 </span>            :  *   The input is &quot;im2col&quot; transformed to a channel K' x H x W data matrix
<span class="lineNum">      23 </span>            :  *   for multiplication with the N x K' x H x W filter matrix to yield a
<span class="lineNum">      24 </span>            :  *   N' x H x W output matrix that is then &quot;col2im&quot; restored. K' is the
<span class="lineNum">      25 </span>            :  *   input channel * kernel height * kernel width dimension of the unrolled
<span class="lineNum">      26 </span>            :  *   inputs so that the im2col matrix has a column for each input region to
<span class="lineNum">      27 </span>            :  *   be filtered. col2im restores the output spatial structure by rolling up
<span class="lineNum">      28 </span>            :  *   the output channel N' columns of the output matrix.
<a name="29"><span class="lineNum">      29 </span>            :  */</a>
<span class="lineNum">      30 </span>            : template &lt;typename Dtype&gt;
<span class="lineNum">      31 </span><span class="lineCov">          2 : class ConvolutionLayer : public BaseConvolutionLayer&lt;Dtype&gt; {</span>
<span class="lineNum">      32 </span>            :  public:
<span class="lineNum">      33 </span>            :   /**
<span class="lineNum">      34 </span>            :    * @param param provides ConvolutionParameter convolution_param,
<span class="lineNum">      35 </span>            :    *    with ConvolutionLayer options:
<span class="lineNum">      36 </span>            :    *  - num_output. The number of filters.
<span class="lineNum">      37 </span>            :    *  - kernel_size / kernel_h / kernel_w. The filter dimensions, given by
<span class="lineNum">      38 </span>            :    *  kernel_size for square filters or kernel_h and kernel_w for rectangular
<span class="lineNum">      39 </span>            :    *  filters.
<span class="lineNum">      40 </span>            :    *  - stride / stride_h / stride_w (\b optional, default 1). The filter
<span class="lineNum">      41 </span>            :    *  stride, given by stride_size for equal dimensions or stride_h and stride_w
<span class="lineNum">      42 </span>            :    *  for different strides. By default the convolution is dense with stride 1.
<span class="lineNum">      43 </span>            :    *  - pad / pad_h / pad_w (\b optional, default 0). The zero-padding for
<span class="lineNum">      44 </span>            :    *  convolution, given by pad for equal dimensions or pad_h and pad_w for
<span class="lineNum">      45 </span>            :    *  different padding. Input padding is computed implicitly instead of
<span class="lineNum">      46 </span>            :    *  actually padding.
<span class="lineNum">      47 </span>            :    *  - dilation (\b optional, default 1). The filter
<span class="lineNum">      48 </span>            :    *  dilation, given by dilation_size for equal dimensions for different
<span class="lineNum">      49 </span>            :    *  dilation. By default the convolution has dilation 1.
<span class="lineNum">      50 </span>            :    *  - group (\b optional, default 1). The number of filter groups. Group
<span class="lineNum">      51 </span>            :    *  convolution is a method for reducing parameterization by selectively
<span class="lineNum">      52 </span>            :    *  connecting input and output channels. The input and output channel dimensions must be divisible
<span class="lineNum">      53 </span>            :    *  by the number of groups. For group @f$ \geq 1 @f$, the
<span class="lineNum">      54 </span>            :    *  convolutional filters' input and output channels are separated s.t. each
<span class="lineNum">      55 </span>            :    *  group takes 1 / group of the input channels and makes 1 / group of the
<span class="lineNum">      56 </span>            :    *  output channels. Concretely 4 input channels, 8 output channels, and
<span class="lineNum">      57 </span>            :    *  2 groups separate input channels 1-2 and output channels 1-4 into the
<span class="lineNum">      58 </span>            :    *  first group and input channels 3-4 and output channels 5-8 into the second
<span class="lineNum">      59 </span>            :    *  group.
<span class="lineNum">      60 </span>            :    *  - bias_term (\b optional, default true). Whether to have a bias.
<span class="lineNum">      61 </span>            :    *  - engine: convolution has CAFFE (matrix multiplication) and CUDNN (library
<a name="62"><span class="lineNum">      62 </span>            :    *    kernels + stream parallelism) engines.</a>
<span class="lineNum">      63 </span>            :    */
<span class="lineNum">      64 </span><span class="lineNoCov">          0 :   explicit ConvolutionLayer(const LayerParameter&amp; param)</span>
<a name="65"><span class="lineNum">      65 </span><span class="lineCov">          2 :       : BaseConvolutionLayer&lt;Dtype&gt;(param) {}</span></a>
<span class="lineNum">      66 </span>            : 
<span class="lineNum">      67 </span><span class="lineNoCov">          0 :   virtual inline const char* type() const { return &quot;Convolution&quot;; }</span>
<span class="lineNum">      68 </span>            : 
<span class="lineNum">      69 </span>            :  protected:
<span class="lineNum">      70 </span>            :   virtual void Forward_cpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,
<span class="lineNum">      71 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top);
<span class="lineNum">      72 </span>            :   virtual void Forward_gpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,
<span class="lineNum">      73 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top);
<span class="lineNum">      74 </span>            :   virtual void Backward_cpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top,
<span class="lineNum">      75 </span>            :       const vector&lt;bool&gt;&amp; propagate_down, const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom);
<a name="76"><span class="lineNum">      76 </span>            :   virtual void Backward_gpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top,</a>
<span class="lineNum">      77 </span>            :       const vector&lt;bool&gt;&amp; propagate_down, const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom);
<span class="lineNum">      78 </span><span class="lineCov">       1416 :   virtual inline bool reverse_dimensions() { return false; }</span>
<span class="lineNum">      79 </span>            :   virtual void compute_output_shape();
<span class="lineNum">      80 </span>            : };
<span class="lineNum">      81 </span>            : 
<span class="lineNum">      82 </span>            : }  // namespace caffe
<span class="lineNum">      83 </span>            : 
<span class="lineNum">      84 </span>            : #endif  // CAFFE_CONV_LAYER_HPP_
</pre>
      </td>
    </tr>
  </table>
  <br>

  <table width="100%" border="0" cellspacing="0" cellpadding="0">
    <tr><td class="ruler"><img src="../../../glass.png" width="3" height="3" alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="https://ltp.sourceforge.net/coverage/lcov.php" target="_parent" rel="noopener">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
