<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <title>LCOV - code analysis - include/caffe/layers/hinge_loss_layer.hpp</title>
  <link rel="stylesheet" type="text/css" href="../../../gcov.css">
</head>

<body>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>

    <tr>
      <td width="100%">
        <table cellpadding=1 border=0 width="100%">
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../../index.html">top level</a> - <a href="index.html">include/caffe/layers</a> - hinge_loss_layer.hpp<span style="font-size: 80%;"> (source / <a href="hinge_loss_layer.hpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">0</td>
            <td class="headerCovTableEntry">4</td>
            <td class="headerCovTableEntryLo">0.0 %</td>
          </tr>
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:50:33</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">0</td>
            <td class="headerCovTableEntry">8</td>
            <td class="headerCovTableEntryLo">0.0 %</td>
          </tr>
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">Lines:
              <span class="coverLegendCov">hit</span>
              <span class="coverLegendNoCov">not hit</span>
            </td>
            <td></td>
          </tr>
          <tr><td><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
  </table>

  <table cellpadding=0 cellspacing=0 border=0>
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : #ifndef CAFFE_HINGE_LOSS_LAYER_HPP_</a>
<span class="lineNum">       2 </span>            : #define CAFFE_HINGE_LOSS_LAYER_HPP_
<span class="lineNum">       3 </span>            : 
<span class="lineNum">       4 </span>            : #include &lt;vector&gt;
<span class="lineNum">       5 </span>            : 
<span class="lineNum">       6 </span>            : #include &quot;caffe/blob.hpp&quot;
<span class="lineNum">       7 </span>            : #include &quot;caffe/layer.hpp&quot;
<span class="lineNum">       8 </span>            : #include &quot;caffe/proto/caffe.pb.h&quot;
<span class="lineNum">       9 </span>            : 
<span class="lineNum">      10 </span>            : #include &quot;caffe/layers/loss_layer.hpp&quot;
<span class="lineNum">      11 </span>            : 
<span class="lineNum">      12 </span>            : namespace caffe {
<span class="lineNum">      13 </span>            : 
<span class="lineNum">      14 </span>            : /**
<span class="lineNum">      15 </span>            :  * @brief Computes the hinge loss for a one-of-many classification task.
<span class="lineNum">      16 </span>            :  *
<span class="lineNum">      17 </span>            :  * @param bottom input Blob vector (length 2)
<span class="lineNum">      18 </span>            :  *   -# @f$ (N \times C \times H \times W) @f$
<span class="lineNum">      19 </span>            :  *      the predictions @f$ t @f$, a Blob with values in
<span class="lineNum">      20 </span>            :  *      @f$ [-\infty, +\infty] @f$ indicating the predicted score for each of
<span class="lineNum">      21 </span>            :  *      the @f$ K = CHW @f$ classes. In an SVM, @f$ t @f$ is the result of
<span class="lineNum">      22 </span>            :  *      taking the inner product @f$ X^T W @f$ of the D-dimensional features
<span class="lineNum">      23 </span>            :  *      @f$ X \in \mathcal{R}^{D \times N} @f$ and the learned hyperplane
<span class="lineNum">      24 </span>            :  *      parameters @f$ W \in \mathcal{R}^{D \times K} @f$, so a Net with just
<span class="lineNum">      25 </span>            :  *      an InnerProductLayer (with num_output = D) providing predictions to a
<span class="lineNum">      26 </span>            :  *      HingeLossLayer and no other learnable parameters or losses is
<span class="lineNum">      27 </span>            :  *      equivalent to an SVM.
<span class="lineNum">      28 </span>            :  *   -# @f$ (N \times 1 \times 1 \times 1) @f$
<span class="lineNum">      29 </span>            :  *      the labels @f$ l @f$, an integer-valued Blob with values
<span class="lineNum">      30 </span>            :  *      @f$ l_n \in [0, 1, 2, ..., K - 1] @f$
<span class="lineNum">      31 </span>            :  *      indicating the correct class label among the @f$ K @f$ classes
<span class="lineNum">      32 </span>            :  * @param top output Blob vector (length 1)
<span class="lineNum">      33 </span>            :  *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
<span class="lineNum">      34 </span>            :  *      the computed hinge loss: @f$ E =
<span class="lineNum">      35 </span>            :  *        \frac{1}{N} \sum\limits_{n=1}^N \sum\limits_{k=1}^K
<span class="lineNum">      36 </span>            :  *        [\max(0, 1 - \delta\{l_n = k\} t_{nk})] ^ p
<span class="lineNum">      37 </span>            :  *      @f$, for the @f$ L^p @f$ norm
<span class="lineNum">      38 </span>            :  *      (defaults to @f$ p = 1 @f$, the L1 norm; L2 norm, as in L2-SVM,
<span class="lineNum">      39 </span>            :  *      is also available), and @f$
<span class="lineNum">      40 </span>            :  *      \delta\{\mathrm{condition}\} = \left\{
<span class="lineNum">      41 </span>            :  *         \begin{array}{lr}
<span class="lineNum">      42 </span>            :  *            1 &amp; \mbox{if condition} \\
<span class="lineNum">      43 </span>            :  *           -1 &amp; \mbox{otherwise}
<span class="lineNum">      44 </span>            :  *         \end{array} \right.
<span class="lineNum">      45 </span>            :  *      @f$
<span class="lineNum">      46 </span>            :  *
<span class="lineNum">      47 </span>            :  * In an SVM, @f$ t \in \mathcal{R}^{N \times K} @f$ is the result of taking
<span class="lineNum">      48 </span>            :  * the inner product @f$ X^T W @f$ of the features
<span class="lineNum">      49 </span>            :  * @f$ X \in \mathcal{R}^{D \times N} @f$
<span class="lineNum">      50 </span>            :  * and the learned hyperplane parameters
<span class="lineNum">      51 </span>            :  * @f$ W \in \mathcal{R}^{D \times K} @f$. So, a Net with just an
<span class="lineNum">      52 </span>            :  * InnerProductLayer (with num_output = @f$k@f$) providing predictions to a
<span class="lineNum">      53 </span>            :  * HingeLossLayer is equivalent to an SVM (assuming it has no other learned
<span class="lineNum">      54 </span>            :  * outside the InnerProductLayer and no other losses outside the
<span class="lineNum">      55 </span>            :  * HingeLossLayer).
<a name="56"><span class="lineNum">      56 </span>            :  */</a>
<span class="lineNum">      57 </span>            : template &lt;typename Dtype&gt;
<a name="58"><span class="lineNum">      58 </span><span class="lineNoCov">          0 : class HingeLossLayer : public LossLayer&lt;Dtype&gt; {</span></a>
<span class="lineNum">      59 </span>            :  public:
<span class="lineNum">      60 </span><span class="lineNoCov">          0 :   explicit HingeLossLayer(const LayerParameter&amp; param)</span>
<a name="61"><span class="lineNum">      61 </span><span class="lineNoCov">          0 :       : LossLayer&lt;Dtype&gt;(param) {}</span></a>
<span class="lineNum">      62 </span>            : 
<span class="lineNum">      63 </span><span class="lineNoCov">          0 :   virtual inline const char* type() const { return &quot;HingeLoss&quot;; }</span>
<span class="lineNum">      64 </span>            : 
<span class="lineNum">      65 </span>            :  protected:
<span class="lineNum">      66 </span>            :   /// @copydoc HingeLossLayer
<span class="lineNum">      67 </span>            :   virtual void Forward_cpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom,
<span class="lineNum">      68 </span>            :       const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top);
<span class="lineNum">      69 </span>            : 
<span class="lineNum">      70 </span>            :   /**
<span class="lineNum">      71 </span>            :    * @brief Computes the hinge loss error gradient w.r.t. the predictions.
<span class="lineNum">      72 </span>            :    *
<span class="lineNum">      73 </span>            :    * Gradients cannot be computed with respect to the label inputs (bottom[1]),
<span class="lineNum">      74 </span>            :    * so this method ignores bottom[1] and requires !propagate_down[1], crashing
<span class="lineNum">      75 </span>            :    * if propagate_down[1] is set.
<span class="lineNum">      76 </span>            :    *
<span class="lineNum">      77 </span>            :    * @param top output Blob vector (length 1), providing the error gradient with
<span class="lineNum">      78 </span>            :    *      respect to the outputs
<span class="lineNum">      79 </span>            :    *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
<span class="lineNum">      80 </span>            :    *      This Blob's diff will simply contain the loss_weight* @f$ \lambda @f$,
<span class="lineNum">      81 </span>            :    *      as @f$ \lambda @f$ is the coefficient of this layer's output
<span class="lineNum">      82 </span>            :    *      @f$\ell_i@f$ in the overall Net loss
<span class="lineNum">      83 </span>            :    *      @f$ E = \lambda_i \ell_i + \mbox{other loss terms}@f$; hence
<span class="lineNum">      84 </span>            :    *      @f$ \frac{\partial E}{\partial \ell_i} = \lambda_i @f$.
<span class="lineNum">      85 </span>            :    *      (*Assuming that this top Blob is not used as a bottom (input) by any
<span class="lineNum">      86 </span>            :    *      other layer of the Net.)
<span class="lineNum">      87 </span>            :    * @param propagate_down see Layer::Backward.
<span class="lineNum">      88 </span>            :    *      propagate_down[1] must be false as we can't compute gradients with
<span class="lineNum">      89 </span>            :    *      respect to the labels.
<span class="lineNum">      90 </span>            :    * @param bottom input Blob vector (length 2)
<span class="lineNum">      91 </span>            :    *   -# @f$ (N \times C \times H \times W) @f$
<span class="lineNum">      92 </span>            :    *      the predictions @f$t@f$; Backward computes diff
<span class="lineNum">      93 </span>            :    *      @f$ \frac{\partial E}{\partial t} @f$
<span class="lineNum">      94 </span>            :    *   -# @f$ (N \times 1 \times 1 \times 1) @f$
<span class="lineNum">      95 </span>            :    *      the labels -- ignored as we can't compute their error gradients
<span class="lineNum">      96 </span>            :    */
<span class="lineNum">      97 </span>            :   virtual void Backward_cpu(const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; top,
<span class="lineNum">      98 </span>            :       const vector&lt;bool&gt;&amp; propagate_down, const vector&lt;Blob&lt;Dtype&gt;*&gt;&amp; bottom);
<span class="lineNum">      99 </span>            : };
<span class="lineNum">     100 </span>            : 
<span class="lineNum">     101 </span>            : 
<span class="lineNum">     102 </span>            : }  // namespace caffe
<span class="lineNum">     103 </span>            : 
<span class="lineNum">     104 </span>            : #endif  // CAFFE_HINGE_LOSS_LAYER_HPP_
</pre>
      </td>
    </tr>
  </table>
  <br>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="ruler"><img src="../../../glass.png" width=3 height=3 alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="https://ltp.sourceforge.net/coverage/lcov.php" target="_parent">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
