<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">

<html lang="en">

<head>
  <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
  <title>LCOV - code analysis - include/caffe/syncedmem.hpp</title>
  <link rel="stylesheet" type="text/css" href="../../gcov.css">
</head>

<body>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="title">LCOV - code coverage report</td></tr>
    <tr><td class="ruler"><img src="../../glass.png" width=3 height=3 alt=""></td></tr>

    <tr>
      <td width="100%">
        <table cellpadding=1 border=0 width="100%">
          <tr>
            <td width="10%" class="headerItem">Current view:</td>
            <td width="35%" class="headerValue"><a href="../../index.html">top level</a> - <a href="index.html">include/caffe</a> - syncedmem.hpp<span style="font-size: 80%;"> (source / <a href="syncedmem.hpp.func-sort-c.html">functions</a>)</span></td>
            <td width="5%"></td>
            <td width="15%"></td>
            <td width="10%" class="headerCovTableHead">Hit</td>
            <td width="10%" class="headerCovTableHead">Total</td>
            <td width="15%" class="headerCovTableHead">Coverage</td>
          </tr>
          <tr>
            <td class="headerItem">Test:</td>
            <td class="headerValue">code analysis</td>
            <td></td>
            <td class="headerItem">Lines:</td>
            <td class="headerCovTableEntry">6</td>
            <td class="headerCovTableEntry">6</td>
            <td class="headerCovTableEntryHi">100.0 %</td>
          </tr>
          <tr>
            <td class="headerItem">Date:</td>
            <td class="headerValue">2020-09-11 22:50:33</td>
            <td></td>
            <td class="headerItem">Functions:</td>
            <td class="headerCovTableEntry">1</td>
            <td class="headerCovTableEntry">1</td>
            <td class="headerCovTableEntryHi">100.0 %</td>
          </tr>
          <tr>
            <td class="headerItem">Legend:</td>
            <td class="headerValueLeg">Lines:
              <span class="coverLegendCov">hit</span>
              <span class="coverLegendNoCov">not hit</span>
            </td>
            <td></td>
          </tr>
          <tr><td><img src="../../glass.png" width=3 height=3 alt=""></td></tr>
        </table>
      </td>
    </tr>

    <tr><td class="ruler"><img src="../../glass.png" width=3 height=3 alt=""></td></tr>
  </table>

  <table cellpadding=0 cellspacing=0 border=0>
    <tr>
      <td><br></td>
    </tr>
    <tr>
      <td>
<pre class="sourceHeading">          Line data    Source code</pre>
<pre class="source">
<a name="1"><span class="lineNum">       1 </span>            : #ifndef CAFFE_SYNCEDMEM_HPP_</a>
<span class="lineNum">       2 </span>            : #define CAFFE_SYNCEDMEM_HPP_
<span class="lineNum">       3 </span>            : 
<span class="lineNum">       4 </span>            : #include &lt;cstdlib&gt;
<span class="lineNum">       5 </span>            : 
<span class="lineNum">       6 </span>            : #ifdef USE_MKL
<span class="lineNum">       7 </span>            :   #include &quot;mkl.h&quot;
<span class="lineNum">       8 </span>            : #endif
<span class="lineNum">       9 </span>            : 
<span class="lineNum">      10 </span>            : #include &quot;caffe/common.hpp&quot;
<span class="lineNum">      11 </span>            : 
<span class="lineNum">      12 </span>            : namespace caffe {
<span class="lineNum">      13 </span>            : 
<span class="lineNum">      14 </span>            : // If CUDA is available and in GPU mode, host memory will be allocated pinned,
<span class="lineNum">      15 </span>            : // using cudaMallocHost. It avoids dynamic pinning for transfers (DMA).
<span class="lineNum">      16 </span>            : // The improvement in performance seems negligible in the single GPU case,
<a name="17"><span class="lineNum">      17 </span>            : // but might be more significant for parallel training. Most importantly,</a>
<span class="lineNum">      18 </span>            : // it improved stability for large models on many GPUs.
<span class="lineNum">      19 </span><span class="lineCov">        239 : inline void CaffeMallocHost(void** ptr, size_t size, bool* use_cuda) {</span>
<span class="lineNum">      20 </span>            : #ifndef CPU_ONLY
<span class="lineNum">      21 </span>            :   if (Caffe::mode() == Caffe::GPU) {
<span class="lineNum">      22 </span>            :     CUDA_CHECK(cudaMallocHost(ptr, size));
<span class="lineNum">      23 </span>            :     *use_cuda = true;
<span class="lineNum">      24 </span>            :     return;
<span class="lineNum">      25 </span>            :   }
<span class="lineNum">      26 </span>            : #endif
<span class="lineNum">      27 </span>            : #ifdef USE_MKL
<span class="lineNum">      28 </span>            :   *ptr = mkl_malloc(size ? size:1, 64);
<span class="lineNum">      29 </span>            : #else
<span class="lineNum">      30 </span><span class="lineCov">        239 :   *ptr = malloc(size);</span>
<span class="lineNum">      31 </span>            : #endif
<span class="lineNum">      32 </span><span class="lineCov">        239 :   *use_cuda = false;</span>
<span class="lineNum">      33 </span><span class="lineCov">        478 :   CHECK(*ptr) &lt;&lt; &quot;host allocation of size &quot; &lt;&lt; size &lt;&lt; &quot; failed&quot;;</span>
<span class="lineNum">      34 </span><span class="lineCov">        239 : }</span>
<span class="lineNum">      35 </span>            : 
<span class="lineNum">      36 </span>            : inline void CaffeFreeHost(void* ptr, bool use_cuda) {
<span class="lineNum">      37 </span>            : #ifndef CPU_ONLY
<span class="lineNum">      38 </span>            :   if (use_cuda) {
<span class="lineNum">      39 </span>            :     CUDA_CHECK(cudaFreeHost(ptr));
<span class="lineNum">      40 </span>            :     return;
<span class="lineNum">      41 </span>            :   }
<span class="lineNum">      42 </span>            : #endif
<span class="lineNum">      43 </span>            : #ifdef USE_MKL
<span class="lineNum">      44 </span>            :   mkl_free(ptr);
<span class="lineNum">      45 </span>            : #else
<span class="lineNum">      46 </span><span class="lineCov">        239 :   free(ptr);</span>
<span class="lineNum">      47 </span>            : #endif
<span class="lineNum">      48 </span>            : }
<span class="lineNum">      49 </span>            : 
<span class="lineNum">      50 </span>            : 
<span class="lineNum">      51 </span>            : /**
<span class="lineNum">      52 </span>            :  * @brief Manages memory allocation and synchronization between the host (CPU)
<span class="lineNum">      53 </span>            :  *        and device (GPU).
<span class="lineNum">      54 </span>            :  *
<span class="lineNum">      55 </span>            :  * TODO(dox): more thorough description.
<span class="lineNum">      56 </span>            :  */
<span class="lineNum">      57 </span>            : class SyncedMemory {
<span class="lineNum">      58 </span>            :  public:
<span class="lineNum">      59 </span>            :   SyncedMemory();
<span class="lineNum">      60 </span>            :   explicit SyncedMemory(size_t size);
<span class="lineNum">      61 </span>            :   ~SyncedMemory();
<span class="lineNum">      62 </span>            :   const void* cpu_data();
<span class="lineNum">      63 </span>            :   void set_cpu_data(void* data);
<span class="lineNum">      64 </span>            :   const void* gpu_data();
<span class="lineNum">      65 </span>            :   void set_gpu_data(void* data);
<span class="lineNum">      66 </span>            :   void* mutable_cpu_data();
<span class="lineNum">      67 </span>            :   void* mutable_gpu_data();
<span class="lineNum">      68 </span>            :   enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED };
<span class="lineNum">      69 </span>            :   SyncedHead head() const { return head_; }
<span class="lineNum">      70 </span>            :   size_t size() const { return size_; }
<span class="lineNum">      71 </span>            : 
<span class="lineNum">      72 </span>            : #ifndef CPU_ONLY
<span class="lineNum">      73 </span>            :   void async_gpu_push(const cudaStream_t&amp; stream);
<span class="lineNum">      74 </span>            : #endif
<span class="lineNum">      75 </span>            : 
<span class="lineNum">      76 </span>            :  private:
<span class="lineNum">      77 </span>            :   void check_device();
<span class="lineNum">      78 </span>            : 
<span class="lineNum">      79 </span>            :   void to_cpu();
<span class="lineNum">      80 </span>            :   void to_gpu();
<span class="lineNum">      81 </span>            :   void* cpu_ptr_;
<span class="lineNum">      82 </span>            :   void* gpu_ptr_;
<span class="lineNum">      83 </span>            :   size_t size_;
<span class="lineNum">      84 </span>            :   SyncedHead head_;
<span class="lineNum">      85 </span>            :   bool own_cpu_data_;
<span class="lineNum">      86 </span>            :   bool cpu_malloc_use_cuda_;
<span class="lineNum">      87 </span>            :   bool own_gpu_data_;
<span class="lineNum">      88 </span>            :   int device_;
<span class="lineNum">      89 </span>            : 
<span class="lineNum">      90 </span>            :   DISABLE_COPY_AND_ASSIGN(SyncedMemory);
<span class="lineNum">      91 </span>            : };  // class SyncedMemory
<span class="lineNum">      92 </span>            : 
<span class="lineNum">      93 </span>            : }  // namespace caffe
<span class="lineNum">      94 </span>            : 
<span class="lineNum">      95 </span>            : #endif  // CAFFE_SYNCEDMEM_HPP_
</pre>
      </td>
    </tr>
  </table>
  <br>

  <table width="100%" border=0 cellspacing=0 cellpadding=0>
    <tr><td class="ruler"><img src="../../glass.png" width=3 height=3 alt=""></td></tr>
    <tr><td class="versionInfo">Generated by: <a href="http://ltp.sourceforge.net/coverage/lcov.php" target="_parent">LCOV version 1.12</a></td></tr>
  </table>
  <br>

</body>
</html>
