<!DOCTYPE HTML>
<html lang="zh-CN">


<head>
    <meta charset="utf-8">
    <meta name="keywords" content="Paper阅读笔记 1: ImageNet Classification with Deep Convolutional Neural Networks, python,machine learning,deep learning,html,css,c,c++,cpp,cmake,ros,linux,ubuntu">
    <meta name="description" content="本文是NIPS 2012的ImageNet Classification with Deep Convolutional Neural Networks的阅读笔记">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <meta name="renderer" content="webkit|ie-stand|ie-comp">
    <meta name="mobile-web-app-capable" content="yes">
    <meta name="format-detection" content="telephone=no">
    <meta name="apple-mobile-web-app-capable" content="yes">
    <meta name="apple-mobile-web-app-status-bar-style" content="black-translucent">
    <meta name="referrer" content="no-referrer-when-downgrade">
    <!-- Global site tag (gtag.js) - Google Analytics -->


    <title>Paper阅读笔记 1: ImageNet Classification with Deep Convolutional Neural Networks | JackWang&#39;s Blog</title>
    <link rel="icon" type="image/png" href="/favicon.png">

    <link rel="stylesheet" type="text/css" href="/libs/awesome/css/all.min.css">
    <link rel="stylesheet" type="text/css" href="/libs/materialize/materialize.min.css">
    <link rel="stylesheet" type="text/css" href="/libs/aos/aos.css">
    <link rel="stylesheet" type="text/css" href="/libs/animate/animate.min.css">
    <link rel="stylesheet" type="text/css" href="/libs/lightGallery/css/lightgallery.min.css">
    <link rel="stylesheet" type="text/css" href="/css/matery.css">
    <link rel="stylesheet" type="text/css" href="/css/my.css">

    <script src="/libs/jquery/jquery-3.6.0.min.js"></script>

<meta name="generator" content="Hexo 5.4.2">
<style>.github-emoji { position: relative; display: inline-block; width: 1.2em; min-height: 1.2em; overflow: hidden; vertical-align: top; color: transparent; }  .github-emoji > span { position: relative; z-index: 10; }  .github-emoji img, .github-emoji .fancybox { margin: 0 !important; padding: 0 !important; border: none !important; outline: none !important; text-decoration: none !important; user-select: none !important; cursor: auto !important; }  .github-emoji img { height: 1.2em !important; width: 1.2em !important; position: absolute !important; left: 50% !important; top: 50% !important; transform: translate(-50%, -50%) !important; user-select: none !important; cursor: auto !important; } .github-emoji-fallback { color: inherit; } .github-emoji-fallback img { opacity: 0 !important; }</style>
<link rel="stylesheet" href="/css/prism-tomorrow.css" type="text/css">
<link rel="stylesheet" href="/css/prism-line-numbers.css" type="text/css"></head>



   <style>
    body{
       background-image: url(https://cdn.jsdelivr.net/gh/Tokisaki-Galaxy/res/site/medias/background.jpg);
       background-repeat:no-repeat;
       background-size: 100% 100%;
       background-attachment:fixed;
    }
</style>



<body>
    <header class="navbar-fixed">
    <nav id="headNav" class="bg-color nav-transparent">
        <div id="navContainer" class="nav-wrapper container">
            <div class="brand-logo">
                <a href="/" class="waves-effect waves-light">
                    
                    <img src="/medias/logo.png" class="logo-img" alt="LOGO">
                    
                    <span class="logo-span">JackWang&#39;s Blog</span>
                </a>
            </div>
            

<a href="#" data-target="mobile-nav" class="sidenav-trigger button-collapse"><i class="fas fa-bars"></i></a>
<ul class="right nav-menu">
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/" class="waves-effect waves-light">
      
      <i class="fas fa-home" style="zoom: 0.6;"></i>
      
      <span>首页</span>
    </a>
    
  </li>
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="" class="waves-effect waves-light">

      
      <i class="fas fa-book-reader" style="zoom: 0.6;"></i>
      
      <span>博客</span>
      <i class="fas fa-chevron-down" aria-hidden="true" style="zoom: 0.6;"></i>
    </a>
    <ul class="sub-nav menus_item_child ">
      
      <li>
        <a href="/tags">
          
          <i class="fas fa-tags" style="margin-top: -20px; zoom: 0.6;"></i>
          
	  <span>按标签归类文章</span>
        </a>
      </li>
      
      <li>
        <a href="/categories">
          
          <i class="fas fa-bookmark" style="margin-top: -20px; zoom: 0.6;"></i>
          
	  <span>按目录归类文章</span>
        </a>
      </li>
      
      <li>
        <a href="/archives">
          
          <i class="fas fa-archive" style="margin-top: -20px; zoom: 0.6;"></i>
          
	  <span>按日期分类文章</span>
        </a>
      </li>
      
    </ul>
    
  </li>
  
  <li class="hide-on-med-and-down nav-item">
    
    <a href="/about" class="waves-effect waves-light">
      
      <i class="fas fa-user-circle" style="zoom: 0.6;"></i>
      
      <span>关于</span>
    </a>
    
  </li>
  
  <li>
    <a href="#searchModal" class="modal-trigger waves-effect waves-light">
      <i id="searchIcon" class="fas fa-search" title="搜索" style="zoom: 0.85;"></i>
    </a>
  </li>
</ul>



<div id="mobile-nav" class="side-nav sidenav">

    <div class="mobile-head bg-color">
        
        <img src="/medias/logo.png" class="logo-img circle responsive-img" alt="LOGO">
        
        <div class="logo-name">JackWang&#39;s Blog</div>
        <div class="logo-desc">
            
            JackWang的个人博客
            
        </div>
    </div>

    <ul class="menu-list mobile-menu-list">
        
        <li class="m-nav-item">
	  
		<a href="/" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-home"></i>
			
			首页
		</a>
          
        </li>
        
        <li class="m-nav-item">
	  
		<a href="javascript:;">
			
				<i class="fa-fw fas fa-book-reader"></i>
			
			博客
			<span class="m-icon"><i class="fas fa-chevron-right"></i></span>
		</a>
            <ul  style="background:  ;" >
              
                <li>

                  <a href="/tags " style="margin-left:75px">
				  
				   <i class="fa fas fa-tags" style="position: absolute;left:50px" ></i>
			      
                              <span>按标签归类文章</span>

                  </a>
                </li>
              
                <li>

                  <a href="/categories " style="margin-left:75px">
				  
				   <i class="fa fas fa-bookmark" style="position: absolute;left:50px" ></i>
			      
                              <span>按目录归类文章</span>

                  </a>
                </li>
              
                <li>

                  <a href="/archives " style="margin-left:75px">
				  
				   <i class="fa fas fa-archive" style="position: absolute;left:50px" ></i>
			      
                              <span>按日期分类文章</span>

                  </a>
                </li>
              
            </ul>
          
        </li>
        
        <li class="m-nav-item">
	  
		<a href="/about" class="waves-effect waves-light">
			
			    <i class="fa-fw fas fa-user-circle"></i>
			
			关于
		</a>
          
        </li>
        
        
    </ul>
</div>


        </div>

        
    </nav>

</header>

    
<script src="/libs/cryptojs/crypto-js.min.js"></script>
<script>
    (function() {
        // Password gate injected by the theme: `pwd` holds the SHA-256 hex
        // digest of the article password, or the empty string for public posts
        // (as here), in which case the whole branch is skipped.
        let pwd = '';
        if (pwd && pwd.length > 0) {
            // prompt() returns null when the visitor cancels the dialog;
            // treat that as a wrong password instead of hashing null.
            let input = prompt('抱歉，这篇文章并不想让所有人都看到，请输入授权密码观看');
            if (input === null || pwd !== CryptoJS.SHA256(input).toString(CryptoJS.enc.Hex)) {
                alert('密码错误，将返回主页！');
                location.href = '/';
            }
        }
    })();
</script>




<div class="bg-cover pd-header post-cover" style="background-image: url('https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220725205629608.png')">
    <div class="container" style="right: 0px;left: 0px;">
        <div class="row">
            <div class="col s12 m12 l12">
                <div class="brand">
                    <h1 class="description center-align post-title">Paper阅读笔记 1: ImageNet Classification with Deep Convolutional Neural Networks</h1>
                </div>
            </div>
        </div>
    </div>
</div>




<main class="post-container content">

    
    <link rel="stylesheet" href="/libs/tocbot/tocbot.css">
<style>
    #articleContent h1::before,
    #articleContent h2::before,
    #articleContent h3::before,
    #articleContent h4::before,
    #articleContent h5::before,
    #articleContent h6::before {
        display: block;
        content: " ";
        height: 100px;
        margin-top: -100px;
        visibility: hidden;
    }

    #articleContent :focus {
        outline: none;
    }

    .toc-fixed {
        position: fixed;
        top: 64px;
    }

    .toc-widget {
        width: 345px;
        padding-left: 20px;
    }

    .toc-widget .toc-title {
        padding: 35px 0 15px 17px;
        font-size: 1.5rem;
        font-weight: bold;
        line-height: 1.5rem;
    }

    .toc-widget ol {
        padding: 0;
        list-style: none;
    }

    #toc-content {
        padding-bottom: 30px;
        overflow: auto;
    }

    #toc-content ol {
        padding-left: 10px;
    }

    #toc-content ol li {
        padding-left: 10px;
    }

    #toc-content .toc-link:hover {
        color: #42b983;
        font-weight: 700;
        text-decoration: underline;
    }

    #toc-content .toc-link::before {
        background-color: transparent;
        max-height: 25px;

        position: absolute;
        right: 23.5vw;
        display: block;
    }

    #toc-content .is-active-link {
        color: #42b983;
    }

    #floating-toc-btn {
        position: fixed;
        right: 15px;
        bottom: 76px;
        padding-top: 15px;
        margin-bottom: 0;
        z-index: 998;
    }

    #floating-toc-btn .btn-floating {
        width: 48px;
        height: 48px;
    }

    #floating-toc-btn .btn-floating i {
        line-height: 48px;
        font-size: 1.4rem;
    }
</style>
<div class="row">
    <div id="main-content" class="col s12 m12 l9">
        <!-- 文章内容详情 -->
<div id="artDetail">
    <div class="card">
        <div class="card-content article-info">
            <div class="row tag-cate">
                <div class="col s7">
                    
                    <div class="article-tag">
                        
                            <a href="/tags/AlexNet/">
                                <span class="chip bg-color">AlexNet</span>
                            </a>
                        
                            <a href="/tags/NIPS/">
                                <span class="chip bg-color">NIPS</span>
                            </a>
                        
                            <a href="/tags/NIPS-2012/">
                                <span class="chip bg-color">NIPS 2012</span>
                            </a>
                        
                            <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">
                                <span class="chip bg-color">深度学习</span>
                            </a>
                        
                            <a href="/tags/Deep-Learning/">
                                <span class="chip bg-color">Deep Learning</span>
                            </a>
                        
                    </div>
                    
                </div>
                <div class="col s5 right-align">
                    
                    <div class="post-cate">
                        <i class="fas fa-bookmark fa-fw icon-category"></i>
                        
                            <a href="/categories/Paper%E9%98%85%E8%AF%BB%E7%AC%94%E8%AE%B0/" class="post-category">
                                Paper阅读笔记
                            </a>
                        
                            <a href="/categories/Paper%E9%98%85%E8%AF%BB%E7%AC%94%E8%AE%B0/Image-Classification/" class="post-category">
                                Image Classification
                            </a>
                        
                    </div>
                    
                </div>
            </div>

            <div class="post-info">
                
                <div class="post-date info-break-policy">
                    <i class="far fa-calendar-minus fa-fw"></i>发布日期:&nbsp;&nbsp;
                    2022-07-26
                </div>
                

                
                <div class="post-date info-break-policy">
                    <i class="far fa-calendar-check fa-fw"></i>更新日期:&nbsp;&nbsp;
                    2023-06-01
                </div>
                

                
                <div class="info-break-policy">
                    <i class="far fa-file-word fa-fw"></i>文章字数:&nbsp;&nbsp;
                    6.2k
                </div>
                

                
                <div class="info-break-policy">
                    <i class="far fa-clock fa-fw"></i>阅读时长:&nbsp;&nbsp;
                    23 分
                </div>
                

                
                    <div id="busuanzi_container_page_pv" class="info-break-policy">
                        <i class="far fa-eye fa-fw"></i>阅读次数:&nbsp;&nbsp;
                        <span id="busuanzi_value_page_pv"></span>
                    </div>
				
            </div>
        </div>
        <hr class="clearfix">

        

        

        <div class="card-content article-card-content">
            <div id="articleContent">
                <p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220725205629608.png" alt="NIPS 2012: ImageNet Classification with Deep Convolutional Neural Networks"></p>
<p>看这篇文章的阅读笔记前，建议先去看一下这篇阅读笔记的前言：<a href="./Paper阅读笔记-1-ImageNet-Classification-with-Deep-Convolutional-Neural-Networks（前言）.md">前言</a></p>
<h2 id="0-评价"><a href="#0-评价" class="headerlink" title="0. 评价"></a>0. 评价</h2><p>我对这篇文章的评价如下：</p>
<ul>
<li><p><strong>这篇文章首先反应了当时的时代背景</strong></p>
<blockquote>
<p>现在看来，这个已经是上古时期的文章了。而在上古时期（2012-2016），网络如果泛化性能上不去，就会认为是模型overfit了小的训练集，因为模型学习能力太强，所以学到了小数据集的bias。然而其实在今天，我们知道模型的泛化能力差并不一定是overfit了小数据集，完全有可能是因为模型optimization做的不好，也就是进行了optimization，但又没有完全进行。更形象的说，你还没有达到一个比较好的Local Minima，就去和容易optimize的模型的好的Local Minima去比较。</p>
<p>那么，意识到泛化性能差可能不是由于overfit而是由于优化存在问题这件事是在<strong>2016年的ResNet中发现的</strong>，ResNet文章中提出了做了一个思想实验，<code>Identical Mapping</code>，从而说明深的模型的性能下界至少是浅的模型的性能（放心吧，ResNet未来一定会写笔记的）。因此在2012-2016年间，大家在做的事情的可以分成三个：</p>
<ul>
<li>就是寻找能够使得泛化性能更好的方法，包括引入正则（Batch Norm）、对梯度下降下手……</li>
<li>提出新的，准确率更佳的模型，例如：GoogLeNet、VGGNet等等；</li>
<li>把深度学习，或者说CNN当成工具去解决不同领域的问题，就类似于CNN是一个Hammer，在不同的领域去找钉子砸</li>
</ul>
<p>虽然说很多文章的模型得到的了比较好的效果，但是他们并没有意识到它们的方法其实是提升了优化的效果，还是认为他们减轻了overfit小数据集。</p>
</blockquote>
</li>
<li><p>其次，这篇文章开启了未来一段时间内的研究</p>
</li>
</ul>
<p>而具体的，这篇文章的贡献有：</p>
<ul>
<li><p><strong>使用了<code>ReLu</code>激活函数，大大加速了模型的训练</strong></p>
<blockquote>
<p>这篇文章发表前，有很多的模型性能也提不上去，大家就会说是模型的Overfitting问题很严重，但其实根本原因还是模型没有优化好，而先前的模型没有优化好的一个重要原因就是使用tanh、sigmoid这类激活函数激活函数太复杂了，优化太慢、太复杂</p>
</blockquote>
</li>
<li><p><strong>文章使用到的数据增强方法，成为后来数据增强的标准化方法</strong></p>
<blockquote>
<p>文章把原始输入图像截取出$224\times224$的Patch，这一尺寸和方法成为后来诸多方法沿用的尺寸</p>
</blockquote>
</li>
<li><p>文章在测试阶段使用的数据增强成为后来测试方法的标准。</p>
<blockquote>
<p>文章把一个测试图像，左上角、右上角、左下角、右下角和中心截取出来5个Patch进行Voting，这一方法成为后来方法测试的标注。</p>
</blockquote>
</li>
</ul>
<p>下面就让我们一起来解开这篇论文的庐山真面目。</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220725210417633.png" alt="文章阅读记录"></p>
<h2 id="1-动机"><a href="#1-动机" class="headerlink" title="1. 动机"></a>1. 动机</h2><p>按照我的习惯，解析一篇论文时一定要弄清楚论文的动机是什么？弄清楚作者为什么要这么做，目的是什么，以及解决了一个什么样的问题？只有了解清楚了这一过程，才有助于引导自己形成类似的思维模式，从而顺着这一思路想出自己的改进点，而不仅仅只是说会实现这个网络了，能跑通代码就完事。技术只是外功，思想与理念才是内功。</p>
<h3 id="动机1：CNN容量更大、学习能力更强"><a href="#动机1：CNN容量更大、学习能力更强" class="headerlink" title="动机1：CNN容量更大、学习能力更强"></a>动机1：CNN容量更大、学习能力更强</h3><p>在论文的介绍部分作者提到，目前进行物体识别的方法在根本上都使用了机器学习的方法。而为了提高模型的性能，人们收集了大量的数据集，早期的数据集中标记的数据量只有上千、上万张图像，这个体量的数据集对于一些简单的任务来说已经足够了，例如MNIST手写数字识别问题。但是，<strong>现实中的物体却有非常大的多样性（Diversity），因此学习如何识别现实中的物体就需要更大的训练数据。</strong></p>
<p>所以就有了ImageNet这样大的数据集、在有了大的数据量之后，现在问题就是模型有没有能力去学习这么大的数据集。因此，一个真正实用的模型必须要有要拥有很多的容量来学习。而卷积神经网络(CNN)就是一类拥有强大学习能力的模型。CNN的容量能够通过其深度与广度进行控制。</p>
<h3 id="动机2：GPU的硬件支持"><a href="#动机2：GPU的硬件支持" class="headerlink" title="动机2：GPU的硬件支持"></a>动机2：GPU的硬件支持</h3><p>文章说了很多CNN的非常诱人的性质，包括但不限于学习容量大、性能高。但是训练一个神经网络并且把它们放到大规模数据集上进行验证的代价依旧是非常高昂的。因此，文章的第二个动机就是，在2011年前后，NVIDIA推出的GPU具有高算力，并且支持用户编程调用GPU去进行计算。而在2011年，已经有了能够在GPU上运行的、高度优化的2D卷积实现，因此为训练大规模的CNN提供了动力。所以使得在ImageNet上去训练一个大型的CNN成为了可能。</p>
<p>总结一下，这两个动机，一个是说CNN这个东西好，另外一个是说CNN这个东西用GPU可以训练出来。所以自然而然，这篇文章要用CNN去进行物体识别。</p>
<h2 id="2-思路"><a href="#2-思路" class="headerlink" title="2. 思路"></a>2. 思路</h2><p>既然前面在动机上说了ImageNet有足够多的样本，支持我们学习出来强大的模型，而CNN又有足够的容量去从ImageNet中学习到表示（Representation），GPU又为CNN的训练提供了支持。因此，自然而然，<strong>这篇文章的思路就是用GPU在ImageNet上训练一个CNN</strong>。</p>
<p>所以，这篇文章的思路其实非常简单，精彩之处还要看后面方法部分的几个创新。</p>
<h2 id="3-技术手段（方法）"><a href="#3-技术手段（方法）" class="headerlink" title="3. 技术手段（方法）"></a>3. 技术手段（方法）</h2><p>相比于方法，其实我更愿意称文章的这一部分为技术手段，因为正如我在前面所说的：</p>
<blockquote>
<p>按照我的习惯，解析一篇论文时一定要弄清楚论文的动机是什么？弄清楚作者为什么要这么做，目的是什么，以及解决了一个什么样的问题？只有了解清楚了这一过程，才有助于引导自己形成类似的思维模式，从而顺着这一思路想出自己的改进点，而不仅仅只是说会实现这个网络了，能跑通代码就完事。技术只是外功，思想与理念才是内功。</p>
</blockquote>
<p>读一篇文章最重要的就是了解其思想，明白其思路，而技术技巧只不过是其思想的承载罢了。所以下面就讲解这篇文章的几个技术的要点。</p>
<h3 id="A-ReLU-Nonlinearity"><a href="#A-ReLU-Nonlinearity" class="headerlink" title="A. ReLU Nonlinearity"></a>A. ReLU Nonlinearity</h3><p>文章在技术上的第一个创新就是使用了<code>Rectified Linear Units (ReLUs)</code>来作为激活层的激活函数。因为之前的文章，往往都是使用<code>Sigmoid</code>、<code>tanh</code>这样的激活函数。而<code>Sigmoid</code>和<code>tanh</code>这样的激活函数会造成<code>梯度消失（Vanishing Gradient)</code>和<code>梯度爆炸（Gradient Explosion）</code>这样的问题。</p>
<blockquote>
<p><code>梯度消失（Vanishing Gradient）</code>：反向传播的时候，上一层的梯度不为0，但是经过这一层之后梯度就变成0了。这是因为这一层的参数绝大多数都为0，导致的局部导数为0。这个问题对于<code>Sigmoid</code>和<code>tanh</code>这样在边缘饱和的激活函数来说非常常见。</p>
<p><code>梯度爆炸（Gradient Explosion）</code>：反向传播的时候，上一层的梯度是一个合理的数（一般模在1左右），但是经过这一层之后梯度就变成几百几千了，很快就会让模型里的参数变成NaN。</p>
</blockquote>
<p>这两个激活函数除了会对梯度造成影响外，还会大量的计算$e^x$这样的取指数操作，会消耗大量的计算资源。因此，传统的网络中大量的使用<code>Sigmoid</code>和<code>tanh</code>这样的函数，一个是梯度无法更新，造成难以优化，另一个是计算量太大，假设对梯度没有影响，优化花费时间也要很久。</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220726143324026.png" alt="三种激活函数"></p>
<p>因此就换成了<code>ReLu</code>激活函数，<code>ReLu</code>的表达式如下</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/f8dd-kkmphps7572766.png" alt="ReLu激活函数的表达式"></p>
<p>其梯度要么为0，要么为1，计算只需要一个简单的比较运算即可。因此满足了计算量小、梯度维持在合理的区间内的这一个要求。</p>
<p>为了说明<code>ReLu</code>的高效性，作者用了一个四层的网络，网络中的激活函数分别使用<code>Sigmoid</code>和<code>ReLu</code>进行了两次实验，得到的实验结果如下：</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220726143831843.png" alt="ReLu性能的实验图"></p>
<blockquote>
<p>Figure 1: A four-layer convolutional neural network with ReLUs (solid line) reaches a 25% training error rate on CIFAR-10 six times faster than an equivalent network with tanh neurons(dashed line). The learning rates for each network were chosen independently to make training as fast as possible. No regularization of any kind was employed. The magnitude of the effect demonstrated here varies with network architecture, but networks with ReLUs consistently learn several times faster than equivalents with saturating neurons.</p>
</blockquote>
<p>即同样是达到25%的错误率，使用<code>ReLu</code>的模型比使用<code>Sigmoid</code>的模型速度快了6倍。而且不管怎么为<code>Sigmoid</code>的模型调学习率，总是使用<code>ReLu</code>的模型速度更快。</p>
<p>因此这就说明了使用ReLu可以大大简化模型的训练。</p>
<h3 id="B-Training-on-Multiple-GPUs"><a href="#B-Training-on-Multiple-GPUs" class="headerlink" title="B. Training on Multiple GPUs"></a>B. Training on Multiple GPUs</h3><p>这个手段其实没啥好说的，主要就是写代码实现一下，原文这段主要就是介绍了一下实现的细节：</p>
<blockquote>
<p>A single GTX 580 GPU has only 3GB of memory, which limits the maximum size of the networks that can be trained on it. It turns out that 1.2 million training examples are enough to train networks which are too big to fifit on one GPU. Therefore we spread the net across two GPUs. Current GPUs are particularly well-suited to cross-GPU parallelization, as they are able to read from and write to one another’s memory directly, without going through host machine memory. The parallelization scheme that we employ essentially puts half of the kernels (or neurons) on each GPU, with one additional trick: the GPUs communicate only in certain layers. This means that, for example, the kernels of layer 3 take input from all kernel maps in layer 2. However, kernels in layer 4 take input only from those kernel maps in layer 3 which reside on the same GPU. Choosing the pattern of connectivity is a problem for cross-validation, but this allows us to precisely tune the amount of communication until it is an acceptable fraction of the amount of computation.</p>
</blockquote>
<p>现在<code>Pytorch</code>中已经有多卡训练的框架，我们直接调用即可。</p>
<h3 id="C-Local-Response-Normalization"><a href="#C-Local-Response-Normalization" class="headerlink" title="C. Local Response Normalization"></a>C. Local Response Normalization</h3><p>这篇文章的第三个创新点就是提出了一个新的网络层，称为Local Response Normalization（LRN）。这个层的原理就是观察到人类的大脑皮层中有这样一个现象：一个活跃的神经元会抑制周围的几个神经元。那么对于神经网络来说，神经元之间的连接是依靠参数矩阵实现的，所以就是大的经过激活函数激活后的激活值会抑制局部其他的值。</p>
<p>所以操作起来就是在每一个神经元局部，比如3*3的小范围内，根据这个神经元的激活值去调整这九个激活值。调整的公式如下：    </p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220726144948815.png" alt="Local Response Normalization（LRN）的计算公式"></p>
<blockquote>
<p>Denoting by $a^i_{x,y}$, the activity of a neuron computed by applying kernel $i$ at position $(x, y)$ and then applying the ReLU nonlinearity, the response-normalized activity $b^i_{x,y}$ is given by the expression, where the sum runs over $n$ “adjacent” kernel maps at the same spatial position, and $N$ is the total number of kernels in the layer.</p>
</blockquote>
<p>不过其实现在看，LRN这个东西其实没啥用，这篇文章的几个技术里用处最大的就是<code>ReLU</code>和模型结构。</p>
<h3 id="D-Overlapping-Pooling"><a href="#D-Overlapping-Pooling" class="headerlink" title="D. Overlapping Pooling"></a>D. Overlapping Pooling</h3><p>这段是说，CNN中的池化层可以帮助网络来总结（Summarize）同一个卷积核计算得到的<code>activation map</code>中相邻的几个激活值。传统上的池化，池化窗口之间是没有重叠的。更加准确的说，假设一个池化层是$z\times z$大小的，每次水平或者竖直移动$s$个像素。如果$s=z$的话那么就是传统的池化，但是如果$s&lt;z$的话就是这里说的重叠池化（Overlapping Pooling）。这篇文章里取$s=2,z=3$。</p>
<p>在今天来说，所谓Overlapping Pooling其实都是调API的时候指定一下<code>stride</code>就行。</p>
<h3 id="E-Overall-Architecture"><a href="#E-Overall-Architecture" class="headerlink" title="E. Overall Architecture"></a>E. Overall Architecture</h3><p>接下来就是模型整体的结构了。网络包含八个含有可训练参数的层，前五层是卷积层，后三个是全连接层。最后一个全连接层的输出是1000维的向量。这个向量而后被softmax吃进去，计算得到在1000个类别标签上的分布。最后预测输入图像属于哪个类就是取最后经过逻辑回归后得到的最大的那个值属于的类。</p>
<p>因为这篇文章的网络拆分到了两个GPU上，所以文章还对怎么拆分的进行了一下说明：</p>
<ul>
<li>第三四五层的卷积层都只连接到了相同GPU上的前一层的输出，而第三层卷积层除了接受自己这个GPU上的输出以外，还连接了另外一块GPU上的输出。</li>
<li>LRN只在第一和第二层后面接着，第三四五层中间没有池化或者LRN</li>
<li>池化层在所有层后都接着</li>
</ul>
<p>网络中间每一层具体的参数为：</p>
<ul>
<li>第一个卷积层对224*224*3的输入图像进行卷积,一共有96个11*11*3的卷积核，以4为步长进行滑动。他这里是把96个activation map放到两个GPU上去了，一个GPU上有48个激活层。</li>
<li>第二个卷积层输入第一个卷积层池化和LRN之后activation map，第二层一共有256个5*5*48大小的卷积核</li>
</ul>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220726145711879.png" alt="网络结构"></p>
<blockquote>
<p>Figure 2: An illustration of the architecture of our CNN, explicitly showing the delineation of responsibilities between the two GPUs. One GPU runs the layer-parts at the top of the figure while the other runs the layer-parts at the bottom. The GPUs communicate only at certain layers. The network’s input is 150,528-dimensional, and the number of neurons in the network’s remaining layers is given by 253,440–186,624–64,896–64,896–43,264–4096–4096–1000.</p>
</blockquote>
<h2 id="4-训练细节"><a href="#4-训练细节" class="headerlink" title="4. 训练细节"></a>4. 训练细节</h2><p>其实从我自己训练模型的经验来说，想要训练出来一个有用的模型，训练的过程中是需要很多技巧的。所以作为深度学习开山鼻祖性质的第一篇文章,其中的训练细节还是需要讲讲的。</p>
<h3 id="A-数据增强"><a href="#A-数据增强" class="headerlink" title="A. 数据增强"></a>A. 数据增强</h3><p>数据增强已经被认为是非常有效的增强模型性能的方法，目前有不同的观点解释，一种观点认为我们对数据进行增强的时候人为引入了噪声，因此经过学习之后模型就可以分辨出噪声，因而提升了性能；另外一种观点认为数据增强了之后，图像会蕴含更多的模式，采样得到的图像的分布越接近真实的分布，模型就能够学习到更多的特征，因此性能更加鲁棒。</p>
<p>但是，前面提到过本文提出的时代下，模型性能不好就认为是模型过于强大，过拟合了数据集，但我们现在其实知道是因为优化做的不好。所以本文的作者认为即便是ImageNet这么大的数据集，还是会被AlexNet给过拟合，所以需要进行数据增强。所以原文中才会把<code>数据增强</code>这一节放到<code>减缓过拟合</code>这一章下。</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220727110938004.png" alt="数据增强被放到减缓过拟合下"></p>
<h4 id="平移和翻转（Translation-amp-Flipping）"><a href="#平移和翻转（Translation-amp-Flipping）" class="headerlink" title="平移和翻转（Translation &amp; Flipping）"></a>平移和翻转（Translation &amp; Flipping）</h4><p>AlexNet使用的第一种数据增强方式是对图像进行平移和水平翻转。</p>
<p>在训练阶段，首先把原始输入的$256\times256$的图像随机截取成$224\times224$大小的Patch。然后对截取到的Patch进行随机翻转。原文中说这样干能够让可用的数据扩充到2048倍，但我真没搞懂这个2048怎么算的。</p>
<blockquote>
<p>The first form of data augmentation consists of generating image translations and horizontal reflections. We do this by extracting random 224 × 224 patches (and their horizontal reflections) from the 256×256 images and training our network on these extracted patches. This increases the size of our training set by a factor of 2048, though the resulting training examples are, of course, highly interdependent</p>
</blockquote>
<p>而在测试阶段，也会进行数据增强，不过此时的意义可能更多的在于集成方法。测试时，一张图像会选取四个角落和中心的五个Patch，再进行水平翻转之后得到10张图像。最后这十张图像经过softmax得到十个概率向量，最终分类的结果是这十个概率向量平均之后取argmax的结果。</p>
<h4 id="颜色调整（Color-Jitting）"><a href="#颜色调整（Color-Jitting）" class="headerlink" title="颜色调整（Color Jitting）"></a>颜色调整（Color Jitting）</h4><p>第二种数据增强的方式就是调整训练图像的RGB三个通道的强度，具体来说就是先对ImageNet中的平均图像进行PCA，得到几个主成分$\lambda_i$和对应的向量$p_i$。接下来从高斯分布中抽样得到几个参数$\alpha_i$，按照下式组合起来之后，加到训练图像上去</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220727115241909.png" alt="颜色调整"></p>
<h3 id="B-Dropout"><a href="#B-Dropout" class="headerlink" title="B. Dropout"></a>B. Dropout</h3><p>文章中训练时候使用的第二个技术就是<code>Dropout</code>。<code>dropout</code>主要用在训练阶段，在前向传播的时候，会随机选择比例为$p$的神经元（注意$p&lt;1$），让他们的值变成0。这样的话这些神经元不会参与到运算，并且也不会进行梯度的反向传播。</p>
<p>这样做的好处就是会打断神经元的联合适应性，使得每一个神经元都会学习到独立的、更加强大的特征。不过也有的人解释<code>dropout</code>之后每次都是不同的网络，所以<code>dropout</code>其实是暗中进行了模型集成。</p>
<p>此外，作者认为在测试阶段，由于这个时候我们是使用了所有的神经元，所以每一层的输出都要乘以$0.5$，因为这样这样做是对所有可能的子网进行一个平均。</p>
<h3 id="C-超参数设置"><a href="#C-超参数设置" class="headerlink" title="C. 超参数设置"></a>C. 超参数设置</h3><p>文章用的<code>SGD优化器</code>，动量（momentum）取$0.9$，权重衰减（<code>weight_decay</code>）取$0.0005$，即$5\times10^{-4}$</p>
<p>所有层的学习率都是相同的，并且在训练的过程中会调整。如果模型在当前学习阶段停止了学习，即错误率不再下降，那么学习率就会除以$10$。初始学习率设为$0.01$。在停止训练前学习率会减少三次。文章中使用ImageNet训练，用120万张图像训练了大概90个<code>epoch</code>。</p>
<p>此外，网络参数的初始化是从一个均值为$0$、标准差为$0.01$的高斯分布中采样得到的。</p>
<p>我自己的经验就是，对于大型的网络，学习率一开始一定要设置的大一些，不然优化速度慢不说，优化最终达到的性能还差，可能是陷在了local minima。</p>
<h2 id="5-实验结果"><a href="#5-实验结果" class="headerlink" title="5. 实验结果"></a>5. 实验结果</h2><blockquote>
<p>是骡子是马，拉出来溜溜</p>
</blockquote>
<h3 id="A-主实验"><a href="#A-主实验" class="headerlink" title="A. 主实验"></a>A. 主实验</h3><p>主实验其实就是ImageNet 2012挑战赛，因此，主实验的报告就是AlexNet在ImageNet 2012挑战赛上的结果。</p>
<p>可以看到，AlexNet基本上把SIFT特征+Fisher Vectors、Sparse Coding这类传统方法按在地上摩擦。</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220727224851785.png" alt="主实验报告"></p>
<h3 id="B-消融实验"><a href="#B-消融实验" class="headerlink" title="B. 消融实验"></a>B. 消融实验</h3><p>因为ILSVRC 2012的测试集不公开，因此文章除了主实验以外的性能，都是利用验证集合测试出来的。</p>
<p>文章的消融实验只有一个，就是研究了网络层数对性能的影响，如下表。带*的是<strong>使用</strong>了ImageNet 2011秋季的数据预训练得到的性能</p>
<p>最终得出的结论就是：</p>
<ul>
<li><strong>随着网络层数的增加，模型的性能会有所提升</strong></li>
<li><strong>预训练能够帮助模型得到更好的性能</strong></li>
</ul>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220728135023393.png" alt="消融实验：网络层数对性能的影响"></p>
<h3 id="C-定性分析"><a href="#C-定性分析" class="headerlink" title="C. 定性分析"></a>C. 定性分析</h3><p>最后，文章对模型进行了一些可视化，进行了定性分析。</p>
<h4 id="卷积核定性分析"><a href="#卷积核定性分析" class="headerlink" title="卷积核定性分析"></a>卷积核定性分析</h4><p>文章对两个GPU中第一层的$11\times11\times3$的卷积核进行了可视化，前三行是第一个GPU上的卷积核，后三行是第二个GPU上的卷积核。</p>
<p>可以看到，模型学习到了很多种不同频率（frequency）选择性、方向选择性（orientation-selective）的卷积核。方向选择性还能理解，但是文章为啥会说频率选择性，其实是因为卷积神经网络中的卷积核除了特征提取的理解以外，还有一种滤波器的理解。这个就涉及到图像的频域分析了。</p>
<p>需要注意的是，第一个GPU上的卷积核总是会学习到和颜色无关的方向信息，而第二个GPU 上的卷积核总是会学习到了和颜色相关的特征。</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220728140420771.png" alt="两个卷积核提取的特征各不相同"></p>
<p>文章中还报告，这个现象是通用（general）的，即多次训练每次都能够观察到这个现象。</p>
<h4 id="推断结果定性分析"><a href="#推断结果定性分析" class="headerlink" title="推断结果定性分析"></a>推断结果定性分析</h4><p>文章接下来做的定性分析，就是选取了一些图片，让模型去推断，然后对模型的推断结果进行了定性分析。</p>
<p><img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220728141132276.png" alt="推断结果定性分析"></p>
<p>这个图分为左右两个部分，分别表示两种不同的分析。</p>
<ul>
<li>首先对于左边的图<ul>
<li>左边的图像选取自<code>ILSVRC 2010</code>的测试图像，因为这一年的测试集是公开了的。<code>Ground Truth</code>被写在了下面。如果模型的<code>Top5</code>预测中存在<code>Ground Truth</code>，那么就会用红色的柱子表示，否则用蓝色柱子表示</li>
<li>通过对左侧图像的分析，能够观察到：<ol>
<li><strong>模型对平移具有鲁棒性</strong>，即便是偏离图像中心的物体依然能够被网络识别出来</li>
<li><strong>绝大多数<code>top5</code>的预测<code>label</code>都是合理的</strong>，例如猎豹的前五个预测都是猫科动物</li>
<li><strong>一些推断错误的图像则是由于图像本身具有很大的模糊性</strong>，例如最后两张的狗和樱桃、马达加斯加狐猴</li>
</ol>
</li>
</ul>
</li>
<li>其次，对于右边的图<ul>
<li>右侧第一列图像也是从<code>ILSVRC 2010</code>测试集中获得的，因为模型对一张图像会推断得到一个概率向量。而经过softmax之前的、最后一个隐藏层输出的向量称为logits。因此文章用左侧第一列的图像的logit作为key，从<code>ILSVRC 2010</code>测试集中检索得到的。具体的检索方式就是，KNN选取和左侧第一列的图像的logit的欧式距离最接近的向量所对应的图片。</li>
<li>结果表明：<ol>
<li>模型的确从数据中学习到了有用的特征，可以把类似的图像映射为同一类上去</li>
</ol>
</li>
</ul>
</li>
</ul>
<h2 id="6-总结与讨论（Discussion）"><a href="#6-总结与讨论（Discussion）" class="headerlink" title="6. 总结与讨论（Discussion）"></a>6. 总结与讨论（Discussion）</h2><p>文章的最后一部分，自然就是总结了一下文章的贡献，然后挑选了一些不太重要的问题说了一下。</p>
<blockquote>
<p>Our results show that a large, deep convolutional neural network is capable of achieving record-breaking results on a highly challenging dataset using purely supervised learning. It is notable that our network’s performance degrades if a single convolutional layer is removed. For example,removing any of the middle layers results in a loss of about 2% for the top-1 performance of the network. So the depth really is important for achieving our results.</p>
<p>To simplify our experiments, we did not use any unsupervised pre-training even though we expect that it will help, especially if we obtain enough computational power to significantly increase the size of the network without obtaining a corresponding increase in the amount of labeled data. Thus far, our results have improved as we have made our network larger and trained it longer but we still have many orders of magnitude to go in order to match the infero-temporal pathway of the human visual system. Ultimately we would like to use very large and deep convolutional nets on video sequences where the temporal structure provides very helpful information that is missing or far less obvious in static images.</p>
</blockquote>
<p>主要意思就是说：</p>
<ul>
<li>没有使用任何无监督的预训练，尽管这样做绝对会提升我们的性能。尤其是现在有了更大的算力和数据集。甚至只要我们愿意等的更久一些，模型的性能都会继续有所提升</li>
<li>本文通过使用更大，更深的网络实现了很强的性能。但是如果继续引入人类视觉系统的中的时间信息的话，那么模型性能可能会有更强的提升。因此，作者最后提出来，接下来有希望的方向就是把更深的卷积网络运用到视频领域中去，因为对于视频来说，视频可以提供很多静态图像中无法提供的时间信息，未来的模型中需要对时间进行建模。</li>
</ul>

                
            </div>
            <hr/>

            

    <div class="reprint" id="reprint-statement">
        
            <div class="reprint__author">
                <span class="reprint-meta" style="font-weight: bold;">
                    <i class="fas fa-user">
                        文章作者:
                    </i>
                </span>
                <span class="reprint-info">
                    <a href="/about" rel="external nofollow noreferrer">Jack Wang</a>
                </span>
            </div>
            <div class="reprint__type">
                <span class="reprint-meta" style="font-weight: bold;">
                    <i class="fas fa-link">
                        文章链接:
                    </i>
                </span>
                <span class="reprint-info">
                    <a href="https://jackwang0107.github.io/2022/07/26/paper-yue-du-bi-ji-1-imagenet-classification-with-deep-convolutional-neural-networks/">https://jackwang0107.github.io/2022/07/26/paper-yue-du-bi-ji-1-imagenet-classification-with-deep-convolutional-neural-networks/</a>
                </span>
            </div>
            <div class="reprint__notice">
                <span class="reprint-meta" style="font-weight: bold;">
                    <i class="fas fa-copyright">
                        版权声明:
                    </i>
                </span>
                <span class="reprint-info">
                    本博客所有文章除特別声明外，均采用
                    <a href="https://creativecommons.org/licenses/by/4.0/deed.zh" rel="external nofollow noreferrer" target="_blank">CC BY 4.0</a>
                    许可协议。转载请注明来源
                    <a href="/about" target="_blank">Jack Wang</a>
                    !
                </span>
            </div>
        
    </div>

    <script>
      <!-- note: async/defer were dropped — they have no effect on inline scripts -->
      // On every copy event, show a Materialize toast reminding the reader of
      // the reprint policy, with an action button that scrolls to the statement.
      document.addEventListener("copy", function (e) {
        // Fixed: the action element was opened as <button> but closed with </a>,
        // producing invalid markup inside the toast.
        let toastHTML = '<span>复制成功，请遵循本文的转载规则</span><button class="btn-flat toast-action" onclick="navToReprintStatement()" style="font-size: smaller">查看</button>';
        M.toast({html: toastHTML})
      });

      // Smooth-scroll to the reprint statement block (80px offset for the fixed header).
      function navToReprintStatement() {
        $("html, body").animate({scrollTop: $("#reprint-statement").offset().top - 80}, 800);
      }
    </script>



            <div class="tag_share" style="display: block;">
                <div class="post-meta__tag-list" style="display: inline-block;">
                    
                        <div class="article-tag">
                            
                                <a href="/tags/AlexNet/">
                                    <span class="chip bg-color">AlexNet</span>
                                </a>
                            
                                <a href="/tags/NIPS/">
                                    <span class="chip bg-color">NIPS</span>
                                </a>
                            
                                <a href="/tags/NIPS-2012/">
                                    <span class="chip bg-color">NIPS 2012</span>
                                </a>
                            
                                <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">
                                    <span class="chip bg-color">深度学习</span>
                                </a>
                            
                                <a href="/tags/Deep-Learning/">
                                    <span class="chip bg-color">Deep Learning</span>
                                </a>
                            
                        </div>
                    
                </div>
                <div class="post_share" style="zoom: 80%; width: fit-content; display: inline-block; float: right; margin: -0.15rem 0;">
                    <link rel="stylesheet" type="text/css" href="/libs/share/css/share.min.css">
<div id="article-share">

    
    <div class="social-share" data-sites="twitter,facebook,google,qq,qzone,wechat,weibo,douban,linkedin" data-wechat-qrcode-helper="<p>微信扫一扫即可分享！</p>"></div>
    <script src="/libs/share/js/social-share.min.js"></script>
    

    

</div>

                </div>
            </div>
            
                <style>
    /* Styles for the "reward" (donation) floating button and its QR-code modal. */
    #reward {
        margin: 40px 0;
        text-align: center;
    }

    #reward .reward-link {
        font-size: 1.4rem;
        line-height: 38px;
    }

    #reward .btn-floating:hover {
        box-shadow: 0 6px 12px rgba(0, 0, 0, 0.2), 0 5px 15px rgba(0, 0, 0, 0.2);
    }

    /* The modal is sized to fit one 210px QR image plus title and tabs. */
    #rewardModal {
        width: 320px;
        height: 350px;
    }

    #rewardModal .reward-title {
        margin: 15px auto;
        padding-bottom: 5px;
    }

    #rewardModal .modal-content {
        padding: 10px;
    }

    /* Close "x" pinned to the modal's top-right corner. */
    #rewardModal .close {
        position: absolute;
        right: 15px;
        top: 15px;
        color: rgba(0, 0, 0, 0.5);
        font-size: 1.3rem;
        line-height: 20px;
        cursor: pointer;
    }

    #rewardModal .close:hover {
        color: #ef5350;
        transform: scale(1.3);
        -moz-transform:scale(1.3);
        -webkit-transform:scale(1.3);
        -o-transform:scale(1.3);
    }

    #rewardModal .reward-tabs {
        margin: 0 auto;
        width: 210px;
    }

    .reward-tabs .tabs {
        height: 38px;
        margin: 10px auto;
        padding-left: 0;
    }

    .reward-content ul {
        padding-left: 0 !important;
    }

    .reward-tabs .tabs .tab {
        height: 38px;
        line-height: 38px;
    }

    /* Inactive tab: grey background, white text (hover keeps the same look). */
    .reward-tabs .tab a {
        color: #fff;
        background-color: #ccc;
    }

    .reward-tabs .tab a:hover {
        background-color: #ccc;
        color: #fff;
    }

    /* Active tab colors match the payment brands (WeChat green, Alipay blue). */
    .reward-tabs .wechat-tab .active {
        color: #fff !important;
        background-color: #22AB38 !important;
    }

    .reward-tabs .alipay-tab .active {
        color: #fff !important;
        background-color: #019FE8 !important;
    }

    .reward-tabs .reward-img {
        width: 210px;
        height: 210px;
    }
</style>

<div id="reward">
    <a href="#rewardModal" class="reward-link modal-trigger btn-floating btn-medium waves-effect waves-light red">赏</a>

    <!-- Modal Structure -->
    <div id="rewardModal" class="modal">
        <div class="modal-content">
            <a class="close modal-close"><i class="fas fa-times"></i></a>
            <h4 class="reward-title">你的赏识是我前进的动力</h4>
            <div class="reward-content">
                <div class="reward-tabs">
                    <ul class="tabs row">
                        <li class="tab col s6 alipay-tab waves-effect waves-light"><a href="#alipay">支付宝</a></li>
                        <li class="tab col s6 wechat-tab waves-effect waves-light"><a href="#wechat">微 信</a></li>
                    </ul>
                    <div id="alipay">
                        <img src="/medias/reward/alipay.png" class="reward-img" alt="支付宝打赏二维码">
                    </div>
                    <div id="wechat">
                        <img src="/medias/reward/wechat.jpg" class="reward-img" alt="微信打赏二维码">
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>

<script>
    // Initialize Materialize tab switching for the reward (donation) modal.
    jQuery(document).ready(function () {
        jQuery(".tabs").tabs();
    });
</script>

            
        </div>
    </div>

    

    

    

    

    

    

    

    

    

<article id="prenext-posts" class="prev-next articles">
    <div class="row article-row">
        
        <div class="article col s12 m6" data-aos="fade-up">
            <div class="article-badge left-badge text-color">
                <i class="fas fa-chevron-left"></i>&nbsp;上一篇</div>
            <div class="card">
                <a href="/2022/07/27/np-wan-quan-xing-li-lun/">
                    <div class="card-image">
                        
                        <img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220728153924152.png" class="responsive-img" alt="NP完全性理论">
                        
                        <span class="card-title">NP完全性理论</span>
                    </div>
                </a>
                <div class="card-content article-content">
                    <div class="summary block-with-text">
                        
                            本文主要介绍了NP完全性理论
                        
                    </div>
                    <div class="publish-info">
                        <span class="publish-date">
                            <i class="far fa-clock fa-fw icon-date"></i>2022-07-27
                        </span>
                        <span class="publish-author">
                            
                            <i class="fas fa-bookmark fa-fw icon-category"></i>
                            
                            <a href="/categories/%E6%95%B0%E6%8D%AE%E7%BB%93%E6%9E%84%E4%B8%8E%E7%AE%97%E6%B3%95/" class="post-category">
                                    数据结构与算法
                                </a>
                            
                            
                        </span>
                    </div>
                </div>
                
                <div class="card-action article-tags">
                    
                    <a href="/tags/NP/">
                        <span class="chip bg-color">NP</span>
                    </a>
                    
                    <a href="/tags/NP-Hard/">
                        <span class="chip bg-color">NP Hard</span>
                    </a>
                    
                    <a href="/tags/NP-Complete/">
                        <span class="chip bg-color">NP Complete</span>
                    </a>
                    
                    <a href="/tags/Algorithm/">
                        <span class="chip bg-color">Algorithm</span>
                    </a>
                    
                </div>
                
            </div>
        </div>
        
        
        <div class="article col s12 m6" data-aos="fade-up">
            <div class="article-badge right-badge text-color">
                下一篇&nbsp;<i class="fas fa-chevron-right"></i>
            </div>
            <div class="card">
                <a href="/2022/07/25/paper-yue-du-bi-ji-1-imagenet-classification-with-deep-convolutional-neural-networks-qian-yan/">
                    <div class="card-image">
                        
                        <img src="https://jack-1307599355.cos.ap-shanghai.myqcloud.com/image-20220725205629608.png" class="responsive-img" alt="Paper阅读笔记-1-ImageNet-Classification-with-Deep-Convolutional-Neural-Networks（前言）">
                        
                        <span class="card-title">Paper阅读笔记-1-ImageNet-Classification-with-Deep-Convolutional-Neural-Networks（前言）</span>
                    </div>
                </a>
                <div class="card-content article-content">
                    <div class="summary block-with-text">
                        
                            本文是NIPS 2012的ImageNet Classification with Deep Convolutional Neural Networks的阅读笔记的前言
                        
                    </div>
                    <div class="publish-info">
                            <span class="publish-date">
                                <i class="far fa-clock fa-fw icon-date"></i>2022-07-25
                            </span>
                        <span class="publish-author">
                            
                            <i class="fas fa-bookmark fa-fw icon-category"></i>
                            
                            <a href="/categories/Paper%E9%98%85%E8%AF%BB%E7%AC%94%E8%AE%B0/" class="post-category">
                                    Paper阅读笔记
                                </a>
                            
                            <a href="/categories/Paper%E9%98%85%E8%AF%BB%E7%AC%94%E8%AE%B0/Image-Classification/" class="post-category">
                                    Image Classification
                                </a>
                            
                            
                        </span>
                    </div>
                </div>
                
                <div class="card-action article-tags">
                    
                    <a href="/tags/AlexNet/">
                        <span class="chip bg-color">AlexNet</span>
                    </a>
                    
                    <a href="/tags/NIPS/">
                        <span class="chip bg-color">NIPS</span>
                    </a>
                    
                    <a href="/tags/NIPS-2012/">
                        <span class="chip bg-color">NIPS 2012</span>
                    </a>
                    
                    <a href="/tags/%E6%B7%B1%E5%BA%A6%E5%AD%A6%E4%B9%A0/">
                        <span class="chip bg-color">深度学习</span>
                    </a>
                    
                    <a href="/tags/Deep-Learning/">
                        <span class="chip bg-color">Deep Learning</span>
                    </a>
                    
                </div>
                
            </div>
        </div>
        
    </div>
</article>

</div>


<script>
    // Append a source/author attribution to the clipboard whenever a reader
    // copies a sufficiently long selection from the article body.
    $('#articleContent').on('copy', function (e) {
        // IE8 or earlier browser is 'undefined'
        if (typeof window.getSelection === 'undefined') return;

        var selection = window.getSelection();
        // if the selection is short let's not annoy our users.
        if (('' + selection).length < Number.parseInt('120')) {
            return;
        }

        // create a div outside of the visible area and fill it with the selected text.
        var bodyElement = document.getElementsByTagName('body')[0];
        var newdiv = document.createElement('div');
        newdiv.style.position = 'absolute';
        newdiv.style.left = '-99999px';
        bodyElement.appendChild(newdiv);
        newdiv.appendChild(selection.getRangeAt(0).cloneContents());

        // we need a <pre> tag workaround.
        // otherwise the text inside "pre" loses all the line breaks!
        if (selection.getRangeAt(0).commonAncestorContainer.nodeName === 'PRE' || selection.getRangeAt(0).commonAncestorContainer.nodeName === 'CODE') {
            newdiv.innerHTML = "<pre>" + newdiv.innerHTML + "</pre>";
        }

        // Append the attribution: site name, author, and the current page URL.
        var url = document.location.href;
        newdiv.innerHTML += '<br />'
            + '来源: JackWang&#39;s Blog<br />'
            + '文章作者: Jack Wang<br />'
            + '文章链接: <a href="' + url + '">' + url + '</a><br />'
            + '本文章著作权归作者所有，任何形式的转载都请注明出处。';

        // Re-select the augmented content so it is what lands on the clipboard,
        // then remove the off-screen helper div shortly after the copy completes.
        selection.selectAllChildren(newdiv);
        window.setTimeout(function () {bodyElement.removeChild(newdiv);}, 200);
    });
</script>


<!-- 代码块功能依赖 -->
<script type="text/javascript" src="/libs/codeBlock/codeBlockFuction.js"></script>

<!-- 代码语言 -->

<script type="text/javascript" src="/libs/codeBlock/codeLang.js"></script>


<!-- 代码块复制 -->

<script type="text/javascript" src="/libs/codeBlock/codeCopy.js"></script>


<!-- 代码块收缩 -->

<script type="text/javascript" src="/libs/codeBlock/codeShrink.js"></script>


    </div>
    <div id="toc-aside" class="expanded col l3 hide-on-med-and-down">
        <div class="toc-widget card" style="background-color: white;">
            <div class="toc-title"><i class="far fa-list-alt"></i>&nbsp;&nbsp;目录</div>
            <div id="toc-content"></div>
        </div>
    </div>
</div>

<!-- TOC 悬浮按钮. -->

<div id="floating-toc-btn" class="hide-on-med-and-down">
    <a class="btn-floating btn-large bg-color">
        <i class="fas fa-list-ul"></i>
    </a>
</div>


<script src="/libs/tocbot/tocbot.min.js"></script>
<script>
    // Build the table of contents with tocbot, pin it while scrolling, and wire
    // up the floating button that expands/collapses the TOC sidebar column.
    $(function () {
        tocbot.init({
            tocSelector: '#toc-content',
            contentSelector: '#articleContent',
            headingsOffset: -($(window).height() * 0.4 - 45),
            collapseDepth: Number('2'),
            headingSelector: 'h1, h2, h3, h4, h5, h6'
        });

        // modify the toc link href to support Chinese.
        let i = 0;
        let tocHeading = 'toc-heading-';
        $('#toc-content a').each(function () {
            $(this).attr('href', '#' + tocHeading + (++i));
        });

        // modify the heading title id to support Chinese.
        // (relies on tocbot emitting links in the same document order as the
        // headings, so link #n and heading #n pair up — TODO confirm)
        i = 0;
        $('#articleContent').children('h1, h2, h3, h4, h5, h6').each(function () {
            $(this).attr('id', tocHeading + (++i));
        });

        // Set scroll toc fixed.
        let tocHeight = parseInt($(window).height() * 0.4 - 64);
        let $tocWidget = $('.toc-widget');
        $(window).scroll(function () {
            let scroll = $(window).scrollTop();
            /* add post toc fixed. */
            if (scroll > tocHeight) {
                $tocWidget.addClass('toc-fixed');
            } else {
                $tocWidget.removeClass('toc-fixed');
            }
        });

        
        /* Fix the width of the prev/next post-card row after the layout changes. */
        let fixPostCardWidth = function (srcId, targetId) {
            let srcDiv = $('#' + srcId);
            if (srcDiv.length === 0) {
                return;
            }

            // The correction grows with the source width; breakpoints compensate
            // for the grid gutters at the different responsive column widths.
            let w = srcDiv.width();
            if (w >= 450) {
                w = w + 21;
            } else if (w >= 350 && w < 450) {
                w = w + 18;
            } else if (w >= 300 && w < 350) {
                w = w + 16;
            } else {
                w = w + 14;
            }
            $('#' + targetId).width(w);
        };

        // Toggle the TOC sidebar open/closed from the floating button.
        const expandedClass = 'expanded';
        let $tocAside = $('#toc-aside');
        let $mainContent = $('#main-content');
        $('#floating-toc-btn .btn-floating').click(function () {
            if ($tocAside.hasClass(expandedClass)) {
                $tocAside.removeClass(expandedClass).hide();
                $mainContent.removeClass('l9');
            } else {
                $tocAside.addClass(expandedClass).show();
                $mainContent.addClass('l9');
            }
            fixPostCardWidth('artDetail', 'prenext-posts');
        });
        
    });
</script>

    

</main>




    <footer class="page-footer bg-color">
    

    <div class="container row center-align"
         style="margin-bottom: 15px !important;">
        <div class="col s12 m8 l8 copy-right">
            Copyright&nbsp;&copy;
            
                <span id="year">2021-2023</span>
            
            <a href="/about" target="_blank">Jack Wang</a>
            <!-- |&nbsp;Powered by&nbsp;<a href="https://hexo.io/" target="_blank">Hexo</a> -->
            <!-- |&nbsp;Theme&nbsp;<a href="https://github.com/blinkfox/hexo-theme-matery" target="_blank">Matery</a> -->
            <br>
            
                &nbsp;<i class="fas fa-chart-area"></i>&nbsp;站点总字数:&nbsp;<span
                        class="white-color">603.8k</span>
            
            
            
                
            
            
                <span id="busuanzi_container_site_pv">
                &nbsp;|&nbsp;<i class="far fa-eye"></i>&nbsp;总访问量:&nbsp;
                    <span id="busuanzi_value_site_pv" class="white-color"></span>
            </span>
            
            
                <span id="busuanzi_container_site_uv">
                &nbsp;|&nbsp;<i class="fas fa-users"></i>&nbsp;总访问人数:&nbsp;
                    <span id="busuanzi_value_site_uv" class="white-color"></span>
            </span>
            
            <br>

            <!-- 运行天数提醒. -->
            
                <span id="sitetime"> Loading ...</span>
                <script>
                    // Fills #sitetime with how long the site has been running and
                    // #year with the copyright year (or year range).
                    var calcSiteTime = function () {
                        var seconds = 1000;
                        var minutes = seconds * 60;
                        var hours = minutes * 60;
                        var days = hours * 24;
                        // NOTE: a "year" is approximated as 365 days; leap days are ignored.
                        var years = days * 365;
                        var today = new Date();
                        // Site launch moment, written human-readable (1-based month): 2021-11-12 00:00:00.
                        var startYear = "2021";
                        var startMonth = "11";
                        var startDate = "12";
                        var startHour = "0";
                        var startMinute = "0";
                        var startSecond = "0";
                        var todayYear = today.getFullYear();
                        var todayMonth = today.getMonth() + 1;
                        var todayDate = today.getDate();
                        var todayHour = today.getHours();
                        var todayMinute = today.getMinutes();
                        var todaySecond = today.getSeconds();
                        // Fixed: Date.UTC takes a 0-based month, so convert the 1-based
                        // values here. (The old code passed both months 1-based, shifting
                        // both dates one month forward — the difference was only roughly
                        // right and drifted by a few days around month boundaries.)
                        var t1 = Date.UTC(startYear, startMonth - 1, startDate, startHour, startMinute, startSecond);
                        var t2 = Date.UTC(todayYear, todayMonth - 1, todayDate, todayHour, todayMinute, todaySecond);
                        var diff = t2 - t1;
                        var diffYears = Math.floor(diff / years);
                        var diffDays = Math.floor((diff / days) - diffYears * 365);

                        // Show a year range only when the site spans more than one calendar year.
                        var language = 'zh-CN';
                        if (startYear === String(todayYear)) {
                            document.getElementById("year").innerHTML = todayYear;
                            var daysTip = 'This site has been running for ' + diffDays + ' days';
                            if (language === 'zh-CN') {
                                daysTip = '本站已运行 ' + diffDays + ' 天';
                            } else if (language === 'zh-HK') {
                                daysTip = '本站已運行 ' + diffDays + ' 天';
                            }
                            document.getElementById("sitetime").innerHTML = daysTip;
                        } else {
                            document.getElementById("year").innerHTML = startYear + " - " + todayYear;
                            var yearsAndDaysTip = 'This site has been running for ' + diffYears + ' years and '
                                + diffDays + ' days';
                            if (language === 'zh-CN') {
                                yearsAndDaysTip = '本站已运行 ' + diffYears + ' 年 ' + diffDays + ' 天';
                            } else if (language === 'zh-HK') {
                                yearsAndDaysTip = '本站已運行 ' + diffYears + ' 年 ' + diffDays + ' 天';
                            }
                            document.getElementById("sitetime").innerHTML = yearsAndDaysTip;
                        }
                    }

                    calcSiteTime();
                </script>
            
            <br>
            
                <span id="icp"><img src="/medias/icp.png"
                                    style="vertical-align: text-bottom;"/>
                <a href="https://beian.miit.gov.cn" target="_blank">陕ICP备2021014294号-1</a>
            </span>
            
        </div>
        <div class="col s12 m4 l4 social-link social-statis">
    <a href="https://github.com/jackwang0108" class="tooltipped" target="_blank" data-tooltip="访问我的GitHub" data-position="top" data-delay="50">
        <i class="fab fa-github"></i>
    </a>



    <a href="mailto:2232123545@qq.com" class="tooltipped" target="_blank" data-tooltip="邮件联系我" data-position="top" data-delay="50">
        <i class="fas fa-envelope-open"></i>
    </a>







    <a href="tencent://AddContact/?fromId=50&fromSubId=1&subcmd=all&uin=2232123545" class="tooltipped" target="_blank" data-tooltip="QQ联系我: 2232123545" data-position="top" data-delay="50">
        <i class="fab fa-qq"></i>
    </a>







</div>
    </div>
</footer>

<div class="progress-bar"></div>


    <!-- 搜索遮罩框 -->
<div id="searchModal" class="modal">
    <div class="modal-content">
        <div class="search-header">
            <span class="title"><i class="fas fa-search"></i>&nbsp;&nbsp;搜索</span>
            <input type="search" id="searchInput" name="s" placeholder="请输入搜索的关键字"
                   class="search-input">
        </div>
        <div id="searchResult"></div>
    </div>
</div>

<script type="text/javascript">
$(function () {
    // Client-side search: load Hexo's generated search.xml index once via AJAX,
    // then filter and render matching posts on every keystroke in the search box.
    var searchFunc = function (path, search_id, content_id) {
        'use strict';
        // Fixed: escape regex metacharacters so a query such as "c++" or "a.b"
        // cannot throw a SyntaxError when compiled into the highlight RegExp below.
        var escapeRegExp = function (s) {
            return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
        };
        $.ajax({
            url: path,
            dataType: "xml",
            success: function (xmlResponse) {
                // get the contents from search data
                var datas = $("entry", xmlResponse).map(function () {
                    return {
                        title: $("title", this).text(),
                        content: $("content", this).text(),
                        url: $("url", this).text()
                    };
                }).get();
                var $input = document.getElementById(search_id);
                var $resultContent = document.getElementById(content_id);
                $input.addEventListener('input', function () {
                    var str = '<ul class=\"search-result-list\">';
                    // Split the query into lowercase keywords on whitespace/hyphens.
                    var keywords = this.value.trim().toLowerCase().split(/[\s\-]+/);
                    $resultContent.innerHTML = "";
                    if (this.value.trim().length <= 0) {
                        return;
                    }
                    // perform local searching
                    datas.forEach(function (data) {
                        var isMatch = true;
                        var data_title = data.title.trim().toLowerCase();
                        var data_content = data.content.trim().replace(/<[^>]+>/g, "").toLowerCase();
                        var data_url = data.url;
                        data_url = data_url.indexOf('/') === 0 ? data.url : '/' + data_url;
                        var index_title = -1;
                        var index_content = -1;
                        var first_occur = -1;
                        // only match artiles with not empty titles and contents
                        if (data_title !== '' && data_content !== '') {
                            // Every keyword must appear in the title or the content;
                            // the excerpt is anchored at the first keyword's position.
                            keywords.forEach(function (keyword, i) {
                                index_title = data_title.indexOf(keyword);
                                index_content = data_content.indexOf(keyword);
                                if (index_title < 0 && index_content < 0) {
                                    isMatch = false;
                                } else {
                                    if (index_content < 0) {
                                        index_content = 0;
                                    }
                                    if (i === 0) {
                                        first_occur = index_content;
                                    }
                                }
                            });
                        }
                        // show search results
                        if (isMatch) {
                            str += "<li><a href='" + data_url + "' class='search-result-title'>" + data_title + "</a>";
                            var content = data.content.trim().replace(/<[^>]+>/g, "");
                            if (first_occur >= 0) {
                                // cut out 100 characters
                                var start = first_occur - 20;
                                var end = first_occur + 80;
                                if (start < 0) {
                                    start = 0;
                                }
                                if (start === 0) {
                                    end = 100;
                                }
                                if (end > content.length) {
                                    end = content.length;
                                }
                                // Fixed: substr(start, n) reads its 2nd argument as a
                                // LENGTH, but `end` is an end index — substring(start, end)
                                // yields the intended ~100-character excerpt.
                                var match_content = content.substring(start, end);
                                // highlight all keywords
                                keywords.forEach(function (keyword) {
                                    var regS = new RegExp(escapeRegExp(keyword), "gi");
                                    match_content = match_content.replace(regS, "<em class=\"search-keyword\">" + keyword + "</em>");
                                });

                                str += "<p class=\"search-result\">" + match_content + "...</p>"
                            }
                            str += "</li>";
                        }
                    });
                    str += "</ul>";
                    $resultContent.innerHTML = str;
                });
            }
        });
    };

    searchFunc('/search.xml', 'searchInput', 'searchResult');
});
</script>

    <!-- 回到顶部按钮 -->
<div id="backTop" class="top-scroll">
    <a class="btn-floating btn-large waves-effect waves-light" href="#!">
        <i class="fas fa-arrow-up"></i>
    </a>
</div>


    <script src="/libs/materialize/materialize.min.js"></script>
    <script src="/libs/masonry/masonry.pkgd.min.js"></script>
    <script src="/libs/aos/aos.js"></script>
    <script src="/libs/scrollprogress/scrollProgress.min.js"></script>
    <script src="/libs/lightGallery/js/lightgallery-all.min.js"></script>
    <script src="/js/matery.js"></script>

    

    
        
        <script type="text/javascript">
            // Enable the sakura (falling petals) effect on desktop widths only;
            // document.write injects the script synchronously during parsing.
            var windowWidth = $(window).width();
            if (windowWidth > 768) {
                document.write('<script type="text/javascript" src="/libs/others/sakura.js"><\/script>');
            }
        </script>
    

    <!-- 雪花特效 -->
    

    <!-- 鼠标星星特效 -->
    

     
        <script src="https://ssl.captcha.qq.com/TCaptcha.js"></script>
        <script src="/libs/others/TencentCaptcha.js"></script>
        <button id="TencentCaptcha" data-appid="xxxxxxxxxx" data-cbfn="callback" type="button" hidden></button>
    

    <!-- Baidu Analytics -->

    <!-- Baidu Push -->

<script>
    // Baidu link-submit (SEO push) loader: injects the protocol-appropriate
    // push.js script just before the first <script> element on the page.
    (function () {
        var pushScript = document.createElement('script');
        var isHttps = window.location.protocol.split(':')[0] === 'https';
        pushScript.src = isHttps
            ? 'https://zz.bdstatic.com/linksubmit/push.js'
            : 'http://push.zhanzhang.baidu.com/push.js';
        var firstScript = document.getElementsByTagName("script")[0];
        firstScript.parentNode.insertBefore(pushScript, firstScript);
    })();
</script>

    
    <script src="/libs/others/clicklove.js" async="async"></script>
    
    
    <script async src="/libs/others/busuanzi.pure.mini.js"></script>
    

    

    

    <!--腾讯兔小巢-->
    
    

    

    

    
    <script src="/libs/instantpage/instantpage.js" type="module"></script>
    

</body>

</html>
