<!-- build time:Wed Jun 21 2023 22:33:35 GMT+0800 (GMT+08:00) --><!DOCTYPE html><html lang="zh-CN"><head><meta charset="UTF-8"><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=2"><meta name="theme-color" content="#FFF"><meta name="baidu-site-verification" content="code-C0oocRvMWv"><link rel="apple-touch-icon" sizes="180x180" href="/images/apple-touch-icon.png"><link rel="icon" type="image/ico" sizes="32x32" href="/images/favicon.ico"><link rel="mask-icon" href="/images/logo.svg" color=""><link rel="manifest" href="/images/manifest.json"><meta name="msapplication-config" content="/images/browserconfig.xml"><meta http-equiv="Cache-Control" content="no-transform"><meta http-equiv="Cache-Control" content="no-siteapp"><meta name="baidu-site-verification" content="https://jiang-hs.gitee.io"><link rel="alternate" type="application/rss+xml" title="航 順" href="https://jiang-hs.gitee.io/rss.xml"><link rel="alternate" type="application/atom+xml" title="航 順" href="https://jiang-hs.gitee.io/atom.xml"><link rel="alternate" type="application/json" title="航 順" href="https://jiang-hs.gitee.io/feed.json"><link rel="stylesheet" href="//fonts.googleapis.com/css?family=Mulish:300,300italic,400,400italic,700,700italic%7CFredericka%20the%20Great:300,300italic,400,400italic,700,700italic%7CNoto%20Serif%20JP:300,300italic,400,400italic,700,700italic%7CNoto%20Serif%20SC:300,300italic,400,400italic,700,700italic%7CInconsolata:300,300italic,400,400italic,700,700italic&display=swap&subset=latin,latin-ext"><link rel="stylesheet" href="/css/app.css?v=0.0.0"><meta name="keywords" content="人工智能,机器学习基础"><link rel="canonical" href="https://jiang-hs.gitee.io/posts/c6767314/"><meta name="description" content="# 一、卷积神经网络简介 卷积神经网络（Convolutional Neural Networks, CNN）是一类包含卷积计算且具有深度结构的前馈神经网络（Feedforward Neural Networks），是深度学习（deep learning）的代表算法之一。卷积神经网络具有表征学习（表征学习指，学习单个符号或一组符号代表什么）能力，能够按其阶层结构对输入信息进行平移不变分类。 对卷积"><meta property="og:type" content="article"><meta 
property="og:title" content="卷积神经网络"><meta property="og:url" content="https://jiang-hs.gitee.io/posts/c6767314/index.html"><meta property="og:site_name" content="航 順"><meta property="og:description" content="# 一、卷积神经网络简介 卷积神经网络（Convolutional Neural Networks, CNN）是一类包含卷积计算且具有深度结构的前馈神经网络（Feedforward Neural Networks），是深度学习（deep learning）的代表算法之一。卷积神经网络具有表征学习（表征学习指，学习单个符号或一组符号代表什么）能力，能够按其阶层结构对输入信息进行平移不变分类。 对卷积"><meta property="og:locale" content="zh_CN"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596349412902.jpg"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596363685613.png"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596362332207.gif"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596524052019.jpeg"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596528537976.jpg"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596534656283.jpg"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596535462312.jpg"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596536088417.gif"><meta property="og:image" content="https://jiang-hs.github.io/post-images/1596535081987.jpg"><meta property="article:published_time" content="2021-03-06T08:12:27.000Z"><meta property="article:modified_time" content="2021-08-25T03:32:03.812Z"><meta property="article:author" content="hang shun"><meta property="article:tag" content="人工智能"><meta property="article:tag" content="机器学习基础"><meta name="twitter:card" content="summary"><meta name="twitter:image" content="https://jiang-hs.github.io/post-images/1596349412902.jpg"><title>卷积神经网络 - 机器学习基础 | hang shun = 航 順 = 天官赐福，百无禁忌</title><meta name="generator" content="Hexo 5.4.2"></head><body itemscope itemtype="http://schema.org/WebPage"><div id="loading"><div class="cat"><div class="body"></div><div class="head"><div class="face"></div></div><div 
class="foot"><div class="tummy-end"></div><div class="bottom"></div><div class="legs left"></div><div class="legs right"></div></div><div class="paw"><div class="hands left"></div><div class="hands right"></div></div></div></div><div id="container"><header id="header" itemscope itemtype="http://schema.org/WPHeader"><div class="inner"><div id="brand"><div class="pjax"><h1 itemprop="name headline">卷积神经网络</h1><div class="meta"><span class="item" title="创建时间：2021-03-06 16:12:27"><span class="icon"><i class="ic i-calendar"></i> </span><span class="text">发表于</span> <time itemprop="dateCreated datePublished" datetime="2021-03-06T16:12:27+08:00">2021-03-06</time> </span><span class="item" title="本文字数"><span class="icon"><i class="ic i-pen"></i> </span><span class="text">本文字数</span> <span>6.5k</span> <span class="text">字</span> </span><span class="item" title="阅读时长"><span class="icon"><i class="ic i-clock"></i> </span><span class="text">阅读时长</span> <span>6 分钟</span></span></div></div></div><nav id="nav"><div class="inner"><div class="toggle"><div class="lines" aria-label="切换导航栏"><span class="line"></span> <span class="line"></span> <span class="line"></span></div></div><ul class="menu"><li class="item title"><a href="/" rel="start">hang shun</a></li></ul><ul class="right"><li class="item theme"><i class="ic i-sun"></i></li><li class="item search"><i class="ic i-search"></i></li></ul></div></nav></div><div id="imgs" class="pjax"><ul><li class="item" data-background-image="https://pic1.imgdb.cn/item/60d7f9855132923bf8a9f1d4.jpg"></li><li class="item" data-background-image="https://pic1.imgdb.cn/item/60d7fbf55132923bf8b6f25f.jpg"></li><li class="item" data-background-image="https://pic1.imgdb.cn/item/64427a860d2dde5777acb22d.jpg"></li><li class="item" data-background-image="https://pic1.imgdb.cn/item/60d7f95f5132923bf8a9244e.jpg"></li><li class="item" data-background-image="https://pic1.imgdb.cn/item/60d7f96c5132923bf8a968aa.jpg"></li><li class="item" 
data-background-image="https://pic1.imgdb.cn/item/60d7f95f5132923bf8a92484.jpg"></li></ul></div></header><div id="waves"><svg class="waves" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 24 150 28" preserveAspectRatio="none" shape-rendering="auto"><defs><path id="gentle-wave" d="M-160 44c30 0 58-18 88-18s 58 18 88 18 58-18 88-18 58 18 88 18 v44h-352z"/></defs><g class="parallax"><use xlink:href="#gentle-wave" x="48" y="0"/><use xlink:href="#gentle-wave" x="48" y="3"/><use xlink:href="#gentle-wave" x="48" y="5"/><use xlink:href="#gentle-wave" x="48" y="7"/></g></svg></div><main><div class="inner"><div id="main" class="pjax"><div class="article wrap"><div class="breadcrumb" itemscope itemtype="https://schema.org/BreadcrumbList"><i class="ic i-home"></i> <span><a href="/">首页</a></span><i class="ic i-angle-right"></i> <span class="current" itemprop="itemListElement" itemscope itemtype="https://schema.org/ListItem"><a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%9F%BA%E7%A1%80/" itemprop="item" rel="index" title="分类于 机器学习基础"><span itemprop="name">机器学习基础</span></a><meta itemprop="position" content="1"></span></div><article itemscope itemtype="http://schema.org/Article" class="post block" lang="zh-CN"><link itemprop="mainEntityOfPage" href="https://jiang-hs.gitee.io/posts/c6767314/"><span hidden itemprop="author" itemscope itemtype="http://schema.org/Person"><meta itemprop="image" content="/images/avatar.jpg"><meta itemprop="name" content="hang shun"><meta itemprop="description" content="天官赐福，百无禁忌, 世中逢尔，雨中逢花"></span><span hidden itemprop="publisher" itemscope itemtype="http://schema.org/Organization"><meta itemprop="name" content="航 順"></span><div class="body md" itemprop="articleBody"><h1 id="一-卷积神经网络简介"><a class="anchor" href="#一-卷积神经网络简介">#</a> 一、卷积神经网络简介</h1><p>卷积神经网络（Convolutional Neural Networks, CNN）是一类包含卷积计算且具有深度结构的前馈神经网络（Feedforward Neural Networks），是深度学习（deep 
learning）的代表算法之一。卷积神经网络具有表征学习（表征学习指，学习单个符号或一组符号代表什么）能力，能够按其阶层结构对输入信息进行平移不变分类。</p><p>对卷积神经网络的研究始于二十世纪 80 至 90 年代，时间延迟网络和 LeNet-5 是最早出现的卷积神经网络；在二十一世纪后，随着深度学习理论的提出和数值计算设备的改进，卷积神经网络得到了快速发展，并被应用于计算机视觉、自然语言处理等领域。</p><p>卷积神经网络仿造生物的视知觉（visual perception）机制构建，可以进行监督学习和非监督学习，其隐含层内的卷积核参数共享和层间连接的稀疏性使得卷积神经网络能够以较小的计算量对格点化（grid-like topology）特征，例如像素和音频进行学习、有稳定的效果且对数据没有额外的特征工程（feature engineering）要求。</p><h1 id="二-卷积神经网络的结构"><a class="anchor" href="#二-卷积神经网络的结构">#</a> 二、卷积神经网络的结构</h1><p>卷积神经网络的主要结构包括：<strong>输入层（Input Layer）</strong> 、<strong>卷积层（Conv Net Layer）</strong>、<strong>池化层（Pooling Layer）</strong>、<strong>全连接层（Full Connection Layer）</strong> 和 <strong>输出层（Output Layer）</strong> 。它是一种前馈式神经网络，每一层都有对应的一种特征输出，并且每个特征图有多个神经元。神经元通过利用对应的滤波器（卷积块或池化块）处理图像所传递过来的信息，构成特征图。每个卷积层后都有一个池化层，从低维映射到高维，此时的映射由于参数过多，维度过高，不适宜作为后层神经的输入，所以必须对该层的输出做降维处理，因此就引入了池化层。若是不对后期数据进行降维，则容易造成过拟合，甚至还会导致维数灾难。</p><p>LeCun 最先提出了一个完整的卷积神经网络算法和经典的网络结构模型 LeNet-5。下面将采用 LeNet-5 网络结构来介绍卷积神经网络的结构组成，以及网络的运算法则。其结构如下图所示，当时已成功将其应用于美国银行业的手写字符识别处理中。LeNet-5 是第一个产生实际商业价值的卷积神经网络，同时也为卷积神经网络以后的发展奠定了坚实的基础。<br><img data-src="https://jiang-hs.github.io/post-images/1596349412902.jpg" alt="img"></p><p>LeNet-5 网络结构</p><p>下面大致介绍各个网络部分。</p><ul><li><strong>输入层（INPUT）</strong>：卷积输入层可以直接作用于原始输入数据，对于输入的图像来说，输入数据就是图像的像素值。</li><li><strong>卷积层（Convolutions）</strong>：卷积神经网络的卷积层，也叫做特征提取层，包括两个部分。第一部分是真正的卷积层，主要作用是提取输入数据特征。<strong>每一个不同的卷积核提取输入数据的特征都不相同</strong>，卷积层的卷积核数量越多，就能提取越多的输入数据的特征。第二部分是 pooling 层，也叫下采样层 / <strong>池化层</strong>，主要目的是在保留有用信息的基础上减少数据处理量，加快训练网络的速度。通常情况下，<strong>卷积神经网络至少包含二层卷积层</strong>（这里把真正的卷积层和池化层统称为卷积层），即卷积层，pooling 层，卷积层，pooling 层。<strong>卷积层数越多，在前一层卷积层基础上就能够提取更加抽象的特征</strong>。</li><li><strong>全连接层（Full Connection）</strong>：可以包含多个全连接层，实际上就是多层感知机的隐含层部分，通常情况下后面层的神经节点都和前一层的每一个神经节点连接，同一层的神经元节点之间是没有连接的。每一层的神经元节点分别通过连接线上的权值进行前向传播，加权组合得到下一层神经元节点的输入。</li><li><strong>输出层（OUTPUT）</strong>：输出层神经节点的数目是根据具体应用任务来设定的。如果是分类任务，卷积神经网络输出层通常是一个分类器。</li></ul><h1 id="三-卷积神经网络的原理"><a 
class="anchor" href="#三-卷积神经网络的原理">#</a> 三、卷积神经网络的原理</h1><h2 id="31-卷积层"><a class="anchor" href="#31-卷积层">#</a> 3.1 卷积层</h2><h3 id="311-卷积公式"><a class="anchor" href="#311-卷积公式">#</a> 3.1.1 卷积公式</h3><p>在了解卷积层模型原理之前，我们要先了解什么是卷积。数学中，卷积是两个变量在某范围内相乘后求和的结果。<br>设：x (t),w (t) 是 R 上的两个可积函数，我们可以得到微积分中卷积的表达式为：</p><p><span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi>S</mi><mo stretchy="false">(</mo><mi>t</mi><mo stretchy="false">)</mo><mo>=</mo><mo>∫</mo><mi>x</mi><mo stretchy="false">(</mo><mi>t</mi><mo>−</mo><mi>a</mi><mo stretchy="false">)</mo><mi>w</mi><mo stretchy="false">(</mo><mi>a</mi><mo stretchy="false">)</mo><mi>d</mi><mi>a</mi></mrow><annotation encoding="application/x-tex">S(t)=\int x(t-a)w(a)da</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal" style="margin-right:.05764em">S</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:2.22225em;vertical-align:-.86225em"></span><span class="mop op-symbol large-op" style="margin-right:.44445em;position:relative;top:-.0011249999999999316em">∫</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal">x</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">−</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord 
mathnormal">a</span><span class="mclose">)</span><span class="mord mathnormal" style="margin-right:.02691em">w</span><span class="mopen">(</span><span class="mord mathnormal">a</span><span class="mclose">)</span><span class="mord mathnormal">d</span><span class="mord mathnormal">a</span></span></span></span></span></p><p>如果卷积的变量是序列<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>x</mi><mo stretchy="false">(</mo><mi>t</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">x(t)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">x</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mclose">)</span></span></span></span> 和 <span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>w</mi><mo stretchy="false">(</mo><mi>t</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">w(t)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal" style="margin-right:.02691em">w</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mclose">)</span></span></span></span>（即卷积的离散形式），则卷积的表达式为：</p><p><span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi>s</mi><mo stretchy="false">(</mo><mi>t</mi><mo stretchy="false">)</mo><mo>=</mo><munder><mo>∑</mo><mi>a</mi></munder><mi>x</mi><mo stretchy="false">(</mo><mi>t</mi><mo>−</mo><mi>a</mi><mo stretchy="false">)</mo><mi>w</mi><mo stretchy="false">(</mo><mi>a</mi><mo stretchy="false">)</mo></mrow><annotation 
encoding="application/x-tex">s(t)=\sum_{a}x(t-a)w(a)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">s</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:2.3000100000000003em;vertical-align:-1.250005em"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.050005em"><span style="top:-1.8999949999999999em;margin-left:0"><span class="pstrut" style="height:3.05em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">a</span></span></span></span><span style="top:-3.0500049999999996em"><span class="pstrut" style="height:3.05em"></span><span><span class="mop op-symbol large-op">∑</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.250005em"><span></span></span></span></span></span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal">x</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">−</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">a</span><span class="mclose">)</span><span class="mord mathnormal" style="margin-right:.02691em">w</span><span class="mopen">(</span><span class="mord mathnormal">a</span><span 
class="mclose">)</span></span></span></span></span></p><p>上面两个式子都可以表示为：</p><p><span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi>s</mi><mo stretchy="false">(</mo><mi>t</mi><mo stretchy="false">)</mo><mo>=</mo><mo stretchy="false">(</mo><mi>X</mi><mo>∗</mo><mi>W</mi><mo stretchy="false">)</mo><mo stretchy="false">(</mo><mi>t</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">s(t)=(X*W)(t)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">s</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mopen">(</span><span class="mord mathnormal" style="margin-right:.07847em">X</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal" style="margin-right:.13889em">W</span><span class="mclose">)</span><span class="mopen">(</span><span class="mord mathnormal">t</span><span class="mclose">)</span></span></span></span></span></p><p>式子中的星号 * 表示卷积。<br>我们再把卷积公式推广到二维卷积，则表达式为：</p><p><span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi>s</mi><mo stretchy="false">(</mo><mi>i</mi><mo separator="true">,</mo><mi>j</mi><mo 
stretchy="false">)</mo><mo>=</mo><mo stretchy="false">(</mo><mi>X</mi><mo>∗</mo><mi>W</mi><mo stretchy="false">)</mo><mo stretchy="false">(</mo><mi>i</mi><mo separator="true">,</mo><mi>j</mi><mo stretchy="false">)</mo><mo>=</mo><munder><mo>∑</mo><mi>m</mi></munder><munder><mo>∑</mo><mi>n</mi></munder><mi>x</mi><mo stretchy="false">(</mo><mi>i</mi><mo>−</mo><mi>m</mi><mo separator="true">,</mo><mi>j</mi><mo>−</mo><mi>n</mi><mo stretchy="false">)</mo><mi>w</mi><mo stretchy="false">(</mo><mi>m</mi><mo separator="true">,</mo><mi>n</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">s(i,j)=(X*W)(i,j)=\sum_{m}\sum_{n}x(i-m,j-n)w(m,n)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">s</span><span class="mopen">(</span><span class="mord mathnormal">i</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal" style="margin-right:.05724em">j</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mopen">(</span><span class="mord mathnormal" style="margin-right:.07847em">X</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal" style="margin-right:.13889em">W</span><span class="mclose">)</span><span class="mopen">(</span><span class="mord mathnormal">i</span><span class="mpunct">,</span><span class="mspace" 
style="margin-right:.16666666666666666em"></span><span class="mord mathnormal" style="margin-right:.05724em">j</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:2.3000100000000003em;vertical-align:-1.250005em"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.050005em"><span style="top:-1.8999949999999999em;margin-left:0"><span class="pstrut" style="height:3.05em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">m</span></span></span></span><span style="top:-3.0500049999999996em"><span class="pstrut" style="height:3.05em"></span><span><span class="mop op-symbol large-op">∑</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.250005em"><span></span></span></span></span></span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.050005em"><span style="top:-1.8999949999999999em;margin-left:0"><span class="pstrut" style="height:3.05em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">n</span></span></span></span><span style="top:-3.0500049999999996em"><span class="pstrut" style="height:3.05em"></span><span><span class="mop op-symbol large-op">∑</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.250005em"><span></span></span></span></span></span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal">x</span><span class="mopen">(</span><span 
class="mord mathnormal">i</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">−</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.85396em;vertical-align:-.19444em"></span><span class="mord mathnormal">m</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal" style="margin-right:.05724em">j</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">−</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">n</span><span class="mclose">)</span><span class="mord mathnormal" style="margin-right:.02691em">w</span><span class="mopen">(</span><span class="mord mathnormal">m</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal">n</span><span class="mclose">)</span></span></span></span></span></p><p>至此，我们已经大致的了解了什么是卷积，但是 CNN 中的卷积层用到的卷积公式和数学中的稍有不同，比如对于二维的卷积，CNN 中的公式为：</p><p><span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML" display="block"><semantics><mrow><mi>s</mi><mo stretchy="false">(</mo><mi>i</mi><mo separator="true">,</mo><mi>j</mi><mo stretchy="false">)</mo><mo>=</mo><mo stretchy="false">(</mo><mi>X</mi><mo>∗</mo><mi>W</mi><mo stretchy="false">)</mo><mo stretchy="false">(</mo><mi>i</mi><mo separator="true">,</mo><mi>j</mi><mo stretchy="false">)</mo><mo>=</mo><munder><mo>∑</mo><mi>m</mi></munder><munder><mo>∑</mo><mi>n</mi></munder><mi>x</mi><mo stretchy="false">(</mo><mi>i</mi><mo>+</mo><mi>m</mi><mo separator="true">,</mo><mi>j</mi><mo>+</mo><mi>n</mi><mo stretchy="false">)</mo><mi>w</mi><mo stretchy="false">(</mo><mi>m</mi><mo 
separator="true">,</mo><mi>n</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">s(i,j)=(X*W)(i,j)=\sum_{m}\sum_{n}x(i+m,j+n)w(m,n)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">s</span><span class="mopen">(</span><span class="mord mathnormal">i</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal" style="margin-right:.05724em">j</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mopen">(</span><span class="mord mathnormal" style="margin-right:.07847em">X</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal" style="margin-right:.13889em">W</span><span class="mclose">)</span><span class="mopen">(</span><span class="mord mathnormal">i</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal" style="margin-right:.05724em">j</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:2.3000100000000003em;vertical-align:-1.250005em"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" 
style="height:1.050005em"><span style="top:-1.8999949999999999em;margin-left:0"><span class="pstrut" style="height:3.05em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">m</span></span></span></span><span style="top:-3.0500049999999996em"><span class="pstrut" style="height:3.05em"></span><span><span class="mop op-symbol large-op">∑</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.250005em"><span></span></span></span></span></span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mop op-limits"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.050005em"><span style="top:-1.8999949999999999em;margin-left:0"><span class="pstrut" style="height:3.05em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mathnormal mtight">n</span></span></span></span><span style="top:-3.0500049999999996em"><span class="pstrut" style="height:3.05em"></span><span><span class="mop op-symbol large-op">∑</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.250005em"><span></span></span></span></span></span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal">x</span><span class="mopen">(</span><span class="mord mathnormal">i</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">+</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.85396em;vertical-align:-.19444em"></span><span class="mord mathnormal">m</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal" style="margin-right:.05724em">j</span><span class="mspace" 
style="margin-right:.2222222222222222em"></span><span class="mbin">+</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord mathnormal">n</span><span class="mclose">)</span><span class="mord mathnormal" style="margin-right:.02691em">w</span><span class="mopen">(</span><span class="mord mathnormal">m</span><span class="mpunct">,</span><span class="mspace" style="margin-right:.16666666666666666em"></span><span class="mord mathnormal">n</span><span class="mclose">)</span></span></span></span></span></p><p>其中，W 称为<strong>卷积核</strong>，而 X 则称为输入。W 和 X 的维度是相同的，比如：如果 X 是一个二维输入的矩阵，则 W 也是一个二维的矩阵；如果 X 是多维张量，则 W 也是一个多维的张量。</p><h3 id="312-卷积层工作原理"><a class="anchor" href="#312-卷积层工作原理">#</a> 3.1.2 卷积层工作原理</h3><p>不同的卷积核能够提取到图像中的不同特征，卷积运算的一个重要的特点就是：通过卷积运算，可以使原信号特征增强，并且<strong>降低噪音</strong>。<br>卷积核在二维平面上移动，并且卷积核的每个元素与被卷积图像对应位置相乘，再求和，通过卷积核的不断移动，就有了一个新的图像，这个图像完全由卷积核在各个位置时的乘积求和结果组成。<br>二维卷积在图像中的效果就是：对图像的每个像素的邻域（邻域大小就是卷积核的大小）加权求和得到该像素点的输出值。具体做法如下图所示（图中，输入的图像大小为 5×5，有一个 3×3 大小的滤波器（filter）在移动，输出的是一个 3×3 的特征图 (Feature Map)）：<br><img data-src="https://jiang-hs.github.io/post-images/1596363685613.png" alt="img"><br><img data-src="https://jiang-hs.github.io/post-images/1596362332207.gif" alt="img"></p><p>为了增加网络的表达能力，卷积层的输出结果往往还要经过激活函数，即把得到的特征图中的数据代入到激活函数中，得到一个新的矩阵，这个矩阵就是卷积层真正的输出。<br>事实上，卷积网络中的卷积核参数是通过网络训练学出的，除了可以学到类似的横向、纵向边缘滤波器，还可以学到任意角度的边缘滤波器。当然，不仅如此，检测颜色、形状、纹理等众多基本模式的滤波器（卷积核）都可以包含在一个足够复杂的深层卷积神经网络中。通过 “组合” 这些滤波器（卷积核）以及随着网络后续操作的进行，基本而一般的模式会逐渐被抽象为具有高层语义的 “概念” 表示，并以此对应到具体的样本类别中。</p><p>在经典的 LeNet 网络中，以<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>C</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">C1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.68333em;vertical-align:0"></span><span 
class="mord mathnormal" style="margin-right:.07153em">C</span><span class="mord">1</span></span></span></span> 层进行说明：<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>C</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">C1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.68333em;vertical-align:0"></span><span class="mord mathnormal" style="margin-right:.07153em">C</span><span class="mord">1</span></span></span></span> 层是一个卷积层，有<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>6</mn></mrow><annotation encoding="application/x-tex">6</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">6</span></span></span></span> 个卷积核（提取<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>6</mn></mrow><annotation encoding="application/x-tex">6</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">6</span></span></span></span> 种局部特征），核大小为<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>5</mn><mo>∗</mo><mn>5</mn></mrow><annotation encoding="application/x-tex">5*5</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" 
style="height:.64444em;vertical-align:0"></span><span class="mord">5</span></span></span></span>，能够输出<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>6</mn></mrow><annotation encoding="application/x-tex">6</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">6</span></span></span></span> 个特征图，大小为<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>28</mn><mo>∗</mo><mn>28</mn></mrow><annotation encoding="application/x-tex">28*28</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mord">8</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mord">8</span></span></span></span>。<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>C</mi><mn>1</mn></mrow><annotation encoding="application/x-tex">C1</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.68333em;vertical-align:0"></span><span class="mord mathnormal" style="margin-right:.07153em">C</span><span class="mord">1</span></span></span></span> 有<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>156</mn></mrow><annotation encoding="application/x-tex">156</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span 
class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">5</span><span class="mord">6</span></span></span></span> 个可训练参数（每个滤波器有<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>5</mn><mo>∗</mo><mn>5</mn><mo>=</mo><mn>25</mn></mrow><annotation encoding="application/x-tex">5*5=25</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mord">5</span></span></span></span> 个<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>u</mi><mi>n</mi><mi>i</mi><mi>t</mi></mrow><annotation encoding="application/x-tex">unit</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.65952em;vertical-align:0"></span><span class="mord mathnormal">u</span><span class="mord mathnormal">n</span><span class="mord mathnormal">i</span><span class="mord mathnormal">t</span></span></span></span> 参数和一个<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>b</mi><mi>i</mi><mi>a</mi><mi>s</mi></mrow><annotation 
encoding="application/x-tex">bias</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.69444em;vertical-align:0"></span><span class="mord mathnormal">b</span><span class="mord mathnormal">i</span><span class="mord mathnormal">a</span><span class="mord mathnormal">s</span></span></span></span> 参数，一共<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>6</mn></mrow><annotation encoding="application/x-tex">6</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">6</span></span></span></span> 个滤波器，共<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mn>5</mn><mo>∗</mo><mn>5</mn><mo>+</mo><mn>1</mn><mo stretchy="false">)</mo><mo>∗</mo><mn>6</mn><mo>=</mo><mn>156</mn></mrow><annotation encoding="application/x-tex">(5*5+1)*6=156</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mopen">(</span><span class="mord">5</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.72777em;vertical-align:-.08333em"></span><span class="mord">5</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">+</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord">1</span><span class="mclose">)</span><span class="mspace" 
style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">6</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">5</span><span class="mord">6</span></span></span></span> 个参数），共<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>156</mn><mo>∗</mo><mo stretchy="false">(</mo><mn>28</mn><mo>∗</mo><mn>28</mn><mo stretchy="false">)</mo><mo>=</mo><mn>122304</mn></mrow><annotation encoding="application/x-tex">156*(28*28)=122304</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">5</span><span class="mord">6</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mopen">(</span><span class="mord">2</span><span class="mord">8</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-.25em"></span><span class="mord">2</span><span class="mord">8</span><span class="mclose">)</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" 
style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">2</span><span class="mord">2</span><span class="mord">3</span><span class="mord">0</span><span class="mord">4</span></span></span></span> 个连接。</p><h2 id="32-池化层"><a class="anchor" href="#32-池化层">#</a> 3.2 池化层</h2><p>下面将介绍池化层，池化层夹在连续的卷积层中间， 用于压缩数据和参数的量，减小过拟合。简而言之，如果输入是图像的话，那么池化层的最主要作用就是压缩图像。<br>通常使用的池化操作为<strong>平均值池化（average-pooling）</strong> 和<strong>最大值池化（max-pooling）</strong>。需要指出的是，同卷积层操作不同，池化层不包含需要学习的参数，使用时仅需指定<strong>池化类型</strong>（average 或 max）、<strong>池化操作的核大小</strong>（kernel size） 和 <strong>池化操作的步长</strong>（stride）等超参数即可。平均值（最大值）池化在每次操作时，将池化核覆盖区域中所有值的平均值（最大值）作为池化结果。</p><p>接下来我们就举例说明池化层到底是如何实现的。下图中，池化核的大小为<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>2</mn><mo>∗</mo><mn>2</mn></mrow><annotation encoding="application/x-tex">2*2</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span></span></span></span>，池化操作的步长为<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>2</mn></mrow><annotation encoding="application/x-tex">2</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span></span></span></span>。<br><img data-src="https://jiang-hs.github.io/post-images/1596524052019.jpeg" 
alt="img"><br>首先对黄色（左上角 1,3,1,3）的<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>2</mn><mo>∗</mo><mn>2</mn></mrow><annotation encoding="application/x-tex">2*2</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span></span></span></span> 区域进行池化，由于此<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>2</mn><mo>∗</mo><mn>2</mn></mrow><annotation encoding="application/x-tex">2*2</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span></span></span></span> 区域的平均值为<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>2</mn></mrow><annotation encoding="application/x-tex">2</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span></span></span></span>（最大值为 3），那么对应的池化输出位置的值也为 2（3），由于步幅为 2，此时移动到红色（右上角 2,4,2,4）的位置去进行池化，输出的平均值为 3（最大值为 4）。同样的方法，可以得到蓝色区域（左下角 5,7,5,7）和 橙色区域（右下角 6,8,6,8）的输出值。最终，输入<span 
class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>4</mn><mo>∗</mo><mn>4</mn></mrow><annotation encoding="application/x-tex">4*4</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span></span></span></span> 的矩阵在池化后变成了<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>2</mn><mo>∗</mo><mn>2</mn></mrow><annotation encoding="application/x-tex">2*2</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">2</span></span></span></span> 的矩阵，进行了压缩。</p><p>在上面的例子中可以发现，池化操作后的结果相比其输入变了，其实池化操作实际上就是一种 “<strong>下采样</strong>” 操作。另一方面，池化也可以看成是一个用 p 范数作为非线性映射的 “卷积” 操作，特别的，当 p 趋近正无穷时就是最常见的最大值池化。<br>池化层的引入是仿照人的视觉系统对视觉输入对象进行降维（下采样）和抽象。在卷积神经网络过去的工作中，研究者普遍认为池化层有如下 3 
种功效：</p><ul><li><strong>特征不变性</strong>：池化操作使模型更关注是否存在某些特征而不是特征具体的位置，可看做是一种很强的先验，使特征学习包含某种强度的自由度，能容忍一些特征微小的位移。</li><li><strong>特征降维</strong>：由于池化操作的下采样作用，池化结果中的一个元素对应于原输入数据的一个子区域，因此池化相当于在空间范围内做了维度约减，从而使模型可以抽取更广范围的特征。同时减小了下一层输入大小，进而减小计算量和参数个数。</li><li>在一定程度<strong>防止过拟合</strong>，更方便优化。</li></ul><p>拓展：池化操作并不是卷积神经网络必须的元件或操作。近期，德国著名高校弗赖堡大学的研究者提出用一种特殊的卷积操作来代替池化层实现降采样，进而构建一个只含卷积操作的网络，其实验结果显示这种改造的网络可以达到甚至超过传统卷积神经网络（卷积层池化层交替）的分类精度。</p><h2 id="33-全连接层"><a class="anchor" href="#33-全连接层">#</a> 3.3 全连接层</h2><p>全连接层（Fully Connected layers,FC）的每一个结点都与上一层的所有结点相连，用来把前边提取到的特征综合起来。由于其全相连的特性，一般全连接层的参数也是最多的。<br>全连接层在整个卷积神经网络中起到 “分类器” 的作用。如果说卷积层、池化层和激活函数等操作是将原始数据映射到隐藏层特征空间的话，全连接层则起到将学到的 “分布式特征表示” 映射到样本标记空间的作用。在基本的 CNN 网络中，全连接层的作用是将经过多个卷积层和池化层的图像特征图中的特征进行整合，获取图像特征具有的高层含义，之后用于图像分类。在 CNN 网络中，全连接层将卷积层产生的特征图映射成一个固定长度（一般为输入图像数据集中的图像类别数）的特征向量。这个特征向量包含了输入图像所有特征的组合信息，虽然丢失了图像的位置信息，但是该向量将图像中最具有特点的图像特征保留了下来，以此完成图像分类任务。<br>下图中连线最密集的 2 个地方就是全连接层，这很明显的可以看出全连接层的参数的确很多。在前向计算过程，也就是一个线性的加权求和的过程，全连接层的每一个输出都可以看成前一层的每一个结点乘以一个权重系数 W，最后加上一个偏置值 b 得到，即 y = Wx + b。如下图中第一个全连接层，输入有<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>50</mn><mo>∗</mo><mn>4</mn><mo>∗</mo><mn>4</mn></mrow><annotation encoding="application/x-tex">50*4*4</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mord">0</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" 
style="height:.64444em;vertical-align:0"></span><span class="mord">4</span></span></span></span> 个神经元结点，输出有<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>500</mn></mrow><annotation encoding="application/x-tex">500</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mord">0</span><span class="mord">0</span></span></span></span> 个结点，则一共需要<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>50</mn><mo>∗</mo><mn>4</mn><mo>∗</mo><mn>4</mn><mo>∗</mo><mn>500</mn><mo>=</mo><mn>400000</mn></mrow><annotation encoding="application/x-tex">50*4*4*500=400000</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mord">0</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">∗</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mord">0</span><span class="mord">0</span><span class="mspace" 
style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span></span></span></span> 个权值参数<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>W</mi></mrow><annotation encoding="application/x-tex">W</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.68333em;vertical-align:0"></span><span class="mord mathnormal" style="margin-right:.13889em">W</span></span></span></span> 和<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>500</mn></mrow><annotation encoding="application/x-tex">500</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">5</span><span class="mord">0</span><span class="mord">0</span></span></span></span> 个偏置参数<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>b</mi></mrow><annotation encoding="application/x-tex">b</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.69444em;vertical-align:0"></span><span class="mord mathnormal">b</span></span></span></span>。<br><img data-src="https://jiang-hs.github.io/post-images/1596528537976.jpg" alt="img"></p><h2 id="34-反向传播"><a class="anchor" href="#34-反向传播">#</a> 3.4 反向传播</h2><h1 id="四-卷积神经网络的优点"><a class="anchor" href="#四-卷积神经网络的优点">#</a> 四、卷积神经网络的优点</h1><h2 id="41-图像特征的层次化结构"><a class="anchor" 
href="#41-图像特征的层次化结构">#</a> 4.1 图像特征的层次化结构</h2><p>图像特征具有层次化结构，传统方法无法做到自动组合这些层次化的特征，图像特征的层次化结构如下图所示：<br><img data-src="https://jiang-hs.github.io/post-images/1596534656283.jpg" alt="img"><br>图像相邻区域的像素组成边缘线条，边缘线条组合得到图像纹理，图像纹理经过组合形成局部图案，最后所有的局部图案构成一个图像物体。卷积神经网络具有组合图像层次化特征的能力。</p><h2 id="42-卷积神经网络的仿生物学理论"><a class="anchor" href="#42-卷积神经网络的仿生物学理论">#</a> 4.2 卷积神经网络的仿生物学理论</h2><p>1981 年，David Hubel 和 Torsten Wiesel 在猫的大脑视觉皮层上所做的实验证明了人类大脑视觉系统其实是不断地将低级特征通过神经元之间的连线传递为高级特征的过程，通过组合低层特征一步一步得到高层特征，越是高层特征就变得越抽象。卷积神经网络卷积层的二维卷积方式使其能够直接从图像像素中提取数据特征，这种处理方式更加接近人类大脑视觉系统的工作方式。</p><h2 id="43-卷积神经网络的局部连接属性"><a class="anchor" href="#43-卷积神经网络的局部连接属性">#</a> 4.3 卷积神经网络的局部连接属性</h2><p>卷积神经网络属于局部连接网络，局部连接网络是基于对自然图像的深刻研究而提出来的。由于自然图像存在局部区域稳定性的属性，自然图像中某一局部区域的统计特征相对于图像的其他相邻局部区域具有相似性，因此<strong>神经网络从自然图像中学习到的某一局部区域的特征同样适合于图像的其他相邻局部区域</strong>。下图中，左边为全连接网络，右边为局部连接网络。<br><img data-src="https://jiang-hs.github.io/post-images/1596535462312.jpg" alt="img"><br>局部连接网络比全连接网络有很大的优势。假设左边的输入图像层为 L1 层，右边神经节点层为 L2 层，对于图中的全连接网络层来说，如果 L1 层输入图像的分辨率为<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>1000</mn><mo>×</mo><mn>1000</mn></mrow><annotation encoding="application/x-tex">1000×1000</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.72777em;vertical-align:-.08333em"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">×</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span></span></span></span>，L2 隐含层有<span class="katex"><span class="katex-mathml"><math 
xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>100</mn></mrow><annotation encoding="application/x-tex">100</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span></span></span></span> 万个神经元，每个隐含层神经元全部都连接到 L1 层输入图像的每一个像素点，那么连接线达到了<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>1000</mn><mo>×</mo><mn>1000</mn><mo>×</mo><mn>1000000</mn><mo>=</mo><mn>1</mn><msup><mn>0</mn><mn>12</mn></msup></mrow><annotation encoding="application/x-tex">1000×1000×1000000=10^{12}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.72777em;vertical-align:-.08333em"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">×</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.72777em;vertical-align:-.08333em"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">×</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mord">0</span><span class="mspace" style="margin-right:.2777777777777778em"></span><span class="mrel">=</span><span class="mspace" 
style="margin-right:.2777777777777778em"></span></span><span class="base"><span class="strut" style="height:.8141079999999999em;vertical-align:0"></span><span class="mord">1</span><span class="mord"><span class="mord">0</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:.8141079999999999em"><span style="top:-3.063em;margin-right:.05em"><span class="pstrut" style="height:2.7em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">1</span><span class="mord mtight">2</span></span></span></span></span></span></span></span></span></span></span></span>，也就是<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>10</mn></mrow><annotation encoding="application/x-tex">10</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span></span></span></span> 的<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>12</mn></mrow><annotation encoding="application/x-tex">12</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">2</span></span></span></span> 次方个权值参数。对于图中的局部连接网络，L2 隐含层中每一个神经节点与 L1 层节点相同位置附近<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>10</mn><mo>×</mo><mn>10</mn></mrow><annotation encoding="application/x-tex">10×10</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.72777em;vertical-align:-.08333em"></span><span class="mord">1</span><span class="mord">0</span><span 
class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">×</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span></span></span></span> 大小的图像区域相连接，则<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>100</mn></mrow><annotation encoding="application/x-tex">100</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span></span></span></span> 万个隐层神经元只有<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>100</mn><mtext>万</mtext><mo>×</mo><mn>100</mn></mrow><annotation encoding="application/x-tex">100万×100</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.76666em;vertical-align:-.08333em"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span><span class="mord cjk_fallback">万</span><span class="mspace" style="margin-right:.2222222222222222em"></span><span class="mbin">×</span><span class="mspace" style="margin-right:.2222222222222222em"></span></span><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">1</span><span class="mord">0</span><span class="mord">0</span></span></span></span>，即<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>1</mn><msup><mn>0</mn><mn>8</mn></msup></mrow><annotation encoding="application/x-tex">10^{8}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span 
class="strut" style="height:.8141079999999999em;vertical-align:0"></span><span class="mord">1</span><span class="mord"><span class="mord">0</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:.8141079999999999em"><span style="top:-3.063em;margin-right:.05em"><span class="pstrut" style="height:2.7em"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord mtight">8</span></span></span></span></span></span></span></span></span></span></span></span> 个权值参数，其权值连接个数比全连接网络足足减少了<span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mn>4</mn></mrow><annotation encoding="application/x-tex">4</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:.64444em;vertical-align:0"></span><span class="mord">4</span></span></span></span> 个数量级。因此卷积神经网络相对于全连接网络来说，在训练速度上有很大的优势。</p><h2 id="44-卷积神经网络的权值共享特征"><a class="anchor" href="#44-卷积神经网络的权值共享特征">#</a> 4.4 卷积神经网络的权值共享特征</h2><p>权值共享是卷积神经网络的另外一个重要特性，卷积神经网络中<strong>相同的卷积核共享相同的卷积核权值和偏置值</strong>。同一种卷积核使用同样的权值按照某种顺序去卷积图像，比如从左到右，从上到下的顺序进行图像卷积。那么卷积后得到所有神经节点都是共享连接参数，也就是说每个神经元都是用同一个卷积核去卷积图像。所以一种卷积核只提取了图像的一种特征，如果需要提取多种输入图像的不同特征，则需要使用多种卷积核。<strong>权值共享减少了卷积神经网络需要学习的参数</strong>。<br>下面的动态图演示很好地体现了这一特征：<br><img data-src="https://jiang-hs.github.io/post-images/1596536088417.gif" alt="img"></p><h2 id="45-卷积神经网络端对端的处理方式"><a class="anchor" href="#45-卷积神经网络端对端的处理方式">#</a> 4.5 卷积神经网络端对端的处理方式</h2><p>传统图像分类中，研究人员需要花费大量精力去研究如何提取到更好的图像特征，如 HOG 和 SIFT 等特征。卷积神经网络进行图像分类的最大优点是采用端对端的处理方式，把传统图像分类任务中的图像预处理、特征提取变为一个黑盒子，研究人员只需要把精力放在研究如何设计卷积神经网络的网络架构和优化网络参数上。卷积神经网络把与图像卷积得到的特征进行前向传播，然后通过网络输出值与数据标签的差值反向传播来调整网络参数，通过这样的方式卷积神经网络能够自动提取到有利于分类任务的特征，不需要人为干预。下图展示了卷积神经网络与传统图像分类之间的差异性。<br><img data-src="https://jiang-hs.github.io/post-images/1596535081987.jpg" 
alt="img"><br>传统图像分类任务中的图像预处理，图像特征提取的每一个处理步骤需要非常专业的图像处理知识才能很好地完成整个图像分类任务。卷积神经网络大大简化了图像分类任务的流程，使用者不需要知道如何进行图像特征提取也能出色地完成图像分类任务。</p><div class="tags"><a href="/tags/%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD/" rel="tag"><i class="ic i-tag"></i> 人工智能</a> <a href="/tags/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%9F%BA%E7%A1%80/" rel="tag"><i class="ic i-tag"></i> 机器学习基础</a></div></div><footer><div class="meta"><span class="item"><span class="icon"><i class="ic i-calendar-check"></i> </span><span class="text">更新于</span> <time title="修改时间：2021-08-25 11:32:03" itemprop="dateModified" datetime="2021-08-25T11:32:03+08:00">2021-08-25</time> </span><span id="posts/c6767314/" class="item leancloud_visitors" data-flag-title="卷积神经网络" title="阅读次数"><span class="icon"><i class="ic i-eye"></i> </span><span class="text">阅读次数</span> <span class="leancloud-visitors-count"></span> <span class="text">次</span></span></div><div class="reward"><button><i class="ic i-heartbeat"></i> 赞赏</button><p>请我喝[茶]~(￣▽￣)~*</p><div id="qr"><div><img data-src="/images/wechatpay.png" alt="hang shun 微信支付"><p>微信支付</p></div><div><img data-src="/images/alipay.png" alt="hang shun 支付宝"><p>支付宝</p></div><div><img data-src="/images/paypal.png" alt="hang shun 贝宝"><p>贝宝</p></div></div></div><div id="copyright"><ul><li class="author"><strong>本文作者： </strong>hang shun <i class="ic i-at"><em>@</em></i>航 順</li><li class="link"><strong>本文链接：</strong> <a href="https://jiang-hs.gitee.io/posts/c6767314/" title="卷积神经网络">https://jiang-hs.gitee.io/posts/c6767314/</a></li><li class="license"><strong>版权声明： </strong>本站所有文章除特别声明外，均采用 <span class="exturl" data-url="aHR0cHM6Ly9jcmVhdGl2ZWNvbW1vbnMub3JnL2xpY2Vuc2VzL2J5LW5jLXNhLzQuMC9kZWVkLnpo"><i class="ic i-creative-commons"><em>(CC)</em></i>BY-NC-SA</span> 许可协议。转载请注明出处！</li></ul></div></footer></article></div><div class="post-nav"><div class="item left"><a href="/posts/7ca31f7/" itemprop="url" rel="prev" 
data-background-image="https:&#x2F;&#x2F;pic1.imgdb.cn&#x2F;item&#x2F;64427ca10d2dde5777b04c1f.png" title="神经网络"><span class="type">上一篇</span> <span class="category"><i class="ic i-flag"></i> 机器学习基础</span><h3>神经网络</h3></a></div><div class="item right"><a href="/posts/20417848/" itemprop="url" rel="next" data-background-image="https:&#x2F;&#x2F;pic1.imgdb.cn&#x2F;item&#x2F;64427c6f0d2dde5777affbe2.jpg" title="循环神经网络"><span class="type">下一篇</span> <span class="category"><i class="ic i-flag"></i> 机器学习基础</span><h3>循环神经网络</h3></a></div></div><div class="wrap" id="comments"></div></div><div id="sidebar"><div class="inner"><div class="panels"><div class="inner"><div class="contents panel pjax" data-title="文章目录"><ol class="toc"><li class="toc-item toc-level-1"><a class="toc-link" href="#%E4%B8%80-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%AE%80%E4%BB%8B"><span class="toc-number">1.</span> <span class="toc-text">一、卷积神经网络简介</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#%E4%BA%8C-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E7%BB%93%E6%9E%84"><span class="toc-number">2.</span> <span class="toc-text">二、卷积神经网络的结构</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#%E4%B8%89-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E5%8E%9F%E7%90%86"><span class="toc-number">3.</span> <span class="toc-text">三、卷积神经网络的原理</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#31-%E5%8D%B7%E7%A7%AF%E5%B1%82"><span class="toc-number">3.1.</span> <span class="toc-text">3.1 卷积层</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#311-%E5%8D%B7%E7%A7%AF%E5%85%AC%E5%BC%8F"><span class="toc-number">3.1.1.</span> <span class="toc-text">3.1.1 卷积公式</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#312-%E5%8D%B7%E7%A7%AF%E5%B1%82%E5%B7%A5%E4%BD%9C%E5%8E%9F%E7%90%86"><span 
class="toc-number">3.1.2.</span> <span class="toc-text">3.1.2 卷积层工作原理</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#32-%E6%B1%A0%E5%8C%96%E5%B1%82"><span class="toc-number">3.2.</span> <span class="toc-text">3.2 池化层</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#33-%E5%85%A8%E8%BF%9E%E6%8E%A5%E5%B1%82"><span class="toc-number">3.3.</span> <span class="toc-text">3.3 全连接层</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#34-%E5%8F%8D%E5%90%91%E4%BC%A0%E6%92%AD"><span class="toc-number">3.4.</span> <span class="toc-text">3.4 反向传播</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#%E5%9B%9B-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E4%BC%98%E7%82%B9"><span class="toc-number">4.</span> <span class="toc-text">四、卷积神经网络的优点</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#41-%E5%9B%BE%E5%83%8F%E7%89%B9%E5%BE%81%E7%9A%84%E5%B1%82%E6%AC%A1%E5%8C%96%E7%BB%93%E6%9E%84"><span class="toc-number">4.1.</span> <span class="toc-text">4.1 图像特征的层次化结构</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#42-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E4%BB%BF%E7%94%9F%E7%89%A9%E5%AD%A6%E7%90%86%E8%AE%BA"><span class="toc-number">4.2.</span> <span class="toc-text">4.2 卷积神经网络的仿生物学理论</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#43-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E5%B1%80%E9%83%A8%E8%BF%9E%E6%8E%A5%E5%B1%9E%E6%80%A7"><span class="toc-number">4.3.</span> <span class="toc-text">4.3 卷积神经网络的局部连接属性</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#44-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%9A%84%E6%9D%83%E5%80%BC%E5%85%B1%E4%BA%AB%E7%89%B9%E5%BE%81"><span class="toc-number">4.4.</span> <span class="toc-text">4.4 卷积神经网络的权值共享特征</span></a></li><li class="toc-item 
toc-level-2"><a class="toc-link" href="#45-%E5%8D%B7%E7%A7%AF%E7%A5%9E%E7%BB%8F%E7%BD%91%E7%BB%9C%E7%AB%AF%E5%AF%B9%E7%AB%AF%E7%9A%84%E5%A4%84%E7%90%86%E6%96%B9%E5%BC%8F"><span class="toc-number">4.5.</span> <span class="toc-text">4.5 卷积神经网络端对端的处理方式</span></a></li></ol></li></ol></div><div class="related panel pjax" data-title="系列文章"><ul><li><a href="/posts/202f1f0f/" rel="bookmark" title="梯度下降及线性回归">梯度下降及线性回归</a></li><li><a href="/posts/d27e233f/" rel="bookmark" title="K最近邻分类算法（KNN）分析及实现">K最近邻分类算法（KNN）分析及实现</a></li><li><a href="/posts/30c02801/" rel="bookmark" title="KNN算法实现鸢尾花数据集的分类">KNN算法实现鸢尾花数据集的分类</a></li><li><a href="/posts/2afaae3d/" rel="bookmark" title="K-means算法">K-means算法</a></li><li><a href="/posts/fe5ae0e7/" rel="bookmark" title="基于矩阵分解的推荐算法">基于矩阵分解的推荐算法</a></li><li><a href="/posts/a10feb4a/" rel="bookmark" title="协同过滤算法">协同过滤算法</a></li><li><a href="/posts/7ca31f7/" rel="bookmark" title="神经网络">神经网络</a></li><li class="active"><a href="/posts/c6767314/" rel="bookmark" title="卷积神经网络">卷积神经网络</a></li><li><a href="/posts/20417848/" rel="bookmark" title="循环神经网络">循环神经网络</a></li></ul></div><div class="overview panel" data-title="站点概览"><div class="author" itemprop="author" itemscope itemtype="http://schema.org/Person"><img class="image" itemprop="image" alt="hang shun" data-src="/images/avatar.jpg"><p class="name" itemprop="name">hang shun</p><div class="description" itemprop="description">世中逢尔，雨中逢花</div></div><nav class="state"><div class="item posts"><a href="/archives/"><span class="count">45</span> <span class="name">文章</span></a></div><div class="item categories"><a href="/categories/"><span class="count">10</span> <span class="name">分类</span></a></div><div class="item tags"><a href="/tags/"><span class="count">25</span> <span class="name">标签</span></a></div></nav><div class="social"><span class="exturl item github" data-url="aHR0cHM6Ly9naXRodWIuY29tL0pJQU5HLUhT" title="https:&#x2F;&#x2F;github.com&#x2F;JIANG-HS"><i class="ic i-github"></i></span> <span 
class="exturl item zhihu" data-url="aHR0cHM6Ly93d3cuemhpaHUuY29tL3Blb3BsZS9odWktc2h1bi14aW4tbGl1" title="https:&#x2F;&#x2F;www.zhihu.com&#x2F;people&#x2F;hui-shun-xin-liu"><i class="ic i-zhihu"></i></span> <span class="exturl item music" data-url="aHR0cHM6Ly9tdXNpYy4xNjMuY29tLyMvdXNlci9ob21lP2lkPTE4MzkwMTczMzI=" title="https:&#x2F;&#x2F;music.163.com&#x2F;#&#x2F;user&#x2F;home?id&#x3D;1839017332"><i class="ic i-cloud-music"></i></span> <span class="exturl item bilibili" data-url="aHR0cHM6Ly9zcGFjZS5iaWxpYmlsaS5jb20vMzIxMTYyNDg1" title="https:&#x2F;&#x2F;space.bilibili.com&#x2F;321162485"><i class="ic i-bilibili"></i></span></div><ul class="menu"><li class="item"><a href="/" rel="section"><i class="ic i-home"></i>首页</a></li><li class="item"><a href="/about/" rel="section"><i class="ic i-user"></i>关于</a></li><li class="item dropdown"><a href="javascript:void(0);"><i class="ic i-feather"></i>文章</a><ul class="submenu"><li class="item"><a href="/archives/" rel="section"><i class="ic i-list-alt"></i>归档</a></li><li class="item"><a href="/categories/" rel="section"><i class="ic i-th"></i>分类</a></li><li class="item"><a href="/tags/" rel="section"><i class="ic i-tags"></i>标签</a></li></ul></li><li class="item"><a href="/friends/" rel="section"><i class="ic i-heart"></i>友達</a></li><li class="item"><a href="/movie/" rel="section"><i class="ic i-play"></i>movie</a></li><li class="item"><a href="/music/" rel="section"><i class="ic i-music"></i>music</a></li></ul></div></div></div><ul id="quick"><li class="prev pjax"><a href="/posts/7ca31f7/" rel="prev" title="上一篇"><i class="ic i-chevron-left"></i></a></li><li class="up"><i class="ic i-arrow-up"></i></li><li class="down"><i class="ic i-arrow-down"></i></li><li class="next pjax"><a href="/posts/20417848/" rel="next" title="下一篇"><i class="ic i-chevron-right"></i></a></li><li class="percent"></li></ul></div></div><div class="dimmer"></div></div></main><footer id="footer"><div class="inner"><div class="widgets"><div class="rpost 
pjax"><h2>随机文章</h2><ul><li class="item"><div class="breadcrumb"><a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%9F%BA%E7%A1%80/" title="分类于 机器学习基础">机器学习基础</a></div><span><a href="/posts/d27e233f/" title="K最近邻分类算法（KNN）分析及实现">K最近邻分类算法（KNN）分析及实现</a></span></li><li class="item"><div class="breadcrumb"></div><span><a href="/posts/748f0bbe/" title="强化学习">强化学习</a></span></li><li class="item"><div class="breadcrumb"></div><span><a href="/posts/1a7c4498/" title="51单片机基础-1">51单片机基础-1</a></span></li><li class="item"><div class="breadcrumb"><a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/" title="分类于 论文精读">论文精读</a> <i class="ic i-angle-right"></i> <a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/" title="分类于 新闻推荐">新闻推荐</a> <i class="ic i-angle-right"></i> <a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/%E9%9A%90%E7%A7%81%E4%BF%9D%E6%8A%A4%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/" title="分类于 隐私保护新闻推荐">隐私保护新闻推荐</a></div><span><a href="/posts/b2c2f458/" title="Adv-MultVAE：基于对抗学习的隐私保护推荐算法">Adv-MultVAE：基于对抗学习的隐私保护推荐算法</a></span></li><li class="item"><div class="breadcrumb"><a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%9F%BA%E7%A1%80/" title="分类于 机器学习基础">机器学习基础</a></div><span><a href="/posts/2afaae3d/" title="K-means算法">K-means算法</a></span></li><li class="item"><div class="breadcrumb"></div><span><a href="/posts/e4142071/" title="GAN网络-简单明了">GAN网络-简单明了</a></span></li><li class="item"><div class="breadcrumb"><a href="/categories/%E7%94%9F%E6%88%90%E6%A8%A1%E5%9E%8B/" title="分类于 生成模型">生成模型</a></div><span><a href="/posts/b2bcd60d/" title="一文读懂主流生成模型GAN、VAE、DM和DALL·E2等">一文读懂主流生成模型GAN、VAE、DM和DALL·E2等</a></span></li><li class="item"><div class="breadcrumb"><a href="/categories/%E6%9C%BA%E5%99%A8%E5%AD%A6%E4%B9%A0%E5%9F%BA%E7%A1%80/" title="分类于 机器学习基础">机器学习基础</a></div><span><a href="/posts/7ca31f7/" title="神经网络">神经网络</a></span></li><li class="item"><div 
class="breadcrumb"><a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/" title="分类于 论文精读">论文精读</a> <i class="ic i-angle-right"></i> <a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/" title="分类于 新闻推荐">新闻推荐</a> <i class="ic i-angle-right"></i> <a href="/categories/%E6%B3%A8%E6%84%8F%E5%8A%9B%E6%9C%BA%E5%88%B6/" title="分类于 注意力机制">注意力机制</a> <i class="ic i-angle-right"></i> <a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/%E9%9A%90%E7%A7%81%E4%BF%9D%E6%8A%A4%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/" title="分类于 隐私保护新闻推荐">隐私保护新闻推荐</a></div><span><a href="/posts/e3b95b70/" title="Fastformer：Additive Attention Can Be All You Need">Fastformer：Additive Attention Can Be All You Need</a></span></li><li class="item"><div class="breadcrumb"><a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/" title="分类于 论文精读">论文精读</a> <i class="ic i-angle-right"></i> <a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/" title="分类于 新闻推荐">新闻推荐</a> <i class="ic i-angle-right"></i> <a href="/categories/%E8%AE%BA%E6%96%87%E7%B2%BE%E8%AF%BB/%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/%E9%9A%90%E7%A7%81%E4%BF%9D%E6%8A%A4%E6%96%B0%E9%97%BB%E6%8E%A8%E8%8D%90/" title="分类于 隐私保护新闻推荐">隐私保护新闻推荐</a></div><span><a href="/posts/f8ce3000/" title="Hetedp：基于异构图神经网络的隐私保护推荐">Hetedp：基于异构图神经网络的隐私保护推荐</a></span></li></ul></div><div><h2>最新评论</h2><ul class="leancloud-recent-comment"></ul></div></div><div class="status"><div class="copyright">&copy; 2020 – <span itemprop="copyrightYear">2023</span> <span class="with-love"><i class="ic i-sakura rotate"></i> </span><span class="author" itemprop="copyrightHolder">hang shun @ hang shun</span></div><div class="count"><span class="post-meta-item-icon"><i class="ic i-chart-area"></i> </span><span title="站点总字数">267k 字</span> <span class="post-meta-divider">|</span> <span class="post-meta-item-icon"><i class="ic i-coffee"></i> </span><span 
title="站点阅读时长">4:02</span></div><div class="powered-by">基于 <span class="exturl" data-url="aHR0cHM6Ly9oZXhvLmlv">Hexo</span> & Theme.<span class="exturl" data-url="aHR0cHM6Ly9naXRodWIuY29tL2FtZWhpbWUvaGV4by10aGVtZS1zaG9rYQ==">Shoka</span></div></div></div></footer></div><script data-config>var LOCAL={path:"posts/c6767314/",favicon:{show:"(´Д｀)被发现了！",hide:"（●´3｀●）我藏好了~"},search:{placeholder:"文章搜索",empty:"关于 「 ${query} 」，什么也没搜到",stats:"${time} ms 内找到 ${hits} 条结果"},valine:!0,copy_tex:!0,katex:!0,fancybox:!0,copyright:'复制成功，转载请遵守 <i class="ic i-creative-commons"></i>BY-NC-SA 协议。',ignores:[function(e){return e.includes("#")},function(e){return new RegExp(LOCAL.path+"$").test(e)}]}</script><!-- SECURITY: cdn.polyfill.io was sold and began serving malicious scripts (June 2024); use Cloudflare's drop-in mirror instead --><script src="https://cdnjs.cloudflare.com/polyfill/v2/polyfill.min.js"></script><script src="https://cdn.jsdelivr.net/combine/npm/pace-js@1.0.2/pace.min.js,npm/pjax@0.2.8/pjax.min.js,npm/whatwg-fetch@3.4.0/dist/fetch.umd.min.js,npm/animejs@3.2.0/lib/anime.min.js,npm/algoliasearch@4/dist/algoliasearch-lite.umd.js,npm/instantsearch.js@4/dist/instantsearch.production.min.js,npm/lozad@1/dist/lozad.min.js,npm/quicklink@2/dist/quicklink.umd.js"></script><script src="/js/app.js?v=0.0.0"></script></body></html><!-- rebuild by hrmmi -->