
<!DOCTYPE HTML>
<html lang="zh-hans" >
    <head>
        <meta charset="UTF-8">
        <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
        <title>第二节 lenet5代码讲解 · Tensorflow学习笔记</title>
        <meta http-equiv="X-UA-Compatible" content="IE=edge" />
        <meta name="description" content="">
        <meta name="generator" content="GitBook 3.2.3">
        <meta name="author" content="scottdu">
        
        
    
    <link rel="stylesheet" href="../gitbook/style.css">

    
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-katex/katex.min.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-expandable-chapters-small/expandable-chapters-small.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-tbfed-pagefooter/footer.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-alerts/style.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-donate/plugin.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-splitter/splitter.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-highlight/website.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-search/search.css">
                
            
                
                <link rel="stylesheet" href="../gitbook/gitbook-plugin-fontsettings/website.css">
                
            
        

    

    
        
    
        
    
        
    
        
    
        
    
        
    

        
    
    
    
    
    <meta name="HandheldFriendly" content="true"/>
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <meta name="apple-mobile-web-app-capable" content="yes">
    <meta name="apple-mobile-web-app-status-bar-style" content="black">
    <link rel="apple-touch-icon-precomposed" sizes="152x152" href="../gitbook/images/apple-touch-icon-precomposed-152.png">
    <link rel="shortcut icon" href="../gitbook/images/favicon.ico" type="image/x-icon">

    
    <link rel="next" href="../chapter6/" />
    
    
    <link rel="prev" href="section5.1.html" />
    

    
    <link rel="stylesheet" href="../gitbook/gitbook-plugin-chart/c3/c3.min.css">
    <script src="../gitbook/gitbook-plugin-chart/c3/d3.min.js"></script>
    <script src="../gitbook/gitbook-plugin-chart/c3/c3.min.js"></script>
    

    <script src="../gitbook/gitbook-plugin-graph/d3.min.js"></script>
    <script src="../gitbook/gitbook-plugin-graph/function-plot.js"></script>    

    </head>
    <body>
        
<div class="book">
    <div class="book-summary">
        
            
<div id="book-search-input" role="search">
    <input type="text" placeholder="输入并搜索" />
</div>

            
                <nav role="navigation">
                


<ul class="summary">
    
    

    

    
        
        
    
        <li class="chapter " data-level="1.1" data-path="../">
            
                <a href="../">
            
                    
                    简介
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.2" data-path="../chapter1/">
            
                <a href="../chapter1/">
            
                    
                    第一章 Tensorflow框架
            
                </a>
            

            
            <ul class="articles">
                
    
        <li class="chapter " data-level="1.2.1" data-path="../chapter1/section1.1.html">
            
                <a href="../chapter1/section1.1.html">
            
                    
                    第一节 张量、计算图、会话
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.2.2" data-path="../chapter1/section1.2.html">
            
                <a href="../chapter1/section1.2.html">
            
                    
                    第二节 前向传播
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.2.3" data-path="../chapter1/section1.3.html">
            
                <a href="../chapter1/section1.3.html">
            
                    
                    第三节 反向传播
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.2.4" data-path="../chapter1/section1.4.html">
            
                <a href="../chapter1/section1.4.html">
            
                    
                    第四节 搭建神经网络的步骤
            
                </a>
            

            
        </li>
    

            </ul>
            
        </li>
    
        <li class="chapter " data-level="1.3" data-path="../chapter2/">
            
                <a href="../chapter2/">
            
                    
                    第二章 神经网络优化
            
                </a>
            

            
            <ul class="articles">
                
    
        <li class="chapter " data-level="1.3.1" data-path="../chapter2/section2.1.html">
            
                <a href="../chapter2/section2.1.html">
            
                    
                    第一节 损失函数
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.3.2" data-path="../chapter2/section2.2.html">
            
                <a href="../chapter2/section2.2.html">
            
                    
                    第二节 学习率
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.3.3" data-path="../chapter2/section2.3.html">
            
                <a href="../chapter2/section2.3.html">
            
                    
                    第三节 滑动平均
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.3.4" data-path="../chapter2/section2.4.html">
            
                <a href="../chapter2/section2.4.html">
            
                    
                    第四节 正则化
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.3.5" data-path="../chapter2/section2.5.html">
            
                <a href="../chapter2/section2.5.html">
            
                    
                    第五节 神经网络的搭建
            
                </a>
            

            
        </li>
    

            </ul>
            
        </li>
    
        <li class="chapter " data-level="1.4" data-path="../chapter3/">
            
                <a href="../chapter3/">
            
                    
                    第三章 全连接网络基础
            
                </a>
            

            
            <ul class="articles">
                
    
        <li class="chapter " data-level="1.4.1" data-path="../chapter3/section3.1.html">
            
                <a href="../chapter3/section3.1.html">
            
                    
                    第一节 MNIST数据
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.4.2" data-path="../chapter3/section3.2.html">
            
                <a href="../chapter3/section3.2.html">
            
                    
                    第二节 模块化搭建神经网络方法
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.4.3" data-path="../chapter3/section3.3.html">
            
                <a href="../chapter3/section3.3.html">
            
                    
                    第三节 手写数字识别准确率输出
            
                </a>
            

            
        </li>
    

            </ul>
            
        </li>
    
        <li class="chapter " data-level="1.5" data-path="../chapter4/">
            
                <a href="../chapter4/">
            
                    
                    第四章 全连接网络实践
            
                </a>
            

            
            <ul class="articles">
                
    
        <li class="chapter " data-level="1.5.1" data-path="../chapter4/section4.1.html">
            
                <a href="../chapter4/section4.1.html">
            
                    
                    第一节 输入手写数字图片输出识别结果
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.5.2" data-path="../chapter4/section4.2.html">
            
                <a href="../chapter4/section4.2.html">
            
                    
                    第二节 制作数据集
            
                </a>
            

            
        </li>
    

            </ul>
            
        </li>
    
        <li class="chapter " data-level="1.6" data-path="./">
            
                <a href="./">
            
                    
                    第五章 卷积网络基础
            
                </a>
            

            
            <ul class="articles">
                
    
        <li class="chapter " data-level="1.6.1" data-path="section5.1.html">
            
                <a href="section5.1.html">
            
                    
                    第一节 卷积神经网络
            
                </a>
            

            
        </li>
    
        <li class="chapter active" data-level="1.6.2" data-path="section5.2.html">
            
                <a href="section5.2.html">
            
                    
                    第二节 lenet5代码讲解
            
                </a>
            

            
        </li>
    

            </ul>
            
        </li>
    
        <li class="chapter " data-level="1.7" data-path="../chapter6/">
            
                <a href="../chapter6/">
            
                    
                    第六章 卷积网络实践
            
                </a>
            

            
            <ul class="articles">
                
    
        <li class="chapter " data-level="1.7.1" data-path="../chapter6/section6.1.html">
            
                <a href="../chapter6/section6.1.html">
            
                    
                    第一节 复现已有的卷积神经网络
            
                </a>
            

            
        </li>
    
        <li class="chapter " data-level="1.7.2" data-path="../chapter6/section6.2.html">
            
                <a href="../chapter6/section6.2.html">
            
                    
                    第二节 用vgg16实现图片识别
            
                </a>
            

            
        </li>
    

            </ul>
            
        </li>
    
        <li class="chapter " data-level="1.8" data-path="../chapter7/">
            
                <a href="../chapter7/">
            
                    
                    第七章 Tensorflow应用
            
                </a>
            

            
        </li>
    

    

    <li class="divider"></li>

    <li>
        <a href="https://www.gitbook.com" target="_blank" rel="noopener noreferrer" class="gitbook-link">
            本书使用 GitBook 发布
        </a>
    </li>
</ul>


                </nav>
            
        
    </div>

    <div class="book-body">
        
            <div class="body-inner">
                
                    

<div class="book-header" role="navigation">
    

    <!-- Title -->
    <h1>
        <i class="fa fa-circle-o-notch fa-spin"></i>
        <a href=".." >第二节 lenet5代码讲解</a>
    </h1>
</div>




                    <div class="page-wrapper" tabindex="-1" role="main">
                        <div class="page-inner">
                            
<div id="book-search-results">
    <div class="search-noresults">
    
                                <section class="normal markdown-section">
                                
                                <h1 id="&#x7B2C;&#x4E8C;&#x8282;-lenel5&#x4EE3;&#x7801;&#x8BB2;&#x89E3;">&#x7B2C;&#x4E8C;&#x8282; lenel5&#x4EE3;&#x7801;&#x8BB2;&#x89E3;</h1>
<p><code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x662F;<code>Yann LeCun</code>&#x7B49;&#x4EBA;&#x5728; 1998 &#x5E74;&#x63D0;&#x51FA;&#x7684;&#xFF0C;&#x8BE5;&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x5145;&#x5206;&#x8003;&#x8651;&#x56FE;&#x50CF; &#x7684;&#x76F8;&#x5173;&#x6027;&#x3002;</p>
<ul>
<li><code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7ED3;&#x6784;&#x4E3A;&#xFF1A;</li>
</ul>
<p>(1) &#x8F93;&#x5165;&#x4E3A;<code>32*32*1</code>&#x7684;&#x56FE;&#x7247;&#x5927;&#x5C0F;&#xFF0C;&#x4E3A;&#x5355;&#x901A;&#x9053;&#x7684;&#x8F93;&#x5165;;</p>
<p>(2) &#x8FDB;&#x884C;&#x5377;&#x79EF;&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5*5*1</code>&#xFF0C;&#x4E2A;&#x6570;&#x4E3A;<code>6</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x975E;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;;</p>
<p>(3) &#x5C06;&#x5377;&#x79EF;&#x7ED3;&#x679C;&#x901A;&#x8FC7;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;;</p>
<p>(4) &#x8FDB;&#x884C;&#x6C60;&#x5316;&#xFF0C;&#x6C60;&#x5316;&#x5927;&#x5C0F;&#x4E3A;<code>2*2</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;; </p>
<p>(5) &#x8FDB;&#x884C;&#x5377;&#x79EF;&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5*5*6</code>&#xFF0C;&#x4E2A;&#x6570;&#x4E3A;<code>16</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x975E;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;;</p>
<p>(6) &#x5C06;&#x5377;&#x79EF;&#x7ED3;&#x679C;&#x901A;&#x8FC7;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;;</p>
<p>(7) &#x8FDB;&#x884C;&#x6C60;&#x5316;&#xFF0C;&#x6C60;&#x5316;&#x5927;&#x5C0F;&#x4E3A;<code>2*2</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;;</p>
<p>(8) &#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x8FDB;&#x884C;<code>10</code>&#x5206;&#x7C7B;&#x3002;</p>
<p><code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7684;&#x7ED3;&#x6784;&#x56FE;&#x53CA;&#x7279;&#x5F81;&#x63D0;&#x53D6;&#x8FC7;&#x7A0B;&#x5982;&#x4E0B;&#x6240;&#x793A;&#xFF1A;</p>
<p><img src="http://ovhbzkbox.bkt.clouddn.com/2018-08-15-15343302233742.jpg" alt=""></p>
<p><code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7684;&#x8F93;&#x5165;&#x662F;<code>32*32*1</code>&#xFF0C;&#x7ECF;&#x8FC7;<code>5*5*1</code>&#x7684;&#x5377;&#x79EF;&#x6838;&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x4E2A;&#x6570;&#x4E3A;<code>6</code>&#x4E2A;&#xFF0C;&#x91C7;&#x7528;&#x975E;&#x5168;&#x96F6;&#x586B;&#x5145;&#x65B9;&#x5F0F;&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x6839;&#x636E;&#x975E;&#x5168;&#x96F6;&#x586B;&#x5145;&#x8BA1;&#x7B97;&#x516C;&#x5F0F;&#xFF1A;<code>&#x8F93;&#x51FA;&#x5C3A;&#x5BF8;=(&#x8F93;&#x5165; &#x5C3A;&#x5BF8;-&#x5377;&#x79EF;&#x6838;&#x5C3A;&#x5BF8;+1)/&#x6B65;&#x957F;=(32-5+1)/1=28</code>&#x3002;&#x6545;&#x7ECF;&#x8FC7;&#x5377;&#x79EF;&#x540E;&#x8F93;&#x51FA;&#x4E3A;<code>28*28*6</code>&#x3002; &#x7ECF;&#x8FC7;&#x7B2C;&#x4E00;&#x5C42;&#x6C60;&#x5316;&#x5C42;&#xFF0C;&#x6C60;&#x5316;&#x5927;&#x5C0F;&#x4E3A;<code>2*2</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x7531;&#x5168;&#x96F6;&#x586B;&#x5145;&#x8BA1;&#x7B97;&#x516C;&#x5F0F;&#xFF1A;<code>&#x8F93;&#x51FA;&#x5C3A;&#x5BF8;=&#x8F93;&#x5165;&#x5C3A;&#x5BF8;/&#x6B65;&#x957F;=28/2=14</code>&#xFF0C;&#x6C60;&#x5316;&#x5C42;&#x4E0D;&#x6539;&#x53D8;&#x6DF1;&#x5EA6;&#xFF0C;&#x6DF1;&#x5EA6;&#x4ECD;&#x4E3A;<code>6</code>&#x3002;&#x7528;&#x540C;&#x6837;&#x8BA1;&#x7B97;&#x65B9;&#x6CD5;&#xFF0C;&#x5F97;&#x5230;&#x7B2C;&#x4E8C;&#x5C42;&#x6C60;&#x5316;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x4E3A;<code>5*5*16</code>&#x3002;&#x5C06;&#x7B2C;&#x4E8C;&#x6C60;&#x5316;&#x5C42;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x62C9;&#x76F4;&#x9001;&#x5165;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x3002;</p>
<ul>
<li>&#x6839;&#x636E;<code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7684;&#x7ED3;&#x6784;&#x53EF;&#x5F97;&#xFF0C;<code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x5177;&#x6709;&#x5982;&#x4E0B;&#x7279;&#x70B9;:</li>
</ul>
<p>(1)&#x5377;&#x79EF;(Conv)&#x3001;&#x6C60;&#x5316;(ave-pooling)&#x3001;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;(sigmoid)&#x76F8;&#x4E92;&#x4EA4;&#x66FF;;</p>
<p>(2)&#x5C42;&#x4E0E;&#x5C42;&#x4E4B;&#x95F4;&#x7A00;&#x758F;&#x8FDE;&#x63A5;&#xFF0C;&#x51CF;&#x5C11;&#x8BA1;&#x7B97;&#x590D;&#x6742;&#x5EA6;&#x3002;</p>
<ul>
<li>&#x5BF9;<code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x8FDB;&#x884C;&#x5FAE;&#x8C03;&#xFF0C;&#x4F7F;&#x5176;&#x9002;&#x5E94;<code>Mnist</code>&#x6570;&#x636E;&#x96C6;:</li>
</ul>
<p>&#x7531;&#x4E8E;<code>Mnist</code>&#x6570;&#x636E;&#x96C6;&#x4E2D;&#x56FE;&#x7247;&#x5927;&#x5C0F;&#x4E3A;<code>28*28*1</code>&#x7684;&#x7070;&#x5EA6;&#x56FE;&#x7247;&#xFF0C;&#x800C;<code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7684;&#x8F93;&#x5165;&#x4E3A;<code>32*32*1</code>&#xFF0C;&#x6545;&#x9700;&#x8981;&#x5BF9;<code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x8FDB;&#x884C;&#x5FAE;&#x8C03;&#x3002;</p>
<p>(1)&#x8F93;&#x5165;&#x4E3A;<code>28*28*1</code>&#x7684;&#x56FE;&#x7247;&#x5927;&#x5C0F;&#xFF0C;&#x4E3A;&#x5355;&#x901A;&#x9053;&#x7684;&#x8F93;&#x5165;;</p>
<p>(2)&#x8FDB;&#x884C;&#x5377;&#x79EF;&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5*5*1</code>&#xFF0C;&#x4E2A;&#x6570;&#x4E3A;<code>32</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;; </p>
<p>(3)&#x5C06;&#x5377;&#x79EF;&#x7ED3;&#x679C;&#x901A;&#x8FC7;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;;</p>
<p>(4)&#x8FDB;&#x884C;&#x6C60;&#x5316;&#xFF0C;&#x6C60;&#x5316;&#x5927;&#x5C0F;&#x4E3A;<code>2*2</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;;</p>
<p>(5)&#x8FDB;&#x884C;&#x5377;&#x79EF;&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5*5*32</code>&#xFF0C;&#x4E2A;&#x6570;&#x4E3A;<code>64</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;;</p>
<p>(6)&#x5C06;&#x5377;&#x79EF;&#x7ED3;&#x679C;&#x901A;&#x8FC7;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;;</p>
<p>(7)&#x8FDB;&#x884C;&#x6C60;&#x5316;&#xFF0C;&#x6C60;&#x5316;&#x5927;&#x5C0F;&#x4E3A; <code>2*2</code>&#xFF0C;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x5168;&#x96F6;&#x586B;&#x5145;&#x6A21;&#x5F0F;; </p>
<p>(8)&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#xFF0C;&#x8FDB;&#x884C;<code>10</code>&#x5206;&#x7C7B;&#x3002;</p>
<p><code>Lenet</code>&#x8FDB;&#x884C;&#x5FAE;&#x8C03;&#x540E;&#x7684;&#x7ED3;&#x6784;&#x5982;&#x4E0B;&#x6240;&#x793A;&#xFF1A;</p>
<p><img src="http://ovhbzkbox.bkt.clouddn.com/2018-08-15-15343305267999.jpg" alt=""></p>
<ul>
<li><code>Lenet</code>&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x5728;<code>Mnist</code>&#x6570;&#x636E;&#x96C6;&#x4E0A;&#x7684;&#x5B9E;&#x73B0;&#xFF0C;&#x4E3B;&#x8981;&#x5206;&#x4E3A;&#x4E09;&#x4E2A;&#x90E8;&#x5206;&#xFF1A;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B; (<code>mnist_lenet5_forward.py</code>)&#x3001;&#x53CD;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;(<code>mnist_lenet5_backword.py</code>)&#x3001; &#x6D4B;&#x8BD5;&#x8FC7;&#x7A0B;(<code>mnist_lenet5_test.py</code>)&#x3002;</li>
</ul>
<p>&#x7B2C;&#x4E00;&#xFF0C;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;(<code>mnist_lenet5_forward.py</code>)&#x5B9E;&#x73B0;&#x5BF9;&#x7F51;&#x7EDC;&#x4E2D;&#x53C2;&#x6570;&#x548C;&#x504F;&#x7F6E;&#x7684;&#x521D;&#x59CB;&#x5316;&#x3001;&#x5B9A;&#x4E49;&#x5377;&#x79EF;&#x7ED3;&#x6784;&#x548C;&#x6C60;&#x5316;&#x7ED3;&#x6784;&#x3001;&#x5B9A;&#x4E49;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#x3002;&#x5177;&#x4F53;&#x4EE3;&#x7801;&#x5982;&#x4E0B;&#x6240;&#x793A;&#xFF1A;</p>
<pre><code class="lang-python"><span class="hljs-comment">#coding:utf-8</span>
<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf
IMAGE_SIZE = <span class="hljs-number">28</span>
NUM_CHANNELS = <span class="hljs-number">1</span>
CONV1_SIZE = <span class="hljs-number">5</span>
CONV1_KERNEL_NUM = <span class="hljs-number">32</span>
CONV2_SIZE = <span class="hljs-number">5</span>
CONV2_KERNEL_NUM = <span class="hljs-number">64</span>
FC_SIZE = <span class="hljs-number">512</span>
OUTPUT_NODE =<span class="hljs-number">10</span>

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">get_weight</span><span class="hljs-params">(shape, regularizer)</span>:</span>
  w = tf.Variable(tf.truncated_normal(shape, stddev=<span class="hljs-number">0.1</span>))
  <span class="hljs-keyword">if</span> regularizer != <span class="hljs-keyword">None</span>:
    tf.add_to_collection(<span class="hljs-string">&apos;losses&apos;</span>, tf.contrib.layers.l2_regularizer(regularizer)(w))
  <span class="hljs-keyword">return</span> w

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">get_bias</span><span class="hljs-params">(shape)</span>:</span>
  b = tf.Variable(tf.zeros(shape))
  <span class="hljs-keyword">return</span> b

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">conv2d</span><span class="hljs-params">(x, w)</span>:</span>
  <span class="hljs-keyword">return</span> tf.nn.conv2d(x, w, strides=[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], padding=<span class="hljs-string">&apos;SAME&apos;</span>)

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">max_pool_2x2</span><span class="hljs-params">(x)</span>:</span>
  <span class="hljs-keyword">return</span> tf.nn.max_pool(x, ksize=[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>], strides=[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>], padding=<span class="hljs-string">&apos;SAME&apos;</span>)

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">forward</span><span class="hljs-params">(x, train, regularizer)</span>:</span>
  conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer)
  conv1_b = get_bias([CONV1_KERNEL_NUM])
  conv1 = conv2d(x, conv1_w)
  relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
  pool1 = max_pool_2x2(relu1)

  conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM], regularizer)
  conv2_b = get_bias([CONV2_KERNEL_NUM])
  conv2 = conv2d(pool1, conv2_w)
  relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
  pool2 = max_pool_2x2(relu2)

  pool_shape = pool2.get_shape().as_list()
  nodes = pool_shape[<span class="hljs-number">1</span>] * pool_shape[<span class="hljs-number">2</span>] * pool_shape[<span class="hljs-number">3</span>]
  reshaped = tf.reshape(pool2, [pool_shape[<span class="hljs-number">0</span>], nodes])

  fc1_w = get_weight([nodes, FC_SIZE], regularizer)
  fc1_b = get_bias([FC_SIZE])
  fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
  <span class="hljs-keyword">if</span> train:
    fc1 = tf.nn.dropout(fc1, <span class="hljs-number">0.5</span>)

  fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
  fc2_b = get_bias([OUTPUT_NODE])
  y = tf.matmul(fc1, fc2_w) + fc2_b
  <span class="hljs-keyword">return</span> y
</code></pre>
<p>1) &#x5B9A;&#x4E49;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#x4E2D;&#x5E38;&#x7528;&#x5230;&#x7684;&#x53C2;&#x6570;&#x3002;
&#x56FE;&#x7247;&#x5927;&#x5C0F;&#x5373;&#x6BCF;&#x5F20;&#x56FE;&#x7247;&#x5206;&#x8FA8;&#x7387;&#x4E3A;<code>28*28</code>&#xFF0C;&#x6545;<code>IMAGE_SIZE</code>&#x53D6;&#x503C;&#x4E3A;<code>28</code>;<code>Mnist</code>&#x6570;&#x636E;&#x96C6;&#x4E3A;&#x7070;&#x5EA6;&#x56FE;&#xFF0C;&#x6545;&#x8F93;&#x5165;&#x56FE;&#x7247;&#x901A;&#x9053;&#x6570;<code>NUM_CHANNELS</code>&#x53D6;&#x503C;&#x4E3A;<code>1</code>;&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5</code>&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x4E2A;&#x6570;&#x4E3A;<code>32</code>&#xFF0C;&#x6545;<code>CONV1_SIZE</code>&#x53D6;&#x503C;&#x4E3A;<code>5</code>&#xFF0C;<code>CONV1_KERNEL_NUM</code>&#x53D6;&#x503C;&#x4E3A;<code>32</code>;&#x7B2C;&#x4E8C;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5</code>&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x4E2A;&#x6570;&#x4E3A;<code>64</code>&#xFF0C;&#x6545;<code>CONV2_SIZE</code>&#x53D6;&#x503C;&#x4E3A;<code>5</code>&#xFF0C;<code>CONV2_KERNEL_NUM</code>&#x4E3A;<code>64</code>;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7B2C;&#x4E00;&#x5C42;&#x4E3A;<code>512</code>&#x4E2A;&#x795E;&#x7ECF;&#x5143;&#xFF0C;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7B2C;&#x4E8C;&#x5C42;&#x4E3A;<code>10</code>&#x4E2A;&#x795E;&#x7ECF;&#x5143;&#xFF0C;&#x6545;<code>FC_SIZE</code>&#x53D6;&#x503C;&#x4E3A;<code>512</code>&#xFF0C;<code>OUTPUT_NODE</code>&#x53D6;&#x503C;&#x4E3A;<code>10</code>&#xFF0C;&#x5B9E;&#x73B0;<code>10</code>&#x5206;&#x7C7B;&#x8F93;&#x51FA;&#x3002;</p>
<p> 2) &#x628A;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#x4E2D;&#xFF0C;&#x5E38;&#x7528;&#x5230;&#x7684;&#x65B9;&#x6CD5;&#x5B9A;&#x4E49;&#x4E3A;&#x51FD;&#x6570;&#xFF0C;&#x65B9;&#x4FBF;&#x8C03;&#x7528;&#x3002;&#x5728;<code>mnist_lenet5_forward.py</code>&#x6587;&#x4EF6;&#x4E2D;&#xFF0C;&#x5B9A;&#x4E49;&#x56DB;&#x4E2A;&#x5E38;&#x7528;&#x51FD;&#x6570;&#xFF1A;&#x6743;&#x91CD;<code>w</code>&#x751F;&#x6210;&#x51FD;&#x6570;&#x3001;&#x504F;&#x7F6E;<code>b</code>&#x751F;&#x6210;&#x51FD;&#x6570;&#x3001;&#x5377;&#x79EF;&#x5C42;&#x8BA1;&#x7B97;&#x51FD;&#x6570;&#x3001;&#x6700;&#x5927;&#x6C60;&#x5316;&#x5C42;&#x8BA1;&#x7B97;&#x51FD;&#x6570;&#xFF0C;&#x5176;&#x4E2D;&#xFF0C;&#x6743;&#x91CD;<code>w</code>&#x751F;&#x6210;&#x51FD;&#x6570;&#x548C;&#x504F;&#x7F6E;<code>b</code>&#x751F;&#x6210;&#x51FD;&#x6570;&#x4E0E;&#x4E4B;&#x524D;&#x7684;&#x5B9A;&#x4E49;&#x76F8;&#x540C;&#x3002;</p>
<ul>
<li>(1)&#x5377;&#x79EF;&#x5C42;&#x8BA1;&#x7B97;&#x51FD;&#x6570;&#x63CF;&#x8FF0;&#x5982;&#x4E0B;:</li>
</ul>
<p><code>tf.nn.conv2d(&#x8F93;&#x5165;&#x63CF;&#x8FF0;[batch,&#x884C;&#x5206;&#x8FA8;&#x7387;,&#x5217;&#x5206;&#x8FA8;&#x7387;,&#x901A;&#x9053;&#x6570;], &#x5377;&#x79EF;&#x6838;&#x63CF;&#x8FF0;[&#x884C;&#x5206;&#x8FA8;&#x7387;,&#x5217;&#x5206;&#x8FA8;&#x7387;,&#x901A;&#x9053;&#x6570;,&#x5377;&#x79EF;&#x6838;&#x4E2A;&#x6570;], &#x6838;&#x6ED1;&#x52A8;&#x6B65;&#x957F;[1,&#x884C;&#x6B65;&#x957F;,&#x5217;&#x6B65;&#x957F;,1],&#x586B;&#x5145;&#x6A21;&#x5F0F; padding)</code></p>
<p>&#x4F8B;&#x5982;:
<code>tf.nn.conv2d(x=[100,28,28,1], w=[5,5,1,6], strides=[1,1,1,1], padding=&apos;SAME&apos;)</code>
&#x672C;&#x4F8B;&#x8868;&#x793A;&#x5377;&#x79EF;&#x8F93;&#x5165;<code>x</code>&#x4E3A;<code>28*28*1</code>&#xFF0C;&#x4E00;&#x4E2A;<code>batch_size</code>&#x4E3A;<code>100</code>&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>5*5</code>&#xFF0C;&#x5377;&#x79EF;&#x6838;&#x4E2A;&#x6570;&#x4E3A; <code>6</code>&#xFF0C;&#x5782;&#x76F4;&#x65B9;&#x5411;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x6C34;&#x5E73;&#x65B9;&#x5411;&#x6B65;&#x957F;&#x4E3A;<code>1</code>&#xFF0C;&#x586B;&#x5145;&#x65B9;&#x5F0F;&#x4E3A;&#x5168;&#x96F6;&#x586B;&#x5145;&#x3002;</p>
<ul>
<li>(2)&#x6700;&#x5927;&#x6C60;&#x5316;&#x5C42;&#x8BA1;&#x7B97;&#x51FD;&#x6570;&#x63CF;&#x8FF0;&#x5982;&#x4E0B;&#xFF1A;</li>
</ul>
<p><code>tf.nn.max_pool(&#x8F93;&#x5165;&#x63CF;&#x8FF0;[batch,&#x884C;&#x5206;&#x8FA8;&#x7387;&#xFF0C;&#x5217;&#x5206;&#x8FA8;&#x7387;&#xFF0C;&#x901A;&#x9053;&#x6570;], &#x6C60;&#x5316;&#x6838;&#x63CF;&#x8FF0;[1,&#x884C;&#x5206;&#x8FA8;&#x7387;,&#x5217;&#x5206;&#x8FA8;&#x7387;,1], &#x6C60;&#x5316;&#x6838;&#x6ED1;&#x52A8;&#x6B65;&#x957F;[1,&#x884C;&#x6B65;&#x957F;,&#x5217;&#x6B65;&#x957F;,1], &#x586B;&#x5145;&#x6A21;&#x5F0F; padding)</code></p>
<p>&#x4F8B;&#x5982;&#xFF1A;</p>
<p><code>tf.nn.max_pool(x=[100,28,28,1],ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=&apos;SAME&apos;)</code></p>
<p>&#x672C;&#x4F8B;&#x8868;&#x793A;&#x6C60;&#x5316;&#x8F93;&#x5165;<code>x</code>&#x4E3A;<code>28*28*1</code>&#xFF0C;&#x4E00;&#x4E2A;<code>batch_size</code>&#x4E3A;<code>100</code>&#xFF0C;&#x6C60;&#x5316;&#x6838;&#x5927;&#x5C0F;&#x7528;<code>ksize</code>&#xFF0C;&#x7B2C;&#x4E00;&#x7EF4;&#x548C;&#x7B2C;&#x56DB;&#x7EF4;&#x90FD;&#x4E3A;<code>1</code>&#xFF0C;&#x6C60;&#x5316;&#x6838;&#x5927;&#x5C0F;&#x4E3A;<code>2*2</code>&#xFF0C;&#x5782;&#x76F4;&#x65B9;&#x5411;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x6C34;&#x5E73;&#x65B9;&#x5411;&#x6B65;&#x957F;&#x4E3A;<code>2</code>&#xFF0C;&#x586B;&#x5145;&#x65B9;&#x5F0F;&#x4E3A;&#x5168;&#x96F6;&#x586B;&#x5145;&#x3002;</p>
<ul>
<li>(3) &#x5B9A;&#x4E49;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;</li>
</ul>
<p>[1] &#x5B9E;&#x73B0;&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;</p>
<pre><code class="lang-python">conv1_w =get_weight([CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS, CONV1_KERNEL_NUM],regularizer)
conv1_b = get_bias([CONV1_KERNEL_NUM])
</code></pre>
<p>&#x6839;&#x636E;&#x5148;&#x524D;&#x5B9A;&#x4E49;&#x7684;&#x53C2;&#x6570;&#x5927;&#x5C0F;&#xFF0C;&#x521D;&#x59CB;&#x5316;&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x548C;&#x504F;&#x7F6E;&#x9879;&#x3002;</p>
<pre><code class="lang-python">conv1 = conv2d(x, conv1_w)
</code></pre>
<p>&#x5B9E;&#x73B0;&#x5377;&#x79EF;&#x8FD0;&#x7B97;&#xFF0C;&#x8F93;&#x5165;&#x53C2;&#x6570;&#x4E3A; x &#x548C;&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x53C2;&#x6570;&#x3002;</p>
<pre><code class="lang-python">relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b))
</code></pre>
<p>&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x7684;&#x8F93;&#x51FA;&#x503C;&#x4F5C;&#x4E3A;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;&#x7684;&#x8F93;&#x5165;&#x503C;&#xFF0C;&#x9996;&#x5148;&#x901A;&#x8FC7;<code>tf.nn.bias_add()</code>&#x5BF9;&#x5377;&#x79EF;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x6DFB;&#x52A0;&#x504F;&#x7F6E;&#xFF0C;&#x5E76;&#x8FC7;<code>tf.nn.relu()</code>&#x5B8C;&#x6210;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x3002;</p>
<pre><code class="lang-python">pool1 = max_pool_2x2(relu1)
</code></pre>
<p>&#x6839;&#x636E;&#x5148;&#x524D;&#x5B9A;&#x4E49;&#x7684;&#x6C60;&#x5316;&#x51FD;&#x6570;&#xFF0C;&#x5C06;&#x7B2C;&#x4E00;&#x5C42;&#x6FC0;&#x6D3B;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x503C;&#x8FDB;&#x884C;&#x6700;&#x5927;&#x6C60;&#x5316;&#x3002;</p>
<ul>
<li><code>tf.nn.relu()</code>&#x7528;&#x6765;&#x5B9E;&#x73B0;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#xFF0C;&#x76F8;&#x6BD4;<code>sigmoid</code>&#x548C;<code>tanh</code>&#x51FD;&#x6570;&#xFF0C;<code>relu</code>&#x51FD;&#x6570;&#x53EF;&#x4EE5;&#x5B9E;&#x73B0;&#x5FEB;&#x901F;&#x7684;&#x6536;&#x655B;&#x3002;</li>
</ul>
<p>[2] &#x5B9E;&#x73B0;&#x7B2C;&#x4E8C;&#x5C42;&#x5377;&#x79EF;</p>
<pre><code class="lang-python">conv2_w =get_weight([CONV2_SIZE,CONV2_SIZE,CONV1_KERNEL_NUM, CONV2_KERNEL_NUM],regularizer)
conv2_b = get_bias([CONV2_KERNEL_NUM])
</code></pre>
<p>&#x521D;&#x59CB;&#x5316;&#x7B2C;&#x4E8C;&#x5C42;&#x5377;&#x79EF;&#x5C42;&#x7684;&#x53D8;&#x91CF;&#x548C;&#x504F;&#x7F6E;&#x9879;&#xFF0C;&#x8BE5;&#x5C42;&#x6BCF;&#x4E2A;&#x5377;&#x79EF;&#x6838;&#x7684;&#x901A;&#x9053;&#x6570;&#x8981;&#x4E0E;&#x4E0A;&#x4E00;&#x5C42; &#x5377;&#x79EF;&#x6838;&#x7684;&#x4E2A;&#x6570;&#x4E00;&#x81F4;&#x3002;</p>
<pre><code class="lang-python">conv2 = conv2d(pool1, conv2_w)
</code></pre>
<p>&#x5B9E;&#x73B0;&#x5377;&#x79EF;&#x8FD0;&#x7B97;&#xFF0C;&#x8F93;&#x5165;&#x53C2;&#x6570;&#x4E3A;&#x4E0A;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA; pool1 &#x548C;&#x7B2C;&#x4E8C;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x53C2;&#x6570;&#x3002;</p>
<pre><code class="lang-python">relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
</code></pre>
<p>&#x5B9E;&#x73B0;&#x7B2C;&#x4E8C;&#x5C42;&#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;&#x3002;</p>
<pre><code class="lang-python">pool2 = max_pool_2x2(relu2)
</code></pre>
<p>&#x6839;&#x636E;&#x5148;&#x524D;&#x5B9A;&#x4E49;&#x7684;&#x6C60;&#x5316;&#x51FD;&#x6570;&#xFF0C;&#x5C06;&#x7B2C;&#x4E8C;&#x5C42;&#x6FC0;&#x6D3B;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x503C;&#x8FDB;&#x884C;&#x6700;&#x5927;&#x6C60;&#x5316;&#x3002;</p>
<p>[3] &#x5C06;&#x7B2C;&#x4E8C;&#x5C42;&#x6C60;&#x5316;&#x5C42;&#x7684;&#x8F93;&#x51FA; pool2 &#x77E9;&#x9635;&#x8F6C;&#x5316;&#x4E3A;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x8F93;&#x5165;&#x683C;&#x5F0F;&#x5373;&#x5411;&#x91CF;&#x5F62;&#x5F0F;&#xFF1A;</p>
<pre><code class="lang-python">pool_shape = pool2.get_shape().as_list()
</code></pre>
<p>&#x6839;&#x636E;<code>.get_shape()</code>&#x51FD;&#x6570;&#x5F97;&#x5230;<code>pool2</code>&#x8F93;&#x51FA;&#x77E9;&#x9635;&#x7684;&#x7EF4;&#x5EA6;&#xFF0C;&#x5E76;&#x5B58;&#x5165;<code>list</code>&#x4E2D;&#x3002;&#x5176;&#x4E2D;&#xFF0C;<code>pool_shape[0]</code>&#x4E3A;&#x4E00;&#x4E2A;<code>batch</code>&#x503C;&#x3002;</p>
<pre><code class="lang-python">nodes = pool_shape[<span class="hljs-number">1</span>] * pool_shape[<span class="hljs-number">2</span>] * pool_shape[<span class="hljs-number">3</span>]
</code></pre>
<p>&#x4ECE;<code>list</code>&#x4E2D;&#x4F9D;&#x6B21;&#x53D6;&#x51FA;&#x77E9;&#x9635;&#x7684;&#x957F;&#x5BBD;&#x53CA;&#x6DF1;&#x5EA6;&#xFF0C;&#x5E76;&#x6C42;&#x4E09;&#x8005;&#x7684;&#x4E58;&#x79EF;&#xFF0C;&#x5F97;&#x5230;&#x77E9;&#x9635;&#x88AB;&#x62C9;&#x957F;&#x540E;&#x7684;&#x957F;&#x5EA6;&#x3002;</p>
<pre><code class="lang-python">reshaped = tf.reshape(pool2, [pool_shape[<span class="hljs-number">0</span>], nodes])
</code></pre>
<p>&#x5C06;<code>pool2</code>&#x8F6C;&#x6362;&#x4E3A;&#x4E00;&#x4E2A;<code>batch</code>&#x7684;&#x5411;&#x91CF;&#x518D;&#x4F20;&#x5165;&#x540E;&#x7EED;&#x7684;&#x5168;&#x8FDE;&#x63A5;&#x3002;</p>
<ul>
<li><code>get_shape</code>&#x51FD;&#x6570;&#x7528;&#x4E8E;&#x83B7;&#x53D6;&#x4E00;&#x4E2A;&#x5F20;&#x91CF;&#x7684;&#x7EF4;&#x5EA6;&#xFF0C;&#x5E76;&#x4E14;&#x8F93;&#x51FA;&#x5F20;&#x91CF;&#x6BCF;&#x4E2A;&#x7EF4;&#x5EA6;&#x4E0A;&#x9762;&#x7684;&#x503C;&#x3002;</li>
</ul>
<p>&#x4F8B;&#x5982;:</p>
<pre><code class="lang-python">A = tf.random_normal(shape=[<span class="hljs-number">3</span>,<span class="hljs-number">4</span>])
print(A.get_shape())
<span class="hljs-comment"># &#x8F93;&#x51FA;&#x7ED3;&#x679C;&#x4E3A;:(3&#xFF0C;4)</span>
</code></pre>
<p>[4] &#x5B9E;&#x73B0;&#x7B2C;&#x4E09;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#xFF1A;</p>
<pre><code class="lang-python">fc1_w = get_weight([nodes, FC_SIZE], regularizer)
</code></pre>
<p>&#x521D;&#x59CB;&#x5316;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x6743;&#x91CD;&#xFF0C;&#x5E76;&#x52A0;&#x5165;&#x6B63;&#x5219;&#x5316;&#x3002;</p>
<pre><code class="lang-python">fc1_b = get_bias([FC_SIZE])
</code></pre>
<p>&#x521D;&#x59CB;&#x5316;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x504F;&#x7F6E;&#x9879;&#x3002;</p>
<pre><code class="lang-python">fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
</code></pre>
<p>&#x5C06;&#x8F6C;&#x6362;&#x540E;&#x7684;<code>reshaped</code>&#x5411;&#x91CF;&#x4E0E;&#x6743;&#x91CD;<code>fc1_w</code>&#x505A;&#x77E9;&#x9635;&#x4E58;&#x6CD5;&#x8FD0;&#x7B97;&#xFF0C;&#x7136;&#x540E;&#x518D;&#x52A0;&#x4E0A;&#x504F;&#x7F6E;&#xFF0C;&#x6700;&#x540E;&#x518D;&#x4F7F;&#x7528;<code>relu</code>&#x8FDB;&#x884C;&#x6FC0;&#x6D3B;&#x3002;</p>
<pre><code class="lang-python"><span class="hljs-keyword">if</span> train: fc1 = tf.nn.dropout(fc1, <span class="hljs-number">0.5</span>)
</code></pre>
<p>&#x5982;&#x679C;&#x662F;&#x8BAD;&#x7EC3;&#x9636;&#x6BB5;&#xFF0C;&#x5219;&#x5BF9;&#x8BE5;&#x5C42;&#x8F93;&#x51FA;&#x4F7F;&#x7528;<code>dropout</code>&#xFF0C;&#x4E5F;&#x5C31;&#x662F;&#x968F;&#x673A;&#x7684;&#x5C06;&#x8BE5;&#x5C42;&#x8F93;&#x51FA;&#x4E2D;&#x7684;&#x4E00; &#x534A;&#x795E;&#x7ECF;&#x5143;&#x7F6E;&#x4E3A;&#x65E0;&#x6548;&#xFF0C;&#x662F;&#x4E3A;&#x4E86;&#x907F;&#x514D;&#x8FC7;&#x62DF;&#x5408;&#x800C;&#x8BBE;&#x7F6E;&#x7684;&#xFF0C;&#x4E00;&#x822C;&#x53EA;&#x5728;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x4E2D;&#x4F7F;&#x7528;&#x3002;</p>
<p>[5] &#x5B9E;&#x73B0;&#x7B2C;&#x56DB;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#xFF1A;</p>
<pre><code class="lang-python">fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
fc2_b = get_bias([OUTPUT_NODE])
</code></pre>
<p>&#x521D;&#x59CB;&#x5316;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x5BF9;&#x5E94;&#x7684;&#x53D8;&#x91CF;&#x3002;</p>
<pre><code class="lang-python">y = tf.matmul(fc1, fc2_w) + fc2_b
</code></pre>
<p>&#x5C06;&#x4E0A;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA;<code>fc1</code>&#x4E0E;&#x6743;&#x91CD;<code>fc2_w</code>&#x505A;&#x77E9;&#x9635;&#x4E58;&#x6CD5;&#x8FD0;&#x7B97;&#xFF0C;&#x7136;&#x540E;&#x518D;&#x52A0;&#x4E0A;&#x504F;&#x7F6E;&#x3002;</p>
<pre><code class="lang-python"><span class="hljs-keyword">return</span> y
</code></pre>
<p>&#x8FD4;&#x56DE;&#x8F93;&#x51FA;&#x503C;<code>y</code>&#xFF0C;&#x5B8C;&#x6210;&#x6574;&#x4E2A;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#xFF0C;&#x4ECE;&#x800C;&#x5B9E;&#x73B0;&#x5BF9;<code>Mnist</code>&#x6570;&#x636E;&#x96C6;&#x7684;<code>10</code>&#x5206;&#x7C7B;&#x3002;</p>
<p>&#x7B2C;&#x4E8C;&#xFF0C;&#x53CD;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;(<code>mnist_lenet5_backward.py</code>)&#xFF0C;&#x5B8C;&#x6210;&#x8BAD;&#x7EC3;&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7684;&#x53C2;&#x6570;&#x3002;
&#x5177;&#x4F53;&#x4EE3;&#x7801;&#x5982;&#x4E0B;&#x6240;&#x793A;:</p>
<pre><code class="lang-python"><span class="hljs-comment">#coding:utf-8</span>
<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf
<span class="hljs-keyword">from</span> tensorflow.examples.tutorials.mnist <span class="hljs-keyword">import</span> input_data
<span class="hljs-keyword">import</span> mnist_lenet5_forward
<span class="hljs-keyword">import</span> os
<span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np

BATCH_SIZE = <span class="hljs-number">100</span>
LEARNING_RATE_BASE = <span class="hljs-number">0.005</span>
LEARNING_RATE_DECAY = <span class="hljs-number">0.99</span>
REGULARIZER = <span class="hljs-number">0.0001</span>
STEPS = <span class="hljs-number">50000</span>
MOVING_AVERAGE_DECAY = <span class="hljs-number">0.99</span>
MODEL_SAVE_PATH = <span class="hljs-string">&apos;./model/&apos;</span>
MODEL_NAME = <span class="hljs-string">&apos;mnist_model&apos;</span>

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">backward</span><span class="hljs-params">(mnist)</span>:</span>
  x = tf.placeholder(tf.float32, [BATCH_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])
  y_ = tf.placeholder(tf.float32, [<span class="hljs-keyword">None</span>, mnist_lenet5_forward.OUTPUT_NODE])
  y = mnist_lenet5_forward.forward(x, <span class="hljs-keyword">True</span>, REGULARIZER)
  global_step = tf.Variable(<span class="hljs-number">0</span>, trainable=<span class="hljs-keyword">False</span>)

  ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, <span class="hljs-number">1</span>))
  cem = tf.reduce_mean(ce)
  loss = cem + tf.add_n(tf.get_collection(<span class="hljs-string">&apos;losses&apos;</span>))

  learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY, staircase=<span class="hljs-keyword">True</span>)
  train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

  ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
  ema_op = ema.apply(tf.trainable_variables())
  <span class="hljs-keyword">with</span> tf.control_dependencies([train_step, ema_op]):
    train_op = tf.no_op(name=<span class="hljs-string">&apos;train&apos;</span>)

  saver = tf.train.Saver()
  <span class="hljs-keyword">with</span> tf.Session() <span class="hljs-keyword">as</span> sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
    <span class="hljs-keyword">if</span> ckpt <span class="hljs-keyword">and</span> ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)

    <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> range(STEPS):
      xs, ys = mnist.train.next_batch(BATCH_SIZE)
      reshaped_xs = np.reshape(xs,(BATCH_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))
      _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
      <span class="hljs-keyword">if</span> i % <span class="hljs-number">100</span> == <span class="hljs-number">0</span>:
        print(<span class="hljs-string">&quot;After %d training step(s), loss on training batch is %g.&quot;</span> % (step, loss_value))
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">main</span><span class="hljs-params">()</span>:</span>
  mnist = input_data.read_data_sets(<span class="hljs-string">&quot;./data/&quot;</span>, one_hot=<span class="hljs-keyword">True</span>)
  backward(mnist)

<span class="hljs-keyword">if</span> __name__ == <span class="hljs-string">&apos;__main__&apos;</span>:
  main()
</code></pre>
<p>&#x6CE8;&#x91CA;:</p>
<p>1)&#x5B9A;&#x4E49;&#x8BAD;&#x7EC3;&#x8FC7;&#x7A0B;&#x4E2D;&#x7684;&#x8D85;&#x53C2;&#x6570;
&#x89C4;&#x5B9A;&#x4E00;&#x4E2A;<code>batch</code>&#x7684;&#x6570;&#x91CF;&#x4E3A;<code>100</code>&#xFF0C;&#x6545;<code>BATCH_SIZE</code>&#x53D6;&#x503C;&#x4E3A;<code>100</code>;&#x8BBE;&#x5B9A;&#x521D;&#x59CB;&#x5B66;&#x4E60;&#x7387;&#x4E3A;<code>0.005</code>&#x3002;&#x5B66;&#x4E60;&#x7387;&#x8870;&#x51CF;&#x7387;&#x4E3A;<code>0.99</code>;&#x6700;&#x5927;&#x8FED;&#x4EE3;&#x6B21;&#x6570;&#x4E3A;<code>50000</code>&#xFF0C;&#x6545;<code>STEPS</code>&#x53D6;&#x503C;&#x4E3A;<code>50000</code>; &#x6ED1;&#x52A8;&#x5E73;&#x5747;&#x8870;&#x51CF;&#x7387;&#x8BBE;&#x7F6E;&#x4E3A;<code>0.99</code>&#xFF0C;&#x5E76;&#x89C4;&#x5B9A;&#x6A21;&#x578B;&#x4FDD;&#x5B58;&#x8DEF;&#x5F84;&#x4EE5;&#x53CA;&#x4FDD;&#x5B58;&#x7684;&#x6A21;&#x578B;&#x540D;&#x79F0;&#x3002;</p>
<p>2)&#x5B8C;&#x6210;&#x53CD;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;</p>
<p>[1] &#x7ED9;<code>x</code>,<code>y_</code>&#x5360;&#x4F4D;</p>
<pre><code class="lang-python">x = tf.placeholder(tf.float32,[BATCH_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])
y_ = tf.placeholder(tf.float32, [<span class="hljs-keyword">None</span>,mnist_lenet5_forward.OUTPUT_NODE])
</code></pre>
<p><code>x</code>, <code>y_</code>&#x662F;&#x5B9A;&#x4E49;&#x7684;&#x5360;&#x4F4D;&#x7B26;&#xFF0C;&#x6307;&#x5B9A;&#x53C2;&#x6570;&#x4E3A;&#x6D6E;&#x70B9;&#x578B;&#x3002;&#x7531;&#x4E8E;&#x5377;&#x79EF;&#x5C42;&#x8F93;&#x5165;&#x4E3A;&#x56DB;&#x9636;&#x5F20;&#x91CF;&#xFF0C;&#x6545;<code>x</code>&#x7684;&#x5360;&#x4F4D;&#x7B26;&#x8868;&#x793A;&#x4E3A;&#x4E0A;&#x8FF0;&#x5F62;&#x5F0F;&#xFF0C;&#x7B2C;&#x4E00;&#x9636;&#x8868;&#x793A;&#x6BCF;&#x8F6E;&#x5582;&#x5165;&#x7684;&#x56FE;&#x7247;&#x6570;&#x91CF;&#xFF0C;&#x7B2C;&#x4E8C;&#x9636;&#x548C;&#x7B2C;&#x4E09;&#x9636;&#x5206;&#x522B;&#x8868;&#x793A;&#x56FE;&#x7247;&#x7684;&#x884C;&#x5206;&#x8FA8;&#x7387;&#x548C;&#x5217;&#x5206;&#x8FA8;&#x7387;&#xFF0C;&#x7B2C;&#x56DB;&#x9636;&#x8868;&#x793A;&#x901A;&#x9053;&#x6570;&#x3002;</p>
<ul>
<li><code>x = tf.placeholder(dtype,shape,name=None)</code></li>
</ul>
<p><code>tf.placeholder()</code>&#x51FD;&#x6570;&#x6709;&#x4E09;&#x4E2A;&#x53C2;&#x6570;&#xFF0C;<code>dtype</code>&#x8868;&#x793A;&#x6570;&#x636E;&#x7C7B;&#x578B;&#xFF0C;&#x5E38;&#x7528;&#x7684;&#x7C7B;&#x578B;&#x4E3A;<code>tf.float32</code>,<code>tf.float64</code>&#x7B49;&#x6570;&#x503C;&#x7C7B;&#x578B;&#xFF0C;<code>shape</code>&#x8868;&#x793A;&#x6570;&#x636E;&#x5F62;&#x72B6;&#xFF0C;<code>name</code>&#x8868;&#x793A;&#x540D;&#x79F0;&#x3002;</p>
<p>[2] &#x8C03;&#x7528;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;</p>
<pre><code class="lang-python">y = mnist_lenet5_forward.forward(x,<span class="hljs-keyword">True</span>, REGULARIZER)
</code></pre>
<p>&#x8C03;&#x7528;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x7F51;&#x7EDC;&#x5F97;&#x5230;&#x7EF4;&#x5EA6;&#x4E3A;<code>10</code>&#x7684;<code>tensor</code>&#x3002;</p>
<p>[3] &#x6C42;&#x542B;&#x6709;&#x6B63;&#x5219;&#x5316;&#x7684;&#x635F;&#x5931;&#x503C;</p>
<pre><code class="lang-python">global_step = tf.Variable(<span class="hljs-number">0</span>, trainable=<span class="hljs-keyword">False</span>)
</code></pre>
<p>&#x58F0;&#x660E;&#x4E00;&#x4E2A;&#x5168;&#x5C40;&#x8BA1;&#x6570;&#x5668;&#xFF0C;&#x5E76;&#x521D;&#x59CB;&#x5316;&#x4E3A;<code>0</code>&#x3002;</p>
<pre><code class="lang-python">ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y,labels=tf.argmax(y_, <span class="hljs-number">1</span>))
</code></pre>
<p>&#x5BF9;&#x7F51;&#x7EDC;&#x6700;&#x540E;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA;<code>y</code>&#x505A;<code>softmax</code>&#xFF0C;&#x6C42;&#x53D6;&#x8F93;&#x51FA;&#x5C5E;&#x4E8E;&#x67D0;&#x4E00;&#x7C7B;&#x7684;&#x6982;&#x7387;&#xFF0C;&#x7ED3;&#x679C;&#x4E3A;&#x4E00;&#x4E2A;<code>num_classes</code>&#x5927;&#x5C0F;&#x7684;&#x5411;&#x91CF;&#xFF0C;&#x518D;&#x5C06;&#x6B64;&#x5411;&#x91CF;&#x548C;&#x5B9E;&#x9645;&#x6807;&#x7B7E;&#x503C;&#x505A;&#x4EA4;&#x53C9;&#x71B5;&#xFF0C;&#x8FD4;&#x56DE;&#x4E00;&#x4E2A;&#x5411;&#x91CF;&#x503C;&#x3002;</p>
<pre><code class="lang-python">cem = tf.reduce_mean(ce)
</code></pre>
<p>&#x901A;&#x8FC7;<code>tf.reduce_mean()</code>&#x51FD;&#x6570;&#x5BF9;&#x5F97;&#x5230;&#x7684;&#x5411;&#x91CF;&#x6C42;&#x5747;&#x503C;&#xFF0C;&#x5F97;&#x5230;<code>loss</code>&#x3002;</p>
<pre><code class="lang-python">loss = cem + tf.add_n(tf.get_collection(<span class="hljs-string">&apos;losses&apos;</span>))
</code></pre>
<p>&#x6DFB;&#x52A0;&#x6B63;&#x5219;&#x5316;&#x4E2D;&#x7684; losses &#x503C;&#x5230; loss &#x4E2D;&#x3002;</p>
<ul>
<li><code>sparse_softmax_cross_entropy_with_logits(_sentinel=None, labels=None, logits=None,name=None)</code></li>
</ul>
<p>&#x6B64;&#x51FD;&#x6570;&#x7684;&#x53C2;&#x6570;<code>logits</code>&#x4E3A;&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x6700;&#x540E;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA;&#xFF0C;&#x5B83;&#x7684;&#x5927;&#x5C0F;&#x4E3A;<code>[batch_size, num_classes]</code>&#xFF0C;&#x53C2;&#x6570;<code>labels</code>&#x8868;&#x793A;&#x5B9E;&#x9645;&#x6807;&#x7B7E;&#x503C;&#xFF0C;&#x5927;&#x5C0F;&#x4E3A;<code>[batch_size,num_classes]</code>&#x3002; &#x7B2C;&#x4E00;&#x6B65;&#x662F;&#x5148;&#x5BF9;&#x7F51;&#x7EDC;&#x6700;&#x540E;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA;&#x505A;&#x4E00;&#x4E2A;<code>softmax</code>&#xFF0C;&#x8F93;&#x51FA;&#x4E3A;&#x5C5E;&#x4E8E;&#x67D0;&#x4E00;&#x5C5E;&#x6027;&#x7684;&#x6982;&#x7387;&#x5411;&#x91CF;;&#x518D;&#x5C06;&#x6982;&#x7387;&#x5411;&#x91CF;&#x4E0E;&#x5B9E;&#x9645;&#x6807;&#x7B7E;&#x5411;&#x91CF;&#x505A;&#x4EA4;&#x53C9;&#x71B5;&#xFF0C;&#x8FD4;&#x56DE;&#x5411;&#x91CF;&#x3002;</p>
<ul>
<li><code>tf.reduce_mean( input_tensor, reduction_indices=None, keep_dims=False, name=None)</code></li>
</ul>
<p>&#x6B64;&#x51FD;&#x6570;&#x8868;&#x793A;&#x5BF9;&#x5F97;&#x5230;&#x7684;&#x5411;&#x91CF;&#x6C42;&#x53D6;&#x5747;&#x503C;&#x3002;&#x53C2;&#x6570;<code>input_tensor</code>&#x8868;&#x793A;&#x8981;&#x51CF;&#x5C11;&#x7684;&#x5F20;&#x91CF;;&#x53C2;&#x6570;<code>reduction_indices</code>&#x8868;&#x793A;&#x6C42;&#x53D6;&#x5747;&#x503C;&#x7684;&#x7EF4;&#x5EA6;;&#x53C2;&#x6570;<code>keep_dims</code>&#x542B;&#x4E49;&#x4E3A;:&#x5982;&#x679C;&#x4E3A;<code>true</code>&#xFF0C;&#x5219;&#x4FDD;&#x7559;&#x957F;&#x5EA6;&#x4E3A;<code>1</code>&#x7684;&#x7F29;&#x5C0F;&#x5C3A;&#x5BF8;&#x3002;<code>name</code>&#x8868;&#x793A;&#x64CD;&#x4F5C;&#x7684;&#x540D;&#x79F0;&#x3002;</p>
<p>&#x4F8B;&#x5982;:</p>
<pre><code class="lang-python">x = tf.constant([[<span class="hljs-number">1.</span>, <span class="hljs-number">1.</span>], [<span class="hljs-number">2.</span>, <span class="hljs-number">2.</span>]])
tf.reduce_mean(x)    <span class="hljs-comment">#&#x8868;&#x793A;&#x5BF9;&#x5411;&#x91CF;&#x6574;&#x4F53;&#x6C42;&#x5747;&#x503C; 1.5</span>
tf.reduce_mean(x, <span class="hljs-number">0</span>) <span class="hljs-comment">#&#x8868;&#x793A;&#x5BF9;&#x5411;&#x91CF;&#x5728;&#x5217;&#x4E0A;&#x6C42;&#x5747;&#x503C;[1.5, 1.5]</span>
tf.reduce_mean(x, <span class="hljs-number">1</span>) <span class="hljs-comment">#&#x8868;&#x793A;&#x5BF9;&#x5411;&#x91CF;&#x5728;&#x884C;&#x4E0A;&#x6C42;&#x5747;&#x503C;[1., 2.]</span>
</code></pre>
<p>[4]&#x5B9E;&#x73B0;&#x6307;&#x6570;&#x8870;&#x51CF;&#x5B66;&#x4E60;&#x7387;</p>
<ul>
<li><code>learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY, staircase=True)</code></li>
</ul>
<p><code>tf.train.exponential_decay</code>&#x51FD;&#x6570;&#x4E2D;&#x53C2;&#x6570;<code>LEARNING_RATE_BASE</code>&#x8868;&#x793A;&#x521D;&#x59CB;&#x5B66;&#x4E60;&#x7387;&#xFF0C;&#x53C2;&#x6570;<code>LEARNING_RATE_DECAY</code>&#x8868;&#x793A;&#x5B66;&#x4E60;&#x7387;&#x8870;&#x51CF;&#x901F;&#x7387;&#x3002;&#x5B9E;&#x73B0;&#x6307;&#x6570;&#x7EA7;&#x7684;&#x51CF;&#x5C0F;&#x5B66;&#x4E60;&#x7387;&#xFF0C;&#x53EF;&#x4EE5;&#x8BA9;&#x6A21;&#x578B;&#x5728;&#x8BAD;&#x7EC3;&#x7684;&#x524D;&#x671F;&#x5FEB;&#x901F;&#x63A5;&#x8FD1;&#x8F83;&#x4F18;&#x89E3;&#xFF0C;&#x53C8;&#x53EF;&#x4EE5;&#x4FDD;&#x8BC1;&#x6A21;&#x578B;&#x5728;&#x8BAD;&#x7EC3;&#x540E;&#x671F;&#x4E0D;&#x4F1A;&#x6709;&#x592A;&#x5927;&#x6CE2;&#x52A8;&#x3002;&#x5176;&#x4E2D;&#xFF0C;&#x5F53;<code>staircase=True</code>&#x65F6;&#xFF0C;&#x4E3A;&#x9636;&#x68AF;&#x5F62;&#x8870;&#x51CF;&#xFF0C;<code>(global_step/decay_steps)</code>&#x5219;&#x88AB;&#x8F6C;&#x5316;&#x4E3A;&#x6574;&#x6570;;&#x5F53;<code>staircase=False</code>&#x65F6;&#xFF0C;&#x4E3A;&#x66F2;&#x7EBF;&#x5F62;&#x8870;&#x51CF;&#xFF0C;&#x4EE5;&#x6B64;&#x6839;&#x636E;staircase &#x6765;&#x9009;&#x62E9;&#x4E0D;&#x540C;&#x7684;&#x8870;&#x51CF;&#x65B9;&#x5F0F;&#x3002;</p>
<p>&#x8BA1;&#x7B97;&#x516C;&#x5F0F;&#x4E3A;:</p>
<pre><code class="lang-python">decayed_learning_rate=learining_rate*decay_rate^(global_step/decay_steps)
</code></pre>
<ul>
<li><code>train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)</code></li>
</ul>
<p>&#x6B64;&#x51FD;&#x6570;&#x7684;&#x53C2;&#x6570;<code>learning_rate</code>&#x4E3A;&#x4F20;&#x5165;&#x7684;&#x5B66;&#x4E60;&#x7387;&#xFF0C;&#x6784;&#x9020;&#x4E00;&#x4E2A;&#x5B9E;&#x73B0;&#x68AF;&#x5EA6;&#x4E0B;&#x964D;&#x7B97;&#x6CD5;&#x7684;&#x4F18;&#x5316;&#x5668;&#xFF0C;&#x518D;&#x901A;&#x8FC7;&#x4F7F;&#x7528; <code>minimize</code>&#x66F4;&#x65B0;&#x5B58;&#x50A8;&#x8981;&#x8BAD;&#x7EC3;&#x7684;&#x53D8;&#x91CF;&#x7684;&#x5217;&#x8868;&#x6765;&#x51CF;&#x5C0F;<code>loss</code>&#x3002;</p>
<p>[5] &#x5B9E;&#x73B0;&#x6ED1;&#x52A8;&#x5E73;&#x5747;&#x6A21;&#x578B;</p>
<ul>
<li><code>ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)</code>
<code>ema_op = ema.apply(tf.trainable_variables())</code></li>
</ul>
<p><code>tf.train.ExponentialMovingAverage</code>&#x51FD;&#x6570;&#x91C7;&#x7528;&#x6ED1;&#x52A8;&#x5E73;&#x5747;&#x7684;&#x65B9;&#x6CD5;&#x66F4;&#x65B0;&#x53C2;&#x6570;&#x3002;&#x6B64;&#x51FD;&#x6570;&#x7684;&#x53C2;&#x6570; <code>MOVING_AVERAGE_DECAY</code>&#x8868;&#x793A;&#x8870;&#x51CF;&#x901F;&#x7387;&#xFF0C;&#x7528;&#x4E8E;&#x63A7;&#x5236;&#x6A21;&#x578B;&#x7684;&#x66F4;&#x65B0;&#x901F;&#x5EA6;;&#x6B64;&#x51FD;&#x6570;&#x7EF4;&#x62A4;&#x4E00;&#x4E2A;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#xFF0C;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#x521D;&#x59CB;&#x503C;&#x4E3A;&#x53D8;&#x91CF;&#x521D;&#x59CB;&#x503C;&#x3002;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#x503C;&#x7684;&#x66F4;&#x65B0;&#x65B9;&#x5F0F; &#x5982;&#x4E0B;:</p>
<pre><code class="lang-python">shadow_variable = decay * shadow_variable + (<span class="hljs-number">1</span>-decay) * variable&#x3002;
</code></pre>
<p>&#x5176;&#x4E2D;&#xFF0C;<code>shadow_variable</code>&#x662F;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#xFF0C;<code>variable</code>&#x8868;&#x793A;&#x5F85;&#x66F4;&#x65B0;&#x7684;&#x53D8;&#x91CF;&#xFF0C;<code>decay</code>&#x4E3A;&#x8870;&#x51CF;&#x901F;&#x7387;&#x3002;<code>decay</code> &#x4E00;&#x822C;&#x8BBE;&#x4E3A;&#x63A5;&#x8FD1;&#x4E8E;<code>1</code>&#x7684;&#x6570;<code>(0.99,0.999)</code>&#xFF0C;<code>decay</code>&#x8D8A;&#x5927;&#x6A21;&#x578B;&#x8D8A;&#x7A33;&#x5B9A;&#x3002;</p>
<p>[6] &#x5C06;<code>train_step</code>&#x548C;<code>ema_op</code>&#x4E24;&#x4E2A;&#x8BAD;&#x7EC3;&#x64CD;&#x4F5C;&#x7ED1;&#x5B9A;&#x5230;<code>train_op</code>&#x4E0A;</p>
<pre><code class="lang-python"><span class="hljs-keyword">with</span> tf.control_dependencies([train_step, ema_op]):
  train_op = tf.no_op(name=<span class="hljs-string">&apos;train&apos;</span>)
</code></pre>
<p>[7] &#x5B9E;&#x4F8B;&#x5316;&#x4E00;&#x4E2A;&#x4FDD;&#x5B58;&#x548C;&#x6062;&#x590D;&#x53D8;&#x91CF;&#x7684;<code>saver</code>&#xFF0C;&#x5E76;&#x521B;&#x5EFA;&#x4E00;&#x4E2A;&#x4F1A;&#x8BDD;</p>
<pre><code class="lang-python">saver = tf.train.Saver()
<span class="hljs-keyword">with</span> tf.Session() <span class="hljs-keyword">as</span> sess:
  init_op = tf.global_variables_initializer()
  sess.run(init_op)
</code></pre>
<p>&#x521B;&#x5EFA;&#x4E00;&#x4E2A;&#x4F1A;&#x8BDD;&#xFF0C;&#x5E76;&#x901A;&#x8FC7;<code>python</code>&#x4E2D;&#x7684;&#x4E0A;&#x4E0B;&#x6587;&#x7BA1;&#x7406;&#x5668;&#x6765;&#x7BA1;&#x7406;&#x8FD9;&#x4E2A;&#x4F1A;&#x8BDD;&#xFF0C;&#x521D;&#x59CB;&#x5316; &#x8BA1;&#x7B97;&#x56FE;&#x4E2D;&#x7684;&#x53D8;&#x91CF;&#xFF0C;&#x5E76;&#x7528; <code>sess.run</code>&#x5B9E;&#x73B0;&#x521D;&#x59CB;&#x5316;&#x3002;</p>
<pre><code class="lang-python">ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
<span class="hljs-keyword">if</span> ckpt <span class="hljs-keyword">and</span> ckpt.model_checkpoint_path:
  saver.restore(sess, ckpt.model_checkpoint_path)
</code></pre>
<p>&#x901A;&#x8FC7;<code>checkpoint</code>&#x6587;&#x4EF6;&#x5B9A;&#x4F4D;&#x5230;&#x6700;&#x65B0;&#x4FDD;&#x5B58;&#x7684;&#x6A21;&#x578B;&#xFF0C;&#x82E5;&#x6587;&#x4EF6;&#x5B58;&#x5728;&#xFF0C;&#x5219;&#x52A0;&#x8F7D;&#x6700;&#x65B0;&#x7684;&#x6A21;&#x578B;&#x3002;</p>
<pre><code class="lang-python"><span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> range(STEPS):
  xs, ys = mnist.train.next_batch(BATCH_SIZE)
  reshaped_xs = np.reshape(xs,(BATCH_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))
</code></pre>
<p>&#x8BFB;&#x53D6;&#x4E00;&#x4E2A; batch &#x6570;&#x636E;&#xFF0C;&#x5C06;&#x8F93;&#x5165;&#x6570;&#x636E; xs &#x8F6C;&#x6210;&#x4E0E;&#x7F51;&#x7EDC;&#x8F93;&#x5165;&#x76F8;&#x540C;&#x5F62;&#x72B6;&#x7684;&#x77E9;&#x9635;&#x3002;</p>
<pre><code class="lang-python">_, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})
</code></pre>
<p>&#x5582;&#x5165;&#x8BAD;&#x7EC3;&#x56FE;&#x50CF;&#x548C;&#x6807;&#x7B7E;&#xFF0C;&#x5F00;&#x59CB;&#x8BAD;&#x7EC3;&#x3002;</p>
<pre><code class="lang-python"><span class="hljs-keyword">if</span> i % <span class="hljs-number">100</span> == <span class="hljs-number">0</span>:
  print(<span class="hljs-string">&quot;After %d training step(s), loss on training batch is %g.&quot;</span> % (step, loss_value))
<span class="hljs-comment"># &#x6BCF;&#x8FED;&#x4EE3;100&#x6B21;&#x6253;&#x5370;loss&#x4FE1;&#x606F;&#xFF0C;&#x5E76;&#x4FDD;&#x5B58;&#x6700;&#x65B0;&#x7684;&#x6A21;&#x578B;&#x3002;</span>
</code></pre>
<p><img src="http://ovhbzkbox.bkt.clouddn.com/2018-08-15-15343422294723.jpg" alt=""></p>
<p>&#x7531;&#x8FD0;&#x884C;&#x7ED3;&#x679C;&#x53EF;&#x4EE5;&#x770B;&#x51FA;&#xFF0C;&#x635F;&#x5931;&#x503C;&#x5728;&#x4E0D;&#x65AD;&#x51CF;&#x5C0F;&#xFF0C;&#x4E14;&#x53EF;&#x4EE5;&#x5B9E;&#x73B0;&#x65AD;&#x70B9;&#x7EED;&#x8BAD;&#x3002;</p>
<p>&#x7B2C;&#x4E09;&#xFF0C;&#x6D4B;&#x8BD5;&#x8FC7;&#x7A0B;(<code>mnist_lenet5_test.py</code>)&#xFF0C;&#x5BF9;<code>Mnist</code>&#x6570;&#x636E;&#x96C6;&#x4E2D;&#x7684;&#x6D4B;&#x8BD5;&#x6570;&#x636E;&#x8FDB;&#x884C;&#x9884;&#x6D4B;&#xFF0C;&#x6D4B;&#x8BD5;&#x6A21;&#x578B;&#x51C6;&#x786E;&#x7387;&#x3002;&#x5177;&#x4F53;&#x4EE3;&#x7801;&#x5982;&#x4E0B;&#x6240;&#x793A;:</p>
<pre><code class="lang-python"><span class="hljs-comment">#coding:utf-8</span>
<span class="hljs-keyword">import</span> time
<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf
<span class="hljs-keyword">from</span> tensorflow.examples.tutorials.mnist <span class="hljs-keyword">import</span> input_data
<span class="hljs-keyword">import</span> mnist_lenet5_forward
<span class="hljs-keyword">import</span> mnist_lenet5_backward
<span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np

TEST_INTERVAL_SECS = <span class="hljs-number">5</span>

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">test</span><span class="hljs-params">(mnist)</span>:</span>
  <span class="hljs-keyword">with</span> tf.Graph().as_default() <span class="hljs-keyword">as</span> g:
    x = tf.placeholder(tf.float32, [mnist.test.num_examples, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])
    y_ = tf.placeholder(tf.float32, [<span class="hljs-keyword">None</span>, mnist_lenet5_forward.OUTPUT_NODE])
    y = mnist_lenet5_forward.forward(x, <span class="hljs-keyword">False</span>, <span class="hljs-keyword">None</span>)

    ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)
    ema_restore = ema.variables_to_restore()
    saver = tf.train.Saver(ema_restore)

    correct_prediction = tf.equal(tf.argmax(y, <span class="hljs-number">1</span>), tf.argmax(y_, <span class="hljs-number">1</span>))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    <span class="hljs-keyword">while</span> <span class="hljs-keyword">True</span>:
      <span class="hljs-keyword">with</span> tf.Session() <span class="hljs-keyword">as</span> sess:
        ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
        <span class="hljs-keyword">if</span> ckpt <span class="hljs-keyword">and</span> ckpt.model_checkpoint_path:
          saver.restore(sess, ckpt.model_checkpoint_path)

          global_step = ckpt.model_checkpoint_path.split(<span class="hljs-string">&apos;/&apos;</span>)[<span class="hljs-number">-1</span>].split(<span class="hljs-string">&apos;-&apos;</span>)[<span class="hljs-number">-1</span>]
          reshaped_x = np.reshape(mnist.test.images, (mnist.test.num_examples, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))
          accuracy_score = sess.run(accuracy, feed_dict={x: reshaped_x, y_: mnist.test.labels})
          print(<span class="hljs-string">&quot;After %s training step(s), test accuracy = %g&quot;</span> % (global_step, accuracy_score))
        <span class="hljs-keyword">else</span>:
          print(<span class="hljs-string">&quot;No checkpoint file found&quot;</span>)
          <span class="hljs-keyword">return</span>
      time.sleep(TEST_INTERVAL_SECS)

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">main</span><span class="hljs-params">()</span>:</span>
  mnist = input_data.read_data_sets(<span class="hljs-string">&quot;./data/&quot;</span>, one_hot=<span class="hljs-keyword">True</span>)
  test(mnist)

<span class="hljs-keyword">if</span> __name__ == <span class="hljs-string">&apos;__main__&apos;</span>:
  main()
</code></pre>
<p>&#x6CE8;&#x91CA;:</p>
<p>1)&#x5728;&#x6D4B;&#x8BD5;&#x7A0B;&#x5E8F;&#x4E2D;&#x4F7F;&#x7528;&#x7684;&#x662F;&#x8BAD;&#x7EC3;&#x597D;&#x7684;&#x7F51;&#x7EDC;&#xFF0C;&#x6545;&#x4E0D;&#x4F7F;&#x7528;<code>dropout</code>&#xFF0C;&#x800C;&#x662F;&#x8BA9;&#x6240;&#x6709;&#x795E;&#x7ECF;&#x5143;&#x90FD;&#x53C2;&#x4E0E;&#x8FD0;&#x7B97;&#xFF0C;&#x4ECE;&#x800C;&#x8F93;&#x51FA;&#x8BC6;&#x522B;&#x51C6;&#x786E;&#x7387;&#x3002;</p>
<p>2)<code>correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))</code></p>
<ul>
<li><code>tf.equal(x,y)</code></li>
</ul>
<p>&#x6B64;&#x51FD;&#x6570;&#x7528;&#x4E8E;&#x5224;&#x65AD;&#x51FD;&#x6570;&#x7684;&#x4E24;&#x4E2A;&#x53C2;&#x6570;<code>x</code>&#x4E0E;<code>y</code>&#x662F;&#x5426;&#x76F8;&#x7B49;&#xFF0C;&#x4E00;&#x822C;<code>x</code>&#x8868;&#x793A;&#x9884;&#x6D4B;&#x503C;&#xFF0C;<code>y</code>&#x8868;&#x793A;&#x5B9E;&#x9645;&#x503C;&#x3002;</p>
<p>3)<code>accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))</code>&#x6C42;&#x5E73;&#x5747;&#x5F97;&#x5230;&#x9884;&#x6D4B;&#x51C6;&#x786E;&#x7387;&#x3002;</p>
<p>&#x5728;&#x6D4B;&#x8BD5;&#x96C6;&#x4E0A;&#xFF0C;&#x8F93;&#x51FA;&#x7ED3;&#x679C;&#x5982;&#x4E0B;:</p>
<p><img src="http://ovhbzkbox.bkt.clouddn.com/2018-08-16-15343532923085.jpg" alt=""></p>
<p>&#x7531;&#x8F93;&#x51FA;&#x7ED3;&#x679C;&#x8868;&#x660E;&#xFF0C;&#x5728;&#x6D4B;&#x8BD5;&#x96C6;&#x4E0A;&#x7684;&#x51C6;&#x786E;&#x7387;&#x53EF;&#x4EE5;&#x8FBE;&#x5230;99%&#x5DE6;&#x53F3;&#xFF0C;<code>Lenet</code>&#x6027;&#x80FD;&#x826F;&#x597D;&#x3002;</p>
<h2 id="lenet5&#x6E90;&#x7801;&#x7684;&#x5168;&#x6587;&#x6CE8;&#x91CA;"><code>Lenet5</code>&#x6E90;&#x7801;&#x7684;&#x5168;&#x6587;&#x6CE8;&#x91CA;</h2>
<p><code>mnist_lenet5_forward.py</code></p>
<pre><code class="lang-python"><span class="hljs-comment">#coding:utf-8</span>
<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf
<span class="hljs-comment"># &#x8BBE;&#x5B9A;&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x7684;&#x8D85;&#x53C2;&#x6570;</span>
<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x795E;&#x7ECF;&#x7F51;&#x7EDC;&#x53EF;&#x4EE5;&#x63A5;&#x6536;&#x7684;&#x56FE;&#x7247;&#x7684;&#x5C3A;&#x5BF8;&#x548C;&#x901A;&#x9053;&#x6570;</span>
IMAGE_SIZE = <span class="hljs-number">28</span>
NUM_CHANNELS = <span class="hljs-number">1</span>

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x7684;&#x5927;&#x5C0F;&#x548C;&#x4E2A;&#x6570; </span>
CONV1_SIZE = <span class="hljs-number">5</span> 
CONV1_KERNEL_NUM = <span class="hljs-number">32</span>

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x7B2C;&#x4E8C;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x7684;&#x5927;&#x5C0F;&#x548C;&#x4E2A;&#x6570;</span>
CONV2_SIZE = <span class="hljs-number">5</span> 
CONV2_KERNEL_NUM = <span class="hljs-number">64</span>

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x7B2C;&#x4E09;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x795E;&#x7ECF;&#x5143;&#x4E2A;&#x6570; </span>
FC_SIZE = <span class="hljs-number">512</span>
<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x7B2C;&#x56DB;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x795E;&#x7ECF;&#x5143;&#x4E2A;&#x6570; </span>
OUTPUT_NODE = <span class="hljs-number">10</span>

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x521D;&#x59CB;&#x5316;&#x7F51;&#x7EDC;&#x6743;&#x91CD;&#x51FD;&#x6570;</span>
<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">get_weight</span><span class="hljs-params">(shape, regularizer)</span>:</span>
  <span class="hljs-string">&apos;&apos;&apos; args:
  shape:&#x751F;&#x6210;&#x5F20;&#x91CF;&#x7684;&#x7EF4;&#x5EA6;
  &#x8BFE;&#x7A0B;&#x4E2D; Lenet5 &#x6E90;&#x7801;&#x7684;&#x5168;&#x6587;&#x6CE8;&#x91CA;
  regularizer: &#x6B63;&#x5219;&#x5316;&#x9879;&#x7684;&#x6743;&#x91CD; 
  &apos;&apos;&apos;</span>
  <span class="hljs-comment"># tf.truncated_normal &#x751F;&#x6210;&#x53BB;&#x6389;&#x8FC7;&#x5927;&#x504F;&#x79BB;&#x70B9;&#x7684;&#x6B63;&#x6001;&#x5206;&#x5E03;&#x968F;&#x673A;&#x6570;&#x7684;&#x5F20;&#x91CF;&#xFF0C;stddev &#x662F;&#x6307;&#x5B9A;&#x6807;&#x51C6;&#x5DEE;</span>
  w = tf.Variable(tf.truncated_normal(shape,stddev=<span class="hljs-number">0.1</span>))
  <span class="hljs-comment"># &#x4E3A;&#x6743;&#x91CD;&#x52A0;&#x5165; L2 &#x6B63;&#x5219;&#x5316;&#xFF0C;&#x901A;&#x8FC7;&#x9650;&#x5236;&#x6743;&#x91CD;&#x7684;&#x5927;&#x5C0F;&#xFF0C;&#x4F7F;&#x6A21;&#x578B;&#x4E0D;&#x4F1A;&#x968F;&#x610F;&#x62DF;&#x5408;&#x8BAD;&#x7EC3;&#x6570;&#x636E;&#x4E2D;&#x7684;&#x968F;&#x673A;&#x566A;&#x97F3;</span>
  <span class="hljs-keyword">if</span> regularizer != <span class="hljs-keyword">None</span>: 
    tf.add_to_collection(<span class="hljs-string">&apos;losses&apos;</span>, tf.contrib.layers.l2_regularizer(regularizer)(w))   
  <span class="hljs-keyword">return</span> w

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x521D;&#x59CB;&#x5316;&#x504F;&#x7F6E;&#x9879;&#x51FD;&#x6570;</span>
<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">get_bias</span><span class="hljs-params">(shape)</span>:</span>
  <span class="hljs-string">&apos;&apos;&apos; args:
  shape:&#x751F;&#x6210;&#x5F20;&#x91CF;&#x7684;&#x7EF4;&#x5EA6; 
  &apos;&apos;&apos;</span>
  b = tf.Variable(tf.zeros(shape)) <span class="hljs-comment"># &#x7EDF;&#x4E00;&#x5C06; bias &#x521D;&#x59CB;&#x5316;&#x4E3A; 0</span>
  <span class="hljs-keyword">return</span> b

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x5377;&#x79EF;&#x8BA1;&#x7B97;&#x51FD;&#x6570;</span>
<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">conv2d</span><span class="hljs-params">(x,w)</span>:</span>
  <span class="hljs-string">&apos;&apos;&apos; args:
  x: &#x4E00;&#x4E2A;&#x8F93;&#x5165; batch
  w: &#x5377;&#x79EF;&#x5C42;&#x7684;&#x6743;&#x91CD; &apos;&apos;&apos;</span>

  <span class="hljs-comment"># strides &#x8868;&#x793A;&#x5377;&#x79EF;&#x6838;&#x5728;&#x4E0D;&#x540C;&#x7EF4;&#x5EA6;&#x4E0A;&#x7684;&#x79FB;&#x52A8;&#x6B65;&#x957F;&#x4E3A; 1&#xFF0C;&#x7B2C;&#x4E00;&#x7EF4;&#x548C;&#x7B2C;&#x56DB;&#x7EF4;&#x4E00;&#x5B9A;&#x662F; 1&#xFF0C;&#x8FD9;&#x662F;&#x56E0;&#x4E3A;&#x5377;&#x79EF;&#x5C42;&#x7684;&#x6B65; &#x957F;&#x53EA;&#x5BF9;&#x77E9;&#x9635;&#x7684;&#x957F;&#x548C;&#x5BBD;&#x6709;&#x6548;;</span>
  <span class="hljs-comment"># padding=&apos;SAME&apos;&#x8868;&#x793A;&#x4F7F;&#x7528;&#x5168; 0 &#x586B;&#x5145;&#xFF0C;&#x800C;&apos;VALID&apos;&#x8868;&#x793A;&#x4E0D;&#x586B;&#x5145;</span>
  <span class="hljs-keyword">return</span> tf.nn.conv2d(x, w, strides=[<span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>, <span class="hljs-number">1</span>], padding=<span class="hljs-string">&apos;SAME&apos;</span>)

<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x6700;&#x5927;&#x6C60;&#x5316;&#x64CD;&#x4F5C;&#x51FD;&#x6570;</span>
<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">max_pool_2x2</span><span class="hljs-params">(x)</span>:</span> 
  <span class="hljs-string">&apos;&apos;&apos;
  args:
  x: &#x4E00;&#x4E2A;&#x8F93;&#x5165; batch
  &apos;&apos;&apos;</span>
  <span class="hljs-comment"># ksize &#x8868;&#x793A;&#x6C60;&#x5316;&#x8FC7;&#x6EE4;&#x5668;&#x7684;&#x8FB9;&#x957F;&#x4E3A; 2&#xFF0C;strides &#x8868;&#x793A;&#x8FC7;&#x6EE4;&#x5668;&#x79FB;&#x52A8;&#x6B65;&#x957F;&#x662F; 2&#xFF0C;&apos;SAME&apos;&#x63D0;&#x4F9B;&#x4F7F;&#x7528;&#x5168; 0 &#x586B;&#x5145;</span>
  <span class="hljs-keyword">return</span> tf.nn.max_pool(x, ksize=[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>], strides=[<span class="hljs-number">1</span>, <span class="hljs-number">2</span>, <span class="hljs-number">2</span>, <span class="hljs-number">1</span>], padding=<span class="hljs-string">&apos;SAME&apos;</span>)

<span class="hljs-comment">#&#x5B9A;&#x4E49;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x7684;&#x8FC7;&#x7A0B;</span>
<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">forward</span><span class="hljs-params">(x, train, regularizer)</span>:</span>
  <span class="hljs-string">&apos;&apos;&apos; args:
  x: &#x4E00;&#x4E2A;&#x8F93;&#x5165; batch
  train: &#x7528;&#x4E8E;&#x533A;&#x5206;&#x8BAD;&#x7EC3;&#x8FC7;&#x7A0B; True&#xFF0C;&#x6D4B;&#x8BD5;&#x8FC7;&#x7A0B; False
  regularizer: &#x6B63;&#x5219;&#x5316;&#x9879;&#x7684;&#x6743;&#x91CD;
  &apos;&apos;&apos;</span>
  <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x7B2C;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x5C42;&#x7684;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;</span>
  conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer) <span class="hljs-comment"># &#x521D;&#x59CB;&#x5316;&#x5377;&#x79EF;&#x6838;</span>
  conv1_b = get_bias([CONV1_KERNEL_NUM]) <span class="hljs-comment"># &#x521D;&#x59CB;&#x5316;&#x504F;&#x7F6E;&#x9879;</span>
  conv1 = conv2d(x, conv1_w) <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x5377;&#x79EF;&#x8FD0;&#x7B97;</span>
  relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b)) <span class="hljs-comment"># &#x5BF9;&#x5377;&#x79EF;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x6DFB;&#x52A0;&#x504F;&#x7F6E;&#xFF0C;&#x5E76;&#x8FC7; relu &#x975E;&#x7EBF;&#x6027;&#x6FC0;&#x6D3B;&#x51FD;&#x6570;</span>
  pool1 = max_pool_2x2(relu1) <span class="hljs-comment"># &#x5C06;&#x6FC0;&#x6D3B;&#x540E;&#x7684;&#x8F93;&#x51FA;&#x8FDB;&#x884C;&#x6700;&#x5927;&#x6C60;&#x5316;</span>

  <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x7B2C;&#x4E8C;&#x5C42;&#x5377;&#x79EF;&#x5C42;&#x7684;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#xFF0C;&#x5E76;&#x521D;&#x59CB;&#x5316;&#x5377;&#x79EF;&#x5C42;&#x7684;&#x5BF9;&#x5E94;&#x53D8;&#x91CF;</span>
  conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM,
CONV2_KERNEL_NUM],regularizer) <span class="hljs-comment"># &#x8BE5;&#x5C42;&#x6BCF;&#x4E2A;&#x5377;&#x79EF;&#x6838;&#x7684;&#x901A;&#x9053;&#x6570;&#x8981;&#x4E0E;&#x4E0A;&#x4E00;&#x5C42;&#x5377;&#x79EF;&#x6838;&#x7684;&#x4E2A;&#x6570;&#x4E00;&#x81F4;</span>
  conv2_b = get_bias([CONV2_KERNEL_NUM])
  conv2 = conv2d(pool1, conv2_w) <span class="hljs-comment"># &#x8BE5;&#x5C42;&#x7684;&#x8F93;&#x5165;&#x5C31;&#x662F;&#x4E0A;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA; pool1</span>
  relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
  pool2 = max_pool_2x2(relu2)

  <span class="hljs-comment"># &#x5C06;&#x4E0A;&#x4E00;&#x6C60;&#x5316;&#x5C42;&#x7684;&#x8F93;&#x51FA; pool2(&#x77E9;&#x9635;)&#x8F6C;&#x5316;&#x4E3A;&#x4E0B;&#x4E00;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x8F93;&#x5165;&#x683C;&#x5F0F;(&#x5411;&#x91CF;)</span>
  pool_shape = pool2.get_shape().as_list() <span class="hljs-comment"># &#x5F97;&#x5230; pool2 &#x8F93;&#x51FA;&#x77E9;&#x9635;&#x7684;&#x7EF4;&#x5EA6;&#xFF0C;&#x5E76;&#x5B58;&#x5165; list &#x4E2D;&#xFF0C;&#x6CE8;&#x610F; pool_shape[0]&#x662F;&#x4E00;&#x4E2A; batch &#x7684;&#x503C;</span>

  nodes = pool_shape[<span class="hljs-number">1</span>] * pool_shape[<span class="hljs-number">2</span>] * pool_shape[<span class="hljs-number">3</span>] <span class="hljs-comment"># &#x4ECE; list &#x4E2D;&#x4F9D;&#x6B21;&#x53D6;&#x51FA;&#x77E9;&#x9635;&#x7684;&#x957F;&#x5BBD;&#x53CA;&#x6DF1;&#x5EA6;&#xFF0C;&#x5E76;&#x6C42;&#x4E09;&#x8005;&#x7684;&#x4E58;&#x79EF;&#x5C31;&#x5F97;&#x5230;&#x77E9;&#x9635;&#x88AB;&#x62C9;&#x957F;&#x540E;&#x7684;&#x957F;&#x5EA6;</span>
  reshaped = tf.reshape(pool2, [pool_shape[<span class="hljs-number">0</span>], nodes]) <span class="hljs-comment"># &#x5C06; pool2 &#x8F6C;&#x6362;&#x4E3A;&#x4E00;&#x4E2A; batch &#x7684;&#x5411;&#x91CF;&#x518D;&#x4F20;&#x5165;&#x540E;&#x7EED;&#x7684;&#x5168;&#x8FDE;&#x63A5;</span>

  <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x7B2C;&#x4E09;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;</span>
  fc1_w = get_weight([nodes, FC_SIZE], regularizer) <span class="hljs-comment"># &#x521D;&#x59CB;&#x5316;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x6743;&#x91CD;&#xFF0C;&#x5E76;&#x52A0;&#x5165;&#x6B63;&#x5219;&#x5316;</span>
  fc1_b = get_bias([FC_SIZE]) <span class="hljs-comment"># &#x521D;&#x59CB;&#x5316;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x504F;&#x7F6E;&#x9879;</span>
  <span class="hljs-comment"># &#x5C06;&#x8F6C;&#x6362;&#x540E;&#x7684; reshaped &#x5411;&#x91CF;&#x4E0E;&#x6743;&#x91CD; fc1_w &#x505A;&#x77E9;&#x9635;&#x4E58;&#x6CD5;&#x8FD0;&#x7B97;&#xFF0C;&#x7136;&#x540E;&#x518D;&#x52A0;&#x4E0A;&#x504F;&#x7F6E;&#xFF0C;&#x6700;&#x540E;&#x518D;&#x4F7F;&#x7528; relu &#x8FDB;&#x884C;&#x6FC0;&#x6D3B;</span>
  fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b)
  <span class="hljs-comment"># &#x5982;&#x679C;&#x662F;&#x8BAD;&#x7EC3;&#x9636;&#x6BB5;&#xFF0C;&#x5219;&#x5BF9;&#x8BE5;&#x5C42;&#x8F93;&#x51FA;&#x4F7F;&#x7528; dropout&#xFF0C;&#x4E5F;&#x5C31;&#x662F;&#x968F;&#x673A;&#x7684;&#x5C06;&#x8BE5;&#x5C42;&#x8F93;&#x51FA;&#x4E2D;&#x7684;&#x4E00;&#x534A;&#x795E;&#x7ECF;&#x5143;&#x7F6E;&#x4E3A;&#x65E0;&#x6548;&#xFF0C;&#x662F;&#x4E3A;&#x4E86;&#x907F;&#x514D;&#x8FC7;&#x62DF;&#x5408;&#x800C;&#x8BBE;&#x7F6E;&#x7684;&#xFF0C;&#x4E00;&#x822C;&#x53EA;&#x5728;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x4E2D;&#x4F7F;&#x7528;</span>
  <span class="hljs-keyword">if</span> train: fc1 = tf.nn.dropout(fc1, <span class="hljs-number">0.5</span>)
  <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x7B2C;&#x56DB;&#x5C42;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x7684;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x8FC7;&#x7A0B;&#xFF0C;&#x5E76;&#x521D;&#x59CB;&#x5316;&#x5168;&#x8FDE;&#x63A5;&#x5C42;&#x5BF9;&#x5E94;&#x7684;&#x53D8;&#x91CF;</span>
  fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
  fc2_b = get_bias([OUTPUT_NODE])
  y = tf.matmul(fc1, fc2_w) + fc2_b
  <span class="hljs-keyword">return</span> y
</code></pre>
<p><code>mnist_lenet5_backward.py</code></p>
<pre><code class="lang-python"><span class="hljs-comment">#coding:utf-8</span>
<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf
<span class="hljs-keyword">from</span> tensorflow.examples.tutorials.mnist <span class="hljs-keyword">import</span> input_data
<span class="hljs-keyword">import</span> mnist_lenet5_forward
<span class="hljs-keyword">import</span> os
<span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np
<span class="hljs-comment"># &#x5B9A;&#x4E49;&#x8BAD;&#x7EC3;&#x8FC7;&#x7A0B;&#x4E2D;&#x7684;&#x8D85;&#x53C2;&#x6570;</span>
BATCH_SIZE = <span class="hljs-number">100</span> <span class="hljs-comment"># &#x4E00;&#x4E2A; batch &#x7684;&#x6570;&#x91CF;</span>
LEARNING_RATE_BASE = <span class="hljs-number">0.005</span> <span class="hljs-comment"># &#x521D;&#x59CB;&#x5B66;&#x4E60;&#x7387;</span>
LEARNING_RATE_DECAY = <span class="hljs-number">0.99</span> <span class="hljs-comment"># &#x5B66;&#x4E60;&#x7387;&#x7684;&#x8870;&#x51CF;&#x7387; </span>
REGULARIZER = <span class="hljs-number">0.0001</span> <span class="hljs-comment"># &#x6B63;&#x5219;&#x5316;&#x9879;&#x7684;&#x6743;&#x91CD;</span>
STEPS = <span class="hljs-number">50000</span> <span class="hljs-comment"># &#x6700;&#x5927;&#x8FED;&#x4EE3;&#x6B21;&#x6570; </span>
MOVING_AVERAGE_DECAY = <span class="hljs-number">0.99</span> <span class="hljs-comment"># &#x6ED1;&#x52A8;&#x5E73;&#x5747;&#x7684;&#x8870;&#x51CF;&#x7387; </span>
MODEL_SAVE_PATH=<span class="hljs-string">&quot;./model/&quot;</span> <span class="hljs-comment"># &#x4FDD;&#x5B58;&#x6A21;&#x578B;&#x7684;&#x8DEF;&#x5F84; </span>
MODEL_NAME=<span class="hljs-string">&quot;mnist_model&quot;</span> <span class="hljs-comment"># &#x6A21;&#x578B;&#x547D;&#x540D;</span>

<span class="hljs-comment"># &#x8BAD;&#x7EC3;&#x8FC7;&#x7A0B;</span>
<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">backward</span><span class="hljs-params">(mnist)</span>:</span>
  <span class="hljs-comment"># x, y_&#x662F;&#x5B9A;&#x4E49;&#x7684;&#x5360;&#x4F4D;&#x7B26;&#xFF0C;&#x9700;&#x8981;&#x6307;&#x5B9A;&#x53C2;&#x6570;&#x7684;&#x7C7B;&#x578B;&#xFF0C;&#x7EF4;&#x5EA6;(&#x8981;&#x548C;&#x7F51;&#x7EDC;&#x7684;&#x8F93;&#x5165;&#x4E0E;&#x8F93;&#x51FA;&#x7EF4;&#x5EA6;&#x4E00;&#x81F4;)&#xFF0C;&#x7C7B;&#x4F3C;&#x4E8E;&#x51FD;&#x6570;&#x7684;&#x5F62;&#x53C2;&#xFF0C;&#x8FD0;&#x884C;&#x65F6;&#x5FC5;&#x987B;&#x4F20;&#x5165;&#x503C;</span>
  x = tf.placeholder(tf.float32,[BATCH_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])
  y_ = tf.placeholder(tf.float32, [<span class="hljs-keyword">None</span>, mnist_lenet5_forward.OUTPUT_NODE])
  y = mnist_lenet5_forward.forward(x,<span class="hljs-keyword">True</span>, REGULARIZER) <span class="hljs-comment"># &#x8C03;&#x7528;&#x524D;&#x5411;&#x4F20;&#x64AD;&#x7F51;&#x7EDC;&#x5F97;&#x5230;&#x7EF4;&#x5EA6;&#x4E3A; 10 &#x7684; tensor</span>
  global_step = tf.Variable(<span class="hljs-number">0</span>, trainable=<span class="hljs-keyword">False</span>) <span class="hljs-comment"># &#x58F0;&#x660E;&#x4E00;&#x4E2A;&#x5168;&#x5C40;&#x8BA1;&#x6570;&#x5668;&#xFF0C;&#x5E76;&#x8F93;&#x51FA;&#x5316;&#x4E3A; 0</span>
  <span class="hljs-comment"># &#x5148;&#x662F;&#x5BF9;&#x7F51;&#x7EDC;&#x6700;&#x540E;&#x4E00;&#x5C42;&#x7684;&#x8F93;&#x51FA; y &#x505A; softmax&#xFF0C;&#x901A;&#x5E38;&#x662F;&#x6C42;&#x53D6;&#x8F93;&#x51FA;&#x5C5E;&#x4E8E;&#x67D0;&#x4E00;&#x7C7B;&#x7684;&#x6982;&#x7387;&#xFF0C;&#x5176;&#x5B9E;&#x5C31;&#x662F;&#x4E00;&#x4E2A; num_classes &#x5927;&#x5C0F;&#x7684;&#x5411;&#x91CF;&#xFF0C;</span>
  <span class="hljs-comment"># &#x518D;&#x5C06;&#x6B64;&#x5411;&#x91CF;&#x548C;&#x5B9E;&#x9645;&#x6807;&#x7B7E;&#x503C;&#x505A;&#x4EA4;&#x53C9;&#x71B5;&#xFF0C;&#x9700;&#x8981;&#x8BF4;&#x660E;&#x7684;&#x662F;&#x8BE5;&#x51FD;&#x6570;&#x8FD4;&#x56DE;&#x7684;&#x662F;&#x4E00;&#x4E2A;&#x5411;&#x91CF;</span>
  ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, <span class="hljs-number">1</span>))
  cem = tf.reduce_mean(ce) <span class="hljs-comment"># &#x518D;&#x5BF9;&#x5F97;&#x5230;&#x7684;&#x5411;&#x91CF;&#x6C42;&#x5747;&#x503C;&#x5C31;&#x5F97;&#x5230; loss</span>
  loss = cem + tf.add_n(tf.get_collection(<span class="hljs-string">&apos;losses&apos;</span>)) <span class="hljs-comment"># &#x6DFB;&#x52A0;&#x6B63;&#x5219;&#x5316;&#x4E2D;&#x7684; losses</span>
  <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x6307;&#x6570;&#x7EA7;&#x7684;&#x51CF;&#x5C0F;&#x5B66;&#x4E60;&#x7387;&#xFF0C;&#x53EF;&#x4EE5;&#x8BA9;&#x6A21;&#x578B;&#x5728;&#x8BAD;&#x7EC3;&#x7684;&#x524D;&#x671F;&#x5FEB;&#x901F;&#x63A5;&#x8FD1;&#x8F83;&#x4F18;&#x89E3;&#xFF0C;&#x53C8;&#x53EF;&#x4EE5;&#x4FDD;&#x8BC1;&#x6A21;&#x578B;&#x5728;&#x8BAD; &#x7EC3;&#x540E;&#x671F;&#x4E0D;&#x4F1A;&#x6709;&#x592A;&#x5927;&#x6CE2;&#x52A8;</span>
  <span class="hljs-comment"># &#x8BA1;&#x7B97;&#x516C;&#x5F0F;:decayed_learning_rate=learining_rate*decay_rate^(global_step/decay_steps)</span>
  learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step,mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY, staircase=<span class="hljs-keyword">True</span>) <span class="hljs-comment"># &#x5F53; staircase=True &#x65F6;&#xFF0C;(global_step/decay_steps)&#x5219;&#x88AB;&#x8F6C;&#x5316;&#x4E3A;&#x6574;&#x6570;&#xFF0C;&#x4EE5;&#x6B64;&#x6765;&#x9009;&#x62E9;&#x4E0D;&#x540C;&#x7684;&#x8870;&#x51CF;&#x65B9;&#x5F0F;</span>
  <span class="hljs-comment"># &#x4F20;&#x5165;&#x5B66;&#x4E60;&#x7387;&#xFF0C;&#x6784;&#x9020;&#x4E00;&#x4E2A;&#x5B9E;&#x73B0;&#x68AF;&#x5EA6;&#x4E0B;&#x964D;&#x7B97;&#x6CD5;&#x7684;&#x4F18;&#x5316;&#x5668;&#xFF0C;&#x518D;&#x901A;&#x8FC7;&#x4F7F;&#x7528; minimize &#x66F4;&#x65B0;&#x5B58;&#x50A8;&#x8981;&#x8BAD;&#x7EC3;&#x7684; &#x53D8;&#x91CF;&#x7684;&#x5217;&#x8868;&#x6765;&#x51CF;&#x5C0F; loss</span>
  train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
  <span class="hljs-comment"># &#x5B9E;&#x73B0;&#x6ED1;&#x52A8;&#x5E73;&#x5747;&#x6A21;&#x578B;&#xFF0C;&#x53C2;&#x6570; MOVING_AVERAGE_DECAY &#x7528;&#x4E8E;&#x63A7;&#x5236;&#x6A21;&#x578B;&#x66F4;&#x65B0;&#x7684;&#x901F;&#x5EA6;&#x3002;&#x8BAD;&#x7EC3;&#x8FC7;&#x7A0B;&#x4E2D;&#x4F1A;&#x5BF9;&#x6BCF;&#x4E00;&#x4E2A;&#x53D8;&#x91CF;&#x7EF4;&#x62A4;&#x4E00;&#x4E2A;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#xFF0C;&#x8FD9;&#x4E2A;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#x7684;&#x521D;&#x59CB;&#x503C;</span>
  <span class="hljs-comment"># &#x5C31;&#x662F;&#x76F8;&#x5E94;&#x53D8;&#x91CF;&#x7684;&#x521D;&#x59CB;&#x503C;&#xFF0C;&#x6BCF;&#x6B21;&#x53D8;&#x91CF;&#x66F4;&#x65B0;&#x65F6;&#xFF0C;&#x5F71;&#x5B50;&#x53D8;&#x91CF;&#x5C31;&#x4F1A;&#x968F;&#x4E4B;&#x66F4;&#x65B0;</span>
  ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
  ema_op = ema.apply(tf.trainable_variables())
  <span class="hljs-keyword">with</span> tf.control_dependencies([train_step,ema_op]):<span class="hljs-comment"># &#x5C06;train_step&#x548C;ema_op&#x4E24;&#x4E2A;&#x8BAD;&#x7EC3;&#x64CD;&#x4F5C;&#x7ED1;&#x5B9A;&#x5230;train_op&#x4E0A;</span>
    train_op = tf.no_op(name=<span class="hljs-string">&apos;train&apos;</span>)
  saver = tf.train.Saver() <span class="hljs-comment"># &#x5B9E;&#x4F8B;&#x5316;&#x4E00;&#x4E2A;&#x4FDD;&#x5B58;&#x548C;&#x6062;&#x590D;&#x53D8;&#x91CF;&#x7684;saver</span>
  <span class="hljs-keyword">with</span> tf.Session() <span class="hljs-keyword">as</span> sess: <span class="hljs-comment"># &#x521B;&#x5EFA;&#x4E00;&#x4E2A;&#x4F1A;&#x8BDD;&#xFF0C;&#x5E76;&#x901A;&#x8FC7; python &#x4E2D;&#x7684;&#x4E0A;&#x4E0B;&#x6587;&#x7BA1;&#x7406;&#x5668;&#x6765;&#x7BA1;&#x7406;&#x8FD9;&#x4E2A;&#x4F1A;&#x8BDD;</span>
    init_op = tf.global_variables_initializer() <span class="hljs-comment"># &#x521D;&#x59CB;&#x5316;&#x8BA1;&#x7B97;&#x56FE;&#x4E2D;&#x7684;&#x53D8;&#x91CF;</span>
    sess.run(init_op)
    ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH) <span class="hljs-comment"># &#x901A;&#x8FC7;checkpoint&#x6587;&#x4EF6;&#x5B9A;&#x4F4D; &#x5230;&#x6700;&#x65B0;&#x4FDD;&#x5B58;&#x7684;&#x6A21;&#x578B;</span>
    <span class="hljs-keyword">if</span> ckpt <span class="hljs-keyword">and</span> ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path) <span class="hljs-comment"># &#x52A0;&#x8F7D;&#x6700;&#x65B0;&#x7684;&#x6A21;&#x578B;</span>
    <span class="hljs-keyword">for</span> i <span class="hljs-keyword">in</span> range(STEPS):
      xs, ys = mnist.train.next_batch(BATCH_SIZE) <span class="hljs-comment"># &#x8BFB;&#x53D6;&#x4E00;&#x4E2A; batch &#x7684;&#x6570;&#x636E;</span>
      <span class="hljs-comment"># &#x5C06;&#x8F93;&#x5165;&#x6570;&#x636E; xs &#x8F6C;&#x6362;&#x6210;&#x4E0E;&#x7F51;&#x7EDC;&#x8F93;&#x5165;&#x76F8;&#x540C;&#x5F62;&#x72B6;&#x7684;&#x77E9;&#x9635;</span>
      reshaped_xs = np.reshape(xs,(BATCH_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))
      <span class="hljs-comment"># &#x5582;&#x5165;&#x8BAD;&#x7EC3;&#x56FE;&#x50CF;&#x548C;&#x6807;&#x7B7E;&#xFF0C;&#x5F00;&#x59CB;&#x8BAD;&#x7EC3;</span>
      _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_:ys})
      <span class="hljs-keyword">if</span> i % <span class="hljs-number">100</span> == <span class="hljs-number">0</span>: <span class="hljs-comment"># &#x6BCF;&#x8FED;&#x4EE3;100&#x6B21;&#x6253;&#x5370;loss&#x4FE1;&#x606F;&#xFF0C;&#x5E76;&#x4FDD;&#x5B58;&#x6700;&#x65B0;&#x7684;&#x6A21;&#x578B;</span>
        print(<span class="hljs-string">&quot;After %d training step(s), loss on training batch is %g.&quot;</span> % (step,
        saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">main</span><span class="hljs-params">()</span>:</span>
  mnist = input_data.read_data_sets(<span class="hljs-string">&quot;./data/&quot;</span>, one_hot=<span class="hljs-keyword">True</span>) <span class="hljs-comment"># &#x8BFB;&#x5165; mnist &#x6570;&#x636E; </span>
  backward(mnist)

<span class="hljs-keyword">if</span> __name__ == <span class="hljs-string">&apos;__main__&apos;</span>:
  main()
</code></pre>
<p><code>mnist_lenet5_test.py</code></p>
<pre><code class="lang-python"><span class="hljs-comment">#coding:utf-8</span>
<span class="hljs-keyword">import</span> time
<span class="hljs-keyword">import</span> tensorflow <span class="hljs-keyword">as</span> tf
<span class="hljs-keyword">from</span> tensorflow.examples.tutorials.mnist <span class="hljs-keyword">import</span> input_data 
<span class="hljs-keyword">import</span> mnist_lenet5_forward
<span class="hljs-keyword">import</span> mnist_lenet5_backward
<span class="hljs-keyword">import</span> numpy <span class="hljs-keyword">as</span> np

TEST_INTERVAL_SECS = <span class="hljs-number">5</span>

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">test</span><span class="hljs-params">(mnist)</span>:</span>
  <span class="hljs-comment"># &#x521B;&#x5EFA;&#x4E00;&#x4E2A;&#x9ED8;&#x8BA4;&#x56FE;&#xFF0C;&#x5728;&#x8BE5;&#x56FE;&#x4E2D;&#x6267;&#x884C;&#x4EE5;&#x4E0B;&#x64CD;&#x4F5C;(&#x591A;&#x6570;&#x64CD;&#x4F5C;&#x548C; train &#x4E2D;&#x4E00;&#x6837;&#xFF0C;&#x5C31;&#x4E0D;&#x518D;&#x91CD;&#x590D;&#x89E3;&#x91CA;&#xFF0C;&#x5927;&#x5BB6; &#x5BF9;&#x7167;&#x5B66;&#x4E60;&#x5373;&#x53EF;)</span>
  <span class="hljs-keyword">with</span> tf.Graph().as_default() <span class="hljs-keyword">as</span> g:
    x = tf.placeholder(tf.float32,[ mnist.test.num_examples, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS])
    y_ = tf.placeholder(tf.float32, [<span class="hljs-keyword">None</span>, mnist_lenet5_forward.OUTPUT_NODE]) 
    y = mnist_lenet5_forward.forward(x,<span class="hljs-keyword">False</span>,<span class="hljs-keyword">None</span>)
    ema = tf.train.ExponentialMovingAverage(mnist_lenet5_backward.MOVING_AVERAGE_DECAY)
    ema_restore = ema.variables_to_restore() 
    saver = tf.train.Saver(ema_restore)
    correct_prediction = tf.equal(tf.argmax(y, <span class="hljs-number">1</span>), tf.argmax(y_, <span class="hljs-number">1</span>)) <span class="hljs-comment"># &#x5224;&#x65AD;&#x9884;&#x6D4B;&#x503C;&#x548C;&#x5B9E;&#x9645;&#x503C;&#x662F;&#x5426;</span>
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) <span class="hljs-comment"># &#x6C42;&#x5E73;&#x5747;&#x5F97;&#x5230;&#x51C6;&#x786E;&#x7387;</span>
    <span class="hljs-keyword">while</span> <span class="hljs-keyword">True</span>:
      <span class="hljs-keyword">with</span> tf.Session() <span class="hljs-keyword">as</span> sess:
        ckpt = tf.train.get_checkpoint_state(mnist_lenet5_backward.MODEL_SAVE_PATH)
        <span class="hljs-keyword">if</span> ckpt <span class="hljs-keyword">and</span> ckpt.model_checkpoint_path: 
          saver.restore(sess, ckpt.model_checkpoint_path)

          <span class="hljs-comment"># &#x6839;&#x636E;&#x8BFB;&#x5165;&#x7684;&#x6A21;&#x578B;&#x540D;&#x5B57;&#x5207;&#x5206;&#x51FA;&#x8BE5;&#x6A21;&#x578B;&#x662F;&#x5C5E;&#x4E8E;&#x8FED;&#x4EE3;&#x4E86;&#x591A;&#x5C11;&#x6B21;&#x4FDD;&#x5B58;&#x7684;</span>
          global_step = ckpt.model_checkpoint_path.split(<span class="hljs-string">&apos;/&apos;</span>)[<span class="hljs-number">-1</span>].split(<span class="hljs-string">&apos;-&apos;</span>)[<span class="hljs-number">-1</span>]
          reshaped_x = np.reshape(mnist.test.images,( mnist.test.num_examples, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.IMAGE_SIZE, mnist_lenet5_forward.NUM_CHANNELS))
          accuracy_score = sess.run(accuracy, feed_dict={x:reshaped_x,y_:mnist.test.labels}) <span class="hljs-comment"># &#x8BA1;&#x7B97;&#x51FA;&#x6D4B;&#x8BD5;&#x96C6;&#x4E0A;&#x51C6;&#x786E;&#x7387;</span>
          print(<span class="hljs-string">&quot;After %s training step(s), test accuracy = %g&quot;</span> % (global_step, accuracy_score))
        <span class="hljs-keyword">else</span>:
          print(<span class="hljs-string">&apos;No checkpoint file found&apos;</span>)
          <span class="hljs-keyword">return</span>
      time.sleep(TEST_INTERVAL_SECS) <span class="hljs-comment"># &#x6BCF;&#x9694; 5 &#x79D2;&#x5BFB;&#x627E;&#x4E00;&#x6B21;&#x662F;&#x5426;&#x6709;&#x6700;&#x65B0;&#x7684;&#x6A21;&#x578B;</span>

<span class="hljs-function"><span class="hljs-keyword">def</span> <span class="hljs-title">main</span><span class="hljs-params">()</span>:</span>
  mnist = input_data.read_data_sets(<span class="hljs-string">&quot;./data/&quot;</span>, one_hot=<span class="hljs-keyword">True</span>) 
  test(mnist)

<span class="hljs-keyword">if</span> __name__ == <span class="hljs-string">&apos;__main__&apos;</span>:
  main()
</code></pre>
<footer class="page-footer"><span class="copyright">Copyright &#xA9; scottdu 2018 all right reserved&#xFF0C;powered by Gitbook</span><span class="footer-modification">&#x8BE5;&#x6587;&#x4EF6;&#x4FEE;&#x8BA2;&#x65F6;&#x95F4;&#xFF1A;
2018-09-25 13:54:05
</span></footer> <link rel="stylesheet" type="text/css" href="https://storage.googleapis.com/app.klipse.tech/css/codemirror.css"> <script>     window.klipse_settings = {         selector: ".language-klipse, .lang-eval-clojure",         selector_eval_js: ".lang-eval-js",         selector_eval_python_client: ".lang-eval-python",         selector_eval_php: ".lang-eval-php",         selector_eval_scheme: ".lang-eval-scheme",         selector_eval_ruby: ".lang-eval-ruby",         selector_reagent: ".lang-reagent",        selector_google_charts: ".lang-google-chart",        selector_es2017: ".lang-eval-es2017",        selector_jsx: ".lang-eval-jsx",        selector_transpile_jsx: ".lang-transpile-jsx",        selector_render_jsx: ".lang-render-jsx",        selector_react: ".lang-react",        selector_eval_markdown: ".lang-render-markdown",        selector_eval_lambdaway: ".lang-render-lambdaway",        selector_eval_cpp: ".lang-eval-cpp",        selector_eval_html: ".lang-render-html",        selector_sql: ".lang-eval-sql",        selector_brainfuck: "lang-eval-brainfuck",        selector_js: ".lang-transpile-cljs"    }; </script> <script src="https://storage.googleapis.com/app.klipse.tech/plugin/js/klipse_plugin.js"></script>
                                
                                </section>
                            
    </div>
    <div class="search-results">
        <div class="has-results">
            
            <h1 class="search-results-title"><span class='search-results-count'></span> results matching "<span class='search-query'></span>"</h1>
            <ul class="search-results-list"></ul>
            
        </div>
        <div class="no-results">
            
            <h1 class="search-results-title">No results matching "<span class='search-query'></span>"</h1>
            
        </div>
    </div>
</div>

                        </div>
                    </div>
                
            </div>

            
                
                <a href="section5.1.html" class="navigation navigation-prev " aria-label="Previous page: 第一节 卷积神经网络">
                    <i class="fa fa-angle-left"></i>
                </a>
                
                
                <a href="../chapter6/" class="navigation navigation-next " aria-label="Next page: 第六章 卷积网络实践">
                    <i class="fa fa-angle-right"></i>
                </a>
                
            
        
    </div>

    <script>
        var gitbook = gitbook || [];
        gitbook.push(function() {
            gitbook.page.hasChanged({"page":{"title":"第二节 lenel5代码讲解","level":"1.6.2","depth":2,"next":{"title":"第六章 卷积网络实践","level":"1.7","depth":1,"path":"chapter6/README.md","ref":"chapter6/README.md","articles":[{"title":"第一节 复现已有的卷积神经网络","level":"1.7.1","depth":2,"path":"chapter5/section6.1.md","ref":"chapter5/section6.1.md","articles":[]},{"title":"第二节 用vgg16实现图片识别","level":"1.7.2","depth":2,"path":"chapter6/section6.2.md","ref":"chapter6/section6.2.md","articles":[]}]},"previous":{"title":"第一节 卷积神经网络","level":"1.6.1","depth":2,"path":"chapter5/section5.1.md","ref":"chapter5/section5.1.md","articles":[]},"dir":"ltr"},"config":{"plugins":["katex","expandable-chapters-small","tbfed-pagefooter","alerts","copy-code-button","puml","graph","chart","klipse","donate","simple-page-toc","splitter"],"root":".","styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"pluginsConfig":{"tbfed-pagefooter":{"copyright":"Copyright &copy scottdu 2018","modify_label":"该文件修订时间：","modify_format":"YYYY-MM-DD HH:mm:ss"},"puml":{},"simple-page-toc":{"maxDepth":3,"skipFirstH1":true},"splitter":{},"search":{},"lunr":{"maxIndexSize":1000000,"ignoreSpecialCharacters":false},"graph":{},"donate":{"alipay":"http://ovhbzkbox.bkt.clouddn.com/2018-08-11-alipay1.jpeg","alipayText":"支付宝打赏","button":"赏","title":"","wechat":"http://ovhbzkbox.bkt.clouddn.com/2018-08-11-wechatpay.png","wechatText":"微信打赏"},"katex":{},"fontsettings":{"theme":"white","family":"sans","size":2},"highlight":{},"alerts":{},"expandable-chapters-small":{},"copy-code-button":{},"klipse":{"myConfigKey":"it's the default 
value"},"sharing":{"facebook":true,"twitter":true,"google":false,"weibo":false,"instapaper":false,"vk":false,"all":["facebook","google","twitter","weibo","instapaper"]},"theme-default":{"styles":{"website":"styles/website.css","pdf":"styles/pdf.css","epub":"styles/epub.css","mobi":"styles/mobi.css","ebook":"styles/ebook.css","print":"styles/print.css"},"showLevel":false},"chart":{"type":"c3"}},"theme":"default","author":"scottdu","pdf":{"pageNumbers":true,"fontSize":12,"fontFamily":"Arial","paperSize":"a4","chapterMark":"pagebreak","pageBreaksBefore":"/","margin":{"right":62,"left":62,"top":56,"bottom":56}},"structure":{"langs":"LANGS.md","readme":"README.md","glossary":"GLOSSARY.md","summary":"SUMMARY.md"},"variables":{},"title":"Tensorflow学习笔记","language":"zh-hans","gitbook":"3.2.3","description":"记录Tensorflow的学习内容"},"file":{"path":"chapter5/section5.2.md","mtime":"2018-09-25T05:54:05.343Z","type":"markdown"},"gitbook":{"version":"3.2.3","time":"2018-09-25T05:55:22.995Z"},"basePath":"..","book":{"language":""}});
        });
    </script>
</div>

        
    <script src="../gitbook/gitbook.js"></script>
    <script src="../gitbook/theme.js"></script>
    
        
        <script src="../gitbook/gitbook-plugin-expandable-chapters-small/expandable-chapters-small.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-alerts/plugin.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-copy-code-button/toggle.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-donate/plugin.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-splitter/splitter.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-search/search-engine.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-search/search.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-lunr/lunr.min.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-lunr/search-lunr.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-sharing/buttons.js"></script>
        
    
        
        <script src="../gitbook/gitbook-plugin-fontsettings/fontsettings.js"></script>
        
    

    </body>
</html>

