
% Default to the notebook output style

    


% Inherit from the specified cell style.




    
\documentclass[14pt]{ctexart}

    
    
    \usepackage[T1]{fontenc}
    % Nicer default font than Computer Modern for most use cases
    \usepackage{palatino}

    % Basic figure setup, for now with no caption control since it's done
    % automatically by Pandoc (which extracts ![](path) syntax from Markdown).
    \usepackage{graphicx}
    % We will generate all images so they have a width \maxwidth. This means
    % that they will get their normal width if they fit onto the page, but
    % are scaled down if they would overflow the margins.
    \makeatletter
    \def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth
    \else\Gin@nat@width\fi}
    \makeatother
    \let\Oldincludegraphics\includegraphics
    % Set max figure width to be 80% of text width, for now hardcoded.
    \renewcommand{\includegraphics}[1]{\Oldincludegraphics[width=.8\maxwidth]{#1}}
    % Ensure that by default, figures have no caption (until we provide a
    % proper Figure object with a Caption API and a way to capture that
    % in the conversion process - todo).
    \usepackage{caption}
    \DeclareCaptionLabelFormat{nolabel}{}
    \captionsetup{labelformat=nolabel}

    \usepackage{adjustbox} % Used to constrain images to a maximum size 
    \usepackage{xcolor} % Allow colors to be defined
    \usepackage{enumerate} % Needed for markdown enumerations to work
    \usepackage{geometry} % Used to adjust the document margins
    \usepackage{amsmath} % Equations
    \usepackage{amssymb} % Equations
    \usepackage{textcomp} % defines textquotesingle
    % Hack from http://tex.stackexchange.com/a/47451/13684:
    \AtBeginDocument{%
        \def\PYZsq{\textquotesingle}% Upright quotes in Pygmentized code
    }
    \usepackage{upquote} % Upright quotes for verbatim code
    \usepackage{eurosym} % defines \euro
    \usepackage[mathletters]{ucs} % Extended unicode (utf-8) support
    \usepackage[utf8x]{inputenc} % Allow utf-8 characters in the tex document
    \usepackage{fancyvrb} % verbatim replacement that allows latex
    \usepackage{grffile} % extends the file name processing of package graphics 
                         % to support a larger range 
    % The hyperref package gives us a pdf with properly built
    % internal navigation ('pdf bookmarks' for the table of contents,
    % internal cross-reference links, web links for URLs, etc.)
    \usepackage{hyperref}
    \usepackage{longtable} % longtable support required by pandoc >1.10
    \usepackage{booktabs}  % table support for pandoc > 1.12.2
    \usepackage[normalem]{ulem} % ulem is needed to support strikethroughs (\sout)
                                % normalem makes italics be italics, not underlines
    

    
    
    % Colors for the hyperref package
    \definecolor{urlcolor}{rgb}{0,.145,.698}
    \definecolor{linkcolor}{rgb}{.71,0.21,0.01}
    \definecolor{citecolor}{rgb}{.12,.54,.11}

    % ANSI colors
    \definecolor{ansi-black}{HTML}{3E424D}
    \definecolor{ansi-black-intense}{HTML}{282C36}
    \definecolor{ansi-red}{HTML}{E75C58}
    \definecolor{ansi-red-intense}{HTML}{B22B31}
    \definecolor{ansi-green}{HTML}{00A250}
    \definecolor{ansi-green-intense}{HTML}{007427}
    \definecolor{ansi-yellow}{HTML}{DDB62B}
    \definecolor{ansi-yellow-intense}{HTML}{B27D12}
    \definecolor{ansi-blue}{HTML}{208FFB}
    \definecolor{ansi-blue-intense}{HTML}{0065CA}
    \definecolor{ansi-magenta}{HTML}{D160C4}
    \definecolor{ansi-magenta-intense}{HTML}{A03196}
    \definecolor{ansi-cyan}{HTML}{60C6C8}
    \definecolor{ansi-cyan-intense}{HTML}{258F8F}
    \definecolor{ansi-white}{HTML}{C5C1B4}
    \definecolor{ansi-white-intense}{HTML}{A1A6B2}

    % commands and environments needed by pandoc snippets
    % extracted from the output of `pandoc -s`
    \providecommand{\tightlist}{%
      \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
    \DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}}
    % Add ',fontsize=\small' for more characters per line
    \newenvironment{Shaded}{}{}
    \newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
    \newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.56,0.13,0.00}{{#1}}}
    \newcommand{\DecValTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
    \newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
    \newcommand{\FloatTok}[1]{\textcolor[rgb]{0.25,0.63,0.44}{{#1}}}
    \newcommand{\CharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
    \newcommand{\StringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
    \newcommand{\CommentTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textit{{#1}}}}
    \newcommand{\OtherTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{{#1}}}
    \newcommand{\AlertTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
    \newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.02,0.16,0.49}{{#1}}}
    \newcommand{\RegionMarkerTok}[1]{{#1}}
    \newcommand{\ErrorTok}[1]{\textcolor[rgb]{1.00,0.00,0.00}{\textbf{{#1}}}}
    \newcommand{\NormalTok}[1]{{#1}}
    
    % Additional commands for more recent versions of Pandoc
    \newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.53,0.00,0.00}{{#1}}}
    \newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
    \newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.25,0.44,0.63}{{#1}}}
    \newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.73,0.40,0.53}{{#1}}}
    \newcommand{\ImportTok}[1]{{#1}}
    \newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.73,0.13,0.13}{\textit{{#1}}}}
    \newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
    \newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
    \newcommand{\VariableTok}[1]{\textcolor[rgb]{0.10,0.09,0.49}{{#1}}}
    \newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.00,0.44,0.13}{\textbf{{#1}}}}
    \newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.40,0.40,0.40}{{#1}}}
    \newcommand{\BuiltInTok}[1]{{#1}}
    \newcommand{\ExtensionTok}[1]{{#1}}
    \newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.74,0.48,0.00}{{#1}}}
    \newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.49,0.56,0.16}{{#1}}}
    \newcommand{\InformationTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
    \newcommand{\WarningTok}[1]{\textcolor[rgb]{0.38,0.63,0.69}{\textbf{\textit{{#1}}}}}
    
    
    % Define a nice break command that doesn't care if a line doesn't already
    % exist.
    \def\br{\hspace*{\fill} \\* }
    % Math Jax compatability definitions
    \def\gt{>}
    \def\lt{<}
    % Document parameters
    \title{weight\_initialization}
    
    
    

    % Pygments definitions
    
\makeatletter
% Pygments token machinery (generated by nbconvert): each token class
% defines \PY@tok@<class>, which sets some of the six style hooks
% below; \PY applies the hooks around a piece of highlighted code.
%
% \PY@reset: return all six per-token style hooks (italic, bold,
% underline, text color, background color, font family) to no-ops
% before the next token is styled.
\def\PY@reset{\let\PY@it=\relax \let\PY@bf=\relax%
    \let\PY@ul=\relax \let\PY@tc=\relax%
    \let\PY@bc=\relax \let\PY@ff=\relax}
% \PY@tok{<class>}: expand the style definition \PY@tok@<class>
% registered for that token class (see the table below).
\def\PY@tok#1{\csname PY@tok@#1\endcsname}
% \PY@toks: walk a '+'-separated list of token classes (e.g. 'k+kn'),
% applying each class's style hooks in turn; the recursion stops at
% the '\relax+' sentinel appended by \PY.
\def\PY@toks#1+{\ifx\relax#1\empty\else%
    \PY@tok{#1}\expandafter\PY@toks\fi}
% \PY@do{<text>}: typeset <text> wrapped in the accumulated hooks,
% from innermost (font family) to outermost (background color).
\def\PY@do#1{\PY@bc{\PY@tc{\PY@ul{%
    \PY@it{\PY@bf{\PY@ff{#1}}}}}}}
% \PY{<classes>}{<text>}: reset the hooks, accumulate the styles for
% every class in <classes>, then render <text> with them.
\def\PY#1#2{\PY@reset\PY@toks#1+\relax+\PY@do{#2}}

\expandafter\def\csname PY@tok@mi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@kp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@na\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.49,0.56,0.16}{##1}}}
\expandafter\def\csname PY@tok@kn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@s1\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@bp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@sh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@mo\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@gi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@c1\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@ss\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sd\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@ne\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.82,0.25,0.23}{##1}}}
\expandafter\def\csname PY@tok@ge\endcsname{\let\PY@it=\textit}
\expandafter\def\csname PY@tok@nd\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@sx\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@nc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@m\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@err\endcsname{\def\PY@bc##1{\setlength{\fboxsep}{0pt}\fcolorbox[rgb]{1.00,0.00,0.00}{1,1,1}{\strut ##1}}}
\expandafter\def\csname PY@tok@mh\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@gp\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@ow\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.67,0.13,1.00}{##1}}}
\expandafter\def\csname PY@tok@vg\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@se\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.13}{##1}}}
\expandafter\def\csname PY@tok@gu\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.50,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@cs\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@cp\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.74,0.48,0.00}{##1}}}
\expandafter\def\csname PY@tok@gs\endcsname{\let\PY@bf=\textbf}
\expandafter\def\csname PY@tok@vc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@si\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@go\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.53,0.53}{##1}}}
\expandafter\def\csname PY@tok@il\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@nf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@vi\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}
\expandafter\def\csname PY@tok@sc\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@nt\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@k\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@kr\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@ch\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@gr\endcsname{\def\PY@tc##1{\textcolor[rgb]{1.00,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@kc\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@no\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.53,0.00,0.00}{##1}}}
\expandafter\def\csname PY@tok@nl\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.63,0.63,0.00}{##1}}}
\expandafter\def\csname PY@tok@mb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@kt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.69,0.00,0.25}{##1}}}
\expandafter\def\csname PY@tok@s\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@o\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@nb\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@w\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.73,0.73}{##1}}}
\expandafter\def\csname PY@tok@c\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@s2\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.13,0.13}{##1}}}
\expandafter\def\csname PY@tok@gh\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,0.50}{##1}}}
\expandafter\def\csname PY@tok@gt\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.00,0.27,0.87}{##1}}}
\expandafter\def\csname PY@tok@mf\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.40,0.40,0.40}{##1}}}
\expandafter\def\csname PY@tok@ni\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.60,0.60,0.60}{##1}}}
\expandafter\def\csname PY@tok@kd\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.50,0.00}{##1}}}
\expandafter\def\csname PY@tok@sr\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.73,0.40,0.53}{##1}}}
\expandafter\def\csname PY@tok@cpf\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@nn\endcsname{\let\PY@bf=\textbf\def\PY@tc##1{\textcolor[rgb]{0.00,0.00,1.00}{##1}}}
\expandafter\def\csname PY@tok@cm\endcsname{\let\PY@it=\textit\def\PY@tc##1{\textcolor[rgb]{0.25,0.50,0.50}{##1}}}
\expandafter\def\csname PY@tok@nv\endcsname{\def\PY@tc##1{\textcolor[rgb]{0.10,0.09,0.49}{##1}}}

\def\PYZbs{\char`\\}
\def\PYZus{\char`\_}
\def\PYZob{\char`\{}
\def\PYZcb{\char`\}}
\def\PYZca{\char`\^}
\def\PYZam{\char`\&}
\def\PYZlt{\char`\<}
\def\PYZgt{\char`\>}
\def\PYZsh{\char`\#}
\def\PYZpc{\char`\%}
\def\PYZdl{\char`\$}
\def\PYZhy{\char`\-}
\def\PYZsq{\char`\'}
\def\PYZdq{\char`\"}
\def\PYZti{\char`\~}
% for compatibility with earlier versions
\def\PYZat{@}
\def\PYZlb{[}
\def\PYZrb{]}
\makeatother


    % Exact colors from NB
    \definecolor{incolor}{rgb}{0.0, 0.0, 0.5}
    \definecolor{outcolor}{rgb}{0.545, 0.0, 0.0}



    
    % Prevent overflowing lines due to hard-to-break entities
    \sloppy 
    % Setup hyperref package
    \hypersetup{
      breaklinks=true,  % so long urls are correctly broken across lines
      colorlinks=true,
      urlcolor=urlcolor,
      linkcolor=linkcolor,
      citecolor=citecolor,
      }
    % Slightly bigger margins than the latex defaults
    
    \geometry{verbose,tmargin=1in,bmargin=1in,lmargin=1in,rmargin=1in}
    
    

    \begin{document}
    
    
    \maketitle
    
    

    
    \section{Weight Initialization}\label{weight-initialization}

In this lesson, you'll learn how to find good initial weights for a
neural network. Having good initial weights can place the neural network
close to the optimal solution. This allows the neural network to come to
the best solution quicker.

\subsection{Testing Weights}\label{testing-weights}

\subsubsection{Dataset}\label{dataset}

To see how different weights perform, we'll test on the same dataset and
neural network. Let's go over the dataset and neural network.

We'll be using the
\href{https://en.wikipedia.org/wiki/MNIST_database}{MNIST dataset} to
demonstrate the different initial weights. As a reminder, the MNIST
dataset contains images of handwritten numbers, 0-9, with normalized
input (0.0 - 1.0). Run the cell below to download and load the MNIST
dataset.

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} \PY{o}{\PYZpc{}}\PY{k}{matplotlib} inline
        
        \PY{k+kn}{import} \PY{n+nn}{tensorflow} \PY{k}{as} \PY{n+nn}{tf}
        \PY{k+kn}{import} \PY{n+nn}{helper}
        
        \PY{k+kn}{from} \PY{n+nn}{tensorflow}\PY{n+nn}{.}\PY{n+nn}{examples}\PY{n+nn}{.}\PY{n+nn}{tutorials}\PY{n+nn}{.}\PY{n+nn}{mnist} \PY{k}{import} \PY{n}{input\PYZus{}data}
        
        \PY{n+nb}{print}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Getting MNIST Dataset...}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
        \PY{n}{mnist} \PY{o}{=} \PY{n}{input\PYZus{}data}\PY{o}{.}\PY{n}{read\PYZus{}data\PYZus{}sets}\PY{p}{(}\PY{l+s+s2}{\PYZdq{}}\PY{l+s+s2}{MNIST\PYZus{}data/}\PY{l+s+s2}{\PYZdq{}}\PY{p}{,} \PY{n}{one\PYZus{}hot}\PY{o}{=}\PY{k+kc}{True}\PY{p}{)}
        \PY{n+nb}{print}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Data Extracted.}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}
\end{Verbatim}

    \subsubsection{Neural Network}\label{neural-network}

    For the neural network, we'll test on a 3 layer neural network with ReLU
activations and an Adam optimizer. The lessons you learn apply to other
neural networks, including different activations and optimizers.

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}6}]:} \PY{c+c1}{\PYZsh{} Save the shapes of weights for each layer}
        \PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape} \PY{o}{=} \PY{p}{(}\PY{n}{mnist}\PY{o}{.}\PY{n}{train}\PY{o}{.}\PY{n}{images}\PY{o}{.}\PY{n}{shape}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{,} \PY{l+m+mi}{256}\PY{p}{)}
        \PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape} \PY{o}{=} \PY{p}{(}\PY{l+m+mi}{256}\PY{p}{,} \PY{l+m+mi}{128}\PY{p}{)}
        \PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape} \PY{o}{=} \PY{p}{(}\PY{l+m+mi}{128}\PY{p}{,} \PY{n}{mnist}\PY{o}{.}\PY{n}{train}\PY{o}{.}\PY{n}{labels}\PY{o}{.}\PY{n}{shape}\PY{p}{[}\PY{l+m+mi}{1}\PY{p}{]}\PY{p}{)}
        \PY{n+nb}{print}\PY{p}{(}\PY{n}{mnist}\PY{o}{.}\PY{n}{train}\PY{o}{.}\PY{n}{images}\PY{o}{.}\PY{n}{shape}\PY{p}{)}
\end{Verbatim}

    \begin{Verbatim}[commandchars=\\\{\}]
(55000, 784)

    \end{Verbatim}

    \subsection{Initialize Weights}\label{initialize-weights}

Let's start looking at some initial weights. \#\#\# All Zeros or Ones If
you follow the principle of
\href{https://en.wikipedia.org/wiki/Occam's_razor}{Occam's razor}, you
might think setting all the weights to 0 or 1 would be the best
solution. This is not the case.

With every weight the same, all the neurons at each layer are producing
the same output. This makes it hard to decide which weights to adjust.

Let's compare the loss with all ones and all zero weights using
\texttt{helper.compare\_init\_weights}. This function will run two
different initial weights on the neural network above for 2 epochs. It
will plot the loss for the first 100 batches and print out stats after
the 2 epochs (\textasciitilde{}860 batches). We plot the first 100
batches to better judge which weights performed better at the start.

Run the cell below to see the difference between weights of all zeros
against all ones.

    \subsection{初始化 Weights}\label{ux521dux59cbux5316-weights}

\subsubsection{全0或者全1}\label{ux51680ux6216ux8005ux51681}

如果你遵循 Occam's razor（奥卡姆剃刀）原则，你可能会认为把weights
全部设置为0或者1是最好的解决方案。不过事实并非如此。

每个weight一样，每一层的所有神经元会得出相同的输出。这让决定优化哪个weight变得很困难。

让我们用 \texttt{helper.compare\_init\_weights}
对比下全1和全0情况下的loss（损失）。它会在上面的神经网络上用两个不同的初始化weights各训练2个epochs。然后画出前100个batches的loss，并在2个epochs（约860个batches）后打印出统计信息。我们画出前100个batches，以便更好地判断哪个weights在开始阶段表现得更好。

运行下面的cell，看看全0与全1 weights 之间的差别。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}7}]:} \PY{n}{all\PYZus{}zero\PYZus{}weights} \PY{o}{=} \PY{p}{[}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}\PY{p}{,}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}\PY{p}{,}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{zeros}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}
        \PY{p}{]}
        
        \PY{n}{all\PYZus{}one\PYZus{}weights} \PY{o}{=} \PY{p}{[}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{ones}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}\PY{p}{,}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{ones}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}\PY{p}{,}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{ones}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}
        \PY{p}{]}
        
        \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
            \PY{n}{mnist}\PY{p}{,}
            \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{All Zeros vs All Ones}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
            \PY{p}{[}
                \PY{p}{(}\PY{n}{all\PYZus{}zero\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{All Zeros}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                \PY{p}{(}\PY{n}{all\PYZus{}one\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{All Ones}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_7_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   11.260\% -- All Zeros
   10.700\% -- All Ones
Loss
    2.310  -- All Zeros
  185.515  -- All Ones

    \end{Verbatim}

    As you can see the accuracy is close to guessing for both zeros and
ones, around 10\%.

The neural network is having a hard time determining which weights need
to be changed, since the neurons have the same output for each layer. To
avoid neurons with the same output, let's use unique weights. We can
also randomly select these weights to avoid being stuck in a local
minimum for each run.

A good solution for getting these random weights is to sample from a
uniform distribution.

    如你所见，全0和全1的精度都接近随机猜测，在10\%左右。

神经网络很难确定哪个weights需要被修改，因为每一层的neurons都有相同的输出。为了避免neurons拥有相同的输出，让我们使用各不相同的weights。我们还可以随机选择这些weights，以避免每次运行都陷入局部最优解。

获取随机weights的一个好办法是从均匀分布中抽取样本。

    \hypertarget{uniform-distribution}{\subsubsection{Uniform
Distribution}\label{uniform-distribution}}

A \href{https://en.wikipedia.org/wiki/Uniform\_distribution\_(continuous)}{uniform
distribution}
has the equal probability of picking any number from a set of numbers.
We'll be picking from a continuous distribution, so the chance of picking
the same number is low. We'll use TensorFlow's
\texttt{tf.random\_uniform} function to pick random numbers from a
uniform distribution.

\begin{quote}
\mbox{}%
\paragraph{\texorpdfstring{\href{https://www.tensorflow.org/api_docs/python/tf/random_uniform}{\texttt{tf.random\_uniform(shape,\ minval=0,\ maxval=None,\ dtype=tf.float32,\ seed=None,\ name=None)}}}{tf.random\_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)}}\label{tf.random_uniformshape-minval0-maxvalnone-dtypetf.float32-seednone-namenone}

Outputs random values from a uniform distribution.
\end{quote}

\begin{quote}
The generated values follow a uniform distribution in the range
{[}minval, maxval). The lower bound minval is included in the range,
while the upper bound maxval is excluded.
\end{quote}

\begin{quote}
\begin{itemize}
\tightlist
\item
  \textbf{shape:} A 1-D integer Tensor or Python array. The shape of the
  output tensor.
\item
  \textbf{minval:} A 0-D Tensor or Python value of type dtype. The lower
  bound on the range of random values to generate. Defaults to 0.
\item
  \textbf{maxval:} A 0-D Tensor or Python value of type dtype. The upper
  bound on the range of random values to generate. Defaults to 1 if
  dtype is floating point.
\item
  \textbf{dtype:} The type of the output: float32, float64, int32, or
  int64.
\item
  \textbf{seed:} A Python integer. Used to create a random seed for the
  distribution. See tf.set\_random\_seed for behavior.
\item
  \textbf{name:} A name for the operation (optional).
\end{itemize}
\end{quote}

We can visualize the uniform distribution by using a histogram. Let's
map the values from \texttt{tf.random\_uniform({[}1000{]},\ -3,\ 3)} to
a histogram using the \texttt{helper.hist\_dist} function. This will be
\texttt{1000} random float values from \texttt{-3} to \texttt{3},
excluding the value \texttt{3}.

    \subsection{Uniform distribution
均匀分布}\label{uniform-distribution-ux5747ux5300ux5206ux5e03}

均匀分布从一组数字中挑选任意数字的概率一样。我们从连续的分布中选择数据，所以选中相同数字的几率就很小。我们使用
Tensorflow 的 \texttt{tf.random\_uniform}
方法从一个均匀分布中选择随机数字。

我们可以使用直方图画出均匀分布。让我们将
\texttt{tf.random\_uniform({[}1000{]},\ -3,\ 3)}
生成的数据映射到直方图中，通过 \texttt{helper.hist\_dist}
方法。这是1000个在-3到3之间的随机浮点数，不包含3。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}9}]:} \PY{n}{helper}\PY{o}{.}\PY{n}{hist\PYZus{}dist}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Random Uniform (minval=\PYZhy{}3, maxval=3)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{1000}\PY{p}{]}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{3}\PY{p}{,} \PY{l+m+mi}{3}\PY{p}{)}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_12_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    The histogram used 500 buckets for the 1000 values. Since the chance for
any single bucket is the same, there should be around 2 values for each
bucket. That's exactly what we see with the histogram. Some buckets have
more and some have less, but they trend around 2.

Now that you understand the \texttt{tf.random\_uniform} function, let's
apply it to some initial weights.

\subsubsection{Baseline}\label{baseline}

Let's see how well the neural network trains using the default values
for \texttt{tf.random\_uniform}, where \texttt{minval=0.0} and
\texttt{maxval=1.0}.

    直方图使用500个柱子表示这1000个数。由于落入任何一个柱子的几率相同，每个柱子应该大约有2个值。这正是我们在直方图中看到的：
有些柱子多一些，有些柱子少一些，但总体都在2附近。

\subsection{底线}\label{ux5e95ux7ebf}

让我们看看神经网络使用 \texttt{tf.random\_uniform} 默认值
（\texttt{minval=0.0}、\texttt{maxval=1.0}）的训练结果会有多好。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}10}]:} \PY{c+c1}{\PYZsh{} Default for tf.random\PYZus{}uniform is minval=0 and maxval=1}
         \PY{n}{basline\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
             \PY{n}{mnist}\PY{p}{,}
             \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Baseline}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
             \PY{p}{[}\PY{p}{(}\PY{n}{basline\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{tf.random\PYZus{}uniform [0, 1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_15_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   77.840\% -- tf.random\_uniform [0, 1)
Loss
    8.684  -- tf.random\_uniform [0, 1)

    \end{Verbatim}

    The loss graph is showing the neural network is learning, which it
didn't with all zeros or all ones. We're headed in the right direction.

\subsubsection{General rule for setting
weights}\label{general-rule-for-setting-weights}

The general rule for setting the weights in a neural network is to be
close to zero without being too small. A good practice is to start your
weights in the range of \([-y, y]\) where \(y=1/\sqrt{n}\) (\(n\) is the
number of inputs to a given neuron).

Let's see if this holds true, let's first center our range over zero.
This will give us the range {[}-1, 1).

    损失图展示了没有使用全0或者全1weights的神经网络正在学习。我们正朝着正确的方向前进

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}11}]:} \PY{n}{uniform\PYZus{}neg1to1\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{p}{,} \PY{l+m+mi}{1}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
             \PY{n}{mnist}\PY{p}{,}
             \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[0, 1) vs [\PYZhy{}1, 1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
             \PY{p}{[}
                 \PY{p}{(}\PY{n}{basline\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{tf.random\PYZus{}uniform [0, 1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{uniform\PYZus{}neg1to1\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{tf.random\PYZus{}uniform [\PYZhy{}1, 1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_18_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   70.240\% -- tf.random\_uniform [0, 1)
   91.180\% -- tf.random\_uniform [-1, 1)
Loss
   19.605  -- tf.random\_uniform [0, 1)
    2.422  -- tf.random\_uniform [-1, 1)

    \end{Verbatim}

    We're going in the right direction, the accuracy and loss is better with
{[}-1, 1). We still want smaller weights. How far can we go before it's
too small?

    我们正朝着正确的方向前进：使用{[}-1, 1)时精度和损失都更好。我们仍然需要更小的weights。在变得太小之前，我们还能走多远？

    \subsubsection{Too small}\label{too-small}

Let's compare {[}-0.1, 0.1), {[}-0.01, 0.01), and {[}-0.001, 0.001) to
see how small is too small. We'll also set
\texttt{plot\_n\_batches=None} to show all the batches in the plot.

    \subsection{太小}\label{ux592aux5c0f}

让我们对比{[}-0.1, 0.1)、{[}-0.01, 0.01)和{[}-0.001, 0.001)，看看多小才算太小。我们还会设置
\texttt{plot\_n\_batches=None} 来在图中展示所有的batches。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}13}]:} \PY{n}{uniform\PYZus{}neg01to01\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.1}\PY{p}{,} \PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.1}\PY{p}{,} \PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.1}\PY{p}{,} \PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{uniform\PYZus{}neg001to001\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.01}\PY{p}{,} \PY{l+m+mf}{0.01}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.01}\PY{p}{,} \PY{l+m+mf}{0.01}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.01}\PY{p}{,} \PY{l+m+mf}{0.01}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{uniform\PYZus{}neg0001to0001\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.001}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.001}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mf}{0.001}\PY{p}{,} \PY{l+m+mf}{0.001}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
             \PY{n}{mnist}\PY{p}{,}
             \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}1, 1) vs [\PYZhy{}0.1, 0.1) vs [\PYZhy{}0.01, 0.01) vs [\PYZhy{}0.001, 0.001)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
             \PY{p}{[}
                 \PY{p}{(}\PY{n}{uniform\PYZus{}neg1to1\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}1, 1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{uniform\PYZus{}neg01to01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}0.1, 0.1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{uniform\PYZus{}neg001to001\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}0.01, 0.01)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{uniform\PYZus{}neg0001to0001\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}0.001, 0.001)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{,}
             \PY{n}{plot\PYZus{}n\PYZus{}batches}\PY{o}{=}\PY{k+kc}{None}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_23_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   90.780\% -- [-1, 1)
   96.660\% -- [-0.1, 0.1)
   95.240\% -- [-0.01, 0.01)
   94.040\% -- [-0.001, 0.001)
Loss
    6.109  -- [-1, 1)
    0.117  -- [-0.1, 0.1)
    0.215  -- [-0.01, 0.01)
    0.197  -- [-0.001, 0.001)

    \end{Verbatim}

    Looks like anything {[}-0.01, 0.01) or smaller is too small. Let's
compare this to our typical rule of using the range \(y=1/\sqrt{n}\).

    看起来{[}-0.01,0.01)或更小的范围就太小了。让我们拿经典的规则（范围在
\(y = 1 / \sqrt{n}\)）和它们对比。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}8}]:} \PY{k+kn}{import} \PY{n+nn}{numpy} \PY{k}{as} \PY{n+nn}{np}
        
        \PY{n}{general\PYZus{}rule\PYZus{}weights} \PY{o}{=} \PY{p}{[}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{o}{/}\PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{,} \PY{l+m+mi}{1}\PY{o}{/}\PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{)}\PY{p}{)}\PY{p}{,}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{o}{/}\PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{,} \PY{l+m+mi}{1}\PY{o}{/}\PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{)}\PY{p}{)}\PY{p}{,}
            \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}uniform}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{o}{\PYZhy{}}\PY{l+m+mi}{1}\PY{o}{/}\PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{,} \PY{l+m+mi}{1}\PY{o}{/}\PY{n}{np}\PY{o}{.}\PY{n}{sqrt}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{[}\PY{l+m+mi}{0}\PY{p}{]}\PY{p}{)}\PY{p}{)}\PY{p}{)}
        \PY{p}{]}
        
        \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
            \PY{n}{mnist}\PY{p}{,}
            \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}0.1, 0.1) vs General Rule}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
            \PY{p}{[}
                \PY{p}{(}\PY{n}{uniform\PYZus{}neg01to01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{[\PYZhy{}0.1, 0.1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                \PY{p}{(}\PY{n}{general\PYZus{}rule\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{General Rule}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{,}
            \PY{n}{plot\PYZus{}n\PYZus{}batches}\PY{o}{=}\PY{k+kc}{None}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_26_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   96.880\% -- [-0.1, 0.1)
   96.900\% -- General Rule
Loss
    0.169  -- [-0.1, 0.1)
    0.088  -- General Rule

    \end{Verbatim}

    The range we found and \(y=1/\sqrt{n}\) are really close.

Since the uniform distribution has the same chance to pick anything in
the range, what if we used a distribution that had a higher chance of
picking numbers closer to 0? Let's look at the normal distribution.

\subsubsection{Normal Distribution}\label{normal-distribution}

Unlike the uniform distribution, the
\href{https://en.wikipedia.org/wiki/Normal_distribution}{normal
distribution} has a higher likelihood of picking numbers close to its
mean. To visualize it, let's plot values from TensorFlow's
\texttt{tf.random\_normal} function in a histogram.

\begin{quote}
\href{https://www.tensorflow.org/api_docs/python/tf/random_normal}{tf.random\_normal(shape,
mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)}
\end{quote}

\begin{quote}
Outputs random values from a normal distribution.
\end{quote}

\begin{quote}
\begin{itemize}
\tightlist
\item
  \textbf{shape:} A 1-D integer Tensor or Python array. The shape of the
  output tensor.
\item
  \textbf{mean:} A 0-D Tensor or Python value of type dtype. The mean of
  the normal distribution.
\item
  \textbf{stddev:} A 0-D Tensor or Python value of type dtype. The
  standard deviation of the normal distribution.
\item
  \textbf{dtype:} The type of the output.
\item
  \textbf{seed:} A Python integer. Used to create a random seed for the
  distribution. See tf.set\_random\_seed for behavior.
\item
  \textbf{name:} A name for the operation (optional).
\end{itemize}
\end{quote}

    我们发现的范围和 \(y = 1 / \sqrt{n}\) 十分接近。
既然均匀分布从区间内取到任何数字的几率相同，如果我们改用一个有更高几率取到靠近0的数字的分布会怎样？让我们看看正态分布。

\subsection{Normal Distribution
正态分布}\label{normal-distribution-ux6b63ux6001ux5206ux5e03}

和均匀分布不同，正态分布有更高的几率取到靠近其均值的数字。为了可视化，让我们把
\texttt{tf.random\_normal} 函数生成的值画成直方图。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}9}]:} \PY{n}{helper}\PY{o}{.}\PY{n}{hist\PYZus{}dist}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Random Normal (mean=0.0, stddev=1.0)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}normal}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{1000}\PY{p}{]}\PY{p}{)}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_29_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    Let's compare the normal distribution against the previous uniform
distribution.

    让我们拿之前的均匀分布对比下正态分布

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}10}]:} \PY{n}{normal\PYZus{}01\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}normal}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{n}{stddev}\PY{o}{=}\PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}normal}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{n}{stddev}\PY{o}{=}\PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{random\PYZus{}normal}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{n}{stddev}\PY{o}{=}\PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
             \PY{n}{mnist}\PY{p}{,}
             \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Uniform [\PYZhy{}0.1, 0.1) vs Normal stddev 0.1}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
             \PY{p}{[}
                 \PY{p}{(}\PY{n}{uniform\PYZus{}neg01to01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Uniform [\PYZhy{}0.1, 0.1)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{normal\PYZus{}01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Normal stddev 0.1}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_32_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   96.920\% -- Uniform [-0.1, 0.1)
   97.200\% -- Normal stddev 0.1
Loss
    0.103  -- Uniform [-0.1, 0.1)
    0.099  -- Normal stddev 0.1

    \end{Verbatim}

    The normal distribution gave a slight improvement in accuracy and loss.
Let's move closer to 0 and drop picked numbers that are more than
\texttt{x} standard deviations away. This distribution is called the
\href{https://en.wikipedia.org/wiki/Truncated_normal_distribution}{Truncated
Normal Distribution}.

\subsubsection{Truncated Normal
Distribution}\label{truncated-normal-distribution}

\begin{quote}
\href{https://www.tensorflow.org/api_docs/python/tf/truncated_normal}{tf.truncated\_normal(shape,
mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)}
\end{quote}

\begin{quote}
Outputs random values from a truncated normal distribution.
\end{quote}

\begin{quote}
The generated values follow a normal distribution with specified mean
and standard deviation, except that values whose magnitude is more than
2 standard deviations from the mean are dropped and re-picked.
\end{quote}

\begin{quote}
\begin{itemize}
\tightlist
\item
  \textbf{shape:} A 1-D integer Tensor or Python array. The shape of the
  output tensor.
\item
  \textbf{mean:} A 0-D Tensor or Python value of type dtype. The mean of
  the truncated normal distribution.
\item
  \textbf{stddev:} A 0-D Tensor or Python value of type dtype. The
  standard deviation of the truncated normal distribution.
\item
  \textbf{dtype:} The type of the output.
\item
  \textbf{seed:} A Python integer. Used to create a random seed for the
  distribution. See tf.set\_random\_seed for behavior.
\item
  \textbf{name:} A name for the operation (optional).
\end{itemize}
\end{quote}

    正态分布在精度和损失上有一点点提升。让我们更靠近0，丢弃偏离均值超过 \texttt{x} 个标准差的数字。这个分布称为截断正态分布。

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}11}]:} \PY{n}{helper}\PY{o}{.}\PY{n}{hist\PYZus{}dist}\PY{p}{(}\PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Truncated Normal (mean=0.0, stddev=1.0)}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,} \PY{n}{tf}\PY{o}{.}\PY{n}{truncated\PYZus{}normal}\PY{p}{(}\PY{p}{[}\PY{l+m+mi}{1000}\PY{p}{]}\PY{p}{)}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_35_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    Again, let's compare the truncated normal results against the previous
normal distribution.

    让我们和之前的结果进行对比

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}16}]:} \PY{n}{trunc\PYZus{}normal\PYZus{}01\PYZus{}weights} \PY{o}{=} \PY{p}{[}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{truncated\PYZus{}normal}\PY{p}{(}\PY{n}{layer\PYZus{}1\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{n}{stddev}\PY{o}{=}\PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{truncated\PYZus{}normal}\PY{p}{(}\PY{n}{layer\PYZus{}2\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{n}{stddev}\PY{o}{=}\PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}\PY{p}{,}
             \PY{n}{tf}\PY{o}{.}\PY{n}{Variable}\PY{p}{(}\PY{n}{tf}\PY{o}{.}\PY{n}{truncated\PYZus{}normal}\PY{p}{(}\PY{n}{layer\PYZus{}3\PYZus{}weight\PYZus{}shape}\PY{p}{,} \PY{n}{stddev}\PY{o}{=}\PY{l+m+mf}{0.1}\PY{p}{)}\PY{p}{)}
         \PY{p}{]}
         
         \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
             \PY{n}{mnist}\PY{p}{,}
             \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Normal vs Truncated Normal}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
             \PY{p}{[}
                 \PY{p}{(}\PY{n}{normal\PYZus{}01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Normal}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{trunc\PYZus{}normal\PYZus{}01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Truncated Normal}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{)}
\end{Verbatim}

    There's no difference between the two, but that's because the neural
network we're using is too small. A larger neural network will pick more
points on the normal distribution, increasing the likelihood that its
choices are larger than 2 standard deviations from the mean.

We've come a long way from the first set of weights we tested. Let's see
the difference between the weights we used then and now.

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor} }]:} 
\end{Verbatim}

    \begin{Verbatim}[commandchars=\\\{\}]
{\color{incolor}In [{\color{incolor}13}]:} \PY{n}{helper}\PY{o}{.}\PY{n}{compare\PYZus{}init\PYZus{}weights}\PY{p}{(}
             \PY{n}{mnist}\PY{p}{,}
             \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Baseline vs Truncated Normal}\PY{l+s+s1}{\PYZsq{}}\PY{p}{,}
             \PY{p}{[}
                 \PY{p}{(}\PY{n}{basline\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Baseline}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{,}
                 \PY{p}{(}\PY{n}{trunc\PYZus{}normal\PYZus{}01\PYZus{}weights}\PY{p}{,} \PY{l+s+s1}{\PYZsq{}}\PY{l+s+s1}{Truncated Normal}\PY{l+s+s1}{\PYZsq{}}\PY{p}{)}\PY{p}{]}\PY{p}{)}
\end{Verbatim}

    \begin{center}
    \adjustimage{max size={0.9\linewidth}{0.9\paperheight}}{weight_initialization_files/weight_initialization_41_0.png}
    \end{center}
    { \hspace*{\fill} \\}
    
    \begin{Verbatim}[commandchars=\\\{\}]
After 858 Batches (2 Epochs):
Validation Accuracy
   66.100\% -- Baseline
   97.040\% -- Truncated Normal
Loss
   24.090  -- Baseline
    0.075  -- Truncated Normal

    \end{Verbatim}

    That's a huge difference. You can barely see the truncated normal line.
However, this is not the end of your learning path. We've provided more
resources for initializing weights in the classroom!

    真是巨大的差别，你几乎看不到截断正态的线。然而这不是你学习之路的终点，我们在课程中提供了更多关于初始化weights的资源。


    % Add a bibliography block to the postdoc
    
    
    
    \end{document}
