{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# 查看当前挂载的数据集目录, 该目录下的变更重启环境后会自动还原\n",
    "# View dataset directory. \n",
    "# This directory will be recovered automatically after resetting environment. \n",
    "!ls /home/aistudio/data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# 查看工作区文件, 该目录下的变更将会持久保存. 请及时清理不必要的文件, 避免加载过慢.\n",
    "# View personal work directory. \n",
    "# All changes under this directory will be kept even after reset. \n",
    "# Please clean unnecessary files in time to speed up environment loading. \n",
    "!ls /home/aistudio/work"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://mirror.baidu.com/pypi/simple/\n",
      "Collecting beautifulsoup4\n",
      "\u001b[?25l  Downloading https://mirror.baidu.com/pypi/packages/d1/41/e6495bd7d3781cee623ce23ea6ac73282a373088fcd0ddc809a047b18eae/beautifulsoup4-4.9.3-py3-none-any.whl (115kB)\n",
      "\u001b[K     |████████████████████████████████| 122kB 11.9MB/s eta 0:00:01\n",
      "\u001b[?25hCollecting soupsieve>1.2; python_version >= \"3.0\" (from beautifulsoup4)\n",
      "  Downloading https://mirror.baidu.com/pypi/packages/02/fb/1c65691a9aeb7bd6ac2aa505b84cb8b49ac29c976411c6ab3659425e045f/soupsieve-2.1-py3-none-any.whl\n",
      "Installing collected packages: soupsieve, beautifulsoup4\n",
      "Successfully installed beautifulsoup4-4.9.3 soupsieve-2.1\n"
     ]
    }
   ],
   "source": [
    "# 如果需要进行持久化安装, 需要使用持久化路径, 如下方代码示例:\n",
    "# If a persistence installation is required, \n",
    "# you need to use the persistence path as the following: \n",
    "!mkdir /home/aistudio/external-libraries\n",
    "!pip install beautifulsoup4 -t /home/aistudio/external-libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# 同时添加如下代码, 这样每次环境(kernel)启动的时候只要运行下方代码即可: \n",
    "# Also add the following code, \n",
    "# so that every time the environment (kernel) starts, \n",
    "# just run the following code: \n",
    "import sys \n",
    "sys.path.append('/home/aistudio/external-libraries')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 4.1 模型构造"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 4.1.1 继承`Layer`类来构造模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "`Layer`类是`nn`模块里提供的一个模型构造类，是所有神经网络模块的基类，我们可以继承它来定义我们想要的模型。下面继承`Layer`类构造本节开头提到的多层感知机。这里定义的`MLP`类重载了`Layer`类的`__init__`函数和`forward`函数。它们分别用于创建模型参数和定义前向计算。前向计算也即正向传播。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 205,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import paddle\r\n",
    "from paddle import nn\r\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 206,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "class MLP(nn.Layer):\r\n",
     "    \"\"\"Multilayer perceptron: 784 -> 256 (ReLU) -> 10.\"\"\"\r\n",
     "    # Declare the layers that hold model parameters: two fully connected layers.\r\n",
     "    def __init__(self, **kwargs):\r\n",
     "        # Call the parent Layer constructor for the required initialization\r\n",
     "        # (parameter registration, naming, etc.).\r\n",
     "        super(MLP, self).__init__()\r\n",
     "        self.hidden = nn.Linear(784, 256) # hidden layer\r\n",
     "        self.act = nn.ReLU()\r\n",
     "        self.output = nn.Linear(256, 10)  # output layer\r\n",
     "    # Forward pass: compute the model output from input x.\r\n",
     "    def forward(self, x):\r\n",
     "        a = self.act(self.hidden(x))\r\n",
     "        return self.output(a)\r\n",
     "\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "以上的`MLP`类中无须定义反向传播函数。系统将通过自动求梯度而自动生成反向传播所需的`backward`函数。\n",
    "我们可以实例化`MLP`类得到模型变量`net`。下面的代码初始化`net`并传入输入数据`X`做一次前向计算。其中，`net(X)`会调用`MLP`继承自`Layer`类的`__call__`函数，这个函数将调用`MLP`类定义的`forward`函数来完成前向计算。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 207,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<__main__.MLP object at 0x7f98c402ab90>\n",
      "Tensor(shape=[2, 10], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [[ 0.28169093,  0.19629075,  1.38183379,  1.10846949, -0.13574386,  0.65056670, -1.47797585, -1.14960587, -0.05882573, -0.23757470],\n",
      "        [-0.24633378,  0.01955763,  1.57667279,  0.71510363, -0.38847020,  0.86502779, -0.76577711, -0.51233286,  0.48823977, -0.58465499]])\n"
     ]
    }
   ],
   "source": [
     "# Instantiate MLP and run one forward pass. net(X) invokes Layer.__call__,\r\n",
     "# which dispatches to MLP.forward.\r\n",
     "X = paddle.rand(shape=[2, 784], dtype='float32')\r\n",
     "net = MLP()\r\n",
     "print(net)\r\n",
     "output = net(X)\r\n",
     "print(output)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### 4.1.2.1 `Sequential`类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 208,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Tensor(shape=[2, 10], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
       "       [[ 0.18934998, -0.19580102,  0.47853044, -0.19391450,  0.17017832,  0.06374720,  0.32739627,  0.33177695,  0.23437305,  0.05424647],\n",
       "        [ 0.24021885,  0.05470101,  0.22105788, -0.10538608,  0.20596528,  0.05485302,  0.15356833,  0.27489734,  0.10673514, -0.09995569]])"
      ]
     },
     "execution_count": 208,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x=paddle.rand(shape=[2, 20], dtype='float32')\r\n",
    "net=nn.Sequential(nn.Linear(20,256),nn.ReLU(),nn.Linear(256,10))\r\n",
    "net(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "在此示例中，我们通过实例化nn.Sequential来构建模型，其中各层应按其执行的顺序作为参数传递。 简而言之，nn.Sequential定义了一种特殊的模块，针对顺序的线性网络结构我们可以直接使用Sequential来快速完成组网，可以减少类的定义等代码编写"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "在以下代码段中,我们从头开始编写一个代码块"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "当模型的前向计算为简单串联各个层的计算时，`Sequential`类可以通过更加简单的方式定义模型。这正是`Sequential`类的目的：它可以接收一个子模块的有序列表（tuple）或者一系列子模块作为参数来逐一添加`Layer`的实例，而模型的前向计算就是将这些实例按添加的顺序逐一计算。\n",
    "\n",
    "下面我们实现一个与`Sequential`类有相同功能的`MySequential`类。这或许可以帮助读者更加清晰地理解`Sequential`类的工作机制。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 209,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "class MySequential(nn.Layer):\r\n",
     "    \"\"\"Re-implementation of Sequential: run sub-layers in the order they were added.\"\"\"\r\n",
     "    def __init__(self, *layers):\r\n",
     "        super(MySequential, self).__init__()\r\n",
     "        if len(layers) > 0 and isinstance(layers[0], tuple): # (name, layer) tuples were passed in\r\n",
     "            for name, layer in layers:\r\n",
     "                self.add_sublayer(name, layer)  # add_sublayer registers layer in self._sub_layers (an OrderedDict)\r\n",
     "        else:\r\n",
     "            for idx, layer in enumerate(layers):\r\n",
     "                self.add_sublayer(str(idx), layer)\r\n",
     "\r\n",
     "    def forward(self, input):\r\n",
     "        # self._sub_layers is an OrderedDict, so iteration follows insertion order\r\n",
     "        for layer in self._sub_layers.values():\r\n",
     "            input = layer(input)\r\n",
     "        return input"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 210,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<__main__.MySequential object at 0x7f98b60bdef0>\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Tensor(shape=[2, 10], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
       "       [[-0.43844655, -0.10962891,  0.62806201,  0.08198743,  0.17913347, -0.22641872, -0.12756041,  0.26064324,  0.06354272,  0.05840879],\n",
       "        [-0.51974988, -0.10903625,  0.39879355,  0.14114575,  0.08750352, -0.17261472, -0.09173751,  0.20404734, -0.03171259, -0.02290946]])"
      ]
     },
     "execution_count": 210,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net = MySequential(nn.Linear(20, 256), nn.ReLU(), nn.Linear(256, 10))\r\n",
    "print(net)\r\n",
    "net(x)\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 构造复杂的模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "虽然上面介绍的这些类可以使模型构造更加简单，且不需要定义`forward`函数，但直接继承`Layer`类可以极大地拓展模型构造的灵活性。下面我们构造一个稍微复杂点的网络`FancyMLP`。在这个网络中，我们通过`get_constant`函数创建训练中不被迭代的参数，即常数参数。在前向计算中，除了使用创建的常数参数外，我们还使用`Tensor`的函数和Python的控制流，并多次调用相同的层。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 211,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import paddle\r\n",
    "import paddle.fluid as fluid\r\n",
    "class FancyMLP(nn.Layer):\r\n",
    "    def __init__(self, **kwargs):\r\n",
    "        super(FancyMLP, self).__init__(**kwargs)\r\n",
    "        self.rand_weight=paddle.to_tensor(paddle.rand(shape=[20, 20], dtype='float32'),stop_gradient=False)# 不可训练参数（常数参数）\r\n",
    "        self.linear = nn.Linear(20, 20)\r\n",
    "        \r\n",
    "\r\n",
    "    def forward(self, x):\r\n",
    "        x = self.linear(x)\r\n",
    "        # 使用创建的常数参数，以及fluid.layer中的relu函数和paddle.tensor.mm函数\r\n",
    "        x = fluid.layers.relu(paddle.tensor.mm(x,self.rand_weight)+1)\r\n",
    "        # 复用全连接层。等价于两个全连接层共享参数\r\n",
    "        x = self.linear(x)\r\n",
    "        # 控制流，这里我们需要调用item函数来返回标量进行比较\r\n",
    "        while x.norm() > 1:\r\n",
    "            x /= 2\r\n",
    "        if x.norm()< 0.8:\r\n",
    "            x *= 10\r\n",
    "        return x.sum()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "在这个`FancyMLP`模型中，我们使用了常数权重`rand_weight`（注意它不是可训练模型参数）、做了矩阵乘法操作（`paddle.tensor.mm`）并重复使用了相同的`Linear`层。下面我们来测试该模型的前向计算。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 212,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<__main__.FancyMLP object at 0x7f98b6007950>\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
       "       [14.48553085])"
      ]
     },
     "execution_count": 212,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X = paddle.rand(shape=[2, 20], dtype='float32')\r\n",
    "net = FancyMLP()\r\n",
    "print(net)\r\n",
    "net(X)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "因为`FancyMLP`和`Sequential`类都是`Layer`类的子类，所以我们可以嵌套调用它们。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 215,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "class NestMLP(nn.Layer):\r\n",
     "    \"\"\"Wraps a Sequential (Linear 40->30 + ReLU) to show Layers can nest arbitrarily.\"\"\"\r\n",
     "    def __init__(self, **kwargs):\r\n",
     "        super(NestMLP, self).__init__(**kwargs)\r\n",
     "        self.net = nn.Sequential(nn.Linear(40, 30), nn.ReLU())\r\n",
     "\r\n",
     "    def forward(self, x):\r\n",
     "        return self.net(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 216,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<__main__.NestMLP object at 0x7f98b6004830>\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
       "       [-3.58070374])"
      ]
     },
     "execution_count": 216,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "net = nn.Sequential(NestMLP(), nn.Linear(30, 20), FancyMLP())\r\n",
    "X = paddle.rand(shape=[2, 40])\r\n",
    "print(net[0])\r\n",
    "net(X)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 小结\n",
    "\n",
    "* 可以通过继承`Layer`类来构造模型。\n",
    "* 针对顺序的线性网络结构我们可以直接使用Sequential来快速完成组网，可以减少类的定义等代码编写。\n",
    "* 针对一些比较复杂的网络结构，就可以使用Layer子类定义的方式来进行模型代码编写，在__init__构造函数中进行组网Layer的声明，在forward中使用声明的Layer变量进行前向计算。子类组网方式也可以实现sublayer的复用，针对相同的layer可以在构造函数中一次性定义，在forward中多次调用。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 4.2 模型参数的访问、初始化和共享\n",
    "\n",
    "在3.3节（线性回归的简洁实现）中，我们通过`init`模块来初始化模型的参数。我们也介绍了访问模型参数的简单方法。本节将深入讲解如何访问和初始化模型参数，以及如何在多个层之间共享同一份模型参数。\n",
    "\n",
    "我们先定义一个与上一节中相同的含单隐藏层的多层感知机。我们依然使用默认方式初始化它的参数，并做一次前向计算。与之前不同的是，在这里我们从`nn`中导入了`init`模块，它包含了多种模型初始化方法。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 236,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import paddle\r\n",
    "from paddle  import nn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 237,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<paddle.fluid.dygraph.container.Sequential object at 0x7f98b6022530>\n",
      "Tensor(shape=[2, 4], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n",
      "       [[0.26876754, 0.67776614, 0.44837424, 0.47268873],\n",
      "        [0.15742068, 0.85344201, 0.49816018, 0.69069755]])\n",
      "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [-0.35955530])\n"
     ]
    }
   ],
   "source": [
    "net=nn.Sequential(nn.Linear(4,3),nn.ReLU(),nn.Linear(3,1))\r\n",
    "x=paddle.rand(shape=[2, 4])\r\n",
    "y=net(x).sum()\r\n",
    "print(net)\r\n",
    "print(x)\r\n",
    "print(y)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 4.2.1 访问模型参数\n",
    "\n",
     "对于使用Sequential类构造的神经网络，我们可以通过方括号[]来访问网络的任一层。回忆一下上一节中提到的Sequential类与Layer类的继承关系。对于Sequential实例中含模型参数的层，我们可以通过Layer类的`parameters()`方法来访问该层包含的所有参数。下面，访问多层感知机net中隐藏层的所有参数。索引0表示隐藏层为Sequential实例最先添加的层。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "让我们从如何从已经知道的模型访问参数开始。 当通过Sequential类定义模型时，我们首先可以通过将模型索引为列表来访问任何层。 每个图层的参数都方便地位于其属性中。 我们可以如下检查第二个全连接层的参数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 238,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "OrderedDict([('weight', Parameter containing:\n",
      "Tensor(shape=[3, 1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [[ 0.98203892],\n",
      "        [-0.97952133],\n",
      "        [ 0.02883578]])), ('bias', Parameter containing:\n",
      "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [0.]))])\n"
     ]
    }
   ],
   "source": [
    "print(net[2].state_dict())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "回忆一下上一节中提到的`Sequential`类与`Layer`类的继承关系。对于`Sequential`实例中含模型参数的层，我们可以通过`Layer`类的`parameters()`或者`named_parameters`方法来访问所有参数（以迭代器的形式返回），后者除了返回参数`Tensor`外还会返回其名字。下面，访问多层感知机`net`的所有参数："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 239,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'generator'>\n",
      "0.weight Parameter containing:\n",
      "Tensor(shape=[4, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [[-0.11903492, -0.29600903,  0.45662007],\n",
      "        [-0.41848800,  0.56108201,  0.04876074],\n",
      "        [ 0.29534313,  0.72784293,  0.68447679],\n",
      "        [-0.29001480, -0.90568084, -0.89422840]])\n",
      "0.bias Parameter containing:\n",
      "Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [0., 0., 0.])\n",
      "2.weight Parameter containing:\n",
      "Tensor(shape=[3, 1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [[ 0.98203892],\n",
      "        [-0.97952133],\n",
      "        [ 0.02883578]])\n",
      "2.bias Parameter containing:\n",
      "Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [0.])\n"
     ]
    }
   ],
   "source": [
    "print(type(net.named_parameters()))\r\n",
    "for name, param in net.named_parameters():\r\n",
    "    print(name, param)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "可见返回的名字自动加上了层数的索引作为前缀。\n",
    "我们再来访问`net`中单层的参数。对于使用`Sequential`类构造的神经网络，我们可以通过方括号`[]`来访问网络的任一层。索引0表示隐藏层为`Sequential`实例最先添加的层。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 252,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "weight 12 <class 'paddle.fluid.framework.ParamBase'>\n",
      "bias 3 <class 'paddle.fluid.framework.ParamBase'>\n"
     ]
    }
   ],
   "source": [
    "for name, param in net[0].named_parameters():\r\n",
    "    print(name,param.size, type(param))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "因为这里是单层的所以没有了层数索引的前缀。另外返回的`param`的类型为`paddle.fluid.framework.ParamBase`，其实这是`Tensor`的子类，和`Tensor`不同的是如果一个`Tensor`是`Parameter`，那么它会自动被添加到模型的参数列表里，来看下面这个例子。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 257,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "weight1\n"
     ]
    }
   ],
   "source": [
     "import paddle.fluid as fluid\r\n",
     "class MyModel(nn.Layer):\r\n",
     "    \"\"\"Shows which attributes end up in the model's parameter list.\"\"\"\r\n",
     "    def __init__(self, **kwargs):\r\n",
     "        super(MyModel, self).__init__(**kwargs)\r\n",
     "        # create_parameter returns a Parameter, so it is auto-registered\r\n",
     "        self.weight1 =fluid.layers.create_parameter(shape=[20, 20], dtype='float32')\r\n",
     "        # a plain Tensor is NOT registered as a model parameter\r\n",
     "        self.weight2 = paddle.rand(shape=[20, 20])\r\n",
     "    def forward(self, x):\r\n",
     "        pass\r\n",
     "\r\n",
     "n = MyModel()\r\n",
     "# only weight1 appears in named_parameters()\r\n",
     "for name, param in n.named_parameters():\r\n",
     "    print(name)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "上面的代码中`weight1`在参数列表中但是`weight2`却没在参数列表中。\n",
    "\n",
    "因为`Parameter`是`Tensor`，即`Tensor`拥有的属性它都有，比如可以根据`data`来访问参数数值，用`grad`来访问参数梯度。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 260,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[-0.11903492 -0.29600903  0.45662007]\n",
      " [-0.418488    0.561082    0.04876074]\n",
      " [ 0.29534313  0.7278429   0.6844768 ]\n",
      " [-0.2900148  -0.90568084 -0.8942284 ]]\n",
      "None\n",
      "[[ 0.         -0.41746044  0.00775012]\n",
      " [ 0.         -1.499851    0.01954391]\n",
      " [ 0.         -0.92715067  0.01292922]\n",
      " [ 0.         -1.1395617   0.01363035]]\n"
     ]
    }
   ],
   "source": [
     "# Access the hidden layer's weight; its grad is None until backward() runs.\r\n",
     "weight_0 = list(net[0].parameters())[0]\r\n",
     "print(weight_0.numpy())\r\n",
     "print(weight_0.grad)  # None: backward has not been called yet\r\n",
     "y.backward()\r\n",
     "print(weight_0.grad)  # gradient populated by backward()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 4.2.2 初始化模型参数\n",
    "\n",
     "我们在3.15节（数值稳定性和模型初始化）中提到，Paddle中`nn.Layer`的模块参数都采取了较为合理的初始化策略。\n",
    "但我们经常需要使用其他方法来初始化权重。Paddle中`init`模块里提供了多种预设的初始化方法。在下面的例子中，我们将权重参数初始化成均值为0、标准差为0.01的正态分布随机数，并依然将偏差参数清零。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 267,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.weight [[-0.11903492 -0.29600903  0.45662007]\n",
      " [-0.418488    0.561082    0.04876074]\n",
      " [ 0.29534313  0.7278429   0.6844768 ]\n",
      " [-0.2900148  -0.90568084 -0.8942284 ]]\n",
      "2.weight [[ 0.9820389 ]\n",
      " [-0.97952133]\n",
      " [ 0.02883578]]\n"
     ]
    }
   ],
   "source": [
    "import paddle.fluid as fluid\r\n",
    "for name, param in net.named_parameters():\r\n",
    "    if 'weight' in name:\r\n",
    "        paddle.fluid.initializer.Normal(loc=0,scale=0.2)\r\n",
    "        print(name, param.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "下面使用常数来初始化权重参数。\n",
    "\n",
    "class paddle.fluid.initializer.ConstantInitializer(value=0.0, force_cpu=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 271,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.bias [0. 0. 0.]\n",
      "2.bias [0.]\n",
      "0.bias [0. 0. 0.]\n",
      "2.bias [0.]\n",
      "0.bias [0. 0. 0.]\n",
      "2.bias [0.]\n",
      "0.bias [0. 0. 0.]\n",
      "2.bias [0.]\n"
     ]
    }
   ],
   "source": [
    "for name, param in net.named_parameters():\r\n",
    "    for name, param in net.named_parameters():\r\n",
    "        if 'bias' in name:\r\n",
    "            paddle.fluid.initializer.ConstantInitializer(value=0)\r\n",
    "            print(name, param.numpy())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 4.2.3 自定义初始化方法"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "有时候我们需要的初始化方法并没有在`init`模块中提供。这时，可以实现一个初始化方法，从而能够像使用其他初始化方法那样使用它。\n",
    "在这之前我们先来看看Paddle是怎么实现这些初始化方法的，例如`paddle.fluid.initializer.Normal`："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 279,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def Normal(m, mean=0, std=1):\r\n",
    "    with fluid.dygraph.no_grad():\r\n",
    "        return paddle.fluid.initializer.Normal(mean, std)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "可以看到这就是一个inplace改变`Tensor`值的函数，而且这个过程是不记录梯度的。\n",
    "类似的我们来实现一个自定义的初始化方法。在下面的例子里，我们令权重有一半概率初始化为0，有另一半概率初始化为$[-10,-5]$和$[5,10]$两个区间里均匀分布的随机数。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 333,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "func() missing 1 required positional argument: 'x'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-333-ee8bde5fb876>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      9\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparam\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mnet\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnamed_parameters\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     10\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;34m'weight'\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m         \u001b[0minit_weight_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparam\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     12\u001b[0m         \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparam\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-333-ee8bde5fb876>\u001b[0m in \u001b[0;36minit_weight_\u001b[0;34m(m)\u001b[0m\n\u001b[1;32m      4\u001b[0m     \u001b[0;32mwith\u001b[0m \u001b[0mfluid\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdygraph\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mno_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m         \u001b[0mfluid\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minitializer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mUniformInitializer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlow\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mhigh\u001b[0m\u001b[0;34m=\u001b[0m \u001b[0;36m10\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m         \u001b[0mfluid\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mabs\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m>=\u001b[0m\u001b[0;36m5\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mTypeError\u001b[0m: func() missing 1 required positional argument: 'x'"
     ]
    }
   ],
   "source": [
    "import paddle.fluid as fluid\r\n",
    "\r\n",
    "def init_weight_(m):\r\n",
    "    with fluid.dygraph.no_grad():\r\n",
    "        fluid.initializer.UniformInitializer(low=-10,high= 10)\r\n",
    "        m*=m.fluid.layers.abs()>=5\r\n",
    "\r\n",
    "\r\n",
    "for name, param in net.named_parameters():\r\n",
    "    if 'weight' in name:\r\n",
    "        init_weight_(param)\r\n",
    "        print(name, param)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "此外，参考2.3.2节，我们还可以通过改变这些参数的`data`来改写模型参数值同时不会影响梯度:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 326,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.bias Tensor(shape=[3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [1., 1., 1.])\n",
      "2.bias Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n",
      "       [1.])\n"
     ]
    }
   ],
   "source": [
    "for name, param in net.named_parameters():\r\n",
    "    if 'bias' in name:\r\n",
    "        param += 1\r\n",
    "        print(name, param)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 2.0.0b0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
