{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "0522.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/yananma/5_programs_per_day/blob/master/0522.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GPVLzUGFUuWd",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "!apt-get update && sudo apt-get install -y build-essential git libgfortran3\n",
        "!wget https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda-repo-ubuntu1804-10-0-local-10.0.130-410.48_1.0-1_amd64\n",
        "!sudo dpkg -i cuda-repo-ubuntu1804-10-0-local-10.0.130-410.48_1.0-1_amd64.deb\n",
        "!sudo apt-key add /var/cuda-repo-<version>/7fa2af80.pub\n",
        "!sudo apt-get update\n",
        "!sudo apt-get install cuda        "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-izphx-QVgGu",
        "colab_type": "code",
        "outputId": "ad0c4905-185f-4c10-97f2-767ce0cc4fce",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "!cat /usr/local/cuda/version.txt"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "CUDA Version 10.0.130\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0I6qB7lMqg9F",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "!pip install mxnet-cu100 d2lzh\n",
        "import mxnet as mx\n",
        "import d2lzh as d2l"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "7TTX3T62rWY_",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import autograd, gluon, init, nd \n",
        "from mxnet.gluon import loss as gloss, nn \n",
        "import time "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1D1LdG3frqlJ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net = nn.Sequential()\n",
        "net.add(nn.Conv2D(channels=6, kernel_size=5, activation='sigmoid'), \n",
        "    nn.MaxPool2D(pool_size=2, strides=2), \n",
        "    nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'), \n",
        "    nn.MaxPool2D(pool_size=2, strides=2), \n",
        "    nn.Dense(120, activation='sigmoid'), \n",
        "    nn.Dense(84, activation='sigmoid'), \n",
        "    nn.Dense(10))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9ZPquBBtsWZT",
        "colab_type": "code",
        "outputId": "35ddd73d-ad37-4d76-c7b2-4e11cf4e967f",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 140
        }
      },
      "source": [
        "X = nd.random.uniform(shape=(1, 1, 28, 28))\n",
        "net.initialize()\n",
        "for layer in net:\n",
        "    X = layer(X)\n",
        "    print(layer.name, 'output shape:\\t', X.shape)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "conv0 output shape:\t (1, 6, 24, 24)\n",
            "pool0 output shape:\t (1, 6, 12, 12)\n",
            "conv1 output shape:\t (1, 16, 8, 8)\n",
            "pool1 output shape:\t (1, 16, 4, 4)\n",
            "dense0 output shape:\t (1, 120)\n",
            "dense1 output shape:\t (1, 84)\n",
            "dense2 output shape:\t (1, 10)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "cLh07BJ2s7Eh",
        "colab_type": "code",
        "outputId": "2c63e141-eb9c-451d-98ab-48b4cd1230b2",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 107
        }
      },
      "source": [
        "batch_size = 256 \n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Downloading /root/.mxnet/datasets/fashion-mnist/train-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-images-idx3-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/train-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-labels-idx1-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/t10k-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-images-idx3-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/t10k-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-labels-idx1-ubyte.gz...\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "V5w2TnuitFjp",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def try_gpu():\n",
        "    try:\n",
        "        ctx = mx.gpu()\n",
        "        _ = nd.zeros((1,), ctx=ctx)\n",
        "    except mx.base.MXNetError:\n",
        "        ctx = mx.cpu()\n",
        "    return ctx "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "G_lCsDUitgQx",
        "colab_type": "code",
        "outputId": "87bff5ee-20c8-4911-802c-3ab6f6b32633",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "ctx = try_gpu()\n",
        "ctx "
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "gpu(0)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 11
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "IpOCdzdktjoi",
        "colab_type": "code",
        "outputId": "61b38906-b330-4311-b98b-26e8941a502b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "cttx = mx.gpu(1)\n",
        "cttx"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "gpu(1)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 12
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dlqeAgljtsjE",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def evaluate_accuracy(data_iter, net, ctx):\n",
        "    acc_sum, n = nd.array([0], ctx=ctx), 0 \n",
        "    for X, y in data_iter:\n",
        "        X, y = X.as_in_context(ctx), y.as_in_context(ctx).astype('float32')\n",
        "        acc_sum += (net(X).argmax(axis=1) == y).sum()\n",
        "        n += y.size \n",
        "    return acc_sum.asscalar() / n "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "cmyd6507up0b",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs):\n",
        "    print('training on', ctx)\n",
        "    loss = gloss.SoftmaxCrossEntropyLoss()\n",
        "    for epoch in range(num_epochs):\n",
        "        train_l_sum, train_acc_sum, n, start = 0.00, 0.00, 0, time.time()\n",
        "        for X, y in train_iter:\n",
        "            X, y = X.as_in_context(ctx), y.as_in_context(ctx)\n",
        "            with autograd.record():\n",
        "                y_hat = net(X)\n",
        "                l = loss(y_hat, y).sum()\n",
        "            l.backward()\n",
        "            trainer.step(batch_size)\n",
        "            y = y.astype('float32')\n",
        "            train_l_sum += l.asscalar()\n",
        "            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()\n",
        "            n += y.size\n",
        "        test_acc = evaluate_accuracy(test_iter, net, ctx)\n",
        "        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n",
        "            % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc, time.time() - start))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "92kB_6p-w2AD",
        "colab_type": "code",
        "outputId": "67dd6364-4942-4066-f32d-f9cf11b07a2f",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "lr, num_epochs = 0.9, 5 \n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 2.3202, train acc 0.101, test acc 0.100, time 7.9 sec\n",
            "epoch 2, loss 1.9381, train acc 0.260, test acc 0.590, time 8.0 sec\n",
            "epoch 3, loss 0.9748, train acc 0.612, test acc 0.699, time 7.7 sec\n",
            "epoch 4, loss 0.7639, train acc 0.702, test acc 0.738, time 7.8 sec\n",
            "epoch 5, loss 0.6612, train acc 0.738, test acc 0.762, time 7.8 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Dl5XVTlcxfn7",
        "colab_type": "text"
      },
      "source": [
        "## 5.6 深度卷积神经网络 ( AlexNet )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4SSYoMRDzkiC",
        "colab_type": "code",
        "outputId": "cce609bc-d3aa-43e0-d7cc-842c066a1694",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 316
        }
      },
      "source": [
        "!nvidia-smi"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Fri Nov  8 05:24:31 2019       \n",
            "+-----------------------------------------------------------------------------+\n",
            "| NVIDIA-SMI 430.50       Driver Version: 418.67       CUDA Version: 10.1     |\n",
            "|-------------------------------+----------------------+----------------------+\n",
            "| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n",
            "| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n",
            "|===============================+======================+======================|\n",
            "|   0  Tesla K80           Off  | 00000000:00:04.0 Off |                    0 |\n",
            "| N/A   68C    P8    32W / 149W |      0MiB / 11441MiB |      0%      Default |\n",
            "+-------------------------------+----------------------+----------------------+\n",
            "                                                                               \n",
            "+-----------------------------------------------------------------------------+\n",
            "| Processes:                                                       GPU Memory |\n",
            "|  GPU       PID   Type   Process name                             Usage      |\n",
            "|=============================================================================|\n",
            "|  No running processes found                                                 |\n",
            "+-----------------------------------------------------------------------------+\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "FaWouLrg2ZHd",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import gluon, init, nd \n",
        "from mxnet.gluon import data as gdata, nn \n",
        "import os \n",
        "import sys \n",
        "\n",
        "net = nn.Sequential()\n",
        "net.add(nn.Conv2D(96, kernel_size=11, strides=4, activation='relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2), \n",
        "    nn.Conv2D(256, kernel_size=5, padding=2, activation='relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2), \n",
        "    nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), \n",
        "    nn.Conv2D(384, kernel_size=3, padding=1, activation='relu'), \n",
        "    nn.Conv2D(256, kernel_size=3, padding=1, activation='relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2), \n",
        "    nn.Dense(4096, activation='relu'), nn.Dropout(0.5), \n",
        "    nn.Dense(4096, activation='relu'), nn.Dropout(0.5), \n",
        "    nn.Dense(10)\n",
        "\n",
        "    )"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Iiq-7eQ18yP2",
        "colab_type": "code",
        "outputId": "f6c37083-f987-4242-ff8e-53fb373424e2",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 246
        }
      },
      "source": [
        "X = nd.random.uniform(shape=(1, 1, 224, 224))\n",
        "net.initialize()\n",
        "for layer in net:\n",
        "    X = layer(X)\n",
        "    print(layer.name, 'output shape:\\t', X.shape)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "conv2 output shape:\t (1, 96, 54, 54)\n",
            "pool2 output shape:\t (1, 96, 26, 26)\n",
            "conv3 output shape:\t (1, 256, 26, 26)\n",
            "pool3 output shape:\t (1, 256, 12, 12)\n",
            "conv4 output shape:\t (1, 384, 12, 12)\n",
            "conv5 output shape:\t (1, 384, 12, 12)\n",
            "conv6 output shape:\t (1, 256, 12, 12)\n",
            "pool4 output shape:\t (1, 256, 5, 5)\n",
            "dense3 output shape:\t (1, 4096)\n",
            "dropout0 output shape:\t (1, 4096)\n",
            "dense4 output shape:\t (1, 4096)\n",
            "dropout1 output shape:\t (1, 4096)\n",
            "dense5 output shape:\t (1, 10)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "j6KiNorD9IRn",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def load_data_fashion_mnist(batch_size, resize=None, root=os.path.join('~', '.mxnet', 'datasets', 'fashion-mnist')):\n",
        "    root = os.path.expanduser(root)\n",
        "    transformer = []\n",
        "    if resize:\n",
        "        transformer += [gdata.vision.transforms.Resize(resize)]\n",
        "    transformer += [gdata.vision.transforms.ToTensor()]\n",
        "    transformer = gdata.vision.transforms.Compose(transformer)\n",
        "    mnist_train = gdata.vision.FashionMNIST(root=root, train=True)\n",
        "    mnist_test = gdata.vision.FashionMNIST(root=root, train=False)\n",
        "    num_workers = 4 \n",
        "    train_iter = gdata.DataLoader(\n",
        "        mnist_train.transform_first(transformer), batch_size, shuffle=True, num_workers=num_workers\n",
        "    )\n",
        "    test_iter = gdata.DataLoader(\n",
        "        mnist_test.transform_first(transformer), batch_size, shuffle=False, num_workers=num_workers\n",
        "    )\n",
        "    return train_iter, test_iter     \n",
        "\n",
        "batch_size = 128 \n",
        "train_iter, test_iter = load_data_fashion_mnist(batch_size, resize=224)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4KpWpaCICZKp",
        "colab_type": "code",
        "outputId": "5cb3c5bf-2808-4e69-ff90-ac613cea9499",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "lr, num_epochs, ctx = 0.01, 5, d2l.try_gpu()\n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 1.2992, train acc 0.521, test acc 0.754, time 102.5 sec\n",
            "epoch 2, loss 0.6595, train acc 0.754, test acc 0.810, time 94.8 sec\n",
            "epoch 3, loss 0.5430, train acc 0.798, test acc 0.840, time 94.9 sec\n",
            "epoch 4, loss 0.4802, train acc 0.823, test acc 0.841, time 94.9 sec\n",
            "epoch 5, loss 0.4359, train acc 0.840, test acc 0.860, time 94.9 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nf6Xk3IlDK_Q",
        "colab_type": "text"
      },
      "source": [
        "## 5.7 使用重复元素的网络 ( VGG )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "c6vR-BuBFuzZ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import gluon, init, nd \n",
        "from mxnet.gluon import nn \n",
        "\n",
        "def vgg_block(num_convs, num_channels):\n",
        "    blk = nn.Sequential()\n",
        "    for _ in range(num_convs):\n",
        "        blk.add(nn.Conv2D(num_channels, kernel_size=3, padding=1, activation='relu'))\n",
        "    blk.add(nn.MaxPool2D(pool_size=2, strides=2))\n",
        "    return blk"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "U3Kaa-O5GbKb",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "conv_arch = ((1, 64), (1, 128), (2, 256), (2, 512), (2, 512))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dvr5QgncGvML",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def vgg(conv_arch):\n",
        "    net = nn.Sequential()\n",
        "    for (num_convs, num_channels) in conv_arch:\n",
        "        net.add(vgg_block(num_convs, num_channels))\n",
        "    net.add(nn.Dense(4096, activation='relu'), nn.Dropout(0.5), \n",
        "        nn.Dense(4096, activation='relu'), nn.Dropout(0.5), \n",
        "        nn.Dense(10))\n",
        "    return net \n",
        "net = vgg(conv_arch)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jgU9vj4JHbbr",
        "colab_type": "code",
        "outputId": "f2e9c04e-07cb-4bb1-8067-2a9dbcdfd4aa",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 193
        }
      },
      "source": [
        "net.initialize()\n",
        "X = nd.random.uniform(shape=(1, 1, 224, 224))\n",
        "for blk in net:\n",
        "    X = blk(X)\n",
        "    print(blk.name, 'output shape:\\t', X.shape)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "sequential3 output shape:\t (1, 64, 112, 112)\n",
            "sequential4 output shape:\t (1, 128, 56, 56)\n",
            "sequential5 output shape:\t (1, 256, 28, 28)\n",
            "sequential6 output shape:\t (1, 512, 14, 14)\n",
            "sequential7 output shape:\t (1, 512, 7, 7)\n",
            "dense6 output shape:\t (1, 4096)\n",
            "dropout2 output shape:\t (1, 4096)\n",
            "dense7 output shape:\t (1, 4096)\n",
            "dropout3 output shape:\t (1, 4096)\n",
            "dense8 output shape:\t (1, 10)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vUOrGeZrHvB0",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "ratio = 4 \n",
        "small_conv_arch = [(pair[0], pair[1] // ratio) for pair in conv_arch]\n",
        "net = vgg(small_conv_arch)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hCnKZi-4IUN0",
        "colab_type": "code",
        "outputId": "df2c9afd-41ee-4f73-8ea4-ff0536dd9170",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "lr, num_epochs, batch_size, ctx = 0.005, 5, 128, d2l.try_gpu()\n",
        "net.initialize(ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 1.6113, train acc 0.405, test acc 0.724, time 177.0 sec\n",
            "epoch 2, loss 0.7265, train acc 0.728, test acc 0.802, time 167.3 sec\n",
            "epoch 3, loss 0.5771, train acc 0.788, test acc 0.838, time 167.7 sec\n",
            "epoch 4, loss 0.5059, train acc 0.816, test acc 0.852, time 167.6 sec\n",
            "epoch 5, loss 0.4593, train acc 0.833, test acc 0.861, time 167.6 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "N72cOLMQJMMs",
        "colab_type": "text"
      },
      "source": [
        "## 网络中的网络 ( NiN )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xpv4vCUUm1Wi",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import gluon, init, nd \n",
        "from mxnet.gluon import nn \n",
        "\n",
        "def nin_block(num_channels, kernel_size, strides, padding):\n",
        "    blk = nn.Sequential()\n",
        "    blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'), \n",
        "        nn.Conv2D(num_channels, kernel_size=1, activation='relu'), \n",
        "        nn.Conv2D(num_channels, kernel_size=1, activation='relu'), \n",
        "        ) \n",
        "    return blk "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "aBbUmYY_p2Cv",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net = nn.Sequential()\n",
        "net.add(nin_block(96, kernel_size=11, strides=4, padding=0), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2), \n",
        "    nin_block(256, kernel_size=5, strides=1, padding=2), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2), \n",
        "    nin_block(384, kernel_size=3, strides=1, padding=1), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.5), \n",
        "    nin_block(10, kernel_size=3, strides=1, padding=1), \n",
        "    nn.GlobalAvgPool2D(), \n",
        "    nn.Flatten())"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8g7mNygyrfne",
        "colab_type": "code",
        "outputId": "68a0b191-c447-496d-99ce-406a5e1a1fba",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 193
        }
      },
      "source": [
        "X = nd.random.uniform(shape=(1, 1, 224, 224))\n",
        "net.initialize()\n",
        "for layer in net:\n",
        "    X = layer(X)\n",
        "    print(layer.name, 'output shape:\\t', X.shape)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "sequential1 output shape:\t (1, 96, 54, 54)\n",
            "pool0 output shape:\t (1, 96, 26, 26)\n",
            "sequential2 output shape:\t (1, 256, 26, 26)\n",
            "pool1 output shape:\t (1, 256, 12, 12)\n",
            "sequential3 output shape:\t (1, 384, 12, 12)\n",
            "pool2 output shape:\t (1, 384, 5, 5)\n",
            "dropout0 output shape:\t (1, 384, 5, 5)\n",
            "sequential4 output shape:\t (1, 10, 5, 5)\n",
            "pool3 output shape:\t (1, 10, 1, 1)\n",
            "flatten0 output shape:\t (1, 10)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XEwHncU8ryXF",
        "colab_type": "code",
        "outputId": "4d97b45c-5cff-47bc-df9c-6bf6849b812f",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 213
        }
      },
      "source": [
        "lr, num_epochs, batch_size, ctx = 0.1, 5, 128, d2l.try_gpu()\n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Downloading /root/.mxnet/datasets/fashion-mnist/train-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-images-idx3-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/train-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-labels-idx1-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/t10k-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-images-idx3-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/t10k-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-labels-idx1-ubyte.gz...\n",
            "training on gpu(0)\n",
            "epoch 1, loss 2.2271, train acc 0.174, test acc 0.272, time 147.2 sec\n",
            "epoch 2, loss 1.4489, train acc 0.477, test acc 0.662, time 139.1 sec\n",
            "epoch 3, loss 0.9954, train acc 0.645, test acc 0.661, time 138.9 sec\n",
            "epoch 4, loss 0.8170, train acc 0.700, test acc 0.739, time 138.7 sec\n",
            "epoch 5, loss 0.7335, train acc 0.725, test acc 0.730, time 138.8 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HbnWTE8L1ESK",
        "colab_type": "text"
      },
      "source": [
        "## 5.9 含并行连接的网络 ( GoogLeNet )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lOT2Waoxs3ca",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import gluon, init, nd \n",
        "from mxnet.gluon import nn \n",
        "\n",
        "class Inception(nn.Block):\n",
        "    def __init__(self, c1, c2, c3, c4, **kwargs):\n",
        "        super(Inception, self).__init__(**kwargs)\n",
        "        self.p1_1 = nn.Conv2D(c1, kernel_size=1, activation='relu')\n",
        "        self.p2_1 = nn.Conv2D(c2[0], kernel_size=1, activation='relu')\n",
        "        self.p2_2 = nn.Conv2D(c2[1], kernel_size=3, padding=1, activation='relu')\n",
        "        self.p3_1 = nn.Conv2D(c3[0], kernel_size=1, activation='relu')\n",
        "        self.p3_2 = nn.Conv2D(c3[1], kernel_size=5, padding=2, activation='relu')\n",
        "        self.p4_1 = nn.MaxPool2D(pool_size=3, strides=1, padding=1)\n",
        "        self.p4_2 = nn.Conv2D(c4, kernel_size=1, activation='relu')\n",
        "\n",
        "    def forward(self, x):\n",
        "        p1 = self.p1_1(x)\n",
        "        p2 = self.p2_2(self.p2_1(x))\n",
        "        p3 = self.p3_2(self.p3_1(x))\n",
        "        p4 = self.p4_2(self.p4_1(x))\n",
        "        return nd.concat(p1, p2, p3, p4, dim=1)\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-flSPnJ5zKbQ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "b1 = nn.Sequential()\n",
        "b1.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3, activation='relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2, padding=1))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nVWC_Z6lzg6w",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "b2 = nn.Sequential()\n",
        "b2.add(nn.Conv2D(64, kernel_size=1, activation='relu'), \n",
        "    nn.Conv2D(192, kernel_size=3, padding=1, activation='relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2, padding=1))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "7mxL4GJmz-vE",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "b3 = nn.Sequential()\n",
        "b3.add(Inception(64, (96, 128), (16, 32), 32), \n",
        "    Inception(128, (128, 192), (32, 96), 64), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2, padding=1))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ecWKBi_B0uNx",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "b4 = nn.Sequential()\n",
        "b4.add(Inception(192, (96, 208), (16, 48), 64),\n",
        "       Inception(160, (112, 224), (24, 64), 64),\n",
        "       Inception(128, (128, 256), (24, 64), 64),\n",
        "       Inception(112, (144, 288), (32, 64), 64),\n",
        "       Inception(256, (160, 320), (32, 128), 128),\n",
        "       nn.MaxPool2D(pool_size=3, strides=2, padding=1))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NKUwkRXo1XZ7",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "b5 = nn.Sequential()\n",
        "b5.add(Inception(256, (160, 320), (32, 128), 128),\n",
        "       Inception(384, (192, 384), (48, 128), 128),\n",
        "       nn.GlobalAvgPool2D())"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mmFVjcFn1dC5",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net = nn.Sequential()\n",
        "net.add(b1, b2, b3, b4, b5, nn.Dense(10))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ItCt8NCd1lsj",
        "colab_type": "code",
        "outputId": "dffd4ec5-9a5d-499b-a30d-5c126e9e96e9",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "X = nd.random.uniform(shape=(1, 1, 96, 96))\n",
        "net.initialize()\n",
        "for layer in net:\n",
        "    X = layer(X)\n",
        "    print(layer.name, 'output shape:\\t', X.shape)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "sequential8 output shape:\t (1, 64, 24, 24)\n",
            "sequential9 output shape:\t (1, 192, 12, 12)\n",
            "sequential10 output shape:\t (1, 480, 6, 6)\n",
            "sequential11 output shape:\t (1, 832, 3, 3)\n",
            "sequential12 output shape:\t (1, 1024, 1, 1)\n",
            "dense0 output shape:\t (1, 10)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Zs1bsr_g14wB",
        "colab_type": "code",
        "outputId": "86aca59f-2dc7-47b6-d696-dc0fefa0aa79",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "lr, num_epochs, batch_size, ctx = 0.1, 5, 128, d2l.try_gpu()\n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 1.9761, train acc 0.248, test acc 0.602, time 118.4 sec\n",
            "epoch 2, loss 0.8583, train acc 0.659, test acc 0.786, time 117.9 sec\n",
            "epoch 3, loss 0.5071, train acc 0.809, test acc 0.842, time 118.0 sec\n",
            "epoch 4, loss 0.4012, train acc 0.847, test acc 0.869, time 117.7 sec\n",
            "epoch 5, loss 0.3496, train acc 0.867, test acc 0.870, time 117.9 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "aSd082hW24Et",
        "colab_type": "text"
      },
      "source": [
        "## 5.10 批量归一化"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "EaoX61Gr56uI",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import autograd, gluon, init, nd \n",
        "from mxnet.gluon import nn \n",
        "\n",
        "def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):\n",
        "    if not autograd.is_training():\n",
        "        X_hat = (X - moving_mean) / nd.sqrt(moving_var + eps)\n",
        "    else:\n",
        "        assert len(X.shape) in (2, 4)\n",
        "        if len(X.shape) == 2:\n",
        "            mean = X.mean(axis=0)\n",
        "            var = ((X - mean) ** 2).mean(axis=0)\n",
        "        else:\n",
        "            mean = X.mean(axis=(0, 2, 3), keepdims=True)\n",
        "            var = ((X - mean) ** 2).mean(axis=(0, 2, 3), keepdims=True)\n",
        "\n",
        "        X_hat = (X - mean) / nd.sqrt(var + eps)\n",
        "        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean \n",
        "        moving_var = momentum * moving_var + (1.0 - momentum) * var \n",
        "    Y = gamma * X_hat + beta \n",
        "    return Y, moving_mean, moving_var"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "VXg8BohZyjHO",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "class BatchNorm(nn.Block):\n",
        "    # Scratch batch-normalization layer; num_dims is 2 for dense layers,\n",
        "    # 4 for convolutional layers (statistics kept per channel).\n",
        "    def __init__(self, num_features, num_dims, **kwargs):\n",
        "        super(BatchNorm, self).__init__(**kwargs)\n",
        "        if num_dims == 2:\n",
        "            shape = (1, num_features)\n",
        "        else:\n",
        "            shape = (1, num_features, 1, 1)\n",
        "        self.gamma = self.params.get('gamma', shape=shape, init=init.One())\n",
        "        self.beta = self.params.get('beta', shape=shape, init=init.Zero())\n",
        "        self.moving_mean = nd.zeros(shape)\n",
        "        # Running variance starts at ones (not zeros), matching Gluon's own\n",
        "        # nn.BatchNorm running_var init, so inference before any training\n",
        "        # step divides by sqrt(1 + eps) instead of sqrt(eps).\n",
        "        self.moving_var = nd.ones(shape)\n",
        "\n",
        "    def forward(self, X):\n",
        "        # Keep the running statistics on the same device as the input.\n",
        "        if self.moving_mean.context != X.context:\n",
        "            self.moving_mean = self.moving_mean.copyto(X.context)\n",
        "            self.moving_var = self.moving_var.copyto(X.context)\n",
        "        Y, self.moving_mean, self.moving_var = batch_norm(\n",
        "            X, self.gamma.data(), self.beta.data(), self.moving_mean, \n",
        "            self.moving_var, eps=1e-5, momentum=0.9\n",
        "        )\n",
        "        return Y\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "yeXKHNKg0jlK",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net = nn.Sequential()\n",
        "net.add(nn.Conv2D(6, kernel_size=5), \n",
        "    BatchNorm(6, num_dims=4), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.MaxPool2D(pool_size=2, strides=2), \n",
        "    nn.Conv2D(16, kernel_size=5), \n",
        "    BatchNorm(16, num_dims=4), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.MaxPool2D(pool_size=2, strides=2), \n",
        "    nn.Dense(120), \n",
        "    BatchNorm(120, num_dims=2), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.Dense(84), \n",
        "    BatchNorm(84, num_dims=2), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.Dense(10))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jImoUdJJ2RVk",
        "colab_type": "code",
        "outputId": "41f55321-7fde-456b-8ed1-40b83000861e",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 776
        }
      },
      "source": [
        "lr, num_epochs, batch_size, ctx = 1.0, 5, 256, d2l.try_gpu()\n",
        "# force_reinit=True avoids the 'Parameter ... is already initialized'\n",
        "# warnings on re-run, consistent with the other training cells.\n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv75_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv75_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm0_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm0_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv76_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv76_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm1_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm1_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense1_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense1_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm2_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm2_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense2_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense2_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm3_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm3_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense3_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense3_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 0.2906, train acc 0.895, test acc 0.872, time 10.8 sec\n",
            "epoch 2, loss 0.2801, train acc 0.898, test acc 0.889, time 10.5 sec\n",
            "epoch 3, loss 0.2707, train acc 0.901, test acc 0.770, time 10.5 sec\n",
            "epoch 4, loss 0.2610, train acc 0.905, test acc 0.880, time 10.8 sec\n",
            "epoch 5, loss 0.2483, train acc 0.909, test acc 0.865, time 10.5 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "l51xQ8LV3JGT",
        "colab_type": "code",
        "outputId": "fe3a4d37-78e0-46fe-ad55-b7b89327c176",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        }
      },
      "source": [
        "net[1].gamma.data().reshape((-1, )), net[1].beta.data().reshape((-1, ))"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(\n",
              " [1.8463953 2.1018672 1.6952718 1.8522757 0.9805336 1.3830012]\n",
              " <NDArray 6 @gpu(0)>, \n",
              " [-0.20405497 -2.2198381   0.2428574   1.4535147  -0.20604345  0.7259122 ]\n",
              " <NDArray 6 @gpu(0)>)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 35
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "g_p3UvNi3ylq",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net = nn.Sequential()\n",
        "net.add(nn.Conv2D(6, kernel_size=5), \n",
        "    nn.BatchNorm(), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.MaxPool2D(pool_size=2, strides=2), \n",
        "    nn.Conv2D(16, kernel_size=5), \n",
        "    nn.BatchNorm(), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.MaxPool2D(pool_size=2, strides=2), \n",
        "    nn.Dense(120), \n",
        "    nn.BatchNorm(), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.Dense(84), \n",
        "    nn.BatchNorm(), \n",
        "    nn.Activation('sigmoid'), \n",
        "    nn.Dense(10))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vkzVN2jtF7Jf",
        "colab_type": "code",
        "outputId": "b52a3248-444f-4b30-be19-916d202d4cff",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000
        }
      },
      "source": [
        "# force_reinit=True avoids the 'Parameter ... is already initialized'\n",
        "# warnings on re-run, consistent with the other training cells.\n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv77_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv77_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm4_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm4_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm4_running_mean' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm4_running_var' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv78_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'conv78_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm5_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm5_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm5_running_mean' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm5_running_var' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense4_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense4_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm6_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm6_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm6_running_mean' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm6_running_var' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense5_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense5_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm7_gamma' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm7_beta' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm7_running_mean' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'batchnorm7_running_var' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense6_weight' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n",
            "/usr/local/lib/python3.6/dist-packages/mxnet/gluon/parameter.py:862: UserWarning: Parameter 'dense6_bias' is already initialized, ignoring. Set force_reinit=True to re-initialize.\n",
            "  v.initialize(None, ctx, init, force_reinit=force_reinit)\n"
          ],
          "name": "stderr"
        },
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 0.2931, train acc 0.892, test acc 0.863, time 8.2 sec\n",
            "epoch 2, loss 0.2828, train acc 0.897, test acc 0.875, time 8.2 sec\n",
            "epoch 3, loss 0.2719, train acc 0.900, test acc 0.891, time 8.5 sec\n",
            "epoch 4, loss 0.2593, train acc 0.905, test acc 0.888, time 8.2 sec\n",
            "epoch 5, loss 0.2539, train acc 0.906, test acc 0.899, time 8.3 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "f41IG5uvGR-W",
        "colab_type": "text"
      },
      "source": [
        "## 5.11 残差网络 ( ResNet )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "clFvkVlbGd2Z",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import gluon, init, nd \n",
        "from mxnet.gluon import nn  \n",
        "\n",
        "class Residual(nn.Block):\n",
        "    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):\n",
        "        super(Residual, self).__init__(**kwargs)\n",
        "        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1, strides=strides)\n",
        "        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)\n",
        "        if use_1x1conv:\n",
        "            self.conv3 = nn.Conv2D(num_channels, kernel_size=1, strides=strides)\n",
        "        else:\n",
        "            self.conv3 = None \n",
        "        self.bn1 = nn.BatchNorm()\n",
        "        self.bn2 = nn.BatchNorm()\n",
        "\n",
        "    def forward(self, X):\n",
        "        Y = nd.relu(self.bn1(self.conv1(X)))\n",
        "        Y = self.bn2(self.conv2(Y))\n",
        "        if self.conv3:\n",
        "            X = self.conv3(X)\n",
        "        return nd.relu(Y + X)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DL37Hf6qdoFS",
        "colab_type": "code",
        "outputId": "c539cd78-3ff5-489e-d946-d5e399906062",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "blk = Residual(3)\n",
        "blk.initialize()\n",
        "X = nd.random.uniform(shape=(4, 3, 6, 6))\n",
        "blk(X).shape"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(4, 3, 6, 6)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 5
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "n-W1d1Wed1eM",
        "colab_type": "code",
        "outputId": "6b0f4c46-12ac-455a-8ea4-e923e7b1ce3d",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "blk = Residual(6, use_1x1conv=True, strides=2)\n",
        "blk.initialize()\n",
        "blk(X).shape"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(4, 6, 3, 3)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 6
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5lYz4ufkedBr",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net = nn.Sequential()\n",
        "# ResNet stem: the 7x7 stride-2 conv uses padding=3 (not 2) so a 224\n",
        "# input halves to exactly 112, matching the standard ResNet architecture.\n",
        "net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), \n",
        "    nn.BatchNorm(), \n",
        "    nn.Activation('relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2, padding=1))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "46BhjW3w1QIu",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def resnet_block(num_channels, num_residuals, first_block=False):\n",
        "    blk = nn.Sequential()\n",
        "    for i in range(num_residuals):\n",
        "        if i == 0 and not first_block:\n",
        "            blk.add(Residual(num_channels, use_1x1conv=True, strides=2))\n",
        "        else:\n",
        "            blk.add(Residual(num_channels))\n",
        "    return blk"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "juY9SY_E2J-t",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net.add(resnet_block(64, 2, first_block=True), \n",
        "    resnet_block(128, 2), \n",
        "    resnet_block(256, 2), \n",
        "    resnet_block(512, 2))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "z4WsUo4Z2YUj",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "net.add(nn.GlobalAvgPool2D(), nn.Dense(10))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5aKGyxFb2gEa",
        "colab_type": "code",
        "outputId": "dbf57a22-4d7c-40ff-b60e-be3067f58fd9",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 193
        }
      },
      "source": [
        "X = nd.random.uniform(shape=(1, 1, 224, 224))\n",
        "net.initialize()\n",
        "for layer in net:\n",
        "    X = layer(X)\n",
        "    print(layer.name, 'output shape:\\t', X.shape)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "conv5 output shape:\t (1, 64, 111, 111)\n",
            "batchnorm4 output shape:\t (1, 64, 111, 111)\n",
            "relu0 output shape:\t (1, 64, 111, 111)\n",
            "pool0 output shape:\t (1, 64, 56, 56)\n",
            "sequential1 output shape:\t (1, 64, 56, 56)\n",
            "sequential2 output shape:\t (1, 128, 28, 28)\n",
            "sequential3 output shape:\t (1, 256, 14, 14)\n",
            "sequential4 output shape:\t (1, 512, 7, 7)\n",
            "pool1 output shape:\t (1, 512, 1, 1)\n",
            "dense0 output shape:\t (1, 10)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "vVMXjj3323Kt",
        "colab_type": "code",
        "outputId": "b30d6603-65a4-4067-a84e-b6865640a620",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 213
        }
      },
      "source": [
        "lr, num_epochs, batch_size, ctx = 0.05, 5, 256, d2l.try_gpu()\n",
        "net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Downloading /root/.mxnet/datasets/fashion-mnist/train-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-images-idx3-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/train-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/train-labels-idx1-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/t10k-images-idx3-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-images-idx3-ubyte.gz...\n",
            "Downloading /root/.mxnet/datasets/fashion-mnist/t10k-labels-idx1-ubyte.gz from https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/fashion-mnist/t10k-labels-idx1-ubyte.gz...\n",
            "training on gpu(0)\n",
            "epoch 1, loss 0.4853, train acc 0.831, test acc 0.886, time 95.1 sec\n",
            "epoch 2, loss 0.2538, train acc 0.906, test acc 0.904, time 87.4 sec\n",
            "epoch 3, loss 0.1907, train acc 0.930, test acc 0.908, time 87.5 sec\n",
            "epoch 4, loss 0.1463, train acc 0.947, test acc 0.915, time 87.3 sec\n",
            "epoch 5, loss 0.1101, train acc 0.961, test acc 0.918, time 87.3 sec\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KH6826sbXDc2",
        "colab_type": "text"
      },
      "source": [
        "## 5.12 稠密连接网络（DenseNet）"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zmy89XB-XVLr",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "from mxnet import gluon, init, nd \n",
        "from mxnet.gluon import nn \n",
        "\n",
        "def conv_block(num_channels):\n",
        "    \"\"\"BN -> ReLU -> 3x3 conv (padding keeps H and W), used inside a dense block.\"\"\"\n",
        "    blk = nn.Sequential()\n",
        "    blk.add(nn.BatchNorm(), nn.Activation('relu'), \n",
        "        nn.Conv2D(num_channels, kernel_size=3, padding=1))\n",
        "    return blk "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hX9qT4QHbRS2",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "class DenseBlock(nn.Block):\n",
        "    \"\"\"Dense block: num_convs conv blocks, each producing num_channels\n",
        "    feature maps that are concatenated with the block's input.\"\"\"\n",
        "    def __init__(self, num_convs, num_channels, **kwargs):\n",
        "        super(DenseBlock, self).__init__(**kwargs)\n",
        "        self.net = nn.Sequential()\n",
        "        for _ in range(num_convs):\n",
        "            self.net.add(conv_block(num_channels))\n",
        "\n",
        "    def forward(self, X):\n",
        "        for blk in self.net:\n",
        "            Y = blk(X)\n",
        "            # Concatenate input and output along the channel dimension (dim=1),\n",
        "            # so each conv block sees the outputs of all previous blocks.\n",
        "            X = nd.concat(X, Y, dim=1)\n",
        "        return X "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "SZA25BaJcHnD",
        "colab_type": "code",
        "outputId": "1d12d617-7521-4338-d6ca-19c828ce2190",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "# Sanity check: 2 conv blocks of 10 channels on a 3-channel input gives\n",
        "# 3 + 2*10 = 23 output channels; spatial size (8x8) is unchanged.\n",
        "blk = DenseBlock(2, 10)\n",
        "blk.initialize()\n",
        "X = nd.random.uniform(shape=(4, 3, 8, 8))\n",
        "Y = blk(X)\n",
        "Y.shape"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(4, 23, 8, 8)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 18
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "06ulMapWcge5",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def transition_block(num_channels):\n",
        "    \"\"\"Transition layer: BN -> ReLU -> 1x1 conv (reduce channels to\n",
        "    num_channels) -> 2x2 average pooling (halve height and width).\"\"\"\n",
        "    blk = nn.Sequential()\n",
        "    blk.add(nn.BatchNorm(), nn.Activation('relu'), \n",
        "        nn.Conv2D(num_channels, kernel_size=1), \n",
        "        nn.AvgPool2D(pool_size=2, strides=2))\n",
        "    return blk"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "A6K9alhrdFX7",
        "colab_type": "code",
        "outputId": "a6e827db-463e-48fe-c7c4-432aa4949d3b",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "# Apply a transition layer to the previous 23-channel output: channels drop\n",
        "# to 10 and the spatial size halves from 8x8 to 4x4.\n",
        "blk = transition_block(10)\n",
        "blk.initialize()\n",
        "blk(Y).shape"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(4, 10, 4, 4)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 20
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MgpRttaGdQq9",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Stem: 7x7 conv (stride 2), BN, ReLU, then 3x3 max pooling (stride 2).\n",
        "net = nn.Sequential()\n",
        "net.add(nn.Conv2D(64, kernel_size=7, strides=2, padding=3), \n",
        "    nn.BatchNorm(), nn.Activation('relu'), \n",
        "    nn.MaxPool2D(pool_size=3, strides=2, padding=1))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Gkh8J0Qddpgr",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "num_channels, growth_rate = 64, 32  # num_channels is the current channel count\n",
        "num_convs_in_dense_blocks = [4, 4, 4, 4]\n",
        "\n",
        "for i, num_convs in enumerate(num_convs_in_dense_blocks):\n",
        "    net.add(DenseBlock(num_convs, growth_rate))\n",
        "    # Channel count after the dense block just added\n",
        "    num_channels += num_convs * growth_rate\n",
        "    # Between dense blocks, insert a transition layer that halves the channels\n",
        "    if i != len(num_convs_in_dense_blocks) - 1:\n",
        "        num_channels //= 2\n",
        "        net.add(transition_block(num_channels))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xo30HwnOeDYh",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Head: BN + ReLU, global average pooling, then a 10-way dense output layer.\n",
        "net.add(nn.BatchNorm(), nn.Activation('relu'), nn.GlobalAvgPool2D(),\n",
        "        nn.Dense(10))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zXdAqZxveIdP",
        "colab_type": "code",
        "outputId": "09c92407-aacd-4876-addc-35efc5940957",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 122
        }
      },
      "source": [
        "# Train DenseNet on Fashion-MNIST (images resized to 96x96).\n",
        "lr, num_epochs, batch_size, ctx = 0.1, 5, 256, d2l.try_gpu()\n",
        "# Xavier-initialize all parameters on the chosen device.\n",
        "net.initialize(ctx=ctx, init=init.Xavier())\n",
        "trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})\n",
        "train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)\n",
        "d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,\n",
        "              num_epochs)"
      ],
      "execution_count": 0,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "training on gpu(0)\n",
            "epoch 1, loss 0.5292, train acc 0.816, test acc 0.856, time 75.6 sec\n",
            "epoch 2, loss 0.3101, train acc 0.888, test acc 0.888, time 69.3 sec\n",
            "epoch 3, loss 0.2607, train acc 0.905, test acc 0.797, time 69.3 sec\n",
            "epoch 4, loss 0.2323, train acc 0.916, test acc 0.910, time 69.2 sec\n",
            "epoch 5, loss 0.2125, train acc 0.923, test acc 0.907, time 69.4 sec\n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}