{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "0521.ipynb",
      "provenance": [],
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/yananma/5_programs_per_day/blob/master/0521.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "zu-lPihFM1VK",
        "colab_type": "code",
        "outputId": "6c5ba493-f7dd-42aa-8945-69bfd6574fe6",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        }
      },
      "source": [
        "!cat /usr/local/cuda/version.txt"
      ],
      "execution_count": 1,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "CUDA Version 10.0.130\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Txue9hdrk5CD",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "!pip install mxnet-cu100 d2lzh"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MFAVdgZeUU6B",
        "colab_type": "code",
        "outputId": "f27339f8-cf8d-40bb-9444-2fb85b1cbc7e",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        }
      },
      "source": [
        "from mxnet import nd \n",
        "\n",
        "# Demonstrate the hidden-state recurrence building block:\n",
        "# X @ W_xh + H @ W_hh, with X (3, 1), H (3, 4) -> result (3, 4).\n",
        "X, W_xh = nd.random.normal(shape=(3, 1)), nd.random.normal(shape=(1, 4))\n",
        "H, W_hh = nd.random.normal(shape=(3, 4)), nd.random.normal(shape=(4, 4))\n",
        "nd.dot(X, W_xh) + nd.dot(H, W_hh)"
      ],
      "execution_count": 3,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "\n",
              "[[ 5.0373516   2.6754622  -1.6607479  -0.40628886]\n",
              " [ 0.948454    0.46941757 -1.1866101  -1.180677  ]\n",
              " [-1.151402    0.83730245 -2.1974368  -5.248016  ]]\n",
              "<NDArray 3x4 @cpu(0)>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 3
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-qOnuKbcmgY0",
        "colab_type": "code",
        "outputId": "e3e4fcf3-3bd3-4ab4-b3d7-e0e12c1bbd99",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 105
        }
      },
      "source": [
        "# Equivalent to the previous cell: concatenating [X, H] along columns\n",
        "# and [W_xh; W_hh] along rows turns the sum into a single matmul.\n",
        "nd.dot(nd.concat(X, H, dim=1), nd.concat(W_xh, W_hh, dim=0))"
      ],
      "execution_count": 4,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "\n",
              "[[ 5.0373516   2.6754622  -1.6607479  -0.40628862]\n",
              " [ 0.94845396  0.46941754 -1.1866102  -1.1806769 ]\n",
              " [-1.151402    0.8373025  -2.1974368  -5.248016  ]]\n",
              "<NDArray 3x4 @cpu(0)>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 4
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "oJt8HMMm39YX",
        "colab_type": "text"
      },
      "source": [
        "## 语言模型数据集 ( 歌词 )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "fRuHK8kbmvQn",
        "colab_type": "code",
        "outputId": "7a0c4444-c44f-4f93-9f7c-db38a71feb63",
        "colab": {
          "resources": {
            "http://localhost:8080/nbextensions/google.colab/files.js": {
              "data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7Ci8vIE1heCBhbW91bnQgb2YgdGltZSB0byBibG9jayB3YWl0aW5nIGZvciB0aGUgdXNlci4KY29uc3QgRklMRV9DSEFOR0VfVElNRU9VVF9NUyA9IDMwICogMTAwMDsKCmZ1bmN0aW9uIF91cGxvYWRGaWxlcyhpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IHN0ZXBzID0gdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKTsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIC8vIENhY2hlIHN0ZXBzIG9uIHRoZSBvdXRwdXRFbGVtZW50IHRvIG1ha2UgaXQgYXZhaWxhYmxlIGZvciB0aGUgbmV4dCBjYWxsCiAgLy8gdG8gdXBsb2FkRmlsZXNDb250aW51ZSBmcm9tIFB5dGhvbi4KICBvdXRwdXRFbGVtZW50LnN0ZXBzID0gc3RlcHM7CgogIHJldHVybiBfdXBsb2FkRmlsZXNDb250aW51ZShvdXRwdXRJZCk7Cn0KCi8vIFRoaXMgaXMgcm91Z2hseSBhbiBhc
3luYyBnZW5lcmF0b3IgKG5vdCBzdXBwb3J0ZWQgaW4gdGhlIGJyb3dzZXIgeWV0KSwKLy8gd2hlcmUgdGhlcmUgYXJlIG11bHRpcGxlIGFzeW5jaHJvbm91cyBzdGVwcyBhbmQgdGhlIFB5dGhvbiBzaWRlIGlzIGdvaW5nCi8vIHRvIHBvbGwgZm9yIGNvbXBsZXRpb24gb2YgZWFjaCBzdGVwLgovLyBUaGlzIHVzZXMgYSBQcm9taXNlIHRvIGJsb2NrIHRoZSBweXRob24gc2lkZSBvbiBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcCwKLy8gdGhlbiBwYXNzZXMgdGhlIHJlc3VsdCBvZiB0aGUgcHJldmlvdXMgc3RlcCBhcyB0aGUgaW5wdXQgdG8gdGhlIG5leHQgc3RlcC4KZnVuY3Rpb24gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpIHsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIGNvbnN0IHN0ZXBzID0gb3V0cHV0RWxlbWVudC5zdGVwczsKCiAgY29uc3QgbmV4dCA9IHN0ZXBzLm5leHQob3V0cHV0RWxlbWVudC5sYXN0UHJvbWlzZVZhbHVlKTsKICByZXR1cm4gUHJvbWlzZS5yZXNvbHZlKG5leHQudmFsdWUucHJvbWlzZSkudGhlbigodmFsdWUpID0+IHsKICAgIC8vIENhY2hlIHRoZSBsYXN0IHByb21pc2UgdmFsdWUgdG8gbWFrZSBpdCBhdmFpbGFibGUgdG8gdGhlIG5leHQKICAgIC8vIHN0ZXAgb2YgdGhlIGdlbmVyYXRvci4KICAgIG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSA9IHZhbHVlOwogICAgcmV0dXJuIG5leHQudmFsdWUucmVzcG9uc2U7CiAgfSk7Cn0KCi8qKgogKiBHZW5lcmF0b3IgZnVuY3Rpb24gd2hpY2ggaXMgY2FsbGVkIGJldHdlZW4gZWFjaCBhc3luYyBzdGVwIG9mIHRoZSB1cGxvYWQKICogcHJvY2Vzcy4KICogQHBhcmFtIHtzdHJpbmd9IGlucHV0SWQgRWxlbWVudCBJRCBvZiB0aGUgaW5wdXQgZmlsZSBwaWNrZXIgZWxlbWVudC4KICogQHBhcmFtIHtzdHJpbmd9IG91dHB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIG91dHB1dCBkaXNwbGF5LgogKiBAcmV0dXJuIHshSXRlcmFibGU8IU9iamVjdD59IEl0ZXJhYmxlIG9mIG5leHQgc3RlcHMuCiAqLwpmdW5jdGlvbiogdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKSB7CiAgY29uc3QgaW5wdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQoaW5wdXRJZCk7CiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gZmFsc2U7CgogIGNvbnN0IG91dHB1dEVsZW1lbnQgPSBkb2N1bWVudC5nZXRFbGVtZW50QnlJZChvdXRwdXRJZCk7CiAgb3V0cHV0RWxlbWVudC5pbm5lckhUTUwgPSAnJzsKCiAgY29uc3QgcGlja2VkUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBpbnB1dEVsZW1lbnQuYWRkRXZlbnRMaXN0ZW5lcignY2hhbmdlJywgKGUpID0+IHsKICAgICAgcmVzb2x2ZShlLnRhcmdldC5maWxlcyk7CiAgICB9KTsKICB9KTsKCiAgY29uc3QgY2FuY2VsID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnYnV0dG9uJyk7CiAgaW5wdXRFbGVtZW50LnBhc
mVudEVsZW1lbnQuYXBwZW5kQ2hpbGQoY2FuY2VsKTsKICBjYW5jZWwudGV4dENvbnRlbnQgPSAnQ2FuY2VsIHVwbG9hZCc7CiAgY29uc3QgY2FuY2VsUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBjYW5jZWwub25jbGljayA9ICgpID0+IHsKICAgICAgcmVzb2x2ZShudWxsKTsKICAgIH07CiAgfSk7CgogIC8vIENhbmNlbCB1cGxvYWQgaWYgdXNlciBoYXNuJ3QgcGlja2VkIGFueXRoaW5nIGluIHRpbWVvdXQuCiAgY29uc3QgdGltZW91dFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgc2V0VGltZW91dCgoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9LCBGSUxFX0NIQU5HRV9USU1FT1VUX01TKTsKICB9KTsKCiAgLy8gV2FpdCBmb3IgdGhlIHVzZXIgdG8gcGljayB0aGUgZmlsZXMuCiAgY29uc3QgZmlsZXMgPSB5aWVsZCB7CiAgICBwcm9taXNlOiBQcm9taXNlLnJhY2UoW3BpY2tlZFByb21pc2UsIHRpbWVvdXRQcm9taXNlLCBjYW5jZWxQcm9taXNlXSksCiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdzdGFydGluZycsCiAgICB9CiAgfTsKCiAgaWYgKCFmaWxlcykgewogICAgcmV0dXJuIHsKICAgICAgcmVzcG9uc2U6IHsKICAgICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICAgIH0KICAgIH07CiAgfQoKICBjYW5jZWwucmVtb3ZlKCk7CgogIC8vIERpc2FibGUgdGhlIGlucHV0IGVsZW1lbnQgc2luY2UgZnVydGhlciBwaWNrcyBhcmUgbm90IGFsbG93ZWQuCiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gdHJ1ZTsKCiAgZm9yIChjb25zdCBmaWxlIG9mIGZpbGVzKSB7CiAgICBjb25zdCBsaSA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2xpJyk7CiAgICBsaS5hcHBlbmQoc3BhbihmaWxlLm5hbWUsIHtmb250V2VpZ2h0OiAnYm9sZCd9KSk7CiAgICBsaS5hcHBlbmQoc3BhbigKICAgICAgICBgKCR7ZmlsZS50eXBlIHx8ICduL2EnfSkgLSAke2ZpbGUuc2l6ZX0gYnl0ZXMsIGAgKwogICAgICAgIGBsYXN0IG1vZGlmaWVkOiAkewogICAgICAgICAgICBmaWxlLmxhc3RNb2RpZmllZERhdGUgPyBmaWxlLmxhc3RNb2RpZmllZERhdGUudG9Mb2NhbGVEYXRlU3RyaW5nKCkgOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAnbi9hJ30gLSBgKSk7CiAgICBjb25zdCBwZXJjZW50ID0gc3BhbignMCUgZG9uZScpOwogICAgbGkuYXBwZW5kQ2hpbGQocGVyY2VudCk7CgogICAgb3V0cHV0RWxlbWVudC5hcHBlbmRDaGlsZChsaSk7CgogICAgY29uc3QgZmlsZURhdGFQcm9taXNlID0gbmV3IFByb21pc2UoKHJlc29sdmUpID0+IHsKICAgICAgY29uc3QgcmVhZGVyID0gbmV3IEZpbGVSZWFkZXIoKTsKICAgICAgcmVhZGVyLm9ubG9hZCA9IChlKSA9PiB7CiAgICAgICAgcmVzb2x2ZShlLnRhcmdldC5yZXN1bHQpOwogICAgICB9OwogICAgICByZWFkZXIucmVhZEFzQXJyYXlCdWZmZXIoZmlsZSk7CiAgICB9KTsKICAgIC8vIFdhaXQgZm9yIHRoZSBkY
XRhIHRvIGJlIHJlYWR5LgogICAgbGV0IGZpbGVEYXRhID0geWllbGQgewogICAgICBwcm9taXNlOiBmaWxlRGF0YVByb21pc2UsCiAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgYWN0aW9uOiAnY29udGludWUnLAogICAgICB9CiAgICB9OwoKICAgIC8vIFVzZSBhIGNodW5rZWQgc2VuZGluZyB0byBhdm9pZCBtZXNzYWdlIHNpemUgbGltaXRzLiBTZWUgYi82MjExNTY2MC4KICAgIGxldCBwb3NpdGlvbiA9IDA7CiAgICB3aGlsZSAocG9zaXRpb24gPCBmaWxlRGF0YS5ieXRlTGVuZ3RoKSB7CiAgICAgIGNvbnN0IGxlbmd0aCA9IE1hdGgubWluKGZpbGVEYXRhLmJ5dGVMZW5ndGggLSBwb3NpdGlvbiwgTUFYX1BBWUxPQURfU0laRSk7CiAgICAgIGNvbnN0IGNodW5rID0gbmV3IFVpbnQ4QXJyYXkoZmlsZURhdGEsIHBvc2l0aW9uLCBsZW5ndGgpOwogICAgICBwb3NpdGlvbiArPSBsZW5ndGg7CgogICAgICBjb25zdCBiYXNlNjQgPSBidG9hKFN0cmluZy5mcm9tQ2hhckNvZGUuYXBwbHkobnVsbCwgY2h1bmspKTsKICAgICAgeWllbGQgewogICAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgICBhY3Rpb246ICdhcHBlbmQnLAogICAgICAgICAgZmlsZTogZmlsZS5uYW1lLAogICAgICAgICAgZGF0YTogYmFzZTY0LAogICAgICAgIH0sCiAgICAgIH07CiAgICAgIHBlcmNlbnQudGV4dENvbnRlbnQgPQogICAgICAgICAgYCR7TWF0aC5yb3VuZCgocG9zaXRpb24gLyBmaWxlRGF0YS5ieXRlTGVuZ3RoKSAqIDEwMCl9JSBkb25lYDsKICAgIH0KICB9CgogIC8vIEFsbCBkb25lLgogIHlpZWxkIHsKICAgIHJlc3BvbnNlOiB7CiAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgIH0KICB9Owp9CgpzY29wZS5nb29nbGUgPSBzY29wZS5nb29nbGUgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYiA9IHNjb3BlLmdvb2dsZS5jb2xhYiB8fCB7fTsKc2NvcGUuZ29vZ2xlLmNvbGFiLl9maWxlcyA9IHsKICBfdXBsb2FkRmlsZXMsCiAgX3VwbG9hZEZpbGVzQ29udGludWUsCn07Cn0pKHNlbGYpOwo=",
              "ok": true,
              "headers": [
                [
                  "content-type",
                  "application/javascript"
                ]
              ],
              "status": 200,
              "status_text": ""
            }
          },
          "base_uri": "https://localhost:8080/",
          "height": 77
        }
      },
      "source": [
        "from google.colab import files \n",
        "uploaded = files.upload()"
      ],
      "execution_count": 7,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "\n",
              "     <input type=\"file\" id=\"files-58b7789c-74c0-4fa8-92f0-8197331dde17\" name=\"files[]\" multiple disabled />\n",
              "     <output id=\"result-58b7789c-74c0-4fa8-92f0-8197331dde17\">\n",
              "      Upload widget is only available when the cell has been executed in the\n",
              "      current browser session. Please rerun this cell to enable.\n",
              "      </output>\n",
              "      <script src=\"/nbextensions/google.colab/files.js\"></script> "
            ],
            "text/plain": [
              "<IPython.core.display.HTML object>"
            ]
          },
          "metadata": {
            "tags": []
          }
        },
        {
          "output_type": "stream",
          "text": [
            "Saving jaychou_lyrics.txt.zip to jaychou_lyrics.txt.zip\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2PZXNrQds94a",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "325600c0-9e7a-46d2-902d-1d4d088b2edb"
      },
      "source": [
        "from mxnet import nd \n",
        "import random \n",
        "import zipfile \n",
        "\n",
        "# Read the lyrics corpus straight out of the uploaded zip archive.\n",
        "with zipfile.ZipFile('jaychou_lyrics.txt.zip') as zin:\n",
        "    with zin.open('jaychou_lyrics.txt') as f:\n",
        "        corpus_chars = f.read().decode('utf-8')\n",
        "corpus_chars[:40]"
      ],
      "execution_count": 8,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'想要有直升机\\n想要和你飞到宇宙去\\n想要和你融化在一起\\n融化在宇宙里\\n我每天每天每'"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 8
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "x7Lr7vLGtqG1",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Replace line breaks with spaces, then keep only the first 10,000\n",
        "# characters so the demo trains quickly.\n",
        "corpus_chars = corpus_chars.replace('\\n', ' ').replace('\\r', ' ')\n",
        "corpus_chars = corpus_chars[0:10000]"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4t5kEVrpuAH9",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "2402be27-262e-4836-84f6-3e7d336f5cac"
      },
      "source": [
        "# Build the character vocabulary: index -> char and char -> index.\n",
        "# NOTE: set() ordering is not deterministic across runs, so the\n",
        "# index assignment can differ between sessions.\n",
        "idx_to_char = list(set(corpus_chars))\n",
        "char_to_idx = dict([(char, i) for i, char in enumerate(idx_to_char)])\n",
        "vocab_size = len(char_to_idx)\n",
        "vocab_size"
      ],
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "1027"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 10
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Zf_wIiyxu91m",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 52
        },
        "outputId": "68be1ae4-cd6e-4e3f-a15c-0bcca3a92f41"
      },
      "source": [
        "# Encode the whole corpus as integer indices and spot-check that a\n",
        "# short sample round-trips back to the original characters.\n",
        "corpus_indices = [char_to_idx[char] for char in corpus_chars]\n",
        "sample = corpus_indices[:20]\n",
        "print('chars:', ''.join([idx_to_char[idx] for idx in sample]))\n",
        "print('indices:', sample)"
      ],
      "execution_count": 11,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "chars: 想要有直升机 想要和你飞到宇宙去 想要和\n",
            "indices: [918, 734, 559, 558, 158, 760, 571, 918, 734, 656, 845, 223, 659, 894, 751, 95, 571, 918, 734, 656]\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TZRGfDjgvdze",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def data_iter_random(corpus_indices, batch_size, num_steps, ctx=None):\n",
        "    \"\"\"Yield (X, Y) minibatches by sampling subsequences at random.\n",
        "\n",
        "    X is a (batch_size, num_steps) array of token indices; Y is the\n",
        "    same window shifted one step forward (the prediction targets).\n",
        "    Consecutive minibatches are NOT adjacent in the corpus.\n",
        "    \"\"\"\n",
        "    # Subtract 1 so every X window still has a one-step-ahead label.\n",
        "    num_examples = (len(corpus_indices) - 1) // num_steps \n",
        "    epoch_size = num_examples // batch_size \n",
        "    example_indices = list(range(num_examples))\n",
        "    # Shuffle the window start positions so batches differ each epoch.\n",
        "    random.shuffle(example_indices)\n",
        "\n",
        "    def _data(pos):\n",
        "        # Return the num_steps-long window starting at position pos.\n",
        "        return corpus_indices[pos: pos + num_steps]\n",
        "    \n",
        "    for i in range(epoch_size):\n",
        "        i = i * batch_size \n",
        "        batch_indices = example_indices[i: i + batch_size]\n",
        "        X = [_data(j * num_steps) for j in batch_indices]\n",
        "        Y = [_data(j * num_steps + 1) for j in batch_indices]\n",
        "        yield nd.array(X, ctx), nd.array(Y, ctx)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "exfOAb6Xxq2A",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 334
        },
        "outputId": "0e4efe0e-c22f-4941-ddb4-6e4d0839e80a"
      },
      "source": [
        "# Sanity-check the random sampler on the toy sequence 0..29:\n",
        "# each Y should be its X shifted by one position.\n",
        "my_seq = list(range(30))\n",
        "for X, Y in data_iter_random(my_seq, batch_size=2, num_steps=6):\n",
        "    print('X: ', X, '\\nY', Y, '\\n')"
      ],
      "execution_count": 13,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "X:  \n",
            "[[12. 13. 14. 15. 16. 17.]\n",
            " [18. 19. 20. 21. 22. 23.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "Y \n",
            "[[13. 14. 15. 16. 17. 18.]\n",
            " [19. 20. 21. 22. 23. 24.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "\n",
            "X:  \n",
            "[[ 6.  7.  8.  9. 10. 11.]\n",
            " [ 0.  1.  2.  3.  4.  5.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "Y \n",
            "[[ 7.  8.  9. 10. 11. 12.]\n",
            " [ 1.  2.  3.  4.  5.  6.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8I8--10yyf02",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx=None):\n",
        "    \"\"\"Yield (X, Y) minibatches of consecutive subsequences.\n",
        "\n",
        "    The corpus is reshaped to (batch_size, batch_len); successive\n",
        "    minibatches are adjacent along the time axis, so a hidden state\n",
        "    can be carried from one batch to the next during training.\n",
        "    \"\"\"\n",
        "    corpus_indices = nd.array(corpus_indices, ctx=ctx)\n",
        "    data_len = len(corpus_indices)\n",
        "    # Split the corpus into batch_size parallel streams of equal length.\n",
        "    batch_len = data_len // batch_size \n",
        "    indices = corpus_indices[0: batch_size*batch_len].reshape((batch_size, batch_len))\n",
        "    # The -1 leaves room for the one-step-ahead labels Y.\n",
        "    epoch_size = (batch_len - 1) // num_steps\n",
        "    for i in range(epoch_size):\n",
        "        i = i * num_steps \n",
        "        X = indices[:, i: i + num_steps]\n",
        "        Y = indices[:, i + 1: i + num_steps + 1]\n",
        "        yield X, Y"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "r2-au0N83Y0S",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 334
        },
        "outputId": "78f46417-9dbe-45c6-ee4c-856164003238"
      },
      "source": [
        "# Sanity-check consecutive sampling: within each row, the second\n",
        "# batch continues exactly where the first one left off.\n",
        "for X, Y in data_iter_consecutive(my_seq, batch_size=2, num_steps=6):\n",
        "    print('X: ', X, '\\nY: ', Y, '\\n')"
      ],
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "X:  \n",
            "[[ 0.  1.  2.  3.  4.  5.]\n",
            " [15. 16. 17. 18. 19. 20.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "Y:  \n",
            "[[ 1.  2.  3.  4.  5.  6.]\n",
            " [16. 17. 18. 19. 20. 21.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "\n",
            "X:  \n",
            "[[ 6.  7.  8.  9. 10. 11.]\n",
            " [21. 22. 23. 24. 25. 26.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "Y:  \n",
            "[[ 7.  8.  9. 10. 11. 12.]\n",
            " [22. 23. 24. 25. 26. 27.]]\n",
            "<NDArray 2x6 @cpu(0)> \n",
            "\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "29r9kMunDbmv",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 316
        },
        "outputId": "9dde798e-994c-4262-c586-1bdef3a91b71"
      },
      "source": [
        "!nvidia-smi"
      ],
      "execution_count": 16,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Sun Nov 10 05:47:54 2019       \n",
            "+-----------------------------------------------------------------------------+\n",
            "| NVIDIA-SMI 430.50       Driver Version: 418.67       CUDA Version: 10.1     |\n",
            "|-------------------------------+----------------------+----------------------+\n",
            "| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n",
            "| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n",
            "|===============================+======================+======================|\n",
            "|   0  Tesla K80           Off  | 00000000:00:04.0 Off |                    0 |\n",
            "| N/A   43C    P8    30W / 149W |      0MiB / 11441MiB |      0%      Default |\n",
            "+-------------------------------+----------------------+----------------------+\n",
            "                                                                               \n",
            "+-----------------------------------------------------------------------------+\n",
            "| Processes:                                                       GPU Memory |\n",
            "|  GPU       PID   Type   Process name                             Usage      |\n",
            "|=============================================================================|\n",
            "|  No running processes found                                                 |\n",
            "+-----------------------------------------------------------------------------+\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EzG8MS5pDeH5",
        "colab_type": "text"
      },
      "source": [
        "## 6.4 循环神经网络的从零开始实现"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Y3Ns3Xo04OjJ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "!mkdir ../data"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xvYL7pA75-eY",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 70
        },
        "outputId": "b2f58887-0667-45d0-f1f0-1dfeb287cf37"
      },
      "source": [
        "!ls .."
      ],
      "execution_count": 22,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "bin\t data\t  etc\tlib32  mnt   root  srv\t  tensorflow-2.0.0  usr\n",
            "boot\t datalab  home\tlib64  opt   run   swift  tmp\t\t    var\n",
            "content  dev\t  lib\tmedia  proc  sbin  sys\t  tools\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lgSd712T69Tl",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "!cp jaychou_lyrics.txt.zip ../data/"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qZlluDjL7Hc1",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "f3bec8d4-8c28-43d2-d3cd-31c5310da081"
      },
      "source": [
        "!ls ../data/"
      ],
      "execution_count": 26,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "jaychou_lyrics.txt.zip\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KfBZHad83vdO",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "import math \n",
        "from mxnet import autograd, nd \n",
        "from mxnet.gluon import loss as gloss \n",
        "import time \n",
        "\n",
        "# d2l ships a loader returning the same 4-tuple built manually above\n",
        "# (it expects the zip under ../data/ — copied there in an earlier cell).\n",
        "(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wUE_rFS-7Qf8",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 87
        },
        "outputId": "db04c8c2-3cae-4a04-d2e5-976fdbcdb8c0"
      },
      "source": [
        "nd.one_hot(nd.array([0, 2]), vocab_size)"
      ],
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "\n",
              "[[1. 0. 0. ... 0. 0. 0.]\n",
              " [0. 0. 1. ... 0. 0. 0.]]\n",
              "<NDArray 2x1027 @cpu(0)>"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 30
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QjrDRi1K-Tb_",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "1ff570b3-c779-4ea5-91d6-6365adacb0a7"
      },
      "source": [
        "def to_onehot(X, size):\n",
        "    # Convert a (batch, num_steps) index array into a list of num_steps\n",
        "    # one-hot matrices, each of shape (batch, size): one per time step.\n",
        "    return [nd.one_hot(x, size) for x in X.T]\n",
        "\n",
        "X = nd.arange(10).reshape((2, 5))\n",
        "inputs = to_onehot(X, vocab_size)\n",
        "# Expect 5 time steps, each a (2, vocab_size) one-hot matrix.\n",
        "len(inputs), inputs[0].shape"
      ],
      "execution_count": 32,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(5, (2, 1027))"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 32
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NZPQA_Of_BJe",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "54cf9c3c-debb-4292-d654-0b447bed4efb"
      },
      "source": [
        "num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size \n",
        "ctx = d2l.try_gpu()\n",
        "print('will use ', ctx)\n",
        "\n",
        "def get_params():\n",
        "    \"\"\"Create and return the RNN parameters, with gradient buffers attached.\"\"\"\n",
        "    def _one(shape):\n",
        "        # Small random initialization on the chosen device.\n",
        "        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)\n",
        "\n",
        "    # Hidden-layer parameters.\n",
        "    W_xh = _one((num_inputs, num_hiddens))\n",
        "    W_hh = _one((num_hiddens, num_hiddens))\n",
        "    b_h = nd.zeros(num_hiddens, ctx=ctx)\n",
        "    # Output-layer parameters.\n",
        "    W_hq = _one((num_hiddens, num_outputs))\n",
        "    b_q = nd.zeros(num_outputs, ctx=ctx)\n",
        "\n",
        "    params = [W_xh, W_hh, b_h, W_hq, b_q] \n",
        "    for param in params:\n",
        "        # Allocate gradient storage so autograd can write into it.\n",
        "        param.attach_grad()\n",
        "    return params "
      ],
      "execution_count": 37,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "will use  gpu(0)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nO0RxD7oA2n3",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def init_rnn_state(batch_size, num_hiddens, ctx):\n",
        "    # Return the initial hidden state: a tuple holding one zero matrix.\n",
        "    # A tuple keeps the interface uniform with models (e.g. LSTM) whose\n",
        "    # state has more than one element.\n",
        "    return (nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx), )"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "N7LQPFW_Bi7-",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def rnn(inputs, state, params):\n",
        "    \"\"\"Run one forward pass of the RNN over all time steps.\n",
        "\n",
        "    inputs: list of (batch, vocab_size) one-hot matrices, one per step.\n",
        "    Returns (outputs, (H,)): the per-step output matrices and the final\n",
        "    hidden state, so the caller can carry it into the next batch.\n",
        "    \"\"\"\n",
        "    W_xh, W_hh, b_h, W_hq, b_q = params \n",
        "    H, = state \n",
        "    outputs = []\n",
        "    for X in inputs:\n",
        "        # Hidden-state update: tanh(X W_xh + H W_hh + b_h).\n",
        "        H = nd.tanh(nd.dot(X, W_xh) + nd.dot(H, W_hh) + b_h)\n",
        "        Y = nd.dot(H, W_hq) + b_q \n",
        "        outputs.append(Y)\n",
        "    return outputs, (H, )"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "CuTgGatECrWV",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "2acc9b7a-c2de-4516-b518-2b8667f7a844"
      },
      "source": [
        "# Smoke test: 5 time steps with batch size 2; each output should be\n",
        "# (2, vocab_size) and the hidden state (2, num_hiddens).\n",
        "state = init_rnn_state(X.shape[0], num_hiddens, ctx)\n",
        "inputs = to_onehot(X.as_in_context(ctx), vocab_size)\n",
        "params = get_params()\n",
        "outputs, state_new = rnn(inputs, state, params)\n",
        "len(outputs), outputs[0].shape, state_new[0].shape "
      ],
      "execution_count": 40,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(5, (2, 1027), (2, 256))"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 40
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "B0mFVLLIDWte",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state, num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):\n",
        "    \"\"\"Generate num_chars new characters following the given prefix.\n",
        "\n",
        "    Feeds the prefix through the rnn one character at a time to warm up\n",
        "    the hidden state, then greedily appends the argmax prediction.\n",
        "    \"\"\"\n",
        "    state = init_rnn_state(1, num_hiddens, ctx)\n",
        "    output = [char_to_idx[prefix[0]]]\n",
        "    for t in range(num_chars + len(prefix) - 1):\n",
        "        # The last generated (or prefix) character is the current input.\n",
        "        X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)\n",
        "        (Y, state) = rnn(X, state, params)\n",
        "        if t < len(prefix) - 1:\n",
        "            # Still inside the prefix: force-feed the known next character.\n",
        "            output.append(char_to_idx[prefix[t + 1]])\n",
        "        else:\n",
        "            # Past the prefix: take the most likely next character.\n",
        "            output.append(int(Y[0].argmax(axis=1).asscalar()))\n",
        "    return ''.join([idx_to_char[i] for i in output])"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Uczj10k_FpSa",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "45f18c48-f72c-4ad8-bef4-b437d0cbaa7e"
      },
      "source": [
        "predict_rnn('分开', 10, rnn, params, init_rnn_state, num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx)"
      ],
      "execution_count": 44,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'分开等盘喝换距驳蕃车响倒'"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 44
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MT0z-_usGFZi",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def grad_clipping(params, theta, ctx):\n",
        "    \"\"\"Clip all gradients globally so their joint L2 norm is at most theta.\"\"\"\n",
        "    norm = nd.array([0], ctx)\n",
        "    # Accumulate the squared L2 norm over every parameter's gradient.\n",
        "    for param in params:\n",
        "        norm += (param.grad ** 2).sum()\n",
        "    norm = norm.sqrt().asscalar()\n",
        "    if norm > theta:\n",
        "        # Rescale every gradient in place by theta / norm.\n",
        "        for param in params:\n",
        "            param.grad[:] *= theta / norm "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WYAMzWILHh66",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens, \n",
        "             vocab_size, ctx, corpus_indices, idx_to_char, \n",
        "             char_to_idx, is_random_iter, num_epochs, num_steps, \n",
        "             lr, clipping_theta, batch_size, pred_period, \n",
        "             pred_len, prefixes):\n",
        "    \"\"\"Train a from-scratch RNN and periodically print sampled text.\n",
        "\n",
        "    is_random_iter chooses random vs. consecutive minibatch sampling; with\n",
        "    consecutive sampling the hidden state is carried across minibatches\n",
        "    within an epoch.\n",
        "    \"\"\"\n",
        "    if is_random_iter:\n",
        "        data_iter_fn = d2l.data_iter_random \n",
        "    else:\n",
        "        data_iter_fn = d2l.data_iter_consecutive \n",
        "    params = get_params()\n",
        "    loss = gloss.SoftmaxCrossEntropyLoss()\n",
        "\n",
        "    for epoch in range(num_epochs):\n",
        "        if not is_random_iter:\n",
        "            # Consecutive sampling: initialize the state once per epoch.\n",
        "            state = init_rnn_state(batch_size, num_hiddens, ctx)\n",
        "        l_sum, n, start = 0.0, 0, time.time()\n",
        "        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx)\n",
        "        for X, Y in data_iter:\n",
        "            if is_random_iter:\n",
        "                # Random sampling: adjacent batches are unrelated, so reset the state.\n",
        "                state = init_rnn_state(batch_size, num_hiddens, ctx)\n",
        "            else:\n",
        "                for s in state:\n",
        "                    # NOTE(review): detach() returns a new array whose result is\n",
        "                    # discarded here; kept as-is to mirror the book's code.\n",
        "                    s.detach()\n",
        "            with autograd.record():\n",
        "                inputs = to_onehot(X, vocab_size)\n",
        "                (outputs, state) = rnn(inputs, state, params)\n",
        "                # Concatenate per-step outputs to (num_steps * batch_size, vocab_size).\n",
        "                outputs = nd.concat(*outputs, dim=0)\n",
        "                # Transpose labels so they line up with the concatenated outputs.\n",
        "                y = Y.T.reshape((-1, ))\n",
        "                l = loss(outputs, y).mean()\n",
        "            l.backward()\n",
        "            grad_clipping(params, clipping_theta, ctx)\n",
        "            d2l.sgd(params, lr, 1)  # loss was already averaged, so batch size 1 here\n",
        "            l_sum += l.asscalar() * y.size \n",
        "            n += y.size \n",
        "\n",
        "        if (epoch + 1) % pred_period == 0:\n",
        "            # exp(mean cross-entropy) is the perplexity.\n",
        "            print('epoch %d, perplexity %f, time %.2f sec' % (\n",
        "                epoch + 1, math.exp(l_sum / n), time.time() - start\n",
        "            ))\n",
        "            for prefix in prefixes:\n",
        "                print(' -', predict_rnn(\n",
        "                    prefix, pred_len, rnn, params, init_rnn_state, \n",
        "                    num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx\n",
        "                ))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5wpLU7bqwtnQ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Hyperparameters for the from-scratch RNN training runs below.\n",
        "num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2 \n",
        "pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9zIKC2zw-_Eh",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 281
        },
        "outputId": "b22a0b29-d2cf-48e9-e2ff-b11239b19b2b"
      },
      "source": [
        "# Train with random minibatch sampling (is_random_iter=True).\n",
        "train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens, vocab_size, ctx, corpus_indices, \n",
        "        idx_to_char, char_to_idx, True, num_epochs, num_steps, lr, clipping_theta, batch_size, \n",
        "        pred_period, pred_len, prefixes)"
      ],
      "execution_count": 55,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 50, perplexity 71.381833, time 0.43 sec\n",
            " - 分开 我不要 爱你我 说子我 别子我 别子我 别子我 别子我 别子我 别子我 别子我 别子我 别子我 别\n",
            " - 不分开 我有的 爱你我 我不要 一颗我 别子我 别子我 别子我 别子我 别子我 别子我 别子我 别子我 别\n",
            "epoch 100, perplexity 9.813439, time 0.43 sec\n",
            " - 分开 一颗两步三 用在它在留 仙人在停落 这人在停落 这人在停落 这人在停落 这人在停落 这人在停落 这\n",
            " - 不分开久 我不能再想 我不 我不 我不要再想你 不知不觉 你已经离开我 不知不觉 我该了这节奏 不知后觉 \n",
            "epoch 150, perplexity 2.938046, time 0.43 sec\n",
            " - 分开 爱家用双截棍 哼哼哈兮 习使用双截棍 哼哼哈兮 快使用双截棍 哼哼哈兮 快使用双截棍 哼哼哈兮 快\n",
            " - 不分开扫 然后你看去 慢慢温开天 仙人在怕羞 蜥蝪横著走 这样什么奇 蜥蝪横著走 蜥不懂 走 懂里什么奇 \n",
            "epoch 200, perplexity 1.599412, time 0.43 sec\n",
            " - 分开 一步两双截 辛辛它在抽屉 它所拥有的只剩下回忆 相爱还说前离字日的平墙 夕著斜木映的屋内还弥漫 姥\n",
            " - 不分开扫 然后将过去 慢慢在习前 你在在元前 蜥蝪 什么却 旧词依间截 几辛盘苦 全家怕日出 白色蜡烛 温\n",
            "epoch 250, perplexity 1.299030, time 0.43 sec\n",
            " - 分开 一步两停哭 还钩它满走 这样就反驳 到底拽什么 懂不懂篮球 这种不要走 三对三斗牛 三分球 它在空\n",
            " - 不分开期 然后将过去 慢慢温满天 一朵一朵因 三被在著妥 这里就反说 懂里什么奇 三对就什么 懂不懂篮球 \n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "outputId": "01856400-5216-4da1-9da1-9662815a712d",
        "id": "63LmedauAsRs",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 281
        }
      },
      "source": [
        "# Train with consecutive minibatch sampling (is_random_iter=False).\n",
        "train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens, vocab_size, ctx, corpus_indices, \n",
        "        idx_to_char, char_to_idx, False, num_epochs, num_steps, lr, clipping_theta, batch_size, \n",
        "        pred_period, pred_len, prefixes)"
      ],
      "execution_count": 56,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 50, perplexity 62.303389, time 0.43 sec\n",
            " - 分开 我想要这 你使了双 我有一场 如果我 别怪我 我的我有 你的让空 我有了空 我有了外 在谁了外 泪\n",
            " - 不分开 我不能再 你有我有 你谁了外 在谁的可 感狂的可 感狂的可 感狂的可 感狂的可 感狂的可 感狂的可\n",
            "epoch 100, perplexity 7.131350, time 0.42 sec\n",
            " - 分开 你想了 是你我抬起头 有话去对医药箱说 别怪我 别怪我 说你怎么面对我 甩开球我满腔的怒火 我想揍\n",
            " - 不分开柳 你想经这开我 不知不觉 我已了这节奏 我该好好生活 不知不觉 我已经这节奏 我该好这生活 不知我\n",
            "epoch 150, perplexity 2.039645, time 0.42 sec\n",
            " - 分开 这说我 谁是我 印地安的传说 还真是 瞎透了 什么都有 沙漠之 怎么她著停留 一直在停留 谁让它停\n",
            " - 不分开柳 你已经离开我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后兮 快使用双截棍 哼哼哈兮 \n",
            "epoch 200, perplexity 1.271316, time 0.43 sec\n",
            " - 分开 问候堂 是属于那年代白墙黑瓦的淡淡的忧伤 消失的 旧时光 一九四三 回头看 的片段 有一些风霜 老\n",
            " - 不分开觉 你已经离开我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后觉 我该好好生活 后知后觉 \n",
            "epoch 250, perplexity 1.168021, time 0.42 sec\n",
            " - 分开 问候堂依旧在的事我 泪不休 语沉默 娘子她人在江南等我 泪不休 语沉默娘子 娘子却依旧每日 折一枝\n",
            " - 不分开觉 你已经离开我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后觉 我该好好生活 我该好好生\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "uh8B4g1iB4N7",
        "colab_type": "text"
      },
      "source": [
        "## 6.5 循环神经网络的简洁实现"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "IljIbGVIACXh",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import d2lzh as d2l \n",
        "import math \n",
        "from mxnet import autograd, gluon, init, nd \n",
        "# Fix: import the loss module as lowercase `gloss`, the name the training\n",
        "# code below actually uses (gloss.SoftmaxCrossEntropyLoss()); the previous\n",
        "# `loss as Gloss` only worked via stale kernel state from earlier cells.\n",
        "from mxnet.gluon import loss as gloss, nn, rnn \n",
        "import time \n",
        "\n",
        "(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MuHPskUFB_rN",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# A single-layer Gluon RNN with 256 hidden units.\n",
        "num_hiddens = 256 \n",
        "rnn_layer = rnn.RNN(num_hiddens)\n",
        "rnn_layer.initialize()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_wbe7AZAChy8",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "095733dd-2f7b-4a40-9525-558704902ee6"
      },
      "source": [
        "batch_size = 2 \n",
        "state = rnn_layer.begin_state(batch_size=batch_size)\n",
        "# State shape is (num_layers, batch_size, num_hiddens) — here (1, 2, 256).\n",
        "state[0].shape "
      ],
      "execution_count": 59,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "(1, 2, 256)"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 59
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Htbxd3pmCuCK",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "74d76ed2-9b20-4bbc-9923-851a8c65f134"
      },
      "source": [
        "# Inputs to rnn_layer have shape (num_steps, batch_size, vocab_size);\n",
        "# the output keeps the (num_steps, batch_size, num_hiddens) layout.\n",
        "num_steps = 35 \n",
        "X = nd.random.uniform(shape=(num_steps, batch_size, vocab_size))\n",
        "Y, state_new = rnn_layer(X, state)\n",
        "Y.shape, len(state_new), state_new[0].shape "
      ],
      "execution_count": 60,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "((35, 2, 256), 1, (1, 2, 256))"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 60
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9da3anlCMVjN",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "class RNNModel(nn.Block):\n",
        "    \"\"\"Wrap a Gluon recurrent layer with a dense output layer over the vocabulary.\"\"\"\n",
        "    def __init__(self, rnn_layer, vocab_size, **kwargs):\n",
        "        super(RNNModel, self).__init__(**kwargs)\n",
        "        self.rnn = rnn_layer\n",
        "        self.vocab_size = vocab_size\n",
        "        self.dense = nn.Dense(vocab_size)\n",
        "\n",
        "    def forward(self, inputs, state):\n",
        "        # (batch, steps) indices -> one-hot (steps, batch, vocab), the layout rnn expects.\n",
        "        embedded = nd.one_hot(inputs.T, self.vocab_size)\n",
        "        hidden, state = self.rnn(embedded, state)\n",
        "        # Merge the time and batch axes, then project to vocabulary logits.\n",
        "        logits = self.dense(hidden.reshape((-1, hidden.shape[-1])))\n",
        "        return logits, state\n",
        "\n",
        "    def begin_state(self, *args, **kwargs):\n",
        "        # Delegate state creation to the wrapped recurrent layer.\n",
        "        return self.rnn.begin_state(*args, **kwargs)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZSuEJM-aNktz",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx, idx_to_char, char_to_idx):\n",
        "    \"\"\"Generate num_chars characters following prefix with a Gluon RNNModel.\"\"\"\n",
        "    state = model.begin_state(batch_size=1, ctx=ctx)\n",
        "    indices = [char_to_idx[prefix[0]]]\n",
        "    for step in range(num_chars + len(prefix) - 1):\n",
        "        # The last emitted index becomes a (1, 1) batch for the next step.\n",
        "        last = nd.array([indices[-1]], ctx=ctx).reshape((1, 1))\n",
        "        Y, state = model(last, state)\n",
        "        if step + 1 < len(prefix):\n",
        "            # Teacher-force the remaining prefix characters.\n",
        "            indices.append(char_to_idx[prefix[step + 1]])\n",
        "        else:\n",
        "            # Greedy decoding: take the argmax over the vocabulary.\n",
        "            indices.append(int(Y.argmax(axis=1).asscalar()))\n",
        "    return ''.join(idx_to_char[i] for i in indices)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "nisqDmpHPz3Z",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 34
        },
        "outputId": "dc17ae2b-00f9-46f2-92fb-0683a079b407"
      },
      "source": [
        "ctx = d2l.try_gpu()\n",
        "model = RNNModel(rnn_layer, vocab_size)\n",
        "model.initialize(force_reinit=True, ctx=ctx)\n",
        "# Weights were just re-initialized, so the continuation below is meaningless text.\n",
        "predict_rnn_gluon('分开', 10, model, vocab_size, ctx, idx_to_char, char_to_idx)"
      ],
      "execution_count": 65,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "'分开帅物细疼物细疼物细疼'"
            ]
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 65
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "AFFCggcyQQOT",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, \n",
        "            num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes):\n",
        "    \"\"\"Train a Gluon RNNModel with consecutive sampling and print samples periodically.\"\"\"\n",
        "    loss = gloss.SoftmaxCrossEntropyLoss()\n",
        "    model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))\n",
        "    trainer = gluon.Trainer(model.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0, 'wd': 0})\n",
        "\n",
        "    for epoch in range(num_epochs):\n",
        "        l_sum, n, start = 0.0, 0, time.time()\n",
        "        data_iter = d2l.data_iter_consecutive(corpus_indices, batch_size, num_steps, ctx)\n",
        "        state = model.begin_state(batch_size=batch_size, ctx=ctx)\n",
        "        for X, Y in data_iter:\n",
        "            for s in state:\n",
        "                # NOTE(review): detach() returns a new array whose result is\n",
        "                # discarded here; kept as-is to mirror the book's code.\n",
        "                s.detach()\n",
        "            with autograd.record():\n",
        "                (output, state) = model(X, state)\n",
        "                # Labels transposed to match the (steps * batch, vocab) output layout.\n",
        "                y = Y.T.reshape((-1, ))\n",
        "                l = loss(output, y).mean()\n",
        "            l.backward()\n",
        "            params = [p.data() for p in model.collect_params().values()]\n",
        "            d2l.grad_clipping(params, clipping_theta, ctx)\n",
        "            trainer.step(1)  # loss is already a mean, so use batch size 1\n",
        "            l_sum += l.asscalar() * y.size \n",
        "            n += y.size \n",
        "\n",
        "        if (epoch + 1) % pred_period == 0:\n",
        "            print('epoch %d, perplexity %f, time %.2f sec' % (\n",
        "                epoch + 1, math.exp(l_sum / n), time.time() - start\n",
        "            ))\n",
        "            for prefix in prefixes:\n",
        "                # Fixed the sample marker from ' _' to ' -' for consistency with\n",
        "                # train_and_predict_rnn's output format.\n",
        "                print(' -', predict_rnn_gluon(\n",
        "                    prefix, pred_len, model, vocab_size, ctx, idx_to_char, char_to_idx\n",
        "                ))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GzNcsGgHVWpd",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 281
        },
        "outputId": "2926c47d-392a-4b95-ee4f-8896e7ed3d70"
      },
      "source": [
        "# Same hyperparameters as the from-scratch run for comparability.\n",
        "num_epochs, batch_size, lr, clipping_theta = 250, 32, 1e2, 1e-2 \n",
        "pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']\n",
        "train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, \n",
        "            num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes)"
      ],
      "execution_count": 68,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 50, perplexity 77.114559, time 0.12 sec\n",
            " _ 分开 我不能再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我\n",
            " _ 不分开 我想我这想你的怒场就 印地在有多 我不能再想 我不要再想 我不要再想 我不要再想 我不要再想 我不\n",
            "epoch 100, perplexity 12.234623, time 0.11 sec\n",
            " _ 分开 娘子我 你你我 每你怎么这对我 甩开球我满腔的怒火 我想揍你已经很久银像想我 别不我 说你想我这辈\n",
            " _ 不分开 我一著这样牵着你起想想我爱见你是 快不 你想 我爱要再想 我不要再想 我不能再想 我不能再想 我不\n",
            "epoch 150, perplexity 3.961371, time 0.12 sec\n",
            " _ 分开 平候堂 是属于明年 白有的在丽 你的完美主义 太彻底 分我连恨都难以 我 想和你在语日 我只想你的\n",
            " _ 不分开 我一你的叹息 对作依依不舍 连隔壁 干什么 我被天这样 老唱盘 旧皮箱 装属了明信 白在的有丽 我\n",
            "epoch 200, perplexity 2.257617, time 0.11 sec\n",
            " _ 分开 娘养的黑猫笑起来像哭 啦啦啦呜 用水晶球 你底子空 恨人己痛 在你跟痛 你不没 连一句珍 停止转 \n",
            " _ 不分开觉 你在我面辈泪 不要再这样打我妈妈 我的伤口被你看封 誓言太沉重泪被纵知 脸上再涌宙 有一条热昏头\n",
            "epoch 250, perplexity 1.779178, time 0.12 sec\n",
            " _ 分开 什么兵 是属于那年代 所有人看着我 抛物线进球 单手过人运球 篮下妙传出手 漂亮 失去意义 戒指在\n",
            " _ 不分开  你知世这坦堡 泛只林跟 悲你马 隐前 篮下的客栈人多 牧草有没有 我马儿有些瘦 天涯尽头 满脸风\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-nFlPXkeWJJi",
        "colab_type": "text"
      },
      "source": [
        "## 6.7 门控循环单元 ( GRU )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "yvDaj4I5WdRs",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Re-import and reload the lyrics dataset for the GRU section.\n",
        "import d2lzh as d2l \n",
        "from mxnet import nd \n",
        "from mxnet.gluon import rnn \n",
        "\n",
        "(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ghqqA1upbS7l",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size \n",
        "ctx = d2l.try_gpu()\n",
        "\n",
        "def get_params():\n",
        "    \"\"\"Allocate all GRU parameters on ctx and attach gradients.\"\"\"\n",
        "    def _one(shape):\n",
        "        # Small random-normal initialization.\n",
        "        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)\n",
        "\n",
        "    def _three():\n",
        "        # One (input->hidden, hidden->hidden, bias) triple per gate.\n",
        "        return (_one((num_inputs, num_hiddens)), \n",
        "            _one((num_hiddens, num_hiddens)), \n",
        "            nd.zeros(num_hiddens, ctx=ctx))\n",
        "    \n",
        "    W_xz, W_hz, b_z = _three()  # update gate\n",
        "    W_xr, W_hr, b_r = _three()  # reset gate\n",
        "    W_xh, W_hh, b_h = _three()  # candidate hidden state\n",
        "    \n",
        "    W_hq = _one((num_hiddens, num_outputs))  # output projection\n",
        "    b_q = nd.zeros(num_outputs, ctx=ctx)\n",
        "    \n",
        "    params = [W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q]\n",
        "    for param in params:\n",
        "        param.attach_grad()\n",
        "    return params "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ImH2jJuvwjCD",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def init_gru_state(batch_size, num_hiddens, ctx):\n",
        "    \"\"\"Initial GRU state: a 1-tuple with one zero (batch_size, num_hiddens) array.\"\"\"\n",
        "    h0 = nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)\n",
        "    return (h0, )"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xBKQY4pix0Bc",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def gru(inputs, state, params):\n",
        "    \"\"\"Run a GRU over inputs (one time step per element); return (outputs, (H,)).\"\"\"\n",
        "    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params \n",
        "    H, = state \n",
        "    outputs = []\n",
        "    for x in inputs:\n",
        "        # Update and reset gates.\n",
        "        update = nd.sigmoid(nd.dot(x, W_xz) + nd.dot(H, W_hz) + b_z)\n",
        "        reset = nd.sigmoid(nd.dot(x, W_xr) + nd.dot(H, W_hr) + b_r)\n",
        "        # Candidate state sees the previous state through the reset gate.\n",
        "        candidate = nd.tanh(nd.dot(x, W_xh) + nd.dot(reset * H, W_hh) + b_h)\n",
        "        # Blend old state and candidate according to the update gate.\n",
        "        H = update * H + (1 - update) * candidate\n",
        "        outputs.append(nd.dot(H, W_hq) + b_q)\n",
        "    return outputs, (H,)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "outputId": "0a27eb1e-b563-4c23-bee9-313219d714c9",
        "id": "9xkc1dT9zRWa",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 228
        }
      },
      "source": [
        "# Train the from-scratch GRU with consecutive sampling (False).\n",
        "num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2 \n",
        "pred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']\n",
        "d2l.train_and_predict_rnn(gru, get_params, init_gru_state, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, False,\n",
        "            num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes)"
      ],
      "execution_count": 100,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 40, perplexity 153.516774, time 0.99 sec\n",
            " - 分开 我想你的让我 爱爱人 我不的让我 爱爱人 我不的让我 爱爱人 我不的让我 爱爱人 我不的让我 爱爱\n",
            " - 不分开 我想你的让我 爱爱人 我不的让我 爱爱人 我不的让我 爱爱人 我不的让我 爱爱人 我不的让我 爱爱\n",
            "epoch 80, perplexity 32.849917, time 0.97 sec\n",
            " - 分开 我想要这样 我有就这样 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我\n",
            " - 不分开 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我\n",
            "epoch 120, perplexity 5.792713, time 0.99 sec\n",
            " - 分开 我想要你的微笑有天都能风看    在我胸口泪著 像这样的让快果我 能地的让我疯狂的可爱女人 坏坏的\n",
            " - 不分开 我知不觉 我跟了这节奏 后知后觉 我该好好生活 我该好好生活 不知不觉 你已经离开我 不知不觉 我\n",
            "epoch 160, perplexity 1.768622, time 0.99 sec\n",
            " - 分开 我想要的没有笑每天乡看到到 就是在最不能 到你到回面对我 甩开 我想就这了吧着你 不想太多 你来一\n",
            " - 不分开 不知再觉 你来一句奏默 后知后觉 我该好好生活 我该好好生活 静静悄悄默默离开 陷入了危险边缘Ba\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dSONBf7tzGS4",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 228
        },
        "outputId": "c48fe492-d59c-41c6-9b64-5f57cf84b5c4"
      },
      "source": [
        "# Same task with Gluon's built-in GRU layer (much faster per epoch: ~0.12s vs ~1s).\n",
        "gru_layer = rnn.GRU(num_hiddens)\n",
        "model = d2l.RNNModel(gru_layer, vocab_size)\n",
        "train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, \n",
        "            num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, prefixes)"
      ],
      "execution_count": 103,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 40, perplexity 153.891103, time 0.12 sec\n",
            " _ 分开 我想你你的让我想想想想你你你的可爱女 我想你的让我想想想想你想你想你想你想你想你想你想你想你想你想\n",
            " _ 不分开 我想你你的让我想想想想你你你的可爱女 我想你的让我想想想想你想你想你想你想你想你想你想你想你想你想\n",
            "epoch 80, perplexity 31.606348, time 0.12 sec\n",
            " _ 分开 我想要这样 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我不要再想 我\n",
            " _ 不分开  没有你在我有 别发你的生快 我想要你 我不要 爱你的手不听 想要你的爱写 一定后 是你的手不多 \n",
            "epoch 120, perplexity 4.693406, time 0.12 sec\n",
            " _ 分开 一直我 说你怎么面对我 别发球我满腔的怒火 我想揍你已经很久 别想躲 说你眼睛看着我 别发抖 快给\n",
            " _ 不分开  我知这这里着我 甩开球我满腔的怒火 我想揍你已经很久 别想躲 说你眼睛看着我 别发抖 快给我抬起\n",
            "epoch 160, perplexity 1.451727, time 0.12 sec\n",
            " _ 分开 别弄 是你开的玩笑 想通 却又再考倒我 说散 你想很久了吧? 败给你的黑色幽默 说散 你想很久了吧\n",
            " _ 不分开  我来这样了我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后觉 我该好好生活 我该好好生\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "IHmsqIosBbNN",
        "colab_type": "text"
      },
      "source": [
        "## 6.8 长短期记忆 ( LSTM )"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Mchpl7KNCeJn",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Re-import and reload the lyrics dataset for the LSTM section.\n",
        "import d2lzh as d2l \n",
        "from mxnet import nd \n",
        "from mxnet.gluon import rnn  \n",
        "\n",
        "(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "u9qf65BXKjSm",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "num_inputs, num_hiddens, num_outputs = vocab_size, 256, vocab_size \n",
        "ctx = d2l.try_gpu()\n",
        "\n",
        "def get_params():\n",
        "    \"\"\"Allocate all LSTM parameters on ctx and attach gradients.\"\"\"\n",
        "    def _one(shape):\n",
        "        # Small random-normal initialization.\n",
        "        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)\n",
        "\n",
        "    def _three():\n",
        "        # One (input->hidden, hidden->hidden, bias) triple per gate.\n",
        "        return (_one((num_inputs, num_hiddens)), \n",
        "            _one((num_hiddens, num_hiddens)), \n",
        "            nd.zeros(num_hiddens, ctx=ctx))\n",
        "    \n",
        "    W_xi, W_hi, b_i = _three()  # input gate\n",
        "    W_xf, W_hf, b_f = _three()  # forget gate\n",
        "    W_xo, W_ho, b_o = _three()  # output gate\n",
        "    W_xc, W_hc, b_c = _three()  # candidate memory cell\n",
        "\n",
        "    W_hq = _one((num_hiddens, num_outputs))  # output projection\n",
        "    b_q = nd.zeros(num_outputs, ctx=ctx)\n",
        "    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q]\n",
        "    for param in params:\n",
        "        param.attach_grad()\n",
        "    return params "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Bb3QV0BzQE4Q",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def init_lstm_state(batch_size, num_hiddens, ctx):\n",
        "    \"\"\"Initial LSTM state: zero hidden state H and zero memory cell C.\"\"\"\n",
        "    shape = (batch_size, num_hiddens)\n",
        "    return (nd.zeros(shape=shape, ctx=ctx), \n",
        "        nd.zeros(shape=shape, ctx=ctx))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_MD5rcobWb1L",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def lstm(inputs, state, params):\n",
        "    \"\"\"Run an LSTM over inputs (one time step per element); return (outputs, (H, C)).\"\"\"\n",
        "    [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o, W_xc, W_hc, b_c, W_hq, b_q] = params \n",
        "    (H, C) = state \n",
        "    outputs = []\n",
        "    for x in inputs:\n",
        "        # Input, forget and output gates.\n",
        "        gate_i = nd.sigmoid(nd.dot(x, W_xi) + nd.dot(H, W_hi) + b_i)\n",
        "        gate_f = nd.sigmoid(nd.dot(x, W_xf) + nd.dot(H, W_hf) + b_f)\n",
        "        gate_o = nd.sigmoid(nd.dot(x, W_xo) + nd.dot(H, W_ho) + b_o)\n",
        "        # Candidate memory cell value.\n",
        "        candidate = nd.tanh(nd.dot(x, W_xc) + nd.dot(H, W_hc) + b_c)\n",
        "        C = gate_f * C + gate_i * candidate\n",
        "        H = gate_o * C.tanh()\n",
        "        outputs.append(nd.dot(H, W_hq) + b_q)\n",
        "    return outputs, (H, C)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "BCdujSpXXt3r",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# Hyperparameters for the LSTM runs (same values as the GRU section).\n",
        "num_epochs, num_steps, batch_size, lr, clipping_theta = 160, 35, 32, 1e2, 1e-2 \n",
        "pred_period, pred_len, prefixes = 40, 50, ['分开', '不分开']"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZaUrR_2qZBXi",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 228
        },
        "outputId": "56e12df8-e3de-480b-b2b9-f75344509fdf"
      },
      "source": [
        "# Train the from-scratch LSTM with consecutive sampling (False).\n",
        "d2l.train_and_predict_rnn(lstm, get_params, init_lstm_state, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char,\n",
        "            char_to_idx, False, num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, \n",
        "            pred_len, prefixes)"
      ],
      "execution_count": 110,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 40, perplexity 211.511178, time 1.18 sec\n",
            " - 分开 我不的 我不 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不\n",
            " - 不分开 我不的我 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 我不的 \n",
            "epoch 80, perplexity 67.897039, time 1.17 sec\n",
            " - 分开 我想你这你你 我想想你想我想 你不你 我不了 我不要 我不要 我不了 我不要 我不了 我不了 我不\n",
            " - 不分开 我想你这你你 我想要你想我 你不你 我不了 我不要 我不要 我不了 我不要 我不了 我不了 我不了\n",
            "epoch 120, perplexity 15.597091, time 1.15 sec\n",
            " - 分开 我想你这生活 我知好这生活 后知后觉 我跟了这节奏 后知后觉 我该好好节活 我知好好生活 后知后觉\n",
            " - 不分开 我想要这样 我不要这样 我不好这生活 后知后觉 你跟了一个我 后知不觉 我该了好节活 我知好好生活\n",
            "epoch 160, perplexity 4.250535, time 1.19 sec\n",
            " - 分开 我已带你 你跟的外人 后知后觉 我该好好节奏 我该好好生活 静静悄悄默默离开 陷入了危险边缘 我 \n",
            " - 不分开 我已经这生我 我知能 爱你 我想开难熬活 我该好这生活 静知悄觉默离离开 陷入了危险边缘B 心说我\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "DZeZohd9ZmoO",
        "colab_type": "code",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 228
        },
        "outputId": "6267791d-ca80-4679-c1e2-204275e5c4db"
      },
      "source": [
        "# Same task with Gluon's built-in LSTM layer.\n",
        "lstm_layer = rnn.LSTM(num_hiddens)\n",
        "model = d2l.RNNModel(lstm_layer, vocab_size)\n",
        "d2l.train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx, corpus_indices, idx_to_char, char_to_idx, \n",
        "                num_epochs, num_steps, lr, clipping_theta, batch_size, pred_period, pred_len, \n",
        "                prefixes)"
      ],
      "execution_count": 112,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "epoch 40, perplexity 220.343904, time 0.14 sec\n",
            " - 分开 我不的 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 \n",
            " - 不分开 我不的 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 我 我不 \n",
            "epoch 80, perplexity 66.893844, time 0.14 sec\n",
            " - 分开 我想你你的你我 想想你 你你我 我不要这不我 你不不觉 我不要这不 我不要这 我不不好 我不不好 \n",
            " - 不分开 我想要你的你 我不要这 我不要这不 我不要这不 我不要这不 我不要这不 我不要这不 我不不好 我不\n",
            "epoch 120, perplexity 13.965304, time 0.14 sec\n",
            " - 分开 我想你的太笑 一你 在小我 一九的碗  有一碗 旧片段 有一风 装有的风  有有苦 旧一了 装一风\n",
            " - 不分开我 你要你的微我不要 你说你的风 我一定到天 我不要我我 我要 我不  爱有人的我 你要就我 你你的\n",
            "epoch 160, perplexity 3.680804, time 0.14 sec\n",
            " - 分开 说子 是是我的玩  有在起 是谁了在在手  有在起 是谁是双截极 所色在兮 快使用双截棍 哼哼哈兮\n",
            " - 不分开想你 想样的没样怎著么 也是一了 你想我难多在我有错错错搞错 拜托 我想是你的脑袋有问题 随便说说 \n"
          ],
          "name": "stdout"
        }
      ]
    }
  ]
}