{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 6.4 循环神经网络的从零开始实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 173,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "1.6.0\ncuda\n"
    }
   ],
   "source": [
    "import time\n",
    "import math\n",
    "import numpy as np\n",
    "import torch\n",
    "from torch import nn, optim\n",
    "import torch.nn.functional as F\n",
    "\n",
    "import sys\n",
    "sys.path.append(\"..\") \n",
    "import d2lzh_pytorch as d2l\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "print(torch.__version__)\n",
    "print(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 174,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "(corpus_indices, char_to_idx, idx_to_char, vocab_size) = d2l.load_data_jay_lyrics()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 175,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "[695, 359, 990, 286, 604, 501, 572, 695, 359, 117]\n"
    }
   ],
   "source": [
    "print(corpus_indices[0:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 176,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "dict"
     },
     "metadata": {},
     "execution_count": 176
    }
   ],
   "source": [
    "type(char_to_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 177,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "1"
     },
     "metadata": {},
     "execution_count": 177
    }
   ],
   "source": [
    "char_to_idx['平'] # 字符转索引"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 178,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'平'"
     },
     "metadata": {},
     "execution_count": 178
    }
   ],
   "source": [
    "idx_to_char[1] # 索引转字符"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 179,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "1027"
     },
     "metadata": {},
     "execution_count": 179
    }
   ],
   "source": [
    "vocab_size #词典大小"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.1 one-hot向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 180,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([2, 1027])"
     },
     "metadata": {},
     "execution_count": 180
    }
   ],
   "source": [
     "def one_hot(x, n_class, dtype=torch.float32): \n",
     "    \"\"\"One-hot encode a 1-D index tensor.\n",
     "\n",
     "    x shape: (batch,); output shape: (batch, n_class).\n",
     "    \"\"\"\n",
     "    # scatter_ requires int64 indices.\n",
     "    x = x.long()\n",
     "    res = torch.zeros(x.shape[0], n_class, dtype=dtype, device=x.device)\n",
     "    # Write a 1 into each row at its index column: res[i, x[i]] = 1.\n",
     "    res.scatter_(1, x.view(-1, 1), 1)\n",
     "    return res\n",
     "    \n",
     "x = torch.tensor([0, 2])\n",
     "one_hot(x, vocab_size).shape # (n, v); n = batch size, v = vocab size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 181,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([14, 1027])"
     },
     "metadata": {},
     "execution_count": 181
    }
   ],
   "source": [
    "x = torch.tensor([0,13,2,3,4,5,6,7,8,9,10,11,12,13])\n",
    "b = one_hot(x, vocab_size)\n",
    "b.shape"
   ]
  },
  {
   "source": [
    "每次采样的小批量的形状是(批量大小, 时间步数)；\n",
    "下面的函数将这样的小批量变换成数个可以输入进网络的形状为(批量大小, 词典大小)的矩阵，矩阵个数等于时间步数。"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 182,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "tensor([[0, 1, 2, 3, 4],\n        [5, 6, 7, 8, 9]])"
     },
     "metadata": {},
     "execution_count": 182
    }
   ],
   "source": [
     "# This function is also saved in the d2lzh_pytorch package for later reuse.\n",
     "def to_onehot(X, n_class):  \n",
     "    # X shape: (batch, seq_len); output: a list of seq_len tensors,\n",
     "    # each of shape (batch, n_class) -- one matrix per time step.\n",
     "    return [one_hot(X[:, i], n_class) for i in range(X.shape[1])]\n",
     "\n",
     "X = torch.arange(10).view(2, 5) # (2, 5) = (batch size, num time steps)\n",
     "X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 183,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "[[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]\n"
    }
   ],
   "source": [
    "inputs = to_onehot(X, vocab_size)\n",
    "print(inputs[0].numpy()[:,0:20])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 184,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([2, 1027])"
     },
     "metadata": {},
     "execution_count": 184
    }
   ],
   "source": [
    "inputs[0].shape # (批量大小, 词典大小)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 185,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "5"
     },
     "metadata": {},
     "execution_count": 185
    }
   ],
   "source": [
    "len(inputs) # 矩阵个数等于时间步数5"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.2 初始化模型参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 186,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "will use cuda\n"
    }
   ],
   "source": [
    "num_inputs = vocab_size # d 输入特征个数，此处同 词典大小\n",
    "num_hiddens = 256 #隐藏单元个数\n",
    "num_outputs = vocab_size # q 输出大小\n",
    "print('will use', device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 187,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "def get_params():\n",
     "    \"\"\"Create and return the RNN parameters as an nn.ParameterList.\n",
     "\n",
     "    Order: [W_xh, W_hh, b_h, W_hq, b_q]. Uses the module-level\n",
     "    num_inputs / num_hiddens / num_outputs / device globals.\n",
     "    \"\"\"\n",
     "    def _one(shape):\n",
     "        # Weights drawn from N(0, 0.01) on the target device.\n",
     "        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)\n",
     "        return torch.nn.Parameter(ts, requires_grad=True)\n",
     "\n",
     "    # Hidden-layer parameters\n",
     "    W_xh = _one((num_inputs, num_hiddens))\n",
     "    W_hh = _one((num_hiddens, num_hiddens))\n",
     "    b_h = torch.nn.Parameter(torch.zeros(num_hiddens, device=device, requires_grad=True))\n",
     "    # Output-layer parameters\n",
     "    W_hq = _one((num_hiddens, num_outputs))\n",
     "    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, requires_grad=True))\n",
     "    return nn.ParameterList([W_xh, W_hh, b_h, W_hq, b_q])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.3 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 188,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def init_rnn_state(batch_size, num_hiddens, device):\n",
     "    # Initial hidden state: a 1-tuple holding a zero tensor of shape\n",
     "    # (batch_size, num_hiddens). Returned as a tuple so the same interface\n",
     "    # works for models with multiple state tensors (e.g. LSTM).\n",
     "    return (torch.zeros((batch_size, num_hiddens), device=device), )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 189,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "def rnn(inputs, state, params):\n",
     "    \"\"\"Vanilla RNN forward pass over a sequence of one-hot inputs.\n",
     "\n",
     "    inputs and outputs are both lists of num_steps matrices, each of shape\n",
     "    (batch_size, vocab_size). state is a 1-tuple with the hidden state H of\n",
     "    shape (batch_size, num_hiddens).\n",
     "    \"\"\"\n",
     "    W_xh, W_hh, b_h, W_hq, b_q = params\n",
     "    H, = state\n",
     "    outputs = []\n",
     "    # len(inputs) == num_steps; e.g. when num_steps=5 this loop runs 5 times.\n",
     "    for X in inputs: # X: (batch_size, vocab_size); here vocab size v equals input feature count d\n",
     "        # Hidden-state update: H_t = tanh(X_t W_xh + H_{t-1} W_hh + b_h)\n",
     "        H = torch.tanh(torch.matmul(X, W_xh) + torch.matmul(H, W_hh) + b_h)\n",
     "        # Output at this step: Y_t = H_t W_hq + b_q\n",
     "        Y = torch.matmul(H, W_hq) + b_q\n",
     "        outputs.append(Y)\n",
     "    return outputs, (H,)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 190,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([2, 256])"
     },
     "metadata": {},
     "execution_count": 190
    }
   ],
   "source": [
    "state = init_rnn_state(X.shape[0], num_hiddens, device)\n",
    "state[0].shape # H[0]= n * h ; 批量大小 n= 2; h= 256"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 191,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([2, 5])"
     },
     "metadata": {},
     "execution_count": 191
    }
   ],
   "source": [
    "X.shape # (2,5)为(批量大小, 时间步数)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 192,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "5"
     },
     "metadata": {},
     "execution_count": 192
    }
   ],
   "source": [
    "inputs = to_onehot(X.to(device), vocab_size)\n",
    "len(inputs) # 矩阵个数等于时间步数5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 193,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([2, 1027])"
     },
     "metadata": {},
     "execution_count": 193
    }
   ],
   "source": [
    "inputs[0].shape # 可输入网络的形状：(批量大小n, 词典大小v)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 194,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "torch.Size([2, 1027])\ntorch.Size([2, 1027])\ntorch.Size([2, 1027])\ntorch.Size([2, 1027])\ntorch.Size([2, 1027])\n"
    }
   ],
   "source": [
    "for X in inputs:\n",
    "    print(X.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 195,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "时间步数num_steps: 5 \n输出Y的形状： torch.Size([2, 1027]) \n隐藏状态的形状 torch.Size([2, 256])\n"
    }
   ],
   "source": [
    "params = get_params()\n",
    "outputs, state_new = rnn(inputs, state, params)\n",
    "print(\n",
    "    '时间步数num_steps:', len(outputs)\n",
    "    ,'\\n输出Y的形状：', outputs[0].shape # 批量大小n * 字典大小v\n",
    "    ,'\\n隐藏状态的形状', state_new[0].shape # H[0]= n * h ; 批量大小 n= 2; h= 256\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.4 定义预测函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 196,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# This function is also saved in the d2lzh_pytorch package for later reuse.\n",
     "# Given a prefix (a string of several characters), predict the next\n",
     "# num_chars characters.\n",
     "def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,\n",
     "                num_hiddens, vocab_size, device, idx_to_char, char_to_idx):\n",
     "    # Batch size 1: we generate a single sequence.\n",
     "    state = init_rnn_state(1, num_hiddens, device)\n",
     "    output = [char_to_idx[prefix[0]]]\n",
     "    # Runs num_chars + len(prefix) - 1 steps: first feeds the remaining\n",
     "    # prefix characters, then generates num_chars new characters.\n",
     "    for t in range(num_chars + len(prefix) - 1):\n",
     "        # Use the previous time step's output as the current step's input.\n",
     "        X = to_onehot(torch.tensor([[output[-1]]], device=device), vocab_size)\n",
     "        # Compute the output and update the hidden state.\n",
     "        (Y, state) = rnn(X, state, params)\n",
     "        # The next input is either the next character of the prefix or the\n",
     "        # current best (argmax) prediction.\n",
     "        if t < len(prefix) - 1:\n",
     "            output.append(char_to_idx[prefix[t + 1]])\n",
     "        else:\n",
     "            output.append(int(Y[0].argmax(dim=1).item()))\n",
     "    return ''.join([idx_to_char[i] for i in output])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 197,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'分开滴几寄宙秀自什千童果'"
     },
     "metadata": {},
     "execution_count": 197
    }
   ],
   "source": [
    "#------------------预测10个字符\n",
    "predict_rnn('分开', 10, rnn, params, init_rnn_state, num_hiddens, vocab_size,\n",
    "            device, idx_to_char, char_to_idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 198,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([1, 256])"
     },
     "metadata": {},
     "execution_count": 198
    }
   ],
   "source": [
    "state = init_rnn_state(1, num_hiddens, device)\n",
    "state[0].shape # H[0]= n * h ; 批量大小 n= 1; h= 256"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 199,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "[261]"
     },
     "metadata": {},
     "execution_count": 199
    }
   ],
   "source": [
    "prefix = '分开'\n",
    "output = [char_to_idx[prefix[0]]]\n",
    "output # 分的索引"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 200,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "0\t1\t2\t3\t4\t5\t6\t7\t8\t9\t10"
    }
   ],
   "source": [
    "num_chars = 10\n",
    "for t in range(num_chars + len(prefix) - 1):\n",
    "    print(t,end='\\t')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 201,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "261"
     },
     "metadata": {},
     "execution_count": 201
    }
   ],
   "source": [
    "in1 = output[-1]\n",
    "in1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 202,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([1, 1])"
     },
     "metadata": {},
     "execution_count": 202
    }
   ],
   "source": [
    "inX = torch.tensor([[in1]], device=device)\n",
    "inX.shape # (批量大小, 时间步数)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 203,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "时间步数: 1 \n可输入网络的形状: torch.Size([1, 1027])\n"
    }
   ],
   "source": [
    "t = 0\n",
    "# 每次采样的小批量的形状是(批量大小, 时间步数)； 将这样的小批量变换成数个可以输入进网络的形状为(批量大小, 词典大小)的矩阵，矩阵个数等于时间步数。\n",
    "# 将上一时间步的输出作为当前时间步的输入\n",
    "X = to_onehot(inX, vocab_size)\n",
    "print(\n",
    "    '时间步数:',len(X)\n",
    "    ,'\\n可输入网络的形状:',X[0].shape # (批量大小, 词典大小)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 204,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "时间步数num_steps: 1 \n输出Y的形状： torch.Size([1, 1027]) \n隐藏状态的形状 torch.Size([1, 256])\n"
    }
   ],
   "source": [
    "# 计算输出和更新隐藏状态\n",
    "(Y, state) = rnn(X, state, params)\n",
    "print(\n",
    "    '时间步数num_steps:', len(Y)\n",
    "    ,'\\n输出Y的形状：', Y[0].shape # 批量大小n * 字典大小v\n",
    "    ,'\\n隐藏状态的形状', state[0].shape # H[0]= n * h ; 批量大小 n= 1; h= 256\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 205,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "True"
     },
     "metadata": {},
     "execution_count": 205
    }
   ],
   "source": [
    "t < len(prefix) - 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 206,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "981"
     },
     "metadata": {},
     "execution_count": 206
    }
   ],
   "source": [
    "Y[0].argmax(dim=1).item()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.5 裁剪梯度"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 207,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# This function is also saved in the d2lzh_pytorch package for later reuse.\n",
     "def grad_clipping(params, theta, device):\n",
     "    \"\"\"Clip gradients in place so their global L2 norm is at most theta.\"\"\"\n",
     "    norm = torch.tensor([0.0], device=device)\n",
     "    # Accumulate the squared L2 norm over all parameters' gradients.\n",
     "    for param in params:\n",
     "        norm += (param.grad.data ** 2).sum()\n",
     "    norm = norm.sqrt().item()\n",
     "    if norm > theta:\n",
     "        # Rescale every gradient by the same factor theta / norm.\n",
     "        for param in params:\n",
     "            param.grad.data *= (theta / norm)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.6 困惑度\n",
    "## 6.4.7 定义模型训练函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 208,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# This function is also saved in the d2lzh_pytorch package for later reuse.\n",
     "def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
     "                          vocab_size, device, corpus_indices, idx_to_char,\n",
     "                          char_to_idx, is_random_iter, num_epochs, num_steps,\n",
     "                          lr, clipping_theta, batch_size, pred_period,\n",
     "                          pred_len, prefixes):\n",
     "    \"\"\"Train the RNN with SGD and periodically print sample predictions.\"\"\"\n",
     "    if is_random_iter:\n",
     "        data_iter_fn = d2l.data_iter_random\n",
     "    else:\n",
     "        data_iter_fn = d2l.data_iter_consecutive\n",
     "    params = get_params()\n",
     "    loss = nn.CrossEntropyLoss()\n",
     "\n",
     "    for epoch in range(num_epochs):\n",
     "        if not is_random_iter:  # With consecutive sampling, init the hidden state once per epoch\n",
     "            state = init_rnn_state(batch_size, num_hiddens, device)\n",
     "        l_sum, n, start = 0.0, 0, time.time()\n",
     "        data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)\n",
     "        for X, Y in data_iter:\n",
     "            if is_random_iter:  # With random sampling, re-init the hidden state before every minibatch\n",
     "                state = init_rnn_state(batch_size, num_hiddens, device)\n",
     "            else:  # Otherwise detach the hidden state from the computation graph\n",
     "                for s in state:\n",
     "                    s.detach_()\n",
     "            \n",
     "            inputs = to_onehot(X, vocab_size)\n",
     "            # outputs is a list of num_steps matrices of shape (batch_size, vocab_size)\n",
     "            (outputs, state) = rnn(inputs, state, params)\n",
     "            # After concatenation the shape is (num_steps * batch_size, vocab_size)\n",
     "            outputs = torch.cat(outputs, dim=0)\n",
     "            # Y has shape (batch_size, num_steps); transpose and flatten it into a\n",
     "            # vector of length batch * num_steps so it lines up row-for-row with outputs\n",
     "            y = torch.transpose(Y, 0, 1).contiguous().view(-1)\n",
     "            # Average classification error via cross-entropy loss\n",
     "            l = loss(outputs, y.long())\n",
     "            \n",
     "            # Zero the gradients (skipped on the very first step, before any backward)\n",
     "            if params[0].grad is not None:\n",
     "                for param in params:\n",
     "                    param.grad.data.zero_()\n",
     "            l.backward()\n",
     "            grad_clipping(params, clipping_theta, device)  # clip gradients\n",
     "            d2l.sgd(params, lr, 1)  # the loss is already a mean, so no further averaging\n",
     "            l_sum += l.item() * y.shape[0]\n",
     "            n += y.shape[0]\n",
     "\n",
     "        if (epoch + 1) % pred_period == 0:\n",
     "            print('epoch %d, perplexity %f, time %.2f sec' % (\n",
     "                epoch + 1, math.exp(l_sum / n), time.time() - start))\n",
     "            for prefix in prefixes:\n",
     "                print(' -', predict_rnn(prefix, pred_len, rnn, params, init_rnn_state,\n",
     "                    num_hiddens, vocab_size, device, idx_to_char, char_to_idx))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6.4.8 训练模型并创作歌词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 209,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 循环次数---时间步数---批量大小n--学习率-梯度裁剪阈值θ\n",
    "num_epochs, num_steps, batch_size, lr, clipping_theta = 250, 35, 32, 1e2, 1e-2\n",
    "#----------------------前缀\n",
    "pred_period, pred_len, prefixes = 50, 50, ['分开', '不分开']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 210,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "epoch 50, perplexity 70.383839, time 0.13 sec\n - 分开 我想你的可爱 我知你的爱写 一知在觉 我不要这 我有我这想你 一场我不 我不 我不能再想 我不要再\n - 不分开  我的让我感狂的可爱女人 坏坏的让我疯狂的可爱女人 坏坏的让我疯狂的可爱女人 坏坏的让我疯狂的可爱\nepoch 100, perplexity 10.000823, time 0.13 sec\n - 分开 有什么 有步我抬起头 有话去对医药箱说 别怪我 别怪我 娘你 那话 回对 有箭 一子在酒留 辛一坊\n - 不分开  我有回烦 不知我回 你是我 别子我抬三头 有话去对医药箱说 别 我不多再想 我不要再想你 爱有我\nepoch 150, perplexity 2.866636, time 0.14 sec\n - 分开 一直令步三步的母斑鸠 印地安老 在小在外截棍 哼哼哈兮 快使用双截棍 哼哼哈兮 快使用双截棍 哼哼\n - 不分开扫 我叫你爸 你小我妈 这样跟吗 一句落纵 不惯了容 我一定功节 再的可美 我想你烦我的寂寞 漂亮的\nepoch 200, perplexity 1.593022, time 0.14 sec\n - 分开 一只这枪 在谁完动的溪边 默默等待 娘子 一壶好酒 再来一哭 说自己痛 不再感动 没有梦 连不知轻\n - 不分开吗 然后将过去 慢慢温习 让我爱上你 那场悲剧 是你完美演出的一场戏 宁愿你碎过泣 再狠狠忘多 还是\nepoch 250, perplexity 1.329578, time 0.14 sec\n - 分开不想 相样心没的悉武日记 教化风断熟悉 我的完美主义 太彻底 我 还有了 四句脚没有 有已布 一皮箱\n - 不分开期 我不能再想 我不 我不 我不能 爱情走的太快就像龙卷风 不能承受我已无处可躲 我不要再想 我不要\n"
    }
   ],
   "source": [
    "train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
    "                      vocab_size, device, corpus_indices, idx_to_char,\n",
    "                      char_to_idx, True, num_epochs, num_steps, lr,\n",
    "                      clipping_theta, batch_size, pred_period, pred_len,\n",
    "                      prefixes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 211,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "epoch 50, perplexity 60.517727, time 0.13 sec\n - 分开 我想要这 你有了空 如果我的见 让我的红 在人了人 不要我 别你我的起头 你的在 一直我 别子我的\n - 不分开 你不了 一沉我的起头 一场 娘不再你 我想了这 我想了这 我想了这 我想了这 我想了这 我想了这 \nepoch 100, perplexity 7.046009, time 0.14 sec\n - 分开 一颗的 旧时光 一九四酒 回杰一碗热粥 配上几斤的牛 一直寄老斑鸠 平经了不多 你爱着我的证据 让\n - 不分开觉 你已经离 我来好这着  没有你在我有多烦恼多难熬  没有你烦 我有多烦恼  没有你在我有多烦恼多\nepoch 150, perplexity 2.087119, time 0.14 sec\n - 分开 一颗在依 每谁心空 我不一定熬  没有你烦我有多烦恼多烦恼  没有云烦 我试多烦恼  没有你烦我有\n - 不分开觉 你已经离开我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后觉 我该好好生活 我该好好生\nepoch 200, perplexity 1.297999, time 0.14 sec\n - 分开 问候开么是每找进的凯萨 你阳斜过多在我面的事  你在你怎么跟 像天我这样汉是样风面人  我的世界已\n - 不分开觉 你已经离开我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后觉 我该好好生活 我该好好生\nepoch 250, perplexity 1.145852, time 0.13 sec\n - 分开 问候我 谁是神枪手的巫墙 隔壁是一术馆 店你说我飞到你宙去 想要和你融化在一起 融化在宇宙里 我每\n - 不分开觉 你已经离开我 不知不觉 我跟了这节奏 后知后觉 又过了一个秋 后知后觉 我该好好生活 我该好好生\n"
    }
   ],
   "source": [
    "train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,\n",
    "                      vocab_size, device, corpus_indices, idx_to_char,\n",
    "                      char_to_idx, False, num_epochs, num_steps, lr,\n",
    "                      clipping_theta, batch_size, pred_period, pred_len,\n",
    "                      prefixes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 212,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "True"
     },
     "metadata": {},
     "execution_count": 212
    }
   ],
   "source": [
    "is_random_iter = False # 使用相邻采样\n",
    "data_iter_fn = d2l.data_iter_consecutive\n",
    "params = get_params()\n",
    "loss = nn.CrossEntropyLoss()\n",
    "epoch = 0\n",
    "not is_random_iter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 213,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([32, 256])"
     },
     "metadata": {},
     "execution_count": 213
    }
   ],
   "source": [
    "# 使用相邻采样，在epoch开始时初始化隐藏状态\n",
    "state = init_rnn_state(batch_size, num_hiddens, device)\n",
    "state[0].shape # H[0]= n * h ; 批量大小 n= 32; h= 256"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 214,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "1600850281.9457026"
     },
     "metadata": {},
     "execution_count": 214
    }
   ],
   "source": [
    "l_sum = 0.0 # loss的和\n",
    "n = 0\n",
    "start = time.time()\n",
    "start"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 215,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "X: tensor([[ 695.,  359.,  990.,  ...,  479.,  572.,   28.],\n        [ 491.,  623.,  768.,  ...,  183.,  768.,  735.],\n        [ 317.,   15.,  667.,  ...,  947.,  768., 1022.],\n        ...,\n        [ 546.,  230.,  572.,  ...,  424.,  762.,  574.],\n        [  31.,  332.,  598.,  ...,  939.,  962.,  903.],\n        [ 990.,  126.,  572.,  ...,  692.,  572.,  510.]], device='cuda:0') \nY: tensor([[ 359.,  990.,  286.,  ...,  572.,   28.,  535.],\n        [ 623.,  768., 1024.,  ...,  768.,  735.,  308.],\n        [  15.,  667., 1003.,  ...,  768., 1022.,  544.],\n        ...,\n        [ 230.,  572.,  210.,  ...,  762.,  574.,  964.],\n        [ 332.,  598.,  985.,  ...,  962.,  903.,   56.],\n        [ 126.,  572.,  186.,  ...,  572.,  510.,  738.]], device='cuda:0')\n"
    }
   ],
   "source": [
    "data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, device)\n",
    "for X, Y in data_iter:\n",
    "    print(\n",
    "        'X:',X\n",
    "        ,'\\nY:',Y\n",
    "    )\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 216,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 使用相邻采样，需要使用detach函数从计算图分离隐藏状态, 这是为了使模型参数的梯度计算只依赖一次迭代读取的小批量序列(防止梯度计算开销太大)\n",
    "for s in state:\n",
    "    s.detach_()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 217,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([32, 35])"
     },
     "metadata": {},
     "execution_count": 217
    }
   ],
   "source": [
    "X.shape # 每次采样的小批量的形状是(批量大小, 时间步数)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 218,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "时间步数: 35 \n可输入网络的形状: torch.Size([32, 1027])\n"
    }
   ],
   "source": [
    "# 每次采样的小批量的形状是(批量大小, 时间步数)； 将这样的小批量变换成数个可以输入进网络的形状为(批量大小, 词典大小)的矩阵，矩阵个数等于时间步数。\n",
    "# 将上一时间步的输出作为当前时间步的输入\n",
    "inputs = to_onehot(X, vocab_size)\n",
    "print(\n",
    "    '时间步数:',len(inputs)\n",
    "    ,'\\n可输入网络的形状:',inputs[0].shape # (批量大小, 词典大小)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 219,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "时间步数num_steps: 35 \n输出Y的形状： torch.Size([32, 1027]) \n隐藏状态的形状 torch.Size([32, 256])\n"
    }
   ],
   "source": [
    "# 计算输出和更新隐藏状态\n",
    "# outputs有num_steps个形状为(batch_size, vocab_size)的矩阵\n",
    "(outputs, state) = rnn(inputs, state, params)\n",
    "print(\n",
    "    '时间步数num_steps:', len(outputs)\n",
    "    ,'\\n输出Y的形状：', outputs[0].shape # 批量大小n * 字典大小v\n",
    "    ,'\\n隐藏状态的形状', state[0].shape # H[0]= n * h ; 批量大小 n= 32; h= 256\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 220,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "list"
     },
     "metadata": {},
     "execution_count": 220
    }
   ],
   "source": [
    "type(outputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 221,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "1120"
     },
     "metadata": {},
     "execution_count": 221
    }
   ],
   "source": [
    "35 * 32"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 222,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([1120, 1027])"
     },
     "metadata": {},
     "execution_count": 222
    }
   ],
   "source": [
    "# 拼接之后形状为(num_steps * batch_size, vocab_size)\n",
    "outputs = torch.cat(outputs, dim=0)\n",
    "outputs.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 223,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([32, 35])"
     },
     "metadata": {},
     "execution_count": 223
    }
   ],
   "source": [
    "Y.shape # Y的形状是(batch_size, num_steps)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 224,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([35, 32])"
     },
     "metadata": {},
     "execution_count": 224
    }
   ],
   "source": [
    "y = torch.transpose(Y, 0, 1) #转置\n",
    "y.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 225,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "torch.Size([1120])"
     },
     "metadata": {},
     "execution_count": 225
    }
   ],
   "source": [
    "# 变成长度为 num_steps * batch_size 的向量，这样跟输出的行一一对应\n",
    "y = y.contiguous().view(-1)\n",
    "y.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 226,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "tensor([359., 623.,  15.,  ..., 964.,  56., 738.], device='cuda:0')"
     },
     "metadata": {},
     "execution_count": 226
    }
   ],
   "source": [
    "y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 227,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "tensor([359, 623,  15,  ..., 964,  56, 738], device='cuda:0')"
     },
     "metadata": {},
     "execution_count": 227
    }
   ],
   "source": [
    "y.long()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 228,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "tensor(6.9344, device='cuda:0', grad_fn=<NllLossBackward>)"
     },
     "metadata": {},
     "execution_count": 228
    }
   ],
   "source": [
    "# 使用交叉熵损失计算平均分类误差\n",
    "l = loss(outputs, y.long())\n",
    "l"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 231,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "text": "\u001b[1;31mSignature:\u001b[0m \u001b[0md2l\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msgd\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlr\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mDocstring:\u001b[0m <no docstring>\n\u001b[1;31mSource:\u001b[0m   \n\u001b[1;32mdef\u001b[0m \u001b[0msgd\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mparams\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlr\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\n\u001b[0m    \u001b[1;31m# 为了和原书保持一致，这里除以了batch_size，但是应该是不用除的，因为一般用PyTorch计算loss时就默认已经\u001b[0m\u001b[1;33m\n\u001b[0m    \u001b[1;31m# 沿batch维求了平均了。\u001b[0m\u001b[1;33m\n\u001b[0m    \u001b[1;32mfor\u001b[0m \u001b[0mparam\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mparams\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\n\u001b[0m        \u001b[0mparam\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m \u001b[1;33m-=\u001b[0m \u001b[0mlr\u001b[0m \u001b[1;33m*\u001b[0m \u001b[0mparam\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgrad\u001b[0m \u001b[1;33m/\u001b[0m \u001b[0mbatch_size\u001b[0m \u001b[1;31m# 注意这里更改param时用的param.data\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mFile:\u001b[0m      d:\\workspace\\deep-learning\\动手学深度学习\\d2lzh_pytorch\\utils.py\n\u001b[1;31mType:\u001b[0m      function\n"
    }
   ],
   "source": [
    "d2l.sgd??"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 229,
   "metadata": {},
   "outputs": [],
   "source": [
    "if params[0].grad is not None:\n",
    "    for param in params:\n",
    "        param.grad.data.zero_()\n",
    "l.backward()\n",
    "grad_clipping(params, clipping_theta, device)  # 裁剪梯度\n",
    "d2l.sgd(params, lr, 1)  # 因为误差已经取过均值，梯度不用再做平均"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 232,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "6.934379577636719"
     },
     "metadata": {},
     "execution_count": 232
    }
   ],
   "source": [
    "l.item()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 233,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "1120"
     },
     "metadata": {},
     "execution_count": 233
    }
   ],
   "source": [
    "y.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 237,
   "metadata": {},
   "outputs": [],
   "source": [
    "l_sum = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 238,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "7766.505126953125"
     },
     "metadata": {},
     "execution_count": 238
    }
   ],
   "source": [
    "l_sum += l.item() * y.shape[0]\n",
    "l_sum"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 239,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "1120"
     },
     "metadata": {},
     "execution_count": 239
    }
   ],
   "source": [
    "n += y.shape[0]\n",
    "n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 240,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "50"
     },
     "metadata": {},
     "execution_count": 240
    }
   ],
   "source": [
    "pred_period"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 241,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "6.934379577636719"
     },
     "metadata": {},
     "execution_count": 241
    }
   ],
   "source": [
    "l_sum / n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 242,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "['分开', '不分开']"
     },
     "metadata": {},
     "execution_count": 242
    }
   ],
   "source": [
    "prefixes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.7 64-bit ('d2l': conda)",
   "language": "python",
   "name": "python37764bitd2lconda94fc7ab78ae34cabbef0e75f5636f253"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7-final"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}