{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "docs = [\n",
    "        \"新的数学方法和概念，常常比解决数学问题本身更重要。\",\n",
    "        \"在数学中，我们发现真理的主要工具是归纳和模拟。\",\n",
    "        \"数学方法渗透并支配着一切自然科学的理论分支。它愈来愈成为衡量科学成就的主要标志了。\",\n",
    "        \"第一是数学，第二是数学，第三是数学。\",\n",
    "        \"历史使人贤明，诗造成气质高雅的人，数学使人高尚，自然哲学使人深沉，道德使人稳重，而伦理学和修辞学则使人善于争论。\"\n",
    "       ]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 使用jieba 分词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\62669\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.637 seconds.\n",
      "Prefix dict has been built succesfully.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[['新',\n",
       "  '的',\n",
       "  '数学方法',\n",
       "  '和',\n",
       "  '概念',\n",
       "  '，',\n",
       "  '常常',\n",
       "  '比',\n",
       "  '解决',\n",
       "  '数学',\n",
       "  '问题',\n",
       "  '本身',\n",
       "  '更',\n",
       "  '重要',\n",
       "  '。'],\n",
       " ['在',\n",
       "  '数学',\n",
       "  '中',\n",
       "  '，',\n",
       "  '我们',\n",
       "  '发现',\n",
       "  '真理',\n",
       "  '的',\n",
       "  '主要',\n",
       "  '工具',\n",
       "  '是',\n",
       "  '归纳',\n",
       "  '和',\n",
       "  '模拟',\n",
       "  '。'],\n",
       " ['数学方法',\n",
       "  '渗透',\n",
       "  '并',\n",
       "  '支配',\n",
       "  '着',\n",
       "  '一切',\n",
       "  '自然科学',\n",
       "  '的',\n",
       "  '理论',\n",
       "  '分支',\n",
       "  '。',\n",
       "  '它',\n",
       "  '愈来愈',\n",
       "  '成为',\n",
       "  '衡量',\n",
       "  '科学',\n",
       "  '成就',\n",
       "  '的',\n",
       "  '主要',\n",
       "  '标志',\n",
       "  '了',\n",
       "  '。'],\n",
       " ['第一', '是', '数学', '，', '第二', '是', '数学', '，', '第三', '是', '数学', '。'],\n",
       " ['历史',\n",
       "  '使人',\n",
       "  '贤明',\n",
       "  '，',\n",
       "  '诗',\n",
       "  '造成',\n",
       "  '气质高雅',\n",
       "  '的',\n",
       "  '人',\n",
       "  '，',\n",
       "  '数学',\n",
       "  '使人',\n",
       "  '高尚',\n",
       "  '，',\n",
       "  '自然哲学',\n",
       "  '使人',\n",
       "  '深沉',\n",
       "  '，',\n",
       "  '道德',\n",
       "  '使人',\n",
       "  '稳重',\n",
       "  '，',\n",
       "  '而',\n",
       "  '伦理学',\n",
       "  '和',\n",
       "  '修辞学',\n",
       "  '则',\n",
       "  '使',\n",
       "  '人',\n",
       "  '善于',\n",
       "  '争论',\n",
       "  '。']]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import jieba\n",
    "words_list = [list(jieba.cut(doc)) for doc in docs]\n",
    "words_list"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 构建词典"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "vocb = set([word for words in words_list for word in words])\n",
    "word_to_idx = {word: i for i, word in enumerate(vocb)}\n",
    "idx_to_word = {word_to_idx[word]: word for word in word_to_idx}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 使用nn.Embedding创建词嵌入单元"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Embedding(63, 200)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "vocb_size = len(vocb)\n",
    "embedding_size = 200\n",
    "embeds = nn.Embedding(vocb_size, embedding_size) \n",
    "embeds"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 查找词嵌入，该方法对应字典中的id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "28"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "word_to_idx[\"数学\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-6.4583e-01,  7.7057e-01, -6.3130e-01,  5.4269e-01, -1.4782e+00,\n",
       "         -1.2041e-01,  5.9758e-01,  1.5149e+00, -2.6339e-01, -6.7263e-01,\n",
       "         -6.4217e-01,  1.0209e+00, -9.4901e-02, -3.6418e-01, -1.0050e-01,\n",
       "         -1.4244e+00, -1.2991e+00, -1.0365e+00,  1.4462e-01, -1.2257e+00,\n",
       "         -3.6916e-02, -4.1594e-01, -1.9010e-02, -4.3103e-01, -1.4997e+00,\n",
       "          1.2723e+00,  7.0842e-01,  1.0682e+00,  3.6274e-01, -5.8121e-01,\n",
       "          7.9005e-01, -1.2472e+00, -4.5186e-03, -3.3665e-02, -1.3081e+00,\n",
       "         -1.6804e+00,  5.0088e-01,  1.9714e+00,  4.9155e-01,  6.5748e-01,\n",
       "          1.3346e+00,  8.8960e-02,  6.9962e-01, -9.7362e-01,  5.2199e-02,\n",
       "         -2.3520e+00,  1.1331e+00,  2.9872e-02,  2.8982e-01,  1.2950e+00,\n",
       "         -7.4605e-01, -1.0734e+00,  8.5781e-01,  3.1512e-01,  1.2917e+00,\n",
       "          1.0974e+00,  2.9043e-01, -4.8641e-01, -4.7770e-01,  5.4404e-01,\n",
       "         -5.9924e-01, -6.6578e-01, -9.0113e-01,  1.2556e+00, -5.7889e-01,\n",
       "         -8.8332e-01, -6.4160e-02, -6.9970e-01, -1.7210e-01,  1.3476e+00,\n",
       "         -1.6591e+00,  1.9880e+00,  1.8357e+00,  4.3807e-01,  1.6950e+00,\n",
       "         -1.0259e-01, -8.6750e-01, -3.1382e-01, -1.3104e+00,  1.9475e+00,\n",
       "          1.9383e-01,  2.0223e+00,  1.4702e+00,  1.6586e+00, -3.6649e-01,\n",
       "          2.1425e-02,  1.4570e+00, -4.7758e-01,  1.3825e+00, -3.4744e-02,\n",
       "         -4.7902e-02,  3.8827e-01,  8.8942e-01, -9.4803e-01, -3.8899e-01,\n",
       "         -5.4605e-01, -1.0742e+00,  1.8527e+00, -1.6024e+00, -9.7324e-01,\n",
       "         -6.1419e-03, -1.1700e+00,  9.5458e-01, -4.1964e-03,  7.2067e-01,\n",
       "         -1.0769e-01,  1.2648e+00,  1.4026e+00,  6.2802e-01, -3.0228e-01,\n",
       "          1.0243e+00, -1.6148e+00, -7.5439e-01, -1.9160e+00, -1.2828e+00,\n",
       "         -1.3728e-02, -6.7004e-01, -6.2465e-01, -1.1020e-01, -3.8789e-01,\n",
       "         -2.7853e-01, -1.0030e+00, -5.6607e-01,  1.4377e+00,  1.7836e-03,\n",
       "         -9.0004e-01,  2.8585e-01, -7.1485e-01, -1.5486e+00, -5.9179e-01,\n",
       "          1.1033e+00,  5.5936e-01,  1.0830e+00, -1.2589e+00,  1.7265e-02,\n",
       "         -6.8968e-01, -1.8350e+00, -9.6219e-02, -9.1897e-01, -1.1480e+00,\n",
       "         -7.6266e-01, -1.1089e+00,  1.2763e+00,  6.8988e-01, -2.9904e-01,\n",
       "         -2.7431e-01,  1.4056e+00, -7.1620e-01,  1.0882e+00, -1.1816e-01,\n",
       "          2.9849e-01, -8.0631e-03,  1.0465e+00,  1.0460e-01, -1.6045e-01,\n",
       "         -8.2378e-01, -9.4674e-03, -2.1783e+00, -7.6801e-01,  5.4458e-01,\n",
       "         -1.7053e+00,  5.5348e-02,  3.3861e-01, -5.8601e-01, -5.7167e-01,\n",
       "          8.1471e-01, -1.1304e+00, -5.2306e-01,  1.1690e+00, -1.3958e+00,\n",
       "          2.8354e+00,  9.5083e-02, -8.5484e-01,  4.4958e-01,  4.4592e-01,\n",
       "          7.4016e-01,  1.2730e+00, -6.8082e-01, -1.1658e+00, -5.5579e-01,\n",
       "         -2.7922e-01,  1.4917e+00,  2.3597e-01,  6.6123e-02, -3.7157e-02,\n",
       "          4.6912e-01,  9.8425e-02,  1.8654e-01,  1.8263e+00,  1.0570e+00,\n",
       "          4.9641e-01,  1.2887e+00,  1.0330e+00,  1.0624e+00,  1.5124e-01,\n",
       "         -2.5533e-01,  4.0365e-01, -1.2738e+00,  5.5967e-01,  3.6542e-01]],\n",
       "       grad_fn=<EmbeddingBackward>)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "look_up = torch.LongTensor([word_to_idx[\"数学\"]])\n",
    "embeds(look_up)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 加载外部训练好的词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "import gensim\n",
    "from gensim.models import Word2Vec\n",
    "word2vec_model = gensim.models.KeyedVectors.load(\"news.word2vec\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(155362, 200)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "embedding_shape = word2vec_model.wv.vectors.shape\n",
    "embedding_shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "#初始化Embedding层\n",
    "embed = nn.Embedding(embedding_shape[0], embedding_shape[1])\n",
    "#拷贝训练好的词向量权重\n",
    "embed.weight.data.copy_(torch.from_numpy(word2vec_model.wv.vectors))\n",
    "#冻结权重，训练过程不更新\n",
    "embed.weight.requires_grad = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.9777,  0.3726,  0.3545, -0.3584,  0.9531,  0.3427, -0.4642,  1.2159,\n",
       "         -0.9503, -0.5782, -2.4697,  1.5502, -0.9270,  2.3928, -0.2477, -0.6038,\n",
       "         -0.4944,  1.3966,  0.7566, -0.8826, -1.1975, -0.0653, -0.1487,  0.7565,\n",
       "         -0.6251, -1.6796, -0.3788, -1.0130,  1.9895, -0.4686, -0.1373,  2.0175,\n",
       "         -0.2474, -1.5085,  0.7892,  0.3344, -1.1795, -3.1736,  0.9433, -1.4503,\n",
       "         -1.5128,  0.2205,  0.9734, -1.2039, -1.7167,  1.3966, -1.7425, -0.2714,\n",
       "          0.3819, -0.6154, -0.8171, -0.6028, -2.7886, -0.5618, -0.2610,  1.0492,\n",
       "         -0.3627, -1.2233,  1.2634,  0.3334,  0.6682,  2.6612,  0.6975, -0.0708,\n",
       "          1.5320, -0.1496, -0.2336,  1.2927, -0.7408, -1.0342, -0.9239,  0.0202,\n",
       "          0.8004,  1.4688, -1.2386,  0.2145,  2.0381, -0.3545,  0.8606, -1.0852,\n",
       "         -0.7719, -1.5213,  0.2656, -0.9081, -1.8508, -1.4375, -0.8160,  1.5982,\n",
       "          0.8021,  0.5514,  0.4641,  0.4165,  0.2809, -0.8394, -0.1702, -0.4008,\n",
       "          1.1085, -1.0770,  0.1681, -1.2643, -0.6502,  0.3040, -0.5413,  0.2220,\n",
       "         -0.1533,  0.1144,  1.0518, -0.7806,  0.2119, -0.5695, -2.0081,  0.5606,\n",
       "         -0.4242, -0.9143, -0.1640,  1.4673,  1.7795, -0.6617, -0.8579, -1.3069,\n",
       "         -0.1858,  0.0812,  2.0705, -0.9091, -0.2474,  0.4748,  0.2122,  0.7092,\n",
       "          1.1196,  0.1925,  0.2620, -0.0275, -2.6284,  1.6097, -0.3948,  0.5858,\n",
       "          0.7817, -1.4567, -1.0107,  1.7898,  1.3229, -0.4818, -2.0316,  0.1598,\n",
       "         -0.4340, -1.4581,  0.0847, -1.8987, -1.5999, -0.5150, -0.1101,  0.1677,\n",
       "         -1.7414,  1.1491, -0.4115,  1.3189, -1.8611, -0.1494,  1.8617,  0.6060,\n",
       "          1.3892, -1.5435, -0.9466,  0.8712, -0.8126,  0.4343, -0.2359,  1.2024,\n",
       "          0.6117, -0.1590,  2.0477,  0.6651, -1.9729, -0.2477, -0.2407, -0.1979,\n",
       "          0.2839,  1.3558, -0.9779, -1.0996, -1.0425, -0.9821,  2.7934,  0.2654,\n",
       "          0.0079, -0.8442,  0.2506, -0.2976, -0.0372,  0.4135,  1.0018,  1.4979,\n",
       "          0.4086, -0.8630, -0.5336, -2.0806, -0.7287,  0.0967,  0.2996,  0.0863]])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#找到数学的index\n",
    "index = word2vec_model.wv.vocab[\"数学\"].index\n",
    "#查表\n",
    "embed(torch.LongTensor([index]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "5160"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.9777014 ,  0.37264398,  0.3544503 , -0.3584271 ,  0.9531079 ,\n",
       "        0.34265763, -0.46419275,  1.2158922 , -0.95033014, -0.57818353,\n",
       "       -2.4696522 ,  1.5502193 , -0.9270395 ,  2.3928015 , -0.24768908,\n",
       "       -0.6038057 , -0.4943789 ,  1.3965534 ,  0.7565906 , -0.88257504,\n",
       "       -1.1974952 , -0.06531978, -0.14872837,  0.7565044 , -0.625109  ,\n",
       "       -1.6795636 , -0.37883943, -1.0129856 ,  1.9894767 , -0.46857628,\n",
       "       -0.13727938,  2.0174541 , -0.24744098, -1.5085264 ,  0.7892023 ,\n",
       "        0.33436   , -1.1795474 , -3.1735787 ,  0.94334424, -1.4502727 ,\n",
       "       -1.512804  ,  0.22046994,  0.97340196, -1.2038714 , -1.7167488 ,\n",
       "        1.3966297 , -1.74253   , -0.271398  ,  0.38193786, -0.6153631 ,\n",
       "       -0.81710833, -0.6028143 , -2.7886477 , -0.56181955, -0.26096618,\n",
       "        1.0491836 , -0.36269432, -1.2233461 ,  1.2633723 ,  0.33339623,\n",
       "        0.6681777 ,  2.6611874 ,  0.6974885 , -0.07083179,  1.5320121 ,\n",
       "       -0.14957856, -0.23364297,  1.2927059 , -0.740794  , -1.034245  ,\n",
       "       -0.9239206 ,  0.02024247,  0.80037826,  1.4687984 , -1.2385873 ,\n",
       "        0.21453705,  2.038133  , -0.3545404 ,  0.8605712 , -1.0852084 ,\n",
       "       -0.7719066 , -1.5213072 ,  0.26558363, -0.90810126, -1.8508003 ,\n",
       "       -1.4374627 , -0.81599545,  1.5981528 ,  0.8021086 ,  0.5514403 ,\n",
       "        0.46408525,  0.4165459 ,  0.28089112, -0.8394122 , -0.17019364,\n",
       "       -0.40083984,  1.1085244 , -1.0769826 ,  0.16811785, -1.264332  ,\n",
       "       -0.65023345,  0.30400878, -0.54131866,  0.22195756, -0.1533443 ,\n",
       "        0.11435756,  1.0517627 , -0.7805956 ,  0.21193174, -0.5695136 ,\n",
       "       -2.0081282 ,  0.5605939 , -0.42422384, -0.91430044, -0.16397983,\n",
       "        1.4673239 ,  1.7795141 , -0.6616519 , -0.85786676, -1.3068919 ,\n",
       "       -0.18579642,  0.08120802,  2.0704765 , -0.909128  , -0.2473802 ,\n",
       "        0.47484902,  0.21223098,  0.70923716,  1.1195542 ,  0.192498  ,\n",
       "        0.2619538 , -0.02753049, -2.6283615 ,  1.6097256 , -0.39482525,\n",
       "        0.58578926,  0.78168225, -1.4567051 , -1.0106647 ,  1.7898326 ,\n",
       "        1.3229283 , -0.48175052, -2.031554  ,  0.1597894 , -0.43400484,\n",
       "       -1.4580803 ,  0.08473147, -1.8987256 , -1.5999441 , -0.51501524,\n",
       "       -0.11006336,  0.16767788, -1.741363  ,  1.1490517 , -0.4114892 ,\n",
       "        1.3189197 , -1.8611438 , -0.14940894,  1.8616751 ,  0.60603565,\n",
       "        1.3892089 , -1.5435234 , -0.9465592 ,  0.87124616, -0.81262296,\n",
       "        0.43428972, -0.23588274,  1.2024306 ,  0.6117268 , -0.15903586,\n",
       "        2.047749  ,  0.66510355, -1.9728937 , -0.24768215, -0.24072734,\n",
       "       -0.19785914,  0.28386274,  1.3558279 , -0.9779383 , -1.0995511 ,\n",
       "       -1.042461  , -0.9821223 ,  2.7934027 ,  0.26537526,  0.00787672,\n",
       "       -0.84422   ,  0.25056732, -0.2975813 , -0.03719828,  0.4135173 ,\n",
       "        1.0017619 ,  1.497947  ,  0.4086241 , -0.8630119 , -0.5335746 ,\n",
       "       -2.080624  , -0.72866327,  0.09673526,  0.29962385,  0.08625787],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "word2vec_model.wv.vectors[5160]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### PyTorch中对变长序列的处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 163,
   "metadata": {},
   "outputs": [],
   "source": [
    "docs = [[\"历史\",\"的\",\"潮流\",\"滚滚\",\"向前\"],[\"科技\",\"工作者\"]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 164,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[[440, 1, 5925, 18646, 4955], [341, 4238]]"
      ]
     },
     "execution_count": 164,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "indexes = [[word2vec_model.wv.vocab[word].index for word in doc] for doc in docs]\n",
    "indexes"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### look up for embedding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 168,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "word_embedding = [[list(embed(torch.LongTensor([word2vec_model.wv.vocab[word].index])).squeeze(0).numpy()) for word in doc] for doc in docs]\n",
    "#对于第二个句子，使用0填充到最大长度\n",
    "word_embedding[1].extend([list(np.zeros(200)) for i in range(3)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 169,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 2\n",
    "max_length = 5\n",
    "embed_size = 200"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 构建“参差不齐”的批次数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 170,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 1.9508, -1.4661,  2.0933,  ..., -1.7270, -0.1494, -0.0703],\n",
       "         [-0.2667,  0.4490,  0.3256,  ..., -0.1530,  1.4263,  0.3550],\n",
       "         [ 0.3754,  1.8401,  0.2334,  ..., -1.0909, -0.7132,  1.6995],\n",
       "         [ 0.0494,  0.6918, -0.2538,  ...,  0.4563, -0.4146,  0.1772],\n",
       "         [-0.7569,  0.7580, -0.5532,  ..., -0.2523, -0.4494, -1.6142]],\n",
       "\n",
       "        [[ 0.4797,  1.4363,  1.2964,  ..., -0.5412, -0.1600,  0.3205],\n",
       "         [ 0.3191, -0.8103, -0.5561,  ..., -0.3135, -1.0554,  0.3390],\n",
       "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
       "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
       "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000]]])"
      ]
     },
     "execution_count": 170,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "batch_data = torch.Tensor(batch_size,max_length,embed_size)\n",
    "batch_data.data.copy_(torch.from_numpy(np.array(word_embedding)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 使用pack_padded_sequence对填充后的矩阵进行压缩"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 171,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 176,
   "metadata": {},
   "outputs": [],
   "source": [
    "packedSequence = pack_padded_sequence(batch_data,torch.Tensor([5,2]),batch_first=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 178,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "PackedSequence(data=tensor([[ 1.9508, -1.4661,  2.0933,  ..., -1.7270, -0.1494, -0.0703],\n",
       "        [ 0.4797,  1.4363,  1.2964,  ..., -0.5412, -0.1600,  0.3205],\n",
       "        [-0.2667,  0.4490,  0.3256,  ..., -0.1530,  1.4263,  0.3550],\n",
       "        ...,\n",
       "        [ 0.3754,  1.8401,  0.2334,  ..., -1.0909, -0.7132,  1.6995],\n",
       "        [ 0.0494,  0.6918, -0.2538,  ...,  0.4563, -0.4146,  0.1772],\n",
       "        [-0.7569,  0.7580, -0.5532,  ..., -0.2523, -0.4494, -1.6142]]), batch_sizes=tensor([2, 2, 1, 1, 1]))"
      ]
     },
     "execution_count": 178,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "packedSequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 179,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([7, 200])"
      ]
     },
     "execution_count": 179,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "packedSequence.data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 186,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pad packed sequence shape:torch.Size([2, 5, 200])\n",
      "pad packed sequences length:tensor([5, 2])\n"
     ]
    }
   ],
   "source": [
    "tensor,sizes = pad_packed_sequence(packedSequence,batch_first=True,padding_value=0.0)\n",
    "print(\"pad packed sequence shape:{}\".format(tensor.shape))\n",
    "print(\"pad packed sequences length:{}\".format(sizes))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 187,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 1.9508, -1.4661,  2.0933,  ..., -1.7270, -0.1494, -0.0703],\n",
       "         [-0.2667,  0.4490,  0.3256,  ..., -0.1530,  1.4263,  0.3550],\n",
       "         [ 0.3754,  1.8401,  0.2334,  ..., -1.0909, -0.7132,  1.6995],\n",
       "         [ 0.0494,  0.6918, -0.2538,  ...,  0.4563, -0.4146,  0.1772],\n",
       "         [-0.7569,  0.7580, -0.5532,  ..., -0.2523, -0.4494, -1.6142]],\n",
       "\n",
       "        [[ 0.4797,  1.4363,  1.2964,  ..., -0.5412, -0.1600,  0.3205],\n",
       "         [ 0.3191, -0.8103, -0.5561,  ..., -0.3135, -1.0554,  0.3390],\n",
       "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
       "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000],\n",
       "         [ 0.0000,  0.0000,  0.0000,  ...,  0.0000,  0.0000,  0.0000]]])"
      ]
     },
     "execution_count": 187,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 192,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(tensor([[[ 0.0737, -0.2264,  0.3659, -0.7315,  0.4314, -0.5522,  0.4508,\n",
      "           0.4300,  0.6089,  0.4955],\n",
      "         [-0.6927, -0.9158,  0.1014, -0.3152,  0.7671, -0.0811, -0.3895,\n",
      "           0.5228,  0.0641,  0.1213]],\n",
      "\n",
      "        [[-0.4387, -0.3381,  0.0609, -0.3746,  0.4941, -0.3338,  0.8053,\n",
      "           0.2477, -0.2927,  0.6532],\n",
      "         [ 0.0118, -0.4609,  0.1862, -0.5827,  0.5011, -0.1901,  0.2654,\n",
      "          -0.3303,  0.5781,  0.5101]],\n",
      "\n",
      "        [[-0.4990,  0.1583, -0.7873, -0.8562,  0.3908, -0.1197,  0.3253,\n",
      "           0.5072, -0.7456,  0.1218],\n",
      "         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,\n",
      "           0.0000,  0.0000,  0.0000]],\n",
      "\n",
      "        [[-0.2892,  0.5787, -0.4259, -0.2719, -0.5481, -0.8696,  0.1278,\n",
      "          -0.3356, -0.6982, -0.3731],\n",
      "         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,\n",
      "           0.0000,  0.0000,  0.0000]],\n",
      "\n",
      "        [[-0.8881, -0.3430, -0.1504,  0.0181,  0.2018, -0.7650,  0.1453,\n",
      "           0.3667, -0.8491, -0.4716],\n",
      "         [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,\n",
      "           0.0000,  0.0000,  0.0000]]], grad_fn=<CopySlices>), tensor([5, 2]))\n"
     ]
    }
   ],
   "source": [
    "#pack\n",
    "pack = pack_padded_sequence(batch_data, torch.Tensor([5,2]), batch_first=True)\n",
    "rnn = nn.RNN(200,10, 2, batch_first=True)\n",
    "h0 = torch.randn(2, 2, 10)\n",
    "#forward\n",
    "out, _ = rnn(pack, h0)\n",
    "# unpack\n",
    "unpacked =pad_packed_sequence(out)\n",
    "print(unpacked)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 195,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([5, 2, 10])"
      ]
     },
     "execution_count": 195,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "unpacked[0].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
