{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "import sys\n",
    "# NOTE(review): hardcoded absolute Windows path -- prefer a path derived\n",
    "# from a config value or relative to the notebook location\n",
    "sys.path.append(r'C:\\Users\\Administrator.DESKTOP-HN1J6IE\\Desktop\\lecture01_1_2\\code')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1. 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "C:\\Users\\Administrator.DESKTOP-HN1J6IE\\Desktop\\lecture01_1_2\\code\nBuilding prefix dict from the default dictionary ...\n2020-07-28 22:39:06,720 : DEBUG : Building prefix dict from the default dictionary ...\nLoading model from cache C:\\Users\\ADMINI~1.DES\\AppData\\Local\\Temp\\jieba.cache\n2020-07-28 22:39:06,721 : DEBUG : Loading model from cache C:\\Users\\ADMINI~1.DES\\AppData\\Local\\Temp\\jieba.cache\nLoading model cost 0.537 seconds.\n2020-07-28 22:39:07,257 : DEBUG : Loading model cost 0.537 seconds.\nPrefix dict has been built successfully.\n2020-07-28 22:39:07,258 : DEBUG : Prefix dict has been built successfully.\n"
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from utils.data_loader import build_dataset,pad_proc,sentences_proc\n",
    "# NOTE(review): wildcard import obscures where names such as\n",
    "# train_data_path / wv_train_epochs come from -- prefer explicit imports\n",
    "from utils.config import *\n",
    "from utils.multi_proc_utils import parallelize\n",
    "from gensim.models.word2vec import LineSentence, Word2Vec"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.1 加载数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "train data size 82943,test data size 20000\n"
    }
   ],
   "source": [
    "train_df = pd.read_csv(train_data_path)\n",
    "test_df = pd.read_csv(test_data_path)\n",
    "print('train data size {},test data size {}'.format(len(train_df), len(test_df)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.2 空值填充"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Drop training rows whose Report (label) is missing\n",
    "train_df.dropna(subset=['Report'], inplace=True)\n",
    "\n",
    "# Fill remaining missing feature values with the empty string\n",
    "train_df.fillna('', inplace=True)\n",
    "test_df.fillna('', inplace=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.3 多线程, 批量数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Wall time: 37.1 s\n"
    }
   ],
   "source": [
    "%%time\n",
    "train_df = parallelize(train_df, sentences_proc)\n",
    "test_df = parallelize(test_df, sentences_proc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Wall time: 31.7 s\n"
    }
   ],
   "source": [
    "%%time\n",
    "# NOTE(review): this cell repeats the previous one -- sentences_proc is\n",
    "# applied a second time to already-processed data; likely an accidental\n",
    "# duplicate cell. Confirm and remove one of the two.\n",
    "train_df = parallelize(train_df, sentences_proc)\n",
    "test_df = parallelize(test_df, sentences_proc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.4 合并训练测试数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "train data size 82873,test data size 20000,merged_df data size 102873\n"
    }
   ],
   "source": [
    "train_df['merged'] = train_df[['Question', 'Dialogue', 'Report']].apply(lambda x: ' '.join(x), axis=1)\n",
    "test_df['merged'] = test_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)\n",
    "merged_df = pd.concat([train_df[['merged']], test_df[['merged']]], axis=0)\n",
    "print('train data size {},test data size {},merged_df data size {}'.format(len(train_df), len(test_df),len(merged_df)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.5 保存处理好的 训练 测试集合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_df = train_df.drop(['merged'], axis=1)\n",
    "test_df = test_df.drop(['merged'], axis=1)\n",
    "train_df.to_csv(train_seg_path, index=None, header=True)\n",
    "test_df.to_csv(test_seg_path, index=None, header=True)\n",
    "merged_df.to_csv(merger_seg_path, index=None, header=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.1 预训练词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'C:\\\\Users\\\\Administrator.DESKTOP-HN1J6IE\\\\Desktop\\\\lecture01_1_2\\\\code\\\\data\\\\merged_train_test_seg_data.csv'"
     },
     "metadata": {},
     "execution_count": 9
    }
   ],
   "source": [
    "merger_seg_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "2020-07-28 22:40:25,878 : INFO : collecting all words and their counts\n2020-07-28 22:40:25,883 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word types\n2020-07-28 22:40:26,137 : INFO : PROGRESS: at sentence #10000, processed 1276705 words, keeping 34209 word types\n2020-07-28 22:40:26,403 : INFO : PROGRESS: at sentence #20000, processed 2572180 words, keeping 49557 word types\n2020-07-28 22:40:26,663 : INFO : PROGRESS: at sentence #30000, processed 3842841 words, keeping 60845 word types\n2020-07-28 22:40:26,921 : INFO : PROGRESS: at sentence #40000, processed 5078847 words, keeping 70288 word types\n2020-07-28 22:40:27,182 : INFO : PROGRESS: at sentence #50000, processed 6389788 words, keeping 78649 word types\n2020-07-28 22:40:27,458 : INFO : PROGRESS: at sentence #60000, processed 7783786 words, keeping 87077 word types\n2020-07-28 22:40:27,748 : INFO : PROGRESS: at sentence #70000, processed 9207801 words, keeping 95322 word types\n2020-07-28 22:40:28,008 : INFO : PROGRESS: at sentence #80000, processed 10475253 words, keeping 102247 word types\n2020-07-28 22:40:28,246 : INFO : PROGRESS: at sentence #90000, processed 11633277 words, keeping 109005 word types\n2020-07-28 22:40:28,484 : INFO : PROGRESS: at sentence #100000, processed 12792682 words, keeping 114974 word types\n2020-07-28 22:40:28,555 : INFO : collected 116674 word types from a corpus of 13134376 raw words and 102873 sentences\n2020-07-28 22:40:28,556 : INFO : Loading a fresh vocabulary\n2020-07-28 22:40:28,636 : INFO : effective_min_count=5 retains 31851 unique words (27% of original 116674, drops 84823)\n2020-07-28 22:40:28,637 : INFO : effective_min_count=5 leaves 13003784 word corpus (99% of original 13134376, drops 130592)\n2020-07-28 22:40:28,702 : INFO : deleting the raw counts dictionary of 116674 items\n2020-07-28 22:40:28,704 : INFO : sample=0.001 downsamples 39 most-common words\n2020-07-28 22:40:28,705 : INFO : downsampling leaves estimated 9644326 word 
corpus (74.2% of prior 13003784)\n2020-07-28 22:40:28,769 : INFO : estimated required memory for 31851 words and 300 dimensions: 92367900 bytes\n2020-07-28 22:40:28,771 : INFO : resetting layer weights\n2020-07-28 22:40:33,008 : INFO : training model with 8 workers on 31851 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=3\n2020-07-28 22:40:34,016 : INFO : EPOCH 1 - PROGRESS: at 17.72% examples, 1698217 words/s, in_qsize 13, out_qsize 0\n2020-07-28 22:40:35,018 : INFO : EPOCH 1 - PROGRESS: at 36.78% examples, 1755209 words/s, in_qsize 12, out_qsize 0\n2020-07-28 22:40:36,020 : INFO : EPOCH 1 - PROGRESS: at 55.13% examples, 1781422 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:37,023 : INFO : EPOCH 1 - PROGRESS: at 70.13% examples, 1736905 words/s, in_qsize 15, out_qsize 1\n2020-07-28 22:40:38,023 : INFO : EPOCH 1 - PROGRESS: at 89.18% examples, 1740113 words/s, in_qsize 13, out_qsize 2\n2020-07-28 22:40:38,601 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-28 22:40:38,601 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-28 22:40:38,602 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-28 22:40:38,603 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-28 22:40:38,604 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-28 22:40:38,605 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-28 22:40:38,609 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-28 22:40:38,614 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-28 22:40:38,614 : INFO : EPOCH - 1 : training on 13134376 raw words (9644824 effective words) took 5.6s, 1721992 effective words/s\n2020-07-28 22:40:39,617 : INFO : EPOCH 2 - PROGRESS: at 16.33% examples, 1566546 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:40,618 : INFO : EPOCH 2 - PROGRESS: at 33.02% examples, 
1586698 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:41,630 : INFO : EPOCH 2 - PROGRESS: at 50.31% examples, 1614021 words/s, in_qsize 4, out_qsize 1\n2020-07-28 22:40:42,636 : INFO : EPOCH 2 - PROGRESS: at 66.28% examples, 1631575 words/s, in_qsize 5, out_qsize 0\n2020-07-28 22:40:43,639 : INFO : EPOCH 2 - PROGRESS: at 83.54% examples, 1632027 words/s, in_qsize 0, out_qsize 1\n2020-07-28 22:40:44,500 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-28 22:40:44,503 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-28 22:40:44,503 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-28 22:40:44,504 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-28 22:40:44,505 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-28 22:40:44,511 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-28 22:40:44,513 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-28 22:40:44,514 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-28 22:40:44,514 : INFO : EPOCH - 2 : training on 13134376 raw words (9644536 effective words) took 5.9s, 1635324 effective words/s\n2020-07-28 22:40:45,526 : INFO : EPOCH 3 - PROGRESS: at 16.90% examples, 1609507 words/s, in_qsize 12, out_qsize 2\n2020-07-28 22:40:46,526 : INFO : EPOCH 3 - PROGRESS: at 34.01% examples, 1623643 words/s, in_qsize 15, out_qsize 0\n2020-07-28 22:40:47,530 : INFO : EPOCH 3 - PROGRESS: at 50.67% examples, 1626141 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:48,532 : INFO : EPOCH 3 - PROGRESS: at 66.72% examples, 1645669 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:49,532 : INFO : EPOCH 3 - PROGRESS: at 84.44% examples, 1648953 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:50,397 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-28 22:40:50,400 : INFO : worker thread finished; awaiting 
finish of 6 more threads\n2020-07-28 22:40:50,400 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-28 22:40:50,402 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-28 22:40:50,403 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-28 22:40:50,406 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-28 22:40:50,412 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-28 22:40:50,414 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-28 22:40:50,414 : INFO : EPOCH - 3 : training on 13134376 raw words (9645523 effective words) took 5.9s, 1635514 effective words/s\n2020-07-28 22:40:51,417 : INFO : EPOCH 4 - PROGRESS: at 16.68% examples, 1600641 words/s, in_qsize 14, out_qsize 0\n2020-07-28 22:40:52,424 : INFO : EPOCH 4 - PROGRESS: at 34.23% examples, 1635775 words/s, in_qsize 9, out_qsize 1\n2020-07-28 22:40:53,424 : INFO : EPOCH 4 - PROGRESS: at 51.53% examples, 1657273 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:54,426 : INFO : EPOCH 4 - PROGRESS: at 67.10% examples, 1658553 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:40:55,443 : INFO : EPOCH 4 - PROGRESS: at 85.10% examples, 1656286 words/s, in_qsize 8, out_qsize 0\n2020-07-28 22:40:56,199 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-28 22:40:56,206 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-28 22:40:56,207 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-28 22:40:56,208 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-28 22:40:56,210 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-28 22:40:56,216 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-28 22:40:56,219 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-28 22:40:56,221 : INFO : worker thread finished; 
awaiting finish of 0 more threads\n2020-07-28 22:40:56,221 : INFO : EPOCH - 4 : training on 13134376 raw words (9642672 effective words) took 5.8s, 1661064 effective words/s\n2020-07-28 22:40:57,231 : INFO : EPOCH 5 - PROGRESS: at 16.65% examples, 1583876 words/s, in_qsize 15, out_qsize 0\n2020-07-28 22:40:58,234 : INFO : EPOCH 5 - PROGRESS: at 34.37% examples, 1641090 words/s, in_qsize 14, out_qsize 1\n2020-07-28 22:40:59,237 : INFO : EPOCH 5 - PROGRESS: at 51.02% examples, 1638163 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:41:00,242 : INFO : EPOCH 5 - PROGRESS: at 66.34% examples, 1633696 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:41:01,246 : INFO : EPOCH 5 - PROGRESS: at 84.78% examples, 1652587 words/s, in_qsize 0, out_qsize 0\n2020-07-28 22:41:02,047 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-28 22:41:02,054 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-28 22:41:02,057 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-28 22:41:02,061 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-28 22:41:02,062 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-28 22:41:02,064 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-28 22:41:02,066 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-28 22:41:02,072 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-28 22:41:02,073 : INFO : EPOCH - 5 : training on 13134376 raw words (9645121 effective words) took 5.8s, 1648961 effective words/s\n2020-07-28 22:41:02,074 : INFO : training on a 65671880 raw words (48222676 effective words) took 29.1s, 1659146 effective words/s\n"
    }
   ],
   "source": [
    "wv_model = Word2Vec(LineSentence(merger_seg_path),\n",
    "                    size=300, \n",
    "                    negative=5, \n",
    "                    workers=8, \n",
    "                    iter=wv_train_epochs, \n",
    "                    window=3,\n",
    "                    min_count=5)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.2. 建立词表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "31089"
     },
     "metadata": {},
     "execution_count": 17
    }
   ],
   "source": [
    "vocab = {word: index for index, word in enumerate(wv_model.wv.index2word)}\n",
    "reverse_vocab = {index: word for index, word in enumerate(wv_model.wv.index2word)}\n",
    "len(vocab)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q 1 .使用了min_count,其实部分词不在vocab表中 ,但是训练数据和测试数据中又有这些词?\n",
    "\n",
    "---\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.3. 获取词向量矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(31089, 300)"
     },
     "metadata": {},
     "execution_count": 18
    }
   ],
   "source": [
    "embedding_matrix = wv_model.wv.vectors\n",
    "embedding_matrix.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. 构建训练数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "+ 可以把Question,Dialogue当做一句 `长文本处理`, 合并构建成X\n",
    "+ Report作为需要预测的标签,构建Y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_df['X'] = train_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)\n",
    "test_df['X'] = test_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "0    方向机 重 ， 助力 泵 ， 方向机 都 换 技师说 车主说 新 都 换 车主说 助力 泵 ...\n1    奔驰 ML500 排气 凸轮轴 调节 错误 技师说 有没有 电脑 检测 故障 代码 。 车主...\n2    2010 款 宝马X1 ， 2011 年 出厂 ， 2.0 排量 ， 通用 6L45 变速箱...\n3    3.0 V6 发动机 号 位置 ， 照片 最好 ！ 技师说 右侧 排气管 上方 ， 缸体 上...\n4    2012 款 奔驰 c180 ， 维修保养 ， 动力 ， 值得 拥有 技师说 家庭 用车 ，...\nName: X, dtype: object"
     },
     "metadata": {},
     "execution_count": 12
    }
   ],
   "source": [
    "train_df['X'].head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Q 2. 句子长度一样 ? 如何构建训练,batch操作,矩阵 ...\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.1 填充字段"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pad_proc(sentence, max_len, vocab):\n",
    "    '''\n",
    "    Pad/truncate a space-joined sentence to a fixed token length using the\n",
    "    special tokens <START> <STOP> <PAD> <UNK>.\n",
    "    NOTE(review): this redefines (shadows) the pad_proc imported from\n",
    "    utils.data_loader at the top of the notebook.\n",
    "    '''\n",
    "    # 0. split into tokens on single spaces\n",
    "    words = sentence.strip().split(' ')\n",
    "    # 1. keep at most max_len tokens\n",
    "    words = words[:max_len]\n",
    "    # 2. replace out-of-vocabulary tokens with <UNK>\n",
    "    sentence = [word if word in vocab else '<UNK>' for word in words]\n",
    "    # 3. add sentence boundary markers\n",
    "    sentence = ['<START>'] + sentence + ['<STOP>']\n",
    "    # 4. pad so every sentence is exactly max_len + 2 tokens long\n",
    "    #    (was max_len + 2 - len(words), which always appended two\n",
    "    #    surplus <PAD> tokens, even for full-length sentences)\n",
    "    sentence = sentence + ['<PAD>'] * (max_len - len(words))\n",
    "    return ' '.join(sentence)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Q3. 如何确定max_len的值? 经验 ?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.2 获取适当的Max_Len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_max_len(data):\n",
    "    \"\"\"\n",
    "    Pick a suitable maximum sequence length for a text column.\n",
    "    :param data: Series of space-joined token strings, e.g. train_df['Question']\n",
    "    :return: int(mean + 2 * std) of the per-row length estimate\n",
    "    \"\"\"\n",
    "    # count(' ') approximates the token count (it is tokens - 1 for\n",
    "    # non-empty rows); the 2-sigma margin makes the off-by-one immaterial\n",
    "    max_lens = data.apply(lambda x: x.count(' '))\n",
    "    return int(np.mean(max_lens) + 2 * np.std(max_lens))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Suitable max length for the input (X) data\n",
    "train_x_max_len = get_max_len(train_df['X'])\n",
    "test_x_max_len = get_max_len(test_df['X'])\n",
    "x_max_len = max(train_x_max_len, test_x_max_len)\n",
    "\n",
    "# Suitable max length for the label (Report) data\n",
    "# NOTE(review): this overwrites train_x_max_len with the *Report* length\n",
    "# (should be a new name like train_y_max_len); later cells rely on the\n",
    "# overwritten name, so it is only flagged here, not renamed\n",
    "train_x_max_len = get_max_len(train_df['Report'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "38"
     },
     "metadata": {},
     "execution_count": 16
    }
   ],
   "source": [
    "train_x_max_len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "351"
     },
     "metadata": {},
     "execution_count": 17
    }
   ],
   "source": [
    "x_max_len"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.3 填充处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "+ < start > - 句子开始\n",
    "+ < end > - 句子结尾\n",
    "+ < pad > - 短句填充\n",
    "+ < unk > - 未知词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "output_type": "error",
     "ename": "NameError",
     "evalue": "name 'vocab' is not defined",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-18-fff7df84a571>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;31m# 训练集X处理\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'X'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'X'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mlambda\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mpad_proc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx_max_len\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvocab\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      3\u001b[0m \u001b[1;31m# 训练集Y处理\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'Y'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'Report'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mlambda\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mpad_proc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtrain_x_max_len\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvocab\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[1;31m# 测试集X处理\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\pandas\\core\\series.py\u001b[0m in \u001b[0;36mapply\u001b[1;34m(self, func, convert_dtype, args, **kwds)\u001b[0m\n\u001b[0;32m   3846\u001b[0m             \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   3847\u001b[0m                 \u001b[0mvalues\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mastype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobject\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 3848\u001b[1;33m                 \u001b[0mmapped\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmap_infer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mvalues\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mconvert\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mconvert_dtype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   3849\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   3850\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmapped\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmapped\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mSeries\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\lib.pyx\u001b[0m in \u001b[0;36mpandas._libs.lib.map_infer\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32m<ipython-input-18-fff7df84a571>\u001b[0m in \u001b[0;36m<lambda>\u001b[1;34m(x)\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;31m# 训练集X处理\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'X'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'X'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mlambda\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mpad_proc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx_max_len\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvocab\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      3\u001b[0m \u001b[1;31m# 训练集Y处理\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'Y'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'Report'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mapply\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mlambda\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mpad_proc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtrain_x_max_len\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvocab\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[1;31m# 测试集X处理\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'vocab' is not defined"
     ]
    }
   ],
   "source": [
    "# NOTE(review): needs `vocab` from section 2.2 -- the recorded run failed\n",
    "# with NameError because cells were executed out of order\n",
    "# Pad/encode training inputs\n",
    "train_df['X'] = train_df['X'].apply(lambda x: pad_proc(x, x_max_len, vocab))\n",
    "# Pad/encode training labels; train_x_max_len holds the Report length here\n",
    "train_df['Y'] = train_df['Report'].apply(lambda x: pad_proc(x, train_x_max_len, vocab))\n",
    "# Pad/encode test inputs\n",
    "test_df['X'] = test_df['X'].apply(lambda x: pad_proc(x, x_max_len, vocab))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "output_type": "error",
     "ename": "KeyError",
     "evalue": "'Y'",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\pandas\\core\\indexes\\base.py\u001b[0m in \u001b[0;36mget_loc\u001b[1;34m(self, key, method, tolerance)\u001b[0m\n\u001b[0;32m   2645\u001b[0m             \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2646\u001b[1;33m                 \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_engine\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_loc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2647\u001b[0m             \u001b[1;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\hashtable_class_helper.pxi\u001b[0m in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\hashtable_class_helper.pxi\u001b[0m in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: 'Y'",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[1;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-19-a54faefd05e5>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;31m# 保存中间结果数据\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'X'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_x_pad_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[0mtrain_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'Y'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_y_pad_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      4\u001b[0m \u001b[0mtest_df\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'X'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto_csv\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest_x_pad_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mindex\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mheader\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\pandas\\core\\frame.py\u001b[0m in \u001b[0;36m__getitem__\u001b[1;34m(self, key)\u001b[0m\n\u001b[0;32m   2798\u001b[0m             \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcolumns\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnlevels\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2799\u001b[0m                 \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_getitem_multilevel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2800\u001b[1;33m             \u001b[0mindexer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcolumns\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_loc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2801\u001b[0m             \u001b[1;32mif\u001b[0m \u001b[0mis_integer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mindexer\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2802\u001b[0m                 \u001b[0mindexer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[0mindexer\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\anaconda3\\lib\\site-packages\\pandas\\core\\indexes\\base.py\u001b[0m in \u001b[0;36mget_loc\u001b[1;34m(self, key, method, tolerance)\u001b[0m\n\u001b[0;32m   2646\u001b[0m                 \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_engine\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_loc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2647\u001b[0m             \u001b[1;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2648\u001b[1;33m                 \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_engine\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_loc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_maybe_cast_indexer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   2649\u001b[0m         \u001b[0mindexer\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_indexer\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mkey\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmethod\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mmethod\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtolerance\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtolerance\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2650\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0mindexer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mndim\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;36m1\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mindexer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\hashtable_class_helper.pxi\u001b[0m in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;32mpandas\\_libs\\hashtable_class_helper.pxi\u001b[0m in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[1;34m()\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: 'Y'"
     ]
    }
   ],
   "source": [
    "# 保存中间结果数据\n",
    "train_df['X'].to_csv(train_x_pad_path, index=None, header=False)\n",
    "train_df['Y'].to_csv(train_y_pad_path, index=None, header=False)\n",
    "test_df['X'].to_csv(test_x_pad_path, index=None, header=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q4 新加的符号不在词表 和 词向量矩阵中,怎么办?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "'nvaida' 不是内部或外部命令，也不是可运行的程序\n或批处理文件。\n"
    }
   ],
   "source": [
    "!nvidia-smi"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.4 词表更新"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {
    "scrolled": true,
    "tags": [
     "outputPrepend"
    ]
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "733 : INFO : EPOCH 2 - PROGRESS: at 75.92% examples, 1064195 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:33,736 : INFO : EPOCH 2 - PROGRESS: at 87.85% examples, 1062604 words/s, in_qsize 11, out_qsize 0\n2020-07-21 09:20:34,633 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:20:34,636 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:20:34,640 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:20:34,640 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:20:34,641 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:20:34,643 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:20:34,645 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:20:34,646 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:20:34,646 : INFO : EPOCH - 2 : training on 29502788 raw words (8448996 effective words) took 7.9s, 1065787 effective words/s\n2020-07-21 09:20:35,650 : INFO : EPOCH 3 - PROGRESS: at 12.91% examples, 1067566 words/s, in_qsize 15, out_qsize 0\n2020-07-21 09:20:36,651 : INFO : EPOCH 3 - PROGRESS: at 26.18% examples, 1095989 words/s, in_qsize 13, out_qsize 0\n2020-07-21 09:20:37,655 : INFO : EPOCH 3 - PROGRESS: at 39.06% examples, 1081095 words/s, in_qsize 13, out_qsize 3\n2020-07-21 09:20:38,660 : INFO : EPOCH 3 - PROGRESS: at 51.22% examples, 1052423 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:39,662 : INFO : EPOCH 3 - PROGRESS: at 62.47% examples, 1037731 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:40,662 : INFO : EPOCH 3 - PROGRESS: at 73.96% examples, 1034493 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:41,664 : INFO : EPOCH 3 - PROGRESS: at 85.18% examples, 1030190 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:42,671 : INFO : EPOCH 3 - PROGRESS: at 97.34% examples, 1025148 words/s, 
in_qsize 14, out_qsize 1\n2020-07-21 09:20:42,858 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:20:42,859 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:20:42,866 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:20:42,867 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:20:42,870 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:20:42,870 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:20:42,871 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:20:42,872 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:20:42,872 : INFO : EPOCH - 3 : training on 29502788 raw words (8444529 effective words) took 8.2s, 1026988 effective words/s\n2020-07-21 09:20:43,876 : INFO : EPOCH 4 - PROGRESS: at 12.43% examples, 1029186 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:44,877 : INFO : EPOCH 4 - PROGRESS: at 24.87% examples, 1038598 words/s, in_qsize 0, out_qsize 1\n2020-07-21 09:20:45,880 : INFO : EPOCH 4 - PROGRESS: at 36.90% examples, 1022469 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:46,882 : INFO : EPOCH 4 - PROGRESS: at 48.75% examples, 1004395 words/s, in_qsize 15, out_qsize 0\n2020-07-21 09:20:47,883 : INFO : EPOCH 4 - PROGRESS: at 60.78% examples, 1008678 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:48,884 : INFO : EPOCH 4 - PROGRESS: at 72.27% examples, 1009273 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:49,887 : INFO : EPOCH 4 - PROGRESS: at 83.25% examples, 1006014 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:50,887 : INFO : EPOCH 4 - PROGRESS: at 93.99% examples, 993452 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:51,403 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:20:51,404 : INFO : worker thread finished; awaiting finish of 6 more 
threads\n2020-07-21 09:20:51,404 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:20:51,404 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:20:51,405 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:20:51,405 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:20:51,406 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:20:51,408 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:20:51,408 : INFO : EPOCH - 4 : training on 29502788 raw words (8446143 effective words) took 8.5s, 989782 effective words/s\n2020-07-21 09:20:52,411 : INFO : EPOCH 5 - PROGRESS: at 11.42% examples, 944812 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:53,412 : INFO : EPOCH 5 - PROGRESS: at 23.01% examples, 957249 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:54,414 : INFO : EPOCH 5 - PROGRESS: at 34.80% examples, 965765 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:55,416 : INFO : EPOCH 5 - PROGRESS: at 46.46% examples, 958204 words/s, in_qsize 1, out_qsize 0\n2020-07-21 09:20:56,418 : INFO : EPOCH 5 - PROGRESS: at 57.94% examples, 959073 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:20:57,419 : INFO : EPOCH 5 - PROGRESS: at 69.84% examples, 973602 words/s, in_qsize 13, out_qsize 0\n2020-07-21 09:20:58,424 : INFO : EPOCH 5 - PROGRESS: at 82.24% examples, 992367 words/s, in_qsize 4, out_qsize 0\n2020-07-21 09:20:59,434 : INFO : EPOCH 5 - PROGRESS: at 94.70% examples, 999919 words/s, in_qsize 12, out_qsize 2\n2020-07-21 09:20:59,828 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:20:59,828 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:20:59,828 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:20:59,829 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 
09:20:59,829 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:20:59,829 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:20:59,830 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:20:59,832 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:20:59,833 : INFO : EPOCH - 5 : training on 29502788 raw words (8447783 effective words) took 8.4s, 1003063 effective words/s\n2020-07-21 09:20:59,833 : INFO : training on a 147513940 raw words (42233293 effective words) took 40.9s, 1032299 effective words/s\n2020-07-21 09:20:59,834 : INFO : collecting all words and their counts\n2020-07-21 09:20:59,841 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word types\n2020-07-21 09:20:59,926 : INFO : PROGRESS: at sentence #10000, processed 420000 words, keeping 7474 word types\n2020-07-21 09:21:00,010 : INFO : PROGRESS: at sentence #20000, processed 840000 words, keeping 10107 word types\n1/3\n2020-07-21 09:21:00,094 : INFO : PROGRESS: at sentence #30000, processed 1260000 words, keeping 11888 word types\n2020-07-21 09:21:00,178 : INFO : PROGRESS: at sentence #40000, processed 1680000 words, keeping 13313 word types\n2020-07-21 09:21:00,260 : INFO : PROGRESS: at sentence #50000, processed 2100000 words, keeping 14436 word types\n2020-07-21 09:21:00,343 : INFO : PROGRESS: at sentence #60000, processed 2520000 words, keeping 15479 word types\n2020-07-21 09:21:00,427 : INFO : PROGRESS: at sentence #70000, processed 2940000 words, keeping 16495 word types\n2020-07-21 09:21:00,510 : INFO : PROGRESS: at sentence #80000, processed 3360000 words, keeping 17217 word types\n2020-07-21 09:21:00,534 : INFO : collected 17437 word types from a corpus of 3480666 raw words and 82873 sentences\n2020-07-21 09:21:00,535 : INFO : Updating model with new vocabulary\n2020-07-21 09:21:00,544 : INFO : New added 6933 unique words (28% of original 24370) and increased 
the count of 6933 pre-existing words (28% of original 24370)\n2020-07-21 09:21:00,577 : INFO : deleting the raw counts dictionary of 17437 items\n2020-07-21 09:21:00,578 : INFO : sample=0.001 downsamples 36 most-common words\n2020-07-21 09:21:00,579 : INFO : downsampling leaves estimated 2383649 word corpus (68.9% of prior 3461539)\n2020-07-21 09:21:00,626 : INFO : estimated required memory for 13866 words and 300 dimensions: 40211400 bytes\n2020-07-21 09:21:00,627 : INFO : updating layer weights\n2020-07-21 09:21:00,655 : WARNING : Effective 'alpha' higher than previous training cycles\n2020-07-21 09:21:00,656 : INFO : training model with 8 workers on 31093 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=3\n2020-07-21 09:21:01,663 : INFO : EPOCH 1 - PROGRESS: at 78.11% examples, 947137 words/s, in_qsize 9, out_qsize 0\n2020-07-21 09:21:01,902 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:01,904 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:01,906 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:01,907 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:01,908 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:01,909 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:01,910 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:01,912 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:01,913 : INFO : EPOCH - 1 : training on 3480666 raw words (1210783 effective words) took 1.3s, 965742 effective words/s\n2020-07-21 09:21:02,920 : INFO : EPOCH 2 - PROGRESS: at 75.53% examples, 914659 words/s, in_qsize 13, out_qsize 0\n2020-07-21 09:21:03,181 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:03,186 : INFO : worker thread finished; 
awaiting finish of 6 more threads\n2020-07-21 09:21:03,187 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:03,190 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:03,194 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:03,196 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:03,197 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:03,197 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:03,197 : INFO : EPOCH - 2 : training on 3480666 raw words (1211791 effective words) took 1.3s, 945342 effective words/s\n2020-07-21 09:21:04,203 : INFO : EPOCH 3 - PROGRESS: at 78.11% examples, 949136 words/s, in_qsize 15, out_qsize 1\n2020-07-21 09:21:04,427 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:04,431 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:04,432 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:04,432 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:04,433 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:04,433 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:04,435 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:04,437 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:04,438 : INFO : EPOCH - 3 : training on 3480666 raw words (1210584 effective words) took 1.2s, 977443 effective words/s\n2020-07-21 09:21:05,440 : INFO : EPOCH 4 - PROGRESS: at 78.69% examples, 958759 words/s, in_qsize 15, out_qsize 0\n2020-07-21 09:21:05,661 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:05,665 : INFO : worker thread finished; awaiting finish of 6 
more threads\n2020-07-21 09:21:05,666 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:05,667 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:05,667 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:05,668 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:05,670 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:05,671 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:05,672 : INFO : EPOCH - 4 : training on 3480666 raw words (1210510 effective words) took 1.2s, 983351 effective words/s\n2020-07-21 09:21:06,675 : INFO : EPOCH 5 - PROGRESS: at 81.27% examples, 991214 words/s, in_qsize 1, out_qsize 0\n2020-07-21 09:21:06,889 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:06,893 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:06,893 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:06,894 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:06,894 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:06,896 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:06,898 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:06,900 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:06,900 : INFO : EPOCH - 5 : training on 3480666 raw words (1211106 effective words) took 1.2s, 988178 effective words/s\n2020-07-21 09:21:06,901 : INFO : training on a 17403330 raw words (6054774 effective words) took 6.2s, 969664 effective words/s\n2020-07-21 09:21:06,901 : INFO : collecting all words and their counts\n2020-07-21 09:21:06,905 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word 
types\n2/3\n2020-07-21 09:21:07,481 : INFO : PROGRESS: at sentence #10000, processed 3560000 words, keeping 21520 word types\n2020-07-21 09:21:08,070 : INFO : collected 25844 word types from a corpus of 7120000 raw words and 20000 sentences\n2020-07-21 09:21:08,070 : INFO : Updating model with new vocabulary\n2020-07-21 09:21:08,084 : INFO : New added 12171 unique words (32% of original 38015) and increased the count of 12171 pre-existing words (32% of original 38015)\n2020-07-21 09:21:08,142 : INFO : deleting the raw counts dictionary of 25844 items\n2020-07-21 09:21:08,143 : INFO : sample=0.001 downsamples 24 most-common words\n2020-07-21 09:21:08,144 : INFO : downsampling leaves estimated 4030426 word corpus (56.8% of prior 7091286)\n2020-07-21 09:21:08,197 : INFO : estimated required memory for 24342 words and 300 dimensions: 70591800 bytes\n2020-07-21 09:21:08,198 : INFO : updating layer weights\n2020-07-21 09:21:08,226 : WARNING : Effective 'alpha' higher than previous training cycles\n2020-07-21 09:21:08,226 : INFO : training model with 8 workers on 31093 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=3\n2020-07-21 09:21:09,234 : INFO : EPOCH 1 - PROGRESS: at 49.42% examples, 1024665 words/s, in_qsize 13, out_qsize 0\n2020-07-21 09:21:10,174 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:10,181 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:10,181 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:10,182 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:10,182 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:10,183 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:10,186 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:10,188 : INFO : worker thread finished; awaiting finish of 0 
more threads\n2020-07-21 09:21:10,188 : INFO : EPOCH - 1 : training on 7120000 raw words (2043757 effective words) took 2.0s, 1043366 effective words/s\n2020-07-21 09:21:11,191 : INFO : EPOCH 2 - PROGRESS: at 50.12% examples, 1043280 words/s, in_qsize 13, out_qsize 0\n2020-07-21 09:21:12,107 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:12,109 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:12,109 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:12,110 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:12,114 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:12,115 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:12,116 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:12,116 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:12,117 : INFO : EPOCH - 2 : training on 7120000 raw words (2045169 effective words) took 1.9s, 1061839 effective words/s\n2020-07-21 09:21:13,120 : INFO : EPOCH 3 - PROGRESS: at 49.98% examples, 1040028 words/s, in_qsize 15, out_qsize 1\n2020-07-21 09:21:14,048 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:14,050 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:14,051 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:14,051 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:14,052 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:14,054 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:14,055 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:14,056 : INFO : worker thread finished; awaiting finish of 0 more 
threads\n2020-07-21 09:21:14,056 : INFO : EPOCH - 3 : training on 7120000 raw words (2043820 effective words) took 1.9s, 1055062 effective words/s\n2020-07-21 09:21:15,060 : INFO : EPOCH 4 - PROGRESS: at 50.12% examples, 1041482 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:21:16,000 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:16,003 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:16,005 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:16,006 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:16,008 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:16,009 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:16,010 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:16,011 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:21:16,012 : INFO : EPOCH - 4 : training on 7120000 raw words (2043540 effective words) took 2.0s, 1046140 effective words/s\n2020-07-21 09:21:17,017 : INFO : EPOCH 5 - PROGRESS: at 50.54% examples, 1050235 words/s, in_qsize 9, out_qsize 0\n2020-07-21 09:21:17,957 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:21:17,959 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:21:17,963 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:21:17,966 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:21:17,969 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:21:17,970 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:21:17,970 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:21:17,971 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 
09:21:17,971 : INFO : EPOCH - 5 : training on 7120000 raw words (2044058 effective words) took 2.0s, 1045723 effective words/s\n2020-07-21 09:21:17,972 : INFO : training on a 35600000 raw words (10220344 effective words) took 9.7s, 1048825 effective words/s\n"
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(10220344, 35600000)"
     },
     "metadata": {},
     "execution_count": 39
    }
   ],
   "source": [
    "print('start retrain w2v model')\n",
    "wv_model.build_vocab(LineSentence(train_x_pad_path), update=True)\n",
    "wv_model.train(LineSentence(train_x_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)\n",
    "print('1/3')\n",
    "wv_model.build_vocab(LineSentence(train_y_pad_path), update=True)\n",
    "wv_model.train(LineSentence(train_y_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)\n",
    "print('2/3')\n",
    "wv_model.build_vocab(LineSentence(test_x_pad_path), update=True)\n",
    "wv_model.train(LineSentence(test_x_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2019-11-23 22:16:53,722 : INFO : saving Word2Vec object under /home/roger/kaikeba/03_lecture/code/data/wv/word2vec.model, separately None\n",
      "2019-11-23 22:16:53,723 : INFO : not storing attribute vectors_norm\n",
      "2019-11-23 22:16:53,723 : INFO : not storing attribute cum_table\n",
      "2019-11-23 22:16:54,247 : INFO : saved /home/roger/kaikeba/03_lecture/code/data/wv/word2vec.model\n"
     ]
    }
   ],
   "source": [
    "# 保存词向量模型\n",
    "wv_model.save(save_wv_model_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q5.为什么不一开始就添加 标志符号,然后训练词向量?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(32804, 300)"
      ]
     },
     "execution_count": 66,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 更新vocab \n",
    "vocab = {word: index for index, word in enumerate(wv_model.wv.index2word)}\n",
    "reverse_vocab = {index: word for index, word in enumerate(wv_model.wv.index2word)}\n",
    "# 更新词向量矩阵\n",
    "embedding_matrix = wv_model.wv.vectors\n",
    "embedding_matrix.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q6. 词可以训练吗?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0    <START> 方向机 重 助力 泵 方向机 都 换 新 都 换 助力 泵 方向机 换 方向...\n",
       "1    <START> 奔驰 <UNK> 排气 凸轮轴 调节 错误 有没有 电脑 检测 故障 代码 ...\n",
       "2    <START> 2010 款 宝马X1 2011 年 出厂 20 排量 通用 <UNK> 变...\n",
       "3    <START> 30V6 发动机 号 位置 照片 最好 右侧 排气管 上方 缸体 上 靠近 ...\n",
       "4    <START> 2012 款 奔驰 c180 维修保养 动力 值得 拥有 家庭 用车 入手 ...\n",
       "Name: X, dtype: object"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_X.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.5 数值转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 遇到未知词就填充unk的索引\n",
    "unk_index = vocab['<UNK>']\n",
    "def transform_data(sentence,vocab):\n",
    "    # 字符串切分成词\n",
    "    words=sentence.split(' ')\n",
    "    # 按照vocab的index进行转换\n",
    "    ids=[vocab[word] if word in vocab else unk_index for word in words]\n",
    "    return ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将词转换成索引  [<START> 方向机 重 ...] -> [32800, 403, 986, 246, 231\n",
    "train_ids_x=train_X.apply(lambda x:transform_data(x,vocab))\n",
    "train_ids_y=train_Y.apply(lambda x:transform_data(x,vocab))\n",
    "test_ids_x=test_X.apply(lambda x:transform_data(x,vocab))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将索引列表转换成矩阵 [32800, 403, 986, 246, 231] --> array([[32800,   403,   986 ]]\n",
    "train_data_X=np.array(train_ids_x.tolist())\n",
    "train_data_Y=np.array(train_ids_y.tolist())\n",
    "test_data_X=np.array(test_ids_x.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(82871, 261)"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_data_X.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. 简易模型搭建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensorflow.keras.models import Model, Sequential\n",
    "from tensorflow.keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional\n",
    "from tensorflow.keras.layers import Embedding\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.losses import sparse_categorical_crossentropy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": [
    "def seq2seq(input_length, output_sequence_length, embedding_matrix, vocab_size):\n",
    "    model = Sequential()\n",
    "    model.add(Embedding(input_dim=vocab_size, output_dim=300, weights=[embedding_matrix], trainable=False,\n",
    "                        input_length=input_length))\n",
    "    model.add(Bidirectional(GRU(300, return_sequences=False)))\n",
    "    model.add(Dense(300, activation=\"relu\"))\n",
    "    model.add(RepeatVector(output_sequence_length))\n",
    "    model.add(Bidirectional(GRU(300, return_sequences=True)))\n",
    "    model.add(TimeDistributed(Dense(vocab_size, activation='softmax')))\n",
    "    model.compile(loss=sparse_categorical_crossentropy,\n",
    "                  optimizer=Adam(1e-3))\n",
    "    model.summary()\n",
    "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.1 基本参数设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(82871, 261)"
      ]
     },
     "execution_count": 84,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_data_X.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 输入的长度\n",
    "input_length = train_data_X.shape[1]\n",
    "# 输出的长度\n",
    "output_sequence_length = train_data_Y.shape[1]\n",
    "# 词表大小\n",
    "vocab_size=len(vocab)\n",
    "# 词向量矩阵\n",
    "embedding_matrix = wv_model.wv.vectors"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.2 模型构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "embedding (Embedding)        (None, 261, 300)          9841200   \n",
      "_________________________________________________________________\n",
      "bidirectional (Bidirectional (None, 600)               1083600   \n",
      "_________________________________________________________________\n",
      "dense (Dense)                (None, 300)               180300    \n",
      "_________________________________________________________________\n",
      "repeat_vector (RepeatVector) (None, 34, 300)           0         \n",
      "_________________________________________________________________\n",
      "bidirectional_1 (Bidirection (None, 34, 600)           1083600   \n",
      "_________________________________________________________________\n",
      "time_distributed (TimeDistri (None, 34, 32804)         19715204  \n",
      "=================================================================\n",
      "Total params: 31,903,904\n",
      "Trainable params: 22,062,704\n",
      "Non-trainable params: 9,841,200\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "model = seq2seq(input_length,output_sequence_length,embedding_matrix,vocab_size)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.3 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 66296 samples, validate on 16575 samples\n",
      "15424/66296 [=====>........................] - ETA: 26:42 - loss: 3.4628"
     ]
    }
   ],
   "source": [
    "model.fit(train_data_X, train_data_Y, batch_size=32, epochs=1, validation_split=0.2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.4 模型保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
     "model.save('data/seq2seq_model.h5')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## 4.5 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_data_Y = model.predict(test_data_X)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# seq2seq"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "1. 所有输出端，都以一个通用的`<start>`标记开头，以`<end>`标记结尾，这两个标记也视为一个词/字；\n",
     "\n",
     "2. 将`<start>`输入decoder，然后得到隐藏层向量，将这个向量与encoder的输出混合，然后送入一个分类器，分类器的结果应当输出P；\n",
     "\n",
     "3. 将P输入decoder，得到新的隐藏层向量，再次与encoder的输出混合，送入分类器，分类器应输出Q；\n",
     "\n",
     "4. 依此递归，直到分类器的结果输出`<end>`。\n",
    "    \n",
    "\n",
    "* 回到用seq2seq生成文章标题这个任务上，模型可以做些简化，并且可以引入一些先验知识。比如，由于输入语言和输出语言都是中文，因此encoder和decoder的Embedding层可以共享参数（也就是用同一套词向量）。这使得模型的参数量大幅度减少了。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.preprocessing.text import Tokenizer # 词表构建 单词过滤 词频统计 序列填充\n",
    "from keras.preprocessing.sequence import pad_sequences # 序列数据填充\n",
    "from sklearn.model_selection import train_test_split # 数据集划分"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6-final"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}