{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "The autoreload extension is already loaded. To reload it, use:\n  %reload_ext autoreload\n"
    }
   ],
   "source": [
     "# Notebook setup: live-reload local modules while they are being edited.\n",
     "%load_ext autoreload\n",
     "%autoreload 2\n",
     "# Silence library warnings so the narrative output stays readable.\n",
     "import warnings\n",
     "warnings.filterwarnings(\"ignore\")\n",
     "import sys\n",
     "# NOTE(review): hardcoded absolute local path -- breaks on any other machine;\n",
     "# prefer a path relative to the notebook or a configurable project root.\n",
     "sys.path.append(r'C:\\Users\\Administrator.DESKTOP-BN41LK7\\Desktop\\lecture01_1_2\\code')    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1. 数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "C:\\Users\\Administrator.DESKTOP-BN41LK7\\Desktop\\lecture01_1_2\\code\nBuilding prefix dict from the default dictionary ...\n2020-09-01 21:21:07,889 : DEBUG : Building prefix dict from the default dictionary ...\nLoading model from cache C:\\Users\\ADMINI~1.DES\\AppData\\Local\\Temp\\jieba.cache\n2020-09-01 21:21:07,891 : DEBUG : Loading model from cache C:\\Users\\ADMINI~1.DES\\AppData\\Local\\Temp\\jieba.cache\nLoading model cost 0.490 seconds.\n2020-09-01 21:21:08,381 : DEBUG : Loading model cost 0.490 seconds.\nPrefix dict has been built successfully.\n2020-09-01 21:21:08,382 : DEBUG : Prefix dict has been built successfully.\n"
    }
   ],
   "source": [
     "# All imports for the notebook in one place (third-party, then project-local).\n",
     "import pandas as pd\n",
     "import numpy as np\n",
     "from utils.data_loader import build_dataset,pad_proc,sentences_proc\n",
     "# NOTE(review): wildcard import -- it supplies the *_path constants and\n",
     "# wv_train_epochs used below, but hides where those names come from.\n",
     "from utils.config import *\n",
     "from utils.multi_proc_utils import parallelize\n",
     "from gensim.models.word2vec import LineSentence, Word2Vec"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.1加载数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'C:\\\\Users\\\\Administrator.DESKTOP-BN41LK7\\\\Desktop\\\\lecture01_1_2\\\\code\\\\data\\\\AutoMaster_TrainSet.csv'"
     },
     "metadata": {},
     "execution_count": 5
    }
   ],
   "source": [
    "train_data_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "train data size 82943,test data size 20000\n"
    }
   ],
   "source": [
    "train_df = pd.read_csv(train_data_path)\n",
    "test_df = pd.read_csv(test_data_path)\n",
    "print('train data size {},test data size {}'.format(len(train_df), len(test_df)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "0    技师说：[语音]|车主说：新的都换了|车主说：助力泵，方向机|技师说：[语音]|车主说：换了...\n1    技师说：你这个有没有电脑检测故障代码。|车主说：有|技师说：发一下|车主说：发动机之前亮故障...\n2    技师说：你好，4缸自然吸气发动机N46是吧，先挂空档再挂其他档有没有闯动呢，变速箱油液位是否...\n3    技师说：右侧排气管上方，缸体上靠近变速箱|车主说：[图片]|车主说：是不是这个？|车主说：这...\n4    技师说：家庭用车的话，还是可以入手的|技师说：维修保养费用不高|车主说：12年的180市场价...\nName: Dialogue, dtype: object"
     },
     "metadata": {},
     "execution_count": 11
    }
   ],
   "source": [
    "train_df['Dialogue'].head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'技师说：[语音]|车主说：新的都换了|车主说：助力泵，方向机|技师说：[语音]|车主说：换了方向机带的有|车主说：[图片]|技师说：[语音]|车主说：有助力就是重，这车要匹配吧|技师说：不需要|技师说：你这是更换的部件有问题|车主说：跑快了还好点，就倒车重的很。|技师说：是非常重吗|车主说：是的，累人|技师说：[语音]|车主说：我觉得也是，可是车主是以前没这么重，选吧助理泵换了不行，又把放向机换了，现在还这样就不知道咋和车主解释。|技师说：[语音]|技师说：[语音]'"
     },
     "metadata": {},
     "execution_count": 12
    }
   ],
   "source": [
    "train_df['Dialogue'][0]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "数据预览（训练集见下方 `train_df.head()`，测试集可用 `test_df.head()` 同样查看）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "  QID Brand     Model                                           Question  \\\n0  Q1    奔驰     奔驰GL级                                方向机重，助力泵，方向机都换了还是一样   \n1  Q2    奔驰      奔驰M级                                   奔驰ML500排气凸轮轴调节错误   \n2  Q3    宝马  宝马X1(进口)  2010款宝马X1，2011年出厂，2.0排量，通用6L45变速箱，原地换挡位PRND车辆闯...   \n3  Q4  Jeep       牧马人                              3.0V6发动机号在什么位置，有照片最好！   \n4  Q5    奔驰      奔驰C级                       2012款奔驰c180怎么样，维修保养，动力，值得拥有吗   \n\n                                            Dialogue  \\\n0  技师说：[语音]|车主说：新的都换了|车主说：助力泵，方向机|技师说：[语音]|车主说：换了...   \n1  技师说：你这个有没有电脑检测故障代码。|车主说：有|技师说：发一下|车主说：发动机之前亮故障...   \n2  技师说：你好，4缸自然吸气发动机N46是吧，先挂空档再挂其他档有没有闯动呢，变速箱油液位是否...   \n3  技师说：右侧排气管上方，缸体上靠近变速箱|车主说：[图片]|车主说：是不是这个？|车主说：这...   \n4  技师说：家庭用车的话，还是可以入手的|技师说：维修保养费用不高|车主说：12年的180市场价...   \n\n                                      Report  \n0                                       随时联系  \n1                                       随时联系  \n2  行驶没有顿挫的感觉，原地换挡有闯动，刹车踩重没有，这是力的限制的作用，应该没有问题  \n3                            举起车辆，在左前轮这边的缸体上  \n4           家庭用车可以入手的，维修保养价格还可以。车况好，价格合理可以入手  ",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>QID</th>\n      <th>Brand</th>\n      <th>Model</th>\n      <th>Question</th>\n      <th>Dialogue</th>\n      <th>Report</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>Q1</td>\n      <td>奔驰</td>\n      <td>奔驰GL级</td>\n      <td>方向机重，助力泵，方向机都换了还是一样</td>\n      <td>技师说：[语音]|车主说：新的都换了|车主说：助力泵，方向机|技师说：[语音]|车主说：换了...</td>\n      <td>随时联系</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>Q2</td>\n      <td>奔驰</td>\n      <td>奔驰M级</td>\n      <td>奔驰ML500排气凸轮轴调节错误</td>\n      <td>技师说：你这个有没有电脑检测故障代码。|车主说：有|技师说：发一下|车主说：发动机之前亮故障...</td>\n      <td>随时联系</td>\n    </tr>\n    <tr>\n      <th>2</th>\n      <td>Q3</td>\n      <td>宝马</td>\n      <td>宝马X1(进口)</td>\n      <td>2010款宝马X1，2011年出厂，2.0排量，通用6L45变速箱，原地换挡位PRND车辆闯...</td>\n      <td>技师说：你好，4缸自然吸气发动机N46是吧，先挂空档再挂其他档有没有闯动呢，变速箱油液位是否...</td>\n      <td>行驶没有顿挫的感觉，原地换挡有闯动，刹车踩重没有，这是力的限制的作用，应该没有问题</td>\n    </tr>\n    <tr>\n      <th>3</th>\n      <td>Q4</td>\n      <td>Jeep</td>\n      <td>牧马人</td>\n      <td>3.0V6发动机号在什么位置，有照片最好！</td>\n      <td>技师说：右侧排气管上方，缸体上靠近变速箱|车主说：[图片]|车主说：是不是这个？|车主说：这...</td>\n      <td>举起车辆，在左前轮这边的缸体上</td>\n    </tr>\n    <tr>\n      <th>4</th>\n      <td>Q5</td>\n      <td>奔驰</td>\n      <td>奔驰C级</td>\n      <td>2012款奔驰c180怎么样，维修保养，动力，值得拥有吗</td>\n      <td>技师说：家庭用车的话，还是可以入手的|技师说：维修保养费用不高|车主说：12年的180市场价...</td>\n      <td>家庭用车可以入手的，维修保养价格还可以。车况好，价格合理可以入手</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "metadata": {},
     "execution_count": 13
    }
   ],
   "source": [
    "train_df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.2 空值填充"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 2. 空值剔除\n",
    "train_df.dropna(subset=['Report'], inplace=True)\n",
    "\n",
    "train_df.fillna('', inplace=True)\n",
    "test_df.fillna('', inplace=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.3.多核, 批量数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Wall time: 3min 49s\n"
    }
   ],
   "source": [
     "%%time\n",
     "# Single-process baseline: clean/tokenize every row with sentences_proc.\n",
     "# NOTE(review): the next two cells re-apply the same processing via\n",
     "# parallelize -- on Restart & Run All the work is repeated three times;\n",
     "# keep only one of these cells.\n",
     "train_df=sentences_proc(train_df)\n",
     "test_df=sentences_proc(test_df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Wall time: 37.2 s\n"
    }
   ],
   "source": [
     "%%time\n",
     "# Multi-process version: parallelize splits the frame across workers and\n",
     "# applies sentences_proc to each chunk (37s here vs ~4min sequentially above).\n",
     "train_df = parallelize(train_df, sentences_proc)\n",
     "test_df = parallelize(test_df, sentences_proc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "Wall time: 34.8 s\n"
    }
   ],
   "source": [
     "%%time\n",
     "# NOTE(review): exact duplicate of the previous cell -- this cell can be\n",
     "# deleted without affecting the rest of the notebook.\n",
     "train_df = parallelize(train_df, sentences_proc)\n",
     "test_df = parallelize(test_df, sentences_proc)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.4 合并训练测试数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "train data size 82873,test data size 20000,merged_df data size 102873\n"
    }
   ],
   "source": [
    "train_df['merged'] = train_df[['Question', 'Dialogue', 'Report']].apply(lambda x: ' '.join(x), axis=1)\n",
    "test_df['merged'] = test_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)\n",
    "merged_df = pd.concat([train_df[['merged']], test_df[['merged']]], axis=0)\n",
    "print('train data size {},test data size {},merged_df data size {}'.format(len(train_df), len(test_df),len(merged_df)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 1.5 保存处理好的 训练 测试集合"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Persist the processed splits; 'merged' was only needed for the corpus file.\n",
     "train_df = train_df.drop(['merged'], axis=1)\n",
     "test_df = test_df.drop(['merged'], axis=1)\n",
     "train_df.to_csv(train_seg_path, index=None, header=True)\n",
     "test_df.to_csv(test_seg_path, index=None, header=True)\n",
     "# header=False: the word2vec corpus file is one plain sentence per line.\n",
     "merged_df.to_csv(merger_seg_path, index=None, header=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 2. 词向量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.1 预训练词向量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'C:\\\\Users\\\\Administrator.DESKTOP-HN1J6IE\\\\Desktop\\\\lecture01_1_2\\\\code\\\\data\\\\merged_train_test_seg_data.csv'"
     },
     "metadata": {},
     "execution_count": 20
    }
   ],
   "source": [
    "merger_seg_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "2020-07-21 09:36:51,732 : INFO : collecting all words and their counts\n2020-07-21 09:36:51,737 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word types\n2020-07-21 09:36:52,013 : INFO : PROGRESS: at sentence #10000, processed 1280514 words, keeping 33455 word types\n2020-07-21 09:36:52,291 : INFO : PROGRESS: at sentence #20000, processed 2579817 words, keeping 48497 word types\n2020-07-21 09:36:52,562 : INFO : PROGRESS: at sentence #30000, processed 3854099 words, keeping 59581 word types\n2020-07-21 09:36:52,830 : INFO : PROGRESS: at sentence #40000, processed 5093674 words, keeping 68854 word types\n2020-07-21 09:36:53,115 : INFO : PROGRESS: at sentence #50000, processed 6408089 words, keeping 77069 word types\n2020-07-21 09:36:53,414 : INFO : PROGRESS: at sentence #60000, processed 7806037 words, keeping 85365 word types\n2020-07-21 09:36:53,721 : INFO : PROGRESS: at sentence #70000, processed 9234196 words, keeping 93470 word types\n2020-07-21 09:36:53,997 : INFO : PROGRESS: at sentence #80000, processed 10505447 words, keeping 100297 word types\n2020-07-21 09:36:54,258 : INFO : PROGRESS: at sentence #90000, processed 11666596 words, keeping 106961 word types\n2020-07-21 09:36:54,515 : INFO : PROGRESS: at sentence #100000, processed 12828644 words, keeping 112848 word types\n2020-07-21 09:36:54,592 : INFO : collected 114526 word types from a corpus of 13171322 raw words and 102873 sentences\n2020-07-21 09:36:54,592 : INFO : Loading a fresh vocabulary\n2020-07-21 09:36:54,681 : INFO : effective_min_count=5 retains 31087 unique words (27% of original 114526, drops 83439)\n2020-07-21 09:36:54,682 : INFO : effective_min_count=5 leaves 13043129 word corpus (99% of original 13171322, drops 128193)\n2020-07-21 09:36:54,749 : INFO : deleting the raw counts dictionary of 114526 items\n2020-07-21 09:36:54,752 : INFO : sample=0.001 downsamples 42 most-common words\n2020-07-21 09:36:54,753 : INFO : downsampling leaves estimated 9662953 word 
corpus (74.1% of prior 13043129)\n2020-07-21 09:36:54,819 : INFO : estimated required memory for 31087 words and 300 dimensions: 90152300 bytes\n2020-07-21 09:36:54,820 : INFO : resetting layer weights\n2020-07-21 09:36:59,139 : INFO : training model with 8 workers on 31087 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=3\n2020-07-21 09:37:00,147 : INFO : EPOCH 1 - PROGRESS: at 14.03% examples, 1344033 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:01,151 : INFO : EPOCH 1 - PROGRESS: at 28.11% examples, 1353394 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:02,155 : INFO : EPOCH 1 - PROGRESS: at 43.21% examples, 1374407 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:03,158 : INFO : EPOCH 1 - PROGRESS: at 56.96% examples, 1387314 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:04,158 : INFO : EPOCH 1 - PROGRESS: at 70.04% examples, 1389044 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:05,159 : INFO : EPOCH 1 - PROGRESS: at 85.76% examples, 1396569 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:06,033 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:06,035 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:06,036 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:37:06,036 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:06,038 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:06,039 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:06,041 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:06,046 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:06,047 : INFO : EPOCH - 1 : training on 13171322 raw words (9661733 effective words) took 6.9s, 1399299 effective words/s\n2020-07-21 09:37:07,050 : INFO : EPOCH 2 - PROGRESS: at 15.11% examples, 
1456837 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:08,053 : INFO : EPOCH 2 - PROGRESS: at 30.96% examples, 1493218 words/s, in_qsize 1, out_qsize 0\n2020-07-21 09:37:09,056 : INFO : EPOCH 2 - PROGRESS: at 46.25% examples, 1480782 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:10,059 : INFO : EPOCH 2 - PROGRESS: at 60.60% examples, 1487409 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:11,063 : INFO : EPOCH 2 - PROGRESS: at 74.43% examples, 1475521 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:12,069 : INFO : EPOCH 2 - PROGRESS: at 90.08% examples, 1467001 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:12,663 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:12,664 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:12,665 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:37:12,666 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:12,668 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:12,669 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:12,672 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:12,677 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:12,677 : INFO : EPOCH - 2 : training on 13171322 raw words (9663328 effective words) took 6.6s, 1458006 effective words/s\n2020-07-21 09:37:13,684 : INFO : EPOCH 3 - PROGRESS: at 16.01% examples, 1531381 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:14,688 : INFO : EPOCH 3 - PROGRESS: at 33.67% examples, 1611766 words/s, in_qsize 1, out_qsize 0\n2020-07-21 09:37:15,689 : INFO : EPOCH 3 - PROGRESS: at 50.08% examples, 1612192 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:16,691 : INFO : EPOCH 3 - PROGRESS: at 65.03% examples, 1602887 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:17,692 : INFO : EPOCH 3 - 
PROGRESS: at 80.36% examples, 1585961 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:18,693 : INFO : EPOCH 3 - PROGRESS: at 96.77% examples, 1558629 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:18,880 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:18,881 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:18,881 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:37:18,882 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:18,883 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:18,884 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:18,886 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:18,892 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:18,893 : INFO : EPOCH - 3 : training on 13171322 raw words (9661683 effective words) took 6.2s, 1554988 effective words/s\n2020-07-21 09:37:19,897 : INFO : EPOCH 4 - PROGRESS: at 14.62% examples, 1404277 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:20,901 : INFO : EPOCH 4 - PROGRESS: at 29.71% examples, 1430456 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:21,905 : INFO : EPOCH 4 - PROGRESS: at 45.09% examples, 1438374 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:22,909 : INFO : EPOCH 4 - PROGRESS: at 59.38% examples, 1452953 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:23,912 : INFO : EPOCH 4 - PROGRESS: at 73.14% examples, 1449942 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:24,915 : INFO : EPOCH 4 - PROGRESS: at 89.12% examples, 1449769 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:25,567 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:25,568 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:25,569 : INFO : worker thread finished; 
awaiting finish of 5 more threads\n2020-07-21 09:37:25,569 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:25,570 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:25,571 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:25,574 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:25,579 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:25,580 : INFO : EPOCH - 4 : training on 13171322 raw words (9662037 effective words) took 6.7s, 1445244 effective words/s\n2020-07-21 09:37:26,585 : INFO : EPOCH 5 - PROGRESS: at 14.77% examples, 1419416 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:27,594 : INFO : EPOCH 5 - PROGRESS: at 29.42% examples, 1413123 words/s, in_qsize 0, out_qsize 1\n2020-07-21 09:37:28,598 : INFO : EPOCH 5 - PROGRESS: at 44.10% examples, 1399976 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:29,599 : INFO : EPOCH 5 - PROGRESS: at 57.47% examples, 1399793 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:30,605 : INFO : EPOCH 5 - PROGRESS: at 71.00% examples, 1406491 words/s, in_qsize 1, out_qsize 0\n2020-07-21 09:37:31,609 : INFO : EPOCH 5 - PROGRESS: at 86.95% examples, 1412434 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:32,371 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:32,371 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:32,372 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:37:32,372 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:32,372 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:32,374 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:32,378 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:32,382 : INFO 
: worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:32,383 : INFO : EPOCH - 5 : training on 13171322 raw words (9663082 effective words) took 6.8s, 1420819 effective words/s\n2020-07-21 09:37:32,383 : INFO : training on a 65856610 raw words (48311863 effective words) took 33.2s, 1453250 effective words/s\n"
    }
   ],
   "source": [
     "# Train a 300-dim word2vec (CBOW: sg defaults to 0, per the training log)\n",
     "# on the merged corpus; words seen fewer than 5 times are dropped.\n",
     "# NOTE(review): size/iter are pre-gensim-4 parameter names (vector_size/\n",
     "# epochs in gensim >= 4) -- confirm the pinned gensim version.\n",
     "wv_model = Word2Vec(LineSentence(merger_seg_path),\n",
     "                    size=300, \n",
     "                    negative=5, \n",
     "                    workers=8,\n",
     "                    iter=wv_train_epochs, \n",
     "                    window=3,\n",
     "                    min_count=5)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "<gensim.models.word2vec.Word2Vec at 0x215096f15c8>"
     },
     "metadata": {},
     "execution_count": 22
    }
   ],
   "source": [
    "wv_model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.2. 建立词表"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "31087"
     },
     "metadata": {},
     "execution_count": 23
    }
   ],
   "source": [
    "vocab = {word: index for index, word in enumerate(wv_model.wv.index2word)}\n",
    "reverse_vocab = {index: word for index, word in enumerate(wv_model.wv.index2word)}\n",
    "len(vocab)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "sentence=['技师说','帕萨特','机油']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "[1, 1071, 24]"
     },
     "metadata": {},
     "execution_count": 25
    }
   ],
   "source": [
    "[vocab[word]for word in sentence]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "['技师说', '两边', '机油']"
     },
     "metadata": {},
     "execution_count": 26
    }
   ],
   "source": [
    "[reverse_vocab[index] for index in [1, 1076, 24]]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q 1 .使用了min_count,其实部分词不在vocab表中 ,但是训练数据和测试数据中又有这些词?\n",
    "\n",
    "---\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2.3. 获取词向量矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(31087, 300)"
     },
     "metadata": {},
     "execution_count": 27
    }
   ],
   "source": [
    "embedding_matrix = wv_model.wv.vectors\n",
    "embedding_matrix.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "array([[-1.7095447e-03, -3.6179456e-01,  4.6072513e-01, ...,\n        -3.2866588e-01,  3.5074878e-01,  1.2231096e-01],\n       [-4.7785088e-01, -7.2842526e-01,  1.6290650e-01, ...,\n        -2.8525159e-01, -1.6097641e-01, -2.9837022e-02],\n       [-2.2389282e-01, -4.3576494e-01,  1.1438859e+00, ...,\n        -9.2113853e-01, -2.6735252e-01,  1.5114883e-01],\n       ...,\n       [ 5.1100664e-03,  1.7440578e-03, -5.5551744e-04, ...,\n         4.1221227e-02, -2.9914135e-02, -1.7683644e-02],\n       [ 5.0287023e-02, -1.6181944e-02, -3.0088993e-02, ...,\n        -1.4897756e-02, -6.3573293e-02, -1.6204495e-02],\n       [-1.8668832e-02,  1.8269194e-02,  4.8580538e-02, ...,\n         2.9952258e-02, -5.1152479e-02, -2.7201720e-02]], dtype=float32)"
     },
     "metadata": {},
     "execution_count": 28
    }
   ],
   "source": [
    "embedding_matrix"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. 构建训练数据"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "+ 可以把Question,Dialogue当做一句 `长文本处理`, 合并构建成X\n",
    "+ Report作为需要预测的标签,构建Y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_df['X'] = train_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)\n",
    "test_df['X'] = test_df[['Question', 'Dialogue']].apply(lambda x: ' '.join(x), axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "0    方向机 重 ， 助力 泵 ， 方向机 都 换 技师说 车主说 新 都 换 车主说 助力 泵 ...\n1    奔驰 ML500 排气 凸轮轴 调节 错误 技师说 有没有 电脑 检测 故障 代码 。 车主...\n2    2010 款 宝马X1 ， 2011 年 出厂 ， 2.0 排量 ， 通用 6L45 变速箱...\n3    3.0 V6 发动机 号 位置 ， 照片 最好 ！ 技师说 右侧 排气管 上方 ， 缸体 上...\n4    2012 款 奔驰 c180 ， 维修保养 ， 动力 ， 值得 拥有 技师说 家庭 用车 ，...\nName: X, dtype: object"
     },
     "metadata": {},
     "execution_count": 30
    }
   ],
   "source": [
    "train_df['X'].head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Q 2. 句子长度一样 ? 如何构建训练,batch操作,矩阵 ...\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.1 填充字段"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): shadows the pad_proc imported from utils.data_loader above.\n",
     "def pad_proc(sentence, max_len, vocab):\n",
     "    '''Truncate, mark OOV words, and pad one space-separated sentence.\n",
     "\n",
     "    :param sentence: space-delimited token string\n",
     "    :param max_len: maximum number of content tokens kept\n",
     "    :param vocab: mapping whose keys are the in-vocabulary words\n",
     "    :return: '<START> ... <STOP> <PAD> ...' string of fixed token count\n",
     "    '''\n",
     "    # 0. Split on single spaces (tokens were joined with ' ' upstream).\n",
     "    words = sentence.strip().split(' ')\n",
     "    # 1. Truncate to at most max_len tokens.\n",
     "    words = words[:max_len]\n",
     "    # 2. Replace words missing from vocab with the <UNK> marker.\n",
     "    sentence = [word if word in vocab else '<UNK>' for word in words]\n",
     "    # 3. Add sentence-boundary markers.\n",
     "    sentence = ['<START>'] + sentence + ['<STOP>']\n",
     "    # 4. Right-pad to a fixed length. NOTE(review): every output has\n",
     "    # max_len + 4 tokens (START/STOP are already counted), i.e. at least\n",
     "    # two trailing <PAD>s -- confirm max_len - len(words) wasn't intended.\n",
     "    sentence = sentence + ['<PAD>'] * (max_len + 2 - len(words))\n",
     "    return ' '.join(sentence)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'<START> 技师说 帕萨特 机油 <STOP> <PAD> <PAD> <PAD> <PAD>'"
     },
     "metadata": {},
     "execution_count": 32
    }
   ],
   "source": [
    "pad_proc('技师说 帕萨特 机油', max_len=5, vocab=vocab)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 休息一下，9.10分继续"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Q3. 如何确定max_len的值? 经验 ?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.2 获取适当的Max_Len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_max_len(data):\n",
    "    \"\"\"\n",
    "    获得合适的最大长度值\n",
    "    :param data: 待统计的数据  train_df['Question']\n",
    "    :return: 最大长度值\n",
    "    \"\"\"\n",
    "    max_lens = data.apply(lambda x: x.count(' '))\n",
    "    return int(np.mean(max_lens) + 2 * np.std(max_lens))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Length budget for the input texts (X).\n",
     "# NOTE(review): these three variables are computed from X but named\n",
     "# *_y_* / y_max_len -- misleading; X_max_len is also hardcoded to 200\n",
     "# below, leaving y_max_len unused.\n",
     "train_y_max_len = get_max_len(train_df['X'])\n",
     "test_y_max_len = get_max_len(test_df['X'])\n",
     "y_max_len = max(train_y_max_len, test_y_max_len)\n",
     "\n",
     "# Length budget for the labels (Report); overwrites train_y_max_len.\n",
     "train_y_max_len = get_max_len(train_df['Report'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "38"
     },
     "metadata": {},
     "execution_count": 35
    }
   ],
   "source": [
    "train_y_max_len"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Input length cap chosen manually (instead of the y_max_len computed\n",
     "# above): 200 tokens per merged Question+Dialogue text.\n",
     "X_max_len=200"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.3 填充处理"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "+ `<START>` - 句子开始\n",
     "+ `<STOP>` - 句子结尾\n",
     "+ `<PAD>` - 短句填充\n",
     "+ `<UNK>` - 未知词"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Truncate/UNK/pad the training inputs to a fixed token count.\n",
     "train_df['X'] = train_df['X'].apply(lambda x: pad_proc(x, X_max_len, vocab))\n",
     "# Same treatment for the training labels, with the label length budget.\n",
     "train_df['Y'] = train_df['Report'].apply(lambda x: pad_proc(x, train_y_max_len, vocab))\n",
     "# Test inputs are padded the same way as the training inputs.\n",
     "test_df['X'] = test_df['X'].apply(lambda x: pad_proc(x, X_max_len, vocab))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "'C:\\\\Users\\\\Administrator.DESKTOP-HN1J6IE\\\\Desktop\\\\lecture01_1_2\\\\code\\\\data\\\\train_X_pad_data.csv'"
     },
     "metadata": {},
     "execution_count": 38
    }
   ],
   "source": [
    "train_x_pad_path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Save padded intermediates (one padded sentence per line, no header).\n",
     "train_df['X'].to_csv(train_x_pad_path, index=None, header=False)\n",
     "train_df['Y'].to_csv(train_y_pad_path, index=None, header=False)\n",
     "test_df['X'].to_csv(test_x_pad_path, index=None, header=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "0        <START> 帕萨特 烧 机油 ？ 技师说 你好 ， 请问 车 跑 公里 ， 保修期 内 ...\n1        <START> 修 一下 钱 换 修 技师说 你好 师傅 ！ 抛光 处理 一下 好 ！ 50...\n2        <START> 帕萨特 领域 喇叭 坏 店里 说 方向盘 <UNK> 坏 换 一根 两三百 ...\n3        <START> 发动机 漏气 会 征兆 ？ 技师说 你好 ！ 发动机 没力 ， 伴有 “ 啪...\n4        <START> 请问 那天 右 后 胎扎 订 ， 补 胎后 跑 高速 80 有点 抖 ， 1...\n                               ...                        \n19995    <START> 路虎 极光 2.0 t 发动机 ， 进气 链轮 损坏 更换 进气 链轮 后 ...\n19996    <START> 别克 英朗XT <UNK> 后 ， 发现 左 雾灯 下雨 后 起雾 ， 更换...\n19997    <START> 师傅 ， 想 问 下车 一周 开 两次 周一 开 过来 周五 开回去 对车 ...\n19998    <START> 你好 大师 ， 车前 分泵 ， 制动 钳 导管 之间 晃动 ， 颠簸 路段 ...\n19999    <START> 大师 好 ， 东风风光330 开车 下坡 时 没有 踩 刹车 四档 不 小心...\nName: X, Length: 20000, dtype: object"
     },
     "metadata": {},
     "execution_count": 40
    }
   ],
   "source": [
    "test_df['X']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q4 新加的符号不在词表 和 词向量矩阵中,怎么办?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.4 词表更新"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {
    "scrolled": true,
    "tags": [
     "outputPrepend"
    ]
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "524 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:47,524 : INFO : EPOCH - 1 : training on 16906092 raw words (6952695 effective words) took 5.8s, 1207686 effective words/s\n2020-07-21 09:37:48,530 : INFO : EPOCH 2 - PROGRESS: at 17.68% examples, 1211294 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:49,532 : INFO : EPOCH 2 - PROGRESS: at 34.23% examples, 1177583 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:50,534 : INFO : EPOCH 2 - PROGRESS: at 51.44% examples, 1165705 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:51,536 : INFO : EPOCH 2 - PROGRESS: at 67.76% examples, 1164639 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:52,539 : INFO : EPOCH 2 - PROGRESS: at 83.72% examples, 1166036 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:53,536 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:53,536 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:53,537 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:37:53,537 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:53,537 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:53,538 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:53,539 : INFO : EPOCH 2 - PROGRESS: at 99.94% examples, 1155805 words/s, in_qsize 1, out_qsize 1\n2020-07-21 09:37:53,539 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:53,541 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:53,542 : INFO : EPOCH - 2 : training on 16906092 raw words (6953382 effective words) took 6.0s, 1155909 effective words/s\n2020-07-21 09:37:54,550 : INFO : EPOCH 3 - PROGRESS: at 16.08% examples, 1100922 words/s, in_qsize 1, out_qsize 0\n2020-07-21 09:37:55,553 : INFO : EPOCH 3 - PROGRESS: at 31.46% examples, 1082125 words/s, 
in_qsize 0, out_qsize 0\n2020-07-21 09:37:56,557 : INFO : EPOCH 3 - PROGRESS: at 47.60% examples, 1080075 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:57,569 : INFO : EPOCH 3 - PROGRESS: at 64.15% examples, 1097554 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:58,572 : INFO : EPOCH 3 - PROGRESS: at 80.53% examples, 1115196 words/s, in_qsize 0, out_qsize 1\n2020-07-21 09:37:59,573 : INFO : EPOCH 3 - PROGRESS: at 96.79% examples, 1117618 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:37:59,773 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:37:59,774 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:37:59,775 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:37:59,775 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:37:59,776 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:37:59,777 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:37:59,779 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:37:59,780 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:37:59,781 : INFO : EPOCH - 3 : training on 16906092 raw words (6953504 effective words) took 6.2s, 1115470 effective words/s\n2020-07-21 09:38:00,784 : INFO : EPOCH 4 - PROGRESS: at 16.26% examples, 1115699 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:01,788 : INFO : EPOCH 4 - PROGRESS: at 32.40% examples, 1116021 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:02,789 : INFO : EPOCH 4 - PROGRESS: at 48.96% examples, 1111404 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:03,794 : INFO : EPOCH 4 - PROGRESS: at 65.33% examples, 1121640 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:04,800 : INFO : EPOCH 4 - PROGRESS: at 81.42% examples, 1130207 words/s, in_qsize 0, out_qsize 1\n2020-07-21 09:38:05,807 : INFO : EPOCH 4 - PROGRESS: at 98.03% 
examples, 1132224 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:05,923 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:05,924 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:05,925 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:05,925 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:05,925 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:05,925 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:05,927 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:05,930 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:05,930 : INFO : EPOCH - 4 : training on 16906092 raw words (6952262 effective words) took 6.1s, 1130975 effective words/s\n2020-07-21 09:38:06,936 : INFO : EPOCH 5 - PROGRESS: at 16.32% examples, 1117879 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:07,936 : INFO : EPOCH 5 - PROGRESS: at 31.99% examples, 1102495 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:08,938 : INFO : EPOCH 5 - PROGRESS: at 49.08% examples, 1114688 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:09,938 : INFO : EPOCH 5 - PROGRESS: at 66.10% examples, 1136548 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:10,940 : INFO : EPOCH 5 - PROGRESS: at 82.36% examples, 1146158 words/s, in_qsize 6, out_qsize 2\n2020-07-21 09:38:11,937 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:11,938 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:11,939 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:11,939 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:11,939 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:11,940 : INFO : 
worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:11,940 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:11,942 : INFO : EPOCH 5 - PROGRESS: at 100.00% examples, 1157085 words/s, in_qsize 0, out_qsize 1\n2020-07-21 09:38:11,942 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:11,943 : INFO : EPOCH - 5 : training on 16906092 raw words (6953025 effective words) took 6.0s, 1156938 effective words/s\n2020-07-21 09:38:11,943 : INFO : training on a 84530460 raw words (34764868 effective words) took 30.2s, 1151985 effective words/s\n2020-07-21 09:38:11,944 : INFO : collecting all words and their counts\n2020-07-21 09:38:11,948 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word types\n2020-07-21 09:38:12,033 : INFO : PROGRESS: at sentence #10000, processed 420000 words, keeping 7474 word types\n2020-07-21 09:38:12,116 : INFO : PROGRESS: at sentence #20000, processed 840000 words, keeping 10107 word types\n1/3\n2020-07-21 09:38:12,199 : INFO : PROGRESS: at sentence #30000, processed 1260000 words, keeping 11888 word types\n2020-07-21 09:38:12,282 : INFO : PROGRESS: at sentence #40000, processed 1680000 words, keeping 13313 word types\n2020-07-21 09:38:12,367 : INFO : PROGRESS: at sentence #50000, processed 2100000 words, keeping 14436 word types\n2020-07-21 09:38:12,452 : INFO : PROGRESS: at sentence #60000, processed 2520000 words, keeping 15479 word types\n2020-07-21 09:38:12,536 : INFO : PROGRESS: at sentence #70000, processed 2940000 words, keeping 16495 word types\n2020-07-21 09:38:12,619 : INFO : PROGRESS: at sentence #80000, processed 3360000 words, keeping 17217 word types\n2020-07-21 09:38:12,644 : INFO : collected 17437 word types from a corpus of 3480666 raw words and 82873 sentences\n2020-07-21 09:38:12,645 : INFO : Updating model with new vocabulary\n2020-07-21 09:38:12,654 : INFO : New added 6933 unique words (28% of original 24370) and 
increased the count of 6933 pre-existing words (28% of original 24370)\n2020-07-21 09:38:12,689 : INFO : deleting the raw counts dictionary of 17437 items\n2020-07-21 09:38:12,690 : INFO : sample=0.001 downsamples 36 most-common words\n2020-07-21 09:38:12,691 : INFO : downsampling leaves estimated 2383651 word corpus (68.9% of prior 3461539)\n2020-07-21 09:38:12,740 : INFO : estimated required memory for 13866 words and 300 dimensions: 40211400 bytes\n2020-07-21 09:38:12,740 : INFO : updating layer weights\n2020-07-21 09:38:12,770 : WARNING : Effective 'alpha' higher than previous training cycles\n2020-07-21 09:38:12,771 : INFO : training model with 8 workers on 31091 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=3\n2020-07-21 09:38:13,775 : INFO : EPOCH 1 - PROGRESS: at 75.53% examples, 918449 words/s, in_qsize 12, out_qsize 2\n2020-07-21 09:38:14,055 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:14,057 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:14,057 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:14,057 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:14,058 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:14,058 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:14,058 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:14,060 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:14,061 : INFO : EPOCH - 1 : training on 3480666 raw words (1210855 effective words) took 1.3s, 940692 effective words/s\n2020-07-21 09:38:15,065 : INFO : EPOCH 2 - PROGRESS: at 75.82% examples, 922536 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:15,371 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:15,373 : INFO : worker thread 
finished; awaiting finish of 6 more threads\n2020-07-21 09:38:15,374 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:15,374 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:15,374 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:15,375 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:15,375 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:15,376 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:15,377 : INFO : EPOCH - 2 : training on 3480666 raw words (1210977 effective words) took 1.3s, 921993 effective words/s\n2020-07-21 09:38:16,388 : INFO : EPOCH 3 - PROGRESS: at 76.97% examples, 930850 words/s, in_qsize 11, out_qsize 1\n2020-07-21 09:38:16,634 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:16,638 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:16,643 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:16,645 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:16,648 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:16,650 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:16,651 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:16,653 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:16,654 : INFO : EPOCH - 3 : training on 3480666 raw words (1211397 effective words) took 1.3s, 951086 effective words/s\n2020-07-21 09:38:17,660 : INFO : EPOCH 4 - PROGRESS: at 72.95% examples, 884026 words/s, in_qsize 13, out_qsize 2\n2020-07-21 09:38:17,959 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:17,960 : INFO : worker thread finished; awaiting 
finish of 6 more threads\n2020-07-21 09:38:17,961 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:17,961 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:17,962 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:17,963 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:17,963 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:17,964 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:17,964 : INFO : EPOCH - 4 : training on 3480666 raw words (1211226 effective words) took 1.3s, 926334 effective words/s\n2020-07-21 09:38:18,969 : INFO : EPOCH 5 - PROGRESS: at 75.82% examples, 921645 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:19,284 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:19,285 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:19,286 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:19,287 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:19,287 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:19,288 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:19,290 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:19,290 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:19,291 : INFO : EPOCH - 5 : training on 3480666 raw words (1210670 effective words) took 1.3s, 914060 effective words/s\n2020-07-21 09:38:19,291 : INFO : training on a 17403330 raw words (6055125 effective words) took 6.5s, 928645 effective words/s\n2020-07-21 09:38:19,292 : INFO : collecting all words and their counts\n2020-07-21 09:38:19,296 : INFO : PROGRESS: at sentence #0, processed 0 words, keeping 0 word 
types\n2/3\n2020-07-21 09:38:19,662 : INFO : PROGRESS: at sentence #10000, processed 2040000 words, keeping 20679 word types\n2020-07-21 09:38:20,026 : INFO : collected 25155 word types from a corpus of 4080000 raw words and 20000 sentences\n2020-07-21 09:38:20,026 : INFO : Updating model with new vocabulary\n2020-07-21 09:38:20,041 : INFO : New added 11396 unique words (31% of original 36551) and increased the count of 11396 pre-existing words (31% of original 36551)\n2020-07-21 09:38:20,098 : INFO : deleting the raw counts dictionary of 25155 items\n2020-07-21 09:38:20,099 : INFO : sample=0.001 downsamples 36 most-common words\n2020-07-21 09:38:20,100 : INFO : downsampling leaves estimated 3297876 word corpus (81.4% of prior 4051734)\n2020-07-21 09:38:20,150 : INFO : estimated required memory for 22792 words and 300 dimensions: 66096800 bytes\n2020-07-21 09:38:20,150 : INFO : updating layer weights\n2020-07-21 09:38:20,180 : WARNING : Effective 'alpha' higher than previous training cycles\n2020-07-21 09:38:20,180 : INFO : training model with 8 workers on 31091 vocabulary and 300 features, using sg=0 hs=0 sample=0.001 negative=5 window=3\n2020-07-21 09:38:21,184 : INFO : EPOCH 1 - PROGRESS: at 70.56% examples, 1170802 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:21,616 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:21,617 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:21,617 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:21,618 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:21,618 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:21,619 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:21,619 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:21,623 : INFO : worker thread finished; awaiting finish of 0 more 
threads\n2020-07-21 09:38:21,624 : INFO : EPOCH - 1 : training on 4080000 raw words (1676997 effective words) took 1.4s, 1163654 effective words/s\n2020-07-21 09:38:22,629 : INFO : EPOCH 2 - PROGRESS: at 67.86% examples, 1123811 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:23,094 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:23,095 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:23,095 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:23,096 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:23,097 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:23,097 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:23,098 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:23,100 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:23,101 : INFO : EPOCH - 2 : training on 4080000 raw words (1676977 effective words) took 1.5s, 1137129 effective words/s\n2020-07-21 09:38:24,104 : INFO : EPOCH 3 - PROGRESS: at 64.92% examples, 1082011 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:24,635 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:24,636 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:24,637 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:24,638 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:24,638 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:24,639 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:24,640 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:24,642 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 
09:38:24,642 : INFO : EPOCH - 3 : training on 4080000 raw words (1678112 effective words) took 1.5s, 1090873 effective words/s\n2020-07-21 09:38:25,650 : INFO : EPOCH 4 - PROGRESS: at 70.07% examples, 1161178 words/s, in_qsize 0, out_qsize 0\n2020-07-21 09:38:26,103 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:26,104 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:26,105 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:26,105 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:26,106 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:26,107 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:26,107 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:26,111 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:26,112 : INFO : EPOCH - 4 : training on 4080000 raw words (1678250 effective words) took 1.5s, 1144233 effective words/s\n2020-07-21 09:38:27,117 : INFO : EPOCH 5 - PROGRESS: at 68.84% examples, 1141663 words/s, in_qsize 0, out_qsize 1\n2020-07-21 09:38:27,544 : INFO : worker thread finished; awaiting finish of 7 more threads\n2020-07-21 09:38:27,550 : INFO : worker thread finished; awaiting finish of 6 more threads\n2020-07-21 09:38:27,552 : INFO : worker thread finished; awaiting finish of 5 more threads\n2020-07-21 09:38:27,553 : INFO : worker thread finished; awaiting finish of 4 more threads\n2020-07-21 09:38:27,555 : INFO : worker thread finished; awaiting finish of 3 more threads\n2020-07-21 09:38:27,565 : INFO : worker thread finished; awaiting finish of 2 more threads\n2020-07-21 09:38:27,567 : INFO : worker thread finished; awaiting finish of 1 more threads\n2020-07-21 09:38:27,567 : INFO : worker thread finished; awaiting finish of 0 more threads\n2020-07-21 09:38:27,568 : INFO 
: EPOCH - 5 : training on 4080000 raw words (1676690 effective words) took 1.5s, 1153848 effective words/s\n2020-07-21 09:38:27,568 : INFO : training on a 20400000 raw words (8387026 effective words) took 7.4s, 1135314 effective words/s\n"
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": "(8387026, 20400000)"
     },
     "metadata": {},
     "execution_count": 41
    }
   ],
   "source": [
    "print('start retrain w2v model')\n",
    "wv_model.build_vocab(LineSentence(train_x_pad_path), update=True)\n",
    "wv_model.train(LineSentence(train_x_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)\n",
    "print('1/3')\n",
    "wv_model.build_vocab(LineSentence(train_y_pad_path), update=True)\n",
    "wv_model.train(LineSentence(train_y_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)\n",
    "print('2/3')\n",
    "wv_model.build_vocab(LineSentence(test_x_pad_path), update=True)\n",
    "wv_model.train(LineSentence(test_x_pad_path), epochs=wv_train_epochs, total_examples=wv_model.corpus_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": "2020-07-21 09:38:27,623 : INFO : saving Word2Vec object under C:\\Users\\Administrator.DESKTOP-HN1J6IE\\Desktop\\lecture01_1_2\\code\\data\\wv\\word2vec.model, separately None\n2020-07-21 09:38:27,624 : INFO : not storing attribute vectors_norm\n2020-07-21 09:38:27,624 : INFO : not storing attribute cum_table\n2020-07-21 09:38:28,129 : INFO : saved C:\\Users\\Administrator.DESKTOP-HN1J6IE\\Desktop\\lecture01_1_2\\code\\data\\wv\\word2vec.model\n"
    }
   ],
   "source": [
    "# 保存词向量模型\n",
    "wv_model.save(save_wv_model_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q5.为什么不一开始就添加 标志符号,然后训练词向量?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 更新vocab \n",
    "vocab = {word: index for index, word in enumerate(wv_model.wv.index2word)}\n",
    "reverse_vocab = {index: word for index, word in enumerate(wv_model.wv.index2word)}\n",
    "# 更新词向量矩阵\n",
    "embedding_matrix = wv_model.wv.vectors\n",
    "embedding_matrix.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "list(vocab.keys())[-5:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "(31851, 300)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Q6. 词可以训练吗?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_X"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3.4 数值转换"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 遇到未知词就填充unk的索引\n",
    "unk_index = vocab['<UNK>']\n",
    "def transform_data(sentence,vocab):\n",
    "    # 字符串切分成词\n",
    "    words=sentence.split(' ')\n",
    "    # 按照vocab的index进行转换\n",
    "    ids=[vocab[word] if word in vocab else unk_index for word in words]\n",
    "    return ids"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_X=train_df['X']\n",
    "train_Y=train_df['Y']\n",
    "test_X=test_df['X']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将词转换成索引  [<START> 方向机 重 ...] -> [32800, 403, 986, 246, 231\n",
    "train_ids_x=train_X.apply(lambda x:transform_data(x,vocab))\n",
    "train_ids_y=train_Y.apply(lambda x:transform_data(x,vocab))\n",
    "test_ids_x=test_X.apply(lambda x:transform_data(x,vocab))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 将索引列表转换成矩阵 [32800, 403, 986, 246, 231] --> array([[32800,   403,   986 ]]\n",
    "train_data_X=np.array(train_ids_x.tolist())\n",
    "train_data_Y=np.array(train_ids_y.tolist())\n",
    "test_data_X=np.array(test_ids_x.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data_X.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data_X"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. 简易模型搭建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tensorflow.keras.models import Model, Sequential\n",
    "from tensorflow.keras.layers import GRU, Input, Dense, TimeDistributed, Activation, RepeatVector, Bidirectional\n",
    "from tensorflow.keras.layers import Embedding\n",
    "from tensorflow.keras.optimizers import Adam\n",
    "from tensorflow.keras.losses import sparse_categorical_crossentropy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def seq2seq(input_length, output_sequence_length, embedding_matrix, vocab_size):\n",
    "    model = Sequential()\n",
    "    model.add(Embedding(input_dim=vocab_size, output_dim=300, weights=[embedding_matrix], trainable=False,\n",
    "                        input_length=input_length))\n",
    "    model.add(Bidirectional(GRU(300, return_sequences=False)))\n",
    "    model.add(Dense(300, activation=\"relu\"))\n",
    "    model.add(RepeatVector(output_sequence_length))\n",
    "    model.add(Bidirectional(GRU(300, return_sequences=True)))\n",
    "    model.add(TimeDistributed(Dense(vocab_size, activation='softmax')))\n",
    "    model.compile(loss=sparse_categorical_crossentropy,\n",
    "                  optimizer=Adam(1e-3))\n",
    "    model.summary()\n",
    "    return model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.1 基本参数设置"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_data_X.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 输入的长度\n",
    "input_length = train_data_X.shape[1]\n",
    "# 输出的长度\n",
    "output_sequence_length = train_data_Y.shape[1]\n",
    "# 词表大小\n",
    "vocab_size=len(vocab)\n",
    "# 词向量矩阵\n",
    "embedding_matrix = wv_model.wv.vectors"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.2 模型构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model = seq2seq(input_length,output_sequence_length,embedding_matrix,vocab_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 4.3 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "model.fit(train_data_X, train_data_Y, batch_size=6, epochs=1, validation_split=0.2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4.4 模型保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "model.save('data/seq2seq_model.h')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4 .5 模型预测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_data_Y = model.predict(test_data_X)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# seq2seq"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "1. 所有输出端，都以一个通用的<start>标记开头，以<end>标记结尾，这两个标记也视为一个词/字；\n",
    "\n",
    "2. 将<start>输入decoder，然后得到隐藏层向量，将这个向量与encoder的输出混合，然后送入一个分类器，分类器的结果应当输出P；\n",
    "\n",
    "3. 将P输入decoder，得到新的隐藏层向量，再次与encoder的输出混合，送入分类器，分类器应输出Q；\n",
    "\n",
    "4. 依此递归，直到分类器的结果输出<end>。\n",
    "    \n",
    "\n",
    "* 回到用seq2seq生成文章标题这个任务上，模型可以做些简化，并且可以引入一些先验知识。比如，由于输入语言和输出语言都是中文，因此encoder和decoder的Embedding层可以共享参数（也就是用同一套词向量）。这使得模型的参数量大幅度减少了。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.preprocessing.text import Tokenizer # 词表构建 单词过滤 词频统计 序列填充\n",
    "from keras.preprocessing.sequence import pad_sequences # 序列数据填充\n",
    "from sklearn.model_selection import train_test_split # 数据集划分"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.8.5 64-bit ('myenv': conda)",
   "language": "python",
   "name": "python_defaultSpec_1598966422865"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.5-final"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}