{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T02:42:32.338355Z",
     "start_time": "2020-11-16T02:17:00.095063Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'gensim.models.keyedvectors.Word2VecKeyedVectors'>\n"
     ]
    }
   ],
   "source": [
     "# Load the pretrained Tencent AI Lab Chinese embeddings (word2vec text format).\n",
     "# NOTE: loading took ~25 minutes per the recorded ExecuteTime metadata above.\n",
     "from gensim.models import KeyedVectors\n",
     "file = 'Tencent_AILab_ChineseEmbedding.txt'\n",
     "wv_from_text = KeyedVectors.load_word2vec_format(file, binary=False)\n",
     "print(type(wv_from_text))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T02:47:04.918697Z",
     "start_time": "2020-11-16T02:47:04.585556Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\u001b[0;33mspecial attribute:\u001b[0m\n",
       "    \u001b[0;36m__class__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__dict__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__doc__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__module__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__weakref__\u001b[0m\n",
       "\u001b[0;33mabstract class:\u001b[0m\n",
       "    \u001b[0;36m__subclasshook__\u001b[0m\n",
       "\u001b[0;33mobject customization:\u001b[0m\n",
       "    \u001b[0;36m__format__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__hash__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__init__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__new__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__repr__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__sizeof__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__str__\u001b[0m\n",
       "\u001b[0;33mrich comparison:\u001b[0m\n",
       "    \u001b[0;36m__eq__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__ge__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__gt__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__le__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__lt__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__ne__\u001b[0m\n",
       "\u001b[0;33mattribute access:\u001b[0m\n",
       "    \u001b[0;36m__delattr__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__dir__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__getattribute__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__setattr__\u001b[0m\n",
       "\u001b[0;33mclass customization:\u001b[0m\n",
       "    \u001b[0;36m__init_subclass__\u001b[0m\n",
       "\u001b[0;33mcontainer:\u001b[0m\n",
       "    \u001b[0;36m__contains__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__getitem__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__setitem__\u001b[0m\n",
       "\u001b[0;33mpickle:\u001b[0m\n",
       "    \u001b[0;36m__reduce__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__reduce_ex__\u001b[0m\n",
       "\u001b[0;33mdescriptor:\u001b[0m\n",
       "    \u001b[0;36mindex2entity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter, setter\u001b[0m\n",
       "    \u001b[0;36mload\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mclass classmethod with getter, classmethod(function) -> method\u001b[0m\n",
       "    \u001b[0;36mload_word2vec_format\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mclass classmethod with getter, classmethod(function) -> method\u001b[0m\n",
       "    \u001b[0;36msyn0\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter, setter\u001b[0m\n",
       "    \u001b[0;36msyn0norm\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter, setter\u001b[0m\n",
       "    \u001b[0;36mwv\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter\u001b[0m\n",
       "\u001b[0;33mfunction:\u001b[0m\n",
       "    \u001b[0;36m_adapt_by_suffix\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet compress setting and filename for numpy file compression.\u001b[0m\n",
       "    \u001b[0;36m_load_specials\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mLoad attributes that were stored separately, and give them the same opportunity\u001b[0m\n",
       "    \u001b[0;36m_log_evaluate_word_analogies\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCalculate score by section, helper for\u001b[0m\n",
       "    \u001b[0;36m_save_specials\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mSave aside any attributes that need to be handled separately, including\u001b[0m\n",
       "    \u001b[0;36m_smart_save\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mSave the object to a file. Used internally by :meth:`gensim.utils.SaveLoad.save()`.\u001b[0m\n",
       "    \u001b[0;36maccuracy\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute accuracy of the model.\u001b[0m\n",
       "    \u001b[0;36madd\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mAppend entities and theirs vectors in a manual way.\u001b[0m\n",
       "    \u001b[0;36mcloser_than\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet all entities that are closer to `entity1` than `entity2` is to `entity1`.\u001b[0m\n",
       "    \u001b[0;36mcosine_similarities\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine similarities between one vector and a set of other vectors.\u001b[0m\n",
       "    \u001b[0;36mdistance\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine distance between two words.\u001b[0m\n",
       "    \u001b[0;36mdistances\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine distances from given word or vector to all words in `other_words`.\u001b[0m\n",
       "    \u001b[0;36mdoesnt_match\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mWhich word from the given list doesn't go with the others?\u001b[0m\n",
       "    \u001b[0;36mevaluate_word_analogies\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute performance of the model on an analogy test set.\u001b[0m\n",
       "    \u001b[0;36mevaluate_word_pairs\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute correlation of the model with human similarity judgments.\u001b[0m\n",
       "    \u001b[0;36mget_keras_embedding\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings.\u001b[0m\n",
       "    \u001b[0;36mget_vector\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet the entity's representations in vector space, as a 1D numpy array.\u001b[0m\n",
       "    \u001b[0;36minit_sims\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mPrecompute L2-normalized vectors.\u001b[0m\n",
       "    \u001b[0;36mlog_accuracy\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m\u001b[0m\n",
       "    \u001b[0;36mlog_evaluate_word_pairs\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m\u001b[0m\n",
       "    \u001b[0;36mmost_similar\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words.\u001b[0m\n",
       "    \u001b[0;36mmost_similar_cosmul\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words, using the multiplicative combination objective,\u001b[0m\n",
       "    \u001b[0;36mmost_similar_to_given\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet the `entity` from `entities_list` most similar to `entity1`.\u001b[0m\n",
       "    \u001b[0;36mn_similarity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine similarity between two sets of words.\u001b[0m\n",
       "    \u001b[0;36mrank\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mRank of the distance of `entity2` from `entity1`, in relation to distances of all entities from `entity1`.\u001b[0m\n",
       "    \u001b[0;36mrelative_cosine_similarity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute the relative cosine similarity between two words given top-n similar words,\u001b[0m\n",
       "    \u001b[0;36msave\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mSave KeyedVectors.\u001b[0m\n",
       "    \u001b[0;36msave_word2vec_format\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mStore the input-hidden weight matrix in the same format used by the original\u001b[0m\n",
       "    \u001b[0;36msimilar_by_vector\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words by vector.\u001b[0m\n",
       "    \u001b[0;36msimilar_by_word\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words.\u001b[0m\n",
       "    \u001b[0;36msimilarity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine similarity between two words.\u001b[0m\n",
       "    \u001b[0;36msimilarity_matrix\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mConstruct a term similarity matrix for computing Soft Cosine Measure.\u001b[0m\n",
       "    \u001b[0;36mwmdistance\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute the Word Mover's Distance between two documents.\u001b[0m\n",
       "    \u001b[0;36mword_vec\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet `word` representations in vector space, as a 1D numpy array.\u001b[0m\n",
       "    \u001b[0;36mwords_closer_than\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet all words that are closer to `w1` than `w2` is to `w1`.\u001b[0m"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# pdir (third-party `pdir2` package) pretty-prints KeyedVectors' attributes,\n",
     "# grouped by category — output shown below.\n",
     "pdir(KeyedVectors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T03:02:55.776974Z",
     "start_time": "2020-11-16T03:02:54.859422Z"
    }
   },
   "outputs": [],
   "source": [
     "import numpy as np\n",
     "# Load the training set; later cells read data[i, 0] as the document string.\n",
     "# NOTE(review): relative path — assumes the notebook runs from its own directory.\n",
     "train = np.load('../Dataset/data/train.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:36:58.220008Z",
     "start_time": "2020-11-16T07:36:58.204076Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(155, 200)\n",
      "67\n"
     ]
    }
   ],
   "source": [
    "import jieba\n",
    "doc = train[0,0]\n",
    "w2v = wv_from_text\n",
    "mat = None\n",
    "use_wv = dict()\n",
    "for i, word in enumerate(jieba.cut(doc)):\n",
    "    try:\n",
    "        wv = w2v[word]\n",
    "        use_wv[word] = wv\n",
    "    except:\n",
    "        for i, w in enumerate(word):\n",
    "            use_wv[w] = w2v[w]\n",
    "            if i == 0:\n",
    "                wv = w2v[w]\n",
    "            else:\n",
    "                wv = np.vstack((wv,w2v[w]))\n",
    "    if i == 0:\n",
    "        mat = wv\n",
    "    else:\n",
    "        mat = np.vstack((mat, wv))\n",
    "print(mat.shape)\n",
    "print(len(use_wv))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 可以使用 dict 转成 npy 保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:53:45.693269Z",
     "start_time": "2020-11-16T07:53:45.340215Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# print(use_wv)\n",
    "file = 'test_w2v.txt'\n",
    "np.save('test_w2v.npy',np.array(use_wv))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:57:53.190852Z",
     "start_time": "2020-11-16T07:57:53.182873Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "uuswv = np.load('test_w2v.npy').item()\n",
    "len(uuswv)\n",
    "type(uuswv)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 测试直接保存为 txt 文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:59:57.905191Z",
     "start_time": "2020-11-16T07:59:57.890253Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(155, 200)\n",
      "67\n"
     ]
    }
   ],
   "source": [
    "import jieba\n",
    "doc = train[0,0]\n",
    "w2v = wv_from_text\n",
    "mat = None\n",
    "use_wv = dict()\n",
    "for i, word in enumerate(jieba.cut(doc)):\n",
    "    try:\n",
    "        wv = w2v[word]\n",
    "        use_wv[word] = wv\n",
    "    except:\n",
    "        for i, w in enumerate(word):\n",
    "            use_wv[w] = w2v[w]\n",
    "            if i == 0:\n",
    "                wv = w2v[w]\n",
    "            else:\n",
    "                wv = np.vstack((wv,w2v[w]))\n",
    "    \n",
    "    if i == 0:\n",
    "        mat = wv\n",
    "    else:\n",
    "        mat = np.vstack((mat, wv))\n",
    "print(mat.shape)\n",
    "print(len(use_wv))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T08:15:39.252764Z",
     "start_time": "2020-11-16T08:15:39.226809Z"
    }
   },
   "outputs": [],
   "source": [
    "# Save the word embedding\n",
    "file = 'test_w2v.txt'\n",
    "with open(file, 'w', encoding='utf-8') as f:\n",
    "    f.write('%s 200\\n' % len(use_wv))\n",
    "    for key, value in use_wv.items():\n",
    "        s = ('%s' % value.tolist()).replace('[','').replace(']','').replace(',','')\n",
    "        f.write('%s %s\\n' % (key, s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T08:16:00.721325Z",
     "start_time": "2020-11-16T08:16:00.695371Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.422316, -0.188445,  0.303607,  0.138876,  0.157501, -0.393434,\n",
       "        0.119793, -0.10605 ,  0.299519,  0.111948, -0.191819,  0.074888,\n",
       "       -0.241597,  0.047663, -0.432895, -0.069372, -0.185891, -0.032745,\n",
       "       -0.11919 ,  0.080988, -0.03573 ,  0.434491, -0.49235 ,  0.308569,\n",
       "        0.119515,  0.097402, -0.102863,  0.0496  ,  0.213468, -0.057582,\n",
       "        0.203145, -0.082714,  0.09664 ,  0.035962, -0.230816, -0.084599,\n",
       "        0.18715 ,  0.57447 , -0.299669,  0.182591, -0.097882,  0.237317,\n",
       "        0.243701,  0.191663, -0.085517, -0.013433, -0.321317, -0.463325,\n",
       "        0.270055, -0.147297, -0.244581,  0.034256, -0.197056, -0.303928,\n",
       "        0.034367, -0.126948,  0.245291,  0.01963 ,  0.285298,  0.090663,\n",
       "        0.048534,  0.49026 ,  0.53321 ,  0.16681 ,  0.250462, -0.162299,\n",
       "       -0.124155,  0.240891, -0.153492, -0.272789, -0.410148, -0.181354,\n",
       "       -0.10621 ,  0.284273,  0.237221,  0.356572,  0.05953 ,  0.369757,\n",
       "       -0.325207, -0.099684,  0.119162,  0.21746 , -0.084874, -0.240895,\n",
       "        0.362958, -0.024245, -0.164401, -0.009086, -0.007824, -0.186961,\n",
       "        0.067057, -0.480429, -0.168648,  0.021166,  0.132482, -0.102885,\n",
       "       -0.172932,  0.24377 , -0.295846, -0.366742,  0.001642,  0.724706,\n",
       "        0.309111, -0.097334, -0.321319, -0.108957,  0.167371, -0.115275,\n",
       "       -0.228781,  0.186884, -0.319265, -0.46672 ,  0.125559,  0.212941,\n",
       "        0.005978, -0.072225, -0.026804, -0.273873, -0.036719,  0.13779 ,\n",
       "        0.525658, -0.195398,  0.080441, -0.071529,  0.460552,  0.246358,\n",
       "       -0.378339, -0.061954, -0.059653,  0.040772, -0.094346, -0.069649,\n",
       "       -0.164087,  0.108511, -0.145296,  0.223755, -0.448848, -0.059729,\n",
       "       -0.129417, -0.228746, -0.236851, -0.285469,  0.142372,  0.008411,\n",
       "        0.422986,  0.150435,  0.056039,  0.132055, -0.079006,  0.564026,\n",
       "       -0.285916, -0.270596, -0.360649, -0.119925, -0.314672, -0.232604,\n",
       "       -0.026787,  0.241815, -0.115235, -0.090316,  0.315375,  0.017611,\n",
       "        0.080874,  0.262319,  0.213444,  0.113567,  0.197748,  0.446394,\n",
       "       -0.006739, -0.186261, -0.149167,  0.061629, -0.2214  , -0.160106,\n",
       "        0.047103,  0.078469, -0.031366,  0.157373,  0.014709,  0.350224,\n",
       "        0.199311,  0.300172,  0.061173,  0.124012,  0.153249,  0.338645,\n",
       "       -0.279952,  0.161051, -0.072745, -0.07343 ,  0.136257,  0.105497,\n",
       "       -0.022218, -0.39233 , -0.113431,  0.533534, -0.379577, -0.311079,\n",
       "       -0.188586,  0.266753], dtype=float32)"
      ]
     },
     "execution_count": 98,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# read the word embedding\n",
     "# Round-trip check: reload the freshly written text embedding and look up a word.\n",
     "uus = KeyedVectors.load_word2vec_format(file, binary=False)\n",
     "uus['君不见']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T08:17:45.637609Z",
     "start_time": "2020-11-16T08:17:45.633620Z"
    }
   },
   "source": [
    "### 将 train, val, test 数据中可能出现的词向量都保存好"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T09:26:53.571009Z",
     "start_time": "2020-11-16T09:26:44.633890Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Now we are in file: train\n"
     ]
    },
    {
     "ename": "KeyError",
     "evalue": "\"word '不' not in vocabulary\"",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-122-2a93a71d386c>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     11\u001b[0m             \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 12\u001b[1;33m                 \u001b[0muse_wv\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mword\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwv_from_text\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mword\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     13\u001b[0m             \u001b[1;32mexcept\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mg:\\Anaconda3\\envs\\torch\\lib\\site-packages\\gensim\\models\\keyedvectors.py\u001b[0m in \u001b[0;36m__getitem__\u001b[1;34m(self, entities)\u001b[0m\n\u001b[0;32m    352\u001b[0m             \u001b[1;31m# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 353\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mentities\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    354\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mg:\\Anaconda3\\envs\\torch\\lib\\site-packages\\gensim\\models\\keyedvectors.py\u001b[0m in \u001b[0;36mget_vector\u001b[1;34m(self, word)\u001b[0m\n\u001b[0;32m    470\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mget_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 471\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mword_vec\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    472\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mg:\\Anaconda3\\envs\\torch\\lib\\site-packages\\gensim\\models\\keyedvectors.py\u001b[0m in \u001b[0;36mword_vec\u001b[1;34m(self, word, use_norm)\u001b[0m\n\u001b[0;32m    467\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 468\u001b[1;33m             \u001b[1;32mraise\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"word '%s' not in vocabulary\"\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    469\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: \"word '不' not in vocabulary\"",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001b[1;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-122-2a93a71d386c>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     13\u001b[0m             \u001b[1;32mexcept\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     14\u001b[0m                 \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mw\u001b[0m \u001b[1;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 15\u001b[1;33m                     \u001b[0muse_wv\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mw\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mwv_from_text\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mw\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     16\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     17\u001b[0m \u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0muse_wv\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mg:\\Anaconda3\\envs\\torch\\lib\\site-packages\\gensim\\models\\keyedvectors.py\u001b[0m in \u001b[0;36m__getitem__\u001b[1;34m(self, entities)\u001b[0m\n\u001b[0;32m    351\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mentities\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstring_types\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    352\u001b[0m             \u001b[1;31m# allow calls like trained_model['office'], as a shorthand for trained_model[['office']]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 353\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mentities\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    354\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    355\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mvstack\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mentity\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mentity\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mentities\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mg:\\Anaconda3\\envs\\torch\\lib\\site-packages\\gensim\\models\\keyedvectors.py\u001b[0m in \u001b[0;36mget_vector\u001b[1;34m(self, word)\u001b[0m\n\u001b[0;32m    469\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    470\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mget_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 471\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mword_vec\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    472\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    473\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mwords_closer_than\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mw1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mw2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mg:\\Anaconda3\\envs\\torch\\lib\\site-packages\\gensim\\models\\keyedvectors.py\u001b[0m in \u001b[0;36mword_vec\u001b[1;34m(self, word, use_norm)\u001b[0m\n\u001b[0;32m    466\u001b[0m             \u001b[1;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    467\u001b[0m         \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 468\u001b[1;33m             \u001b[1;32mraise\u001b[0m \u001b[0mKeyError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"word '%s' not in vocabulary\"\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    469\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    470\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mget_vector\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mword\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: \"word '不' not in vocabulary\""
     ]
    }
   ],
   "source": [
    "files = ['train','val','test']\n",
    "# w2v = wv_from_text\n",
    "use_wv = dict()\n",
    "# 计算可能使用的词并保存好词向量\n",
    "for file in files:\n",
    "    print('Now we are in file: %s' % file)\n",
    "    data = np.load('../Dataset/data/%s.npy' % file)\n",
    "    for i in range(len(data)):\n",
    "        doc = data[i, 0] # string\n",
    "        for i, word in enumerate(jieba.cut(doc)):\n",
    "            try:\n",
    "                use_wv[word] = wv_from_text[word]\n",
    "            except:\n",
    "                for i, w in enumerate(word):\n",
    "                    use_wv[w] = wv_from_text[w]\n",
    "                    \n",
    "len(use_wv)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 保存的文件中\n",
    "file = 'Lyric_ChineseEmbedding.txt'\n",
    "with open(file, 'w', encoding='utf-8') as f:\n",
    "    f.write('%s 200\\n' % len(use_wv))\n",
    "    for key, value in use_wv.items():\n",
    "        s = ('%s' % value.tolist()).replace('[','').replace(']','').replace(',','')\n",
    "        f.write('%s %s\\n' % (key, s))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 无关测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T09:24:39.188995Z",
     "start_time": "2020-11-16T09:24:39.182979Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.257969, -0.288066,  0.105582,  0.281781,  0.262464,  0.0247  ,\n",
       "        0.201691,  0.005893,  0.189025,  0.123702,  0.000702, -0.024353,\n",
       "        0.074768, -0.120963,  0.053227, -0.115575, -0.004054, -0.215095,\n",
       "       -0.157094, -0.259438,  0.062279,  0.142544, -0.057884,  0.149006,\n",
       "        0.142659, -0.055425, -0.023676,  0.048273,  0.274754, -0.119763,\n",
       "       -0.068765,  0.031547, -0.045737,  0.171814, -0.255463, -0.086406,\n",
       "        0.171091,  0.073908, -0.288   ,  0.051408,  0.014997,  0.005331,\n",
       "        0.401295,  0.095939, -0.045079,  0.089809, -0.274839, -0.682658,\n",
       "       -0.140976, -0.10434 , -0.26031 ,  0.023253, -0.258755,  0.066476,\n",
       "       -0.121786,  0.001883,  0.101805,  0.168083,  0.184144,  0.06967 ,\n",
       "        0.060302,  0.190894,  0.035508,  0.005831, -0.160862, -0.194421,\n",
       "        0.02766 , -0.004459, -0.094092, -0.127693, -0.151391,  0.004516,\n",
       "        0.176035,  0.156662, -0.035896,  0.040939,  0.056237,  0.105161,\n",
       "        0.208659, -0.029048,  0.050157,  0.265578,  0.121959,  0.337101,\n",
       "        0.15338 , -0.229313, -0.225776,  0.144097, -0.110946, -0.118999,\n",
       "        0.063307,  0.043647,  0.038288, -0.268226, -0.166147,  0.131352,\n",
       "        0.087143, -0.138708, -0.125388,  0.124644, -0.029785,  0.235381,\n",
       "       -0.14024 , -0.022642, -0.285102,  0.207541, -0.066322, -0.421221,\n",
       "       -0.050854, -0.180806,  0.063572, -0.221671,  0.144535, -0.106335,\n",
       "        0.039128, -0.323642,  0.136248, -0.268427, -0.005231,  0.022319,\n",
       "        0.100253, -0.346457, -0.058811, -0.041238,  0.045793,  0.12292 ,\n",
       "       -0.153493,  0.146689,  0.01837 ,  0.121537, -0.134108, -0.041995,\n",
       "        0.198174, -0.130834, -0.339254,  0.032741, -0.095624,  0.119803,\n",
       "        0.06163 ,  0.166394, -0.330792, -0.224072, -0.270991,  0.125234,\n",
       "       -0.057835, -0.233979,  0.228011,  0.05066 , -0.16057 ,  0.180151,\n",
       "       -0.08335 , -0.156653, -0.135692, -0.145908, -0.096677, -0.066022,\n",
       "        0.088564, -0.045435,  0.054121, -0.357914,  0.448524,  0.141443,\n",
       "       -0.094841,  0.21214 ,  0.053208,  0.114341, -0.139839,  0.239768,\n",
       "        0.095185,  0.049941, -0.141136, -0.093853, -0.107702,  0.065235,\n",
       "       -0.007106, -0.292004,  0.048486, -0.063324,  0.033387,  0.068469,\n",
       "       -0.210287, -0.03936 , -0.273291,  0.143838, -0.105371, -0.155323,\n",
       "       -0.020802,  0.49933 , -0.100334, -0.001517,  0.252245,  0.093027,\n",
       "        0.057058,  0.062758,  0.277739,  0.240228,  0.184851, -0.348845,\n",
       "       -0.224438,  0.06558 ], dtype=float32)"
      ]
     },
     "execution_count": 119,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sanity check: vector for the common particle '了' from the full embedding.\n",
     "wv_from_text['了']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:25:18.340813Z",
     "start_time": "2020-11-16T07:25:18.332833Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(2, 200)"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mat = None\n",
    "s = '闪邀'\n",
    "try:\n",
    "    mat = wv_from_text[s]\n",
    "except:\n",
    "    for i,w in enumerate(s):\n",
    "        if i == 0:\n",
    "            mat = wv_from_text[w]\n",
    "        else:\n",
    "            mat = np.stack((mat, wv_from_text[w]), axis=0)\n",
    "mat.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:31:45.209394Z",
     "start_time": "2020-11-16T07:31:45.199446Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "8824330 200\n",
      "\n",
      "</s> 0.002001 0.002210 -0.001915 -0.001639 0.000683 0.001511 0.000470 0.000106 -0.001802 0.001109 -0.002178 0.000625 -0.000376 -0.000479 -0.001658 -0.000941 0.001290 0.001513 0.001485 0.000799 0.000772 -0.001901 -0.002048 0.002485 0.001901 0.001545 -0.000302 0.002008 -0.000247 0.000367 -0.000075 -0.001492 0.000656 -0.000669 -0.001913 0.002377 0.002190 -0.000548 -0.000113 0.000255 -0.001819 -0.002004 0.002277 0.000032 -0.001291 -0.001521 -0.001538 0.000848 0.000101 0.000666 -0.002107 -0.001904 -0.000065 0.000572 0.001275 -0.001585 0.002040 0.000463 0.000560 -0.000304 0.001493 -0.001144 -0.001049 0.001079 -0.000377 0.000515 0.000902 -0.002044 -0.000992 0.001457 0.002116 0.001966 -0.001523 -0.001054 -0.000455 0.001001 -0.001894 0.001499 0.001394 -0.000799 -0.000776 -0.001119 0.002114 0.001956 -0.000590 0.002107 0.002410 0.000908 0.002491 -0.001556 -0.000766 -0.001054 -0.001454 0.001407 0.000790 0.000212 -0.001097 0.000762 0.001530 0.000097 0.001140 -0.002476 0.002157 0.000240 -0.000916 -0.001042 -0.000374 -0.001468 -0.002185 -0.001419 0.002139 -0.000885 -0.001340 0.001159 -0.000852 0.002378 -0.000802 -0.002294 0.001358 -0.000037 -0.001744 0.000488 0.000721 -0.000241 0.000912 -0.001979 0.000441 0.000908 -0.001505 0.000071 -0.000030 -0.001200 -0.001416 -0.002347 0.000011 0.000076 0.000005 -0.001967 -0.002481 -0.002373 -0.002163 -0.000274 0.000696 0.000592 -0.001591 0.002499 -0.001006 -0.000637 -0.000702 0.002366 -0.001882 0.000581 -0.000668 0.001594 0.000020 0.002135 -0.001410 -0.001303 -0.002096 -0.001833 -0.001600 -0.001557 0.001222 -0.000933 0.001340 0.001845 0.000678 0.001475 0.001238 0.001170 -0.001775 -0.001717 -0.001828 -0.000066 0.002065 -0.001368 -0.001530 -0.002098 0.001653 -0.002089 -0.000290 0.001089 -0.002309 -0.002239 0.000721 0.001762 0.002132 0.001073 0.001581 -0.001564 -0.001820 0.001987 -0.001382 0.000877 0.000287 0.000895 -0.000591 0.000099 -0.000843 -0.000563\n",
      "\n",
      "的 0.209092 -0.165459 -0.058054 0.281176 0.102982 0.099868 0.047287 0.113531 0.202805 0.240482 0.026028 0.073504 0.010873 0.010201 -0.056060 -0.063864 -0.025928 -0.158832 -0.019444 -0.144610 -0.124821 0.000499 -0.050971 0.113983 0.088150 0.080318 -0.145976 0.093325 0.139695 -0.082682 -0.034356 0.061241 -0.090153 0.053166 -0.171991 -0.187834 0.115600 0.219545 -0.200234 -0.106904 0.033836 0.005707 0.484198 0.147382 -0.165274 0.094883 -0.202281 -0.638371 -0.127920 -0.212338 -0.250738 -0.022411 -0.315008 0.169237 -0.002799 0.019125 0.017462 0.028013 0.195060 0.036385 -0.051681 0.154037 0.214785 -0.179985 -0.020429 -0.044819 -0.074923 0.105441 -0.081715 -0.034099 -0.096518 -0.004290 0.095423 0.234515 -0.138332 0.134917 0.082070 0.051714 0.159327 0.061818 0.037091 0.239265 0.073274 0.170960 0.223636 -0.187691 -0.206850 -0.051000 -0.269477 -0.116970 0.213069 -0.096122 0.035362 -0.254648 0.021978 0.071687 0.109870 -0.104643 -0.175653 0.097061 -0.068692 0.196374 0.007704 0.072367 -0.275905 0.217282 -0.056664 -0.321484 -0.004813 -0.041167 -0.118400 -0.159937 0.065294 -0.092538 0.013975 -0.219047 -0.058431 -0.177256 -0.043169 -0.151647 -0.006049 -0.279595 -0.005488 0.096733 0.147219 0.197677 -0.088133 0.053465 0.038738 0.059665 -0.132819 0.019606 0.224926 -0.176136 -0.411968 -0.044071 -0.120198 -0.107929 -0.001640 0.036719 -0.243131 -0.273457 -0.317418 -0.079236 0.054842 -0.143945 0.168189 -0.013057 -0.145664 0.135278 0.029447 -0.141014 -0.183899 -0.080112 -0.113538 0.071163 0.134968 0.141939 0.144405 -0.249114 0.454654 -0.077072 -0.001521 0.298252 0.160275 0.085942 -0.213363 0.083022 -0.000400 0.134826 -0.000681 -0.017328 -0.026751 0.111903 0.010307 -0.124723 0.031472 0.081697 0.071449 0.011486 -0.091571 -0.039319 -0.112756 0.171106 0.026869 -0.077058 -0.052948 0.252645 -0.035071 0.040870 0.277828 0.085193 0.006959 -0.048913 0.279133 0.169515 0.068156 -0.278624 -0.173408 0.035439\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Preview the raw embedding file: the header line (vocab size, dim)\n",
    "# followed by the first two embedding rows.\n",
    "file = 'Tencent_AILab_ChineseEmbedding.txt'\n",
    "with open(file, 'r', encoding='utf-8') as fh:\n",
    "    for _ in range(3):\n",
    "        print(fh.readline())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch",
   "language": "python",
   "name": "torch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
