{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T02:42:32.338355Z",
     "start_time": "2020-11-16T02:17:00.095063Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'gensim.models.keyedvectors.Word2VecKeyedVectors'>\n"
     ]
    }
   ],
   "source": [
    "# Load the pre-trained Tencent AILab Chinese word embeddings\n",
    "# (word2vec plain-text format, one '<word> <v1> ... <v200>' line per entry).\n",
    "from gensim.models import KeyedVectors\n",
    "file = 'Tencent_AILab_ChineseEmbedding.txt'\n",
    "wv_from_text = KeyedVectors.load_word2vec_format(file, binary=False)\n",
    "print(type(wv_from_text))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T02:47:04.918697Z",
     "start_time": "2020-11-16T02:47:04.585556Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "\u001b[0;33mspecial attribute:\u001b[0m\n",
       "    \u001b[0;36m__class__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__dict__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__doc__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__module__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__weakref__\u001b[0m\n",
       "\u001b[0;33mabstract class:\u001b[0m\n",
       "    \u001b[0;36m__subclasshook__\u001b[0m\n",
       "\u001b[0;33mobject customization:\u001b[0m\n",
       "    \u001b[0;36m__format__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__hash__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__init__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__new__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__repr__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__sizeof__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__str__\u001b[0m\n",
       "\u001b[0;33mrich comparison:\u001b[0m\n",
       "    \u001b[0;36m__eq__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__ge__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__gt__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__le__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__lt__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__ne__\u001b[0m\n",
       "\u001b[0;33mattribute access:\u001b[0m\n",
       "    \u001b[0;36m__delattr__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__dir__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__getattribute__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__setattr__\u001b[0m\n",
       "\u001b[0;33mclass customization:\u001b[0m\n",
       "    \u001b[0;36m__init_subclass__\u001b[0m\n",
       "\u001b[0;33mcontainer:\u001b[0m\n",
       "    \u001b[0;36m__contains__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__getitem__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__setitem__\u001b[0m\n",
       "\u001b[0;33mpickle:\u001b[0m\n",
       "    \u001b[0;36m__reduce__\u001b[0m\u001b[1;30m, \u001b[0m\u001b[0;36m__reduce_ex__\u001b[0m\n",
       "\u001b[0;33mdescriptor:\u001b[0m\n",
       "    \u001b[0;36mindex2entity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter, setter\u001b[0m\n",
       "    \u001b[0;36mload\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mclass classmethod with getter, classmethod(function) -> method\u001b[0m\n",
       "    \u001b[0;36mload_word2vec_format\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mclass classmethod with getter, classmethod(function) -> method\u001b[0m\n",
       "    \u001b[0;36msyn0\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter, setter\u001b[0m\n",
       "    \u001b[0;36msyn0norm\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter, setter\u001b[0m\n",
       "    \u001b[0;36mwv\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m@property with getter\u001b[0m\n",
       "\u001b[0;33mfunction:\u001b[0m\n",
       "    \u001b[0;36m_adapt_by_suffix\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet compress setting and filename for numpy file compression.\u001b[0m\n",
       "    \u001b[0;36m_load_specials\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mLoad attributes that were stored separately, and give them the same opportunity\u001b[0m\n",
       "    \u001b[0;36m_log_evaluate_word_analogies\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCalculate score by section, helper for\u001b[0m\n",
       "    \u001b[0;36m_save_specials\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mSave aside any attributes that need to be handled separately, including\u001b[0m\n",
       "    \u001b[0;36m_smart_save\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mSave the object to a file. Used internally by :meth:`gensim.utils.SaveLoad.save()`.\u001b[0m\n",
       "    \u001b[0;36maccuracy\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute accuracy of the model.\u001b[0m\n",
       "    \u001b[0;36madd\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mAppend entities and theirs vectors in a manual way.\u001b[0m\n",
       "    \u001b[0;36mcloser_than\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet all entities that are closer to `entity1` than `entity2` is to `entity1`.\u001b[0m\n",
       "    \u001b[0;36mcosine_similarities\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine similarities between one vector and a set of other vectors.\u001b[0m\n",
       "    \u001b[0;36mdistance\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine distance between two words.\u001b[0m\n",
       "    \u001b[0;36mdistances\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine distances from given word or vector to all words in `other_words`.\u001b[0m\n",
       "    \u001b[0;36mdoesnt_match\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mWhich word from the given list doesn't go with the others?\u001b[0m\n",
       "    \u001b[0;36mevaluate_word_analogies\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute performance of the model on an analogy test set.\u001b[0m\n",
       "    \u001b[0;36mevaluate_word_pairs\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute correlation of the model with human similarity judgments.\u001b[0m\n",
       "    \u001b[0;36mget_keras_embedding\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings.\u001b[0m\n",
       "    \u001b[0;36mget_vector\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet the entity's representations in vector space, as a 1D numpy array.\u001b[0m\n",
       "    \u001b[0;36minit_sims\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mPrecompute L2-normalized vectors.\u001b[0m\n",
       "    \u001b[0;36mlog_accuracy\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m\u001b[0m\n",
       "    \u001b[0;36mlog_evaluate_word_pairs\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30m\u001b[0m\n",
       "    \u001b[0;36mmost_similar\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words.\u001b[0m\n",
       "    \u001b[0;36mmost_similar_cosmul\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words, using the multiplicative combination objective,\u001b[0m\n",
       "    \u001b[0;36mmost_similar_to_given\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet the `entity` from `entities_list` most similar to `entity1`.\u001b[0m\n",
       "    \u001b[0;36mn_similarity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine similarity between two sets of words.\u001b[0m\n",
       "    \u001b[0;36mrank\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mRank of the distance of `entity2` from `entity1`, in relation to distances of all entities from `entity1`.\u001b[0m\n",
       "    \u001b[0;36mrelative_cosine_similarity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute the relative cosine similarity between two words given top-n similar words,\u001b[0m\n",
       "    \u001b[0;36msave\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mSave KeyedVectors.\u001b[0m\n",
       "    \u001b[0;36msave_word2vec_format\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mStore the input-hidden weight matrix in the same format used by the original\u001b[0m\n",
       "    \u001b[0;36msimilar_by_vector\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words by vector.\u001b[0m\n",
       "    \u001b[0;36msimilar_by_word\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mFind the top-N most similar words.\u001b[0m\n",
       "    \u001b[0;36msimilarity\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute cosine similarity between two words.\u001b[0m\n",
       "    \u001b[0;36msimilarity_matrix\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mConstruct a term similarity matrix for computing Soft Cosine Measure.\u001b[0m\n",
       "    \u001b[0;36mwmdistance\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mCompute the Word Mover's Distance between two documents.\u001b[0m\n",
       "    \u001b[0;36mword_vec\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet `word` representations in vector space, as a 1D numpy array.\u001b[0m\n",
       "    \u001b[0;36mwords_closer_than\u001b[0m\u001b[0;36m: \u001b[0m\u001b[1;30mGet all words that are closer to `w1` than `w2` is to `w1`.\u001b[0m"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Pretty-print the KeyedVectors API with `pdir` (third-party package `pdir2`).\n",
    "# The import was missing, so this cell failed on a fresh kernel.\n",
    "import pdir\n",
    "pdir(KeyedVectors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T03:02:55.776974Z",
     "start_time": "2020-11-16T03:02:54.859422Z"
    }
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "# Load the training split; column 0 is used below as the raw lyric text\n",
    "# (presumably shape (n_samples, n_cols) of dtype object -- verify against the dataset).\n",
    "train = np.load('../Dataset/data/train.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:36:58.220008Z",
     "start_time": "2020-11-16T07:36:58.204076Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(155, 200)\n",
      "67\n"
     ]
    }
   ],
   "source": [
    "# Build a (n_tokens, 200) matrix for one document: whole-word vectors when\n",
    "# available, falling back to per-character vectors for OOV words.\n",
    "import jieba\n",
    "doc = train[0,0]\n",
    "w2v = wv_from_text\n",
    "mat = None\n",
    "use_wv = dict()\n",
    "for i, word in enumerate(jieba.cut(doc)):\n",
    "    try:\n",
    "        wv = w2v[word]\n",
    "        use_wv[word] = wv\n",
    "    except KeyError:\n",
    "        # OOV word: stack its characters' vectors row-wise.\n",
    "        # BUGFIX: the inner index must not reuse `i` -- reusing it clobbered\n",
    "        # the outer loop counter and corrupted the `mat` accumulation below.\n",
    "        for j, w in enumerate(word):\n",
    "            use_wv[w] = w2v[w]\n",
    "            if j == 0:\n",
    "                wv = w2v[w]\n",
    "            else:\n",
    "                wv = np.vstack((wv, w2v[w]))\n",
    "    if i == 0:\n",
    "        mat = wv\n",
    "    else:\n",
    "        mat = np.vstack((mat, wv))\n",
    "print(mat.shape)\n",
    "print(len(use_wv))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 可以使用 dict 转成 npy 保存"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:53:45.693269Z",
     "start_time": "2020-11-16T07:53:45.340215Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Persist the {word: vector} dict as a pickled 0-d object array.\n",
    "# (Reloading it requires allow_pickle=True on NumPy >= 1.16.4.)\n",
    "np.save('test_w2v.npy', np.array(use_wv))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:57:53.190852Z",
     "start_time": "2020-11-16T07:57:53.182873Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "dict"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Round-trip check: reload the pickled dict and confirm its type.\n",
    "# allow_pickle=True is required for object arrays since NumPy 1.16.4\n",
    "# and is harmless on older versions.\n",
    "uuswv = np.load('test_w2v.npy', allow_pickle=True).item()\n",
    "len(uuswv)\n",
    "type(uuswv)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 测试直接保存为 txt 文件"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:59:57.905191Z",
     "start_time": "2020-11-16T07:59:57.890253Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(155, 200)\n",
      "67\n"
     ]
    }
   ],
   "source": [
    "# Same matrix-building loop as the earlier cell, kept as the txt-file test.\n",
    "import jieba\n",
    "doc = train[0,0]\n",
    "w2v = wv_from_text\n",
    "mat = None\n",
    "use_wv = dict()\n",
    "for i, word in enumerate(jieba.cut(doc)):\n",
    "    try:\n",
    "        wv = w2v[word]\n",
    "        use_wv[word] = wv\n",
    "    except KeyError:\n",
    "        # OOV word: stack its characters' vectors row-wise.\n",
    "        # BUGFIX: inner index renamed from `i` to `j` so it no longer\n",
    "        # clobbers the outer loop counter used by the `mat` update below.\n",
    "        for j, w in enumerate(word):\n",
    "            use_wv[w] = w2v[w]\n",
    "            if j == 0:\n",
    "                wv = w2v[w]\n",
    "            else:\n",
    "                wv = np.vstack((wv, w2v[w]))\n",
    "    if i == 0:\n",
    "        mat = wv\n",
    "    else:\n",
    "        mat = np.vstack((mat, wv))\n",
    "print(mat.shape)\n",
    "print(len(use_wv))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T08:15:39.252764Z",
     "start_time": "2020-11-16T08:15:39.226809Z"
    }
   },
   "outputs": [],
   "source": [
    "# Save the word embedding subset in word2vec text format:\n",
    "# header '<vocab_size> <dim>', then one '<word> <v1> ... <v_dim>' line each.\n",
    "file = 'test_w2v.txt'\n",
    "with open(file, 'w', encoding='utf-8') as f:\n",
    "    # Derive the dimension from the data instead of hard-coding 200.\n",
    "    dim = len(next(iter(use_wv.values()))) if use_wv else 200\n",
    "    f.write('%s %s\\n' % (len(use_wv), dim))\n",
    "    for key, value in use_wv.items():\n",
    "        # ' '.join produces the same output as the old replace-chain on str(list).\n",
    "        s = ' '.join('%s' % x for x in value.tolist())\n",
    "        f.write('%s %s\\n' % (key, s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 98,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T08:16:00.721325Z",
     "start_time": "2020-11-16T08:16:00.695371Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.422316, -0.188445,  0.303607,  0.138876,  0.157501, -0.393434,\n",
       "        0.119793, -0.10605 ,  0.299519,  0.111948, -0.191819,  0.074888,\n",
       "       -0.241597,  0.047663, -0.432895, -0.069372, -0.185891, -0.032745,\n",
       "       -0.11919 ,  0.080988, -0.03573 ,  0.434491, -0.49235 ,  0.308569,\n",
       "        0.119515,  0.097402, -0.102863,  0.0496  ,  0.213468, -0.057582,\n",
       "        0.203145, -0.082714,  0.09664 ,  0.035962, -0.230816, -0.084599,\n",
       "        0.18715 ,  0.57447 , -0.299669,  0.182591, -0.097882,  0.237317,\n",
       "        0.243701,  0.191663, -0.085517, -0.013433, -0.321317, -0.463325,\n",
       "        0.270055, -0.147297, -0.244581,  0.034256, -0.197056, -0.303928,\n",
       "        0.034367, -0.126948,  0.245291,  0.01963 ,  0.285298,  0.090663,\n",
       "        0.048534,  0.49026 ,  0.53321 ,  0.16681 ,  0.250462, -0.162299,\n",
       "       -0.124155,  0.240891, -0.153492, -0.272789, -0.410148, -0.181354,\n",
       "       -0.10621 ,  0.284273,  0.237221,  0.356572,  0.05953 ,  0.369757,\n",
       "       -0.325207, -0.099684,  0.119162,  0.21746 , -0.084874, -0.240895,\n",
       "        0.362958, -0.024245, -0.164401, -0.009086, -0.007824, -0.186961,\n",
       "        0.067057, -0.480429, -0.168648,  0.021166,  0.132482, -0.102885,\n",
       "       -0.172932,  0.24377 , -0.295846, -0.366742,  0.001642,  0.724706,\n",
       "        0.309111, -0.097334, -0.321319, -0.108957,  0.167371, -0.115275,\n",
       "       -0.228781,  0.186884, -0.319265, -0.46672 ,  0.125559,  0.212941,\n",
       "        0.005978, -0.072225, -0.026804, -0.273873, -0.036719,  0.13779 ,\n",
       "        0.525658, -0.195398,  0.080441, -0.071529,  0.460552,  0.246358,\n",
       "       -0.378339, -0.061954, -0.059653,  0.040772, -0.094346, -0.069649,\n",
       "       -0.164087,  0.108511, -0.145296,  0.223755, -0.448848, -0.059729,\n",
       "       -0.129417, -0.228746, -0.236851, -0.285469,  0.142372,  0.008411,\n",
       "        0.422986,  0.150435,  0.056039,  0.132055, -0.079006,  0.564026,\n",
       "       -0.285916, -0.270596, -0.360649, -0.119925, -0.314672, -0.232604,\n",
       "       -0.026787,  0.241815, -0.115235, -0.090316,  0.315375,  0.017611,\n",
       "        0.080874,  0.262319,  0.213444,  0.113567,  0.197748,  0.446394,\n",
       "       -0.006739, -0.186261, -0.149167,  0.061629, -0.2214  , -0.160106,\n",
       "        0.047103,  0.078469, -0.031366,  0.157373,  0.014709,  0.350224,\n",
       "        0.199311,  0.300172,  0.061173,  0.124012,  0.153249,  0.338645,\n",
       "       -0.279952,  0.161051, -0.072745, -0.07343 ,  0.136257,  0.105497,\n",
       "       -0.022218, -0.39233 , -0.113431,  0.533534, -0.379577, -0.311079,\n",
       "       -0.188586,  0.266753], dtype=float32)"
      ]
     },
     "execution_count": 98,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Reload the just-written word2vec text file and spot-check one word vector.\n",
    "uus = KeyedVectors.load_word2vec_format(file, binary=False)\n",
    "uus['君不见']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T08:17:45.637609Z",
     "start_time": "2020-11-16T08:17:45.633620Z"
    }
   },
   "source": [
    "### 将 train, val, test 数据中可能出现的词向量都保存好"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 198,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T12:33:29.435052Z",
     "start_time": "2020-11-16T12:32:57.648575Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Now we are in file: train\n",
      "Now we are in file: val\n",
      "Now we are in file: test\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "72618"
      ]
     },
     "execution_count": 198,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Collect a vector for every token that can appear in train/val/test:\n",
    "# whole-word lookup first, then a per-character fallback for OOV words.\n",
    "files = ['train','val','test']\n",
    "use_wv = dict()\n",
    "for file in files:\n",
    "    print('Now we are in file: %s' % file)\n",
    "    data = np.load('../Dataset/data/%s.npy' % file)\n",
    "    for row in range(len(data)):\n",
    "        doc = data[row, 0]  # lyric text (string)\n",
    "        for word in jieba.cut(doc):\n",
    "            try:\n",
    "                use_wv[word] = wv_from_text[word]\n",
    "            except KeyError:\n",
    "                for w in word:\n",
    "                    try:\n",
    "                        use_wv[w] = wv_from_text[w]\n",
    "                    except KeyError:\n",
    "                        # Neither the word nor this character has a vector.\n",
    "                        print(word)\n",
    "len(use_wv)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 199,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T12:34:19.936404Z",
     "start_time": "2020-11-16T12:33:59.733581Z"
    }
   },
   "outputs": [],
   "source": [
    "# Save all collected vectors in word2vec text format so they can be\n",
    "# reloaded quickly with KeyedVectors.load_word2vec_format.\n",
    "file = 'Lyric_ChineseEmbedding.txt'\n",
    "with open(file, 'w', encoding='utf-8') as f:\n",
    "    # Derive the dimension from the data instead of hard-coding 200.\n",
    "    dim = len(next(iter(use_wv.values()))) if use_wv else 200\n",
    "    f.write('%s %s\\n' % (len(use_wv), dim))\n",
    "    for key, value in use_wv.items():\n",
    "        # ' '.join produces the same output as the old replace-chain on str(list).\n",
    "        s = ' '.join('%s' % x for x in value.tolist())\n",
    "        f.write('%s %s\\n' % (key, s))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 200,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T12:35:15.747903Z",
     "start_time": "2020-11-16T12:34:57.560975Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "gensim.models.keyedvectors.Word2VecKeyedVectors"
      ]
     },
     "execution_count": 200,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Verify the saved lyric embedding file loads back as Word2VecKeyedVectors.\n",
    "uswv = KeyedVectors.load_word2vec_format(file, binary=False)\n",
    "type(uswv)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 无关测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 196,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T12:29:17.787215Z",
     "start_time": "2020-11-16T12:29:17.780230Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 7.43640e-02, -3.12850e-01, -2.19033e-01,  2.65098e-01,\n",
       "       -1.41281e-01,  5.08400e-03,  1.61318e-01,  1.57070e-01,\n",
       "       -1.22310e-02, -3.43070e-02, -3.97340e-02, -1.79081e-01,\n",
       "        1.12902e-01, -2.11087e-01,  1.17502e-01, -2.36397e-01,\n",
       "       -1.32380e-02, -2.93170e-02,  3.77779e-01, -1.99435e-01,\n",
       "       -1.14114e-01,  2.34738e-01,  2.20704e-01,  5.85370e-01,\n",
       "       -7.23230e-02,  4.65832e-01, -1.97843e-01,  3.45043e-01,\n",
       "        2.79139e-01, -1.28107e-01,  2.07017e-01,  1.78380e-01,\n",
       "       -5.67700e-02,  2.12179e-01,  1.04817e-01, -1.63010e-01,\n",
       "        7.59060e-02,  1.74777e-01, -5.64714e-01, -3.34254e-01,\n",
       "       -1.04427e-01, -2.77828e-01,  4.77887e-01, -1.83508e-01,\n",
       "        2.51575e-01,  2.09658e-01, -1.41139e-01, -4.30851e-01,\n",
       "       -1.35338e-01, -1.51300e-01, -2.01093e-01,  1.78970e-02,\n",
       "       -1.09884e-01,  1.79656e-01,  8.52280e-02, -2.14484e-01,\n",
       "        1.05480e-02,  2.22822e-01,  2.77732e-01, -3.10607e-01,\n",
       "       -5.66800e-03, -1.67432e-01,  4.11332e-01, -2.82726e-01,\n",
       "       -1.12450e-01, -5.64030e-02, -3.07230e-01,  2.42712e-01,\n",
       "        7.95480e-02, -4.56780e-02, -2.63112e-01, -1.41183e-01,\n",
       "        1.28548e-01,  2.38722e-01,  3.00400e-02, -2.66103e-01,\n",
       "        2.93401e-01,  5.01290e-01,  1.80035e-01, -3.27796e-01,\n",
       "        6.43000e-03,  2.95168e-01,  2.82956e-01,  9.41930e-02,\n",
       "        1.64882e-01, -2.48845e-01,  1.63839e-01, -5.06740e-02,\n",
       "       -2.62889e-01, -9.71910e-02,  2.19417e-01, -3.37517e-01,\n",
       "        2.81400e-03, -2.80614e-01,  3.81470e-01, -5.65230e-02,\n",
       "       -2.47350e-01, -2.88340e-01, -1.30629e-01,  2.82501e-01,\n",
       "        1.78575e-01,  1.32000e-04,  2.20482e-01,  3.56075e-01,\n",
       "       -1.21178e-01,  2.77648e-01, -1.16100e-02,  1.52550e-02,\n",
       "        8.27960e-02, -4.92928e-01,  1.65390e-01,  2.58306e-01,\n",
       "       -2.08328e-01, -2.14051e-01,  3.30092e-01, -2.19034e-01,\n",
       "       -3.85377e-01, -6.62770e-01, -6.98350e-02, -5.16150e-02,\n",
       "        2.71121e-01, -6.21520e-02,  7.80980e-02,  2.46680e-01,\n",
       "       -1.18661e-01,  1.93351e-01, -4.62458e-01, -1.30482e-01,\n",
       "        1.48470e-01,  1.33937e-01,  7.61870e-02, -9.30370e-02,\n",
       "        4.56740e-02, -2.88382e-01, -4.00070e-01, -2.86422e-01,\n",
       "       -2.16451e-01, -9.12550e-02,  1.47396e-01,  1.72030e-01,\n",
       "       -4.53243e-01, -7.01062e-01, -3.91576e-01, -1.08278e-01,\n",
       "        8.92520e-02,  8.23130e-02,  7.70700e-02, -1.33505e-01,\n",
       "       -1.33254e-01, -1.27673e-01, -3.21619e-01,  3.21436e-01,\n",
       "       -2.75346e-01,  6.98990e-02,  5.75070e-02,  7.71990e-02,\n",
       "       -1.32580e-02,  2.01404e-01,  2.73715e-01,  1.86200e-03,\n",
       "        6.72010e-02,  4.68350e-02,  1.65908e-01,  4.73905e-01,\n",
       "        2.53064e-01, -3.53483e-01, -2.28515e-01,  2.56506e-01,\n",
       "       -1.40482e-01,  3.79910e-02, -6.30530e-02,  1.76111e-01,\n",
       "        1.37112e-01, -7.58000e-03,  1.61219e-01, -1.02265e-01,\n",
       "       -1.45819e-01,  2.00164e-01, -1.72946e-01,  1.55184e-01,\n",
       "       -3.05044e-01,  9.94490e-02,  4.87580e-02,  1.98669e-01,\n",
       "        3.16290e-01,  3.09886e-01,  5.99550e-02,  3.54125e-01,\n",
       "        2.24319e-01,  1.61963e-01,  2.54480e-02, -1.89238e-01,\n",
       "        1.32883e-01, -2.78007e-01,  1.05088e-01,  2.98980e-01,\n",
       "       -8.65640e-02, -3.47662e-01, -1.22750e-01, -6.44450e-02],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 196,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: look up a single character in the full Tencent embedding.\n",
    "wv_from_text['方']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:25:18.340813Z",
     "start_time": "2020-11-16T07:25:18.332833Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(2, 200)"
      ]
     },
     "execution_count": 53,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Back-off experiment: if the whole string is OOV, stack the per-character\n",
    "# vectors row-wise. np.vstack replaces np.stack here: np.stack fails once\n",
    "# `mat` is already 2-D (i.e. for strings longer than two characters), while\n",
    "# vstack yields the identical (2, 200) result for this two-character input.\n",
    "mat = None\n",
    "s = '闪邀'\n",
    "try:\n",
    "    mat = wv_from_text[s]\n",
    "except KeyError:\n",
    "    for i, w in enumerate(s):\n",
    "        if i == 0:\n",
    "            mat = wv_from_text[w]\n",
    "        else:\n",
    "            mat = np.vstack((mat, wv_from_text[w]))\n",
    "mat.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2020-11-16T07:31:45.209394Z",
     "start_time": "2020-11-16T07:31:45.199446Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "8824330 200\n",
      "\n",
      "</s> 0.002001 0.002210 -0.001915 -0.001639 0.000683 0.001511 0.000470 0.000106 -0.001802 0.001109 -0.002178 0.000625 -0.000376 -0.000479 -0.001658 -0.000941 0.001290 0.001513 0.001485 0.000799 0.000772 -0.001901 -0.002048 0.002485 0.001901 0.001545 -0.000302 0.002008 -0.000247 0.000367 -0.000075 -0.001492 0.000656 -0.000669 -0.001913 0.002377 0.002190 -0.000548 -0.000113 0.000255 -0.001819 -0.002004 0.002277 0.000032 -0.001291 -0.001521 -0.001538 0.000848 0.000101 0.000666 -0.002107 -0.001904 -0.000065 0.000572 0.001275 -0.001585 0.002040 0.000463 0.000560 -0.000304 0.001493 -0.001144 -0.001049 0.001079 -0.000377 0.000515 0.000902 -0.002044 -0.000992 0.001457 0.002116 0.001966 -0.001523 -0.001054 -0.000455 0.001001 -0.001894 0.001499 0.001394 -0.000799 -0.000776 -0.001119 0.002114 0.001956 -0.000590 0.002107 0.002410 0.000908 0.002491 -0.001556 -0.000766 -0.001054 -0.001454 0.001407 0.000790 0.000212 -0.001097 0.000762 0.001530 0.000097 0.001140 -0.002476 0.002157 0.000240 -0.000916 -0.001042 -0.000374 -0.001468 -0.002185 -0.001419 0.002139 -0.000885 -0.001340 0.001159 -0.000852 0.002378 -0.000802 -0.002294 0.001358 -0.000037 -0.001744 0.000488 0.000721 -0.000241 0.000912 -0.001979 0.000441 0.000908 -0.001505 0.000071 -0.000030 -0.001200 -0.001416 -0.002347 0.000011 0.000076 0.000005 -0.001967 -0.002481 -0.002373 -0.002163 -0.000274 0.000696 0.000592 -0.001591 0.002499 -0.001006 -0.000637 -0.000702 0.002366 -0.001882 0.000581 -0.000668 0.001594 0.000020 0.002135 -0.001410 -0.001303 -0.002096 -0.001833 -0.001600 -0.001557 0.001222 -0.000933 0.001340 0.001845 0.000678 0.001475 0.001238 0.001170 -0.001775 -0.001717 -0.001828 -0.000066 0.002065 -0.001368 -0.001530 -0.002098 0.001653 -0.002089 -0.000290 0.001089 -0.002309 -0.002239 0.000721 0.001762 0.002132 0.001073 0.001581 -0.001564 -0.001820 0.001987 -0.001382 0.000877 0.000287 0.000895 -0.000591 0.000099 -0.000843 -0.000563\n",
      "\n",
      "的 0.209092 -0.165459 -0.058054 0.281176 0.102982 0.099868 0.047287 0.113531 0.202805 0.240482 0.026028 0.073504 0.010873 0.010201 -0.056060 -0.063864 -0.025928 -0.158832 -0.019444 -0.144610 -0.124821 0.000499 -0.050971 0.113983 0.088150 0.080318 -0.145976 0.093325 0.139695 -0.082682 -0.034356 0.061241 -0.090153 0.053166 -0.171991 -0.187834 0.115600 0.219545 -0.200234 -0.106904 0.033836 0.005707 0.484198 0.147382 -0.165274 0.094883 -0.202281 -0.638371 -0.127920 -0.212338 -0.250738 -0.022411 -0.315008 0.169237 -0.002799 0.019125 0.017462 0.028013 0.195060 0.036385 -0.051681 0.154037 0.214785 -0.179985 -0.020429 -0.044819 -0.074923 0.105441 -0.081715 -0.034099 -0.096518 -0.004290 0.095423 0.234515 -0.138332 0.134917 0.082070 0.051714 0.159327 0.061818 0.037091 0.239265 0.073274 0.170960 0.223636 -0.187691 -0.206850 -0.051000 -0.269477 -0.116970 0.213069 -0.096122 0.035362 -0.254648 0.021978 0.071687 0.109870 -0.104643 -0.175653 0.097061 -0.068692 0.196374 0.007704 0.072367 -0.275905 0.217282 -0.056664 -0.321484 -0.004813 -0.041167 -0.118400 -0.159937 0.065294 -0.092538 0.013975 -0.219047 -0.058431 -0.177256 -0.043169 -0.151647 -0.006049 -0.279595 -0.005488 0.096733 0.147219 0.197677 -0.088133 0.053465 0.038738 0.059665 -0.132819 0.019606 0.224926 -0.176136 -0.411968 -0.044071 -0.120198 -0.107929 -0.001640 0.036719 -0.243131 -0.273457 -0.317418 -0.079236 0.054842 -0.143945 0.168189 -0.013057 -0.145664 0.135278 0.029447 -0.141014 -0.183899 -0.080112 -0.113538 0.071163 0.134968 0.141939 0.144405 -0.249114 0.454654 -0.077072 -0.001521 0.298252 0.160275 0.085942 -0.213363 0.083022 -0.000400 0.134826 -0.000681 -0.017328 -0.026751 0.111903 0.010307 -0.124723 0.031472 0.081697 0.071449 0.011486 -0.091571 -0.039319 -0.112756 0.171106 0.026869 -0.077058 -0.052948 0.252645 -0.035071 0.040870 0.277828 0.085193 0.006959 -0.048913 0.279133 0.169515 0.068156 -0.278624 -0.173408 0.035439\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Inspect the raw embedding file: the header line, then the first two vectors.\n",
    "file = 'Tencent_AILab_ChineseEmbedding.txt'\n",
    "with open(file, 'r', encoding='utf-8') as f:\n",
    "    print(f.readline())\n",
    "    print(f.readline())\n",
    "    print(f.readline())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch",
   "language": "python",
   "name": "torch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
