{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 向量检索(相似度计算)\n",
    "Numpy 矩阵计算，\n",
    "Sklearn KDTree， \n",
    "Annoy 算法 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Platform : linux [win32/linux]\n",
      "Systerm  : 3.6.8 (default, Jan 14 2019, 11:02:34) \n",
      "[GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] \n",
      "numpy  Version: 1.16.4\n",
      "sklearn Version: 0.23.2\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "import pickle\n",
    "import numpy as np\n",
    "import sklearn\n",
    "\n",
    "np.set_printoptions(precision=3)   # limit numpy display precision to 3 decimals\n",
    "\n",
    "# Environment report so later results can be reproduced.\n",
    "print('Platform : {} [win32/linux]'.format(sys.platform))\n",
    "print('System   : {} '.format(sys.version))\n",
    "print('numpy  Version: {}'.format(np.__version__))\n",
    "print('sklearn Version: {}'.format(sklearn.__version__))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "### 获取Bert WordEmbedding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "\"\"\"\n",
    "******* Module notes ******\n",
    "\n",
    "# Purpose: parse a TensorFlow 1.x CKPT model (graph + weights)\n",
    "# Created: 2020-05-08  22:24\n",
    "# Author : Vincent\n",
    "# Version: V1.0\n",
    "\"\"\"\n",
    "import tensorflow as tf\n",
    "\n",
    "\n",
    "# TensorFlow CKPT model file parser\n",
    "class CKPTInfo(object):\n",
    "\n",
    "    def __init__(self, ckpt_path, data_path=None, tensorboard_path=None):\n",
    "        \"\"\"\n",
    "        :param ckpt_path:   graph-definition file, e.g. ./model-xxx.ckpt.meta\n",
    "        :param data_path:   checkpoint weights, e.g. ./model-xxx.ckpt (optional)\n",
    "        :param tensorboard_path:   directory for a TensorBoard graph dump (optional)\n",
    "        \"\"\"\n",
    "        # Import only the computation graph; weights are restored further below.\n",
    "        self.saver = tf.train.import_meta_graph(ckpt_path, clear_devices=True)\n",
    "\n",
    "        # ########################################################  list all variables\n",
    "        # tf.all_variables() is deprecated; tf.global_variables() is the\n",
    "        # documented drop-in replacement and returns the same collection.\n",
    "        var_size = 0  # running total of trainable parameters\n",
    "        print('All Variables ：')\n",
    "        for variables_i in tf.global_variables():\n",
    "            print('     {}  {:30s} {:15s} {:s}'.format(variables_i.trainable, variables_i.name,\n",
    "                                                       str(variables_i.shape), variables_i.dtype.name))\n",
    "\n",
    "            # Parameter count of one variable = product of its dimensions.\n",
    "            if variables_i.trainable:\n",
    "                var_size_i = 1\n",
    "                for x in variables_i.shape:\n",
    "                    var_size_i *= x\n",
    "                var_size += var_size_i\n",
    "\n",
    "        self.train_var_size = var_size\n",
    "        print('     Trainable_Variables Size:【{}】'.format(self.train_var_size))\n",
    "\n",
    "        # All operations of the default graph, kept for external inspection.\n",
    "        self.operation_list = tf.get_default_graph().get_operations()\n",
    "\n",
    "        # #############################################################\n",
    "        # Session bound to the imported graph.\n",
    "        self.sess = tf.Session(graph=tf.get_default_graph())\n",
    "\n",
    "        # Save the graph structure for TensorBoard:  tensorboard --logdir=log_path\n",
    "        if tensorboard_path is not None:\n",
    "            tf.summary.FileWriter(tensorboard_path, self.sess.graph)\n",
    "\n",
    "        # Restore the trained weights when a data file is provided; without\n",
    "        # it only the (uninitialized) graph is available.\n",
    "        if data_path:\n",
    "            self.saver.restore(self.sess, data_path)\n",
    "\n",
    "    # Read the current value of one model variable\n",
    "    def var_values(self, tensor_name):\n",
    "        \"\"\"\n",
    "        :param tensor_name:  full tensor name, e.g. 'bert/embeddings/word_embeddings:0'\n",
    "        :return:  numpy array holding the tensor's current value\n",
    "        \"\"\"\n",
    "        # Look the tensor up by name in the default graph, then evaluate it.\n",
    "        tensor = tf.get_default_graph().get_tensor_by_name(tensor_name)\n",
    "        tensor_value = self.sess.run(tensor)\n",
    "        print('{}:  \\n{}'.format(tensor_name, tensor_value))\n",
    "\n",
    "        return tensor_value\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING: Logging before flag parsing goes to stderr.\n",
      "W1102 11:53:59.956907 140131286914880 deprecation.py:323] From <ipython-input-2-7b2e38b06e8f>:28: all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
      "Instructions for updating:\n",
      "Please use tf.global_variables instead.\n",
      "W1102 11:53:59.998155 140131286914880 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use standard file APIs to check for files with this prefix.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All Variables ：\n",
      "     True  bert/embeddings/word_embeddings:0 (21128, 768)    float32_ref\n",
      "     True  bert/embeddings/token_type_embeddings:0 (2, 768)        float32_ref\n",
      "     True  bert/embeddings/position_embeddings:0 (512, 768)      float32_ref\n",
      "     True  bert/embeddings/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/embeddings/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_0/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_0/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_0/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_0/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_0/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_0/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_0/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_0/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_1/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_1/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_1/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_1/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_1/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_1/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_1/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_1/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_2/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_2/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_2/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_2/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_2/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_2/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_2/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_2/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_3/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_3/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_3/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_3/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_3/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_3/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_3/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_3/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_4/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_4/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_4/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_4/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_4/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_4/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_4/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_4/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_5/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_5/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_5/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_5/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_5/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_5/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_5/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_5/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_6/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_6/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_6/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_6/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_6/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_6/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_6/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_6/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_7/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_7/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_7/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_7/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_7/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_7/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_7/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_7/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_8/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_8/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_8/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_8/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_8/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_8/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_8/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_8/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_9/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_9/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_9/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_9/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_9/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_9/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_9/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_9/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_10/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_10/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_10/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_10/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_10/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_10/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_10/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_10/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/attention/self/query/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_11/attention/self/query/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/attention/self/key/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_11/attention/self/key/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/attention/self/value/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_11/attention/self/value/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/attention/output/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  bert/encoder/layer_11/attention/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/attention/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/attention/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/intermediate/dense/kernel:0 (768, 3072)     float32_ref\n",
      "     True  bert/encoder/layer_11/intermediate/dense/bias:0 (3072,)         float32_ref\n",
      "     True  bert/encoder/layer_11/output/dense/kernel:0 (3072, 768)     float32_ref\n",
      "     True  bert/encoder/layer_11/output/dense/bias:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/output/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  bert/encoder/layer_11/output/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  bert/pooler/dense/kernel:0     (768, 768)      float32_ref\n",
      "     True  bert/pooler/dense/bias:0       (768,)          float32_ref\n",
      "     True  cls/predictions/output_bias:0  (21128,)        float32_ref\n",
      "     True  cls/predictions/transform/LayerNorm/beta:0 (768,)          float32_ref\n",
      "     True  cls/predictions/transform/LayerNorm/gamma:0 (768,)          float32_ref\n",
      "     True  cls/predictions/transform/dense/bias:0 (768,)          float32_ref\n",
      "     True  cls/predictions/transform/dense/kernel:0 (768, 768)      float32_ref\n",
      "     True  cls/seq_relationship/output_bias:0 (2,)            float32_ref\n",
      "     True  cls/seq_relationship/output_weights:0 (2, 768)        float32_ref\n",
      "     Trainable_Variables Size:【102882442】\n",
      "bert/embeddings/word_embeddings:0:  \n",
      "[[ 0.026  0.011 -0.019 ...  0.09   0.003  0.006]\n",
      " [ 0.002  0.022  0.001 ...  0.081  0.002  0.025]\n",
      " [ 0.015  0.001  0.003 ...  0.084  0.012  0.028]\n",
      " ...\n",
      " [ 0.035  0.002  0.009 ...  0.009  0.034  0.01 ]\n",
      " [ 0.054  0.029  0.026 ...  0.053  0.065  0.035]\n",
      " [ 0.02   0.002 -0.009 ...  0.08  -0.056  0.025]]\n",
      "(21128, 768)\n"
     ]
    }
   ],
   "source": [
    "# Parse the pretrained Chinese BERT checkpoint (graph + weights).\n",
    "ckpt_info = CKPTInfo(ckpt_path=r'/notebooks/pre_model/chinese_L-12_H-768_A-12/bert_model.ckpt.meta',\n",
    "                     data_path=r'/notebooks/pre_model/chinese_L-12_H-768_A-12/bert_model.ckpt',\n",
    "                     tensorboard_path=None)\n",
    "\n",
    "# Read the word-embedding matrix, then drop the parser reference\n",
    "# (only the numpy array is needed from here on).\n",
    "word_embedding = ckpt_info.var_values('bert/embeddings/word_embeddings:0')\n",
    "print(word_embedding.shape)\n",
    "del ckpt_info"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21128\n"
     ]
    }
   ],
   "source": [
    "# Vocabulary: one token per line; row index matches the embedding row.\n",
    "# Use a context manager so the file handle is closed deterministically.\n",
    "vocab_path = r'/notebooks/pre_model/chinese_L-12_H-768_A-12/vocab.txt'\n",
    "with open(vocab_path, 'r', encoding='utf-8') as vocab_file:\n",
    "    word_dict = [line_i.strip() for line_i in vocab_file]\n",
    "print(len(word_dict))  # lookup example: word_dict.index('你')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "### 矩阵计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ################################### 余弦相似度计算\n",
    "# 根据词向量计算词之间的余弦相似度矩阵  【衡量两个向量方向的差异】\n",
    "def cos_similar(np_a, np_b):\n",
    "    \"\"\"\n",
    "    :param np_a:  词向量 np 格式\n",
    "    :param np_b:  词向量 np 格式\n",
    "    :return:  相似度矩阵 词数量*词数量\n",
    "    \"\"\"\n",
    "#     # 若 np_a、 np_b 为一维向量，将其 reshape 为 2维向量\n",
    "#     if len(np_a.shape) == 1:\n",
    "#         np_a = np_a.reshape(1, -1)\n",
    "#     if len(np_b.shape) == 1:\n",
    "#         np_b = np_b.reshape(1, -1)\n",
    "\n",
    "    # 计算 np_a 的 2范数\n",
    "    a_norm = np.linalg.norm(np_a, axis=1)\n",
    "    a_norm = a_norm.reshape(-1, 1)\n",
    "\n",
    "    # 计算 np_b 的 2范数\n",
    "    b_norm = np.linalg.norm(np_b, axis=1)\n",
    "    b_norm = b_norm.reshape(1, -1)\n",
    "\n",
    "    # 余弦相似度 【防止分母为 0 输出为Nan 加上一个极小数 1e-8 】\n",
    "    cos_similar_score = np_a.dot(np_b.T) / (a_norm.dot(b_norm)+1e-8)\n",
    "\n",
    "#     print( (a_norm.dot(b_norm)+1e-8))\n",
    "    return cos_similar_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ################################### 归一化余弦相似度计算\n",
    "# 平方和归一化\n",
    "def normalization(np_a):\n",
    "    \"\"\"\n",
    "    :param np_a:  词向量 np 格式\n",
    "    :return:  归一化成平方和为 1 \n",
    "    \"\"\"\n",
    "    a_norm = np.linalg.norm(np_a, axis=1).reshape(-1,1)\n",
    "\n",
    "    return np_a / a_norm\n",
    "\n",
    "\n",
    "# 根据词向量计算词之间的余弦相似度矩阵(数据已归一化)  【衡量两个向量方向的差异】\n",
    "def cos_similar_norm(np_a, np_b):\n",
    "    \"\"\"\n",
    "    :param np_a:  词向量 np 格式\n",
    "    :param np_b:  词向量 np 格式\n",
    "    :return:  相似度矩阵 词数量*词数量\n",
    "    \"\"\"\n",
    "    # 余弦相似度 \n",
    "    cos_similar_score = np_a.dot(np_b.T)\n",
    "\n",
    "    return cos_similar_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.002, -0.021,  0.07 , -0.017, -0.027], dtype=float32)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 检索词\n",
    "word_x_embedding = word_embedding[[word_dict.index('你')]]\n",
    "word_x_embedding.shape\n",
    "word_x_embedding[0,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "24 ms ± 67.5 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
     ]
    }
   ],
   "source": [
    "# ##############################################################\n",
    "# 运行时间\n",
    "%timeit cos_similar(word_x_embedding, word_embedding)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-0.223, -0.237, -0.229, -0.241, -0.247]], dtype=float32)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = cos_similar(word_x_embedding, word_embedding)\n",
    "s.shape\n",
    "s[:10,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "kth = 5\n",
    "top_index = np.argpartition(-s, kth=kth+1, axis=1)[:,:kth+1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你 1.0000001\n",
      "您 0.61749154\n",
      "妳 0.5540988\n",
      "我 0.48161584\n",
      "他 0.32874238\n",
      "她 0.3061644\n"
     ]
    }
   ],
   "source": [
    "for top_i in top_index[0]:\n",
    "    print(word_dict[top_i], s[0, top_i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ##############################################################\n",
    "# 归一化\n",
    "word_embedding_norm = normalization(word_embedding) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.002, -0.017,  0.056, -0.014, -0.022], dtype=float32)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "word_x_embedding_norm = word_embedding_norm[[word_dict.index('你')]]\n",
    "word_x_embedding_norm.shape\n",
    "word_x_embedding_norm[0,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.96 ms ± 58.6 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
     ]
    }
   ],
   "source": [
    "# 运行时间\n",
    "%timeit cos_similar_norm(word_x_embedding_norm, word_embedding_norm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-0.223, -0.237, -0.229, -0.241, -0.247]], dtype=float32)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = cos_similar_norm(word_x_embedding_norm, word_embedding_norm)\n",
    "s.shape\n",
    "s[:10,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "kth = 5\n",
    "top_index = np.argpartition(-s, kth=kth+1, axis=1)[:,:kth+1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你 1.0000001\n",
      "您 0.61749154\n",
      "妳 0.55409884\n",
      "我 0.4816159\n",
      "他 0.32874238\n",
      "她 0.30616444\n"
     ]
    }
   ],
   "source": [
    "for top_i in top_index[0]:\n",
    "    print(word_dict[top_i], s[0, top_i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ##############################################################\n",
    "# 欧氏距离\n",
    "def euclidean_similar(np_a, np_b):\n",
    "    # 若 np_a、 np_b 为一维向量，将其 reshape 为 2维向量\n",
    "    if len(np_a.shape) == 1:\n",
    "        np_a = np_a.reshape(1, -1)\n",
    "    if len(np_b.shape) == 1:\n",
    "        np_b = np_b.reshape(1, -1)\n",
    "\n",
    "    # 若 np_a 的长度大于 np_b时，a,b 互换，减少循环次数提高效率\n",
    "    if np_a.shape[0] > np_b.shape[0]:\n",
    "        np_a, np_b = np_b, np_a\n",
    "        change_flag = True\n",
    "    else:\n",
    "        change_flag = False\n",
    "\n",
    "    # 欧氏距离 Euclidean \n",
    "    euclidean_similar_m = np.zeros([len(np_a), len(np_b)])\n",
    "\n",
    "    for np_i, np_a_i in enumerate(np_a):\n",
    "        euclidean_similar_m[np_i, :] = np.sqrt(((np_a_i-np_b)**2).sum(axis=1))\n",
    "    \n",
    "    # 若前面 a,b 互换，则相似系数矩阵转置\n",
    "    if change_flag:\n",
    "        euclidean_similar_m = euclidean_similar_m.T\n",
    "\n",
    "    return euclidean_similar_m"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "27.2 ms ± 85.3 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
     ]
    }
   ],
   "source": [
    "# 运行时间\n",
    "%timeit s = euclidean_similar(word_x_embedding, word_embedding)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1.769, 1.776, 1.77 , 1.778, 1.785]])"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s = euclidean_similar(word_x_embedding, word_embedding)\n",
    "s.shape\n",
    "s[:10,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "kth = 5\n",
    "top_index = np.argpartition(s, kth=kth+1, axis=1)[:,:kth+1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "我 1.2434134483337402\n",
      "你 0.0\n",
      "您 1.1638455390930176\n",
      "他 1.4242374897003174\n",
      "妳 1.3497015237808228\n",
      "， 1.4280239343643188\n"
     ]
    }
   ],
   "source": [
    "for top_i in top_index[0]:\n",
    "    print(word_dict[top_i], s[0, top_i])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "### KDTree\n",
    "[Sklearn.neighbors.KDTree](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.neighbors import KDTree"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "tree = KDTree(word_embedding, leaf_size=2, metric='euclidean')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "129 ms ± 248 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
     ]
    }
   ],
   "source": [
    "# 运行时间\n",
    "%timeit dist, ind = tree.query(word_x_embedding, k=5)                # doctest: +SKIP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你 0.0\n",
      "您 1.1638456136456132\n",
      "我 1.2434133771776321\n",
      "妳 1.3497015256712674\n",
      "他 1.424237434937482\n",
      "， 1.4280239096169012\n"
     ]
    }
   ],
   "source": [
    "dist, ind = tree.query(word_x_embedding, k=6)  \n",
    "# print(ind)  # indices of 3 closest neighbors\n",
    "# print(dist)  # distances to 3 closest neighbors\n",
    "\n",
    "for ind_i, dist_i in zip(ind[0], dist[0]):\n",
    "    print(word_dict[ind_i], dist_i)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "---\n",
    "## Annoy \n",
    "https://github.com/spotify/annoy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: annoy in /root/.local/lib/python3.6/site-packages (1.17.0)\n",
      "\u001b[33mWARNING: You are using pip version 19.1.1, however version 20.2.4 is available.\n",
      "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
     ]
    }
   ],
   "source": [
    "! pip install --user annoy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "from annoy import AnnoyIndex"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "维度：768\n",
      "Items Num:   21128\n",
      "Trees Num:   10\n"
     ]
    }
   ],
   "source": [
    "f = word_embedding.shape[1]\n",
    "print('维度：{}'.format(f))\n",
    "\n",
    "# 创建 Annoy 检索 引擎\n",
    "a_engine = AnnoyIndex(f, metric='euclidean')  # Length of item vector that will be indexed  Metric can be \"angular\", \"euclidean\", \"manhattan\", \"hamming\", or \"dot\".\n",
    "\n",
    "# 将各个Embedding写入引擎中\n",
    "for i, word_i in enumerate(word_dict):\n",
    "    a_engine.add_item(i, word_embedding[i,:])\n",
    "    \n",
    "#     print(i)\n",
    "#     print(word_embedding[i,:].shape)\n",
    "#     break\n",
    "\n",
    "# 创建检索树，创建后无法再写入Embedding\n",
    "a_engine.build(10) # 10 trees.  builds a forest of n_trees trees. More trees gives higher precision when querying. \n",
    "\n",
    "print('Items Num:  ', a_engine.get_n_items())   # 引擎中 Embedding数量\n",
    "print('Trees Num:  ', a_engine.get_n_trees())   # 引擎中检索树数量\n",
    "\n",
    "# a_engine.save('SearchEngine.ann')  # 保存检索字典\n",
    "#\n",
    "# 创建检索引擎并导入已有检索字典\n",
    "# u = AnnoyIndex(f, metric='euclidean')   \n",
    "# u.load('SearchEngine.ann') # super fast, will just mmap the file\n",
    "# print(u.get_nns_by_item(0, 10, include_distances=True)) # will find the 10 nearest neighbors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "95.6 µs ± 143 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
     ]
    }
   ],
   "source": [
    "# 根据序号检索\n",
    "%timeit search_index, score = a_engine.get_nns_by_item(word_dict.index('你'), n=5, include_distances=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "155 µs ± 555 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)\n"
     ]
    }
   ],
   "source": [
    "# 根据 Embedding 检索\n",
    "%timeit search_index, score = a_engine.get_nns_by_vector(word_embedding[[word_dict.index('你')]][0], n=5, include_distances=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "你 0.0\n",
      "您 1.1638456583023071\n",
      "我 1.2434133291244507\n",
      "他 1.4242373704910278\n",
      "， 1.4280239343643188\n",
      "人 1.4439804553985596\n"
     ]
    }
   ],
   "source": [
    "# 检索结果\n",
    "search_index, score = a_engine.get_nns_by_item(word_dict.index('你'), n=6, include_distances=True)\n",
    "for i, score_i in zip(search_index, score):\n",
    "    print(word_dict[i], score_i)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
