{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/usr/local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    }
   ],
   "source": [
    "# Imports: standard library, third-party, then project-local modules.\n",
    "import json\n",
    "import os\n",
    "import random\n",
    "import sys\n",
    "from time import time\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from sklearn import metrics\n",
    "from transformers import AutoModel, AutoTokenizer, AutoConfig\n",
    "\n",
    "from derivative_bert_models.models.base_components import PositionalEncoding\n",
    "from utils import build_iterator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Runtime configuration: compute device, pretrained-model dir, checkpoint path.\n",
    "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "bert_pre_path = \"./bert_uncased_model/\"\n",
    "output_filep = \"./output/bert.ckpt\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DcBert(nn.Module):\n",
    "    \"\"\"BERT-based document/reply matcher.\n",
    "\n",
    "    Encodes a document and a reply separately with BERT, concatenates the\n",
    "    two token sequences, runs them through one extra Transformer encoder\n",
    "    layer, and scores the pair from the two [CLS] hidden states with a\n",
    "    2-way linear head. Also keeps an in-memory index of response\n",
    "    embeddings for retrieval (see build_index / search_all).\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.bert = AutoModel.from_pretrained(bert_pre_path)\n",
    "        self.tokenizer = AutoTokenizer.from_pretrained(bert_pre_path)\n",
    "        # One extra 8-head self-attention layer applied over the\n",
    "        # concatenated document+reply sequence.\n",
    "        trans_layer = nn.TransformerEncoderLayer(self.bert.config.hidden_size, 8)\n",
    "        self.trans_encoder = nn.TransformerEncoder(trans_layer, 1)\n",
    "        self.pe = PositionalEncoding(self.bert.config.hidden_size, 512)\n",
    "        # Maps the two concatenated [CLS] vectors to 2 class logits.\n",
    "        self.dense = nn.Linear(2 * self.bert.config.hidden_size, 2)\n",
    "        self.origin_text = [] # indexed candidate reply texts\n",
    "        self.origin_text_to_embeddings = {} # reply text -> its embedding (one-to-one)\n",
    "        self.origin_text_embeddings = []\n",
    "        self.origin_text_nums = 1\n",
    "\n",
    "    def forward(self, x1, x2):\n",
    "        \"\"\"Score (document, reply) pairs.\n",
    "\n",
    "        x1, x2: batched raw strings (document and reply respectively).\n",
    "        Returns the (batch, 2) matching logits.\n",
    "        \"\"\"\n",
    "        # bert\n",
    "        _tx1 = self.tokenizer(x1, padding=True, return_tensors='pt')\n",
    "        _tx2 = self.tokenizer(x2, padding=True, return_tensors='pt')\n",
    "        _tx1.to(DEVICE)\n",
    "        _tx2.to(DEVICE)\n",
    "        _x1 = self.bert(**_tx1)[0]\n",
    "        _x2 = self.bert(**_tx2)[0]\n",
    "\n",
    "        # get [cls] index in doc; _x1 is the document sequence, so after the\n",
    "        # concat below, position _x1.shape[1] is the first token ([CLS]) of _x2.\n",
    "        d_cls_id = _x1.shape[1]\n",
    " \n",
    "        # concat to transformer\n",
    "        x = torch.cat([_x1, _x2], dim=1)\n",
    "        \n",
    "\n",
    "        # positional encoding\n",
    "        x = self.pe(x)\n",
    "\n",
    "        # transformer\n",
    "        # NOTE(review): nn.TransformerEncoder expects (seq, batch, dim) input by\n",
    "        # default, but x here is (batch, seq, dim) -- confirm this is intended.\n",
    "        x = self.trans_encoder(x)\n",
    "\n",
    "        # get [cls] hidden states and concat\n",
    "        x = torch.cat([x[:, 0, :], x[:, d_cls_id, :]], dim=-1)\n",
    "\n",
    "        # using dense to compute score.\n",
    "        out = self.dense(x)\n",
    "        return out\n",
    "    \n",
    "    # Case 1: no reply supplied -- match the query against every indexed response.\n",
    "    def search_all(self, x1):\n",
    "        \"\"\"Score query string x1 against all responses stored by build_index.\n",
    "\n",
    "        Returns the (num_responses, 2) logits, or None (after printing a\n",
    "        message) if no responses have been indexed.\n",
    "        \"\"\"\n",
    "        # _tx2 holds the precomputed response embeddings paired with query _x1\n",
    "        begin_time=time()\n",
    "        if self.origin_text_nums==0:\n",
    "            print(\"请填入回复文本，使用response。。。\")\n",
    "            return\n",
    "        _tx2 =  self.origin_text_embeddings  # precomputed embeddings (name is historical)\n",
    "        _tx1 = self.tokenizer([x1], padding=True, return_tensors='pt')\n",
    "        _tx1.to(DEVICE)\n",
    "        _x1 = self.bert(**_tx1)[0]\n",
    "        print(_x1.shape)\n",
    "        print(_tx2.shape)\n",
    "#         _tx1 = {key:value.repeat(self.origin_text_nums,1,1) for key,value in _tx1.items()}\n",
    "        # broadcast the single encoded query against every indexed response\n",
    "        _x1 = _x1.repeat(self.origin_text_nums,1,1)\n",
    "        d_cls_id = _tx2.shape[1]\n",
    "        # concat to transformer\n",
    "        # NOTE(review): order here is [response, query], the reverse of\n",
    "        # forward()'s [document, reply] -- confirm this asymmetry is intended.\n",
    "        x = torch.cat([_tx2, _x1], dim=1)\n",
    "        \n",
    "\n",
    "        # positional encoding\n",
    "        x = self.pe(x)\n",
    "\n",
    "        # transformer\n",
    "        x = self.trans_encoder(x)\n",
    "\n",
    "        # get [cls] hidden states and concat\n",
    "        x = torch.cat([x[:, 0, :], x[:, d_cls_id, :]], dim=-1)\n",
    "\n",
    "        # using dense to compute score.\n",
    "        out = self.dense(x)\n",
    "        end_time = time()\n",
    "        print(\"预测耗费时间：{} s\".format(str(end_time-begin_time)))\n",
    "        \n",
    "        return out\n",
    "    \n",
    "    # Case 2: n candidate replies supplied explicitly.\n",
    "    def search(self, x1, responses):\n",
    "        \"\"\"Placeholder: score x1 against an explicit response list (unimplemented).\"\"\"\n",
    "        _tx1 = self.tokenizer(x1, padding=True, return_tensors='pt')\n",
    "        _tx1.to(DEVICE)\n",
    "        pass\n",
    "        \n",
    "        \n",
    "    def build_index(self,texts):\n",
    "        \"\"\"Precompute and store BERT embeddings for the candidate reply texts.\"\"\"\n",
    "        self.origin_text_nums = len(texts)\n",
    "        self.origin_text = texts\n",
    "        _texts = self.tokenizer(texts, padding=True, return_tensors='pt')\n",
    "        _texts.to(DEVICE)\n",
    "        self.origin_text_embeddings = self.bert(**_texts)[0]\n",
    "        for text, embedding in zip(texts, self.origin_text_embeddings):\n",
    "            self.origin_text_to_embeddings[text]=embedding      "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DcBert(\n",
       "  (bert): BertModel(\n",
       "    (embeddings): BertEmbeddings(\n",
       "      (word_embeddings): Embedding(21128, 768, padding_idx=0)\n",
       "      (position_embeddings): Embedding(512, 768)\n",
       "      (token_type_embeddings): Embedding(2, 768)\n",
       "      (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "      (dropout): Dropout(p=0.1, inplace=False)\n",
       "    )\n",
       "    (encoder): BertEncoder(\n",
       "      (layer): ModuleList(\n",
       "        (0): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (1): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (2): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (3): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (4): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (5): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (6): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (7): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (8): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (9): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (10): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "        (11): BertLayer(\n",
       "          (attention): BertAttention(\n",
       "            (self): BertSelfAttention(\n",
       "              (query): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (key): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (value): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "            (output): BertSelfOutput(\n",
       "              (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "            )\n",
       "          )\n",
       "          (intermediate): BertIntermediate(\n",
       "            (dense): Linear(in_features=768, out_features=3072, bias=True)\n",
       "          )\n",
       "          (output): BertOutput(\n",
       "            (dense): Linear(in_features=3072, out_features=768, bias=True)\n",
       "            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (dropout): Dropout(p=0.1, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (pooler): BertPooler(\n",
       "      (dense): Linear(in_features=768, out_features=768, bias=True)\n",
       "      (activation): Tanh()\n",
       "    )\n",
       "  )\n",
       "  (trans_encoder): TransformerEncoder(\n",
       "    (layers): ModuleList(\n",
       "      (0): TransformerEncoderLayer(\n",
       "        (self_attn): MultiheadAttention(\n",
       "          (out_proj): Linear(in_features=768, out_features=768, bias=True)\n",
       "        )\n",
       "        (linear1): Linear(in_features=768, out_features=2048, bias=True)\n",
       "        (dropout): Dropout(p=0.1, inplace=False)\n",
       "        (linear2): Linear(in_features=2048, out_features=768, bias=True)\n",
       "        (norm1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        (norm2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
       "        (dropout1): Dropout(p=0.1, inplace=False)\n",
       "        (dropout2): Dropout(p=0.1, inplace=False)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (pe): PositionalEncoding()\n",
       "  (dense): Linear(in_features=1536, out_features=2, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Instantiate the model on the selected device; the bare trailing\n",
    "# expression displays the module tree.\n",
    "model = DcBert().to(DEVICE)\n",
    "model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_ccf_data(train_test='train'):\n",
    "    \"\"\"Load the CCF query/reply matching data.\n",
    "\n",
    "    Reads <split>.query.tsv (query_id, query) and <split>.reply.tsv\n",
    "    (query_id, reply_id, reply[, label]) from ./data/<split>/ and returns\n",
    "    a flat list of [query, reply, label] rows. Rows without a label (the\n",
    "    test split has no label column) get label None instead of raising\n",
    "    TypeError on int(None).\n",
    "    \"\"\"\n",
    "    path = \"./data/\"\n",
    "    queries = {}\n",
    "    with open(os.path.join(path, train_test, train_test + '.query.tsv')) as f:\n",
    "        for line in f:\n",
    "            span = line.strip().split('\\t')\n",
    "            queries[span[0]] = {'query': span[1], 'reply': []}\n",
    "\n",
    "    with open(os.path.join(path, train_test, train_test + '.reply.tsv')) as f:\n",
    "        for line in f:\n",
    "            span = line.strip().split('\\t')\n",
    "            if len(span) == 4:\n",
    "                q_id, r_id, r, label = span\n",
    "            else:\n",
    "                # unlabeled row: query_id, reply_id, reply\n",
    "                label = None\n",
    "                q_id, r_id, r = span\n",
    "            queries[q_id]['reply'].append([r_id, r, label])\n",
    "\n",
    "    d = []\n",
    "    for v in queries.values():\n",
    "        for r_id, rc, label in v['reply']:\n",
    "            # train split is fully labeled; keep None for unlabeled rows\n",
    "            d.append([v['query'], rc, int(label) if label is not None else None])\n",
    "    return d\n",
    "\n",
    "datas = load_ccf_data('train')\n",
    "# NOTE(review): no random seed is set, so this 80/20 split is not reproducible.\n",
    "np.random.shuffle(datas)\n",
    "n = int(len(datas) * 0.8)\n",
    "train_data = datas[:n]\n",
    "dev_data = datas[n:]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[['当然是好的楼层好的户型要贵了，买个性价比稍微高点的。这个小区哪栋位置好，什么户型好点', '小区大多数都是95平的', 0], ['证下来没有', '方便接电话吗？', 0], ['现在能看这套房子吗', '可以看', 1], ['方便看么？', '您想明天几点看？', 1], ['另一套咋样', '这套性价比好些', 1], ['读哪所小学', '这个是龙祥路小学', 1], ['含税吗', 'PHONE', 0], ['您方便吗', '橡树湾吗', 0], ['他这个家具怎么办', '书房的确需要自己改动了', 0], ['什么时候能看', '个人买商办类房源得全款购买，北京名下无房，社保或者纳税满五年。', 0]]\n"
     ]
    }
   ],
   "source": [
    "# Wrap the two splits in batched iterators and peek at a few training rows.\n",
    "batch_size = 32\n",
    "train_iter = build_iterator(train_data, batch_size, DEVICE)\n",
    "dev_iter = build_iterator(dev_data, batch_size, DEVICE)\n",
    "print(train_data[:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate(model, data_iter):\n",
    "    \"\"\"Evaluate model on data_iter.\n",
    "\n",
    "    Switches the model to eval mode (the caller is responsible for\n",
    "    restoring train mode) and returns (accuracy, mean cross-entropy loss).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    loss_total = 0.0\n",
    "    predict_all = []\n",
    "    labels_all = []\n",
    "    with torch.no_grad():\n",
    "        for doc, query, labels in data_iter:\n",
    "            outputs = model(doc, query)\n",
    "            loss = F.cross_entropy(outputs, labels)\n",
    "            # .item() accumulates a python float instead of retaining tensors\n",
    "            loss_total += loss.item()\n",
    "            labels_all.extend(labels.cpu().numpy().tolist())\n",
    "            # predicted class = argmax over the two logits; lists avoid the\n",
    "            # O(n^2) cost of np.append in a loop\n",
    "            predict_all.extend(outputs.argmax(dim=1).cpu().numpy().tolist())\n",
    "    acc = metrics.accuracy_score(labels_all, predict_all)\n",
    "    return acc, loss_total / len(data_iter)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, train_iter, dev_iter):\n",
    "    \"\"\"Train for 2 epochs; checkpoint to output_filep whenever dev loss improves.\"\"\"\n",
    "    model.train()\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)\n",
    "    dev_best_loss = float('inf')\n",
    "    total_batch = 0\n",
    "    last_improve = 0  # batch index of the last dev-loss improvement\n",
    "    for epoch in range(2):\n",
    "        print('epoch {} starting..........'.format(epoch))\n",
    "        for i, (doc, query, labels) in enumerate(train_iter):\n",
    "            print(total_batch)\n",
    "            out = model(doc, query)\n",
    "            model.zero_grad()\n",
    "            loss = F.cross_entropy(out, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            print(\"train loss {0}\".format(loss))\n",
    "            if total_batch % 20 == 0:\n",
    "                dev_acc, dev_loss = evaluate(model, dev_iter)\n",
    "                # BUG FIX: evaluate() leaves the model in eval mode (dropout\n",
    "                # disabled); restore train mode before further optimization.\n",
    "                model.train()\n",
    "                if dev_loss < dev_best_loss:\n",
    "                    dev_best_loss = dev_loss\n",
    "                    torch.save(model.state_dict(), output_filep)\n",
    "                    last_improve = total_batch\n",
    "\n",
    "                msg = \"val loss {0}, val_acc {1}\"\n",
    "                print(msg.format(dev_loss, dev_acc))\n",
    "            total_batch += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 0 starting..........\n",
      "0\n",
      "train loss 0.6314787268638611\n",
      "val loss 0.5671117305755615, val_acc 0.7509844799629373\n",
      "1\n",
      "train loss 0.5264757871627808\n",
      "2\n",
      "train loss 0.5254492163658142\n",
      "3\n",
      "train loss 0.9129909873008728\n",
      "4\n",
      "train loss 0.29441776871681213\n",
      "5\n",
      "train loss 0.8393858075141907\n",
      "6\n",
      "train loss 0.42777159810066223\n",
      "7\n",
      "train loss 0.5871984958648682\n",
      "8\n",
      "train loss 0.5640465617179871\n",
      "9\n",
      "train loss 0.517058253288269\n",
      "10\n",
      "train loss 0.6139292120933533\n",
      "11\n",
      "train loss 0.5601129531860352\n",
      "12\n",
      "train loss 0.6074283123016357\n",
      "13\n",
      "train loss 0.5541792511940002\n",
      "14\n",
      "train loss 0.6700742244720459\n",
      "15\n",
      "train loss 0.4992196261882782\n",
      "16\n",
      "train loss 0.43150895833969116\n",
      "17\n",
      "train loss 0.4990749657154083\n",
      "18\n",
      "train loss 0.6635454297065735\n",
      "19\n",
      "train loss 0.5056222677230835\n",
      "20\n",
      "train loss 0.7060697078704834\n",
      "val loss 0.5259908437728882, val_acc 0.7546907574704657\n",
      "21\n",
      "train loss 0.44724175333976746\n",
      "22\n",
      "train loss 0.4729962944984436\n",
      "23\n",
      "train loss 0.569221556186676\n",
      "24\n",
      "train loss 0.5161874890327454\n",
      "25\n",
      "train loss 0.45421648025512695\n",
      "26\n",
      "train loss 0.5455036759376526\n",
      "27\n",
      "train loss 0.5129182934761047\n",
      "28\n",
      "train loss 0.5119307041168213\n",
      "29\n",
      "train loss 0.5466891527175903\n",
      "30\n",
      "train loss 0.48642605543136597\n",
      "31\n",
      "train loss 0.4771554470062256\n",
      "32\n",
      "train loss 0.47329145669937134\n",
      "33\n",
      "train loss 0.5611618757247925\n",
      "34\n",
      "train loss 0.616987407207489\n",
      "35\n",
      "train loss 0.38711100816726685\n",
      "36\n",
      "train loss 0.5903722047805786\n",
      "37\n",
      "train loss 0.5570120215415955\n",
      "38\n",
      "train loss 0.5954364538192749\n",
      "39\n",
      "train loss 0.35820436477661133\n",
      "40\n",
      "train loss 0.6111028790473938\n",
      "val loss 0.49227654933929443, val_acc 0.7662728746814917\n",
      "41\n",
      "train loss 0.5465934872627258\n",
      "42\n",
      "train loss 0.3493103086948395\n",
      "43\n",
      "train loss 0.6018799543380737\n",
      "44\n",
      "train loss 0.4532216191291809\n",
      "45\n",
      "train loss 0.6199786067008972\n",
      "46\n",
      "train loss 0.42055416107177734\n",
      "47\n",
      "train loss 0.37895429134368896\n",
      "48\n",
      "train loss 0.42007187008857727\n",
      "49\n",
      "train loss 0.3679824769496918\n",
      "50\n",
      "train loss 0.5427132844924927\n",
      "51\n",
      "train loss 0.4900912642478943\n",
      "52\n",
      "train loss 0.5135436058044434\n",
      "53\n",
      "train loss 0.6048822402954102\n",
      "54\n",
      "train loss 0.5235422253608704\n",
      "55\n",
      "train loss 0.48224467039108276\n",
      "56\n",
      "train loss 0.5366954803466797\n",
      "57\n",
      "train loss 0.46054232120513916\n",
      "58\n",
      "train loss 0.600256621837616\n",
      "59\n",
      "train loss 0.49034133553504944\n",
      "60\n",
      "train loss 0.4726533889770508\n",
      "val loss 0.49750033020973206, val_acc 0.7683576557794765\n",
      "61\n",
      "train loss 0.42034342885017395\n",
      "62\n",
      "train loss 0.6356143951416016\n",
      "63\n",
      "train loss 0.583588719367981\n",
      "64\n",
      "train loss 0.438738077878952\n",
      "65\n",
      "train loss 0.5043628811836243\n",
      "66\n",
      "train loss 0.4868418872356415\n",
      "67\n",
      "train loss 0.5658619403839111\n",
      "68\n",
      "train loss 0.3827340006828308\n",
      "69\n",
      "train loss 0.3337244689464569\n",
      "70\n",
      "train loss 0.501927375793457\n",
      "71\n",
      "train loss 0.47422653436660767\n",
      "72\n",
      "train loss 0.614553689956665\n",
      "73\n",
      "train loss 0.3688763678073883\n",
      "74\n",
      "train loss 0.4640260934829712\n",
      "75\n",
      "train loss 0.4967780113220215\n",
      "76\n",
      "train loss 0.4477512538433075\n",
      "77\n",
      "train loss 0.4260774850845337\n",
      "78\n",
      "train loss 0.3369362950325012\n",
      "79\n",
      "train loss 0.6807775497436523\n",
      "80\n",
      "train loss 0.41270712018013\n",
      "val loss 0.4632750153541565, val_acc 0.7861941162844568\n",
      "81\n",
      "train loss 0.44517481327056885\n",
      "82\n",
      "train loss 0.6060213446617126\n",
      "83\n",
      "train loss 0.4873473048210144\n",
      "84\n",
      "train loss 0.48232585191726685\n",
      "85\n",
      "train loss 0.43286967277526855\n",
      "86\n",
      "train loss 0.3466922640800476\n",
      "87\n",
      "train loss 0.33313727378845215\n",
      "88\n",
      "train loss 0.6331649422645569\n",
      "89\n",
      "train loss 0.29522705078125\n",
      "90\n",
      "train loss 0.4868783950805664\n",
      "91\n",
      "train loss 0.6328262686729431\n",
      "92\n",
      "train loss 0.6111916899681091\n",
      "93\n",
      "train loss 0.4621458351612091\n",
      "94\n",
      "train loss 0.44247791171073914\n",
      "95\n",
      "train loss 0.4893995523452759\n",
      "96\n",
      "train loss 0.5006353259086609\n",
      "97\n",
      "train loss 0.43058887124061584\n",
      "98\n",
      "train loss 0.6138615012168884\n",
      "99\n",
      "train loss 0.5123598575592041\n",
      "100\n",
      "train loss 0.511470377445221\n",
      "val loss 0.47476422786712646, val_acc 0.7720639332870048\n",
      "101\n",
      "train loss 0.4064585566520691\n",
      "102\n",
      "train loss 0.5073665380477905\n",
      "103\n",
      "train loss 0.4424562156200409\n",
      "104\n",
      "train loss 0.5729564428329468\n",
      "105\n",
      "train loss 0.3595547378063202\n",
      "106\n",
      "train loss 0.39160284399986267\n",
      "107\n",
      "train loss 0.5657534599304199\n",
      "108\n",
      "train loss 0.5626059770584106\n",
      "109\n",
      "train loss 0.4663218557834625\n",
      "110\n",
      "train loss 0.3831840455532074\n",
      "111\n",
      "train loss 0.36050891876220703\n",
      "112\n",
      "train loss 0.42184537649154663\n",
      "113\n",
      "train loss 0.3985496759414673\n",
      "114\n",
      "train loss 0.4474189579486847\n",
      "115\n",
      "train loss 0.48280030488967896\n",
      "116\n",
      "train loss 0.4336874783039093\n",
      "117\n",
      "train loss 0.4895973801612854\n",
      "118\n",
      "train loss 0.5287944674491882\n",
      "119\n",
      "train loss 0.5029650926589966\n",
      "120\n",
      "train loss 0.390742689371109\n",
      "val loss 0.43926942348480225, val_acc 0.7926801019226315\n",
      "121\n",
      "train loss 0.2521439492702484\n",
      "122\n",
      "train loss 0.6087156534194946\n",
      "123\n",
      "train loss 0.5311896800994873\n",
      "124\n",
      "train loss 0.5328835844993591\n",
      "125\n",
      "train loss 0.3216191232204437\n",
      "126\n",
      "train loss 0.4297094941139221\n",
      "127\n",
      "train loss 0.4456327557563782\n",
      "128\n",
      "train loss 0.47638946771621704\n",
      "129\n",
      "train loss 0.5521257519721985\n",
      "130\n",
      "train loss 0.44641709327697754\n",
      "131\n",
      "train loss 0.3827756345272064\n",
      "132\n",
      "train loss 0.5571607947349548\n",
      "133\n",
      "train loss 0.45356953144073486\n",
      "134\n",
      "train loss 0.5102439522743225\n",
      "135\n",
      "train loss 0.3686738610267639\n",
      "136\n",
      "train loss 0.5241921544075012\n",
      "137\n",
      "train loss 0.538539469242096\n",
      "138\n",
      "train loss 0.3019217252731323\n",
      "139\n",
      "train loss 0.45282986760139465\n",
      "140\n",
      "train loss 0.3115333914756775\n",
      "val loss 0.44188442826271057, val_acc 0.793838313643734\n",
      "141\n",
      "train loss 0.5873001217842102\n",
      "142\n",
      "train loss 0.35385656356811523\n",
      "143\n",
      "train loss 0.3654828369617462\n",
      "144\n",
      "train loss 0.38323870301246643\n",
      "145\n",
      "train loss 0.3524201810359955\n",
      "146\n",
      "train loss 0.48303499817848206\n",
      "147\n",
      "train loss 0.3289087414741516\n",
      "148\n",
      "train loss 0.5446372628211975\n",
      "149\n",
      "train loss 0.35285696387290955\n",
      "150\n",
      "train loss 0.40311574935913086\n",
      "151\n",
      "train loss 0.37839722633361816\n",
      "152\n",
      "train loss 0.3349907696247101\n",
      "153\n",
      "train loss 0.3613782823085785\n",
      "154\n",
      "train loss 0.3838360607624054\n",
      "155\n",
      "train loss 0.36243706941604614\n",
      "156\n",
      "train loss 0.3413000702857971\n",
      "157\n",
      "train loss 0.5793058276176453\n",
      "158\n",
      "train loss 0.34547144174575806\n",
      "159\n",
      "train loss 0.26575392484664917\n",
      "160\n",
      "train loss 0.3722679316997528\n",
      "val loss 0.450629323720932, val_acc 0.7945332406763956\n",
      "No optimization for a long time, auto-stopping...\n",
      "epoch 1 starting..........\n",
      "161\n",
      "train loss 0.35016223788261414\n",
      "162\n",
      "train loss 0.37191104888916016\n",
      "163\n",
      "train loss 0.4943353235721588\n",
      "164\n",
      "train loss 0.40449902415275574\n",
      "165\n",
      "train loss 0.2160373032093048\n",
      "166\n",
      "train loss 0.5256971716880798\n",
      "167\n",
      "train loss 0.26035842299461365\n",
      "168\n",
      "train loss 0.3670967221260071\n",
      "169\n",
      "train loss 0.47956591844558716\n",
      "170\n",
      "train loss 0.218063622713089\n",
      "171\n",
      "train loss 0.45420506596565247\n",
      "172\n",
      "train loss 0.4069471061229706\n",
      "173\n",
      "train loss 0.4100184142589569\n",
      "174\n",
      "train loss 0.510828971862793\n",
      "175\n",
      "train loss 0.5050724148750305\n",
      "176\n",
      "train loss 0.348792165517807\n",
      "177\n",
      "train loss 0.41952088475227356\n",
      "178\n",
      "train loss 0.3632589876651764\n",
      "179\n",
      "train loss 0.46354150772094727\n",
      "180\n",
      "train loss 0.2787085175514221\n",
      "val loss 0.4295072555541992, val_acc 0.7991660875608061\n",
      "181\n",
      "train loss 0.4852437376976013\n",
      "182\n",
      "train loss 0.45102834701538086\n",
      "183\n",
      "train loss 0.3773016929626465\n",
      "184\n",
      "train loss 0.43528449535369873\n",
      "185\n",
      "train loss 0.37591055035591125\n",
      "186\n",
      "train loss 0.3900280296802521\n",
      "187\n",
      "train loss 0.423612117767334\n",
      "188\n",
      "train loss 0.4501030743122101\n",
      "189\n",
      "train loss 0.2973765730857849\n",
      "190\n",
      "train loss 0.3830941915512085\n",
      "191\n",
      "train loss 0.33733803033828735\n",
      "192\n",
      "train loss 0.4194948077201843\n",
      "193\n",
      "train loss 0.3287925720214844\n",
      "194\n",
      "train loss 0.4202690124511719\n",
      "195\n",
      "train loss 0.410097599029541\n",
      "196\n",
      "train loss 0.37172195315361023\n",
      "197\n",
      "train loss 0.45898517966270447\n",
      "198\n",
      "train loss 0.4161989986896515\n",
      "199\n",
      "train loss 0.3964569568634033\n",
      "200\n",
      "train loss 0.25049158930778503\n",
      "val loss 0.4307098686695099, val_acc 0.7910586055130878\n",
      "201\n",
      "train loss 0.4189929962158203\n",
      "202\n",
      "train loss 0.5211871266365051\n",
      "203\n",
      "train loss 0.23593243956565857\n",
      "204\n",
      "train loss 0.4080183804035187\n",
      "205\n",
      "train loss 0.2960379123687744\n",
      "206\n",
      "train loss 0.46701857447624207\n",
      "207\n",
      "train loss 0.3359125256538391\n",
      "208\n",
      "train loss 0.2853158116340637\n",
      "209\n",
      "train loss 0.32118290662765503\n",
      "210\n",
      "train loss 0.24692511558532715\n",
      "211\n",
      "train loss 0.3835436999797821\n",
      "212\n",
      "train loss 0.3437981903553009\n",
      "213\n",
      "train loss 0.310507595539093\n",
      "214\n",
      "train loss 0.4226420521736145\n",
      "215\n",
      "train loss 0.2852320671081543\n",
      "216\n",
      "train loss 0.25419846177101135\n",
      "217\n",
      "train loss 0.3348238170146942\n",
      "218\n",
      "train loss 0.33518004417419434\n",
      "219\n",
      "train loss 0.403134286403656\n",
      "220\n",
      "train loss 0.3544749617576599\n",
      "val loss 0.45932769775390625, val_acc 0.779244845957841\n",
      "No optimization for a long time, auto-stopping...\n"
     ]
    }
   ],
   "source": [
    "train(model, train_iter, dev_iter)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['没问题，如果您需要更多帮助，请告诉我。', \"抱歉让你久等了，我会尽快的(●'◡'●)\", '好的，随时为您服务(*^_^*)', '当然了，小优在线。', '小优哪里做错了吗，不要生气哦，小优正在努力提升自己。', '好的，随时为您服务(*^_^*)', '很高兴收到您的联系方式，随时准备为您提供服务。', '放马过来吧，我可是很聪明的。', '有什么可以再帮您的？', '如果小优有没做好的地方，请您谅解，小优一定努力改正！']\n"
     ]
    }
   ],
   "source": [
    "# Test the search/indexing pipeline on a small hand-made sample.\n",
    "# Each item is [response, query, label]; only the response text is indexed.\n",
    "data_response_index = [['没问题，如果您需要更多帮助，请告诉我。', '嗯嗯等下', 1], [\"抱歉让你久等了，我会尽快的(●'◡'●)\", '你告诉我还要等多久', 1], ['好的，随时为您服务(*^_^*)', '没事了啊', 1], ['当然了，小优在线。', '你在线吗', 0], ['小优哪里做错了吗，不要生气哦，小优正在努力提升自己。', '你想死就过来', 0], ['好的，随时为您服务(*^_^*)', '好咯，', 0], ['很高兴收到您的联系方式，随时准备为您提供服务。', '214325325126.com', 0], ['放马过来吧，我可是很聪明的。', '再试一下吧', 0], ['有什么可以再帮您的？', '我是刚才咨询你的那个', 0], ['如果小优有没做好的地方，请您谅解，小优一定努力改正！', '你是大笨蛋', 1]]\n",
    "# Keep only the response strings (first element of each triple).\n",
    "data_response_index = [_[0] for _ in data_response_index]\n",
    "print(data_response_index)\n",
    "# Repeat the list x10 to exercise the index with more entries.\n",
    "model.build_index(data_response_index*10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Query the index built above with a sample user utterance and show the hits.\n",
    "print(model.search_all(\"嗯嗯等下\"))\n",
    "# print(model.origin_text_nums)\n",
    "# for key,value in model.origin_text_embeddings.items():\n",
    "#     print(value.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
