{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from pytorch_transformers import BertModel, BertTokenizer\n",
    "from dataloader import ContentSet\n",
    "from torch.utils.data import DataLoader\n",
    "import pandas as pd\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Held-out test split. NOTE(review): label_type=-1 presumably means\n",
    "# \"no/raw labels\" - confirm against ContentSet in dataloader.py.\n",
    "test_file=\"./data/test_dataset.csv\"\n",
    "test_set = ContentSet(test_file, label_type=-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# shuffle defaults to False, so batches keep the CSV row order - the\n",
    "# concatenated sentence vectors computed later stay aligned with dataset rows.\n",
    "test_loader = DataLoader(test_set, batch_size=100)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a local (Chinese) BERT checkpoint; .cuda() moves the weights to GPU.\n",
    "# NOTE(review): from_pretrained is expected to leave the model in eval mode;\n",
    "# an explicit bert.eval() here would make the inference intent obvious - confirm.\n",
    "bert = BertModel.from_pretrained(\"../bert_cn/\").cuda()\n",
    "tokenizer = BertTokenizer.from_pretrained(\"../bert_cn/\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def pad_sentences(sent_list, padding_value, max_sent_len=256):\n",
    "    \"\"\"Right-pad (or truncate) token-id lists into a batch tensor.\n",
    "\n",
    "    Args:\n",
    "        sent_list: list of token-id sequences (one per sentence).\n",
    "        padding_value: id written into positions past each sentence's end.\n",
    "        max_sent_len: hard cap on the padded length (default 256).\n",
    "\n",
    "    Returns:\n",
    "        out:  int64 tensor [batch, max_len] of token ids.\n",
    "        mask: float tensor [batch, max_len]; 1.0 on real tokens, 0.0 on padding.\n",
    "    \"\"\"\n",
    "    if not sent_list:  # empty batch: return empty tensors instead of crashing on max()\n",
    "        return torch.empty([0, 0], dtype=torch.int64), torch.ones([0, 0])\n",
    "    max_len = min(max(len(s) for s in sent_list), max_sent_len)\n",
    "    batchsize = len(sent_list)\n",
    "    # torch.full builds the pre-filled tensor in one step (was empty().fill_())\n",
    "    out = torch.full([batchsize, max_len], padding_value, dtype=torch.int64)\n",
    "    mask = torch.ones([batchsize, max_len])\n",
    "    for i, sent in enumerate(sent_list):\n",
    "        sent_len = min(len(sent), max_len)\n",
    "        out[i, :sent_len] = torch.tensor(sent[:sent_len])\n",
    "        mask[i, sent_len:].fill_(0.0)\n",
    "    return out, mask\n",
    "\n",
    "def encode_sent_list(tt, sent_list, cls_token=False):\n",
    "    \"\"\"Tokenize each sentence with `tt` into a list of id lists; leading and\n",
    "    trailing tabs are stripped first. cls_token=True asks the tokenizer to\n",
    "    add its special tokens.\"\"\"\n",
    "    encoded = []\n",
    "    for sent in sent_list:\n",
    "        encoded.append(tt.encode(sent.strip(\"\\t\"), add_special_tokens=cls_token))\n",
    "    return encoded"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def Sents2Vecs(batch):\n",
    "    \"\"\"Encode the sentences in batch[2] with the module-level `bert`.\n",
    "\n",
    "    Returns the (sequence hidden states, pooled output) pair produced by\n",
    "    BertModel. Uses the global `tokenizer` and `bert` (on GPU).\n",
    "    \"\"\"\n",
    "    input_ids = encode_sent_list(tokenizer, batch[2])\n",
    "    # pad with the integer id 0 (was the float 0.0; the id tensor is int64).\n",
    "    # NOTE(review): assumes 0 is the tokenizer's pad id - confirm.\n",
    "    ipt_tensor, mask_tensor = pad_sentences(input_ids, 0)\n",
    "    hiddens, outs = bert(ipt_tensor.cuda(), attention_mask=mask_tensor.cuda())\n",
    "    return hiddens, outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Candidate pool: training subset searched below for near-duplicates of the test set.\n",
    "train_set = ContentSet(\"./data/train_1_3.csv\", label_type=-1)\n",
    "train_loader = DataLoader(train_set, batch_size=100)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 词平均"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 重新计算Sent Vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mean-pool BERT token states into one sentence vector per test row.\n",
    "test_vecs = []\n",
    "for batch in test_loader:\n",
    "    # character count as a proxy for token count, capped at 254.\n",
    "    # NOTE(review): the cap here is 254 while pad_sentences truncates at 256,\n",
    "    # and tokenized length can differ from character length - confirm intended.\n",
    "    sent_lens = [min(len(list(sent)), 254) for sent in batch[2]]\n",
    "    with torch.no_grad():\n",
    "        rst = Sents2Vecs(batch)\n",
    "        # average each sentence's first sent_lens[i] hidden-state vectors\n",
    "        vecs = torch.stack([rst[0][i][:sent_lens[i], :].mean(dim=0) for i in range( len(sent_lens) )])\n",
    "        test_vecs.append(vecs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Concatenate all batch outputs into one [n_test, hidden] matrix of sentence vectors.\n",
    "with torch.no_grad():\n",
    "    test_semant = torch.cat(test_vecs, dim=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([3206, 768])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# sanity check: one hidden-size vector per test sentence\n",
    "test_semant.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Cache the expensive BERT encodings so later sessions can skip recomputation.\n",
    "with open(\"test_semant\", \"wb\") as fw:\n",
    "    pickle.dump(test_semant, fw, protocol=pickle.HIGHEST_PROTOCOL)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 读取缓存的Sent Vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the cached sentence vectors. Acceptable only because this is a\n",
    "# locally produced cache - never pickle.load untrusted files.\n",
    "with open(\"test_semant\", \"rb\") as fr:\n",
    "    test_semant = pickle.load(fr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-row L2 norms, shape [n_test, 1] - the test-side factor of the\n",
    "# cosine-similarity denominator used in the retrieval loop below.\n",
    "norm_te = test_semant.norm(p=2, dim=1).unsqueeze(1)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 检索训练集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 111,
   "metadata": {},
   "outputs": [],
   "source": [
    "# For each training batch: compute cosine similarity of every train sentence\n",
    "# against every test sentence, then keep the ids of train sentences whose best\n",
    "# match scores >= 0.9 (near-duplicates of the test set).\n",
    "ids = []\n",
    "for batch in train_loader:\n",
    "    sent_lens = [min(len(list(sent)), 254) for sent in batch[2]]\n",
    "    with torch.no_grad():\n",
    "        rst = Sents2Vecs(batch)\n",
    "        # mean-pooled sentence vectors for this training batch\n",
    "        vecs = torch.stack([rst[0][i][:sent_lens[i], :].mean(dim=0) for i in range(len(sent_lens))])\n",
    "        # cosine similarity matrix [n_test, batch]: dot products over norm products\n",
    "        dot = torch.matmul(test_semant, vecs.T)\n",
    "        norm_vecs = vecs.norm(p=2, dim=1).unsqueeze(0)\n",
    "        norm_mtx = torch.matmul(norm_te, norm_vecs)\n",
    "        sco = dot/norm_mtx\n",
    "        # best test-side similarity for each train sentence\n",
    "        mx = sco.max(dim=0)\n",
    "        st = mx[0].sort(dim=0)\n",
    "        # use non-in-place ge(); the old ge_ destroyed the score tensor in place\n",
    "        cnt = int(mx[0].ge(0.9).sum())\n",
    "        # BUG FIX: when cnt == 0 the old slice list[-0:] selected the WHOLE\n",
    "        # batch; only extend when something actually passed the threshold.\n",
    "        if cnt > 0:\n",
    "            ids.extend([batch[0][i] for i in st[1].tolist()[-cnt:]])\n",
    "df = pd.DataFrame(np.array(ids), columns=[\"id\"])\n",
    "df.to_csv(\"./data/Sim0_9.csv\", index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
