{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from functools import partial\n",
    "import argparse\n",
    "import os\n",
    "import sys\n",
    "import random\n",
    "import time\n",
    "\n",
    "import numpy as np\n",
    "import hnswlib\n",
    "import paddle\n",
    "import paddle.nn.functional as F\n",
    "from paddlenlp.data import Stack, Tuple, Pad\n",
    "from paddlenlp.datasets import load_dataset, MapDataset\n",
    "from paddlenlp.utils.log import logger\n",
    "import paddlenlp\n",
    "\n",
    "from base_model import SemanticIndexBase\n",
    "from data2 import convert_example, create_dataloader\n",
    "from data2 import gen_id2corpus\n",
    "from ann_util import build_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run inference on CPU; switch to \"gpu\" if one is available\n",
    "paddle.set_device(\"cpu\") "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the model and load its trained parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Name of the pretrained transformer used as the encoder backbone\n",
    "model_name = \"ernie-1.0\"\n",
    "\n",
    "pretrained_model = paddlenlp.transformers.AutoModel.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the encoder in the semantic-index model.\n",
    "# output_emb_size=256 must match the ANN index dimension used later (output_emb_size = 256 below).\n",
    "model = SemanticIndexBase(pretrained_model, output_emb_size=256)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Path to the fine-tuned checkpoint saved during training\n",
    "params_path = \"model_param/model_180/model_state.pdparams\" \n",
    "\n",
    "if params_path and os.path.isfile(params_path): \n",
    "    # Load the trained weights into the semantic index model\n",
    "    state_dict = paddle.load(params_path) \n",
    "    model.set_dict(state_dict) \n",
    "    print(\"Loaded parameters from %s\" % params_path) \n",
    "else:\n",
    "    raise ValueError(\"Please set params_path with correct pretrained model file\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the corpus file and build the ANN index from its contents"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tokenizer matching the pretrained encoder (model_name = \"ernie-1.0\")\n",
    "tokenizer = paddlenlp.transformers.AutoTokenizer.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preprocessing fn: converts one example to model inputs with max_seq_length=60\n",
    "# (convert_example is defined in data2.py — exact output format lives there)\n",
    "trans_func = partial(convert_example, tokenizer=tokenizer, max_seq_length=60)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def batchify_fn(samples):\n",
    "    \"\"\"Collate a batch of tokenized samples.\n",
    "\n",
    "    Pads input ids and token type ids along axis 0 with the tokenizer's\n",
    "    pad values and returns the padded int64 fields as a list.\n",
    "    \"\"\"\n",
    "    pad_fields = Tuple(\n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_id, dtype=\"int64\"),\n",
    "        Pad(axis=0, pad_val=tokenizer.pad_token_type_id, dtype=\"int64\"),\n",
    "    )\n",
    "    # Tuple(...) yields one padded array per field; materialize as a list\n",
    "    return list(pad_fields(samples))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Corpus of candidate documents to recall from\n",
    "corpus_file = \"recall_dataset/corpus.csv\" \n",
    "\n",
    "# id2corpus maps id -> corpus text (integer keys: see id2corpus[i] lookups below)\n",
    "id2corpus = gen_id2corpus(corpus_file) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: id2corpus should be a dict-like mapping\n",
    "print(type(id2corpus))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the first 10 corpus texts (assumes ids 0..9 exist)\n",
    "for i in range(10):\n",
    "    print(id2corpus[i]) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One {id: text} dict per corpus entry, preserving corpus order\n",
    "corpus_list = [{idx: text} for idx, text in id2corpus.items()]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the first 10 wrapped corpus entries\n",
    "for i in range(10):\n",
    "    print(corpus_list[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the list in a paddlenlp MapDataset so it supports .map(trans_func)\n",
    "corpus_ds = MapDataset(corpus_list) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: should report paddlenlp MapDataset\n",
    "print(type(corpus_ds))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Peek at the first 10 dataset records\n",
    "for i in range(10):\n",
    "    print(corpus_ds[i])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batch the corpus sequentially; shuffle=False so row order (and thus the\n",
    "# ids inserted into the ANN index) stays aligned with id2corpus\n",
    "batch_sampler = paddle.io.BatchSampler(corpus_ds, batch_size=64, shuffle=False)\n",
    "\n",
    "corpus_data_loader = paddle.io.DataLoader(dataset=corpus_ds.map(trans_func), batch_sampler=batch_sampler, collate_fn=batchify_fn, return_list=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run this cell to build the ANN index from scratch.\n",
    "\n",
    "output_emb_size = 256\n",
    "# HNSW parameters — see hnswlib docs for max_elements / ef / M semantics\n",
    "hnsw_max_elements = 1000000 \n",
    "hnsw_ef = 100 \n",
    "hnsw_m = 100 \n",
    "\n",
    "# build_index comes from ann_util; presumably embeds each corpus batch with\n",
    "# the model and inserts it into an hnswlib index — verify in ann_util.py\n",
    "final_index = build_index(output_emb_size, hnsw_max_elements, hnsw_ef, hnsw_m, corpus_data_loader, model)\n",
    "\n",
    "save_index_dir = \"index_file\" \n",
    "if not os.path.exists(save_index_dir):\n",
    "    os.makedirs(save_index_dir)\n",
    "\n",
    "save_index_path = os.path.join(save_index_dir, \"final_index.bin\") \n",
    "final_index.save_index(save_index_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Alternative path: load a prebuilt index file instead of rebuilding.\n",
    "# Kept as an inert string literal; remove the quotes to enable it.\n",
    "'''\n",
    "#如果有现成的索引文件final_index.bin,就运行这段代码\n",
    "save_index_path = \"index_file/final_index.bin\"\n",
    "output_emb_size = 256\n",
    "final_index = hnswlib.Index(space=\"ip\", dim=output_emb_size) \n",
    "final_index.load_index(save_index_path) \n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect all queries from the validation dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_query_text(similar_text_pair_file): \n",
    "    querys = []\n",
    "    with open(similar_text_pair_file, \"r\", encoding=\"utf-8\") as f:\n",
    "        for line in f:\n",
    "            splited_line = line.rstrip().split(\"\\t\") \n",
    "            if len(splited_line) != 2: \n",
    "                continue\n",
    "\n",
    "            if not splited_line[0] or not splited_line[1]: \n",
    "                continue\n",
    "\n",
    "            querys.append({\"text\": splited_line[0]}) \n",
    "\n",
    "    return querys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validation set of tab-separated (query, similar_title) pairs\n",
    "similar_text_pair_file = \"recall_dataset/dev.csv\" \n",
    "\n",
    "query_list = get_query_text(similar_text_pair_file) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: query_list is a plain Python list of dicts\n",
    "print(type(query_list))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show only a sample; dumping the full query list floods the notebook output\n",
    "print(query_list[:10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap the queries in a MapDataset, mirroring the corpus pipeline\n",
    "query_ds = MapDataset(query_list) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# shuffle=False keeps batch rows aligned with query_list; batch_size=64 is\n",
    "# relied upon by the recall loop below (text_index = 64 * batch_index + row)\n",
    "batch_sampler = paddle.io.BatchSampler(query_ds, batch_size=64, shuffle=False)\n",
    "\n",
    "query_data_loader = paddle.io.DataLoader(dataset=query_ds.map(trans_func), batch_sampler=batch_sampler, collate_fn=batchify_fn, return_list=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-batch query embeddings; iterated with enumerate(...) in the recall loop\n",
    "query_embedding = model.get_semantic_embedding(query_data_loader) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Recall candidates for each validation query and write the result file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "recall_result_dir = \"recall_result_file\" \n",
    " \n",
    "if not os.path.exists(recall_result_dir): \n",
    "    os.mkdir(recall_result_dir)\n",
    "\n",
    "recall_result_file = \"recall_result.txt\" \n",
    "\n",
    "recall_result_file = os.path.join(recall_result_dir, recall_result_file) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the recall: embed each query batch, fetch the top-50 neighbors from\n",
    "# the ANN index, and write one \"query\\tdoc\\tsimilarity\" line per candidate.\n",
    "\n",
    "with open(recall_result_file, \"w\", encoding=\"utf-8\") as f:\n",
    "    for batch_index, batch_query_embedding in enumerate(query_embedding):\n",
    "        # hnswlib knn_query returns (ids, distances); for space=\"ip\" the\n",
    "        # distance is 1 - inner_product, hence similarity = 1.0 - distance below\n",
    "        recalled_idx, cosine_sims = final_index.knn_query(batch_query_embedding.numpy(), 50)\n",
    "\n",
    "        batch_size = len(cosine_sims)\n",
    "\n",
    "        for row_index in range(batch_size):\n",
    "            # 64 is the query DataLoader batch size; recover the global query index.\n",
    "            # Fix: removed a stray full-width period (。) that made this line a SyntaxError.\n",
    "            text_index = 64 * batch_index + row_index\n",
    "            for idx, doc_idx in enumerate(recalled_idx[row_index]):\n",
    "                f.write(\"{}\\t{}\\t{}\\n\".format(query_list[text_index][\"text\"], id2corpus[doc_idx], 1.0 - cosine_sims[row_index][idx]))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "search",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
