{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "74f43268-5452-4288-91e3-9dbd6c85ac5b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[nltk_data] Error loading punkt: Remote end closed connection without\n",
      "[nltk_data]     response\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "False"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "import nltk\n",
     "import os\n",
     "\n",
     "# Custom NLTK data directory.\n",
     "# NOTE(review): hardcoded absolute Windows path — consider a\n",
     "# configurable DATA_DIR so the notebook runs on other machines.\n",
     "custom_data_path = r'E:\\jupyterlab\\data\\nltk_data'\n",
     "os.makedirs(custom_data_path, exist_ok=True)  # ensure the directory exists\n",
     "\n",
     "# Register the custom directory on NLTK's data search path\n",
     "nltk.data.path.append(custom_data_path)\n",
     "\n",
     "# Download the 'punkt' tokenizer models into the custom directory.\n",
     "# Returns True on success, False on failure — the recorded output shows\n",
     "# the remote connection was dropped, so this download did NOT succeed.\n",
     "nltk.download('punkt', download_dir=custom_data_path)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "bc8f6bdd-8d0d-4287-8452-4045db66f4d4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[['the', 'cat', 'sat', 'on', 'the', 'mat.'], ['the', 'cat', 'sat', 'on', '111', 'the', 'mat.'], ['the', 'dog', 'barked', 'at', 'the', 'moon.'], ['the', 'sun', 'is', 'shining', 'bright.']]\n",
      "['cat']\n",
      "[0. 0. 0. 0.]\n",
      "BM25 Results:  ['The sun is shining bright.', 'The dog barked at the moon.']\n"
     ]
    }
   ],
   "source": [
    "\n",
    "from rank_bm25 import BM25Okapi\n",
    "\n",
    "# Sample documents\n",
    "documents = [\n",
    "    \"The cat sat on the mat.\",\n",
    "    \"The cat sat on 111 the mat.\",\n",
    "    \"The cat dog barked at the moon.\",\n",
    "    \"The cat sun is shining bright.\"\n",
    "]\n",
    "\n",
    "# Tokenize the documents\n",
    "tokenized_corpus = [doc.lower().split(\" \") for doc in documents]\n",
    "print(tokenized_corpus)\n",
    "# Initialize BM25\n",
    "bm25 = BM25Okapi(tokenized_corpus)\n",
    "\n",
    "# Query\n",
    "query = \"cat on mat\"\n",
    "query_list = query.lower().split(\" \")\n",
    "print(query_list)\n",
    "# Retrieve BM25 results\n",
    "bm25_scores = bm25.get_scores(query_list)\n",
    "bm25_results = bm25.get_top_n(tokenized_query, documents, n=2)\n",
    "print(bm25_scores)\n",
    "print(\"BM25 Results: \", bm25_results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "7a96b5b1-c1b6-44f5-8921-5411825d7301",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.        , 0.46864736, 0.51082562])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from rank_bm25 import BM25Okapi\n",
    "\n",
    "# 语料库\n",
    "corpus = [\n",
    "    \"Hello there good man!\",\n",
    "    \"It is quite windy in London\",\n",
    "    \"How is the weather today?\"\n",
    "]\n",
    "\n",
    "# 分割成字（中文相当于分词）\n",
    "# \"Hello there good man!\",=》 ['Hello','there','good','man']\n",
    "tokenized_corpus = [doc.split(\" \") for doc in corpus]\n",
    "\n",
    "bm25 = BM25Okapi(tokenized_corpus)\n",
    "# <rank_bm25.BM25Okapi at 0x1047881d0>\n",
    "\n",
    "# 输入Query搜索词条\n",
    "query = \"weather on London\"\n",
    "\n",
    "# 分词\n",
    "tokenized_query = query.split(\" \")\n",
    "\n",
    "# query与每一个语料库文档的得分\n",
    "doc_scores = bm25.get_scores(tokenized_query)\n",
    "doc_scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a3f18073-541e-4e1d-ae13-1f4c7e953c9d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0.49765247 0.49765247 0.        ]\n"
     ]
    }
   ],
   "source": [
    "from rank_bm25 import BM25Okapi\n",
    "\n",
    "# 语料库\n",
    "corpus = [\n",
    "    \"The cat sat on the mat.\",\n",
    "    \"The dog barked at the moon.\",\n",
    "    \"The sun is shining bright.\"\n",
    "]\n",
    "\n",
    "# 分词并去除标点符号\n",
    "tokenized_corpus = [doc.lower().replace('.', '').split() for doc in corpus]\n",
    "\n",
    "bm25 = BM25Okapi(tokenized_corpus)\n",
    "\n",
    "# 输入查询\n",
    "query = \"cat dog\"\n",
    "tokenized_query = query.lower().split()\n",
    "\n",
    "# 计算得分\n",
    "doc_scores = bm25.get_scores(tokenized_query)\n",
    "print(doc_scores)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd3f7beb-8940-4ff6-92b9-b91de2ff6ee0",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "342e50f8-2c8b-493d-94c4-103be2de00f9",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
