{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "26424b3c",
   "metadata": {},
   "source": [
    "### 导入模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e99778a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
    "from sentence_transformers import SentenceTransformer\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8c94479f",
   "metadata": {},
   "source": [
    "### 读取文本"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "88565151",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read the whole source document as a single UTF-8 string.\n",
    "with open(\"./data/10027.txt\", \"r\", encoding=\"utf-8\") as file:\n",
    "    text = file.read()"
   ]
  },
  },
  {
   "cell_type": "markdown",
   "id": "84265bdc",
   "metadata": {},
   "source": [
    "### 分块"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb30509c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize the recursive splitter. chunk_size/chunk_overlap are in\n",
    "# characters; separators are tried in order, covering Chinese and ASCII\n",
    "# sentence punctuation before falling back to character-level splitting (\"\").\n",
    "splitter = RecursiveCharacterTextSplitter(\n",
    "    chunk_size=256,\n",
    "    chunk_overlap=50,\n",
    "    separators=[\"\\n\\n\", \"\\n\", \"。\", \"；\", \"？\", \"！\", \".\", \"!\", \"?\", \"，\", \" \", \"\"]\n",
    ")\n",
    "chunks = splitter.split_text(text)\n",
    "# Strip leading sentence punctuation left over from splitting on it.\n",
    "# NOTE(review): later cells embed/upload `chunks`, not `clean_chunks` -\n",
    "# confirm which list is actually meant to be indexed.\n",
    "clean_chunks = [chunk.lstrip(\"。！？. \") for chunk in chunks]\n",
    "\n",
    "# Save the chunking result to a txt file for inspection.\n",
    "with open(\"./data/chunks_output.txt\", \"w\", encoding=\"utf-8\") as f:\n",
    "    for i, chunk in enumerate(clean_chunks):\n",
    "        f.write(f\"[Chunk {i+1}]: {chunk}\\n\\n\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0d1e5153",
   "metadata": {},
   "source": [
    "### 生成向量（bge-small-en-v1.5）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f139d51e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the local bge-small-en-v1.5 embedding model (384-dim output) on GPU.\n",
    "# NOTE(review): this is an English model while the corpus looks Chinese -\n",
    "# consider a Chinese/multilingual embedding model; verify retrieval quality.\n",
    "model = SentenceTransformer(\"./model/bge-small-en-v1.5\", device=\"cuda\")\n",
    "\n",
    "# NOTE(review): this embeds the raw `chunks`, not the `clean_chunks` that\n",
    "# were written to chunks_output.txt above - confirm which is intended.\n",
    "vectors = model.encode(chunks)\n",
    "np.save(\"./data/10027_vectors.npy\", vectors, allow_pickle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ef7e747",
   "metadata": {},
   "source": [
    "### 为Qdrant创建一个客户端对象"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9608607c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from qdrant_client import QdrantClient\n",
    "from qdrant_client.models import VectorParams, Distance\n",
    "\n",
    "# Connect to a locally running Qdrant instance.\n",
    "client = QdrantClient(\"http://localhost:6333\")\n",
    "\n",
    "# Drop old collections so the upload below starts from a clean slate.\n",
    "# NOTE(review): \"Spring\" is not used anywhere else in this notebook -\n",
    "# presumably left over from an earlier experiment; confirm and remove.\n",
    "client.delete_collection(collection_name=\"10027\")\n",
    "client.delete_collection(collection_name=\"Spring\")"
   ]
  },
  },
  {
   "cell_type": "markdown",
   "id": "e9fe3c48",
   "metadata": {},
   "source": [
    "### 创建集合10027"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa555dc1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Create the collection if it does not exist yet. size=384 matches the\n",
    "# embedding dimension of bge-small-en-v1.5; cosine distance for similarity.\n",
    "if not client.collection_exists(\"10027\"):\n",
    "    client.create_collection(\n",
    "        collection_name=\"10027\",\n",
    "        vectors_config=VectorParams(size=384, distance=Distance.COSINE),\n",
    "    )"
   ]
  },
  },
  {
   "cell_type": "markdown",
   "id": "288df02a",
   "metadata": {},
   "source": [
    "### 构造payload"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aa12ebb2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build payloads: one dict per chunk, e.g. {\"text\": chunk_text}.\n",
    "# Uses the raw `chunks` so payloads line up 1:1 with `vectors` above.\n",
    "payload = [{\"text\": chunk} for chunk in chunks]\n",
    "\n",
    "client.upload_collection(\n",
    "    collection_name=\"10027\",\n",
    "    vectors=vectors,\n",
    "    payload=payload,\n",
    "    ids=None,  # Vector ids will be assigned automatically\n",
    "    batch_size=256,  # How many vectors will be uploaded in a single request?\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6e0e4403",
   "metadata": {},
   "source": [
    "### 将集合内的全部内容输出到txt文件（测试使用）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5c36496",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Page through the entire collection with scroll(), 100 points per page,\n",
    "# and dump every point's id and payload text for manual inspection.\n",
    "all_points = []\n",
    "next_offset = None\n",
    "\n",
    "while True:\n",
    "    points, next_offset = client.scroll(\n",
    "        collection_name=\"10027\",\n",
    "        limit=100,  # page size; adjust as needed\n",
    "        with_payload=True,\n",
    "        offset=next_offset\n",
    "    )\n",
    "    if not points:\n",
    "        break\n",
    "    all_points.extend(points)\n",
    "    if next_offset is None:  # no further pages\n",
    "        break\n",
    "\n",
    "# Save id/text pairs to a txt file.\n",
    "with open(\"./data/10027_vectors.txt\", \"w\", encoding=\"utf-8\") as f:\n",
    "    for pt in all_points:\n",
    "        vec_id = pt.id\n",
    "        payload_text = pt.payload.get(\"text\", \"\")\n",
    "        f.write(f\"ID: {vec_id}\\nText: {payload_text}\\n\\n\")\n",
    "\n",
    "# Fixed: the message previously said spring_vectors.txt, but the file\n",
    "# written above is 10027_vectors.txt.\n",
    "print(f\"✅ 已将 {len(all_points)} 条记录写入 10027_vectors.txt\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1d0f56dc",
   "metadata": {},
   "source": [
    "### 查询函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6cb4a32",
   "metadata": {},
   "outputs": [],
   "source": [
    "def search_query(query_text, top_k=5):\n",
    "    \"\"\"Embed query_text with the sentence-transformer model and return\n",
    "    the top_k nearest points (payloads included) from collection 10027.\"\"\"\n",
    "    results = client.search(\n",
    "        collection_name=\"10027\",\n",
    "        query_vector=model.encode(query_text).tolist(),\n",
    "        limit=top_k,\n",
    "        with_payload=True,\n",
    "    )\n",
    "    return results"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0a049d0f",
   "metadata": {},
   "source": [
    "### 按关键字查询"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d1fdc25",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The question/keyword to search for.\n",
    "query = \"工作时间\"\n",
    "\n",
    "results = search_query(query, top_k=5)\n",
    "\n",
    "# Build the report once, then both print it and write it to a txt file\n",
    "# (the old code duplicated the loop and shadowed the builtin `id`).\n",
    "report = f\"\\nTop 5 results for: '{query}'\\n\\n\"\n",
    "for i, res in enumerate(results, 1):\n",
    "    point_id = res.id\n",
    "    text = res.payload.get(\"text\", \"N/A\")\n",
    "    score = res.score\n",
    "    report += f\"{i}. ID: {point_id}\\n\"\n",
    "    report += f\"   Text: {text}\\n\"\n",
    "    report += f\"   Score: {score:.4f}\\n\\n\"\n",
    "\n",
    "print(report, end=\"\")\n",
    "\n",
    "with open(\"./data/output.txt\", \"w\", encoding=\"utf-8\") as f:\n",
    "    f.write(report)"
   ]
  },
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "py310",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
