{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "6514661e-70d5-4016-9948-4ef24d984673",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['paper_segment_collection']"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# List the collections currently stored in the local Milvus Lite database file\n",
     "from pymilvus import MilvusClient\n",
     "client = MilvusClient(\"./milvus_demo.db\")\n",
     "client.list_collections()"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "eeccb42d-16ae-4e69-97e8-66c74a9bfee1",
   "metadata": {},
   "outputs": [],
    "source": [
     "# Fetch all rows matching a (user_id, paper_file_id) filter expression\n",
     "collection_name = \"paper_segment_collection\"\n",
     "user_id = 1\n",
     "paper_file_id = '68aeb785d76b04f70a369924'\n",
     "# NOTE(review): values are interpolated directly into the filter string;\n",
     "# for untrusted input prefer Milvus filter templating (filter_params).\n",
     "filter_expression = f\"user_id == {user_id} AND paper_file_id == '{paper_file_id}'\"\n",
     "res = client.query(\n",
     "    collection_name=collection_name,\n",
     "    filter=filter_expression,\n",
     "    output_fields=[\"*\"],  # return every field of each matching entity\n",
     ")"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "ced7c1c4-d941-42a2-9063-fb5bc27450f8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "82"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Number of stored segments matching the filter above\n",
     "len(res)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "4cf69659-956b-4a26-8b16-8f4bbc907443",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'**小结**：指出Transformer是首个完全依赖自注意力的序列转换模型，不使用RNN或卷积，引出其核心创新性。  \\n**论文选段**：To the best of our knowledge, however, the Transformer is the first transduction model relying entirely on self-attention to compute representations of its input and output without using sequence-aligned RNNs or convolution. In the following sections, we will describe the Transformer, motivate self-attention and discuss its advantages over models such as [17, 18] and [9].'"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Spot-check one stored segment by hard-coded index (exploratory only)\n",
     "res[71][\"text\"]"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "3f2b2164-ed57-4db7-975c-8ce9fb005315",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'**小结**：说明嵌入层与Softmax实现方式，包括权重共享机制及嵌入输出的 $\\\\sqrt{d_{\\\\text{model}}}$ 缩放因子。  \\n**论文选段**：  \\n**3.4 嵌入与Softmax**  \\n与其他序列转换模型类似，我们使用可学习嵌入将输入与输出token映射为 $d_{\\\\text{model}}$ 维向量。同时使用可学习线性变换与Softmax将解码器输出转为下一token预测概率。本模型中，两个嵌入层与解码器输出前的线性变换共享同一权重矩阵（类似[30]）。在嵌入层中，权重乘以 $\\\\sqrt{d_{\\\\text{model}}}$ 进行缩放。'"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Spot-check the last segment (len(res) == 82, so index 81) -- exploratory only\n",
     "res[81][\"text\"]"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "da89f281-d46b-4533-a9f2-fe4a97615caf",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "Please enter your API key:  ········\n"
     ]
    }
   ],
   "source": [
    "from getpass import getpass\n",
    "from langchain_community.embeddings import DashScopeEmbeddings\n",
    "\n",
    "model = \"text-embedding-v4\"\n",
    "api_key = getpass(\"Please enter your API key: \")\n",
    "\n",
    "embeddings = DashScopeEmbeddings(\n",
    "    model=model,\n",
    "    dashscope_api_key=api_key,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "499dd45c-ed7c-4b20-bc33-3e8868f7ff43",
   "metadata": {},
   "outputs": [],
    "source": [
     "# Embed the question; embed_query returns a single embedding vector\n",
     "# (note: despite the plural name, `query_vectors` holds one vector)\n",
     "query_vectors = embeddings.embed_query(\"论文的题目是什么？\")"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a7788aec-8e62-4872-ad4a-ca23f5e73b0f",
   "metadata": {},
   "outputs": [],
    "source": [
     "# Test filtered vector search: ANN search restricted to rows\n",
     "# matching filter_expression defined earlier\n",
     "filter_res = client.search(\n",
     "    collection_name=collection_name,\n",
     "    data=[query_vectors],  # search expects a list of query vectors\n",
     "    anns_field=\"embedding_vector\",\n",
     "    search_params={\"params\": {}},  # No additional parameters required for FLAT\n",
     "    limit=5,  # top-5 hits per query vector\n",
     "    filter=filter_expression,\n",
     "    output_fields=[\"text\"]\n",
     ")"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e766e9aa-519a-4449-ba56-6a6bf7ff6a60",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'id': 460268410561101853, 'distance': 0.3983493149280548, 'entity': {'text': '**Abstract**'}}, {'id': 460268410272219136, 'distance': 0.3562866151332855, 'entity': {'text': 'Provided proper attribution is provided, Google hereby grants permission to\\nreproduce the tables and figures in this paper solely for use in journalistic or\\nscholarly works.'}}, {'id': 460268413411131753, 'distance': 0.3467264771461487, 'entity': {'text': '[28] Romain Paulus, Caiming Xiong, and Richard Socher. A deep reinforced model for abstractive\\nsummarization. _arXiv preprint arXiv:1705.04304_, 2017.'}}, {'id': 460268413124084036, 'distance': 0.34113961458206177, 'entity': {'text': '[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In _Proceedings of the IEEE Conference on Computer Vision and Pattern_\\n_Recognition_, pages 770–778, 2016.'}}, {'id': 460268412980691251, 'distance': 0.34104278683662415, 'entity': {'text': '[4] Jianpeng Cheng, Li Dong, and Mirella Lapata. Long short-term memory-networks for machine\\nreading. _arXiv preprint arXiv:1601.06733_, 2016.'}}]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Hits for the first (and only) query vector passed in `data`\n",
     "filter_res[0]"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "4a854dd0-9615-449d-a67f-8829654eb4b6",
   "metadata": {},
   "outputs": [],
    "source": [
     "# Re-create the client for the deletion test.\n",
     "# NOTE(review): this opens \"./tmp/milvus_demo.db\", while the first cell used\n",
     "# \"./milvus_demo.db\" -- confirm which database file is actually intended.\n",
     "from pymilvus import MilvusClient\n",
     "client = MilvusClient(\"./tmp/milvus_demo.db\")"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "bf632fcb-4abc-4245-b1b3-d5a83e9d1648",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 删除后执行\n",
    "collection_name = \"paper_segment_collection\"\n",
    "user_id = 1\n",
    "paper_file_id = '68a723a24e8c22034c32cf33'\n",
    "\n",
    "def delete_by(user_id: int, paper_file_id: str) -> int:\n",
    "    \"\"\"根据user_id和paper_file_id组合条件删除数据\"\"\"\n",
    "    filter_expression = f\"user_id == {user_id} AND paper_file_id == '{paper_file_id}'\"\n",
    "    res = client.delete(\n",
    "        collection_name=collection_name,\n",
    "        filter=filter_expression\n",
    "    )\n",
    "        \n",
    "    return res[\"delete_count\"]\n",
    "\n",
    "delete_by(user_id, paper_file_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "aad2e59a-8976-4384-8618-3034d1ffb060",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
    "source": [
     "# Verify the deletion: querying the same filter should now return no rows\n",
     "filter_expression = f\"user_id == {user_id} AND paper_file_id == '{paper_file_id}'\"\n",
     "res = client.query(\n",
     "    collection_name=collection_name,\n",
     "    filter=filter_expression,\n",
     "    output_fields=[\"*\"],\n",
     ")\n",
     "\n",
     "len(res)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "11616663-884a-4ea3-b73b-0d631600b974",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
