{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "0fa3c3d0-f8af-4b59-8684-5393d5ea0b45",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['paper_segment_collection']"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from pymilvus import MilvusClient\n",
    "client = MilvusClient(\"./tmp/milvus_demo.db\")\n",
    "client.list_collections()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "7b457e84-2ee6-4ea8-892a-9daca7c5bbf1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "from typing import Dict, List\n",
    "from pymilvus import DataType, MilvusClient, utility\n",
    "\n",
    "def check_database_exists(client: MilvusClient, db_name: str) -> bool:\n",
    "    existing_dbs = client.list_databases()\n",
    "    return db_name in existing_dbs\n",
    "\n",
    "class PaperSegmentModel():\n",
    "    \"\"\"论文切割文档向量数据库\"\"\"\n",
    "    \n",
    "    _DB_NAME = \"paper_segment\"\n",
    "    _COLLECTION_NAME = \"paper_segment_collection\"\n",
    "    \n",
    "    INDEX_VECTOR_DIM = 1024\n",
    "    TEXT_CHUNK_SIZE  = 800\n",
    "    \n",
    "    def __init__(self, milvus_client: MilvusClient) -> None:\n",
    "        self._milvus_client = milvus_client\n",
    "        \n",
    "        # ~ self._init_db() # todo: milvus lite 不支持db操作\n",
    "        self._init_collection()\n",
    "        \n",
    "    def _init_db(self) -> None:\n",
    "        if not check_database_exists(self._milvus_client, PaperSegmentModel._DB_NAME):\n",
    "            self._milvus_client.create_database(\n",
    "                db_name=PaperSegmentModel._DB_NAME\n",
    "            )\n",
    "        \n",
    "        self._milvus_client.use_database(\n",
    "            db_name=PaperSegmentModel._DB_NAME\n",
    "        )\n",
    "        \n",
    "    def _init_collection(self) -> None:\n",
    "        # ~ if not utility.has_collection(PaperSegmentModel._COLLECTION_NAME): # todo: milvus lite 不支持\n",
    "        if not self._milvus_client.has_collection(PaperSegmentModel._COLLECTION_NAME):\n",
    "            self._create_collection()\n",
    "        \n",
    "        self._milvus_client.load_collection(\n",
    "            collection_name=PaperSegmentModel._COLLECTION_NAME\n",
    "        )\n",
    "\n",
    "        res = self._milvus_client.get_load_state(\n",
    "            collection_name=PaperSegmentModel._COLLECTION_NAME\n",
    "        )\n",
    "\n",
    "        print(f'load milvus db {PaperSegmentModel._DB_NAME} collection {PaperSegmentModel._COLLECTION_NAME}: {res}', file=sys.stderr)\n",
    "        \n",
    "    def _create_collection(self) -> None:\n",
    "        # NOTE(review): auto_id=False here conflicts with auto_id=True set on\n",
    "        # the 'id' field below; milvus-lite appears to honour the field-level\n",
    "        # flag (ids are auto-generated), but the two settings should be aligned.\n",
    "        schema = MilvusClient.create_schema(\n",
    "            auto_id=False,\n",
    "            enable_dynamic_field=True,\n",
    "        )\n",
    "\n",
    "        schema.add_field(field_name=\"id\", datatype=DataType.INT64, is_primary=True, auto_id=True)\n",
    "        schema.add_field(field_name=\"embedding_vector\", datatype=DataType.FLOAT_VECTOR, dim=PaperSegmentModel.INDEX_VECTOR_DIM)\n",
    "        schema.add_field(field_name=\"text\", datatype=DataType.VARCHAR, max_length=PaperSegmentModel.TEXT_CHUNK_SIZE)\n",
    "        schema.add_field(field_name=\"meta\", datatype=DataType.JSON)\n",
    "        schema.add_field(field_name=\"splitter_id\", datatype=DataType.VARCHAR, max_length=128)\n",
    "        schema.add_field(field_name=\"user_id\", datatype=DataType.INT64)\n",
    "        schema.add_field(field_name=\"paper_file_id\", datatype=DataType.VARCHAR, max_length=128)\n",
    "        schema.add_field(field_name=\"paper_file_name\", datatype=DataType.VARCHAR, max_length=128)\n",
    "        \n",
    "        index_params = MilvusClient.prepare_index_params()\n",
    "\n",
    "        index_params.add_index(\n",
    "            field_name=\"user_id\",\n",
    "            index_name=\"idx_user_id\",\n",
    "            # ~ index_type=\"STL_SORT\" # todo: milvus lite 疑似不支持\n",
    "            index_type=\"INVERTED\"\n",
    "        )\n",
    "        \n",
    "        index_params.add_index(\n",
    "            field_name=\"paper_file_id\",\n",
    "            index_name=\"idx_paper_file_id\",\n",
    "            index_type=\"INVERTED\"\n",
    "        )\n",
    "            \n",
    "        index_params.add_index(\n",
    "            field_name=\"embedding_vector\",\n",
    "            index_name=\"idx_vector_flat\",\n",
    "            index_type=\"FLAT\",\n",
    "            metric_type=\"COSINE\",\n",
    "            params={}\n",
    "        )\n",
    "        \n",
    "        # 创建集合\n",
    "        collection_name = PaperSegmentModel._COLLECTION_NAME\n",
    "        self._milvus_client.create_collection(collection_name=collection_name, schema=schema, index_params=index_params)\n",
    "        \n",
    "    def paper_segment_data(self, user_id: int, paper_file_id: str, paper_file_name: str, \n",
    "               text: str, embedding_vector: list, splitter_id: str, meta: Dict=None) -> Dict:\n",
    "        \"\"\"Build a paper-segment record dict (does not insert into Milvus).\"\"\"\n",
    "        return {\n",
    "            \"user_id\": user_id,\n",
    "            \"paper_file_id\": paper_file_id,\n",
    "            \"paper_file_name\": paper_file_name,\n",
    "            \"text\": text,\n",
    "            \"embedding_vector\": embedding_vector,\n",
    "            \"meta\": meta if meta else {},\n",
    "            \"splitter_id\": splitter_id\n",
    "        }\n",
    "        \n",
    "    def insert_one(self, user_id: int, paper_file_id: str, paper_file_name: str, \n",
    "               text: str, embedding_vector: list, splitter_id: str, meta: Dict=None) -> Dict:\n",
    "        \"\"\"插入论文片段数据\"\"\"\n",
    "        data = [\n",
    "            self.paper_segment_data(user_id, paper_file_id, paper_file_name, \n",
    "               text, embedding_vector, splitter_id, meta)\n",
    "        ]\n",
    "        \n",
    "        res = self._milvus_client.insert(\n",
    "            collection_name=self._COLLECTION_NAME, \n",
    "            data=data\n",
    "        )\n",
    "            \n",
    "        return res\n",
    "    \n",
    "    def insert_many(self, datas: List[Dict]) -> Dict:\n",
    "        \"\"\"插入论文片段数据\"\"\"\n",
    "        return self._milvus_client.insert(\n",
    "            collection_name=self._COLLECTION_NAME, \n",
    "            data=datas\n",
    "        )\n",
    "\n",
    "    def delete_by(self, user_id: int, paper_file_id: str) -> int:\n",
    "        \"\"\"根据user_id和paper_file_id组合条件删除数据\"\"\"\n",
    "        # NOTE(review): paper_file_id is interpolated directly into the filter\n",
    "        # string; a value containing a single quote would corrupt the expression.\n",
    "        # Validate or sanitize paper_file_id upstream.\n",
    "        filter_expression = f\"user_id == {user_id} AND paper_file_id == '{paper_file_id}'\"\n",
    "        res = self._milvus_client.delete(\n",
    "            collection_name=self._COLLECTION_NAME,\n",
    "            filter=filter_expression\n",
    "        )\n",
    "            \n",
    "        if isinstance(res, list):\n",
    "            deleted_count = len(res)  # list of deleted primary keys\n",
    "        elif isinstance(res, dict):\n",
    "            # MilvusClient.delete returns a dict-like OmitZeroDict; the count is\n",
    "            # stored under the 'delete_count' KEY, so hasattr() would miss it\n",
    "            deleted_count = res.get('delete_count', -1)\n",
    "        elif hasattr(res, 'delete_count'):\n",
    "            deleted_count = res.delete_count\n",
    "        else:\n",
    "            deleted_count = -1  # unknown result type\n",
    "            \n",
    "        return deleted_count\n",
    "\n",
    "    def search_by(self, user_id: int, paper_file_id: str, \n",
    "                    query_vectors: list, top_k: int=5) -> List[List[dict]]:\n",
    "        \"\"\"先按组合条件过滤，再向量检索\"\"\"\n",
    "        # Single request: Milvus applies the scalar filter and the ANN search\n",
    "        # together (not two separate steps).\n",
    "        filter_expression = f\"user_id == {user_id} AND paper_file_id == '{paper_file_id}'\"\n",
    "        filter_res = self._milvus_client.search(\n",
    "            collection_name=self._COLLECTION_NAME,\n",
    "            data=[query_vectors],\n",
    "            anns_field=\"embedding_vector\",\n",
    "            search_params={\"params\": {}},  # No additional parameters required for FLAT\n",
    "            limit=top_k,\n",
    "            filter=filter_expression,\n",
    "            output_fields=[\"text\", \"meta\", \"splitter_id\"]\n",
    "        )\n",
    "            \n",
    "        return filter_res\n",
    "\n",
    "    def check_exist(self, user_id: int, paper_file_id: str) -> bool:\n",
    "        \"\"\"检查user_id和paper_file_id组合条件是否存在数据\"\"\"\n",
    "        # RANDOM_SAMPLE(0.01) removed: sampling only 1% of the matching rows\n",
    "        # can return an empty result even when data exists (false negative for\n",
    "        # an existence check). A filtered query with limit=1 is both correct\n",
    "        # and cheap.\n",
    "        filter_expression = f\"user_id == {user_id} AND paper_file_id == '{paper_file_id}'\"\n",
    "        res = self._milvus_client.query(\n",
    "            collection_name=self._COLLECTION_NAME,\n",
    "            filter=filter_expression,\n",
    "            limit=1\n",
    "        )\n",
    "            \n",
    "        return len(res) > 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "9156506b-ab37-4d2d-9fa3-a3a3782a054f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "load milvus db paper_segment collection paper_segment_collection: {'state': <LoadState: Loaded>}\n"
     ]
    }
   ],
   "source": [
    "model = PaperSegmentModel(client)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "7663e98f-60ca-43e8-ab3c-e076aa8b0c9a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.11/site-packages/requests/__init__.py:86: RequestsDependencyWarning: Unable to find acceptable character detection dependency (chardet or charset_normalizer).\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "dashscope api key:  ········\n"
     ]
    }
   ],
   "source": [
    "import getpass\n",
    "from langchain_community.embeddings import DashScopeEmbeddings\n",
    "\n",
    "embeddings = DashScopeEmbeddings(\n",
    "    model=\"text-embedding-v4\",\n",
    "    dashscope_api_key=getpass.getpass(\"dashscope api key: \"),\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "afc8dd21-d431-4992-90d0-d31fd7894a81",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'id': 460308054764159005, 'distance': 0.31552955508232117, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '**Abstract**', 'meta': {'paragraph_index': 20, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308057047171400, 'distance': 0.29688760638237, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '[15] Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. Exploring\\nthe limits of language modeling. _arXiv preprint arXiv:1602.02410_, 2016.', 'meta': {'paragraph_index': 166, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308057349161321, 'distance': 0.2962988615036011, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '[28] Romain Paulus, Caiming Xiong, and Richard Socher. A deep reinforced model for abstractive\\nsummarization. _arXiv preprint arXiv:1705.04304_, 2017.', 'meta': {'paragraph_index': 179, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308057494126975, 'distance': 0.29592394828796387, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '[40] Muhua Zhu, Yue Zhang, Wenliang Chen, Min Zhang, and Jingbo Zhu. Fast and accurate\\nshift-reduce constituent parsing. In _Proceedings of the 51st Annual Meeting of the ACL (Volume_\\n_1: Long Papers)_, pages 434–443. ACL, August 2013.', 'meta': {'paragraph_index': 191, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308057349161329, 'distance': 0.291282594203949, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '[36] Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, and Zbigniew Wojna.\\nRethinking the inception architecture for computer vision. _CoRR_, abs/1512.00567, 2015.', 'meta': {'paragraph_index': 187, 'segment_index': 1, 'is_recursive': False}}}]"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 测试查询操作\n",
    "vec = embeddings.embed_query(\"这篇论文的主要观点是什么？\")\n",
    "model.search_by(1, \"68a9747e1d0e64cff1c6f7c2\", vec)[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "2c3ae5b2-f0ce-45fc-9099-9c78cef4e143",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 创建提示词模板\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "prompt_template_str = \\\n",
    "\"\"\"\n",
    "基于以下论文中查找到的部分内容片段回答问题。如果内容片段没有相关信息，请说\n",
    "“我无法根据提供的论文内容，回答您的问题”\n",
    "论文内容：{context}\n",
    "问题：{question}\n",
    "回答：\n",
    "\"\"\"\n",
    "\n",
    "prompt = ChatPromptTemplate.from_template(prompt_template_str)\n",
    "\n",
    "msg = prompt.invoke({\"question\": \"我是神奇龙。我帅吗？\", \"context\": \"神奇龙是大帅逼\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "ef5226b4-ccd4-40ed-a91c-77683aad1fae",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n基于以下论文中查找到的部分内容片段回答问题。如果内容片段没有相关信息，请说\\n“我无法根据提供的论文内容，回答您的问题”\\n论文内容：神奇龙是大帅逼\\n问题：我是神奇龙。我帅吗？\\n回答：\\n'"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "msg.messages[0].content"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "ecf7e519-ee68-402d-91c8-4925fc007a1f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "DASHSCOPE_API_KEY:  ········\n"
     ]
    }
   ],
   "source": [
    "# 定义langgraph\n",
    "\n",
    "# 定义状态\n",
    "from typing import List, Dict\n",
    "from typing_extensions import TypedDict\n",
    "class State(TypedDict):\n",
    "    question: str\n",
    "    context: List[Dict]\n",
    "    answer: str\n",
    "\n",
    "# 定义检索步骤\n",
    "def retrieve(state: State):\n",
    "    vec = embeddings.embed_query(state[\"question\"])\n",
    "    res = model.search_by(1, \"68a9747e1d0e64cff1c6f7c2\", vec)\n",
    "    retrieved_docs = res[0]\n",
    "    return {\"context\": retrieved_docs}\n",
    "\n",
    "# 定义生成步骤\n",
    "from langchain_openai import ChatOpenAI\n",
    "from getpass import getpass\n",
    "\n",
    "DASHSCOPE_API_KEY = getpass(\"DASHSCOPE_API_KEY: \")\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    api_key=DASHSCOPE_API_KEY,\n",
    "    base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model=\"qwen3-235b-a22b-instruct-2507\",  # 您可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models\n",
    "    # other params...\n",
    ")\n",
    "def generate(state: State):\n",
    "    context_content = \"\\n\\n\".join(f'{doc[\"meta\"][\"paragraph_index\"]} {doc[\"meta\"][\"segment_index\"]}: \\n{doc[\"text\"]}' for doc in state[\"context\"]) + \\\n",
    "        \"注：paragraph_index代表在文章中的第几段落，segment_index代表在段落中的第几段分割文本\"\n",
    "    messages = [\n",
    "        {\n",
    "            \"role\": \"system\",\n",
    "            \"content\": \"\"\"你是一个AI相关主题论文阅读的助手，面对对人工智能知识的小白用户，能将用户的问题用通俗易懂的方式进行解答。\"\"\"\n",
    "        },\n",
    "        {\n",
    "            \"role\": \"user\",\n",
    "            \"content\": prompt.invoke({\"question\": state[\"question\"], \"context\": context_content}).messages[0].content\n",
    "        }\n",
    "    ]\n",
    "    response = llm.invoke(messages)\n",
    "    return {\"answer\": response}\n",
    "\n",
    "# 构建图\n",
    "from langgraph.graph import START, StateGraph\n",
    "graph = (\n",
    "    StateGraph(State)\n",
    "    .add_sequence([retrieve, generate])\n",
    "    .add_edge(START, \"retrieve\")\n",
    "    .compile()\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "b9e697fa-1e41-4be3-ad39-4b796e519d72",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'question': '这篇论文主要讲了些什么？',\n",
       " 'context': [{'id': 460308054764158997, 'distance': 0.46582749485969543, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '```', 'meta': {'paragraph_index': 12, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308054608707593, 'distance': 0.46582749485969543, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '```', 'meta': {'paragraph_index': 10, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308054608707591, 'distance': 0.46582749485969543, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '```', 'meta': {'paragraph_index': 8, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308054608707589, 'distance': 0.46582749485969543, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '```', 'meta': {'paragraph_index': 6, 'segment_index': 1, 'is_recursive': False}}}, {'id': 460308054608707587, 'distance': 0.46582749485969543, 'entity': {'splitter_id': 'TextParagraphSplitter', 'text': '```', 'meta': {'paragraph_index': 4, 'segment_index': 1, 'is_recursive': False}}}],\n",
       " 'answer': AIMessage(content='我无法根据提供的论文内容，回答您的问题。', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 11, 'prompt_tokens': 151, 'total_tokens': 162, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen3-235b-a22b-instruct-2507', 'system_fingerprint': None, 'id': 'chatcmpl-d5567f31-1c24-9581-82dc-aa8bc74700da', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--83e944cd-7a6c-45f2-97c5-63676b8846d4-0', usage_metadata={'input_tokens': 151, 'output_tokens': 11, 'total_tokens': 162, 'input_token_details': {}, 'output_token_details': {}})}"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "graph.invoke({\"question\": \"这篇论文主要讲了些什么？\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "fc4511fd-afb4-4ca4-9ca7-4b19cd018d9a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "获取到的内容：\n",
      "paragraph_index:20 segment_index:1: \n",
      "**Abstract**\n",
      "\n",
      "paragraph_index:191 segment_index:1: \n",
      "[40] Muhua Zhu, Yue Zhang, Wenliang Chen, Min Zhang, and Jingbo Zhu. Fast and accurate\n",
      "shift-reduce constituent parsing. In _Proceedings of the 51st Annual Meeting of the ACL (Volume_\n",
      "_1: Long Papers)_, pages 434–443. ACL, August 2013.\n",
      "\n",
      "paragraph_index:179 segment_index:1: \n",
      "[28] Romain Paulus, Caiming Xiong, and Richard Socher. A deep reinforced model for abstractive\n",
      "summarization. _arXiv preprint arXiv:1705.04304_, 2017.\n",
      "\n",
      "paragraph_index:162 segment_index:1: \n",
      "[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In _Proceedings of the IEEE Conference on Computer Vision and Pattern_\n",
      "_Recognition_, pages 770–778, 2016.\n",
      "\n",
      "paragraph_index:172 segment_index:1: \n",
      "[21] Oleksii Kuchaiev and Boris Ginsburg. Factorization tricks for LSTM networks. _arXiv preprint_\n",
      "_arXiv:1703.10722_, 2017.注：paragraph_index代表在文章中的第几段落，segment_index代表在段落中的第几段分割文本\n",
      "{'chunk': AIMessageChunk(content='', additional_kwargs={}, response_metadata={}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n",
      "{'chunk': AIMessageChunk(content='我', additional_kwargs={}, response_metadata={}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n",
      "{'chunk': AIMessageChunk(content='无法根据提供的', additional_kwargs={}, response_metadata={}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n",
      "{'chunk': AIMessageChunk(content='论文内容，', additional_kwargs={}, response_metadata={}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n",
      "{'chunk': AIMessageChunk(content='回答您的问题', additional_kwargs={}, response_metadata={}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n",
      "{'chunk': AIMessageChunk(content='。', additional_kwargs={}, response_metadata={}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n",
      "{'chunk': AIMessageChunk(content='', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'model_name': 'qwen3-235b-a22b-instruct-2507'}, id='run--d8f6b7f4-1fd9-4c7d-9e5a-a98eb07a80e4')}\n"
     ]
    }
   ],
   "source": [
    "from langgraph.config import get_stream_writer\n",
    "\n",
    "def generate(state: State):\n",
    "    context_content = \"\\n\\n\".join(f'paragraph_index:{doc[\"meta\"][\"paragraph_index\"]} segment_index:{doc[\"meta\"][\"segment_index\"]}: \\n{doc[\"text\"]}' for doc in state[\"context\"]) + \\\n",
    "        \"注：paragraph_index代表在文章中的第几段落，segment_index代表在段落中的第几段分割文本\"\n",
    "    print(f\"获取到的内容：\\n{context_content}\")\n",
    "    messages = [\n",
    "        {\n",
    "            \"role\": \"system\",\n",
    "            \"content\": \"\"\"你是一个AI相关主题论文阅读的助手，面对对人工智能知识的小白用户，能将用户的问题用通俗易懂的方式进行解答。\"\"\"\n",
    "        },\n",
    "        {\n",
    "            \"role\": \"user\",\n",
    "            \"content\": prompt.invoke({\"question\": state[\"question\"], \"context\": context_content}).messages[0].content\n",
    "        }\n",
    "    ]\n",
    "    response = llm.stream(messages)\n",
    "    writer = get_stream_writer()\n",
    "    # Accumulate chunks: the generator is exhausted by this loop, so returning\n",
    "    # it directly would leave state[\"answer\"] holding an empty iterator.\n",
    "    full_message = None\n",
    "    for chunk in response:\n",
    "        writer({\"chunk\": chunk})\n",
    "        full_message = chunk if full_message is None else full_message + chunk\n",
    "    return {\"answer\": full_message}\n",
    "\n",
    "# 构建图\n",
    "from langgraph.graph import START, StateGraph\n",
    "graph = (\n",
    "    StateGraph(State)\n",
    "    .add_sequence([retrieve, generate])\n",
    "    .add_edge(START, \"retrieve\")\n",
    "    .compile()\n",
    ")\n",
    "\n",
    "for chunk in graph.stream({\"question\": \"这篇论文主要讲了些什么？\"}, stream_mode=\"custom\"):\n",
    "    print(chunk)  # 实时输出\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "f4392f1d-e429-4614-bd65-b9993128f251",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "获取到的内容：\n",
      "paragraph_index:2 segment_index:1: \n",
      "## **Attention Is All You Need**\n",
      "\n",
      "paragraph_index:50 segment_index:1: \n",
      "**3.2.1** **Scaled Dot-Product Attention**\n",
      "\n",
      "paragraph_index:57 segment_index:1: \n",
      "**3.2.2** **Multi-Head Attention**\n",
      "\n",
      "paragraph_index:46 segment_index:1: \n",
      "An attention function can be described as mapping a query and a set of key-value pairs to an output,\n",
      "where the query, keys, values, and output are all vectors. The output is computed as a weighted sum\n",
      "\n",
      "paragraph_index:34 segment_index:1: \n",
      "Self-attention, sometimes called intra-attention is an attention mechanism relating different positions\n",
      "of a single sequence in order to compute a representation of the sequence. Self-attention has been\n",
      "used successfully in a variety of tasks including reading comprehension, abstractive summarization,\n",
      "textual entailment and learning task-independent sentence representations [4, 27, 28, 22].注：paragraph_index代表在文章中的第几段落，segment_index代表在段落中的第几段分割文本\n",
      "{'chunk': AIMessageChunk(content='', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='注意力', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='机制是一种能够', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='将一个', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='查询（query', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='）和一组键值对', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='（key-value pairs）映', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='射到输出的', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='函数，其中查询', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='、键、值和', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='输出都是向量。输出', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='是通过以查询', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='和键之间的相关', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='性作为权重，', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='对值进行加权求', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='和得到的。它', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='可以帮助模型在处理序列', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='数据时，关注', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='到序列中不同', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='位置之间的关系，从而', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='更好地捕捉上下文信息。', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='例如，在自注意力', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='（Self-attention', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='）机制中，序列', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='中的每个位置都可以', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='关注序列中的其他位置，', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='以帮助理解整个序列', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='的含义。', additional_kwargs={}, response_metadata={}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n",
      "{'chunk': AIMessageChunk(content='', additional_kwargs={}, response_metadata={'finish_reason': 'stop', 'model_name': 'qwen3-235b-a22b-instruct-2507'}, id='run--e2826f68-f218-472e-a6be-180f283a94ed')}\n"
     ]
    }
   ],
   "source": [
    "for chunk in graph.stream({\"question\": \"注意力机制是什么？\"}, stream_mode=\"custom\"):\n",
    "    print(chunk)  # 实时输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "5891c1ae-f03d-48a9-a367-5224d7b31ded",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "获取到的内容：\n",
      "paragraph_index:2 segment_index:1: \n",
      "## **Attention Is All You Need**\n",
      "\n",
      "paragraph_index:50 segment_index:1: \n",
      "**3.2.1** **Scaled Dot-Product Attention**\n",
      "\n",
      "paragraph_index:57 segment_index:1: \n",
      "**3.2.2** **Multi-Head Attention**\n",
      "\n",
      "paragraph_index:46 segment_index:1: \n",
      "An attention function can be described as mapping a query and a set of key-value pairs to an output,\n",
      "where the query, keys, values, and output are all vectors. The output is computed as a weighted sum\n",
      "\n",
      "paragraph_index:34 segment_index:1: \n",
      "Self-attention, sometimes called intra-attention is an attention mechanism relating different positions\n",
      "of a single sequence in order to compute a representation of the sequence. Self-attention has been\n",
      "used successfully in a variety of tasks including reading comprehension, abstractive summarization,\n",
      "textual entailment and learning task-independent sentence representations [4, 27, 28, 22].注：paragraph_index代表在文章中的第几段落，segment_index代表在段落中的第几段分割文本\n",
      "content='' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='注意力' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='机制是一种' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='能够' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='将一个' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='查询（query）和一组' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='键值对（keys' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='和values）进行' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='映射，从而得到' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='输出的函数。其中' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='，查询、键、' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='值以及输出都是向' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='量，输出是' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='通过将值进行' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='加权求和得到' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='的。这种机制可以' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='用于关联序列中不同位置' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='的信息，从而帮助模型' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='更好地理解整个序列' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='。比如在“' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='自注意力”（' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='Self-attention）中，' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='序列中的每个位置都' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='与其他位置进行关联，以' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='计算出该序列' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='的表示。这种方法' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='已被成功应用于阅读' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='理解、摘要生成' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='、文本推理等任务中' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='。' additional_kwargs={} response_metadata={} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n",
      "content='' additional_kwargs={} response_metadata={'finish_reason': 'stop', 'model_name': 'qwen3-235b-a22b-instruct-2507'} id='run--0b6eca9e-acfc-4cb2-83c4-618e0e1a4df4'\n"
     ]
    }
   ],
   "source": [
    "for chunk in graph.stream({\"question\": \"注意力机制是什么？\"}, stream_mode=\"custom\"):\n",
    "    print(chunk[\"chunk\"])  # 实时输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00979175-849c-469e-91c1-4731e096302b",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
