{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "915eec7a-15cc-42eb-ac62-7f43885a4744",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.11/site-packages/requests/__init__.py:86: RequestsDependencyWarning: Unable to find acceptable character detection dependency (chardet or charset_normalizer).\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "abstract_reader_system_msg = \"\"\"# 系统提示词：论文标题与摘要信息提取专家\n",
    "你需要扮演「论文信息提取专家」，基于用户提供的论文「标题文本」和「摘要文本」（二者均为论文分割后的原始内容，需优先使用文本中的原文表述，避免主观编造），提取出阅读论文第一步（标题+摘要）应获取的核心信息，并严格按照指定JSON格式输出。\n",
    "\n",
    "典型输入格式为：\n",
    "论文文本选段：...\n",
    "阅读提示：...\n",
    "\n",
    "## 一、提取信息类别及定义（需全覆盖，无对应信息时标注为\"无\"）\n",
    "请严格按照以下8个类别提取信息，每个类别需符合定义且信息精准：\n",
    "1. \"paper_basic_info\"：论文基础信息\n",
    "   - 包含\"title\"（论文完整标题，直接复制用户提供的标题文本，不可修改）、\"authors\"（摘要中提及的作者姓名，若摘要未提及则填\"无\"，多名作者用分号分隔）、\"keywords\"（从标题/摘要中提取的核心关键词，需是领域内核心术语，如研究主题、方法、对象等，3-5个为宜，用分号分隔，无则填\"无\"）。\n",
    "\n",
    "2. \"research_topic\"：研究主题\n",
    "   - 定义：论文聚焦的核心领域、具体研究方向或核心问题范畴（如“深度学习在医学CT影像分割中的应用研究”“基于强化学习的自动驾驶路径规划优化”），需从标题和摘要中整合，避免过于宽泛。\n",
    "\n",
    "3. \"existing_problem\"：现有研究存在的问题（研究背景痛点）\n",
    "   - 定义：摘要中提及的「当前领域内未解决的缺陷、不足或挑战」（如“传统U-Net模型在小样本医学影像分割中精度低”“现有路径规划算法未考虑实时交通拥堵动态变化”），需直接对应摘要中“前人研究的不足”描述，无则填\"无\"。\n",
    "\n",
    "4. \"research_goal\"：研究目标\n",
    "   - 定义：本文明确要解决的「具体问题」或「达成的具体目标」（如“提出一种轻量化改进U-Net模型，提升小样本CT影像分割精度”“设计一种融合实时交通数据的强化学习路径规划算法”），需体现“本文要做什么”，而非“做了什么”。\n",
    "\n",
    "5. \"core_method\"：核心方法/技术手段\n",
    "   - 定义：摘要中提及的「本文用于解决问题的关键方法、模型、实验设计或技术路径」（如“基于注意力机制改进U-Net模型；采用K折交叉验证进行实验”“提出双分支特征融合网络；使用Cityscapes数据集训练验证”），需包含“方法名称+核心操作”，无则填\"无\"。\n",
    "\n",
    "6. \"key_result\"：关键实验/研究结果\n",
    "   - 定义：摘要中提及的「最核心、最具代表性的实验结论或研究发现」（需包含具体数据或明确结论，如“改进模型在LIDC数据集上Dice系数达0.89，较传统U-Net提升8%”“验证了所提算法在拥堵场景下路径规划效率提升20%”），避免笼统表述，无则填\"无\"。\n",
    "\n",
    "7. \"research_significance\"：研究意义/价值\n",
    "   - 定义：摘要中提及的「本文研究的理论价值或实际应用价值」（如“为小样本医学影像分割提供新方法，助力临床实时诊断”“为自动驾驶在复杂交通场景下的路径规划提供技术支撑”），需体现“本文研究的作用”，无则填\"无\"。\n",
    "\n",
    "8. \"preliminary_judgment\"：是否值得后续阅读的初步判断\n",
    "   - 定义：基于上述信息，给出“值得”或“不值得”的判断，并简要说明理由（如“值得，因研究主题与医学影像分割方向高度相关，核心方法创新且关键结果显著”“不值得，因研究主题与用户关注的自然语言处理领域无关”），理由需紧扣提取的信息，不主观臆断。\n",
    "\n",
    "## 二、输出格式要求（必须严格遵守，不可增减字段或修改格式）\n",
    "1. 仅输出JSON文本，无任何额外解释性文字、换行或注释；\n",
    "2. JSON的键名需与上述8个类别完全一致（如\"paper_basic_info\"、\"research_topic\"），不可修改；\n",
    "3. 每个键对应的值为字符串类型，若信息包含多个要点，用“；”分隔，语言需精炼、无冗余，优先使用论文原文中的核心表述；\n",
    "4. 无对应信息时，值必须填写“无”（不可留空或省略字段）。\n",
    "5. 除paper_basic_info外，其余的每个字段回答应该精简，每个字段不要超过600个字符。\n",
    "\n",
    "## 三、提取规则（需严格遵守）\n",
    "1. 准确性：所有信息必须来自用户提供的“标题文本”和“摘要文本”，不可添加文本外的信息，不可主观推测或编造；\n",
    "2. 完整性：8个信息类别需全部覆盖，无信息则填“无”，不可遗漏任何字段；\n",
    "3. 简洁性：每个类别的值需精炼，去除重复表述（如“核心方法”中已提及的模型，无需在“研究主题”中重复完整模型名），避免长句堆砌。\n",
    "\n",
    "## 四、输入示例（供参考，用户实际输入为具体论文的标题和摘要文本）\n",
    "用户输入：\n",
    "标题文本：基于注意力机制的轻量化U-Net在小样本CT影像分割中的应用\n",
    "摘要文本：作者：张三；李四。现有传统U-Net模型在小样本医学CT影像分割任务中，因参数冗余导致分割精度低（仅78%），且推理速度慢，无法满足临床实时诊断需求。本文以“提升小样本CT影像分割精度与速度”为目标，提出一种融合通道注意力模块的轻量化U-Net模型（LA-U-Net），通过减少卷积层参数、添加注意力权重优化特征提取。实验在LIDC小样本CT数据集上验证，结果显示LA-U-Net的Dice系数达0.89，较传统U-Net提升11%，推理速度提升2倍。该研究为临床小样本CT影像的快速精准分割提供了可行方案，适用于基层医院诊断场景。\n",
    "\n",
    "## 五、输出示例（供参考，需严格模仿此格式）\n",
    "{\n",
    "  \"paper_basic_info\": {\n",
    "    \"title\": \"基于注意力机制的轻量化U-Net在小样本CT影像分割中的应用\",\n",
    "    \"authors\": \"张三；李四\",\n",
    "    \"keywords\": \"注意力机制；轻量化U-Net；小样本CT影像；影像分割\"\n",
    "  },\n",
    "  \"research_topic\": \"轻量化U-Net模型在小样本CT影像分割中的应用研究\",\n",
    "  \"existing_problem\": \"传统U-Net模型在小样本CT影像分割中精度低（仅78%）；推理速度慢，无法满足临床实时诊断需求\",\n",
    "  \"research_goal\": \"提出融合通道注意力模块的轻量化U-Net模型（LA-U-Net），提升小样本CT影像分割精度与推理速度\",\n",
    "  \"core_method\": \"设计融合通道注意力的轻量化U-Net模型（LA-U-Net）；减少卷积层参数；在LIDC小样本CT数据集上进行实验验证\",\n",
    "  \"key_result\": \"LA-U-Net在LIDC数据集上Dice系数达0.89，较传统U-Net提升11%；推理速度提升2倍\",\n",
    "  \"research_significance\": \"为临床小样本CT影像的快速精准分割提供可行方案；适用于基层医院诊断场景\",\n",
    "  \"preliminary_judgment\": \"值得，因研究主题聚焦CT影像分割（若用户关注医学影像领域则高度相关），核心方法有创新，关键结果数据显著且应用价值明确\"\n",
    "}\n",
    "\n",
    "现在，请接收用户提供的论文「标题文本」和「摘要文本」，严格按照上述要求提取信息并输出JSON。\n",
    "\"\"\"\n",
    "\n",
    "# 第一段内容阅读提示\n",
    "first_read_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文文本选段：{context}\n",
    "阅读提示：\n",
    "1、阅读建议：{read_tip}\n",
    "\"\"\")\n",
    "\n",
    "# 后续内容阅读提示\n",
    "next_read_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文文本选段：{context}\n",
    "阅读提示：\n",
    "1、阅读建议：{read_tip}\n",
    "2、论文信息：（基于已读取内容）{paper_info}\n",
    "3、当前总结：（基于已读取内容）{summary}\n",
    "\"\"\")\n",
    "\n",
    "def merge_paper_basic_info(paper_data):\n",
    "    \"\"\"\n",
    "    方法1：将paper_basic_info字段合并为一段文本\n",
    "    :param paper_data: 包含论文所有信息的字典（需包含paper_basic_info键）\n",
    "    :return: 合并后的论文基础信息文本字符串\n",
    "    \"\"\"\n",
    "    # 提取paper_basic_info子字典，若不存在则初始化空字典\n",
    "    basic_info = paper_data.get(\"paper_basic_info\", {})\n",
    "\n",
    "    # 按字段含义拼接文本，严格遵循\"中文含义：内容\"格式\n",
    "    title_content = basic_info.get(\"title\", \"无\")  # 论文完整标题直接复制，不可修改\n",
    "    authors_content = basic_info.get(\"authors\", \"无\")  # 作者姓名用分号分隔\n",
    "    keywords_content = basic_info.get(\"keywords\", \"无\")  # 核心关键词用分号分隔\n",
    "\n",
    "    # 组合成完整段落，各字段用分号分隔，提升可读性\n",
    "    merged_text = (\n",
    "       f\"论文完整标题：{title_content}；\"\n",
    "       f\"摘要中提及的作者姓名，若摘要未提及则填'无'，多名作者用分号分隔：{authors_content}；\"\n",
    "       f\"从标题/摘要中提取的核心关键词，需是领域内核心术语，如研究主题、方法、对象等，3-5个为宜，用分号分隔，无则填'无'：{keywords_content}\"\n",
    "    )\n",
    "\n",
    "    return merged_text\n",
    "\n",
    "def merge_other_research_fields(paper_data):\n",
    "    \"\"\"\n",
    "    方法2：将除paper_basic_info外的其余字段合并为一段文本\n",
    "    :param paper_data: 包含论文所有信息的字典\n",
    "    :return: 合并后的研究相关信息文本字符串\n",
    "    \"\"\"\n",
    "    # 定义需合并的字段及其对应的中文含义与说明（映射关系严格遵循用户给定定义）\n",
    "    field_mapping = {\n",
    "        \"research_topic\": (\n",
    "            \"研究主题\",\n",
    "            \"论文聚焦的核心领域、具体研究方向或核心问题范畴（如“深度学习在医学CT影像分割中的应用研究”“基于强化学习的自动驾驶路径规划优化”），需从标题和摘要中整合，避免过于宽泛：\"\n",
    "        ),\n",
    "        \"existing_problem\": (\n",
    "            \"现有研究存在的问题（研究背景痛点）\",\n",
    "            \"摘要中提及的「当前领域内未解决的缺陷、不足或挑战」（如“传统U-Net模型在小样本医学影像分割中精度低”“现有路径规划算法未考虑实时交通拥堵动态变化”），需直接对应摘要中“前人研究的不足”描述，无则填'无'：\"\n",
    "        ),\n",
    "        \"research_goal\": (\n",
    "            \"研究目标\",\n",
    "            \"本文明确要解决的「具体问题」或「达成的具体目标」（如“提出一种轻量化改进U-Net模型，提升小样本CT影像分割精度”“设计一种融合实时交通数据的强化学习路径规划算法”），需体现“本文要做什么”，而非“做了什么”：\"\n",
    "        ),\n",
    "        \"core_method\": (\n",
    "            \"核心方法/技术手段\",\n",
    "            \"摘要中提及的「本文用于解决问题的关键方法、模型、实验设计或技术路径」（如“基于注意力机制改进U-Net模型；采用K折交叉验证进行实验”“提出双分支特征融合网络；使用Cityscapes数据集训练验证”），需包含“方法名称+核心操作”，无则填'无'：\"\n",
    "        ),\n",
    "        \"key_result\": (\n",
    "            \"关键实验/研究结果\",\n",
    "            \"摘要中提及的「最核心、最具代表性的实验结论或研究发现」（需包含具体数据或明确结论，如“改进模型在LIDC数据集上Dice系数达0.89，较传统U-Net提升8%”“验证了所提算法在拥堵场景下路径规划效率提升20%”），避免笼统表述，无则填'无'：\"\n",
    "        ),\n",
    "        \"research_significance\": (\n",
    "            \"研究意义/价值\",\n",
    "            \"摘要中提及的「本文研究的理论价值或实际应用价值」（如“为小样本医学影像分割提供新方法，助力临床实时诊断”“为自动驾驶在复杂交通场景下的路径规划提供技术支撑”），需体现“本文研究的作用”，无则填'无'：\"\n",
    "        ),\n",
    "        \"preliminary_judgment\": (\n",
    "            \"是否值得后续阅读的初步判断\",\n",
    "            \"基于上述信息，给出“值得”或“不值得”的判断，并简要说明理由（如“值得，因研究主题与医学影像分割方向高度相关，核心方法创新且关键结果显著”“不值得，因研究主题与用户关注的自然语言处理领域无关”），理由需紧扣提取的信息，不主观臆断：\"\n",
    "        )\n",
    "    }\n",
    "        \n",
    "    # 初始化文本列表，逐字段拼接内容\n",
    "    merged_parts = []\n",
    "    for field_key, (field_name, field_desc) in field_mapping.items():\n",
    "        # 获取字段内容，若不存在则填\"无\"\n",
    "        field_content = paper_data.get(field_key, \"无\")\n",
    "        # # 按\"中文含义：说明+内容\"格式拼接，加入段落分隔符\n",
    "        # part = f\"{field_name}：{field_desc}{field_content}\"\n",
    "        part = f\"{field_name}：{field_content}\"\n",
    "        merged_parts.append(part)\n",
    "    \n",
    "    # 将所有字段内容合并为一段文本，用分号+换行分隔以优化阅读体验\n",
    "    return \"；\\n\".join(merged_parts)\n",
    "\n",
    "abstract_evaluator_system_msg = \"\"\"你是一个论文阅读助手，专注于论文开头的标题和摘要部分。一般一篇论文的典型结构是，在论文开头会有论文的标题和摘要。现对你的工作描述如下：\n",
    "一、角色与任务目标​\n",
    "你需扮演 “论文文本片段分析助手”，核心任务是：基于输入的论文文本片段（含当前截取片段 + 已读历史片段，如有），判断论文标题是否完整、摘要部分是否已读取完毕；若未完毕，指引继续读取下一片段，若已完毕，确认结束。​\n",
    "二、输入信息​\n",
    "需接收两类信息：​\n",
    "当前论文文本截取片段（可能包含标题、作者、摘要片段、部分引言等内容）；​\n",
    "一些文本描述信息和相关上下文信息。​\n",
    "三、核心判断逻辑​\n",
    "（一）先判断 “标题完整性”​\n",
    "若当前 + 历史片段中，标题存在以下情况，判定 “标题未完整”，需继续读取（返回 NEXT）：​\n",
    "标题仅显示部分内容（如末尾为 “...”“-”，或句子未结束，如 “基于深度学习的图像分割技术在”）；​\n",
    "未出现完整标题（仅显示作者、机构，或直接开始摘要内容但无标题）。​\n",
    "若标题文字完整（语义连贯、无截断，含研究主题、核心对象 / 方法，无明显缺失），进入 “摘要完整性判断”。​\n",
    "（二）再判断 “摘要完整性”​\n",
    "摘要完整需满足：片段中包含摘要的5 类核心要素（无遗漏），且片段末尾无 “摘要未结束” 的信号（如语义截断、未完成句子），同时无 “引言启动” 信号（如出现 “1. 引言”“1. Introduction”“一、研究背景” 等）。​\n",
    "摘要核心要素判定标准：​\n",
    "必须包含：研究背景 / 待解决的领域问题（如 “现有 XX 方法存在 XX 缺陷”）、研究目标（如 “本文旨在解决 XX 问题”）、核心方法 / 技术路径（如 “提出 XX 模型 / 采用 XX 实验设计”）、关键结果 / 发现（如 “在 XX 数据集上准确率达 XX%”）、研究结论 / 意义（如 “为 XX 领域提供 XX 参考”）；​\n",
    "若当前 + 历史片段中，缺失任意 1 类核心要素，或要素描述不完整（如仅提 “提出新方法” 但未说明方法名称 / 逻辑），判定 “摘要未完毕”；​\n",
    "若片段末尾出现 “摘要”“Abstract” 的结束标识（如 “摘要结束”“Keywords”“关键词”，或直接衔接引言开头，或者出现明显代表正式文本开始的标题文字），且 5 类要素完整，判定 “摘要已完毕”。​\n",
    "四、输出格式（严格 JSON）​\n",
    "仅允许输出以下结构的 JSON，无额外文字：​\n",
    "{​\n",
    "\"judgment\": \"END\" 或 \"NEXT\",​\n",
    "\"reading_suggestion\": \"若 judgment 为 END：说明标题已完整 + 摘要 5 类要素无缺失，确认无需继续读取；若 judgment 为 NEXT：先总结当前已读内容（含完整标题 / 部分标题、摘要已获取的要素），再明确下一片段需重点补充的缺失要素（如 “当前已读标题为 XX，摘要已获取研究背景，需补充核心方法与关键结果”），建议关注片段是否出现 “Keywords”“引言” 等标识\"​\n",
    "}​\n",
    "五、约束​\n",
    "reading_suggestion 字数需少于 800 字，语言简洁，仅聚焦 “已读总结 + 下一段关注重点”，不展开无关内容。\n",
    "\"\"\"\n",
    "\n",
    "# 后续内容阅读提示\n",
    "judge_next_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文文本选段：{context}\n",
    "阅读提示：\n",
    "1、论文信息：（基于已读取内容）{paper_info}\n",
    "2、当前总结：（基于已读取内容）{my_summary}\n",
    "\"\"\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "e4c977cb-f68a-48ea-9944-1d80a0493a9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from typing import Dict, List, TypedDict\n",
    "\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
    "from langgraph.graph import StateGraph, START, END\n",
    "\n",
    "\n",
    "class AbstractState(TypedDict):\n",
    "    paper_md_list: List[str]\n",
    "    \n",
    "    index: int\n",
    "    text: str\n",
    "\n",
    "    my_summary: Dict\n",
    "\n",
    "    next_or_end: str\n",
    "    read_tip: str\n",
    "\n",
    "class PaperAbstractExtractor():\n",
    "    \"\"\"\n",
    "    论文摘要提取器：\n",
    "    负责读取论文开头的标题和摘要部分，提取出论文结构信息和摘要中的关键信息\n",
    "    \"\"\"\n",
    "    \n",
    "    first_read_tip = \"这是分割后论文片段的第一段（前3600字符），论文的标题和作者极有可能出现在这个地方，输入论文片段文本为markdown格式\"\n",
    "    \n",
    "    def __init__(self, llm: ChatOpenAI):\n",
    "        self._json_llm = llm.bind(response_format={\"type\": \"json_object\"})\n",
    "        self._text_splitter = RecursiveCharacterTextSplitter(\n",
    "            chunk_size=3600,\n",
    "            chunk_overlap=480,\n",
    "            length_function=len,\n",
    "            separators=[\"\\n\\n\",\n",
    "                        \".\", \"。\", \"\\u3002\",\n",
    "                        \",\", \"，\", \"\\uff0c\",\n",
    "                        \"\\n\",\n",
    "                        \" \", \"\\u3000\", \"\\u200b\",    # 空格/全角空格/零宽空格\n",
    "                        ''],\n",
    "            is_separator_regex=False,\n",
    "        )\n",
    "        \n",
    "        self._build_graph()\n",
    "        \n",
    "    def extract(self, paper_md_text: str):\n",
    "        \"\"\"Split the paper markdown into chunks and run the title/abstract\n",
    "        extraction workflow over them.\n",
    "\n",
    "        :param paper_md_text: full paper text in markdown format\n",
    "        :return: final graph state dict (includes \"my_summary\" with the\n",
    "            extracted title/abstract information)\n",
    "        \"\"\"\n",
    "        split_text_list = self._text_splitter.split_text(paper_md_text)\n",
    "        # Raise the recursion limit (LangGraph default is 25) so that long\n",
    "        # front matter cannot abort the text->read->evaluate loop early;\n",
    "        # mirrors the {\"recursion_limit\": 999} config used by By3StepsSplitter.\n",
    "        return self._workflow.invoke(\n",
    "            {\"paper_md_list\": split_text_list, \"index\": 0, \"read_tip\": PaperAbstractExtractor.first_read_tip},\n",
    "            {\"recursion_limit\": 999},\n",
    "        )\n",
    "        \n",
    "    def _text_generator(self, state: AbstractState):\n",
    "        text_index = state[\"index\"]\n",
    "        paper_md_list = state[\"paper_md_list\"]\n",
    "        if text_index >= len(paper_md_list):\n",
    "            print(f\"\\n_text_generator:\\nindex:{text_index+1}\\nnext_or_end:END\")\n",
    "            return {\"next_or_end\": \"END\"}\n",
    "        print(f\"\\n_text_generator:\\nindex:{text_index+1}\\ntext:{paper_md_list[text_index][:50]}\\nnext_or_end:NEXT\")\n",
    "        return {\"index\": text_index+1, \"text\": paper_md_list[text_index], \"next_or_end\": \"NEXT\"}\n",
    "        \n",
    "    def _abstract_reader(self, state: AbstractState):\n",
    "        \"\"\"论文Abstract解析提取器\"\"\"\n",
    "        text_index = state[\"index\"]\n",
    "        user_content = None\n",
    "        if (text_index == 1):\n",
    "            user_content = first_read_prompt.invoke({\"context\": state[\"text\"], \"read_tip\": state[\"read_tip\"]}).messages[0].content\n",
    "        else:\n",
    "            my_summary = state[\"my_summary\"]\n",
    "            # 生成论文基础信息文本\n",
    "            paper_info = merge_paper_basic_info(my_summary)\n",
    "            # 生成其他研究字段合并文本\n",
    "            summary = merge_other_research_fields(my_summary)\n",
    "            user_content = next_read_prompt.invoke({\"context\": state[\"text\"], \"read_tip\": state[\"read_tip\"],\n",
    "                                                    \"paper_info\": paper_info, \"summary\": summary}).messages[0].content\n",
    "        resp = self._json_llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": abstract_reader_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        json_res = json.loads(resp.content)\n",
    "        print(f\"\\n_abstract_reader:\\nindex:{text_index}\\nmy_summary:{json_res}\\n\")\n",
    "        return {\"my_summary\": json_res}\n",
    "    \n",
    "    def _abstract_evaluator(self, state: AbstractState):\n",
    "        \"\"\"评估论文Abstract是否读完\"\"\"\n",
    "        my_summary = state[\"my_summary\"]\n",
    "        # 生成论文基础信息文本\n",
    "        paper_info = merge_paper_basic_info(my_summary)\n",
    "        # 生成其他研究字段合并文本\n",
    "        summary = merge_other_research_fields(my_summary)\n",
    "        user_content = judge_next_prompt.invoke({\"context\": state[\"text\"],\n",
    "                                                    \"paper_info\": paper_info, \"my_summary\": summary}).messages[0].content\n",
    "        resp = self._json_llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": abstract_evaluator_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        json_res = json.loads(resp.content)\n",
    "        print(f'\\n_abstract_evaluator:\\nindex:{state[\"index\"]}\\nnext_or_end:{json_res[\"judgment\"]}\\nread_tip:{json_res[\"reading_suggestion\"]}\\n')\n",
    "        return {\"next_or_end\": json_res[\"judgment\"], \"read_tip\": json_res[\"reading_suggestion\"]}\n",
    "    \n",
    "    def _next_route(self, state: AbstractState):\n",
    "        print(f'\\n_next_route:\\nindex:{state[\"index\"]}\\nnext_or_end:{state[\"next_or_end\"]}\\n')\n",
    "        return state[\"next_or_end\"]\n",
    "    \n",
    "    def _build_graph(self):\n",
    "        self._graph = StateGraph(AbstractState)\n",
    "\n",
    "        # Add the nodes\n",
    "        self._graph.add_node(\"text_generator\", self._text_generator)\n",
    "        self._graph.add_node(\"abstract_reader\", self._abstract_reader)\n",
    "        self._graph.add_node(\"abstract_evaluator\", self._abstract_evaluator)\n",
    "\n",
    "        # Add edges to connect nodes\n",
    "        self._graph.add_edge(START, \"text_generator\")\n",
    "        self._graph.add_conditional_edges(\n",
    "            \"text_generator\",\n",
    "            self._next_route,\n",
    "            {\n",
    "                \"END\": END,\n",
    "                \"NEXT\": \"abstract_reader\",\n",
    "            },\n",
    "        )\n",
    "        self._graph.add_edge(\"abstract_reader\", \"abstract_evaluator\")\n",
    "        self._graph.add_conditional_edges(\n",
    "            \"abstract_evaluator\",\n",
    "            self._next_route,\n",
    "            {\n",
    "                \"END\": END,\n",
    "                \"NEXT\": \"text_generator\",\n",
    "            },\n",
    "        )\n",
    "\n",
    "        # Compile the workflow\n",
    "        self._workflow = self._graph.compile()\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "82910a9c-7ee8-46db-9c09-5ce2eaa94de1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "dashscope api key:  ········\n"
     ]
    }
   ],
   "source": [
    "import getpass\n",
    "dashscope_api_key=getpass.getpass(\"dashscope api key: \")\n",
    "from langchain_openai import ChatOpenAI\n",
    "llm = ChatOpenAI(\n",
    "    api_key=dashscope_api_key,\n",
    "    base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model=\"qwen3-235b-a22b-instruct-2507\",  # 您可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models\n",
    "    # other params...\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "923fa4b4-7c60-418c-a801-0dc0d94e2235",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "40199"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import os\n",
    "import re\n",
    "import time\n",
    "from typing import List\n",
    "import fitz\n",
    "import pymupdf4llm\n",
    "\n",
    "def is_page_number(span, page_height, page_width) -> bool:\n",
    "    \"\"\"页码判断逻辑\"\"\"\n",
    "    # 预处理：去除首尾空白字符（包括换行符）\n",
    "    text = span[\"text\"].strip()\n",
    "    \n",
    "    # 规则1：位置检测（底部10%区域）\n",
    "    is_bottom = span[\"bbox\"][3] > page_height * 0.9\n",
    "    \n",
    "    # 规则2：宽度检测（不超过页面宽度20%）\n",
    "    is_narrow = (span[\"bbox\"][2] - span[\"bbox\"][0]) < page_width * 0.2\n",
    "    \n",
    "    # 增强版规则3：内容检测（支持多种页码格式）\n",
    "    # 匹配模式包括：\n",
    "    # - 纯数字 \"1\", \"23\"\n",
    "    # - \"Page 1\", \"page 5\" （大小写不敏感）\n",
    "    # - 可能包含末尾空格/换行（已被strip处理）\n",
    "    page_pattern = r'^(?:page\\s*)?\\d+\\s*$'\n",
    "    is_valid_format = bool(re.match(page_pattern, text, re.IGNORECASE))\n",
    "    \n",
    "    return is_bottom and is_narrow and is_valid_format\n",
    "\n",
    "def parse_pdf2md(file_path: str) -> str:\n",
    "    doc = fitz.open(file_path)\n",
    "    md_pages = []\n",
    "\n",
    "    for page in doc:\n",
    "        page_width = page.rect.width\n",
    "        page_height = page.rect.height\n",
    "        text_dict = page.get_text(\"dict\")\n",
    "        \n",
    "        # 初始化裁剪高度\n",
    "        crop_height = page_height\n",
    "        \n",
    "        # 检测页码（仅检查最后一个block的最后一行）\n",
    "        if text_dict[\"blocks\"]:\n",
    "            last_block = text_dict[\"blocks\"][-1]\n",
    "            if \"lines\" in last_block and last_block[\"lines\"]:\n",
    "                last_line = last_block[\"lines\"][-1]\n",
    "                if \"spans\" in last_line and last_line[\"spans\"]:\n",
    "                    last_span = last_line[\"spans\"][-1]\n",
    "                    if is_page_number(last_span, page_height, page_width):\n",
    "                        crop_height = last_span[\"bbox\"][1] - (last_span[\"size\"] * 0.5)\n",
    "\n",
    "        # 创建裁剪页面\n",
    "        tmp_doc = fitz.open()\n",
    "        new_page = tmp_doc.new_page(\n",
    "            width=page.rect.width,\n",
    "            height=crop_height\n",
    "        )\n",
    "        new_page.show_pdf_page(\n",
    "            new_page.rect,\n",
    "            doc,\n",
    "            page.number,\n",
    "            clip=fitz.Rect(0, 0, page.rect.width, crop_height)\n",
    "        )\n",
    "        \n",
    "        md_pages.append(pymupdf4llm.to_markdown(tmp_doc))\n",
    "        \n",
    "    return \"\\n\".join(md_pages)\n",
    "\n",
    "md_text = parse_pdf2md(\"./tmp/Attention Is All You Need.pdf\")\n",
    "len(md_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "e97ea5cc-9731-49ef-9563-67029b4a3906",
   "metadata": {},
   "outputs": [],
   "source": [
    "extractor = PaperAbstractExtractor(llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "ec244546-4560-4b0e-8c80-cca2c64d0261",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "_text_generator:\n",
      "index:1\n",
      "text:Provided proper attribution is provided, Google he\n",
      "next_or_end:NEXT\n",
      "\n",
      "_next_route:\n",
      "index:1\n",
      "next_or_end:NEXT\n",
      "\n",
      "\n",
      "_abstract_reader:\n",
      "index:1\n",
      "my_summary:{'paper_basic_info': {'title': 'Attention Is All You Need', 'authors': 'Niki Parmar; Ashish Vaswani; Llion Jones; Noam Shazeer; Jakob Uszkoreit; Aidan N. Gomez; Łukasz Kaiser; Illia Polosukhin', 'keywords': 'Transformer；attention mechanism；machine translation；sequence transduction；neural networks'}, 'research_topic': '基于纯注意力机制的序列转导模型研究', 'existing_problem': '主流序列转导模型依赖复杂的循环或卷积神经网络，难以并行化，训练耗时长', 'research_goal': '提出一种完全基于注意力机制、摒弃循环与卷积的新型网络架构Transformer，提升模型并行化能力与训练效率', 'core_method': '设计纯注意力驱动的Transformer架构；采用缩放点积注意力与多头注意力机制；在机器翻译任务上进行实验验证', 'key_result': '在WMT 2014英译德任务上达到28.4 BLEU，超过此前最佳结果2 BLEU以上；在英译法任务上3.5天训练获得41.8 BLEU，训练成本显著降低', 'research_significance': '为序列建模提供高效、可并行的新架构，推动机器翻译与自然语言处理领域发展', 'preliminary_judgment': '值得，因提出革命性Transformer架构，核心方法创新性强，实验结果显著优于已有模型，对后续NLP研究具有重大影响'}\n",
      "\n",
      "\n",
      "_abstract_evaluator:\n",
      "index:1\n",
      "next_or_end:END\n",
      "read_tip:标题已完整，为'Attention Is All You Need'。摘要部分包含全部5类核心要素：研究背景（主流序列转导模型依赖RNN/CNN，难以并行化）；研究目标（提出完全基于注意力机制的Transformer架构）；核心方法（引入缩放点积注意力、多头注意力，摒弃循环与卷积结构）；关键结果（英-德翻译28.4 BLEU，提升超2 BLEU；英-法41.8 BLEU，训练成本显著降低）；研究结论（Transformer在机器翻译与句法分析任务上表现优异，泛化能力强）。摘要末尾无截断，且紧接'1 Introduction'，标志正文开始，确认摘要已完毕，无需继续读取。\n",
      "\n",
      "\n",
      "_next_route:\n",
      "index:1\n",
      "next_or_end:END\n",
      "\n"
     ]
    }
   ],
   "source": [
    "res = extractor.extract(md_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "17f2cf23-3c89-44ea-a580-8a91fb465bec",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'dict'>\n"
     ]
    }
   ],
   "source": [
    "print(type(res))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "66d1c5bb-3765-4e84-aa6b-0f4255bd6054",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'paper_basic_info': {'title': 'Attention Is All You Need',\n",
       "  'authors': 'Niki Parmar; Ashish Vaswani; Llion Jones; Noam Shazeer; Jakob Uszkoreit; Aidan N. Gomez; Łukasz Kaiser; Illia Polosukhin',\n",
       "  'keywords': 'Transformer；attention mechanism；machine translation；sequence transduction；neural networks'},\n",
       " 'research_topic': '基于纯注意力机制的序列转导模型研究',\n",
       " 'existing_problem': '主流序列转导模型依赖复杂的循环或卷积神经网络，难以并行化，训练耗时长',\n",
       " 'research_goal': '提出一种完全基于注意力机制、摒弃循环与卷积的新型网络架构Transformer，提升模型并行化能力与训练效率',\n",
       " 'core_method': '设计纯注意力驱动的Transformer架构；采用缩放点积注意力与多头注意力机制；在机器翻译任务上进行实验验证',\n",
       " 'key_result': '在WMT 2014英译德任务上达到28.4 BLEU，超过此前最佳结果2 BLEU以上；在英译法任务上3.5天训练获得41.8 BLEU，训练成本显著降低',\n",
       " 'research_significance': '为序列建模提供高效、可并行的新架构，推动机器翻译与自然语言处理领域发展',\n",
       " 'preliminary_judgment': '值得，因提出革命性Transformer架构，核心方法创新性强，实验结果显著优于已有模型，对后续NLP研究具有重大影响'}"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "res[\"my_summary\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "1381fd20-74a4-4de4-a774-a2507ef8a4ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "abstract_res = res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "82ca1f56-3088-4562-ab2c-00a0d4bc4caf",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "pre_adviser_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是论文文本分割的“前置分析师”，需基于论文核心信息和文本内容，精准判断结构并给出可落地的分割建议。\n",
    "\n",
    "# 输入信息\n",
    "1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{此处填入前期提取的论文标题、作者、关键词}\n",
    "   - research_topic：{此处填入前期提取的研究主题}\n",
    "   - 上一段文本解读信息：{此处填入上一段文本的结构分析、核心小结（若有）}\n",
    "2. 当前待分析文本段：{此处填入用户提供的当前论文文本段}\n",
    "\n",
    "# 任务要求\n",
    "1. 分析文本段结构：\n",
    "   - 先判断当前文本属于论文的哪个小节/模块（如引言-研究背景、引言-研究gap、方法-数据预处理、方法-模型架构、结果-图表分析、讨论-局限性），需结合论文主题（research_topic）和上一段内容推断；\n",
    "   - 再拆解文本的内部逻辑（如“方法-数据预处理”可能包含“数据来源→数据清洗→数据标注”三步，“结果-图表分析”可能包含“图1解读→表1对比→核心结论”）。\n",
    "\n",
    "2. 总结文本段主要内容：\n",
    "   - 提炼核心信息（如“介绍了数据集A的来源为XX，样本量1000例，标注方式为双盲标注”“推导了公式1，用于解决XX问题，关键假设是XX”），避免冗余，不超过150字符。\n",
    "\n",
    "3. 生成分割建议：\n",
    "   - 明确分割依据（需对应上述“内部逻辑”），如“按‘数据来源-数据清洗-数据标注’三部分分割，每部分对应一个完整信息点”“按‘公式1推导-公式1应用场景’分割，两部分分别对应‘理论依据-实际用途’”；\n",
    "   - 说明建议理由（如“因‘数据来源’和‘数据清洗’是两个独立预处理步骤，分开分割可避免信息混杂”“因‘公式推导’是理论，‘应用场景’是实践，拆分后更易理解逻辑链”）。\n",
    "\n",
    "# 输出格式\n",
    "1. 文本段所属小节/模块：{如“方法-数据预处理”“结果-实验对比”}\n",
    "2. 文本段内部逻辑拆解：{如“数据来源→数据清洗→数据标注”“图1（准确率对比）→表1（消融实验）→核心结论”}\n",
    "3. 文本段主要内容总结：{核心信息，≤150字符}\n",
    "4. 分割建议：{明确分割依据+建议理由，≤200字符}\n",
    "\"\"\"\n",
    "\n",
    "pre_adviser_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"1. 前期核心信息：\n",
    "   - paper_basic_info：{paper_info}\n",
    "   - research_topic：{research_topic}\n",
    "   - 上一段文本解读信息：{next_tip}\n",
    "2. 当前待分析文本段：{text}\n",
    "\"\"\")\n",
    "\n",
    "splitter_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是论文文本分割的“执行器”，位于第一步（文本段结构分析与分割建议生成）前置工作之后。需按前期建议分割文本，同步纠正格式问题，并补充精简小结。\n",
    "\n",
    "# 输入信息\n",
    "1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{此处填入前期提取的论文标题、作者、关键词}\n",
    "   - research_topic：{此处填入前期提取的研究主题}\n",
    "2. 第一步生成的分割建议：{此处填入第一步的分割依据+建议理由}\n",
    "3. 当前待分割文本段：{此处填入用户提供的当前论文文本段}\n",
    "\n",
    "# 任务要求\n",
    "1. 文本分割规则：\n",
    "   - 严格按第一步的分割建议执行，若建议分N段则输出N段；\n",
    "   - 每段输出需包含“小结+论文选段”，总字符数≤780（含标点，小结占50-80字符，选段占剩余字符）；\n",
    "   - 选段需完整保留原文语义（不可删减关键信息，如公式、数据、实验步骤），仅修正格式错误。\n",
    "\n",
    "2. 公式与表格纠正：\n",
    "   - 公式纠正：确保符号完整（如“∑”“∂”不缺失）、运算符正确（如“×”不显示为“x”、“≥”不显示为“>=”）、上下标格式规范（如“a₂”不显示为“a2”“L₁损失”不显示为“L1损失”）；\n",
    "   - 表格纠正：补充缺失的行列标签（如“表头：模型名称/准确率（%）”“行标签：baseline模型/本文模型”）、修正数据对应关系（如确保“模型A”对应“准确率85%”不颠倒），若表格解析严重混乱，需用文字简要还原核心逻辑（如“表格1核心：本文模型在A数据集准确率89%，比baseline高7%”）。\n",
    "\n",
    "3. 小结撰写规则：\n",
    "   - 精准提炼选段核心（如“选段介绍数据集A的样本量、来源及标注方式，为后续实验提供数据基础”“选段推导公式1，说明其用于优化模型损失函数的原理”）；\n",
    "   - 不添加主观评价，仅客观总结内容，控制在50-80字符。\n",
    "\n",
    "# 输出格式\n",
    "每段分割结果用“---shenll---”分隔，格式如下：\n",
    "**小结**：{50-80字符，客观总结选段核心}\n",
    "**论文选段**：{修正后的原文选段，采用markdown格式，确保公式/表格格式正确，与小结对应，总字符≤780}\n",
    "\n",
    "---shenll---\n",
    "（若有第二段，重复上述格式）\n",
    "\"\"\"\n",
    "\n",
    "splitter_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{paper_info}\n",
    "   - research_topic：{research_topic}\n",
    "2. 第一步生成的分割建议：{split_tip}\n",
    "3. 当前待分割文本段：{text}\n",
    "\"\"\")\n",
    "\n",
    "next_adviser_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是论文文本分割的“后续引导者”，位于第一步（文本段结构分析与分割建议生成），第二步（文本分割与公式 / 表格纠正）前置工作之后。需基于当前分割结果，为下一段分析提供精准的关注方向。\n",
    "\n",
    "# 输入信息\n",
    "1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{此处填入前期提取的论文标题、作者、关键词}\n",
    "   - research_topic：{此处填入前期摘要提取的研究主题}\n",
    "   - core_method/research_goal：{此处填入前期摘要提取的核心方法/研究目标，二选一，优先选与当前文本关联的}\n",
    "2. 当前分割结果：{此处填入第二步输出的所有“小结+论文选段”，分隔符“---shenll---”}\n",
    "3. 第一步的结构分析：{此处填入第一步判断的“当前文本所属小节/模块”“内部逻辑拆解”}\n",
    "\n",
    "# 任务要求\n",
    "1. 明确当前文本的关键信息锚点：\n",
    "   - 当前小节/模块标题（需确认是否与第一步判断一致，若当前文本结尾提及下一小节标题，需补充记录，如“当前为‘方法-数据预处理’，结尾提及‘下一节介绍模型架构’，下一小节标题为‘模型架构设计’”）；\n",
    "   - 当前已覆盖的核心信息（如“已覆盖‘数据集A的来源、样本量、标注方式’”“已覆盖‘公式1推导及假设条件’”）。\n",
    "\n",
    "2. 判断当前主题完结状态：\n",
    "   - 明确当前小节/模块的主题是否完结（如“‘数据预处理’主题已完结，包含数据来源、清洗、标注三部分，无遗漏”“‘模型架构’主题未完结，仅介绍输入层，未提及隐藏层和输出层”）；\n",
    "   - 若未完结，说明“未覆盖的子主题”（如“‘模型架构’未覆盖隐藏层的注意力模块设计、输出层的激活函数选择”）。\n",
    "\n",
    "3. 提取下一段重点关注内容：\n",
    "   - 关联前期核心信息（如`core_method`是“改进U-Net”，则下一段需关注“改进的注意力模块参数、U-Net decoder层的调整”）；\n",
    "   - 补充“当前未解决的信息点”（如“当前提到‘采用K折交叉验证’，但未说明K值和验证指标，下一段需关注K值（如K=5）、验证指标（如Dice系数、IoU）”“当前提到‘实验结果优于baseline’，但未给出具体数据（如准确率数值），下一段需关注实验数据表格/图表”）；\n",
    "   - 若当前提及下一小节标题，需明确“下一小节的核心子主题”（如“下一小节为‘模型架构设计’，需重点关注注意力模块的结构、与传统U-Net的差异”）。\n",
    "\n",
    "# 输出格式\n",
    "1. 当前文本关键信息锚点：\n",
    "   - 小节/模块标题：{如“方法-数据预处理”“结果-实验对比（图1）”}\n",
    "   - 已覆盖核心信息：{如“数据集A（来源：XX数据库，样本量：1000例，标注方式：双盲标注）”“公式1推导（解决模型过拟合问题，假设条件：样本独立同分布）”}\n",
    "\n",
    "2. 当前主题完结状态：\n",
    "   - 完结/未完结：{选择其一}\n",
    "   - 说明：{如“完结，‘数据预处理’的3个子步骤均已覆盖”“未完结，‘模型架构’仅介绍输入层，缺少隐藏层和输出层描述”}\n",
    "\n",
    "3. 下一段重点关注内容：\n",
    "   - 核心方向（关联前期信息）：{如“围绕‘改进U-Net的注意力模块’，关注模块参数、与encoder层的连接方式”}\n",
    "   - 需补充的未解决信息点：{如“K折交叉验证的K值、验证指标；实验结果的具体数值（如准确率、召回率）”}\n",
    "   - 下一小节子主题（若有）：{如“模型架构设计的隐藏层注意力模块、输出层激活函数”}\n",
    "\"\"\"\n",
    "\n",
    "next_adviser_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{paper_info}\n",
    "   - research_topic：{research_topic}\n",
    "   - core_method/research_goal：\n",
    "         core_method：{core_method}\n",
    "         research_goal：{research_goal}\n",
    "2. 当前分割结果：{split_res}\n",
    "3. 第一步的结构分析：{split_tip}\n",
    "\"\"\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "57379b1d-c225-4f5d-930b-c2e0d977c357",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "from typing import Dict, Iterator, List, TypedDict\n",
    "\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
    "from langgraph.config import get_stream_writer\n",
    "from langgraph.graph import StateGraph, START, END\n",
    "\n",
    "class SplitState(TypedDict):\n",
    "    paper_md_list: List[str]\n",
    "    \n",
    "    index: int\n",
    "    text: str\n",
    "\n",
    "    paper_info: str\n",
    "    abstract_summary: Dict\n",
    "\n",
    "    split_tip: str\n",
    "    split_res: str\n",
    "    next_tip: str\n",
    "    \n",
    "    next_or_end: str\n",
    "\n",
    "class By3StepsSplitter():\n",
    "    \"\"\"\n",
    "    Parse and split paper text in three LLM-driven steps:\n",
    "    Step 1 -- analyze segment structure and generate splitting advice\n",
    "    Step 2 -- split the text and correct formulas / tables\n",
    "    Step 3 -- extract priming info for splitting the next segment\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, llm: ChatOpenAI, max_chunk_size: int = 2048):\n",
    "        \"\"\"Store the LLM client, build a character-level fallback splitter\n",
    "        and compile the LangGraph workflow.\n",
    "\n",
    "        max_chunk_size: LLM-produced segments at or above this length are\n",
    "        re-split by the character splitter before being streamed out.\n",
    "        \"\"\"\n",
    "        self._llm = llm\n",
    "        # Fallback splitter for over-long segments; 780/80 are hard-coded\n",
    "        # heuristics, independent of max_chunk_size.\n",
    "        self._text_splitter = RecursiveCharacterTextSplitter(\n",
    "            chunk_size=780,\n",
    "            chunk_overlap=80,\n",
    "            length_function=len,\n",
    "            separators=[\"\\n\\n\",\n",
    "                        \".\", \"。\", \"\\u3002\",\n",
    "                        \",\", \"，\", \"\\uff0c\",\n",
    "                        \"\\n\",\n",
    "                        \" \", \"\\u3000\", \"\\u200b\",    # space / full-width space / zero-width space\n",
    "                        ''],\n",
    "            is_separator_regex=False,\n",
    "        )\n",
    "        self._max_chunk_size = max_chunk_size\n",
    "        \n",
    "        self._build_graph()\n",
    "        \n",
    "    def split_stream(self, abstract_state: AbstractState) -> Iterator[str]:\n",
    "        \"\"\"Run the workflow over every markdown chunk and yield text segments.\n",
    "\n",
    "        abstract_state supplies \"my_summary\" (abstract-extraction dict) and\n",
    "        \"paper_md_list\" (paper markdown chunks); AbstractState and\n",
    "        merge_paper_basic_info are defined earlier in the notebook.\n",
    "        \"\"\"\n",
    "        abstract_summary = abstract_state[\"my_summary\"]\n",
    "        paper_info = merge_paper_basic_info(abstract_summary)\n",
    "        paper_md_list = abstract_state[\"paper_md_list\"]\n",
    "        # stream_mode=\"custom\" surfaces only the payloads emitted through\n",
    "        # get_stream_writer() inside _paper_text_splitter.\n",
    "        for segement_res in self._workflow.stream({\n",
    "            \"paper_md_list\": paper_md_list,\n",
    "            \"index\": 0,\n",
    "            \"paper_info\": paper_info,\n",
    "            \"abstract_summary\": abstract_summary,\n",
    "            \"next_tip\": \"这是论文中的第一段分割文本，还没有对上一段文本的分析\"\n",
    "            }, {\"recursion_limit\": 999}, stream_mode=\"custom\"):\n",
    "            yield segement_res[\"text_segment\"]\n",
    "        \n",
    "    def _text_generator(self, state: SplitState):\n",
    "        \"\"\"Feed the next chunk into the state, or flag END when the list is exhausted.\"\"\"\n",
    "        text_index = state[\"index\"]\n",
    "        paper_md_list = state[\"paper_md_list\"]\n",
    "        if text_index >= len(paper_md_list):\n",
    "            print(f'\\n_text_generator:\\nindex: {text_index+1}\\nnext_or_end: END\\n')\n",
    "            return {\"next_or_end\": \"END\"}\n",
    "        print(f'\\n_text_generator:\\nindex: {text_index+1}\\ntext: {paper_md_list[text_index][:50]}\\nnext_or_end: NEXT\\n')\n",
    "        # \"index\" is advanced past the chunk placed in \"text\".\n",
    "        return {\"index\": text_index+1, \"text\": paper_md_list[text_index], \"next_or_end\": \"NEXT\"}\n",
    "    \n",
    "    def _next_route(self, state: SplitState):\n",
    "        \"\"\"Conditional-edge router: returns the \"NEXT\" / \"END\" flag set by _text_generator.\"\"\"\n",
    "        print(f'\\n_next_route:\\nindex: {state[\"index\"]}\\nnext_or_end: {state[\"next_or_end\"]}\\n')\n",
    "        return state[\"next_or_end\"]\n",
    "        \n",
    "    def _paper_split_adviser(self, state: SplitState):\n",
    "        \"\"\"Step 1 -- analyze segment structure and generate splitting advice.\"\"\"\n",
    "        user_content = pre_adviser_prompt.invoke({\n",
    "            \"paper_info\": state[\"paper_info\"],\n",
    "            \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "            \"next_tip\": state[\"next_tip\"],\n",
    "            \"text\": state[\"text\"]\n",
    "            }).messages[0].content\n",
    "        \n",
    "        resp = self._llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": pre_adviser_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        split_tip = resp.content\n",
    "\n",
    "        print(f'\\n_paper_split_adviser:\\nindex: {state[\"index\"]}\\nsplit_tip: {split_tip}\\n')\n",
    "        return {\"split_tip\": split_tip}\n",
    "    \n",
    "    def _paper_text_splitter(self, state: SplitState):\n",
    "        \"\"\"Step 2 -- split the text and correct formulas / tables.\"\"\"\n",
    "        user_content = splitter_prompt.invoke({\n",
    "            \"paper_info\": state[\"paper_info\"],\n",
    "            \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "            \"split_tip\": state[\"split_tip\"],\n",
    "            \"text\": state[\"text\"]}).messages[0].content\n",
    "        \n",
    "        resp = self._llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": splitter_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        split_res = resp.content\n",
    "        \n",
    "        writer = get_stream_writer()\n",
    "        # \"---shenll---\" is presumably the delimiter the system prompt asks the\n",
    "        # LLM to place between segments -- confirm against splitter_system_msg.\n",
    "        segments = (s.strip() for s in split_res.split(\"---shenll---\"))\n",
    "        for s in segments:\n",
    "            if len(s) == 0:\n",
    "                continue\n",
    "            if len(s) >= self._max_chunk_size:\n",
    "                print(f'\\n_paper_text_splitter:\\nindex: {state[\"index\"]}\\nnext split len: {len(s)}\\n')\n",
    "                # Over-long segment: re-split by characters before streaming.\n",
    "                for s_chunk in self._text_splitter.split_text(s):\n",
    "                    writer({\"text_segment\": s_chunk})\n",
    "            else:\n",
    "                print(f'\\n_paper_text_splitter:\\nindex: {state[\"index\"]}\\nreturn len: {len(s)}\\n')\n",
    "                writer({\"text_segment\": s})\n",
    "\n",
    "        return {\"split_res\": split_res}\n",
    "    \n",
    "    def _next_read_adviser(self, state: SplitState):\n",
    "        \"\"\"Step 3 -- extract priming info for splitting the next segment.\"\"\"\n",
    "        user_content = next_adviser_prompt.invoke({\n",
    "            \"paper_info\": state[\"paper_info\"], \n",
    "            \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "            \"core_method\": state[\"abstract_summary\"].get(\"core_method\", \"没有相关信息\"),\n",
    "            \"research_goal\": state[\"abstract_summary\"].get(\"research_goal\", \"没有相关信息\"),\n",
    "            \"split_tip\": state[\"split_tip\"],\n",
    "            \"split_res\": state[\"split_res\"]}).messages[0].content\n",
    "        \n",
    "        resp = self._llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": next_adviser_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        next_tip = resp.content\n",
    "\n",
    "        print(f'\\n_next_read_adviser:\\nindex: {state[\"index\"]}\\nnext_tip: {next_tip}\\n')\n",
    "        return {\"next_tip\": next_tip}\n",
    "    \n",
    "    def _build_graph(self):\n",
    "        \"\"\"Wire the nodes into a loop: text_generator -> adviser -> splitter ->\n",
    "        next_read_adviser -> text_generator, exiting when the router returns \"END\".\"\"\"\n",
    "        self._graph = StateGraph(SplitState)\n",
    "\n",
    "        # Add the nodes\n",
    "        self._graph.add_node(\"text_generator\", self._text_generator)\n",
    "        self._graph.add_node(\"paper_split_adviser\", self._paper_split_adviser)\n",
    "        self._graph.add_node(\"paper_text_splitter\", self._paper_text_splitter)\n",
    "        self._graph.add_node(\"next_read_adviser\", self._next_read_adviser)\n",
    "\n",
    "        # Add edges to connect nodes\n",
    "        self._graph.add_edge(START, \"text_generator\")\n",
    "        self._graph.add_conditional_edges(\n",
    "            \"text_generator\",\n",
    "            self._next_route,\n",
    "            {\n",
    "                \"END\": END,\n",
    "                \"NEXT\": \"paper_split_adviser\",\n",
    "            },\n",
    "        )\n",
    "        self._graph.add_edge(\"paper_split_adviser\", \"paper_text_splitter\")\n",
    "        self._graph.add_edge(\"paper_text_splitter\", \"next_read_adviser\")\n",
    "        self._graph.add_edge(\"next_read_adviser\", \"text_generator\")\n",
    "        \n",
    "        # Compile the workflow\n",
    "        self._workflow = self._graph.compile()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "5bc5ccc9-52bc-4765-a668-c0e6132b54e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Instantiate the three-step splitter with the LLM client defined earlier.\n",
    "splitter = By3StepsSplitter(llm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "97dff466-bedc-46f4-80c3-7ae3aff42240",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "_text_generator:\n",
      "index: 1\n",
      "text: Provided proper attribution is provided, Google he\n",
      "next_or_end: NEXT\n",
      "\n",
      "\n",
      "_next_route:\n",
      "index: 1\n",
      "next_or_end: NEXT\n",
      "\n",
      "\n",
      "_paper_split_adviser:\n",
      "index: 1\n",
      "split_tip: 1. 文本段所属小节/模块：**引言-研究背景**  \n",
      "2. 文本段内部逻辑拆解：**版权许可声明→论文标题与作者信息→摘要→贡献说明→会议信息→引言开头：RNN/LSTM在序列建模中的主导地位**  \n",
      "3. 文本段主要内容总结：介绍RNN、LSTM和GRU在序列建模（如机器翻译）中的主导地位，引出对现有基于循环结构模型的改进需求。  \n",
      "4. 分割建议：按“元信息（版权/作者/摘要/贡献/会议）→引言正文”分割，因元信息与科学论述无关，分离可突出研究主线，便于后续结构化处理。\n",
      "\n",
      "\n",
      "_paper_text_splitter:\n",
      "index: 1\n",
      "return len: 226\n",
      "\n",
      "\n",
      "_paper_text_splitter:\n",
      "index: 1\n",
      "return len: 1654\n",
      "\n",
      "\n",
      "_paper_text_splitter:\n",
      "index: 1\n",
      "return len: 1218\n",
      "\n",
      "\n",
      "_paper_text_splitter:\n",
      "index: 1\n",
      "return len: 141\n",
      "\n",
      "\n",
      "_paper_text_splitter:\n",
      "index: 1\n",
      "return len: 491\n",
      "\n",
      "\n",
      "*** - 0 - ***\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "Interrupted by user",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[37]\u001b[39m\u001b[32m, line 6\u001b[39m\n\u001b[32m      4\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m i % \u001b[32m10\u001b[39m == \u001b[32m0\u001b[39m:\n\u001b[32m      5\u001b[39m     \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33m*** - \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mi\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m - ***\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m----> \u001b[39m\u001b[32m6\u001b[39m     my_input = \u001b[38;5;28minput\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mEnter to continue\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m      7\u001b[39m file.write(s)\n\u001b[32m      8\u001b[39m file.write(\u001b[33m\"\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33m--- --- ---\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33m\"\u001b[39m)\n",
      "\u001b[36mFile \u001b[39m\u001b[32m/opt/conda/lib/python3.11/site-packages/ipykernel/kernelbase.py:1282\u001b[39m, in \u001b[36mKernel.raw_input\u001b[39m\u001b[34m(self, prompt)\u001b[39m\n\u001b[32m   1280\u001b[39m     msg = \u001b[33m\"\u001b[39m\u001b[33mraw_input was called, but this frontend does not support input requests.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m   1281\u001b[39m     \u001b[38;5;28;01mraise\u001b[39;00m StdinNotImplementedError(msg)\n\u001b[32m-> \u001b[39m\u001b[32m1282\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._input_request(\n\u001b[32m   1283\u001b[39m     \u001b[38;5;28mstr\u001b[39m(prompt),\n\u001b[32m   1284\u001b[39m     \u001b[38;5;28mself\u001b[39m._parent_ident[\u001b[33m\"\u001b[39m\u001b[33mshell\u001b[39m\u001b[33m\"\u001b[39m],\n\u001b[32m   1285\u001b[39m     \u001b[38;5;28mself\u001b[39m.get_parent(\u001b[33m\"\u001b[39m\u001b[33mshell\u001b[39m\u001b[33m\"\u001b[39m),\n\u001b[32m   1286\u001b[39m     password=\u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[32m   1287\u001b[39m )\n",
      "\u001b[36mFile \u001b[39m\u001b[32m/opt/conda/lib/python3.11/site-packages/ipykernel/kernelbase.py:1325\u001b[39m, in \u001b[36mKernel._input_request\u001b[39m\u001b[34m(self, prompt, ident, parent, password)\u001b[39m\n\u001b[32m   1322\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m:\n\u001b[32m   1323\u001b[39m     \u001b[38;5;66;03m# re-raise KeyboardInterrupt, to truncate traceback\u001b[39;00m\n\u001b[32m   1324\u001b[39m     msg = \u001b[33m\"\u001b[39m\u001b[33mInterrupted by user\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m-> \u001b[39m\u001b[32m1325\u001b[39m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mKeyboardInterrupt\u001b[39;00m(msg) \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m   1326\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m:\n\u001b[32m   1327\u001b[39m     \u001b[38;5;28mself\u001b[39m.log.warning(\u001b[33m\"\u001b[39m\u001b[33mInvalid Message:\u001b[39m\u001b[33m\"\u001b[39m, exc_info=\u001b[38;5;28;01mTrue\u001b[39;00m)\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: Interrupted by user"
     ]
    }
   ],
   "source": [
    "# Stream split segments to disk, pausing for confirmation every 10 segments.\n",
    "with open(\"./tmp/split_result.txt\", \"w\", encoding=\"utf-8\") as file:\n",
    "    for i, s in enumerate(splitter.split_stream(abstract_res)):\n",
    "        if i % 10 == 0:\n",
    "            print(f\"\\n*** - {i} - ***\")\n",
    "            input(\"Enter to continue\")\n",
    "        file.write(s)\n",
    "        file.write(\"\\n--- --- ---\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "57645ea0-f385-4cbe-8c8c-82e43f05a3ee",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "13"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: number of markdown chunks the splitter has to process.\n",
    "len(abstract_res[\"paper_md_list\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "486441ff-1e7a-4795-b6ca-ea9911e67c38",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SplitState(TypedDict):\n",
    "    \"\"\"Shared state threaded through the LangGraph splitting workflow.\n",
    "\n",
    "    NOTE(review): duplicate of the SplitState defined above; this later\n",
    "    definition shadows the earlier one when the cell is executed.\n",
    "    \"\"\"\n",
    "    paper_md_list: List[str]   # raw markdown chunks of the paper, consumed one per loop\n",
    "    \n",
    "    index: int                 # index of the NEXT chunk to read (already advanced past \"text\")\n",
    "    text: str                  # chunk currently being processed\n",
    "\n",
    "    paper_info: str            # merged basic info produced by merge_paper_basic_info()\n",
    "    abstract_summary: Dict     # abstract-extraction dict; keys read via .get() downstream\n",
    "\n",
    "    split_tip: str             # step-1 output: structure analysis / splitting advice\n",
    "    split_res: str             # step-2 output: delimiter-separated split text\n",
    "    next_tip: str              # step-3 output: priming info for the next chunk\n",
    "    \n",
    "    next_or_end: str           # routing flag: \"NEXT\" to continue, \"END\" to stop\n",
    "\n",
    "class By3StepsSplitter():\n",
    "    \"\"\"\n",
    "    Parse and split paper text in three LLM-driven steps:\n",
    "    Step 1 -- analyze segment structure and generate splitting advice\n",
    "    Step 2 -- split the text and correct formulas / tables\n",
    "    Step 3 -- extract priming info for splitting the next segment\n",
    "\n",
    "    NOTE(review): cleaned-up duplicate of the By3StepsSplitter defined above\n",
    "    (debug prints removed, first tip lifted to a class attribute); this later\n",
    "    definition shadows the earlier one when the cell is executed.\n",
    "    \"\"\"\n",
    "    \n",
    "    # Seed \"next_tip\" for the very first chunk (runtime string, kept verbatim).\n",
    "    first_tip = \"这是论文中的第一段分割文本，还没有对上一段文本的分析\"\n",
    "    \n",
    "    def __init__(self, llm: ChatOpenAI, max_chunk_size: int = 2048):\n",
    "        \"\"\"Store the LLM client, build a character-level fallback splitter\n",
    "        and compile the LangGraph workflow.\n",
    "\n",
    "        max_chunk_size: LLM-produced segments at or above this length are\n",
    "        re-split by the character splitter before being streamed out.\n",
    "        \"\"\"\n",
    "        self._llm = llm\n",
    "        # Fallback splitter for over-long segments; 780/80 are hard-coded\n",
    "        # heuristics, independent of max_chunk_size.\n",
    "        self._text_splitter = RecursiveCharacterTextSplitter(\n",
    "            chunk_size=780,\n",
    "            chunk_overlap=80,\n",
    "            length_function=len,\n",
    "            separators=[\"\\n\\n\",\n",
    "                        \".\", \"。\", \"\\u3002\",\n",
    "                        \",\", \"，\", \"\\uff0c\",\n",
    "                        \"\\n\",\n",
    "                        \" \", \"\\u3000\", \"\\u200b\",    # space / full-width space / zero-width space\n",
    "                        ''],\n",
    "            is_separator_regex=False,\n",
    "        )\n",
    "        self._max_chunk_size = max_chunk_size\n",
    "        \n",
    "        self._build_graph()\n",
    "        \n",
    "    def split_stream(self, abstract_state: AbstractState) -> Iterator[str]:\n",
    "        \"\"\"Run the workflow over every markdown chunk and yield text segments.\n",
    "\n",
    "        abstract_state supplies \"my_summary\" (abstract-extraction dict) and\n",
    "        \"paper_md_list\" (paper markdown chunks); AbstractState and\n",
    "        merge_paper_basic_info are defined earlier in the notebook.\n",
    "        \"\"\"\n",
    "        abstract_summary = abstract_state[\"my_summary\"]\n",
    "        paper_info = merge_paper_basic_info(abstract_summary)\n",
    "        paper_md_list = abstract_state[\"paper_md_list\"]\n",
    "        # stream_mode=\"custom\" surfaces only the payloads emitted through\n",
    "        # get_stream_writer() inside _paper_text_splitter.\n",
    "        for segement_res in self._workflow.stream({\n",
    "            \"paper_md_list\": paper_md_list,\n",
    "            \"index\": 0,\n",
    "            \"paper_info\": paper_info,\n",
    "            \"abstract_summary\": abstract_summary,\n",
    "            \"next_tip\": By3StepsSplitter.first_tip\n",
    "            }, {\"recursion_limit\": 999}, stream_mode=\"custom\"):\n",
    "            yield segement_res[\"text_segment\"]\n",
    "        \n",
    "    def _text_generator(self, state: SplitState):\n",
    "        \"\"\"Feed the next chunk into the state, or flag END when the list is exhausted.\"\"\"\n",
    "        text_index = state[\"index\"]\n",
    "        paper_md_list = state[\"paper_md_list\"]\n",
    "        if text_index >= len(paper_md_list):\n",
    "            return {\"next_or_end\": \"END\"}\n",
    "        \n",
    "        # \"index\" is advanced past the chunk placed in \"text\".\n",
    "        return {\"index\": text_index+1, \"text\": paper_md_list[text_index], \"next_or_end\": \"NEXT\"}\n",
    "    \n",
    "    def _next_route(self, state: SplitState):\n",
    "        \"\"\"Conditional-edge router: returns the \"NEXT\" / \"END\" flag set by _text_generator.\"\"\"\n",
    "        return state[\"next_or_end\"]\n",
    "        \n",
    "    def _paper_split_adviser(self, state: SplitState):\n",
    "        \"\"\"Step 1 -- analyze segment structure and generate splitting advice.\"\"\"\n",
    "        user_content = pre_adviser_prompt.invoke({\n",
    "            \"paper_info\": state[\"paper_info\"],\n",
    "            \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "            \"next_tip\": state[\"next_tip\"],\n",
    "            \"text\": state[\"text\"]\n",
    "            }).messages[0].content\n",
    "        \n",
    "        resp = self._llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": pre_adviser_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        split_tip = resp.content\n",
    "        \n",
    "        return {\"split_tip\": split_tip}\n",
    "    \n",
    "    def _paper_text_splitter(self, state: SplitState):\n",
    "        \"\"\"Step 2 -- split the text and correct formulas / tables.\"\"\"\n",
    "        user_content = splitter_prompt.invoke({\n",
    "            \"paper_info\": state[\"paper_info\"],\n",
    "            \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "            \"split_tip\": state[\"split_tip\"],\n",
    "            \"text\": state[\"text\"]}).messages[0].content\n",
    "        \n",
    "        resp = self._llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": splitter_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        split_res = resp.content\n",
    "        \n",
    "        writer = get_stream_writer()\n",
    "        # \"---shenll---\" is presumably the delimiter the system prompt asks the\n",
    "        # LLM to place between segments -- confirm against splitter_system_msg.\n",
    "        segments = (s.strip() for s in split_res.split(\"---shenll---\"))\n",
    "        for s in segments:\n",
    "            if len(s) == 0:\n",
    "                continue\n",
    "            if len(s) >= self._max_chunk_size:\n",
    "                # Over-long segment: re-split by characters before streaming.\n",
    "                for s_chunk in self._text_splitter.split_text(s):\n",
    "                    writer({\"text_segment\": s_chunk})\n",
    "            else:\n",
    "                writer({\"text_segment\": s})\n",
    "\n",
    "        return {\"split_res\": split_res}\n",
    "    \n",
    "    def _next_read_adviser(self, state: SplitState):\n",
    "        \"\"\"Step 3 -- extract priming info for splitting the next segment.\"\"\"\n",
    "        user_content = next_adviser_prompt.invoke({\n",
    "            \"paper_info\": state[\"paper_info\"], \n",
    "            \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "            \"core_method\": state[\"abstract_summary\"].get(\"core_method\", \"没有相关信息\"),\n",
    "            \"research_goal\": state[\"abstract_summary\"].get(\"research_goal\", \"没有相关信息\"),\n",
    "            \"split_tip\": state[\"split_tip\"],\n",
    "            \"split_res\": state[\"split_res\"]}).messages[0].content\n",
    "        \n",
    "        resp = self._llm.invoke([{\n",
    "                \"role\": \"system\",\n",
    "                \"content\": next_adviser_system_msg,\n",
    "            },{\n",
    "                \"role\": \"user\",\n",
    "                \"content\": user_content,\n",
    "            }]\n",
    "        )\n",
    "        next_tip = resp.content\n",
    "        \n",
    "        return {\"next_tip\": next_tip}\n",
    "    \n",
    "    def _build_graph(self):\n",
    "        \"\"\"Wire the nodes into a loop: text_generator -> adviser -> splitter ->\n",
    "        next_read_adviser -> text_generator, exiting when the router returns \"END\".\"\"\"\n",
    "        self._graph = StateGraph(SplitState)\n",
    "\n",
    "        # Add the nodes\n",
    "        self._graph.add_node(\"text_generator\", self._text_generator)\n",
    "        self._graph.add_node(\"paper_split_adviser\", self._paper_split_adviser)\n",
    "        self._graph.add_node(\"paper_text_splitter\", self._paper_text_splitter)\n",
    "        self._graph.add_node(\"next_read_adviser\", self._next_read_adviser)\n",
    "\n",
    "        # Add edges to connect nodes\n",
    "        self._graph.add_edge(START, \"text_generator\")\n",
    "        self._graph.add_conditional_edges(\n",
    "            \"text_generator\",\n",
    "            self._next_route,\n",
    "            {\n",
    "                \"END\": END,\n",
    "                \"NEXT\": \"paper_split_adviser\",\n",
    "            },\n",
    "        )\n",
    "        self._graph.add_edge(\"paper_split_adviser\", \"paper_text_splitter\")\n",
    "        self._graph.add_edge(\"paper_text_splitter\", \"next_read_adviser\")\n",
    "        self._graph.add_edge(\"next_read_adviser\", \"text_generator\")\n",
    "        \n",
    "        # Compile the workflow\n",
    "        self._workflow = self._graph.compile()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
