{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "04205999-196c-4e1a-9bcd-ca2c2edcb09f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdin",
     "output_type": "stream",
     "text": [
      "dashscope api key:  ········\n"
     ]
    }
   ],
   "source": [
    "import getpass\n",
    "dashscope_api_key=getpass.getpass(\"dashscope api key: \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "3b385c4a-8d71-4c89-8f4d-2e1b1dc1debb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.11/site-packages/requests/__init__.py:86: RequestsDependencyWarning: Unable to find acceptable character detection dependency (chardet or charset_normalizer).\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from langchain_community.embeddings import DashScopeEmbeddings\n",
    "\n",
    "embeddings = DashScopeEmbeddings(\n",
    "    model=\"text-embedding-v4\",\n",
    "    dashscope_api_key=dashscope_api_key,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9ca48ca3-aad9-463c-8997-a1346fc9ad80",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "40199"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import os\n",
    "import re\n",
    "import time\n",
    "from typing import List\n",
    "import fitz\n",
    "import pymupdf4llm\n",
    "\n",
     "def is_page_number(span, page_height, page_width) -> bool:\n",
     "    \"\"\"Heuristic check: does a PyMuPDF text span look like a page number?\n",
     "\n",
     "    :param span: PyMuPDF span dict (reads the \"text\" and \"bbox\" keys)\n",
     "    :param page_height: page height in points\n",
     "    :param page_width: page width in points\n",
     "    :return: True only if all three rules below hold\n",
     "    \"\"\"\n",
     "    # Strip leading/trailing whitespace (including newlines).\n",
     "    text = span[\"text\"].strip()\n",
     "    \n",
     "    # Rule 1: position — the span's bottom edge lies in the lowest 10% of the page.\n",
     "    is_bottom = span[\"bbox\"][3] > page_height * 0.9\n",
     "    \n",
     "    # Rule 2: width — the span is narrower than 20% of the page width.\n",
     "    is_narrow = (span[\"bbox\"][2] - span[\"bbox\"][0]) < page_width * 0.2\n",
     "    \n",
     "    # Rule 3: content — supported page-number formats:\n",
     "    #   - plain digits: \"1\", \"23\"\n",
     "    #   - \"Page 1\", \"page 5\" (case-insensitive)\n",
     "    #   - trailing spaces/newlines were already removed by strip()\n",
     "    page_pattern = r'^(?:page\\s*)?\\d+\\s*$'\n",
     "    is_valid_format = bool(re.match(page_pattern, text, re.IGNORECASE))\n",
     "    \n",
     "    return is_bottom and is_narrow and is_valid_format\n",
    "\n",
    "def parse_pdf2md(file_path: str) -> str:\n",
    "    doc = fitz.open(file_path)\n",
    "    md_pages = []\n",
    "\n",
    "    for page in doc:\n",
    "        page_width = page.rect.width\n",
    "        page_height = page.rect.height\n",
    "        text_dict = page.get_text(\"dict\")\n",
    "        \n",
    "        # 初始化裁剪高度\n",
    "        crop_height = page_height\n",
    "        \n",
    "        # 检测页码（仅检查最后一个block的最后一行）\n",
    "        if text_dict[\"blocks\"]:\n",
    "            last_block = text_dict[\"blocks\"][-1]\n",
    "            if \"lines\" in last_block and last_block[\"lines\"]:\n",
    "                last_line = last_block[\"lines\"][-1]\n",
    "                if \"spans\" in last_line and last_line[\"spans\"]:\n",
    "                    last_span = last_line[\"spans\"][-1]\n",
    "                    if is_page_number(last_span, page_height, page_width):\n",
    "                        crop_height = last_span[\"bbox\"][1] - (last_span[\"size\"] * 0.5)\n",
    "\n",
    "        # 创建裁剪页面\n",
    "        tmp_doc = fitz.open()\n",
    "        new_page = tmp_doc.new_page(\n",
    "            width=page.rect.width,\n",
    "            height=crop_height\n",
    "        )\n",
    "        new_page.show_pdf_page(\n",
    "            new_page.rect,\n",
    "            doc,\n",
    "            page.number,\n",
    "            clip=fitz.Rect(0, 0, page.rect.width, crop_height)\n",
    "        )\n",
    "        \n",
    "        md_pages.append(pymupdf4llm.to_markdown(tmp_doc))\n",
    "        \n",
    "    return \"\\n\".join(md_pages)\n",
    "\n",
    "md_text = parse_pdf2md(\"./tmp/Attention Is All You Need.pdf\")\n",
    "len(md_text)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "f74f6924-8621-4fdc-be7f-53b947caef2b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Provided proper attribution is provided, Google hereby grants permission to\n",
      "reproduce the tables and figures in this paper solely for use in journalistic or\n",
      "scholarly works.\n",
      "\n",
      "## **Attention Is All You Need**\n",
      "\n",
      "\n",
      "\n",
      "**Niki Parmar** _[∗]_\n",
      "Google Research\n",
      "```\n",
      "nikip@google.com\n",
      "\n",
      "```\n",
      "\n",
      "\n",
      "**Ashish Vaswani** _[∗]_\n",
      "Google Brain\n",
      "```\n",
      "avaswani@google.com\n",
      "\n",
      "```\n",
      "\n",
      "**Llion Jones** _[∗]_\n",
      "Google Research\n",
      "```\n",
      " llion@google.com\n",
      "\n",
      "```\n",
      "\n",
      "\n",
      "**Noam Shazeer** _[∗]_\n",
      "Google Brain\n",
      "```\n",
      "noam@google.com\n",
      "\n",
      "```\n",
      "\n",
      "\n",
      "**Jakob Uszkoreit** _[∗]_\n"
     ]
    }
   ],
   "source": [
    "print(md_text[:500])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "23c91b1b-791f-40de-9b4f-c365e6e18841",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "13"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
     "# Split the markdown into ~3600-char chunks with 480-char overlap,\n",
     "# preferring paragraph, then sentence, then clause boundaries.\n",
     "recursive_splitter = RecursiveCharacterTextSplitter(\n",
     "        chunk_size=3600,\n",
     "        chunk_overlap=480,\n",
     "        length_function=len,\n",
     "        separators=[\"\\n\\n\",\n",
     "                    \".\", \"。\", \"\\u3002\",\n",
     "                    \",\", \"，\", \"\\uff0c\",\n",
     "                    \"\\n\",\n",
     "                    \" \", \"\\u3000\", \"\\u200b\",    # space / full-width space / zero-width space\n",
     "                    ''],\n",
     "        is_separator_regex=False,\n",
     "    )\n",
     "res1 = recursive_splitter.split_text(md_text)\n",
     "len(res1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "01e03dbf-1da7-4c92-9535-a06d4a15dd93",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'str'>\n"
     ]
    }
   ],
   "source": [
    "print(type(res1[0]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "acc9401e-2ccd-4534-ac25-91f967982bcf",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "AIMessage(content='{\"info\": {\"name\": \"刘五\", \"age\": \"34岁\", \"email\": \"liuwu@example.com\"}, \"hobby\": [\"打篮球\", \"旅游\"]}', additional_kwargs={'parsed': None, 'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 39, 'prompt_tokens': 269, 'total_tokens': 308, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen3-235b-a22b-instruct-2507', 'system_fingerprint': None, 'id': 'chatcmpl-0dd38104-9011-9496-9f5d-576cecc1e173', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--69dd653b-27c4-4bb6-9403-37d9d969c8a1-0', usage_metadata={'input_tokens': 269, 'output_tokens': 39, 'total_tokens': 308, 'input_token_details': {}, 'output_token_details': {}})"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# 测试json格式输出\n",
    "from langchain_openai import ChatOpenAI\n",
    "\n",
    "llm = ChatOpenAI(\n",
    "    api_key=dashscope_api_key,\n",
    "    base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
    "    model=\"qwen3-235b-a22b-instruct-2507\",  # 您可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models\n",
    "    # other params...\n",
    ")\n",
    "json_llm = llm.bind(response_format={\"type\": \"json_object\"})\n",
    "\n",
    "import os\n",
    "import json\n",
    "# 预定义示例响应（用于few-shot提示）\n",
    "example1_response = json.dumps(\n",
    "    {\n",
    "        \"info\": {\"name\": \"张三\", \"age\": \"25岁\", \"email\": \"zhangsan@example.com\"},\n",
    "        \"hobby\": [\"唱歌\"]\n",
    "    },\n",
    "    ensure_ascii=False\n",
    ")\n",
    "example2_response = json.dumps(\n",
    "    {\n",
    "        \"info\": {\"name\": \"李四\", \"age\": \"30岁\", \"email\": \"lisi@example.com\"},\n",
    "        \"hobby\": [\"跳舞\", \"游泳\"]\n",
    "    },\n",
    "    ensure_ascii=False\n",
    ")\n",
    "example3_response = json.dumps(\n",
    "    {\n",
    "        \"info\": {\"name\": \"王五\", \"age\": \"40岁\", \"email\": \"wangwu@example.com\"},\n",
    "        \"hobby\": [\"Rap\", \"篮球\"]\n",
    "    },\n",
    "    ensure_ascii=False\n",
    ")\n",
    "\n",
    "\n",
    "json_res = json_llm.invoke([\n",
    "    {\n",
    "        \"role\": \"system\",\n",
    "        \"content\": f\"\"\"提取name、age、email和hobby（数组类型），输出包含info层和hobby数组的JSON。\n",
    "        示例：\n",
    "        Q：我叫张三，今年25岁，邮箱是zhangsan@example.com，爱好是唱歌\n",
    "        A：{example1_response}\n",
    "        \n",
    "        Q：我叫李四，今年30岁，邮箱是lisi@example.com，平时喜欢跳舞和游泳\n",
    "        A：{example2_response}\n",
    "        \n",
    "        Q：我的邮箱是wangwu@example.com，今年40岁，名字是王五，会Rap和打篮球\n",
    "        A：{example3_response}\"\"\"\n",
    "    },\n",
    "    {\n",
    "        \"role\": \"user\",\n",
    "        \"content\": \"大家好，我叫刘五，今年34岁，邮箱是liuwu@example.com，平时喜欢打篮球和旅游\", \n",
    "    },\n",
    "])\n",
    "type(json_res)\n",
    "json_res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "e7bfb38c-6d56-4053-bbc2-5fed2cb6d697",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'name': '刘五', 'age': '34岁', 'email': 'liuwu@example.com'}"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "json.loads(json_res.content)['info']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "a1fe31c0-f3ee-4a95-9503-edfbe9204837",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 第一趟阅读测试（采用评估器-优化器模式）：分析文章开头，获取标题、作者、摘要内容，初步分析论文主题和提出相关问题\n",
    "from typing import TypedDict, Dict\n",
    "\n",
     "class State(TypedDict):\n",
     "    # Shared state for the abstract reader/evaluator loop.\n",
     "    index: int    # index of the current text chunk (first chunk is 1)\n",
     "    text: str     # current paper text chunk (markdown)\n",
     "\n",
     "    my_summary: Dict  # JSON summary produced by abstract_reader\n",
     "\n",
     "    next_or_end: str  # evaluator verdict: \"NEXT\" or \"END\"\n",
     "    read_tip: str     # evaluator's reading suggestion for the next chunk\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "7c619c30-0ff8-4193-96e2-eb60e14e9b6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 生成模型节点\n",
    "\n",
    "abstract_reader_system_msg = \"\"\"# 系统提示词：论文标题与摘要信息提取专家\n",
    "你需要扮演「论文信息提取专家」，基于用户提供的论文「标题文本」和「摘要文本」（二者均为论文分割后的原始内容，需优先使用文本中的原文表述，避免主观编造），提取出阅读论文第一步（标题+摘要）应获取的核心信息，并严格按照指定JSON格式输出。\n",
    "\n",
    "典型输入格式为：\n",
    "论文文本选段：...\n",
    "阅读提示：...\n",
    "\n",
    "## 一、提取信息类别及定义（需全覆盖，无对应信息时标注为\"无\"）\n",
    "请严格按照以下8个类别提取信息，每个类别需符合定义且信息精准：\n",
    "1. \"paper_basic_info\"：论文基础信息\n",
    "   - 包含\"title\"（论文完整标题，直接复制用户提供的标题文本，不可修改）、\"authors\"（摘要中提及的作者姓名，若摘要未提及则填\"无\"，多名作者用分号分隔）、\"keywords\"（从标题/摘要中提取的核心关键词，需是领域内核心术语，如研究主题、方法、对象等，3-5个为宜，用分号分隔，无则填\"无\"）。\n",
    "\n",
    "2. \"research_topic\"：研究主题\n",
    "   - 定义：论文聚焦的核心领域、具体研究方向或核心问题范畴（如“深度学习在医学CT影像分割中的应用研究”“基于强化学习的自动驾驶路径规划优化”），需从标题和摘要中整合，避免过于宽泛。\n",
    "\n",
    "3. \"existing_problem\"：现有研究存在的问题（研究背景痛点）\n",
    "   - 定义：摘要中提及的「当前领域内未解决的缺陷、不足或挑战」（如“传统U-Net模型在小样本医学影像分割中精度低”“现有路径规划算法未考虑实时交通拥堵动态变化”），需直接对应摘要中“前人研究的不足”描述，无则填\"无\"。\n",
    "\n",
    "4. \"research_goal\"：研究目标\n",
    "   - 定义：本文明确要解决的「具体问题」或「达成的具体目标」（如“提出一种轻量化改进U-Net模型，提升小样本CT影像分割精度”“设计一种融合实时交通数据的强化学习路径规划算法”），需体现“本文要做什么”，而非“做了什么”。\n",
    "\n",
    "5. \"core_method\"：核心方法/技术手段\n",
    "   - 定义：摘要中提及的「本文用于解决问题的关键方法、模型、实验设计或技术路径」（如“基于注意力机制改进U-Net模型；采用K折交叉验证进行实验”“提出双分支特征融合网络；使用Cityscapes数据集训练验证”），需包含“方法名称+核心操作”，无则填\"无\"。\n",
    "\n",
    "6. \"key_result\"：关键实验/研究结果\n",
    "   - 定义：摘要中提及的「最核心、最具代表性的实验结论或研究发现」（需包含具体数据或明确结论，如“改进模型在LIDC数据集上Dice系数达0.89，较传统U-Net提升8%”“验证了所提算法在拥堵场景下路径规划效率提升20%”），避免笼统表述，无则填\"无\"。\n",
    "\n",
    "7. \"research_significance\"：研究意义/价值\n",
    "   - 定义：摘要中提及的「本文研究的理论价值或实际应用价值」（如“为小样本医学影像分割提供新方法，助力临床实时诊断”“为自动驾驶在复杂交通场景下的路径规划提供技术支撑”），需体现“本文研究的作用”，无则填\"无\"。\n",
    "\n",
    "8. \"preliminary_judgment\"：是否值得后续阅读的初步判断\n",
    "   - 定义：基于上述信息，给出“值得”或“不值得”的判断，并简要说明理由（如“值得，因研究主题与医学影像分割方向高度相关，核心方法创新且关键结果显著”“不值得，因研究主题与用户关注的自然语言处理领域无关”），理由需紧扣提取的信息，不主观臆断。\n",
    "\n",
    "## 二、输出格式要求（必须严格遵守，不可增减字段或修改格式）\n",
    "1. 仅输出JSON文本，无任何额外解释性文字、换行或注释；\n",
    "2. JSON的键名需与上述8个类别完全一致（如\"paper_basic_info\"“research_topic”），不可修改；\n",
    "3. 每个键对应的值为字符串类型，若信息包含多个要点，用“；”分隔，语言需精炼、无冗余，优先使用论文原文中的核心表述；\n",
    "4. 无对应信息时，值必须填写“无”（不可留空或省略字段）。\n",
    "5. 除paper_basic_info外，其余的每个字段回答应该精简，每个字段不要超过600个字符。\n",
    "\n",
    "## 三、提取规则（需严格遵守）\n",
    "1. 准确性：所有信息必须来自用户提供的“标题文本”和“摘要文本”，不可添加文本外的信息，不可主观推测或编造；\n",
    "2. 完整性：8个信息类别需全部覆盖，无信息则填“无”，不可遗漏任何字段；\n",
    "3. 简洁性：每个类别的值需精炼，去除重复表述（如“核心方法”中已提及的模型，无需在“研究主题”中重复完整模型名），避免长句堆砌。\n",
    "\n",
    "## 四、输入示例（供参考，用户实际输入为具体论文的标题和摘要文本）\n",
    "用户输入：\n",
    "标题文本：基于注意力机制的轻量化U-Net在小样本CT影像分割中的应用\n",
    "摘要文本：作者：张三；李四。现有传统U-Net模型在小样本医学CT影像分割任务中，因参数冗余导致分割精度低（仅78%），且推理速度慢，无法满足临床实时诊断需求。本文以“提升小样本CT影像分割精度与速度”为目标，提出一种融合通道注意力模块的轻量化U-Net模型（LA-U-Net），通过减少卷积层参数、添加注意力权重优化特征提取。实验在LIDC小样本CT数据集上验证，结果显示LA-U-Net的Dice系数达0.89，较传统U-Net提升11%，推理速度提升2倍。该研究为临床小样本CT影像的快速精准分割提供了可行方案，适用于基层医院诊断场景。\n",
    "\n",
    "## 五、输出示例（供参考，需严格模仿此格式）\n",
    "{\n",
    "  \"paper_basic_info\": {\n",
    "    \"title\": \"基于注意力机制的轻量化U-Net在小样本CT影像分割中的应用\",\n",
    "    \"authors\": \"张三；李四\",\n",
    "    \"keywords\": \"注意力机制；轻量化U-Net；小样本CT影像；影像分割\"\n",
    "  },\n",
    "  \"research_topic\": \"轻量化U-Net模型在小样本CT影像分割中的应用研究\",\n",
    "  \"existing_problem\": \"传统U-Net模型在小样本CT影像分割中精度低（仅78%）；推理速度慢，无法满足临床实时诊断需求\",\n",
    "  \"research_goal\": \"提出融合通道注意力模块的轻量化U-Net模型（LA-U-Net），提升小样本CT影像分割精度与推理速度\",\n",
    "  \"core_method\": \"设计融合通道注意力的轻量化U-Net模型（LA-U-Net）；减少卷积层参数；在LIDC小样本CT数据集上进行实验验证\",\n",
    "  \"key_result\": \"LA-U-Net在LIDC数据集上Dice系数达0.89，较传统U-Net提升11%；推理速度提升2倍\",\n",
    "  \"research_significance\": \"为临床小样本CT影像的快速精准分割提供可行方案；适用于基层医院诊断场景\",\n",
    "  \"preliminary_judgment\": \"值得，因研究主题聚焦CT影像分割（若用户关注医学影像领域则高度相关），核心方法有创新，关键结果数据显著且应用价值明确\"\n",
    "}\n",
    "\n",
    "现在，请接收用户提供的论文「标题文本」和「摘要文本」，严格按照上述要求提取信息并输出JSON。\n",
    "\"\"\"\n",
    "\n",
    "from langchain_core.prompts import ChatPromptTemplate\n",
    "\n",
    "# 第一段内容阅读提示\n",
    "first_read_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文文本选段：{context}\n",
    "阅读提示：\n",
    "1、阅读建议：{read_tip}\n",
    "\"\"\")\n",
    "\n",
    "# 后续内容阅读提示\n",
    "next_read_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文文本选段：{context}\n",
    "阅读提示：\n",
    "1、阅读建议：{read_tip}\n",
    "2、论文信息：（基于已读取内容）{paper_info}\n",
    "3、当前总结：（基于已读取内容）{summary}\n",
    "\"\"\")\n",
    "\n",
     "def merge_paper_basic_info(paper_data):\n",
     "    \"\"\"\n",
     "    Merge the paper_basic_info fields into one text paragraph.\n",
     "    :param paper_data: dict with all paper info (expects a paper_basic_info key)\n",
     "    :return: merged basic-info text string\n",
     "    \"\"\"\n",
     "    # Extract the paper_basic_info sub-dict; fall back to an empty dict.\n",
     "    basic_info = paper_data.get(\"paper_basic_info\", {})\n",
     "    \n",
     "    # Pull each field, defaulting to \"无\" (\"none\") when absent.\n",
     "    title_content = basic_info.get(\"title\", \"无\")  # full title, copied verbatim\n",
     "    authors_content = basic_info.get(\"authors\", \"无\")  # authors, semicolon-separated\n",
     "    keywords_content = basic_info.get(\"keywords\", \"无\")  # keywords, semicolon-separated\n",
     "    \n",
     "    # Join the fields into one paragraph, separated by full-width semicolons;\n",
     "    # the Chinese labels mirror the field definitions in the system prompt.\n",
     "    merged_text = (\n",
     "        f\"论文完整标题：{title_content}；\"\n",
     "        f\"摘要中提及的作者姓名，若摘要未提及则填'无'，多名作者用分号分隔：{authors_content}；\"\n",
     "        f\"从标题/摘要中提取的核心关键词，需是领域内核心术语，如研究主题、方法、对象等，3-5个为宜，用分号分隔，无则填'无'：{keywords_content}\"\n",
     "    )\n",
     "    \n",
     "    return merged_text\n",
    "\n",
    "\n",
     "def merge_other_research_fields(paper_data):\n",
     "    \"\"\"\n",
     "    Merge all fields except paper_basic_info into one text paragraph.\n",
     "    :param paper_data: dict with all paper info\n",
     "    :return: merged research-info text string\n",
     "    \"\"\"\n",
     "    # Map each field key to its (Chinese label, description) pair; the\n",
     "    # wording mirrors the field definitions given in the system prompt.\n",
     "    field_mapping = {\n",
     "        \"research_topic\": (\n",
     "            \"研究主题\",\n",
     "            \"论文聚焦的核心领域、具体研究方向或核心问题范畴（如“深度学习在医学CT影像分割中的应用研究”“基于强化学习的自动驾驶路径规划优化”），需从标题和摘要中整合，避免过于宽泛：\"\n",
     "        ),\n",
     "        \"existing_problem\": (\n",
     "            \"现有研究存在的问题（研究背景痛点）\",\n",
     "            \"摘要中提及的「当前领域内未解决的缺陷、不足或挑战」（如“传统U-Net模型在小样本医学影像分割中精度低”“现有路径规划算法未考虑实时交通拥堵动态变化”），需直接对应摘要中“前人研究的不足”描述，无则填'无'：\"\n",
     "        ),\n",
     "        \"research_goal\": (\n",
     "            \"研究目标\",\n",
     "            \"本文明确要解决的「具体问题」或「达成的具体目标」（如“提出一种轻量化改进U-Net模型，提升小样本CT影像分割精度”“设计一种融合实时交通数据的强化学习路径规划算法”），需体现“本文要做什么”，而非“做了什么”：\"\n",
     "        ),\n",
     "        \"core_method\": (\n",
     "            \"核心方法/技术手段\",\n",
     "            \"摘要中提及的「本文用于解决问题的关键方法、模型、实验设计或技术路径」（如“基于注意力机制改进U-Net模型；采用K折交叉验证进行实验”“提出双分支特征融合网络；使用Cityscapes数据集训练验证”），需包含“方法名称+核心操作”，无则填'无'：\"\n",
     "        ),\n",
     "        \"key_result\": (\n",
     "            \"关键实验/研究结果\",\n",
     "            \"摘要中提及的「最核心、最具代表性的实验结论或研究发现」（需包含具体数据或明确结论，如“改进模型在LIDC数据集上Dice系数达0.89，较传统U-Net提升8%”“验证了所提算法在拥堵场景下路径规划效率提升20%”），避免笼统表述，无则填'无'：\"\n",
     "        ),\n",
     "        \"research_significance\": (\n",
     "            \"研究意义/价值\",\n",
     "            \"摘要中提及的「本文研究的理论价值或实际应用价值」（如“为小样本医学影像分割提供新方法，助力临床实时诊断”“为自动驾驶在复杂交通场景下的路径规划提供技术支撑”），需体现“本文研究的作用”，无则填'无'：\"\n",
     "        ),\n",
     "        \"preliminary_judgment\": (\n",
     "            \"是否值得后续阅读的初步判断\",\n",
     "            \"基于上述信息，给出“值得”或“不值得”的判断，并简要说明理由（如“值得，因研究主题与医学影像分割方向高度相关，核心方法创新且关键结果显著”“不值得，因研究主题与用户关注的自然语言处理领域无关”），理由需紧扣提取的信息，不主观臆断：\"\n",
     "        )\n",
     "    }\n",
     "    \n",
     "    # Concatenate the fields one by one.\n",
     "    merged_parts = []\n",
     "    for field_key, (field_name, field_desc) in field_mapping.items():\n",
     "        # Missing fields default to \"无\" (\"none\").\n",
     "        field_content = paper_data.get(field_key, \"无\")\n",
     "        # # Alternative format that also includes the description text:\n",
     "        # part = f\"{field_name}：{field_desc}{field_content}\"\n",
     "        part = f\"{field_name}：{field_content}\"\n",
     "        merged_parts.append(part)\n",
     "    \n",
     "    # Join all parts with a full-width semicolon + newline for readability.\n",
     "    return \"；\\n\".join(merged_parts)\n",
    "\n",
    "my_abstract_json_res = None\n",
    "\n",
    "def abstract_reader(state: State):    \n",
    "    text_index = state[\"index\"]\n",
    "    user_content = None\n",
    "    if (text_index == 1):\n",
    "        user_content = first_read_prompt.invoke({\"context\": state[\"text\"], \"read_tip\": state[\"read_tip\"]}).messages[0].content\n",
    "    else:\n",
    "        my_summary = state[\"my_summary\"]\n",
    "        # 生成论文基础信息文本\n",
    "        paper_info = merge_paper_basic_info(my_summary)\n",
    "        # 生成其他研究字段合并文本\n",
    "        summary = merge_other_research_fields(my_summary)\n",
    "        user_content = next_read_prompt.invoke({\"context\": state[\"text\"], \"read_tip\": state[\"read_tip\"],\n",
    "                                                 \"paper_info\": state[\"paper_info\"], \"summary\": state[\"summary\"]}).messages[0].content\n",
    "    json_res = json_llm.invoke([{\n",
    "            \"role\": \"system\",\n",
    "            \"content\": abstract_reader_system_msg,\n",
    "        },{\n",
    "            \"role\": \"user\",\n",
    "            \"content\": user_content,\n",
    "        }]\n",
    "    )\n",
    "    res = json.loads(json_res.content)\n",
    "\n",
    "    global my_abstract_json_res\n",
    "    my_abstract_json_res = res\n",
    "    \n",
    "    return {\"my_summary\": res}\n",
    "\n",
    "res = abstract_reader({\"index\": 1, \"text\": res1[0], \"read_tip\": \"这是分割后论文片段的第一段（前3600字符），论文的标题和作者极有可能出现在这个地方，输入论文片段文本为markdown格式\"})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "c331a31d-43ab-4e9c-8ed9-0b0f17e7e4da",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'my_summary': {'paper_basic_info': {'title': 'Attention Is All You Need', 'authors': 'Niki Parmar；Ashish Vaswani；Llion Jones；Noam Shazeer；Jakob Uszkoreit；Aidan N. Gomez；Łukasz Kaiser；Illia Polosukhin', 'keywords': 'Transformer；attention mechanism；machine translation；sequence transduction；neural networks'}, 'research_topic': '基于纯注意力机制的序列转换模型研究', 'existing_problem': '主流序列转换模型依赖复杂的循环或卷积神经网络，难以并行化，训练耗时长', 'research_goal': '提出一种完全基于注意力机制、摒弃循环与卷积的新型网络架构Transformer，提升模型训练效率与翻译质量', 'core_method': '设计纯注意力驱动的Transformer架构；采用缩放点积注意力与多头注意力机制；在八个GPU上训练3.5天验证模型性能', 'key_result': '在WMT 2014英译德任务上达到28.4 BLEU，超过此前最佳结果2 BLEU以上；在英译法任务上取得41.8 BLEU的单模型新纪录', 'research_significance': '为序列建模提供高效可并行的新架构，显著降低训练成本，并在机器翻译和英语句法分析任务中展现良好泛化能力', 'preliminary_judgment': '值得，因论文提出革命性Transformer架构，核心方法创新性强，实验结果显著超越已有模型，对后续NLP研究具有重大影响'}}\n"
     ]
    }
   ],
   "source": [
    "print(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "4889a735-5f91-40f6-a32c-0d75d052cda3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'paper_basic_info': {'title': 'Attention Is All You Need',\n",
       "  'authors': 'Niki Parmar；Ashish Vaswani；Llion Jones；Noam Shazeer；Jakob Uszkoreit；Aidan N. Gomez；Łukasz Kaiser；Illia Polosukhin',\n",
       "  'keywords': 'Transformer；attention mechanism；machine translation；sequence transduction；neural networks'},\n",
       " 'research_topic': '基于纯注意力机制的序列转换模型研究',\n",
       " 'existing_problem': '主流序列转换模型依赖复杂的循环或卷积神经网络，难以并行化，训练耗时长',\n",
       " 'research_goal': '提出一种完全基于注意力机制、摒弃循环与卷积的新型网络架构Transformer，提升模型训练效率与翻译质量',\n",
       " 'core_method': '设计纯注意力驱动的Transformer架构；采用缩放点积注意力与多头注意力机制；在八个GPU上训练3.5天验证模型性能',\n",
       " 'key_result': '在WMT 2014英译德任务上达到28.4 BLEU，超过此前最佳结果2 BLEU以上；在英译法任务上取得41.8 BLEU的单模型新纪录',\n",
       " 'research_significance': '为序列建模提供高效可并行的新架构，显著降低训练成本，并在机器翻译和英语句法分析任务中展现良好泛化能力',\n",
       " 'preliminary_judgment': '值得，因论文提出革命性Transformer架构，核心方法创新性强，实验结果显著超越已有模型，对后续NLP研究具有重大影响'}"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "my_abstract_json_res"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "e72a7309-3854-4e02-8da1-b8fa14912ac9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 评估模型节点\n",
    "from openai import OpenAI\n",
    "import os\n",
    "import json\n",
    "\n",
    "abstract_evaluator_system_msg = \"\"\"你是一个论文阅读助手，专注于论文开头的标题和摘要部分。一般一篇论文的典型结构是，在论文开头会有论文的标题和摘要。现对你的工作描述如下：\n",
    "一、角色与任务目标​\n",
    "你需扮演 “论文文本片段分析助手”，核心任务是：基于输入的论文文本片段（含当前截取片段 + 已读历史片段，如有），判断论文标题是否完整、摘要部分是否已读取完毕；若未完毕，指引继续读取下一片段，若已完毕，确认结束。​\n",
    "二、输入信息​\n",
    "需接收两类信息：​\n",
    "当前论文文本截取片段（可能包含标题、作者、摘要片段、部分引言等内容）；​\n",
    "一些文本描述信息和相关上下文信息。​\n",
    "三、核心判断逻辑​\n",
    "（一）先判断 “标题完整性”​\n",
    "若当前 + 历史片段中，标题存在以下情况，判定 “标题未完整”，需继续读取（返回 NEXT）：​\n",
    "标题仅显示部分内容（如末尾为 “...”“-”，或句子未结束，如 “基于深度学习的图像分割技术在”）；​\n",
    "未出现完整标题（仅显示作者、机构，或直接开始摘要内容但无标题）。​\n",
    "若标题文字完整（语义连贯、无截断，含研究主题、核心对象 / 方法，无明显缺失），进入 “摘要完整性判断”。​\n",
    "（二）再判断 “摘要完整性”​\n",
    "摘要完整需满足：片段中包含摘要的5 类核心要素（无遗漏），且片段末尾无 “摘要未结束” 的信号（如语义截断、未完成句子），同时无 “引言启动” 信号（如出现 “1. 引言”“1. Introduction”“一、研究背景” 等）。​\n",
    "摘要核心要素判定标准：​\n",
    "必须包含：研究背景 / 待解决的领域问题（如 “现有 XX 方法存在 XX 缺陷”）、研究目标（如 “本文旨在解决 XX 问题”）、核心方法 / 技术路径（如 “提出 XX 模型 / 采用 XX 实验设计”）、关键结果 / 发现（如 “在 XX 数据集上准确率达 XX%”）、研究结论 / 意义（如 “为 XX 领域提供 XX 参考”）；​\n",
    "若当前 + 历史片段中，缺失任意 1 类核心要素，或要素描述不完整（如仅提 “提出新方法” 但未说明方法名称 / 逻辑），判定 “摘要未完毕”；​\n",
    "若片段末尾出现 “摘要”“Abstract” 的结束标识（如 “摘要结束”“Keywords”“关键词”，或直接衔接引言开头，或者出现明显代表正式文本开始的标题文字），且 5 类要素完整，判定 “摘要已完毕”。​\n",
    "四、输出格式（严格 JSON）​\n",
    "仅允许输出以下结构的 JSON，无额外文字：​\n",
    "{​\n",
    "\"judgment\": \"END\" 或 \"NEXT\",​\n",
    "\"reading_suggestion\": \"若 judgment 为 END：说明标题已完整 + 摘要 5 类要素无缺失，确认无需继续读取；若 judgment 为 NEXT：先总结当前已读内容（含完整标题 / 部分标题、摘要已获取的要素），再明确下一片段需重点补充的缺失要素（如 “当前已读标题为 XX，摘要已获取研究背景，需补充核心方法与关键结果”），建议关注片段是否出现 “Keywords”“引言” 等标识\"​\n",
    "}​\n",
    "五、约束​\n",
    "reading_suggestion 字数需少于 800 字，语言简洁，仅聚焦 “已读总结 + 下一段关注重点”，不展开无关内容。\n",
    "\"\"\"\n",
    "\n",
    "# 后续内容阅读提示\n",
    "judge_next_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"论文文本选段：{context}\n",
    "阅读提示：\n",
    "1、论文信息：（基于已读取内容）{paper_info}\n",
    "2、当前总结：（基于已读取内容）{my_summary}\n",
    "\"\"\")\n",
    "\n",
    "my_json_res = None\n",
    "\n",
     "def abstract_evaluator(state: State):\n",
     "    \"\"\"Evaluator node: ask the LLM whether the title/abstract reading is done.\n",
     "\n",
     "    :param state: State with text and my_summary (summary extracted so far)\n",
     "    :return: {\"next_or_end\": \"NEXT\" or \"END\", \"read_tip\": <suggestion text>}\n",
     "    \"\"\"\n",
     "    my_summary = state[\"my_summary\"]\n",
     "    # Build the basic-info paragraph from the current summary.\n",
     "    paper_info = merge_paper_basic_info(my_summary)\n",
     "    # Build the merged text of the remaining research fields.\n",
     "    summary = merge_other_research_fields(my_summary)\n",
     "    user_content = judge_next_prompt.invoke({\"context\": state[\"text\"],\n",
     "                                                 \"paper_info\": paper_info, \"my_summary\": summary}).messages[0].content\n",
     "    json_res = json_llm.invoke([{\n",
     "            \"role\": \"system\",\n",
     "            \"content\": abstract_evaluator_system_msg,\n",
     "        },{\n",
     "            \"role\": \"user\",\n",
     "            \"content\": user_content,\n",
     "        }]\n",
     "    )\n",
     "    res = json.loads(json_res.content)\n",
     "\n",
     "    # Keep the raw parsed JSON in a module-level variable for inspection.\n",
     "    global my_json_res\n",
     "    my_json_res = res\n",
     "    \n",
     "    return {\"next_or_end\": res[\"judgment\"], \"read_tip\": res[\"reading_suggestion\"]}\n",
    "\n",
    "before_res = res\n",
    "res = abstract_evaluator({\"index\": 1, \"text\": res1[0], \"my_summary\": my_abstract_json_res})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "cff59fab-861b-4d24-aa94-0b0a4b822f6b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'next_or_end': 'END', 'read_tip': \"标题已完整，为 'Attention Is All You Need'。摘要部分包含全部5类核心要素：研究背景（主流序列转换模型依赖RNN/CNN，难以并行化）、研究目标（提出完全基于注意力的Transformer架构）、核心方法（采用缩放点积注意力、多头注意力，摒弃循环与卷积）、关键结果（英-德翻译28.4 BLEU，英-法41.8 BLEU，显著优于已有模型）、研究结论（Transformer可并行、训练成本低、泛化能力强）。摘要末尾已出现'Keywords'隐含标识（作者贡献说明及会议信息），且下文紧接'1 Introduction'，明确进入引言部分。因此，摘要已完整读取，无需继续读取。\"}\n"
     ]
    }
   ],
   "source": [
    "print(res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "55634e7f-7298-4a2f-a95c-2dd6dd15f610",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 第二次阅读，精读分割论文（三步）\n",
    "from typing import Dict\n",
    "\n",
     "class SplitState(TypedDict):\n",
     "    # State for the second (close-reading) pass over the split paper.\n",
     "    index: int    # index of the current text chunk\n",
     "    text: str     # current paper text chunk\n",
     "\n",
     "    paper_info: str         # merged basic-info text of the paper\n",
     "    abstract_summary: Dict  # JSON summary produced by the first pass\n",
     "\n",
     "    split_tip: str  # structure analysis / split advice from paper_split_adviser\n",
     "    split_res: str  # split result (not yet used in the visible cells)\n",
     "    next_tip: str   # interpretation of the previous chunk, carried forward"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "997f7b0a-bf72-4502-af66-9c35f1108815",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 第一步 —— 文本段结构分析与分割建议生成\n",
    "\n",
    "adviser_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是论文文本分割的“前置分析师”，需基于论文核心信息和文本内容，精准判断结构并给出可落地的分割建议。\n",
    "\n",
    "# 输入信息\n",
    "1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{此处填入前期提取的论文标题、作者、关键词}\n",
    "   - research_topic：{此处填入前期提取的研究主题}\n",
    "   - 上一段文本解读信息：{此处填入上一段文本的结构分析、核心小结（若有）}\n",
    "2. 当前待分析文本段：{此处填入用户提供的当前论文文本段}\n",
    "\n",
    "# 任务要求\n",
    "1. 分析文本段结构：\n",
    "   - 先判断当前文本属于论文的哪个小节/模块（如引言-研究背景、引言-研究gap、方法-数据预处理、方法-模型架构、结果-图表分析、讨论-局限性），需结合论文主题（research_topic）和上一段内容推断；\n",
    "   - 再拆解文本的内部逻辑（如“方法-数据预处理”可能包含“数据来源→数据清洗→数据标注”三步，“结果-图表分析”可能包含“图1解读→表1对比→核心结论”）。\n",
    "\n",
    "2. 总结文本段主要内容：\n",
    "   - 提炼核心信息（如“介绍了数据集A的来源为XX，样本量1000例，标注方式为双盲标注”“推导了公式1，用于解决XX问题，关键假设是XX”），避免冗余，不超过150字符。\n",
    "\n",
    "3. 生成分割建议：\n",
    "   - 明确分割依据（需对应上述“内部逻辑”），如“按‘数据来源-数据清洗-数据标注’三部分分割，每部分对应一个完整信息点”“按‘公式1推导-公式1应用场景’分割，两部分分别对应‘理论依据-实际用途’”；\n",
    "   - 说明建议理由（如“因‘数据来源’和‘数据清洗’是两个独立预处理步骤，分开分割可避免信息混杂”“因‘公式推导’是理论，‘应用场景’是实践，拆分后更易理解逻辑链”）。\n",
    "\n",
    "# 输出格式\n",
    "1. 文本段所属小节/模块：{如“方法-数据预处理”“结果-实验对比”}\n",
    "2. 文本段内部逻辑拆解：{如“数据来源→数据清洗→数据标注”“图1（准确率对比）→表1（消融实验）→核心结论”}\n",
    "3. 文本段主要内容总结：{核心信息，≤150字符}\n",
    "4. 分割建议：{明确分割依据+建议理由，≤200字符}\n",
    "\"\"\"\n",
    "\n",
    "adviser_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"1. 前期核心信息：\n",
    "   - paper_basic_info：{paper_info}\n",
    "   - research_topic：{research_topic}\n",
    "   - 上一段文本解读信息：{next_tip}\n",
    "2. 当前待分析文本段：{text}\n",
    "\"\"\")\n",
    "\n",
     "def paper_split_adviser(state: SplitState):\n",
     "    \"\"\"Step 1 of the close-reading pass: analyze the chunk's structure.\n",
     "\n",
     "    Asks the (plain, non-JSON-mode) LLM to classify the chunk, break down\n",
     "    its internal logic and propose how to split it.\n",
     "\n",
     "    :param state: SplitState with paper_info, abstract_summary, next_tip, text\n",
     "    :return: {\"split_tip\": <LLM analysis text>}\n",
     "    \"\"\"\n",
     "    user_content = adviser_prompt.invoke({\n",
     "        \"paper_info\": state[\"paper_info\"], \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
     "        \"next_tip\": state[\"next_tip\"], \"text\": state[\"text\"]}).messages[0].content\n",
     "    \n",
     "    # Free-form advice is wanted here, so the unbound llm (not json_llm) is used.\n",
     "    split_tip = llm.invoke([{\n",
     "            \"role\": \"system\",\n",
     "            \"content\": adviser_system_msg,\n",
     "        },{\n",
     "            \"role\": \"user\",\n",
     "            \"content\": user_content,\n",
     "        }]\n",
     "    )\n",
     "    \n",
     "    return {\"split_tip\": split_tip.content}\n",
    "\n",
    "paper_info = merge_paper_basic_info(my_abstract_json_res)\n",
    "next_tip = \"这是论文中的第一段分割文本，还没有对上一段文本的分析\"\n",
    "adviser_res = paper_split_adviser({\"index\": 1, \"text\": res1[0], \"paper_info\": paper_info, \"abstract_summary\": my_abstract_json_res, \"next_tip\": next_tip})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "576ad92e-89df-4990-ab66-de06aa7d5959",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'split_tip': AIMessage(content='1. 文本段所属小节/模块：**引言-研究背景**  \\n2. 文本段内部逻辑拆解：**版权说明→论文标题与作者信息→摘要→贡献说明→会议信息→引言开头：RNN/LSTM在序列建模中的主导地位**  \\n3. 文本段主要内容总结：介绍RNN、LSTM等在序列建模中的主导地位，引出其局限性，为提出基于纯注意力机制的Transformer模型做铺垫。  \\n4. 分割建议：按“元信息（版权/作者/摘要/贡献/会议）→引言正文”分割，因元信息与科学论述无关，分离可提升正文逻辑连贯性。', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 152, 'prompt_tokens': 1628, 'total_tokens': 1780, 'completion_tokens_details': None, 'prompt_tokens_details': None}, 'model_name': 'qwen3-235b-a22b-instruct-2507', 'system_fingerprint': None, 'id': 'chatcmpl-1fa1c40c-752c-91cc-8fa4-99cb1acf1109', 'service_tier': None, 'finish_reason': 'stop', 'logprobs': None}, id='run--109b1fba-6d24-492c-adac-378f5473f974-0', usage_metadata={'input_tokens': 1628, 'output_tokens': 152, 'total_tokens': 1780, 'input_token_details': {}, 'output_token_details': {}})}\n"
     ]
    }
   ],
   "source": [
    "print(adviser_res)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "5bab48a3-21e9-4db5-b87a-117616b299e9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---shenll---\n",
      "**小结**：声明版权许可信息，允许在学术与新闻作品中复制本文图表，需注明原作者。  \n",
      "**论文选段**：Provided proper attribution is provided, Google hereby grants permission to reproduce the tables and figures in this paper solely for use in journalistic or scholarly works.\n",
      "\n",
      "---shenll---\n",
      "**小结**：呈现论文标题、作者及其所属机构与邮箱，标注贡献标注符，为后续署名与联系提供依据。  \n",
      "**论文选段**：  \n",
      "## **Attention Is All You Need**\n",
      "\n",
      "**Niki Parmar** _[∗]_  \n",
      "Google Research  \n",
      "```\n",
      "nikip@google.com\n",
      "```\n",
      "\n",
      "**Ashish Vaswani** _[∗]_  \n",
      "Google Brain  \n",
      "```\n",
      "avaswani@google.com\n",
      "```\n",
      "\n",
      "**Llion Jones** _[∗]_  \n",
      "Google Research  \n",
      "```\n",
      "llion@google.com\n",
      "```\n",
      "\n",
      "**Noam Shazeer** _[∗]_  \n",
      "Google Brain  \n",
      "```\n",
      "noam@google.com\n",
      "```\n",
      "\n",
      "**Jakob Uszkoreit** _[∗]_  \n",
      "Google Research  \n",
      "```\n",
      "usz@google.com\n",
      "```\n",
      "\n",
      "**Aidan N. Gomez** _[∗†]_  \n",
      "University of Toronto  \n",
      "```\n",
      "aidan@cs.toronto.edu\n",
      "```\n",
      "\n",
      "**Łukasz Kaiser** _[∗]_  \n",
      "Google Brain  \n",
      "```\n",
      "lukaszkaiser@google.com\n",
      "```\n",
      "\n",
      "**Illia Polosukhin** _[∗‡]_  \n",
      "```\n",
      "illia.polosukhin@gmail.com\n",
      "```\n",
      "\n",
      "---shenll---\n",
      "**小结**：摘要指出传统序列模型依赖RNN或CNN，提出全注意力架构Transformer，提升训练效率与翻译性能。  \n",
      "**论文选段**：  \n",
      "**Abstract**  \n",
      "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks that include an encoder and a decoder. The best performing models also connect the encoder and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two machine translation tasks show these models to be superior in quality while being more parallelizable and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task, improving over the existing best results, including ensembles, by over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training costs of the best models from the literature. We show that the Transformer generalizes well to other tasks by applying it successfully to English constituency parsing both with large and limited training data.\n",
      "\n",
      "---shenll---\n",
      "**小结**：说明作者贡献标注规则及各作者在模型设计、实现、优化和代码库建设中的具体分工。  \n",
      "**论文选段**：  \n",
      "_∗_ Equal contribution. Listing order is random. Jakob proposed replacing RNNs with self-attention and started the effort to evaluate this idea. Ashish, with Illia, designed and implemented the first Transformer models and has been crucially involved in every aspect of this work. Noam proposed scaled dot-product attention, multi-head attention and the parameter-free position representation and became the other person involved in nearly every detail. Niki designed, implemented, tuned and evaluated countless model variants in our original codebase and tensor2tensor. Llion also experimented with novel model variants, was responsible for our initial codebase, and efficient inference and visualizations. Łukasz and Aidan spent countless long days designing various parts of and implementing tensor2tensor, replacing our earlier codebase, greatly improving results and massively accelerating our research.  \n",
      "_†_ Work performed while at Google Brain.  \n",
      "_‡_ Work performed while at Google Research.\n",
      "\n",
      "---shenll---\n",
      "**小结**：标注论文发表于NIPS 2017会议，提供学术交流与出版信息。  \n",
      "**论文选段**：31st Conference on Neural Information Processing Systems (NIPS 2017), Long Beach, CA, USA.\n",
      "\n",
      "---shenll---\n",
      "**小结**：引言指出RNN、LSTM等在序列建模中的主导地位，为提出Transformer模型提供背景与动机。  \n",
      "**论文选段**：**1 Introduction**  \n",
      "Recurrent neural networks, long short-term memory [13] and gated recurrent [7] neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation [35, 2, 5]. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures [38, 24, 15].\n"
     ]
    }
   ],
   "source": [
    "# Step 2 -- text splitting with formula / table correction\n",
    "\n",
    "# System prompt for the splitter LLM (kept in Chinese, the model's working\n",
    "# language): split the chunk exactly per the step-1 advice, fix formula and\n",
    "# table formatting, and emit 'summary + excerpt' segments separated by the\n",
    "# sentinel string '---shenll---'.\n",
    "splitter_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是论文文本分割的“执行器”，位于第一步（文本段结构分析与分割建议生成）前置工作之后。需按前期建议分割文本，同步纠正格式问题，并补充精简小结。\n",
    "\n",
    "# 输入信息\n",
    "1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{此处填入前期提取的论文标题、作者、关键词}\n",
    "   - research_topic：{此处填入前期提取的研究主题}\n",
    "2. 第一步生成的分割建议：{此处填入第一步的分割依据+建议理由}\n",
    "3. 当前待分割文本段：{此处填入用户提供的当前论文文本段}\n",
    "\n",
    "# 任务要求\n",
    "1. 文本分割规则：\n",
    "   - 严格按第一步的分割建议执行，若建议分N段则输出N段；\n",
    "   - 每段输出需包含“小结+论文选段”，总字符数≤780（含标点，小结占50-80字符，选段占剩余字符）；\n",
    "   - 选段需完整保留原文语义（不可删减关键信息，如公式、数据、实验步骤），仅修正格式错误。\n",
    "\n",
    "2. 公式与表格纠正：\n",
    "   - 公式纠正：确保符号完整（如“∑”“∂”不缺失）、运算符正确（如“×”不显示为“x”、“≥”不显示为“>=”）、上下标格式规范（如“a₂”不显示为“a2”“L₁损失”不显示为“L1损失”）；\n",
    "   - 表格纠正：补充缺失的行列标签（如“表头：模型名称/准确率（%）”“行标签： baseline模型/本文模型”）、修正数据对应关系（如确保“模型A”对应“准确率85%”不颠倒），若表格解析严重混乱，需用文字简要还原核心逻辑（如“表格1核心：本文模型在A数据集准确率89%，比baseline高7%”）。\n",
    "\n",
    "3. 小结撰写规则：\n",
    "   - 精准提炼选段核心（如“选段介绍数据集A的样本量、来源及标注方式，为后续实验提供数据基础”“选段推导公式1，说明其用于优化模型损失函数的原理”）；\n",
    "   - 不添加主观评价，仅客观总结内容，控制在50-80字符。\n",
    "\n",
    "# 输出格式\n",
    "每段分割结果用“---shenll---”分隔，格式如下：\n",
    "**小结**：{50-80字符，客观总结选段核心}\n",
    "**论文选段**：{修正后的原文选段，采用markdown格式，确保公式/表格格式正确，与小结对应，总字符≤780}\n",
    "\n",
    "---shenll---\n",
    "（若有第二段，重复上述格式）\n",
    "\"\"\"\n",
    "\n",
    "# User-message template; placeholders are filled from the split state in\n",
    "# paper_text_splitter below.\n",
    "splitter_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{paper_info}\n",
    "   - research_topic：{research_topic}\n",
    "2. 第一步生成的分割建议：{split_tip}\n",
    "3. 当前待分割文本段：{text}\n",
    "\"\"\")\n",
    "\n",
    "def paper_text_splitter(state: SplitState):\n",
    "    \"\"\"Invoke the step-2 splitter LLM and return its reply as ``split_res``.\n",
    "\n",
    "    Builds the user message from the state, sends it together with the\n",
    "    splitter system prompt, prints the raw reply for inspection, and\n",
    "    returns it wrapped in a partial-state dict.\n",
    "    \"\"\"\n",
    "    # Fall back to a Chinese 'no information' placeholder when the abstract\n",
    "    # summary lacks a research topic.\n",
    "    prompt_vars = {\n",
    "        \"paper_info\": state[\"paper_info\"],\n",
    "        \"research_topic\": state[\"abstract_summary\"].get(\"research_topic\", \"没有相关信息\"),\n",
    "        \"split_tip\": state[\"split_tip\"],\n",
    "        \"text\": state[\"text\"],\n",
    "    }\n",
    "    user_content = splitter_prompt.invoke(prompt_vars).messages[0].content\n",
    "\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": splitter_system_msg},\n",
    "        {\"role\": \"user\", \"content\": user_content},\n",
    "    ]\n",
    "    split_res = llm.invoke(messages).content\n",
    "\n",
    "    print(split_res)\n",
    "    return {\"split_res\": split_res}\n",
    "\n",
    "# Smoke-test step 2 on the first chunk (res1[0]) of the Attention paper.\n",
    "paper_info = merge_paper_basic_info(my_abstract_json_res)\n",
    "# Step-1 adviser output, pasted verbatim so this cell can be re-run without\n",
    "# re-invoking the adviser LLM.\n",
    "split_tip = '1. 文本段所属小节/模块：**引言-研究背景**  \\n2. 文本段内部逻辑拆解：**版权说明→论文标题与作者信息→摘要→贡献说明→会议信息→引言开头：RNN/LSTM在序列建模中的主导地位**  \\n3. 文本段主要内容总结：介绍RNN、LSTM等在序列建模中的主导地位，引出其局限性，为提出基于纯注意力机制的Transformer模型做铺垫。  \\n4. 分割建议：按“元信息（版权/作者/摘要/贡献/会议）→引言正文”分割，因元信息与科学论述无关，分离可提升正文逻辑连贯性。'\n",
    "split_res = paper_text_splitter({\"index\": 1, \"text\": res1[0], \"paper_info\": paper_info, \"abstract_summary\": my_abstract_json_res, \"split_tip\": split_tip})\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "043fd8cd-9d26-466f-9986-2e284beda019",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "6"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Split the step-2 reply on the sentinel and drop empty pieces: the reply\n",
    "# typically starts with '---shenll---', which would otherwise leave a\n",
    "# leading empty string in the list and shift the segment indices by one.\n",
    "segments = [s.strip() for s in split_res[\"split_res\"].split(\"---shenll---\") if s.strip()]\n",
    "len(segments)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "03160d8c-319f-4edc-aab0-cf91d01449e3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'**小结**：引言开篇指出RNN、LSTM在序列建模中的主导地位，为引出其局限性与Transformer的必要性做铺垫。  \\n**论文选段**：**1 Introduction**  \\nRecurrent neural networks, long short-term memory [13] and gated recurrent [7] neural networks in particular, have been firmly established as state of the art approaches in sequence modeling and transduction problems such as language modeling and machine translation [35, 2, 5]. Numerous efforts have since continued to push the boundaries of recurrent language models and encoder-decoder architectures [38, 24, 15].'"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the final segment (the Introduction excerpt).\n",
    "segments[5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "ebfddf29-db9a-4392-8c47-032b8efdd1d1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Step 3 -- extract guidance for splitting / analysing the next text chunk\n",
    "\n",
    "# System prompt for the step-3 adviser LLM (kept in Chinese, the model's\n",
    "# working language): given the step-2 segments and step-1 structure analysis,\n",
    "# report what the current chunk covered, whether its topic is complete, and\n",
    "# what the next chunk's analysis should focus on.\n",
    "next_adviser_system_msg = \\\n",
    "\"\"\"# 任务角色\n",
    "你是论文文本分割的“后续引导者”，位于第一步（文本段结构分析与分割建议生成），第二步（文本分割与公式 / 表格纠正）前置工作之后。需基于当前分割结果，为下一段分析提供精准的关注方向。\n",
    "\n",
    "# 输入信息\n",
    "1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{此处填入前期提取的论文标题、作者、关键词}\n",
    "   - research_topic：{此处填入前期摘要提取的研究主题}\n",
    "   - core_method/research_goal：{此处填入前期摘要提取的核心方法/研究目标，二选一，优先选与当前文本关联的}\n",
    "2. 当前分割结果：{此处填入第二步输出的所有“小结+论文选段”，分隔符“---shenll---”}\n",
    "3. 第一步的结构分析：{此处填入第一步判断的“当前文本所属小节/模块”“内部逻辑拆解”}\n",
    "\n",
    "# 任务要求\n",
    "1. 明确当前文本的关键信息锚点：\n",
    "   - 当前小节/模块标题（需确认是否与第一步判断一致，若当前文本结尾提及下一小节标题，需补充记录，如“当前为‘方法-数据预处理’，结尾提及‘下一节介绍模型架构’，下一小节标题为‘模型架构设计’”）；\n",
    "   - 当前已覆盖的核心信息（如“已覆盖‘数据集A的来源、样本量、标注方式’”“已覆盖‘公式1推导及假设条件’”）。\n",
    "\n",
    "2. 判断当前主题完结状态：\n",
    "   - 明确当前小节/模块的主题是否完结（如“‘数据预处理’主题已完结，包含数据来源、清洗、标注三部分，无遗漏”“‘模型架构’主题未完结，仅介绍输入层，未提及隐藏层和输出层”）；\n",
    "   - 若未完结，说明“未覆盖的子主题”（如“‘模型架构’未覆盖隐藏层的注意力模块设计、输出层的激活函数选择”）。\n",
    "\n",
    "3. 提取下一段重点关注内容：\n",
    "   - 关联前期核心信息（如`core_method`是“改进U-Net”，则下一段需关注“改进的注意力模块参数、U-Net decoder层的调整”）；\n",
    "   - 补充“当前未解决的信息点”（如“当前提到‘采用K折交叉验证’，但未说明K值和验证指标，下一段需关注K值（如K=5）、验证指标（如Dice系数、IoU）”“当前提到‘实验结果优于baseline’，但未给出具体数据（如准确率数值），下一段需关注实验数据表格/图表”）；\n",
    "   - 若当前提及下一小节标题，需明确“下一小节的核心子主题”（如“下一小节为‘模型架构设计’，需重点关注注意力模块的结构、与传统U-Net的差异”）。\n",
    "\n",
    "# 输出格式\n",
    "1. 当前文本关键信息锚点：\n",
    "   - 小节/模块标题：{如“方法-数据预处理”“结果-实验对比（图1）”}\n",
    "   - 已覆盖核心信息：{如“数据集A（来源：XX数据库，样本量：1000例，标注方式：双盲标注）”“公式1推导（解决模型过拟合问题，假设条件：样本独立同分布）”}\n",
    "\n",
    "2. 当前主题完结状态：\n",
    "   - 完结/未完结：{选择其一}\n",
    "   - 说明：{如“完结，‘数据预处理’的3个子步骤均已覆盖”“未完结，‘模型架构’仅介绍输入层，缺少隐藏层和输出层描述”}\n",
    "\n",
    "3. 下一段重点关注内容：\n",
    "   - 核心方向（关联前期信息）：{如“围绕‘改进U-Net的注意力模块’，关注模块参数、与encoder层的连接方式”}\n",
    "   - 需补充的未解决信息点：{如“K折交叉验证的K值、验证指标；实验结果的具体数值（如准确率、召回率）”}\n",
    "   - 下一小节子主题（若有）：{如“模型架构设计的隐藏层注意力模块、输出层激活函数”}\n",
    "\"\"\"\n",
    "\n",
    "# User-message template; placeholders are filled from the split state in\n",
    "# next_read_adviser below.\n",
    "next_adviser_prompt = ChatPromptTemplate.from_template(\\\n",
    "\"\"\"1. 前期核心信息（不可忽略）：\n",
    "   - paper_basic_info：{paper_info}\n",
    "   - research_topic：{research_topic}\n",
    "   - core_method/research_goal：\n",
    "         core_method：{core_method}\n",
    "         research_goal：{research_goal}\n",
    "2. 当前分割结果：{split_res}\n",
    "3. 第一步的结构分析：{split_tip}\n",
    "\"\"\")\n",
    "\n",
    "def next_read_adviser(state: SplitState):\n",
    "    \"\"\"Invoke the step-3 adviser LLM and return its guidance as ``next_tip``.\n",
    "\n",
    "    Builds the user message from the state (falling back to a Chinese\n",
    "    'no information' placeholder for missing summary fields), sends it\n",
    "    with the adviser system prompt, and returns the reply wrapped in a\n",
    "    partial-state dict.\n",
    "    \"\"\"\n",
    "    summary = state[\"abstract_summary\"]\n",
    "    prompt_vars = {\n",
    "        \"paper_info\": state[\"paper_info\"],\n",
    "        \"research_topic\": summary.get(\"research_topic\", \"没有相关信息\"),\n",
    "        \"core_method\": summary.get(\"core_method\", \"没有相关信息\"),\n",
    "        \"research_goal\": summary.get(\"research_goal\", \"没有相关信息\"),\n",
    "        \"split_tip\": state[\"split_tip\"],\n",
    "        \"split_res\": state[\"split_res\"],\n",
    "    }\n",
    "    user_content = next_adviser_prompt.invoke(prompt_vars).messages[0].content\n",
    "\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": next_adviser_system_msg},\n",
    "        {\"role\": \"user\", \"content\": user_content},\n",
    "    ]\n",
    "    next_tip = llm.invoke(messages).content\n",
    "\n",
    "    return {\"next_tip\": next_tip}\n",
    "\n",
    "# Run step 3 on the step-2 result of the first chunk.\n",
    "paper_info = merge_paper_basic_info(my_abstract_json_res)\n",
    "# Step-1 advice, pasted verbatim (same literal as in the step-2 cell above).\n",
    "split_tip = '1. 文本段所属小节/模块：**引言-研究背景**  \\n2. 文本段内部逻辑拆解：**版权说明→论文标题与作者信息→摘要→贡献说明→会议信息→引言开头：RNN/LSTM在序列建模中的主导地位**  \\n3. 文本段主要内容总结：介绍RNN、LSTM等在序列建模中的主导地位，引出其局限性，为提出基于纯注意力机制的Transformer模型做铺垫。  \\n4. 分割建议：按“元信息（版权/作者/摘要/贡献/会议）→引言正文”分割，因元信息与科学论述无关，分离可提升正文逻辑连贯性。'\n",
    "my_split_res = split_res[\"split_res\"]\n",
    "next_res = next_read_adviser({\n",
    "    \"index\": 1,\n",
    "    \"text\": res1[0],\n",
    "    \"paper_info\": paper_info,\n",
    "    \"abstract_summary\": my_abstract_json_res,\n",
    "    \"split_tip\": split_tip,\n",
    "    \"split_res\": my_split_res\n",
    "})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "94479e5e-e238-4544-9842-86a5ed349400",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1. 当前文本关键信息锚点：  \n",
      "   - 小节/模块标题：引言-研究背景  \n",
      "   - 已覆盖核心信息：  \n",
      "     - 论文元信息完整（标题《Attention Is All You Need》、作者及其机构与邮箱、会议信息NIPS 2017）  \n",
      "     - 版权许可说明（允许在学术与新闻作品中复制图表，需注明原作者）  \n",
      "     - 作者贡献说明（标注了各作者在模型设计、实现、代码库建设中的具体角色）  \n",
      "     - 摘要核心内容（提出完全基于注意力机制的Transformer架构，摒弃RNN与CNN，提升训练效率与翻译性能，在机器翻译任务中达到SOTA）  \n",
      "     - 引言开头背景（RNN、LSTM和GRU在序列建模与机器翻译中占主导地位，为Transformer的提出提供动机）\n",
      "\n",
      "2. 当前主题完结状态：  \n",
      "   - 完结/未完结：未完结  \n",
      "   - 说明：当前仅完成引言部分的背景铺垫，即“RNN/LSTM的主导地位”，但尚未阐述其局限性（如序列依赖导致训练难以并行化、长距离依赖建模困难等），也未引出注意力机制的优势及Transformer的提出动机。完整的“研究背景→问题提出→方法创新”逻辑链尚未闭合。\n",
      "\n",
      "3. 下一段重点关注内容：  \n",
      "   - 核心方向（关联前期信息）：  \n",
      "     围绕“设计纯注意力驱动的Transformer架构”这一核心方法，下一段应重点关注：  \n",
      "     - RNN/CNN在序列建模中的具体局限性（尤其是并行化瓶颈与长程依赖问题）  \n",
      "     - 注意力机制如何缓解上述问题，为完全依赖注意力的架构提供理论依据  \n",
      "     - 向“完全基于注意力”的范式转变的合理性与必要性论证  \n",
      "   - 需补充的未解决信息点：  \n",
      "     - 当前摘要提到“更易并行化”“显著减少训练时间”，但未说明传统模型为何难以并行，需在下一段关注RNN的顺序计算特性及其对训练效率的制约  \n",
      "     - 摘要中提到“在8个GPU上训练3.5天”，但未交代模型规模、参数量或训练数据集细节，后续需关注训练配置与数据来源（如WMT数据集的具体使用方式）  \n",
      "     - “Transformer泛化性好”在句末提及，但未展开，后续可能涉及其在英语句法分析任务中的应用，需关注跨任务迁移能力的初步论证  \n",
      "   - 下一小节子主题（若有）：  \n",
      "     根据引言常规结构，下一段很可能进入“Related Work”或直接展开“Model Architecture”的动机部分。若跳过相关工作，则下一小节子主题应为：  \n",
      "     **“注意力机制的演进与多头注意力的引入动机”**，重点关注：  \n",
      "     - 缩放点积注意力（scaled dot-product attention）的设计初衷  \n",
      "     - 多头注意力（multi-head attention）如何增强模型表示能力  \n",
      "     - 位置编码（position representation）为何采用参数自由形式（parameter-free）  \n",
      "     （这些均已在贡献说明中提及，但尚未在正文中展开）\n"
     ]
    }
   ],
   "source": [
    "# Show the step-3 guidance for analysing the next chunk.\n",
    "print(next_res['next_tip'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "dd822209-64bf-4e7c-b941-8c113be9d995",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "26"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Parse the StableAvatar paper to markdown and split it into chunks with\n",
    "# the same recursive splitter used for the Attention paper.\n",
    "# (The mid-cell `len(md_text)` was a no-op -- only a cell's last expression\n",
    "# is displayed -- and the count is shown in the next cell anyway.)\n",
    "md_text = parse_pdf2md(\"./tmp/StableAvatar Infinite-Length Audio-Driven Avatar Video Generation.pdf\")\n",
    "res2 = recursive_splitter.split_text(md_text)\n",
    "len(res2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "9c3f998c-2ce7-4358-ac94-09019e6158f3",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "72218"
      ]
     },
     "execution_count": 48,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Character count of the parsed StableAvatar markdown.\n",
    "len(md_text)"
   ]
   }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
