{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:03:37.450019Z",
     "start_time": "2025-09-16T19:03:31.629348Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from llama_index.core import SimpleDirectoryReader\n",
    "\n",
    "# NOTE(review): hardcoded absolute path — consider moving to a DATA_DIR config constant\n",
    "reader = SimpleDirectoryReader(\n",
    "    input_dir=r'D:\\pythonProject17\\refine\\f44e1171-2b80-441c-b2cf-d7970d767496\\119b3497-6a3d-46f0-8446-3c5584f1a3a3'\n",
    ")"
   ],
   "id": "f01fd6082e815b6f",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:03:40.069620Z",
     "start_time": "2025-09-16T19:03:37.466326Z"
    }
   },
   "cell_type": "code",
   "source": "docs = reader.load_data()",
   "id": "de2cd67ea20183db",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:04:33.376406Z",
     "start_time": "2025-09-16T19:03:40.103621Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "from getpass import getpass\n",
    "\n",
    "# SECURITY FIX: never hardcode API keys in a notebook — the previous key was\n",
    "# committed in plain text and must be revoked. Read it from the environment,\n",
    "# prompting interactively only when it is missing.\n",
    "if 'DEEPSEEK_API_KEY' not in os.environ:\n",
    "    os.environ['DEEPSEEK_API_KEY'] = getpass('Enter DEEPSEEK_API_KEY: ')\n",
    "\n",
    "from llama_index.core import Settings\n",
    "from llama_index.llms.deepseek import DeepSeek\n",
    "from llama_index.core.embeddings import resolve_embed_model\n",
    "\n",
    "# Local BGE embedding model + DeepSeek chat LLM, registered as global defaults\n",
    "base_embed_model = resolve_embed_model(\"local:D:/pythonProject17/transformers/model_em/BAAI/bge-small-en-v1.5\")\n",
    "llm = DeepSeek(model='deepseek-chat')\n",
    "\n",
    "Settings.llm = llm\n",
    "Settings.embed_model = base_embed_model"
   ],
   "id": "46fb846873289fb2",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:04:19,815 - INFO - PyTorch version 2.5.1+cu121 available.\n",
      "2025-09-17 03:04:22,391 - INFO - Load pretrained SentenceTransformer: D:/pythonProject17/transformers/model_em/BAAI/bge-small-en-v1.5\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:04:33.815488Z",
     "start_time": "2025-09-16T19:04:33.388446Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from llama_index.core.response_synthesizers import Refine\n",
    "\n",
    "summarizer = Refine(llm=llm, verbose=True)"
   ],
   "id": "1b446be57f2dc97",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T18:49:18.613789Z",
     "start_time": "2025-09-16T18:48:38.940459Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Summarize each document with the Refine synthesizer, then persist to disk\n",
    "summary = []\n",
    "for doc in docs:\n",
    "    response = summarizer.get_response(\"你只要说这个文档做了什么\", [doc.text])\n",
    "    # Coerce to str: the synthesizer's return type is not guaranteed to be a plain string\n",
    "    summary.append(str(response))\n",
    "# BUG FIX: f.write() requires a single str, not a list; also use UTF-8 explicitly\n",
    "# so the Chinese summaries round-trip on any platform default encoding\n",
    "with open('./summary', 'w', encoding='utf-8') as f:\n",
    "    f.write('\\n\\n'.join(summary))"
   ],
   "id": "8d66edce8bbea6cb",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 02:48:40,056 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: for answer in answers:\\\n",
      "\\\n",
      "        local_answer ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 02:48:45,747 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 2620:100:6035:15::a27d:550f\\\n",
      "\\\n",
      "Connecting to uc...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 02:48:50,335 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 2620:100:6035:15::a27d:550f\\\n",
      "\\\n",
      "Connecting to uc...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 02:48:54,939 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: \"query\": query,\\\n",
      "\\\n",
      "                \"response\": ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 02:49:00,136 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: ---\n",
      "url: \"https://developers.llamaindex.ai/pyth...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 02:49:07,578 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 02:49:12,718 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:08:21.699110Z",
     "start_time": "2025-09-16T19:04:33.829497Z"
    }
   },
   "cell_type": "code",
   "source": [
    "prompt = \"你只要说这个文档做了什么\"\n",
    "\n",
    "# File that collects every per-document summary\n",
    "output_filename = \"document_summaries.txt\"\n",
    "\n",
    "# Accumulates one generated summary per document\n",
    "all_summaries = []\n",
    "\n",
    "print(f\"开始为 {len(docs)} 个文档生成摘要...\")\n",
    "\n",
    "# Summarize each Document object in turn\n",
    "for i, doc in enumerate(docs):\n",
    "    print(f\"\\n正在处理文档 {i+1}/{len(docs)} (ID: {doc.id_})...\")\n",
    "\n",
    "    # The synthesizer expects a list of text chunks, hence [doc.text].\n",
    "    # Top-level await works because Jupyter runs cells inside an asyncio loop.\n",
    "    response = await summarizer.aget_response(prompt, [doc.text])\n",
    "\n",
    "    # Coerce to str so slicing below and f.write() later are always safe,\n",
    "    # regardless of the synthesizer's concrete response type\n",
    "    all_summaries.append(str(response))\n",
    "\n",
    "    print(f\"  -> 摘要生成成功: '{all_summaries[-1][:60]}...'\")\n",
    "\n",
    "# Write all summaries into a single UTF-8 file\n",
    "try:\n",
    "    with open(output_filename, 'w', encoding='utf-8') as f:\n",
    "        print(f\"\\n正在将所有摘要写入文件: '{output_filename}'...\")\n",
    "\n",
    "        for i, summary in enumerate(all_summaries):\n",
    "            # Header line so each document's summary is easy to locate\n",
    "            f.write(f\"--- 文档 {i+1} (ID: {docs[i].id_}) 的摘要 ---\\n\")\n",
    "            f.write(summary)\n",
    "            # Blank line between summaries for readability\n",
    "            f.write(\"\\n\\n\")\n",
    "\n",
    "    print(f\"成功！所有摘要已保存到 {os.path.abspath(output_filename)}\")\n",
    "\n",
    "except IOError as e:\n",
    "    print(f\"错误：写入文件失败。原因: {e}\")\n"
   ],
   "id": "79aa3297a7ff05b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始为 10 个文档生成摘要...\n",
      "\n",
      "正在处理文档 1/10 (ID: adc5ba64-1d88-486a-8478-a0c300ba4bf4)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:04:36,650 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档显示了一个404错误页面，提示页面未找到，并建议检查URL或使用搜索栏。...'\n",
      "\n",
      "正在处理文档 2/10 (ID: 9c25db00-c6b7-4a77-9b5f-d9913b0f80f7)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:04:41,274 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:04:46,013 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:04:50,300 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:04:54,601 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:04:59,241 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了使用交叉编码器进行重排序的评估流程，包括基线评估、使用基础重排序器和微调后重排序器的对比实验，并报告了相应...'\n",
      "\n",
      "正在处理文档 3/10 (ID: 77e4727d-0f74-4ac6-8601-35dc9389b5b6)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:05:04,391 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:05:09,506 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了使用NUDGE方法对语料库嵌入进行微调的过程，包括在Scifact数据集上的基准测试、评估指标实现以及如何...'\n",
      "\n",
      "正在处理文档 4/10 (ID: 1a8596bc-8703-4484-805e-7083e1b52f87)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:05:15,900 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:05:22,974 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档展示了如何通过准备数据、微调模型和评估性能等步骤，使用LlamaIndex工具来微调自定义的嵌入模型。它详细介绍了...'\n",
      "\n",
      "正在处理文档 5/10 (ID: c459051d-01c6-4b70-9a86-a83bbbc986d6)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:05:30,093 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:05:35,603 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档介绍了如何在现有嵌入模型之上微调适配器，以优化特定数据和查询的检索性能。它详细说明了生成训练数据集、使用不同类型的...'\n",
      "\n",
      "正在处理文档 6/10 (ID: e830ff7a-16dc-4ee8-b0aa-a1267e04bbf9)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:05:42,784 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:05:50,429 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:05:59,091 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档展示了如何通过微调GPT-3.5模型，使其能够评估语言模型生成答案的正确性。具体过程包括使用GPT-4对模型回答进...'\n",
      "\n",
      "正在处理文档 7/10 (ID: 151e0fa9-d35a-4628-91f7-d57315bb87ab)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:06:07,434 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:13,481 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:19,923 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:27,282 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了如何通过微调GPT-3.5模型来创建一个更接近GPT-4评估能力的LLM评判系统。它详细描述了使用成对比较...'\n",
      "\n",
      "正在处理文档 8/10 (ID: 828eeac6-7726-4aa9-b86d-74981f3df16c)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:06:35,184 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:40,181 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:45,507 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:50,376 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:06:56,740 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:02,561 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:07,513 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:12,565 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:16,960 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:21,750 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:26,423 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:31,005 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:35,420 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:39,937 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:45,117 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:49,280 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:07:54,141 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了使用微调后的模型进行查询处理的过程，包括向嵌入服务和聊天补全服务发送HTTP请求，并记录了成功的响应状态码...'\n",
      "\n",
      "正在处理文档 9/10 (ID: a396b182-16a9-4c36-b33e-4e94b911cb62)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:07:59,085 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:08:04,283 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档介绍了使用GPT-4生成训练数据来微调GPT-3.5-turbo模型的过程，涵盖了数据准备、问题生成、模型评估以...'\n",
      "\n",
      "正在处理文档 10/10 (ID: c4e3ed9c-acff-486e-9457-8136bfec55a5)...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:08:09,429 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:08:15,745 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了如何通过检索增强生成（RAG）系统来微调模型以输出结构化数据。具体包括从非结构化文档中创建上下文增强的输入...'\n",
      "\n",
      "正在将所有摘要写入文件: 'document_summaries.txt'...\n",
      "成功！所有摘要已保存到 D:\\pythonProject17\\refine\\document_summaries.txt\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:10:33.277454Z",
     "start_time": "2025-09-16T19:10:33.254458Z"
    }
   },
   "cell_type": "code",
   "source": "docs[2]",
   "id": "eff178670f329ac5",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Document(id_='77e4727d-0f74-4ac6-8601-35dc9389b5b6', embedding=None, metadata={'file_path': 'D:\\\\pythonProject17\\\\refine\\\\f44e1171-2b80-441c-b2cf-d7970d767496\\\\119b3497-6a3d-46f0-8446-3c5584f1a3a3\\\\developers.llamaindex.ai_python_examples_finetuning_embeddings_finetune_corpus_embedding_.md', 'file_name': 'developers.llamaindex.ai_python_examples_finetuning_embeddings_finetune_corpus_embedding_.md', 'file_size': 19105, 'creation_date': '2025-09-16', 'last_modified_date': '2025-09-16'}, excluded_embed_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], excluded_llm_metadata_keys=['file_name', 'file_type', 'file_size', 'creation_date', 'last_modified_date', 'last_accessed_date'], relationships={}, metadata_template='{key}: {value}', metadata_separator='\\n', text_resource=MediaResource(embeddings=None, data=None, text='---\\nurl: \"https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/\"\\ntitle: \"Finetuning corpus embeddings using NUDGE\\\\n | LlamaIndex Python Documentation\"\\n---\\n\\n[Skip to content](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#_top)\\n\\n# Finetuning corpus embeddings using NUDGE\\n\\n[NUDGE](https://www.arxiv.org/abs/2409.02343) is a novel simple and lightweight fine-tuning method that boosts accuracy when retrieving text using semantic similarity with pre-trained embedding models. NUDGE directly modifies the embeddings of data records to maximize the similarity between training queries and their ground-truth answers. NUDGE does so non-parametrically. Non-parametric means that NUDGE does not modify model parameters to generate better embeddings, as fine-tuning the embedding model, or training adaptors would. Instead, NUDGE directly changes the embeddings themselves. 
Compared with fine-tuning the pre-trained model and training adaptors, NUDGE provides 3.3x and 4.3x higher increase in accuracy and runs 200x and 3x faster, respectively. [Here](https://data-people-group.github.io/blogs/2024/09/05/nudge/) is a blog post on NUDGE, and [here](https://www.arxiv.org/abs/2409.02343) is the paper with more details.\\n\\nWe demonstrate NUDGE’s effectiveness on a commonly used Information Retrieval benchmark called Scifact.\\n\\n```\\n\\n%pip install llama-index-experimental llama-index-embeddings-huggingface nudge-ft torch datasets\\n```\\n\\n## Load the scifact benchmark\\n\\n[Section titled “Load the scifact benchmark”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#load-the-scifact-benchmark)\\n\\n```\\n\\nfrom llama_index.finetuning import EmbeddingQAFinetuneDataset\\n\\nfrom datasets import load_dataset\\n\\ndef load_hf_dataset(dataset_name):\\n\\n    hf_dataset_name = f\"sepz/{dataset_name}_ft\"\\n\\n    corpus = load_dataset(hf_dataset_name, \"data_records\", split=\"train\")\\n\\n    queries_train = load_dataset(hf_dataset_name, \"qs\", split=\"train\")\\n\\n    queries_validation = load_dataset(hf_dataset_name, \"qs\", split=\"dev\")\\n\\n    queries_test = load_dataset(hf_dataset_name, \"qs\", split=\"test\")\\n\\n    qrels_train = load_dataset(hf_dataset_name, \"qs_rel\", split=\"train\")\\n\\n    qrels_validation = load_dataset(hf_dataset_name, \"qs_rel\", split=\"dev\")\\n\\n    qrels_test = load_dataset(hf_dataset_name, \"qs_rel\", split=\"test\")\\n\\n    corpus = {\\n\\n        str(corpus[i][\"record_id\"]): corpus[i][\"text\"]\\n\\n        for i in range(len(corpus))\\n\\n    }\\n\\n    queries_train = {\\n\\n        str(queries_train[i][\"q_id\"]): queries_train[i][\"input\"]\\n\\n        for i in range(len(queries_train))\\n\\n    }\\n\\n    queries_validation = {\\n\\n        str(r[\"q_id\"]): r[\"input\"] for r in queries_validation\\n\\n    }\\n\\n    queries_test = 
{str(r[\"q_id\"]): r[\"input\"] for r in queries_test}\\n\\n    qrels_train = (\\n\\n        qrels_train.to_pandas()\\n\\n        .groupby(\"q_id\")[\"record_id\"]\\n\\n        .apply(list)\\n\\n        .to_dict()\\n\\n    )\\n\\n    qrels_validation = (\\n\\n        qrels_validation.to_pandas()\\n\\n        .groupby(\"q_id\")[\"record_id\"]\\n\\n        .apply(list)\\n\\n        .to_dict()\\n\\n    )\\n\\n    qrels_test = (\\n\\n        qrels_test.to_pandas()\\n\\n        .groupby(\"q_id\")[\"record_id\"]\\n\\n        .apply(list)\\n\\n        .to_dict()\\n\\n    )\\n\\n    # convert to strings\\n\\n    qrels_train = {str(k): [str(i) for i in v] for k, v in qrels_train.items()}\\n\\n    qrels_validation = {\\n\\n        str(k): [str(i) for i in v] for k, v in qrels_validation.items()\\n\\n    }\\n\\n    qrels_test = {str(k): [str(i) for i in v] for k, v in qrels_test.items()}\\n\\n    # Load the dataset\\n\\n    train_dataset = EmbeddingQAFinetuneDataset(\\n\\n        corpus=corpus, queries=queries_train, relevant_docs=qrels_train\\n\\n    )\\n\\n    validation_dataset = EmbeddingQAFinetuneDataset(\\n\\n        corpus=corpus,\\n\\n        queries=queries_validation,\\n\\n        relevant_docs=qrels_validation,\\n\\n    )\\n\\n    test_dataset = EmbeddingQAFinetuneDataset(\\n\\n        corpus=corpus, queries=queries_test, relevant_docs=qrels_test\\n\\n    )\\n\\n    return train_dataset, validation_dataset, test_dataset\\n```\\n\\n```\\n\\nINFO:datasets:PyTorch version 2.5.0a0+872d972e41.nv24.8 available.\\n\\nPyTorch version 2.5.0a0+872d972e41.nv24.8 available.\\n\\n/usr/local/lib/python3.10/dist-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\\n\\n  from .autonotebook import tqdm as notebook_tqdm\\n```\\n\\n## Load the dataset and base embedding model\\n\\n[Section titled “Load the dataset and base embedding model”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#load-the-dataset-and-base-embedding-model)\\n\\n```\\n\\nfrom llama_index.core.embeddings import resolve_embed_model\\n\\ntrain_dataset, val_dataset, test_dataset = load_hf_dataset(\"scifact\")\\n\\nbase_embed_model = resolve_embed_model(\"local:BAAI/bge-small-en-v1.5\")\\n```\\n\\n```\\n\\nINFO:sentence_transformers.SentenceTransformer:Load pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\\n\\nLoad pretrained SentenceTransformer: BAAI/bge-small-en-v1.5\\n\\nINFO:sentence_transformers.SentenceTransformer:2 prompts are loaded, with the keys: [\\'query\\', \\'text\\']\\n\\n2 prompts are loaded, with the keys: [\\'query\\', \\'text\\']\\n```\\n\\nIf we take a peek at the dataset, we can see that its structured as\\n\\n- courpus: mapping of document ID to text\\n- queries: mapping of query ID to query text\\n- relevant\\\\_docs: a mapping of query ID to list of document IDs\\n\\n```\\n\\nprint(val_dataset.queries[\"2\"])\\n```\\n\\n```\\n\\nDepletion of nitric oxide is responsible for vasospasm.\\n```\\n\\n```\\n\\nprint(val_dataset.relevant_docs[\"2\"])\\n```\\n\\n```\\n\\n[\\'552\\']\\n```\\n\\n```\\n\\nprint(val_dataset.corpus[\"552\"])\\n```\\n\\n```\\n\\nCONTEXT Delayed cerebral vasospasm causes permanent neurological deficits or death in at least 15% of patients following otherwise successful treatment for ruptured intracranial aneurysm. Decreased bioavailability of nitric oxide has been associated with the development of cerebral vasospasm. OBJECTIVE To determine whether infusions of nitrite will prevent delayed cerebral vasospasm. 
DESIGN, SETTING, AND SUBJECTS A total of 14 anesthetized cynomolgus monkeys had an autologous blood clot placed around the right middle cerebral artery. Cerebral arteriography was performed before clot placement and on days 7 and 14 to assess vasospasm. The study was conducted from August 2003 to February 2004. INTERVENTIONS A 90-mg sodium nitrite intravenous solution infused over 24 hours plus a 45-mg sodium nitrite bolus daily (n = 3); a 180-mg sodium nitrite intravenous solution infused over 24 hours (n = 3); or a control saline solution infusion (n = 8). Each was infused continuously for 14 days. MAIN OUTCOME MEASURES Nitrite, S-nitrosothiol, and methemoglobin levels in blood and cerebrospinal fluid and degree of arteriographic vasospasm. RESULTS In control monkeys, mean (SD) cerebrospinal fluid nitrite levels decreased from 3.1 (1.5) micromol/L to 0.4 (0.1) micromol/L at day 7 and to 0.4 (0.4) micromol/L at day 14 (P = .03). All 8 control monkeys developed significant vasospasm of the right middle cerebral artery, which was complicated by stroke and death in 1 animal. Sodium nitrite infusions increased the nitrite and methemoglobin levels (<2.1% of total hemoglobin) in the blood and cerebrospinal fluid without evoking systemic hypotension. Nitrite infusion prevented development of vasospasm (no animals developed significant vasospasm; mean [SD] reduction in right middle cerebral artery area on day 7 after subarachnoid hemorrhage of 8% [9%] in nitrite-treated monkeys vs 47% [5%] in saline-treated controls; P<.001). There was a negative correlation between the concentration of nitrite in cerebrospinal fluid and the degree of cerebral vasospasm (P<.001). Pharmacological effects of nitrite infusion were also associated with the formation of S-nitrosothiol in cerebrospinal fluid. There was no clinical or pathological evidence of nitrite toxicity. 
CONCLUSION Subacute sodium nitrite infusions prevented delayed cerebral vasospasm in a primate model of subarachnoid hemorrhage.\\n```\\n\\n### Using your own Datasets\\n\\n[Section titled “Using your own Datasets”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#using-your-own-datasets)\\n\\nAs you can see, you can run this notebook on any dataset, as long as you have queries and a mapping to relevant documents! If you have documents but are missing a training set of queries checkout the our tools for generating a synthetic dataset ( [1](https://docs.llamaindex.ai/en/stable/api_reference/evaluation/dataset_generation/)).\\n\\nIf you wanted, you could also write your own dataset, or even use llama-index to create your own.\\n\\nUncomment the code below and add your own files if you want to try it out.\\n\\n```\\n\\n# This code would generate your own dataset against your own custom data\\n\\nfrom llama_index.finetuning import generate_qa_embedding_pairs\\n\\nfrom llama_index.core import SimpleDirectoryReader\\n\\nfrom llama_index.core.node_parser import SentenceSplitter\\n\\nfrom llama_index.core.evaluation import EmbeddingQAFinetuneDataset\\n\\ndef load_corpus(files, verbose=False):\\n\\n    if verbose:\\n\\n        print(f\"Loading files {files}\")\\n\\n    reader = SimpleDirectoryReader(input_files=files)\\n\\n    docs = reader.load_data()\\n\\n    if verbose:\\n\\n        print(f\"Loaded {len(docs)} docs\")\\n\\n    parser = SentenceSplitter()\\n\\n    nodes = parser.get_nodes_from_documents(docs, show_progress=verbose)\\n\\n    if verbose:\\n\\n        print(f\"Parsed {len(nodes)} nodes\")\\n\\n    return nodes\\n\\n# Load data\\n\\n# train_nodes = load_corpus([\"file1.pdf\", ...], verbose=True)\\n\\n# val_nodes = load_corpus([\"file2.pdf\", ...], verbose=True)\\n\\n# Generate pairs\\n\\n# train_dataset = generate_qa_embedding_pairs(train_nodes)\\n\\n# val_dataset = generate_qa_embedding_pairs(val_nodes)\\n\\n# 
[Optional] Save to disk\\n\\n# train_dataset.save_json(\"train_dataset.json\")\\n\\n# val_dataset.save_json(\"val_dataset.json\")\\n\\n# [Optional] Load\\n\\n# train_dataset = EmbeddingQAFinetuneDataset.from_json(\"train_dataset.json\")\\n\\n# val_dataset = EmbeddingQAFinetuneDataset.from_json(\"val_dataset.json\")\\n```\\n\\n## Evaluation\\n\\n[Section titled “Evaluation”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#evaluation)\\n\\nA common Information Retrieval metric to report during evaluation is NDCG@k.\\n\\n```\\n\\nfrom typing import Optional, Dict\\n\\nimport torch\\n\\nimport numpy as np\\n\\nfrom tqdm import tqdm\\n\\nfrom llama_index.core.schema import TextNode\\n\\nfrom llama_index.core.base.embeddings.base import BaseEmbedding\\n\\nfrom llama_index.core.base.base_retriever import BaseRetriever\\n\\nfrom llama_index.core import VectorStoreIndex\\n\\ndef build_retriever(\\n\\n    corpus: Dict[str, str],\\n\\n    embed_model: BaseEmbedding | str,\\n\\n    corpus_embeddings: Optional[torch.Tensor] = None,\\n\\n    k: int = 10,\\n\\n) -> BaseRetriever:\\n\\n    nodes = []\\n\\n    for i, (id_, text) in enumerate(corpus.items()):\\n\\n        if corpus_embeddings is not None:\\n\\n            nodes.append(\\n\\n                TextNode(\\n\\n                    id_=id_, text=text, embedding=corpus_embeddings[i].tolist()\\n\\n                )\\n\\n            )\\n\\n        else:\\n\\n            nodes.append(TextNode(id_=id_, text=text))\\n\\n    index = VectorStoreIndex(\\n\\n        nodes=nodes,\\n\\n        embeddings=corpus_embeddings,\\n\\n        embed_model=embed_model,\\n\\n        show_progress=True,\\n\\n    )\\n\\n    return index.as_retriever(similarity_top_k=k)\\n\\ndef ndcg_at_k(\\n\\n    dataset: EmbeddingQAFinetuneDataset, retriever: BaseRetriever, k: int = 10\\n\\n):\\n\\n    queries = dataset.queries\\n\\n    relevant_docs = dataset.relevant_docs\\n\\n    ndcg_scores = []\\n\\n    for 
query_id, query in tqdm(queries.items()):\\n\\n        retrieved_nodes = retriever.retrieve(query)\\n\\n        retrieved_ids = [node.node.node_id for node in retrieved_nodes]\\n\\n        expected_ids = relevant_docs[query_id]\\n\\n        # Calculate NDCG\\n\\n        ideal_dcg = np.sum(\\n\\n            [1 / np.log2(i + 2) for i in range(min(k, len(expected_ids)))]\\n\\n        )\\n\\n        rel_scores = np.zeros(k)\\n\\n        for j in range(min(k, len(retrieved_ids))):\\n\\n            if retrieved_ids[j] in expected_ids:\\n\\n                rel_scores[j] = 1\\n\\n        dcg = np.sum(\\n\\n            [rel_scores[i] / np.log2(i + 2) for i in range(len(rel_scores))]\\n\\n        )\\n\\n        ndcg = dcg / ideal_dcg if ideal_dcg > 0 else 0\\n\\n        ndcg_scores.append(ndcg)\\n\\n    mean_ndcg = np.mean(ndcg_scores)\\n\\n    return mean_ndcg\\n```\\n\\n## Get the corpus embedding finetuning results\\n\\n[Section titled “Get the corpus embedding finetuning results”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#get-the-corpus-embedding-finetuning-results)\\n\\nNext we use, [NUDGE](https://www.arxiv.org/abs/2409.02343), the state of the art method for finetuning corpus embeddings to maximize the accuracy of k-NN retrieval. We then take our new corpus embeddings along with the original embedding model to build a retriever. 
NUDGE only finetunes the corpus embeddings and does not change any of the parameters in the base embedding model.\\n\\n```\\n\\n%%capture\\n\\nfrom llama_index.experimental import Nudge\\n\\nk = 10\\n\\nnudge = Nudge(\\n\\n    train_dataset=train_dataset,\\n\\n    val_dataset=val_dataset,\\n\\n    embed_model=base_embed_model,\\n\\n    use_nudge_n=True,\\n\\n)\\n\\nnudge.finetune()\\n\\nnudge_corpus_embeddings = nudge.get_finetuned_corpus_embeddings()\\n\\nnudge_retriever = build_retriever(\\n\\n    train_dataset.corpus, base_embed_model, nudge_corpus_embeddings, k=k\\n\\n)\\n\\nnudge_ndcg_test = ndcg_at_k(test_dataset, nudge_retriever, k)\\n```\\n\\n```\\n\\nINFO:llama_index.experimental.nudge.base:Use pytorch device: cuda\\n\\nUse pytorch device: cuda\\n```\\n\\n## Get the adapter finetuning results\\n\\n[Section titled “Get the adapter finetuning results”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#get-the-adapter-finetuning-results)\\n\\n```\\n\\n%%capture\\n\\nfrom llama_index.finetuning import EmbeddingAdapterFinetuneEngine\\n\\nembedding_adapater_finetune_engine = EmbeddingAdapterFinetuneEngine(\\n\\n    train_dataset,\\n\\n    base_embed_model,\\n\\n    epochs=4,\\n\\n    batch_size=10,\\n\\n)\\n\\nembedding_adapater_finetune_engine.finetune()\\n\\nembedding_adapter_model = (\\n\\n    embedding_adapater_finetune_engine.get_finetuned_model()\\n\\n)\\n\\nft_retriever = build_retriever(\\n\\n    train_dataset.corpus, embedding_adapter_model, k=k\\n\\n)\\n\\nft_ndcg_test = ndcg_at_k(test_dataset, ft_retriever, k)\\n```\\n\\n```\\n\\nINFO:llama_index.finetuning.embeddings.adapter:Use pytorch device: cuda\\n\\nUse pytorch device: cuda\\n\\nINFO:llama_index.embeddings.adapter.base:Use pytorch device: cuda\\n\\nUse pytorch device: cuda\\n```\\n\\n## Get the baseline results\\n\\n[Section titled “Get the baseline 
results”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#get-the-baseline-results)\\n\\n```\\n\\n%%capture\\n\\nbase_retriever = build_retriever(train_dataset.corpus, base_embed_model, k=k)\\n\\nbge_ndcg_test = ndcg_at_k(test_dataset, base_retriever, k)\\n```\\n\\n## Display the results\\n\\n[Section titled “Display the results”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#display-the-results)\\n\\n```\\n\\nprint(f\"bge test - ndcg@10: {bge_ndcg_test:.2f}\")\\n\\nprint(f\"adaptor finetune test - ndcg@10: {ft_ndcg_test:.2f}\")\\n\\nprint(f\"NUDGE-N test - ndcg@10: {nudge_ndcg_test:.2f}\")\\n```\\n\\n```\\n\\nbge test - ndcg@10: 0.71\\n\\nadaptor finetune test - ndcg@10: 0.72\\n\\nNUDGE-N test - ndcg@10: 0.87\\n```\\n\\n# Inserting records into the dataset\\n\\n[Section titled “Inserting records into the dataset”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#inserting-records-into-the-dataset)\\n\\nIt’s common to have your dataset expand over time. We will now insert and finetune the nfcorpus into the scifact example we’ve been working with. Usually you’d have to retrain on your entire dataset to avoid catastrophic forgetting. With NUDGE, you can easily expand your dataset iteratively by focusing only on the newest batch of data, without worrying about catastrophic forgetting. This only works when the new data being inserted does not conflict (e.g. 
new queries for old corpus or new corpus changes k-NN to old queries) with the existing dataset.\\n\\n```\\n\\n%%capture\\n\\nnew_train_dataset, new_val_dataset, new_test_dataset = load_hf_dataset(\\n\\n    \"nfcorpus\"\\n\\n)\\n\\n# prepend \"nfcorpus-\" to the keys so they don\\'t conflict with the scifact ids\\n\\nnew_train_dataset.queries = {\\n\\n    f\"nfcorpus-{k}\": v for k, v in new_train_dataset.queries.items()\\n\\n}\\n\\nnew_train_dataset.relevant_docs = {\\n\\n    f\"nfcorpus-{k}\": [f\"nfcorpus-{doc_id}\" for doc_id in v]\\n\\n    for k, v in new_train_dataset.relevant_docs.items()\\n\\n}\\n\\nnew_train_dataset.corpus = {\\n\\n    f\"nfcorpus-{k}\": v for k, v in new_train_dataset.corpus.items()\\n\\n}\\n\\nnew_val_dataset.queries = {\\n\\n    f\"nfcorpus-{k}\": v for k, v in new_val_dataset.queries.items()\\n\\n}\\n\\nnew_val_dataset.relevant_docs = {\\n\\n    f\"nfcorpus-{k}\": [f\"nfcorpus-{doc_id}\" for doc_id in v]\\n\\n    for k, v in new_val_dataset.relevant_docs.items()\\n\\n}\\n\\nnew_val_dataset.corpus = {\\n\\n    f\"nfcorpus-{k}\": v for k, v in new_val_dataset.corpus.items()\\n\\n}\\n\\nnew_test_dataset.queries = {\\n\\n    f\"nfcorpus-{k}\": v for k, v in new_test_dataset.queries.items()\\n\\n}\\n\\nnew_test_dataset.relevant_docs = {\\n\\n    f\"nfcorpus-{k}\": [f\"nfcorpus-{doc_id}\" for doc_id in v]\\n\\n    for k, v in new_test_dataset.relevant_docs.items()\\n\\n}\\n\\nnew_test_dataset.corpus = {\\n\\n    f\"nfcorpus-{k}\": v for k, v in new_test_dataset.corpus.items()\\n\\n}\\n```\\n\\n## Finetune the new records\\n\\n[Section titled “Finetune the new records”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#finetune-the-new-records)\\n\\n```\\n\\n%%capture\\n\\nnudge.insert_data_and_finetune(\\n\\n    new_train_dataset_batch=new_train_dataset,\\n\\n    new_val_dataset_batch=new_val_dataset,\\n\\n)\\n\\n# get our corpus embeddings with the newly inserted and tuned 
records\\n\\nnudge_corpus_embeddings = nudge.get_finetuned_corpus_embeddings()\\n\\n# aggregate the corpus\\n\\naggregated_corpus = {**train_dataset.corpus, **new_train_dataset.corpus}\\n\\n# build nudge retriever\\n\\nnudge_retriever = build_retriever(\\n\\n    aggregated_corpus, base_embed_model, nudge_corpus_embeddings, k=k\\n\\n)\\n\\n# get test results on nfcorpus\\n\\nnudge_ndcg_nfcorpus_test = ndcg_at_k(new_test_dataset, nudge_retriever, k)\\n\\n# get test results on scifact\\n\\nnudge_ndcg_scifact_test = ndcg_at_k(test_dataset, nudge_retriever, k)\\n```\\n\\n## Display the insertion results\\n\\n[Section titled “Display the insertion results”](https://developers.llamaindex.ai/python/examples/finetuning/embeddings/finetune_corpus_embedding/#display-the-insertion-results)\\n\\nCheck the results on our newly inserted nfcorpus records and verify that our scifact benchmark did not regress.\\n\\n```\\n\\nprint(\\n\\n    f\"NUDGE-N (aggregated) test on nfcorpus - ndcg@10: {nudge_ndcg_nfcorpus_test:.2f}\"\\n\\n)\\n\\nprint(\\n\\n    f\"NUDGE-N (aggregated) test on scifact - ndcg@10: {nudge_ndcg_scifact_test:.2f}\"\\n\\n)\\n```\\n\\n```\\n\\nNUDGE-N (aggregated) test on nfcorpus - ndcg@10: 0.44\\n\\nNUDGE-N (aggregated) test on scifact - ndcg@10: 0.85\\n```', path=None, url=None, mimetype=None), image_resource=None, audio_resource=None, video_resource=None, text_template='{metadata_str}\\n\\n{content}')"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:15:56.467723Z",
     "start_time": "2025-09-16T19:15:56.446202Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def extract_title(document_text: str) -> str:\n",
    "    \"\"\"\n",
    "    Parse and extract the title from a document's text content.\n",
    "\n",
    "    Scans for the first line starting with 'title:' and returns the main\n",
    "    part inside double quotes; when no quoted section is present, falls\n",
    "    back to the raw text after 'title:' instead of giving up.\n",
    "    \"\"\"\n",
    "    for line in document_text.splitlines():  # scan the document line by line\n",
    "        stripped = line.strip()\n",
    "        if not stripped.startswith('title:'):\n",
    "            continue\n",
    "        parts = stripped.split('\"')\n",
    "        if len(parts) >= 2:\n",
    "            # Text between the first pair of double quotes,\n",
    "            # e.g. 'title: \"The Title | More\"' -> 'The Title | More'\n",
    "            content = parts[1]\n",
    "        else:\n",
    "            # No quotes present: fall back to everything after 'title:'\n",
    "            content = stripped[len('title:'):]\n",
    "        # Keep only the core title before any '|' separator, and collapse\n",
    "        # literal '\\\\n' sequences left over from the text extraction.\n",
    "        main_title = content.split('|')[0]\n",
    "        title = main_title.replace('\\\\n', ' ').strip()\n",
    "        return title if title else \"（无法提取标题）\"\n",
    "    return \"（未找到标题行）\"\n"
   ],
   "id": "eb82cdd8042948c3",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-16T19:20:15.683756Z",
     "start_time": "2025-09-16T19:16:36.587384Z"
    }
   },
   "cell_type": "code",
   "source": [
    "prompt = \"你只要说这个文档做了什么\"\n",
    "output_filename = \"document_summaries_with_titles.txt\" # output file name\n",
    "\n",
    "# MODIFIED: stores all generated (title, summary) pairs\n",
    "all_summaries_with_titles = []\n",
    "\n",
    "print(f\"开始为 {len(docs)} 个文档生成摘要并提取标题...\")\n",
    "\n",
    "# Iterate over every loaded Document object\n",
    "for i, doc in enumerate(docs):\n",
    "    print(f\"\\n正在处理文档 {i+1}/{len(docs)} (ID: {doc.id_})...\")\n",
    "\n",
    "    document_text = doc.text\n",
    "\n",
    "    # NEW: call the helper function to extract the document title\n",
    "    document_title = extract_title(document_text)\n",
    "    print(f\"  -> 提取到的标题: '{document_title}'\")\n",
    "\n",
    "    # Ask the summarizer (defined in an earlier cell) for a summary\n",
    "    response = await summarizer.aget_response(prompt, [document_text])\n",
    "\n",
    "    # MODIFIED: store the title and summary together as a tuple\n",
    "    all_summaries_with_titles.append((document_title, response))\n",
    "\n",
    "    print(f\"  -> 摘要生成成功: '{response[:60]}...'\")\n",
    "\n",
    "# --- Step 4: write all summaries and titles into one file (MODIFIED) ---\n",
    "\n",
    "try:\n",
    "    with open(output_filename, 'w', encoding='utf-8') as f:\n",
    "        print(f\"\\n正在将所有摘要和标题写入文件: '{output_filename}'...\")\n",
    "\n",
    "        # MODIFIED: iterate over the list of (title, summary) tuples\n",
    "        for i, (title, summary) in enumerate(all_summaries_with_titles):\n",
    "            # Write a prominent heading for each document\n",
    "            f.write(f\"### {i+1}. {title}\\n\\n\") # Markdown heading format, easy to spot\n",
    "\n",
    "            # Write the source document ID for reference\n",
    "            f.write(f\"**源文档ID:** `{docs[i].id_}`\\n\\n\")\n",
    "\n",
    "            # Write the summary content itself\n",
    "            f.write(f\"**摘要:**\\n{summary}\\n\")\n",
    "\n",
    "            # Write a separator so documents are clearly delimited\n",
    "            f.write(\"\\n---\\n\\n\")\n",
    "\n",
    "    print(f\"成功！所有内容已保存到 {os.path.abspath(output_filename)}\")\n",
    "\n",
    "except IOError as e:\n",
    "    print(f\"错误：写入文件失败。原因: {e}\")"
   ],
   "id": "754de492ef235290",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始为 10 个文档生成摘要并提取标题...\n",
      "\n",
      "正在处理文档 1/10 (ID: adc5ba64-1d88-486a-8478-a0c300ba4bf4)...\n",
      "  -> 提取到的标题: '404'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:16:37,256 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档显示了一个404错误页面，提示页面未找到，并建议检查URL或使用搜索栏。...'\n",
      "\n",
      "正在处理文档 2/10 (ID: 9c25db00-c6b7-4a77-9b5f-d9913b0f80f7)...\n",
      "  -> 提取到的标题: 'How to Finetune a cross-encoder using LLamaIndex'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:16:42,244 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: for answer in answers:\\\n",
      "\\\n",
      "        local_answer ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:16:47,739 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 2620:100:6035:15::a27d:550f\\\n",
      "\\\n",
      "Connecting to uc...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:16:53,144 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 2620:100:6035:15::a27d:550f\\\n",
      "\\\n",
      "Connecting to uc...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:16:58,367 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: \"query\": query,\\\n",
      "\\\n",
      "                \"response\": ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:03,568 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档详细描述了使用LLamaIndex库进行交叉编码器模型微调的全过程，包括数据准备、模型训练、性能评估以及结果对比分...'\n",
      "\n",
      "正在处理文档 3/10 (ID: 77e4727d-0f74-4ac6-8601-35dc9389b5b6)...\n",
      "  -> 提取到的标题: 'Finetuning corpus embeddings using NUDGE'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:07,763 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: dataset: EmbeddingQAFinetuneDataset, retriever:...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:12,658 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了如何使用NUDGE方法对语料库嵌入进行微调，以提高k-NN检索的准确性。它详细介绍了在Scifact数据集...'\n",
      "\n",
      "正在处理文档 4/10 (ID: 1a8596bc-8703-4484-805e-7083e1b52f87)...\n",
      "  -> 提取到的标题: 'Finetune Embeddings'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:20,091 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: | 0.00/1.52k [00:00<?, ?B/s]\n",
      "\n",
      "Downloading (…)_P...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:25,509 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档展示了如何利用LlamaIndex工具来微调自定义的嵌入模型，包括数据准备、模型微调以及验证评估的完整流程。通过使...'\n",
      "\n",
      "正在处理文档 5/10 (ID: c459051d-01c6-4b70-9a86-a83bbbc986d6)...\n",
      "  -> 提取到的标题: 'Finetuning an Adapter on Top of any Black-Box Embedding Model'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:31,970 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: | bge | 0.787342 | 0.643038 |\n",
      "| 2 | ft\\_2layer ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:37,256 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档介绍了如何在现有嵌入模型之上微调适配器，以优化特定数据和查询的检索性能。它详细说明了生成训练数据集、使用不同适配器...'\n",
      "\n",
      "正在处理文档 6/10 (ID: e830ff7a-16dc-4ee8-b0aa-a1267e04bbf9)...\n",
      "  -> 提取到的标题: 'Knowledge Distillation For Fine-Tuning A GPT-3.5 Judge (Correctness)'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:43,118 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: To do this, we will make use of the `OpenAIFine...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:49,627 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: total_obs=np_scores_gpt_4.shape[0],\\\n",
      "\\\n",
      "        ...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:17:58,011 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '该文档展示了如何通过微调GPT-3.5模型，使其在评估语言模型回答质量的任务上更接近GPT-4的判断水平。具体包括使用G...'\n",
      "\n",
      "正在处理文档 7/10 (ID: 151e0fa9-d35a-4628-91f7-d57315bb87ab)...\n",
      "  -> 提取到的标题: 'Knowledge Distillation For Fine-Tuning A GPT-3.5 Judge (Pairwise)'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:04,200 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: = {\\\n",
      "\\\n",
      "    mdl: create_query_engine(mdl, test_r...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:10,307 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: llama_index.finetuning import OpenAIFinetuneEng...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:16,118 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: data_entry in tqdm.tqdm(test_dataset):\\\n",
      "\\\n",
      "    t...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:21,620 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了如何通过微调GPT-3.5模型来提升其作为LLM评判员的能力，使其更接近GPT-4的评判水平。文档详细描述...'\n",
      "\n",
      "正在处理文档 8/10 (ID: 828eeac6-7726-4aa9-b86d-74981f3df16c)...\n",
      "  -> 提取到的标题: 'Fine Tuning MistralAI models using Finetuning API'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:30,605 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: POST https://api.mistral.ai/v1/embeddings \"HTTP...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:35,505 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: \"HTTP/1.1 200 OK\"\n",
      "\n",
      "HTTP Request: POST https://a...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:40,583 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 200 OK\"\n",
      "\n",
      "Processing questions:  65%|██████▌   |...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:44,891 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 200 OK\"\n",
      "\n",
      "HTTP Request: GET https://api.mistral....\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:49,649 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:18:54,795 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: POST https://api.mistral.ai/v1/embeddings \"HTTP...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:18:58,907 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: Request: POST https://api.mistral.ai/v1/embeddi...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:03,399 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:19:07,919 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: \"HTTP/1.1 200 OK\"\n",
      "\n",
      "HTTP Request: POST https://a...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:12,538 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: POST https://api.openai.com/v1/embeddings \"HTTP...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:17,899 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:19:22,460 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: \"HTTP/1.1 200 OK\"\n",
      "\n",
      "INFO:httpx:HTTP Request: POS...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:26,961 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:19:30,998 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 200 OK\"\n",
      "\n",
      "INFO:httpx:HTTP Request: POST https://...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:35,588 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 77/80 [01:07<00:06,  2.27s/it]\n",
      "\n",
      "INFO:httpx:HTTP...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:40,584 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n",
      "2025-09-17 03:19:44,766 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: llama_index.core.response.notebook_utils import...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:50,148 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了使用Mistral AI API进行查询处理的过程，包括发送嵌入请求和聊天补全请求到Mistral AI的...'\n",
      "\n",
      "正在处理文档 9/10 (ID: a396b182-16a9-4c36-b33e-4e94b911cb62)...\n",
      "  -> 提取到的标题: 'Fine Tuning GPT-3.5-Turbo'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:19:55,246 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: 2016).In a few experimental evolution studies \\...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:20:00,471 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档介绍了使用GPT-4生成训练数据来微调GPT-3.5-Turbo的方法，涵盖了数据准备、问题生成、评估基准测试以...'\n",
      "\n",
      "正在处理文档 10/10 (ID: c4e3ed9c-acff-486e-9457-8136bfec55a5)...\n",
      "  -> 提取到的标题: 'Fine Tuning with Function Calling'\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:20:05,406 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "> Refine context: = OpenAIPydanticProgram.from_defaults(\n",
      "\n",
      "    out...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-09-17 03:20:10,414 - INFO - HTTP Request: POST https://api.deepseek.com/chat/completions \"HTTP/1.1 200 OK\"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "  -> 摘要生成成功: '这个文档展示了如何通过检索增强生成（RAG）系统创建训练数据集，并利用该数据集对语言模型进行微调，以提升其从非结构化文档...'\n",
      "\n",
      "正在将所有摘要和标题写入文件: 'document_summaries_with_titles.txt'...\n",
      "成功！所有内容已保存到 D:\\pythonProject17\\refine\\document_summaries_with_titles.txt\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "77dbeb88caf2cefe"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
