{
  "cells": [
    {
      "cell_type": "raw",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "# RAG系统的层次化索引\n",
        "\n",
        "在这个笔记本中，我实现了一个RAG系统的层次化索引方法。这种技术通过使用两层搜索方法改进检索：首先通过摘要识别相关文档章节，然后从这些章节中检索具体细节。\n",
        "\n",
        "传统的RAG方法平等对待所有文本块，这可能导致：\n",
        "\n",
        "- 当文本块太小时会丢失上下文\n",
        "- 当文档集合很大时会产生不相关的结果\n",
        "- 在整个语料库中进行低效搜索\n",
        "\n",
        "层次化检索通过以下方式解决这些问题：\n",
        "\n",
        "- 为较大的文档章节创建简洁摘要\n",
        "- 首先搜索这些摘要以识别相关章节\n",
        "- 然后仅从这些章节中检索详细信息\n",
        "- 在保持具体细节的同时维护上下文\n"
      ]
    },
    {
      "cell_type": "raw",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 设置环境\n",
        "我们首先导入必要的库。\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "import os\n",
        "import numpy as np\n",
        "import json\n",
        "import fitz\n",
        "from openai import OpenAI\n",
        "import re\n",
        "import pickle\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# 层次化RAG实现 - 核心函数合并版本\n",
        "\n",
        "# 使用基础URL和API密钥初始化OpenAI客户端\n",
        "client = OpenAI(\n",
        "    base_url=\"https://api.studio.nebius.com/v1/\",\n",
        "    api_key=os.getenv(\"OPENAI_API_KEY\")  # 从环境变量中获取API密钥\n",
        ")\n",
        "\n",
        "def extract_text_from_pdf(pdf_path):\n",
        "    \"\"\"从PDF文件中提取文本内容，按页面分离。\"\"\"\n",
        "    print(f\"正在从{pdf_path}中提取文本...\")\n",
        "    pdf = fitz.open(pdf_path)\n",
        "    pages = []\n",
        "    \n",
        "    for page_num in range(len(pdf)):\n",
        "        page = pdf[page_num]\n",
        "        text = page.get_text()\n",
        "        \n",
        "        if len(text.strip()) > 50:\n",
        "            pages.append({\n",
        "                \"text\": text,\n",
        "                \"metadata\": {\n",
        "                    \"source\": pdf_path,\n",
        "                    \"page\": page_num + 1\n",
        "                }\n",
        "            })\n",
        "    \n",
        "    print(f\"提取了{len(pages)}页内容\")\n",
        "    return pages\n",
        "\n",
        "def chunk_text(text, metadata, chunk_size=1000, overlap=200):\n",
        "    \"\"\"将文本分割成重叠的块，同时保留元数据。\"\"\"\n",
        "    chunks = []\n",
        "    \n",
        "    for i in range(0, len(text), chunk_size - overlap):\n",
        "        chunk_text = text[i:i + chunk_size]\n",
        "        \n",
        "        if chunk_text and len(chunk_text.strip()) > 50:\n",
        "            chunk_metadata = metadata.copy()\n",
        "            chunk_metadata.update({\n",
        "                \"chunk_index\": len(chunks),\n",
        "                \"start_char\": i,\n",
        "                \"end_char\": i + len(chunk_text),\n",
        "                \"is_summary\": False\n",
        "            })\n",
        "            \n",
        "            chunks.append({\n",
        "                \"text\": chunk_text,\n",
        "                \"metadata\": chunk_metadata\n",
        "            })\n",
        "    \n",
        "    return chunks\n",
        "\n",
        "class SimpleVectorStore:\n",
        "    \"\"\"使用NumPy的简单向量存储实现。\"\"\"\n",
        "    def __init__(self):\n",
        "        self.vectors = []\n",
        "        self.texts = []\n",
        "        self.metadata = []\n",
        "    \n",
        "    def add_item(self, text, embedding, metadata=None):\n",
        "        \"\"\"向向量存储中添加项目。\"\"\"\n",
        "        self.vectors.append(np.array(embedding))\n",
        "        self.texts.append(text)\n",
        "        self.metadata.append(metadata or {})\n",
        "    \n",
        "    def similarity_search(self, query_embedding, k=5, filter_func=None):\n",
        "        \"\"\"找到与查询嵌入最相似的项目。\"\"\"\n",
        "        if not self.vectors:\n",
        "            return []\n",
        "        \n",
        "        query_vector = np.array(query_embedding)\n",
        "        similarities = []\n",
        "        \n",
        "        for i, vector in enumerate(self.vectors):\n",
        "            if filter_func and not filter_func(self.metadata[i]):\n",
        "                continue\n",
        "                \n",
        "            similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n",
        "            similarities.append((i, similarity))\n",
        "        \n",
        "        similarities.sort(key=lambda x: x[1], reverse=True)\n",
        "        \n",
        "        results = []\n",
        "        for i in range(min(k, len(similarities))):\n",
        "            idx, score = similarities[i]\n",
        "            results.append({\n",
        "                \"text\": self.texts[idx],\n",
        "                \"metadata\": self.metadata[idx],\n",
        "                \"similarity\": float(score)\n",
        "            })\n",
        "        \n",
        "        return results\n",
        "\n",
        "def create_embeddings(texts, model=\"bge-m3:latest\"):\n",
        "    \"\"\"为给定文本创建嵌入向量。\"\"\"\n",
        "    if not texts:\n",
        "        return []\n",
        "        \n",
        "    batch_size = 100\n",
        "    all_embeddings = []\n",
        "    \n",
        "    for i in range(0, len(texts), batch_size):\n",
        "        batch = texts[i:i + batch_size]\n",
        "        \n",
        "        response = client.embeddings.create(\n",
        "            model=model,\n",
        "            input=batch\n",
        "        )\n",
        "        \n",
        "        batch_embeddings = [item.embedding for item in response.data]\n",
        "        all_embeddings.extend(batch_embeddings)\n",
        "    \n",
        "    return all_embeddings\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# 层次化RAG主要实现函数\n",
        "\n",
        "def generate_page_summary(page_text):\n",
        "    \"\"\"生成页面的简洁摘要。\"\"\"\n",
        "    system_prompt = \"\"\"你是一个专业的摘要系统。\n",
        "    为提供的文本创建详细摘要。\n",
        "    专注于捕获主要主题、关键信息和重要事实。\"\"\"\n",
        "\n",
        "    max_tokens = 6000\n",
        "    truncated_text = page_text[:max_tokens] if len(page_text) > max_tokens else page_text\n",
        "\n",
        "    response = client.chat.completions.create(\n",
        "        model=\"meta-llama/Llama-3.2-3B-Instruct\",\n",
        "        messages=[\n",
        "            {\"role\": \"system\", \"content\": system_prompt},\n",
        "            {\"role\": \"user\", \"content\": f\"请总结这段文本:\\\\n\\\\n{truncated_text}\"}\n",
        "        ],\n",
        "        temperature=0.3\n",
        "    )\n",
        "    \n",
        "    return response.choices[0].message.content\n",
        "\n",
        "def hierarchical_rag(query, pdf_path, chunk_size=1000, chunk_overlap=200, \n",
        "                    k_summaries=3, k_chunks=5, regenerate=False):\n",
        "    \"\"\"完整的层次化RAG流水线。\"\"\"\n",
        "    \n",
        "    # 从PDF提取页面\n",
        "    pages = extract_text_from_pdf(pdf_path)\n",
        "    \n",
        "    # 为每页创建摘要\n",
        "    print(\"正在生成页面摘要...\")\n",
        "    summaries = []\n",
        "    for i, page in enumerate(pages[:5]):  # 限制为前5页以节省时间\n",
        "        print(f\"正在总结第{i+1}页...\")\n",
        "        summary_text = generate_page_summary(page[\"text\"])\n",
        "        \n",
        "        summary_metadata = page[\"metadata\"].copy()\n",
        "        summary_metadata.update({\"is_summary\": True})\n",
        "        \n",
        "        summaries.append({\n",
        "            \"text\": summary_text,\n",
        "            \"metadata\": summary_metadata\n",
        "        })\n",
        "    \n",
        "    # 为每页创建详细块\n",
        "    detailed_chunks = []\n",
        "    for page in pages[:5]:  # 同样限制为前5页\n",
        "        page_chunks = chunk_text(page[\"text\"], page[\"metadata\"], chunk_size, chunk_overlap)\n",
        "        detailed_chunks.extend(page_chunks)\n",
        "    \n",
        "    print(f\"创建了{len(detailed_chunks)}个详细块\")\n",
        "    \n",
        "    # 创建嵌入\n",
        "    print(\"正在创建嵌入...\")\n",
        "    summary_texts = [summary[\"text\"] for summary in summaries]\n",
        "    summary_embeddings = create_embeddings(summary_texts)\n",
        "    \n",
        "    chunk_texts = [chunk[\"text\"] for chunk in detailed_chunks]\n",
        "    chunk_embeddings = create_embeddings(chunk_texts)\n",
        "    \n",
        "    # 创建向量存储\n",
        "    summary_store = SimpleVectorStore()\n",
        "    detailed_store = SimpleVectorStore()\n",
        "    \n",
        "    # 添加数据到存储\n",
        "    for i, summary in enumerate(summaries):\n",
        "        summary_store.add_item(\n",
        "            text=summary[\"text\"],\n",
        "            embedding=summary_embeddings[i],\n",
        "            metadata=summary[\"metadata\"]\n",
        "        )\n",
        "    \n",
        "    for i, chunk in enumerate(detailed_chunks):\n",
        "        detailed_store.add_item(\n",
        "            text=chunk[\"text\"],\n",
        "            embedding=chunk_embeddings[i],\n",
        "            metadata=chunk[\"metadata\"]\n",
        "        )\n",
        "    \n",
        "    # 层次化检索\n",
        "    print(f\"对查询进行层次化检索: {query}\")\n",
        "    query_embedding = create_embeddings([query])[0]\n",
        "    \n",
        "    # 首先检索相关摘要\n",
        "    summary_results = summary_store.similarity_search(query_embedding, k=k_summaries)\n",
        "    print(f\"检索到{len(summary_results)}个相关摘要\")\n",
        "    \n",
        "    # 收集相关页面\n",
        "    relevant_pages = [result[\"metadata\"][\"page\"] for result in summary_results]\n",
        "    \n",
        "    # 从相关页面检索详细块\n",
        "    def page_filter(metadata):\n",
        "        return metadata[\"page\"] in relevant_pages\n",
        "    \n",
        "    detailed_results = detailed_store.similarity_search(\n",
        "        query_embedding, k=k_chunks * len(relevant_pages), filter_func=page_filter\n",
        "    )\n",
        "    \n",
        "    print(f\"从相关页面检索到{len(detailed_results)}个详细块\")\n",
        "    \n",
        "    # 生成响应\n",
        "    context_parts = []\n",
        "    for chunk in detailed_results:\n",
        "        page_num = chunk[\"metadata\"][\"page\"]\n",
        "        context_parts.append(f\"[第{page_num}页]: {chunk['text']}\")\n",
        "    \n",
        "    context = \"\\\\n\\\\n\".join(context_parts)\n",
        "    \n",
        "    system_message = \"\"\"你是一个有用的AI助手，基于提供的上下文回答问题。\n",
        "使用上下文中的信息准确回答用户的问题。\n",
        "在引用具体信息时包含页码。\"\"\"\n",
        "\n",
        "    response = client.chat.completions.create(\n",
        "        model=\"meta-llama/Llama-3.2-3B-Instruct\",\n",
        "        messages=[\n",
        "            {\"role\": \"system\", \"content\": system_message},\n",
        "            {\"role\": \"user\", \"content\": f\"上下文:\\\\n\\\\n{context}\\\\n\\\\n问题: {query}\"}\n",
        "        ],\n",
        "        temperature=0.2\n",
        "    )\n",
        "    \n",
        "    return {\n",
        "        \"query\": query,\n",
        "        \"response\": response.choices[0].message.content,\n",
        "        \"retrieved_chunks\": detailed_results,\n",
        "        \"summary_count\": len(summaries),\n",
        "        \"detailed_count\": len(detailed_chunks)\n",
        "    }\n"
      ]
    },
    {
      "cell_type": "raw",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 测试示例\n",
        "\n",
        "演示层次化RAG的使用方法：\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {},
      "outputs": [],
      "source": [
        "# 层次化RAG测试示例\n",
        "\n",
        "# 包含AI信息的PDF文档路径\n",
        "pdf_path = \"data/AI_Information.pdf\"\n",
        "\n",
        "# 测试查询\n",
        "query = \"transformer模型在自然语言处理中的主要应用有哪些？\"\n",
        "\n",
        "print(f\"测试查询: {query}\")\n",
        "print(\"=\" * 50)\n",
        "\n",
        "# 运行层次化RAG\n",
        "result = hierarchical_rag(query, pdf_path)\n",
        "\n",
        "print(f\"\\\\n响应:\\\\n{result['response']}\")\n",
        "print(f\"\\\\n检索统计:\")\n",
        "print(f\"- 检索的块数量: {len(result['retrieved_chunks'])}\")\n",
        "print(f\"- 摘要总数: {result['summary_count']}\")\n",
        "print(f\"- 详细块总数: {result['detailed_count']}\")\n",
        "\n",
        "# 显示检索到的块信息\n",
        "print(f\"\\\\n检索到的块详情:\")\n",
        "for i, chunk in enumerate(result['retrieved_chunks'][:3]):  # 显示前3个\n",
        "    page = chunk['metadata']['page']\n",
        "    similarity = chunk['similarity']\n",
        "    text_preview = chunk['text'][:100] + \"...\" if len(chunk['text']) > 100 else chunk['text']\n",
        "    print(f\"块 {i+1}: 第{page}页 (相似度: {similarity:.3f})\")\n",
        "    print(f\"内容预览: {text_preview}\")\n",
        "    print(\"-\" * 30)\n"
      ]
    },
    {
      "cell_type": "raw",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 总结\n",
        "\n",
        "这个层次化RAG实现展示了如何通过两层检索策略改进传统RAG系统：\n",
        "\n",
        "### 主要特点：\n",
        "1. **两层检索**：首先通过摘要识别相关文档章节，然后从这些章节检索详细信息\n",
        "2. **上下文保持**：通过页面级摘要维护文档结构\n",
        "3. **效率提升**：通过预过滤减少需要搜索的文本块数量\n",
        "4. **可扩展性**：适用于大型文档集合\n",
        "\n",
        "### 优势：\n",
        "- 减少不相关检索结果\n",
        "- 保持文档层次结构  \n",
        "- 提高长文档的检索质量\n",
        "- 支持页面级别的精确引用\n",
        "\n",
        "### 适用场景：\n",
        "- 大型技术文档\n",
        "- 学术论文集合\n",
        "- 法律文件\n",
        "- 任何需要保持上下文结构的多页文档\n",
        "\n",
        "这种方法特别适合于需要理解文档整体结构和局部细节的应用场景。\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "# RAG系统的层次化索引\n",
        "\n",
        "在这个笔记本中，我实现了一个RAG系统的层次化索引方法。这种技术通过使用两层搜索方法改进检索：首先通过摘要识别相关文档章节，然后从这些章节中检索具体细节。\n",
        "\n",
        "传统的RAG方法平等对待所有文本块，这可能导致：\n",
        "\n",
        "- 当文本块太小时会丢失上下文\n",
        "- 当文档集合很大时会产生不相关的结果\n",
        "- 在整个语料库中进行低效搜索\n",
        "\n",
        "层次化检索通过以下方式解决这些问题：\n",
        "\n",
        "- 为较大的文档章节创建简洁摘要\n",
        "- 首先搜索这些摘要以识别相关章节\n",
        "- 然后仅从这些章节中检索详细信息\n",
        "- 在保持具体细节的同时维护上下文\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 设置环境\n",
        "我们首先导入必要的库。\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {},
      "outputs": [],
      "source": [
        "import os\n",
        "import numpy as np\n",
        "import json\n",
        "import fitz\n",
        "from openai import OpenAI\n",
        "import re\n",
        "import pickle\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 设置OpenAI API客户端\n",
        "我们初始化OpenAI客户端以生成嵌入向量和响应。\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {},
      "outputs": [],
      "source": [
        "# 使用基础URL和API密钥初始化OpenAI客户端\n",
        "client = OpenAI(\n",
        "    base_url=\"http://127.0.0.1:11434/v1/\",\n",
        "    api_key=\"ollama\" # 从环境变量中获取API密钥\n",
        ")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 文档处理函数\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 4,
      "metadata": {},
      "outputs": [],
      "source": [
        "def extract_text_from_pdf(pdf_path):\n",
        "    \"\"\"\n",
        "    从PDF文件中提取文本内容，按页面分离。\n",
        "    \n",
        "    参数:\n",
        "        pdf_path (str): PDF文件路径\n",
        "        \n",
        "    返回:\n",
        "        List[Dict]: 包含文本内容和元数据的页面列表\n",
        "    \"\"\"\n",
        "    print(f\"正在从{pdf_path}中提取文本...\")  # 打印正在处理的PDF路径\n",
        "    pdf = fitz.open(pdf_path)  # 使用PyMuPDF打开PDF文件\n",
        "    pages = []  # 初始化空列表来存储包含文本内容的页面\n",
        "    \n",
        "    # 遍历PDF中的每一页\n",
        "    for page_num in range(len(pdf)):\n",
        "        page = pdf[page_num]  # 获取当前页面\n",
        "        text = page.get_text()  # 从当前页面提取文本\n",
        "        \n",
        "        # 跳过文本内容很少的页面（少于50个字符）\n",
        "        if len(text.strip()) > 50:\n",
        "            # 将页面文本和元数据添加到列表中\n",
        "            pages.append({\n",
        "                \"text\": text,\n",
        "                \"metadata\": {\n",
        "                    \"source\": pdf_path,  # 源文件路径\n",
        "                    \"page\": page_num + 1  # 页码（从1开始的索引）\n",
        "                }\n",
        "            })\n",
        "    \n",
        "    print(f\"提取了{len(pages)}页内容\")  # 打印提取的页面数量\n",
        "    return pages  # 返回包含文本内容和元数据的页面列表\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {},
      "outputs": [],
      "source": [
        "def chunk_text(text, metadata, chunk_size=1000, overlap=200):\n",
        "    \"\"\"\n",
        "    将文本分割成重叠的块，同时保留元数据。\n",
        "    \n",
        "    参数:\n",
        "        text (str): 要分块的输入文本\n",
        "        metadata (Dict): 要保留的元数据\n",
        "        chunk_size (int): 每个块的字符大小\n",
        "        overlap (int): 块之间的重叠字符数\n",
        "        \n",
        "    返回:\n",
        "        List[Dict]: 包含元数据的文本块列表\n",
        "    \"\"\"\n",
        "    chunks = []  # 初始化空列表来存储块\n",
        "    \n",
        "    # 使用指定的块大小和重叠遍历文本\n",
        "    for i in range(0, len(text), chunk_size - overlap):\n",
        "        chunk_text = text[i:i + chunk_size]  # 提取文本块\n",
        "        \n",
        "        # 跳过非常小的块（少于50个字符）\n",
        "        if chunk_text and len(chunk_text.strip()) > 50:\n",
        "            # 创建元数据副本并添加块特定信息\n",
        "            chunk_metadata = metadata.copy()\n",
        "            chunk_metadata.update({\n",
        "                \"chunk_index\": len(chunks),  # 块的索引\n",
        "                \"start_char\": i,  # 块的起始字符索引\n",
        "                \"end_char\": i + len(chunk_text),  # 块的结束字符索引\n",
        "                \"is_summary\": False  # 表示这不是摘要的标志\n",
        "            })\n",
        "            \n",
        "            # 将块及其元数据添加到列表中\n",
        "            chunks.append({\n",
        "                \"text\": chunk_text,\n",
        "                \"metadata\": chunk_metadata\n",
        "            })\n",
        "    \n",
        "    return chunks  # 返回包含元数据的块列表\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 简单向量存储实现\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 6,
      "metadata": {},
      "outputs": [],
      "source": [
        "class SimpleVectorStore:\n",
        "    \"\"\"\n",
        "    使用NumPy的简单向量存储实现。\n",
        "    \"\"\"\n",
        "    def __init__(self):\n",
        "        self.vectors = []  # 存储向量嵌入的列表\n",
        "        self.texts = []  # 存储文本内容的列表\n",
        "        self.metadata = []  # 存储元数据的列表\n",
        "    \n",
        "    def add_item(self, text, embedding, metadata=None):\n",
        "        \"\"\"\n",
        "        向向量存储中添加项目。\n",
        "        \n",
        "        参数:\n",
        "            text (str): 文本内容\n",
        "            embedding (List[float]): 向量嵌入\n",
        "            metadata (Dict, optional): 附加元数据\n",
        "        \"\"\"\n",
        "        self.vectors.append(np.array(embedding))  # 将嵌入作为numpy数组添加\n",
        "        self.texts.append(text)  # 添加文本内容\n",
        "        self.metadata.append(metadata or {})  # 添加元数据，如果为None则添加空字典\n",
        "    \n",
        "    def similarity_search(self, query_embedding, k=5, filter_func=None):\n",
        "        \"\"\"\n",
        "        找到与查询嵌入最相似的项目。\n",
        "        \n",
        "        参数:\n",
        "            query_embedding (List[float]): 查询嵌入向量\n",
        "            k (int): 要返回的结果数量\n",
        "            filter_func (callable, optional): 过滤结果的函数\n",
        "            \n",
        "        返回:\n",
        "            List[Dict]: 前k个最相似的项目\n",
        "        \"\"\"\n",
        "        if not self.vectors:\n",
        "            return []  # 如果没有向量则返回空列表\n",
        "        \n",
        "        # 将查询嵌入转换为numpy数组\n",
        "        query_vector = np.array(query_embedding)\n",
        "        \n",
        "        # 使用余弦相似度计算相似性\n",
        "        similarities = []\n",
        "        for i, vector in enumerate(self.vectors):\n",
        "            # 如果不通过过滤器则跳过\n",
        "            if filter_func and not filter_func(self.metadata[i]):\n",
        "                continue\n",
        "                \n",
        "            # 计算余弦相似度\n",
        "            similarity = np.dot(query_vector, vector) / (np.linalg.norm(query_vector) * np.linalg.norm(vector))\n",
        "            similarities.append((i, similarity))  # 添加索引和相似度分数\n",
        "        \n",
        "        # 按相似度排序（降序）\n",
        "        similarities.sort(key=lambda x: x[1], reverse=True)\n",
        "        \n",
        "        # 返回前k个结果\n",
        "        results = []\n",
        "        for i in range(min(k, len(similarities))):\n",
        "            idx, score = similarities[i]\n",
        "            results.append({\n",
        "                \"text\": self.texts[idx],  # 添加文本内容\n",
        "                \"metadata\": self.metadata[idx],  # 添加元数据\n",
        "                \"similarity\": float(score)  # 添加相似度分数\n",
        "            })\n",
        "        \n",
        "        return results  # 返回前k个结果列表\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 创建嵌入向量\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {},
      "outputs": [],
      "source": [
        "def create_embeddings(texts, model=\"bge-m3:latest\"):\n",
        "    \"\"\"\n",
        "    为给定文本创建嵌入向量。\n",
        "    \n",
        "    参数:\n",
        "        texts (List[str]): 输入文本\n",
        "        model (str): 嵌入模型名称\n",
        "        \n",
        "    返回:\n",
        "        List[List[float]]: 嵌入向量\n",
        "    \"\"\"\n",
        "    # 处理空输入\n",
        "    if not texts:\n",
        "        return []\n",
        "        \n",
        "    # 如果需要则分批处理（OpenAI API限制）\n",
        "    batch_size = 100\n",
        "    all_embeddings = []\n",
        "    \n",
        "    # 分批遍历输入文本\n",
        "    for i in range(0, len(texts), batch_size):\n",
        "        batch = texts[i:i + batch_size]  # 获取当前批次的文本\n",
        "        \n",
        "        # 为当前批次创建嵌入\n",
        "        response = client.embeddings.create(\n",
        "            model=model,\n",
        "            input=batch\n",
        "        )\n",
        "        \n",
        "        # 从响应中提取嵌入\n",
        "        batch_embeddings = [item.embedding for item in response.data]\n",
        "        all_embeddings.extend(batch_embeddings)  # 将批次嵌入添加到列表中\n",
        "    \n",
        "    return all_embeddings  # 返回所有嵌入\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 摘要生成函数\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 9,
      "metadata": {},
      "outputs": [],
      "source": [
        "def generate_page_summary(page_text):\n",
        "    \"\"\"\n",
        "    生成页面的简洁摘要。\n",
        "    \n",
        "    参数:\n",
        "        page_text (str): 页面的文本内容\n",
        "        \n",
        "    返回:\n",
        "        str: 生成的摘要\n",
        "    \"\"\"\n",
        "    # 定义系统提示来指导摘要模型\n",
        "    system_prompt = \"\"\"你是一个专业的摘要系统。\n",
        "    为提供的文本创建详细摘要。\n",
        "    专注于捕获主要主题、关键信息和重要事实。\n",
        "    你的摘要应该足够全面以理解页面包含的内容，\n",
        "    但比原文更简洁。\"\"\"\n",
        "\n",
        "    # 如果输入文本超过最大token限制则截断\n",
        "    max_tokens = 6000\n",
        "    truncated_text = page_text[:max_tokens] if len(page_text) > max_tokens else page_text\n",
        "\n",
        "    # 向OpenAI API请求生成摘要\n",
        "    response = client.chat.completions.create(\n",
        "        model=\"qwen2.5:7b\",  # 指定使用的模型\n",
        "        messages=[\n",
        "            {\"role\": \"system\", \"content\": system_prompt},  # 指导助手的系统消息\n",
        "            {\"role\": \"user\", \"content\": f\"请总结这段文本:\\\\n\\\\n{truncated_text}\"}  # 包含要总结文本的用户消息\n",
        "        ],\n",
        "        temperature=0.3  # 设置响应生成的温度\n",
        "    )\n",
        "    \n",
        "    # 返回生成的摘要内容\n",
        "    return response.choices[0].message.content\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 层次化文档处理和检索\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 10,
      "metadata": {},
      "outputs": [],
      "source": [
        "def hierarchical_rag(query, pdf_path, chunk_size=1000, chunk_overlap=200, \n",
        "                    k_summaries=3, k_chunks=5, regenerate=False):\n",
        "    \"\"\"\n",
        "    完整的层次化RAG流水线。\n",
        "    \n",
        "    参数:\n",
        "        query (str): 用户查询\n",
        "        pdf_path (str): PDF文档路径\n",
        "        chunk_size (int): 每个详细块的大小\n",
        "        chunk_overlap (int): 块之间的重叠\n",
        "        k_summaries (int): 要检索的摘要数量\n",
        "        k_chunks (int): 每个摘要要检索的块数量\n",
        "        regenerate (bool): 是否重新生成向量存储\n",
        "        \n",
        "    返回:\n",
        "        Dict: 包含响应和检索块的结果\n",
        "    \"\"\"\n",
        "    # 为缓存创建存储文件名\n",
        "    summary_store_file = f\"{os.path.basename(pdf_path)}_summary_store.pkl\"\n",
        "    detailed_store_file = f\"{os.path.basename(pdf_path)}_detailed_store.pkl\"\n",
        "    \n",
        "    # 如果需要则处理文档并创建存储\n",
        "    if regenerate or not os.path.exists(summary_store_file) or not os.path.exists(detailed_store_file):\n",
        "        print(\"正在处理文档并创建向量存储...\")\n",
        "        # 处理文档以创建层次化索引和向量存储\n",
        "        summary_store, detailed_store = process_document_hierarchically(\n",
        "            pdf_path, chunk_size, chunk_overlap\n",
        "        )\n",
        "        \n",
        "        # 保存摘要存储到文件供将来使用\n",
        "        with open(summary_store_file, 'wb') as f:\n",
        "            pickle.dump(summary_store, f)\n",
        "        \n",
        "        # 保存详细存储到文件供将来使用\n",
        "        with open(detailed_store_file, 'wb') as f:\n",
        "            pickle.dump(detailed_store, f)\n",
        "    else:\n",
        "        # 从文件加载现有的摘要存储\n",
        "        print(\"正在加载现有的向量存储...\")\n",
        "        with open(summary_store_file, 'rb') as f:\n",
        "            summary_store = pickle.load(f)\n",
        "        \n",
        "        # 从文件加载现有的详细存储\n",
        "        with open(detailed_store_file, 'rb') as f:\n",
        "            detailed_store = pickle.load(f)\n",
        "    \n",
        "    # 使用查询层次化检索相关块\n",
        "    retrieved_chunks = retrieve_hierarchically(\n",
        "        query, summary_store, detailed_store, k_summaries, k_chunks\n",
        "    )\n",
        "    \n",
        "    # 基于检索的块生成响应\n",
        "    response = generate_response(query, retrieved_chunks)\n",
        "    \n",
        "    # 返回包含查询、响应、检索块和摘要、详细块计数的结果\n",
        "    return {\n",
        "        \"query\": query,\n",
        "        \"response\": response,\n",
        "        \"retrieved_chunks\": retrieved_chunks,\n",
        "        \"summary_count\": len(summary_store.texts),\n",
        "        \"detailed_count\": len(detailed_store.texts)\n",
        "    }\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 11,
      "metadata": {},
      "outputs": [],
      "source": [
        "def process_document_hierarchically(pdf_path, chunk_size=1000, chunk_overlap=200):\n",
        "    \"\"\"\n",
        "    将文档处理成层次化索引。\n",
        "    \n",
        "    参数:\n",
        "        pdf_path (str): PDF文件路径\n",
        "        chunk_size (int): 每个详细块的大小\n",
        "        chunk_overlap (int): 块之间的重叠\n",
        "        \n",
        "    返回:\n",
        "        Tuple[SimpleVectorStore, SimpleVectorStore]: 摘要和详细向量存储\n",
        "    \"\"\"\n",
        "    # 从PDF提取页面\n",
        "    pages = extract_text_from_pdf(pdf_path)\n",
        "    \n",
        "    # 为每页创建摘要\n",
        "    print(\"正在生成页面摘要...\")\n",
        "    summaries = []\n",
        "    for i, page in enumerate(pages):\n",
        "        print(f\"正在总结第{i+1}/{len(pages)}页...\")\n",
        "        summary_text = generate_page_summary(page[\"text\"])\n",
        "        \n",
        "        # 创建摘要元数据\n",
        "        summary_metadata = page[\"metadata\"].copy()\n",
        "        summary_metadata.update({\"is_summary\": True})\n",
        "        \n",
        "        # 将摘要文本和元数据添加到摘要列表\n",
        "        summaries.append({\n",
        "            \"text\": summary_text,\n",
        "            \"metadata\": summary_metadata\n",
        "        })\n",
        "    \n",
        "    # 为每页创建详细块\n",
        "    detailed_chunks = []\n",
        "    for page in pages:\n",
        "        # 分块页面文本\n",
        "        page_chunks = chunk_text(\n",
        "            page[\"text\"], \n",
        "            page[\"metadata\"], \n",
        "            chunk_size, \n",
        "            chunk_overlap\n",
        "        )\n",
        "        # 将当前页面的块扩展到detailed_chunks列表\n",
        "        detailed_chunks.extend(page_chunks)\n",
        "    \n",
        "    print(f\"创建了{len(detailed_chunks)}个详细块\")\n",
        "    \n",
        "    # 为摘要创建嵌入\n",
        "    print(\"正在为摘要创建嵌入...\")\n",
        "    summary_texts = [summary[\"text\"] for summary in summaries]\n",
        "    summary_embeddings = create_embeddings(summary_texts)\n",
        "    \n",
        "    # 为详细块创建嵌入\n",
        "    print(\"正在为详细块创建嵌入...\")\n",
        "    chunk_texts = [chunk[\"text\"] for chunk in detailed_chunks]\n",
        "    chunk_embeddings = create_embeddings(chunk_texts)\n",
        "    \n",
        "    # 创建向量存储\n",
        "    summary_store = SimpleVectorStore()\n",
        "    detailed_store = SimpleVectorStore()\n",
        "    \n",
        "    # 将摘要添加到摘要存储\n",
        "    for i, summary in enumerate(summaries):\n",
        "        summary_store.add_item(\n",
        "            text=summary[\"text\"],\n",
        "            embedding=summary_embeddings[i],\n",
        "            metadata=summary[\"metadata\"]\n",
        "        )\n",
        "    \n",
        "    # 将块添加到详细存储\n",
        "    for i, chunk in enumerate(detailed_chunks):\n",
        "        detailed_store.add_item(\n",
        "            text=chunk[\"text\"],\n",
        "            embedding=chunk_embeddings[i],\n",
        "            metadata=chunk[\"metadata\"]\n",
        "        )\n",
        "    \n",
        "    print(f\"创建了包含{len(summaries)}个摘要和{len(detailed_chunks)}个块的向量存储\")\n",
        "    return summary_store, detailed_store\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 13,
      "metadata": {},
      "outputs": [],
      "source": [
        "def retrieve_hierarchically(query, summary_store, detailed_store, k_summaries=3, k_chunks=5):\n",
        "    \"\"\"\n",
        "    使用层次化索引检索信息。\n",
        "    \n",
        "    参数:\n",
        "        query (str): 用户查询\n",
        "        summary_store (SimpleVectorStore): 文档摘要存储\n",
        "        detailed_store (SimpleVectorStore): 详细块存储\n",
        "        k_summaries (int): 要检索的摘要数量\n",
        "        k_chunks (int): 每个摘要要检索的块数量\n",
        "        \n",
        "    返回:\n",
        "        List[Dict]: 包含相关性分数的检索块\n",
        "    \"\"\"\n",
        "    print(f\"对查询进行层次化检索: {query}\")\n",
        "    \n",
        "    # 创建查询嵌入\n",
        "    query_embedding = create_embeddings([query])[0]\n",
        "    \n",
        "    # 首先，检索相关摘要\n",
        "    summary_results = summary_store.similarity_search(\n",
        "        query_embedding, \n",
        "        k=k_summaries\n",
        "    )\n",
        "    \n",
        "    print(f\"检索到{len(summary_results)}个相关摘要\")\n",
        "    \n",
        "    # 从相关摘要收集页面\n",
        "    relevant_pages = [result[\"metadata\"][\"page\"] for result in summary_results]\n",
        "    \n",
        "    # 创建过滤函数以仅保留来自相关页面的块\n",
        "    def page_filter(metadata):\n",
        "        return metadata[\"page\"] in relevant_pages\n",
        "    \n",
        "    # 然后，仅从这些相关页面检索详细块\n",
        "    detailed_results = detailed_store.similarity_search(\n",
        "        query_embedding, \n",
        "        k=k_chunks * len(relevant_pages),\n",
        "        filter_func=page_filter\n",
        "    )\n",
        "    \n",
        "    print(f\"从相关页面检索到{len(detailed_results)}个详细块\")\n",
        "    \n",
        "    return detailed_results\n",
        "\n",
        "def generate_response(query, retrieved_chunks):\n",
        "    \"\"\"\n",
        "    基于查询和检索的块生成响应。\n",
        "    \n",
        "    参数:\n",
        "        query (str): 用户查询\n",
        "        retrieved_chunks (List[Dict]): 从层次化搜索检索的块\n",
        "        \n",
        "    返回:\n",
        "        str: 生成的响应\n",
        "    \"\"\"\n",
        "    # 从块中提取文本并准备上下文部分\n",
        "    context_parts = []\n",
        "    \n",
        "    for i, chunk in enumerate(retrieved_chunks):\n",
        "        page_num = chunk[\"metadata\"][\"page\"]  # 从元数据获取页码\n",
        "        context_parts.append(f\"[第{page_num}页]: {chunk['text']}\")  # 用页码格式化块文本\n",
        "    \n",
        "    # 将所有上下文部分合并为单个上下文字符串\n",
        "    context = \"\\\\n\\\\n\".join(context_parts)\n",
        "    \n",
        "    # 定义系统消息以指导AI助手\n",
        "    system_message = \"\"\"你是一个有用的AI助手，基于提供的上下文回答问题。\n",
        "使用上下文中的信息准确回答用户的问题。\n",
        "如果上下文不包含相关信息，请承认这一点。\n",
        "在引用具体信息时包含页码。\"\"\"\n",
        "\n",
        "    # 使用OpenAI API生成响应\n",
        "    response = client.chat.completions.create(\n",
        "        model=\"qwen2.5:7b\",  # 指定要使用的模型\n",
        "        messages=[\n",
        "            {\"role\": \"system\", \"content\": system_message},  # 指导助手的系统消息\n",
        "            {\"role\": \"user\", \"content\": f\"上下文:\\\\n\\\\n{context}\\\\n\\\\n问题: {query}\"}  # 包含上下文和查询的用户消息\n",
        "        ],\n",
        "        temperature=0.2  # 设置响应生成的温度\n",
        "    )\n",
        "    \n",
        "    # 返回生成的响应内容\n",
        "    return response.choices[0].message.content\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 层次化和标准RAG方法的评估\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 14,
      "metadata": {},
      "outputs": [
        {
          "name": "stdout",
          "output_type": "stream",
          "text": [
            "正在处理文档并创建向量存储...\n",
            "正在从data/AI_Information.pdf中提取文本...\n",
            "提取了15页内容\n",
            "正在生成页面摘要...\n",
            "正在总结第1/15页...\n",
            "正在总结第2/15页...\n",
            "正在总结第3/15页...\n",
            "正在总结第4/15页...\n",
            "正在总结第5/15页...\n",
            "正在总结第6/15页...\n",
            "正在总结第7/15页...\n",
            "正在总结第8/15页...\n",
            "正在总结第9/15页...\n",
            "正在总结第10/15页...\n",
            "正在总结第11/15页...\n",
            "正在总结第12/15页...\n",
            "正在总结第13/15页...\n",
            "正在总结第14/15页...\n",
            "正在总结第15/15页...\n",
            "创建了47个详细块\n",
            "正在为摘要创建嵌入...\n",
            "正在为详细块创建嵌入...\n",
            "创建了包含15个摘要和47个块的向量存储\n",
            "对查询进行层次化检索: transformer模型在自然语言处理中的主要应用有哪些？\n",
            "检索到3个相关摘要\n",
            "从相关页面检索到10个详细块\n",
            "\\n=== 响应 ===\n",
            "根据提供的上下文信息，Transformer模型主要用于生成原始内容，包括文本、图像和音乐。具体来说，在自然语言处理（NLP）领域中，Transformers能够用于创建原创的文本内容。\n",
            "\n",
            "相关引用：\n",
            "- [第12页]: Generative AI models, such as Generative Adversarial Networks (GANs) and transformers, are capable of creating original content, including images, text, and music. These models are pushing the boundaries of AI-driven creativity and opening up new possibilities for artistic expression.\n",
            "\n",
            "因此，Transformer模型在自然语言处理中的主要应用是生成原创文本内容。\n",
            "\\n=== 运行测试查询 ===\n",
            "查询: transformer如何处理序列数据与RNN相比？\n",
            "正在加载现有的向量存储...\n",
            "对查询进行层次化检索: transformer如何处理序列数据与RNN相比？\n",
            "检索到3个相关摘要\n",
            "从相关页面检索到10个详细块\n",
            "\\n层次化RAG响应:\\n上下文中没有提到Transformer的具体信息，但根据常见的知识，Transformer和RNN在处理序列数据方面有一些不同之处。\n",
            "\n",
            "与传统的RNN（如LSTM或GRU）相比，Transformer通过使用自注意力机制来并行处理整个输入序列，而不是像RNN那样按顺序逐个处理每个元素。这种设计使得Transformer能够更高效地处理长序列数据，并且不需要反馈连接来保持信息的持久性。\n",
            "\n",
            "具体来说，在处理序列数据时：\n",
            "- **RNN** 采用递归的方式处理序列中的每一个元素，前一个时间步的信息会影响当前时间步的输出。\n",
            "- **Transformer** 则使用自注意力机制（Self-Attention），可以在不依赖于顺序的情况下并行地关注序列中不同位置的信息。\n",
            "\n",
            "因此，对于长序列数据或需要高效并行计算的应用场景，Transformer通常比RNN更有效。不过，上下文中没有直接提到Transformer的具体信息，所以无法提供更详细的比较内容。\n",
            "\\n检索的块数量: 10\n",
            "摘要总数: 15\n",
            "详细块总数: 47\n"
          ]
        }
      ],
      "source": [
        "# 包含AI信息的PDF文档路径\n",
        "pdf_path = \"data/AI_Information.pdf\"\n",
        "\n",
        "# 用于测试层次化RAG方法的AI相关示例查询\n",
        "query = \"transformer模型在自然语言处理中的主要应用有哪些？\"\n",
        "result = hierarchical_rag(query, pdf_path)\n",
        "\n",
        "print(\"\\\\n=== 响应 ===\")\n",
        "print(result[\"response\"])\n",
        "\n",
        "# 用于正式评估的测试查询（按要求仅使用一个查询）\n",
        "test_queries = [\n",
        "    \"transformer如何处理序列数据与RNN相比？\"\n",
        "]\n",
        "\n",
        "# 测试查询的参考答案以便进行比较\n",
        "reference_answers = [\n",
        "    \"Transformer通过使用自注意力机制而不是循环连接来处理序列数据，这与RNN不同。这使得transformer能够并行处理所有token而不是顺序处理，更高效地捕获长程依赖关系，并在训练期间实现更好的并行化。与RNN不同，transformer不会在长序列中遭受梯度消失问题。\"\n",
        "]\n",
        "\n",
        "print(f\"\\\\n=== 运行测试查询 ===\")\n",
        "print(f\"查询: {test_queries[0]}\")\n",
        "\n",
        "# 运行层次化RAG测试\n",
        "hierarchical_result = hierarchical_rag(test_queries[0], pdf_path)\n",
        "print(f\"\\\\n层次化RAG响应:\\\\n{hierarchical_result['response']}\")\n",
        "\n",
        "print(f\"\\\\n检索的块数量: {len(hierarchical_result['retrieved_chunks'])}\")\n",
        "print(f\"摘要总数: {hierarchical_result['summary_count']}\")\n",
        "print(f\"详细块总数: {hierarchical_result['detailed_count']}\")\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "vscode": {
          "languageId": "raw"
        }
      },
      "source": [
        "## 总结\n",
        "\n",
        "这个层次化RAG实现展示了如何通过两层检索策略改进传统RAG系统：\n",
        "\n",
        "### 主要特点：\n",
        "1. **两层检索**：首先通过摘要识别相关文档章节，然后从这些章节检索详细信息\n",
        "2. **上下文保持**：通过页面级摘要维护文档结构\n",
        "3. **效率提升**：通过预过滤减少需要搜索的文本块数量\n",
        "4. **可扩展性**：适用于大型文档集合\n",
        "\n",
        "### 优势：\n",
        "- 减少不相关检索结果\n",
        "- 保持文档层次结构\n",
        "- 提高长文档的检索质量\n",
        "- 支持缓存以提高性能\n",
        "\n",
        "### 适用场景：\n",
        "- 大型技术文档\n",
        "- 学术论文集合\n",
        "- 法律文件\n",
        "- 任何需要保持上下文结构的多页文档\n",
        "\n",
        "这种方法特别适合于需要理解文档整体结构和局部细节的应用场景。\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "rag",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.11"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2
}
