{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 文档拆分器的使用\n",
    "\n",
    "## 1、CharacterTextSplitter"
   ],
   "id": "631353f195d99e4b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T01:23:50.144705Z",
     "start_time": "2025-08-06T01:23:50.138705Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "\n",
    "# 2. Sample text to split\n",
    "text = \"\"\"\n",
    "LangChain 是一个用于开发由语言模型驱动的应用程序的框架的。它提供了一套工具和抽象，使开发者能够更容易地构建复杂的应用程序。\n",
    "\"\"\"\n",
    "\n",
    "# 3. Build a character splitter (pure size-based: no separator preference)\n",
    "char_splitter = CharacterTextSplitter(\n",
    "    chunk_size=51,   # maximum characters per chunk\n",
    "    chunk_overlap=5, # characters shared between adjacent chunks\n",
    "    separator=\"\"     # empty separator disables separator-first splitting\n",
    ")\n",
    "\n",
    "# 4. Split the text\n",
    "# split_text() takes a string and returns a list of strings\n",
    "chunks = char_splitter.split_text(text)\n",
    "\n",
    "# 5. Show each chunk together with its length\n",
    "for index, chunk in enumerate(chunks, start=1):\n",
    "    print(f\"块 {index}:长度：{len(chunk)}\")\n",
    "    print(chunk)\n",
    "    print(\"-\" * 50)"
   ],
   "id": "c818268540c1a614",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "块 1:长度：50\n",
      "LangChain 是一个用于开发由语言模型驱动的应用程序的框架的。它提供了一套工具和抽象，使开发者\n",
      "--------------------------------------------------\n",
      "块 2:长度：21\n",
      "，使开发者能够更容易地构建复杂的应用程序。\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T01:29:34.145605Z",
     "start_time": "2025-08-06T01:29:34.137602Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "\n",
    "# 2. Text to split (sentences delimited by the Chinese full stop)\n",
    "text = \"\"\"\n",
    "LangChain 是一个用于开发由语言模型。驱动的应用程序的框架的。它提供了一套工具和抽象。使开发者能够更容易地构建复杂的应用程序。123123123123。\n",
    "\"\"\"\n",
    "\n",
    "# 3. Build a splitter that prefers to cut at the Chinese full stop\n",
    "text_splitter = CharacterTextSplitter(\n",
    "    chunk_size=30,   # maximum characters per chunk\n",
    "    chunk_overlap=5, # characters shared between adjacent chunks\n",
    "    separator=\"。\",  # split on the Chinese full stop\n",
    ")\n",
    "\n",
    "# 4. Perform the split\n",
    "chunks = text_splitter.split_text(text)\n",
    "\n",
    "# 5. Show each chunk together with its length\n",
    "for index, chunk in enumerate(chunks, start=1):\n",
    "    print(f\"块 {index}:长度：{len(chunk)}\")\n",
    "    print(chunk)\n",
    "    print(\"-\" * 50)\n"
   ],
   "id": "da2e7d0b1ef56e3",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "块 1:长度：22\n",
      "LangChain 是一个用于开发由语言模型\n",
      "--------------------------------------------------\n",
      "块 2:长度：24\n",
      "。驱动的应用程序的框架的。它提供了一套工具和抽象\n",
      "--------------------------------------------------\n",
      "块 3:长度：20\n",
      "。使开发者能够更容易地构建复杂的应用程序\n",
      "--------------------------------------------------\n",
      "块 4:长度：14\n",
      "。123123123123。\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T01:32:27.298671Z",
     "start_time": "2025-08-06T01:32:27.281659Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain.text_splitter import CharacterTextSplitter\n",
    "\n",
    "# 2. Text to split\n",
    "text = \"这是第一段文本。这是第二段内容。最后一段结束。\"\n",
    "\n",
    "# 3. Build a splitter that retains the separator inside each chunk\n",
    "text_splitter = CharacterTextSplitter(\n",
    "    separator=\"。\",\n",
    "    chunk_size=20,\n",
    "    chunk_overlap=8,\n",
    "    keep_separator=True  # keep the separator character in the produced chunks\n",
    ")\n",
    "\n",
    "# 4. Perform the split\n",
    "chunks = text_splitter.split_text(text)\n",
    "\n",
    "# 5. Show each chunk together with its length\n",
    "for index, chunk in enumerate(chunks, start=1):\n",
    "    print(f\"块 {index}:长度：{len(chunk)}\")\n",
    "    print(chunk)\n",
    "    print(\"-\" * 50)"
   ],
   "id": "7884c1847d66aec7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "块 1:长度：15\n",
      "这是第一段文本。这是第二段内容\n",
      "--------------------------------------------------\n",
      "块 2:长度：16\n",
      "。这是第二段内容。最后一段结束。\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## 2、RecursiveCharacterTextSplitter\n",
    "\n"
   ],
   "id": "37ad920b82e64849"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T02:05:25.524905Z",
     "start_time": "2025-08-06T02:05:25.503856Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
    "\n",
    "# 2. Build the recursive splitter; add_start_index stores each chunk's\n",
    "#    starting offset within the original text in the document metadata\n",
    "text_splitter = RecursiveCharacterTextSplitter(\n",
    "    chunk_size=60,\n",
    "    chunk_overlap=0,\n",
    "    add_start_index=True,\n",
    ")\n",
    "\n",
    "# 3. Content to split\n",
    "# NOTE: renamed from `list`, which shadowed the built-in list type\n",
    "texts = [\"LangChain框架特性\\n\\n多模型集成(GPT/Claude)\\n记忆管理功能\\n链式调用设计。文档分析场景示例：需要处理PDF/Word等格式。\"]\n",
    "\n",
    "# 4. Split into documents\n",
    "# create_documents() takes a list of strings and returns a list of Documents\n",
    "paragraphs = text_splitter.create_documents(texts)\n",
    "\n",
    "for para in paragraphs:\n",
    "    print(para)\n",
    "    print('-------')"
   ],
   "id": "1578fca91b9e6c40",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "page_content='LangChain框架特性' metadata={'start_index': 0}\n",
      "-------\n",
      "page_content='多模型集成(GPT/Claude)\n",
      "记忆管理功能\n",
      "链式调用设计。文档分析场景示例：需要处理PDF/Word等格式。' metadata={'start_index': 15}\n",
      "-------\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T02:12:21.202177Z",
     "start_time": "2025-08-06T02:12:21.189461Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
    "\n",
    "# 2. Read the .txt file into a single string\n",
    "with open(\"asset/load/08-ai.txt\", encoding=\"utf-8\") as f:\n",
    "    state_of_the_union = f.read()\n",
    "\n",
    "# 3. Build the recursive character splitter\n",
    "text_splitter = RecursiveCharacterTextSplitter(\n",
    "    chunk_size=100,      # maximum characters per chunk\n",
    "    chunk_overlap=20,    # characters shared between adjacent chunks\n",
    "    length_function=len  # measure chunk size in characters\n",
    ")\n",
    "\n",
    "# 4. Split the text into Document objects\n",
    "texts = text_splitter.create_documents([state_of_the_union])\n",
    "\n",
    "# 5. Show the resulting documents\n",
    "for doc in texts:\n",
    "    print(doc)\n"
   ],
   "id": "daa48d8f17ff95c6",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "page_content='人工智能（AI）是什么？'\n",
      "page_content='人工智能（Artificial'\n",
      "page_content='Intelligence，简称AI）是指由计算机系统模拟人类智能的技术，使其能够执行通常需要人类认知能力的任务，如学习、推理、决策和语言理解。AI的核心目标是让机器具备感知环境、处理信息并自主行动的'\n",
      "page_content='让机器具备感知环境、处理信息并自主行动的能力。'\n",
      "page_content='1. AI的技术基础\n",
      "AI依赖多种关键技术：\n",
      "\n",
      "机器学习（ML）：通过算法让计算机从数据中学习规律，无需显式编程。例如，推荐系统通过用户历史行为预测偏好。'\n",
      "page_content='深度学习：基于神经网络的机器学习分支，擅长处理图像、语音等复杂数据。AlphaGo击败围棋冠军便是典型案例。\n",
      "\n",
      "自然语言处理（NLP）：使计算机理解、生成人类语言，如ChatGPT的对话能力。'\n",
      "page_content='2. AI的应用场景\n",
      "AI已渗透到日常生活和各行各业：\n",
      "\n",
      "医疗：辅助诊断（如AI分析医学影像）、药物研发加速。\n",
      "\n",
      "交通：自动驾驶汽车通过传感器和AI算法实现安全导航。'\n",
      "page_content='金融：欺诈检测、智能投顾（如风险评估模型）。\n",
      "\n",
      "教育：个性化学习平台根据学生表现调整教学内容。\n",
      "\n",
      "3. AI的挑战与未来\n",
      "尽管前景广阔，AI仍面临问题：'\n",
      "page_content='伦理争议：数据隐私、算法偏见（如招聘AI歧视特定群体）。\n",
      "\n",
      "就业影响：自动化可能取代部分人工岗位，但也会创造新职业。\n",
      "\n",
      "技术瓶颈：通用人工智能（AGI）尚未实现，当前AI仅擅长特定任务。'\n",
      "page_content='未来，AI将与人类协作而非替代：医生借助AI提高诊断效率，教师利用AI定制课程。其发展需平衡技术创新与社会责任，确保技术造福全人类。'\n"
     ]
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T02:22:58.058222Z",
     "start_time": "2025-08-06T02:22:58.044220Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain_community.document_loaders import PyPDFLoader\n",
    "from langchain.text_splitter import RecursiveCharacterTextSplitter\n",
    "\n",
    "# 2. Create the PyPDFLoader\n",
    "loader = PyPDFLoader(\"./asset/load/02-load.pdf\")\n",
    "\n",
    "# 3. Load the document objects (the PDF content itself is unused below;\n",
    "#    the hardcoded sample text is split instead — see the commented call in step 5)\n",
    "docs = loader.load()   # returns a list of Documents\n",
    "# print(f\"第0页：\\n{docs[0]}\")\n",
    "\n",
    "text = \"\"\"\n",
    "他的车，他的命！ 他忽然想起来，一年，二年，至少有三四年；一滴汗，两滴汗，不\n",
    "知道多少万滴汗，才挣出那辆车。从风里雨里的咬牙，从饭里茶里的自苦，才赚出那辆车。\n",
    "那辆车是他的一切挣扎与困苦的总结果与报酬，像身经百战的武士的一颗徽章。……他老想\n",
    "着远远的一辆车，可以使他自由，独立，像自己的手脚的那么一辆车。\"\n",
    "\n",
    "\"他吃，他喝，他嫖，他赌，他懒，他狡猾， 因为他没了心，他的心被人家摘了去。他\n",
    "只剩下那个高大的肉架子，等着溃烂，预备着到乱死岗子去。……体面的、要强的、好梦想\n",
    "的、利己的、个人的、健壮的、伟大的祥子，不知陪着人家送了多少回殡；不知道何时何地\n",
    "会埋起他自己来， 埋起这堕落的、 自私的、 不幸的、 社会病胎里的产儿， 个人主义的末路鬼！\n",
    "\"\"\"\n",
    "\n",
    "# 4. Create the splitter\n",
    "text_splitter = RecursiveCharacterTextSplitter(\n",
    "    # chunk_size=200,\n",
    "    chunk_size=120,\n",
    "    chunk_overlap=0,\n",
    "    # chunk_overlap=100,\n",
    "    length_function=len,\n",
    "    add_start_index=True,\n",
    ")\n",
    "\n",
    "# 5. Split the sample text into Document objects\n",
    "# create_documents() takes a list of strings and returns a list of Documents\n",
    "# paragraphs = text_splitter.create_documents([docs[0].page_content])\n",
    "paragraphs = text_splitter.create_documents([text])\n",
    "for para in paragraphs:\n",
    "    print(para.page_content)\n",
    "    print('-------')"
   ],
   "id": "a896f12325bdef66",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "他的车，他的命！ 他忽然想起来，一年，二年，至少有三四年；一滴汗，两滴汗，不\n",
      "知道多少万滴汗，才挣出那辆车。从风里雨里的咬牙，从饭里茶里的自苦，才赚出那辆车。\n",
      "-------\n",
      "那辆车是他的一切挣扎与困苦的总结果与报酬，像身经百战的武士的一颗徽章。……他老想\n",
      "着远远的一辆车，可以使他自由，独立，像自己的手脚的那么一辆车。\"\n",
      "-------\n",
      "\"他吃，他喝，他嫖，他赌，他懒，他狡猾， 因为他没了心，他的心被人家摘了去。他\n",
      "只剩下那个高大的肉架子，等着溃烂，预备着到乱死岗子去。……体面的、要强的、好梦想\n",
      "-------\n",
      "的、利己的、个人的、健壮的、伟大的祥子，不知陪着人家送了多少回殡；不知道何时何地\n",
      "会埋起他自己来， 埋起这堕落的、 自私的、 不幸的、 社会病胎里的产儿， 个人主义的末路鬼！\n",
      "-------\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## 3、split by tokens\n",
    "\n"
   ],
   "id": "e088d3f34c226c26"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-06T02:34:18.817904Z",
     "start_time": "2025-08-06T02:34:18.801902Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 1. Import dependencies\n",
    "from langchain_text_splitters import CharacterTextSplitter\n",
    "import tiktoken  # used to count tokens per chunk\n",
    "\n",
    "# 2. Build a splitter whose chunk_size is measured in tokens, not characters\n",
    "ENCODING_NAME = \"cl100k_base\"\n",
    "text_splitter = CharacterTextSplitter.from_tiktoken_encoder(\n",
    "    encoding_name=ENCODING_NAME,\n",
    "    chunk_size=31,\n",
    "    chunk_overlap=0,\n",
    "    separator=\"。\",        # split on the Chinese full stop\n",
    "    keep_separator=False,  # drop the separator from the produced chunks\n",
    ")\n",
    "\n",
    "# 3. Text to split\n",
    "text = \"人工智能是一个强大的开发框架。它支持多种语言模型和工具链。今天你吃了吗？我好想吃饭，但是现在不饿 咋办 12345不说话12345不说话12345不说话\"\n",
    "\n",
    "# 4. Perform the split\n",
    "texts = text_splitter.split_text(text)\n",
    "print(f\"分割后的块数: {len(texts)}\")\n",
    "\n",
    "# 5. Create a tiktoken encoder with the SAME encoding as the splitter\n",
    "encoder = tiktoken.get_encoding(ENCODING_NAME)\n",
    "\n",
    "# 6. Report token count and content for every chunk\n",
    "for i, chunk in enumerate(texts):\n",
    "    tokens = encoder.encode(chunk)\n",
    "    print(f\"块 {i + 1}: {len(tokens)} Token\\n内容: {chunk}\\n\")"
   ],
   "id": "1699f7c3ea6e1bb7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "分割后的块数: 3\n",
      "块 1: 17 Token\n",
      "内容: 人工智能是一个强大的开发框架\n",
      "\n",
      "块 2: 14 Token\n",
      "内容: 它支持多种语言模型和工具链\n",
      "\n",
      "块 3: 47 Token\n",
      "内容: 今天你吃了吗？我好想吃饭，但是现在不饿 咋办 12345不说话12345不说话12345不说话\n",
      "\n"
     ]
    }
   ],
   "execution_count": 30
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## 4、SemanticChunker 语义分块\n",
    "\n"
   ],
   "id": "37cabf5adaccbc9b"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "source": [
    "from langchain_experimental.text_splitter import SemanticChunker\n",
    "from langchain_openai.embeddings import OpenAIEmbeddings\n",
    "import os\n",
    "import dotenv\n",
    "\n",
    "dotenv.load_dotenv()\n",
    "\n",
    "# Load the text to split\n",
    "with open(\"asset/load/09-ai1.txt\", encoding=\"utf-8\") as f:\n",
    "    state_of_the_union = f.read()  # a single string\n",
    "\n",
    "# Configure the embedding model. Fail fast with a clear message when a\n",
    "# variable is missing: os.environ[...] = None would raise an opaque TypeError.\n",
    "api_key = os.getenv(\"OPENAI_API_KEY1\")\n",
    "base_url = os.getenv(\"OPENAI_BASE_URL\")\n",
    "if not api_key or not base_url:\n",
    "    raise RuntimeError(\"OPENAI_API_KEY1 and OPENAI_BASE_URL must be set in the environment/.env\")\n",
    "os.environ['OPENAI_API_KEY'] = api_key\n",
    "os.environ['OPENAI_BASE_URL'] = base_url\n",
    "embed_model = OpenAIEmbeddings(\n",
    "    model=\"text-embedding-3-large\"\n",
    ")\n",
    "\n",
    "# Build the semantic chunker\n",
    "text_splitter = SemanticChunker(\n",
    "    embeddings=embed_model,\n",
    "    breakpoint_threshold_type=\"percentile\",  # one of: percentile, standard_deviation, interquartile, gradient\n",
    "    breakpoint_threshold_amount=65.0         # lower threshold -> more split points\n",
    ")\n",
    "\n",
    "# Split the document\n",
    "docs = text_splitter.create_documents(texts = [state_of_the_union])\n",
    "print(len(docs))\n",
    "for doc in docs:\n",
    "    print(f\"🔍 文档 {doc}:\")"
   ],
   "id": "bdb4501ccc6f14f8",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "总结：文档切分器的方法的调用\n",
   "id": "e123d0877b776500"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Pseudocode: the three splitting entry points\n",
    "splitter = CharacterTextSplitter()\n",
    "\n",
    "# Option 1: argument type: str; return type: list[str]\n",
    "splitter.split_text()\n",
    "\n",
    "# Option 2: argument type: list[str]; return type: list[Document]\n",
    "splitter.create_documents()   # internally calls split_text()\n",
    "\n",
    "# Option 3: argument type: list[Document]; return type: list[Document]\n",
    "splitter.split_documents()  # internally calls create_documents()"
   ],
   "id": "4857b3a36bb7ef81"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
     "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
