

from langchain_text_splitters.base import (
    Language,
    TextSplitter,
    Tokenizer,
    TokenTextSplitter,
    split_text_on_tokens,
)
from langchain_text_splitters.character import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter,
)
from langchain_text_splitters.html import (
    ElementType,
    HTMLHeaderTextSplitter,
    HTMLSectionSplitter,
    HTMLSemanticPreservingSplitter,
)
from langchain_text_splitters.json import RecursiveJsonSplitter
from langchain_text_splitters.jsx import JSFrameworkTextSplitter
from langchain_text_splitters.konlpy import KonlpyTextSplitter
from langchain_text_splitters.latex import LatexTextSplitter
from langchain_text_splitters.markdown import (
    ExperimentalMarkdownSyntaxTextSplitter,
    HeaderType,
    LineType,
    MarkdownHeaderTextSplitter,
    MarkdownTextSplitter,
)
from langchain_text_splitters.nltk import NLTKTextSplitter
from langchain_text_splitters.python import PythonCodeTextSplitter
from langchain_text_splitters.sentence_transformers import (
    SentenceTransformersTokenTextSplitter,
)
from langchain_text_splitters.spacy import SpacyTextSplitter
# Sample Chinese news text for the splitter demos below.  The original
# literal embedded an explicit "\n\n" escape sequence next to a real line
# break, so there is a three-newline gap between the duplicated sentences;
# the concatenation below reproduces that value exactly.
text = (
    "\n"
    "10月15日起，2026年度国家公务员考试开始报名，今年国考计划招录3.81万人，\n"
    "报考年龄限制放宽至本科38周岁以下、应届硕博43周岁以下。\n\n\n"
    "报考年龄限制放宽至本科38周岁以下、应届硕博43周岁以下。\n"
)


# First sample HTML document (English headings, h1/h2 only).
# NOTE(review): this binding is overwritten by the later `html_content`
# assignments in this file, so only the last assignment is in effect for
# any live code that reads `html_content`.
html_content = '''
<html>
    <body>
    <h1>Introduction</h1>
    <p>Welcome to the introduction section.</p>
    <h2>Background</h2>
    <p>Some background details here.</p>
    <h1>Conclusion</h1>
    <p>Final thoughts.</p>
    </body>
</html>'''

 
# Sample HTML document (Chinese content, nested h1-h3 headings).
# NOTE(review): shadowed by the third `html_content` assignment further
# down; presumably kept as an alternative fixture for the header splitters.
html_content = """
<!DOCTYPE html>
<html>
<body>
  <div>
    <h1>人工智能概述</h1>
    <p>人工智能是模拟人类智能的计算机系统。</p>
    <div>
      <h2>机器学习</h2>
      <p>机器学习是实现AI的核心技术。</p>
      <h3>监督学习</h3>
      <p>使用标注数据进行模型训练。</p>
      <h3>无监督学习</h3>
      <p>发现数据中的隐藏模式。</p>
    </div>
    <div>
      <h2>自然语言处理</h2>
      <p>让计算机理解人类语言的技术。</p>
    </div>
  </div>
</body>
</html>
"""
 

# Complex HTML document containing a table and ordered/unordered lists.
# This is the final `html_content` binding, so it is the value any live
# code below (and the disabled demo string at the end of the file) would
# actually see.
html_content = """
<!DOCTYPE html>
<html>
<body>
  <div>
    <h1>产品规格</h1>
    <table>
      <tr><th>参数</th><th>值</th></tr>
      <tr><td>尺寸</td><td>15.6英寸</td></tr>
      <tr><td>重量</td><td>1.8kg</td></tr>
    </table>
    <ul>
      <li>支持4K分辨率</li>
      <li>内置指纹识别</li>
    </ul>
    <div>
      <h2>技术特点</h2>
      <p>采用最新散热技术</p>
      <ol>
        <li>双风扇设计</li>
        <li>液态金属导热</li>
      </ol>
    </div>
  </div>
</body>
</html>
"""




# Build a sentence splitter.  NOTE(review): SpacyTextSplitter is constructed
# with its defaults here (the original comment says the English model), yet
# it is fed Chinese text below -- splitting Chinese properly presumably
# requires the zh_core_web_sm pipeline to be installed; confirm before
# relying on the sentence boundaries it produces.
text_splitter = SpacyTextSplitter()

# Sample Chinese text: two sentences about NLP.
chinese_text = "自然语言处理是人工智能的重要领域。它包含文本分类、实体识别等技术。"

# Perform the split.
split_chunks = text_splitter.split_text(chinese_text)

# Print every chunk with a 1-based index.
for seq_no, piece in enumerate(split_chunks, start=1):
    print(f"分段{seq_no}: {piece}")





# NOTE(review): everything below is DISABLED demo code, parked inside a bare
# triple-quoted string literal so it never executes (Markdown, HTML-header,
# HTML-section, character and token splitter examples).  If any of it is
# re-enabled, beware the trailing comma on the `headers_to_split_on=[...]`
# line: it would make that variable a one-element tuple containing a list,
# not the list the splitter constructors expect.
'''
# 初始化分割器（块大小40字符，无重叠）
splitter = MarkdownTextSplitter(chunk_size=40, chunk_overlap=0)

# 示例Markdown文档
markdown_text = """
# 加州游玩指南
## 交通
建议沿1号公路自驾至圣地亚哥
### 美食
别忘了品尝当地墨西哥卷饼
## 徒步
推荐优胜美地国家公园
"""

# 执行分割
documents = splitter.create_documents([markdown_text])

# 输出结果
for doc in documents:
    print(f"内容块: {doc.page_content}\n{'='*30}")


headers_to_split_on=[("h1", "Header 1"), ("h2", "Header 2")],
# 初始化分割器（默认配置即可保留语义结构）
splitter = HTMLSemanticPreservingSplitter(headers_to_split_on=headers_to_split_on)

# 执行分割
documents = splitter.split_text(html_content)

# 输出结果
for idx, doc in enumerate(documents):
    print(f"【分割块 {idx+1}】\n{doc.page_content}\n{'='*50}")


# 配置需要分割的标题层级
header_splits = [
    ("h1", "一级标题"),
    ("h2", "二级标题"), 
    ("h3", "三级标题")
]

# 初始化分割器
splitter = HTMLSectionSplitter(headers_to_split_on=header_splits)

# 执行分割
documents = splitter.split_text(html_content)

# 输出结果
for doc in documents:
    print(f"【{doc.metadata}】\n{doc.page_content}\n{'='*50}")




headers_to_split_on = [("h1", "Main Topic"), ("h2", "Sub Topic")]

splitter = HTMLHeaderTextSplitter(
    headers_to_split_on=headers_to_split_on,
    return_each_element=False
)


documents = splitter.split_text(html_content)
print(len(documents))

sp=CharacterTextSplitter(separator="下")

rs=sp.split_text(text)
print(len(rs))

sp=TokenTextSplitter()

rs=sp.split_text(text)
print(rs)
'''