'''
* This is the project for Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: 17_study_splitter_with_llm_token.py
* @Time: 2025/10/29
* @All Rights Reserved By Brtc
'''
from functools import lru_cache

import dotenv
import tiktoken
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_experimental.text_splitter import SemanticChunker
from langchain_text_splitters import RecursiveJsonSplitter, RecursiveCharacterTextSplitter


@lru_cache(maxsize=1)
def _get_token_encoding() -> "tiktoken.Encoding":
    """Return the (cached) tiktoken encoding for the embedding model.

    ``tiktoken.encoding_for_model`` loads the BPE vocabulary, which is
    expensive.  This function is called once per chunk-length measurement
    by the text splitter, so the encoding is cached after the first load.
    """
    return tiktoken.encoding_for_model("text-embedding-3-large")


def calculate_token_count(query: str) -> int:
    """Count the number of tokens in the given text.

    Uses the tokenizer of OpenAI's ``text-embedding-3-large`` model so that
    chunk sizes measured here line up with that embedding model's limits.

    :param query: Text to tokenize; an empty string yields 0.
    :return: Number of tokens produced by encoding ``query``.
    """
    return len(_get_token_encoding().encode(query))


dotenv.load_dotenv()

# 1. Build the loader and a recursive splitter whose "size" is measured in
#    LLM tokens (see length_function) rather than characters.
loader = UnstructuredFileLoader("./data.txt")
text_splitter = RecursiveCharacterTextSplitter(
    separators=[
        "\n\n",
        "\n",
        "。|！|？",          # Chinese sentence-ending punctuation
        r"\.\s|\!\s|\?\s",   # English sentence enders are usually followed by a space
        r"；|;\s",
        r"，|,\s",
        " ",
        "",
    ],
    is_separator_regex=True,
    chunk_size=500,      # measured in tokens via calculate_token_count
    chunk_overlap=50,
    length_function=calculate_token_count,
)

# 2. Load the source document and split it into token-bounded chunks.
documents = loader.load()
chunks = text_splitter.split_documents(documents)

for chunk in chunks:
    print(f"块大小：{len(chunk.page_content)}, 元数据：{chunk.metadata}")
