from langchain.text_splitter import CharacterTextSplitter
# Demo: chop a short bilingual sample string into fixed-size chunks
# using langchain's CharacterTextSplitter (imported above).
text = """是一段 需要被分割的 长文本实力.....，每个文本快的长度最大(多少个字符)Documet
loader are designed to load doucment objects. LangChain has uundreds of integratings wite
various datasource to load data from Slackm,NotionmGoole Drive"""

splitter = CharacterTextSplitter(
    separator=" ",     # split on single spaces
    chunk_size=50,     # maximum number of characters per chunk
    chunk_overlap=10,  # maximum characters shared between adjacent chunks
)

chunks = splitter.split_text(text)

# Report how many chunks were produced, then print each one on its own line.
print(len(chunks))
for piece in chunks:
    print(piece)


# Splitter documentation: https://python.langchain.com/docs/how_to