from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter


class DocumentSpliter:
    """Split a list of documents into smaller, overlapping chunks.

    Wraps :class:`RecursiveCharacterTextSplitter` so that documents which
    are still too large after loading are broken into chunks of at most
    ``chunk_size`` characters, with ``chunk_overlap`` characters shared
    between consecutive chunks to preserve continuity across boundaries.
    """

    documents: list[Document]

    # Maximum size (in characters) of each chunk.
    chunk_size: int = 500

    # Overlap (in characters) between adjacent chunks, so context carries
    # over from one chunk to the next.
    chunk_overlap: int = 100

    def __init__(self, documents: list[Document],
                 chunk_size: int = 500, chunk_overlap: int = 100):
        """Store the documents and optionally override chunking parameters.

        Args:
            documents: The documents to split.
            chunk_size: Maximum chunk size in characters (default 500).
            chunk_overlap: Overlap between consecutive chunks in characters
                (default 100).
        """
        self.documents = documents
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

    def split(self) -> list[Document]:
        """Split the stored documents and return the resulting chunks."""
        # Separators are tried in order: paragraph break, line break,
        # Chinese full stop, then space — so splits land on natural
        # boundaries whenever possible.
        text_spliter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            separators=["\n\n", "\n", "。", " "],
        )
        return text_spliter.split_documents(self.documents)
