from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List, Iterable, AsyncIterable
from langchain_core.documents import Document

from app.config.model_config import SENTENCE_SIZE



class ChineseTextSplitter1(CharacterTextSplitter):
    """Sentence-aware splitter for Chinese (and mixed Chinese/English) text.

    Splits on sentence-ending punctuation (。！？?；; plus quoted endings and
    "......"/"……" ellipses), then greedily merges adjacent sentences so each
    returned chunk stays within ``sentence_size`` characters.
    """

    def __init__(self, pdf: bool = False, sentence_size: int = SENTENCE_SIZE, **kwargs):
        """Create the splitter.

        Args:
            pdf: When True, pre-clean PDF-extracted text (collapse runs of
                newlines, normalize all whitespace to single spaces) before
                sentence detection.
            sentence_size: Soft upper bound (in characters) for each chunk.
            **kwargs: Forwarded unchanged to ``CharacterTextSplitter``.
        """
        super().__init__(**kwargs)
        self.pdf = pdf
        self.sentence_size = sentence_size

    def split_text(self, text: str) -> List[str]:
        """Split ``text`` into chunks of at most ``sentence_size`` characters.

        Sentences are detected by inserting "\\n" after sentence-ending
        punctuation and splitting on it; consecutive sentences are then
        concatenated while the combined length fits the limit.

        Note: a single sentence longer than ``sentence_size`` is returned
        as-is — there is no hard mid-sentence split.
        """
        if self.pdf:
            # PDF extraction artifacts: collapse 3+ consecutive newlines,
            # then map every whitespace char (incl. remaining newlines) to
            # a single space. Raw string r"\s" avoids the invalid-escape
            # SyntaxWarning the non-raw "\s" triggers on Python 3.12+.
            text = re.sub(r"\n{3,}", "\n", text)
            text = re.sub(r"\s", " ", text)
            # NOTE: the original also ran re.sub("\n\n", "", text) here, but
            # the \s substitution above leaves no newlines to match, so that
            # step was a guaranteed no-op and has been removed.

        # Insert a newline after each sentence terminator so we can split on
        # "\n" below; closing quotes stay attached to their sentence.
        text = re.sub(r"([。！？?；;])([^”’])", r"\1\n\2", text)  # plain terminators
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis "......"
        text = re.sub(r'(…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis "……"
        text = re.sub(
            r'([。！？?]["’”」』]{0,2})([^。！？?，。！？?])', r"\1\n\2", text
        )  # terminator followed by up to two closing quotes

        text = text.rstrip()
        sentences = [segment for segment in text.split("\n") if segment]

        # Greedily merge consecutive sentences: extend the current chunk while
        # the combined length still fits sentence_size, else start a new one.
        chunks: List[str] = []
        for sentence in sentences:
            if chunks and len(chunks[-1]) + len(sentence) <= self.sentence_size:
                chunks[-1] += sentence
            else:
                chunks.append(sentence)
        return chunks
