from typing import List

import jieba.analyse
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_text_splitters import TextSplitter


class CustomSplitter(TextSplitter):
    """Custom text splitter: splits text on a given separator and reduces
    each segment to its top-k keywords (default 10) using jieba's TF-IDF
    keyword extraction.
    """

    def __init__(self, seperator: str, top_k: int = 10, **kwargs) -> None:
        """
        Args:
            seperator: Separator string used to split the input text.
                (Spelling kept as-is for backward compatibility with
                callers passing it by keyword.)
            top_k: Number of keywords to extract from each segment.
            **kwargs: Forwarded to the ``TextSplitter`` base class.
        """
        super().__init__(**kwargs)
        self._seperator = seperator
        self._top_k = top_k

    def split_text(self, text: str) -> List[str]:
        """Split *text* on the configured separator and return one
        comma-joined keyword string per segment.

        Args:
            text: The raw text to split.

        Returns:
            A list with one entry per segment; each entry is the segment's
            top-k keywords joined by commas (empty string for segments
            yielding no keywords).
        """
        # Split the raw text into segments on the configured separator.
        segments = text.split(self._seperator)
        # Extract top-k keywords per segment and join them in one pass.
        # Pass topK by keyword: extract_tags(sentence, topK=...) — avoids
        # relying on positional-argument order of a third-party API.
        # (Previously the loop variable shadowed this method's own name.)
        return [
            ",".join(jieba.analyse.extract_tags(segment, topK=self._top_k))
            for segment in segments
        ]
def main() -> None:
    """Load ./data.txt, split it into keyword chunks, and print each chunk."""
    # 1. Create the loader and the splitter.
    loader = UnstructuredFileLoader("./data.txt")
    text_splitter = CustomSplitter("\n\n")
    # 2. Load the documents and split them into keyword chunks.
    documents = loader.load()
    chunks = text_splitter.split_documents(documents)
    for chunk in chunks:
        print(chunk)


# Guard the script entry point so importing this module has no side effects.
if __name__ == "__main__":
    main()