


from llama_index.core.node_parser import SentenceSplitter as InnerSentenceSplitter
from llama_index.core.node_parser import SemanticSplitterNodeParser as InnerSemanticSplitterNodeParser
from typing import List

# Sample text for ad-hoc experimentation. When this module is run as a script,
# the __main__ block below rebinds `s` with file contents, so this value is
# effectively unused in that path.
s = "PCF对接数据配置\n\n子主题：\n\n  * 服务配置\n\n  * 配置NRF对接数据\n\n  * 配置SMF对接数据\n\n  * 配置BSF对接数据\n\n  * 配置CHF对接数据\n\n\n\n"

class SemanticSplitterNodeParser:
        """Wrapper intended to expose semantic chunking through a simple ``split``.

        NOTE(review): llama_index's ``SemanticSplitterNodeParser`` is normally
        constructed with ``embed_model``/``buffer_size`` rather than
        ``chunk_size``/``chunk_overlap``, and it is a node parser rather than a
        text splitter — confirm the constructor arguments and the
        ``split_text`` call below actually work with the installed version.
        """

        def __init__(self,chunk_size=256,overlap_size=100):
            # Forward the requested sizes to the inner parser.
            # NOTE(review): presumably these map onto the library's chunking
            # parameters — TODO confirm the constructor accepts them.
            self.sentence_splitter = InnerSemanticSplitterNodeParser(
                chunk_size=chunk_size,
                chunk_overlap=overlap_size
            )

        def split(self, text) -> List[str]:
            # NOTE(review): ``split_text`` may not exist on a node parser —
            # verify; the sibling SentenceSplitter calls it on a true splitter.
            return self.sentence_splitter.split_text(text)


class SentenceSplitter:
    """Facade over llama_index's sentence-based text splitter.

    Exposes a single ``split`` method so callers do not depend on the
    library's ``split_text`` naming.
    """

    def __init__(self, chunk_size=256, overlap_size=100):
        """Configure the underlying splitter.

        Args:
            chunk_size: maximum size of each produced chunk.
            overlap_size: overlap carried between adjacent chunks.
        """
        self.sentence_splitter = InnerSentenceSplitter(
            chunk_size=chunk_size, chunk_overlap=overlap_size
        )

    def split(self, text) -> List[str]:
        """Return the list of chunks obtained by splitting *text*."""
        return self.sentence_splitter.split_text(text)


class CustomSplitter:
    """Placeholder splitter kept behind the same ``split`` interface as the
    other splitters in this module; it currently produces no chunks."""

    def __init__(self):
        # No configuration yet — intentionally a no-op.
        pass

    def split(self, text) -> List[str]:
        # Stub implementation: every input yields an empty chunk list.
        return []


if __name__ == "__main__":
    # Ad-hoc demo: read one sample document, split it into chunks with the
    # sentence splitter, and print each chunk followed by a separator.
    file_path = "../data/rcp/初始配置（手工部署）/32-配置NRF服务器.txt"

    with open(file_path, "r", encoding="utf-8") as f:
        s = f.read()

    for chunk in SentenceSplitter(256, 10).split(s):
        print(chunk)
        print("============")



# def sentence_splitter(text):
#     # 常见中文句子结束标点符号
#     end_punctuation = ['。', '！', '？', '……', '…']

#     # 使用LlamaIndex对输入文本进行分词
#     tokenizer = LlamaIndex.tokenizer()
#     tokens = tokenizer.tokenize(text)

#     sentences = []
#     current_sentence = []

#     for token in tokens:
#         current_sentence.append(token)
#         if token in end_punctuation:
#             sentences.append(''.join(current_sentence))
#             current_sentence = []

#     # 如果最后一个句子没有结束标点符号，也添加进句子列表
#     if current_sentence:
#         sentences.append(''.join(current_sentence))

#     return sentences

# # # 示例使用
# # text = "你好！这是一个句子分割的示例。希望它能正常工作。"
# # sentences = sentence_splitter(text)
# # print(sentences)






