import openai
from langchain.text_splitter import TextSplitter
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import TokenTextSplitter


class CustomTokenTextSplitter(TextSplitter):
    """Text splitter that delegates chunking to an OpenAI completion call.

    NOTE(review): asking a completion model to split text is inherently
    approximate — the model is not guaranteed to honor the requested token
    counts or overlap; confirm this accuracy is acceptable for callers.
    """

    def __init__(self, chunk_size, chunk_overlap, openai_client, model="text-davinci-002"):
        """
        Args:
            chunk_size: target number of tokens per chunk.
            chunk_overlap: number of tokens shared between adjacent chunks.
            openai_client: object exposing ``Completion.create`` (the pre-1.0
                ``openai`` module-level API — TODO confirm the installed
                openai version still provides it; it was removed in 1.0).
            model: completion engine name passed as ``engine``.
        """
        super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        # Store the sizes on self explicitly: langchain's TextSplitter keeps
        # them in private attributes (_chunk_size / _chunk_overlap), so the
        # original reads of self.chunk_size / self.chunk_overlap in
        # split_text raised AttributeError.
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.openai_client = openai_client
        self.model = model

    def split_text(self, text):
        """Split *text* by prompting the OpenAI completion API.

        Returns:
            A list of chunk strings, one per line of the model's output;
            an empty list if the model returned nothing.
        """
        # Use the OpenAI API to perform the token-based split.
        response = self.openai_client.Completion.create(
            engine=self.model,
            prompt=f"Split the following text into chunks of {self.chunk_size} tokens with an overlap of {self.chunk_overlap} tokens:\n{text}",
            max_tokens=1000,
            n=1,
            stop=None,
            temperature=0.5,
        )
        body = response.choices[0].text.strip()
        # Guard the empty response: "".split('\n') would yield [''].
        return body.split('\n') if body else []

# Example usage
if __name__ == "__main__":
    # NOTE(review): never commit real API keys to source control — load them
    # from an environment variable or a secrets manager instead.
    api_key = "sk-117hahihft1nkgc0t6qa5gvhd7q1mvndgcud046o3d4l6hm3"
    api_base_url = "http://api.aihao123.cn/luomacode-api/open-api/v1"

    # Configure the module-level OpenAI client.
    # NOTE(review): the legacy (<1.0) openai module reads `api_base`, not
    # `base_url`, and openai>=1.0 removed the module-level `Completion` API
    # that CustomTokenTextSplitter calls — confirm the installed version.
    openai.api_key = api_key
    openai.base_url = api_base_url
    # Kept for use with CustomTokenTextSplitter; unused in this demo, which
    # splits with the local TokenTextSplitter below.
    openai_client = openai

    # Load the PDF and split it into token-based chunks.
    loader = PyPDFLoader("/Users/wuchengxing/Downloads/code/data.pdf")
    text_splitter = TokenTextSplitter(chunk_size=1024, chunk_overlap=24)
    documents = text_splitter.split_documents(loader.load())
    # Report the result once (the original printed the chunk count twice).
    print(f"Split into {len(documents)} chunks.")
    print("Load and Split document successfully.")
