from langchain_community.vectorstores import Chroma
from langchain_openai.embeddings import AzureOpenAIEmbeddings  # 导入嵌入模型
from langchain_community.document_loaders import PyPDFLoader
from tool import get_azure_endpoint, get_api_key, get_api_version

# Directory where the Chroma collection is written and later re-opened.
# Single constant so the write path and the reload path cannot drift apart.
PERSIST_DIRECTORY_CHINESE = './docs/chroma/matplotlib/'


def _build_embedding():
    """Build the Azure OpenAI embedding client from local credential helpers.

    Returns:
        AzureOpenAIEmbeddings configured for the ``text-embedding-3-small``
        deployment.
    """
    return AzureOpenAIEmbeddings(
        # Strip a trailing slash: the client expects the bare resource URL.
        azure_endpoint=get_azure_endpoint().rstrip('/'),
        # NOTE(review): on Azure this is usually passed as `azure_deployment`;
        # `model` works only when the deployment name equals the model name —
        # confirm against the Azure resource configuration.
        model="text-embedding-3-small",
        api_key=get_api_key(),
        api_version=get_api_version(),
    )


def _load_documents():
    """Load the Matplotlib tutorial PDFs into one flat list of Documents.

    The first PDF is deliberately loaded twice to create noisy, duplicated
    data for later retrieval experiments (e.g. deduplication / MMR demos).
    """
    pdf_paths = [
        "./docs/matplotlib/第一回：Matplotlib初相识.pdf",
        "./docs/matplotlib/第一回：Matplotlib初相识.pdf",  # deliberate duplicate
        "./docs/matplotlib/第二回：艺术画笔见乾坤.pdf",
        "./docs/matplotlib/第三回：布局格式定方圆.pdf",
        "./docs/matplotlib/第四回：文字图例尽眉目.pdf",
        "./docs/matplotlib/第五回：样式色彩秀芳华.pdf",
    ]
    docs = []
    for path in pdf_paths:
        docs.extend(PyPDFLoader(path).load())
    return docs


def _split_documents(docs):
    """Split documents into overlapping chunks sized for embedding."""
    from langchain.text_splitter import RecursiveCharacterTextSplitter

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=1500,    # target size of each chunk, in characters
        chunk_overlap=150,  # overlap between adjacent chunks preserves context
    )
    return splitter.split_documents(docs)


def main():
    """Build a persistent Chroma vector store from the tutorial PDFs."""
    embedding = _build_embedding()

    docs = _load_documents()
    print(len(docs))

    splits = _split_documents(docs)

    # Passing persist_directory makes Chroma write the collection to disk.
    # Since Chroma 0.4.x this persistence is automatic, so the formerly
    # required (now deprecated) explicit .persist() call is omitted.
    Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        persist_directory=PERSIST_DIRECTORY_CHINESE,
    )

    # Re-open the persisted collection from the same directory to verify
    # the data round-trips, then report how many vectors it holds.
    vectordb_chinese = Chroma(
        persist_directory=PERSIST_DIRECTORY_CHINESE,
        embedding_function=embedding,
    )
    print(vectordb_chinese._collection.count())


if __name__ == '__main__':
    main()
