"""Build an in-memory Chroma vector store from a local text file and run a
similarity search against it.

Pipeline: load knowledge.txt -> split into overlapping chunks -> embed each
chunk with a local embedding function -> index in Chroma -> query.
"""

# pip install -U langchain-chroma langchain-community langchain-text-splitters
from langchain_chroma import Chroma
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter

from config.embedding_config import get_openai_embeddings_local

# Chunking parameters: 500-char chunks with 100-char overlap so content that
# straddles a chunk boundary still appears intact in at least one chunk.
CHUNK_SIZE = 500
CHUNK_OVERLAP = 100


def build_vector_store(path: str = "knowledge.txt") -> Chroma:
    """Load *path*, split it into chunks, and index them in Chroma.

    The store is in-memory only; pass ``persist_directory="./chroma_db"`` to
    ``Chroma.from_documents`` to persist the index between runs.
    """
    # Load the document and split it into overlapping chunks.
    documents = TextLoader(path, encoding="UTF-8").load()
    splitter = CharacterTextSplitter(chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP)
    chunks = splitter.split_documents(documents)

    # Create the embedding function and index the chunks.
    embedding_function = get_openai_embeddings_local()
    return Chroma.from_documents(documents=chunks, embedding=embedding_function)


def main() -> None:
    """Build the store and print the top result for a sample query."""
    db = build_vector_store()
    query = "Pixar公司是做什么的?"
    results = db.similarity_search(query)
    # Guard against an empty result set instead of raising IndexError.
    if results:
        print(results[0].page_content)
    else:
        print("No matching documents found.")


if __name__ == "__main__":
    main()