# pip install langchain-chroma
# pip install -U langchain-huggingface
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter

from my_huggingface.ModelScopeEmbeddings import ModelScopeEmbeddings

# Build the splitter first: chunk_size caps each chunk at 1000 characters,
# chunk_overlap=0 means consecutive chunks share no text.
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)

# Load the knowledge base file and cut it into chunks for embedding.
loader = TextLoader("knowledge.txt", encoding="UTF-8")
documents = loader.load()
docs = text_splitter.split_documents(documents)

# Set up the embedding function from a locally cached sentence-transformers
# model (all-MiniLM-L6-v2), running on CPU.
local_model_path = (
    "/Users/brightzhou/.cache/modelscope/hub/models/"
    "sentence-transformers/all-MiniLM-L6-v2"
)
embedding_function = ModelScopeEmbeddings(local_model_path, device='cpu')

# The question we will search the vector store with.
query = "Pixar公司是做什么的"
# Reopen the persisted Chroma collection from ./chroma_db.
# (The collection was originally built with something like:
#   Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db"))
example_db = Chroma(embedding_function=embedding_function, persist_directory="./chroma_db")

# Similarity search returning (document, score) pairs.
# NOTE(review): score semantics depend on the collection's distance metric —
# with Chroma's default (L2 distance) a LOWER score means MORE similar; confirm
# against how the collection was created.
results = example_db.similarity_search_with_score(query)
for doc, score in results:
    print(f"相似度分数: {score:.4f}")
    print(f"文档内容: {doc.page_content[:100]}...")  # show only the first 100 characters
    print(f"元数据: {doc.metadata}")
    print("-" * 50)

# List every stored document ID via the public Chroma.get() API
# (returns a dict containing an "ids" list) instead of reaching into the
# private `_collection` attribute, which is an implementation detail.
ids = example_db.get()["ids"]
print("所有 IDs:", ids)

