# pip install langchain-chroma
# pip install -U langchain-huggingface
"""Demo: embed a local text file into a persisted Chroma vector store, then
run similarity search, inspect similarity scores, and update the metadata of
a stored document through the underlying collection.
"""
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_text_splitters import CharacterTextSplitter

from my_huggingface.HFModelUtils import HFModelUtils
from my_huggingface.ModelScopeEmbeddings import ModelScopeEmbeddings
from model.MyOllamaEmbeddings import MyOllamaEmbeddings

# NOTE(review): removed `from numpy.ma.core import ids` — an accidental IDE
# auto-import of a numpy-internal name that was shadowed by the local `ids`
# assignment below and never actually used.

# Load the document and split it into chunks.
loader = TextLoader("knowledge.txt", encoding="UTF-8")
documents = loader.load()

# chunk_size: character limit per chunk; chunk_overlap: overlap between
# consecutive chunks (0 = no overlap).
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)

# Embedding function backed by a locally cached sentence-transformers model.
local_model_path = "/Users/brightzhou/.cache/modelscope/hub/models/sentence-transformers/all-MiniLM-L6-v2"
embedding_function = ModelScopeEmbeddings(local_model_path, device='cpu')
# Alternative embedding backends, kept for experimentation:
# embedding_function = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# embedding_function = MyOllamaEmbeddings()
# embedding_function = HFModelUtils.embed_model()

query = "Pixar公司是做什么的"
# Build the Chroma store from the split documents, persisted to disk.
db = Chroma.from_documents(docs, embedding_function, persist_directory="./chroma_db")
# docs = db.similarity_search(query)

# Re-open the persisted store to demonstrate loading it back from disk.
example_db = Chroma(embedding_function=embedding_function, persist_directory="./chroma_db")

# Top-1 similarity search; overwrites the splitter output in `docs`.
docs = example_db.similarity_search(query, k=1)

# Search with distance scores attached (score semantics depend on the
# collection's distance metric — presumably lower = more similar; verify).
docs_score = example_db.similarity_search_with_score(query)
print("相似性分数：", docs_score[0])

# Fetch all stored IDs.
# NOTE(review): `_collection` is a private attribute of the langchain Chroma
# wrapper; prefer the public `example_db.get()["ids"]` where available.
ids = example_db._collection.get()["ids"]
print("所有 IDs:", ids)

# Replace (not merge) the metadata of the top search hit, then write it back
# to the store under the first stored ID.
docs[0].metadata = {
    "source": "knowledge.txt",
    "new_value": "hello world",
}
print(example_db._collection.count())
print(f"更新前的内容：ids[0]={ids[0]}")
print(example_db._collection.get(ids=[ids[0]]))
example_db.update_document(ids[0], docs[0])
print("更新后的内容：")
print(example_db._collection.get(ids=[ids[0]]))

# Delete the last stored document (disabled):
# print("删除前计数：", example_db._collection.count())
# print(example_db._collection.delete(ids=[ids[-1]]))
# print("删除后计数：", example_db._collection.count())