from langchain_chroma import Chroma
from langchain_core.documents import Document
from modelscope import snapshot_download
# from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_huggingface import HuggingFaceEmbeddings

# 指定自定义下载路径（示例路径）
# custom_path = "D:/my_models"  # Windows路径示例

# model_dir = snapshot_download('sentence-transformers/all-MiniLM-L6-v2', cache_dir=custom_path)

# Local filesystem path of the pre-downloaded sentence-transformers model.
# NOTE(review): hard-coded Windows path — assumes the model was already
# fetched (e.g. via the snapshot_download call commented out above);
# verify the directory exists before running.
model_dir = "D:\\my_models\\sentence-transformers\\all-MiniLM-L6-v2"

# print(f"模型已下载到：{model_dir}")  # 会输出包含完整自定义路径的地址

# Sample knowledge-base entries as (page text, source label) pairs,
# materialized into Document objects for indexing.
_corpus = [
    ("早上9:00上班，晚上18:00下班。中午吃饭午休时间为：12:00-13:00", "公司章程"),
    ("我们公司的名字是陈李哈哈哈，主营业务为卖水果。", "公司简介"),
    ("公司在华三村证券交易所上市，每股单价10元，总股本为1000股。", "公司上市情况"),
]
documents = [
    Document(page_content=text, metadata={"source": src})
    for text, src in _corpus
]

# document = Document(
#     page_content="陈李哈哈哈是一家卖水果的公司，公司成立于2023年，公司主要业务为卖水果。",
#     metadata={"source": "公司简介"}
# )

# Create embeddings from the local model and index the documents in Chroma.
embeddings = HuggingFaceEmbeddings(model_name=model_dir)

try:
    vector_store = Chroma.from_documents(documents, embeddings)
except Exception as e:
    # Log the failure with a full traceback, then re-raise. Without the
    # re-raise the script would keep running and crash below with a
    # confusing NameError on the undefined `vector_store`.
    print(f"发生错误: {e}")
    import traceback
    traceback.print_exc()
    raise

# Run a similarity search against the indexed documents.
# NOTE(review): presumably returns (Document, score) pairs — confirm
# score semantics (distance vs. similarity) against the Chroma docs.
query = "陈李哈哈哈是一家卖水果的公司吗？"
res = vector_store.similarity_search_with_score(query)
print(res)
