# pip install langchain langchain-community langchain-chroma llama-index sentence-transformers

import os

from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter

# ========================
# 1. Locate the input text file relative to this script
# ========================
base_dir = os.path.dirname(os.path.abspath(__file__))
doc_file_path = os.path.join(base_dir, "data", "manager_learn.txt")

if not os.path.exists(doc_file_path):
    raise FileNotFoundError(f"{doc_file_path} 文件不存在，请检查路径！")

# ========================
# 2. Load the document
# ========================
loader = TextLoader(doc_file_path, encoding="utf-8")
documents = loader.load()

# ========================
# 3. Split into chunks
# ========================
text_splitter = CharacterTextSplitter(chunk_size=500, chunk_overlap=20)
split_docs = text_splitter.split_documents(documents)

# ========================
# 4. Local HuggingFace embedding model.
#    The path can be overridden via the EMBED_MODEL_PATH environment
#    variable; the default keeps the original local Windows path.
# ========================
embedding_model = HuggingFaceEmbeddings(
    model_name=os.environ.get(
        "EMBED_MODEL_PATH",
        "D:\\models\\models\\sentence-transformers\\paraphrase-multilingual-MiniLM-L12-v2",
    )
)

# ========================
# 5. Build the Chroma vector store.
# BUG FIX: the original commented out persist_directory but still called
# db.persist(), which raises ValueError in the legacy LangChain wrapper
# ("You must specify a persist_directory on creation to persist ...").
# Passing the directory makes persist() actually write to disk, matching
# the original intent.
# ========================
db = Chroma.from_documents(
    documents=split_docs,
    embedding=embedding_model,
    persist_directory=os.path.join(base_dir, "chroma_db"),
)
db.persist()  # flush the vector store to disk

# ========================
# 6. Similarity search
# ========================
query = "管理"
# similarity_search_with_score returns (Document, score) pairs where the
# score is a distance — lower means more similar. NOTE(review): the default
# Chroma collection metric is L2, not cosine; pass
# collection_metadata={"hnsw:space": "cosine"} if cosine is required.
query_docs = db.similarity_search_with_score(query)
if query_docs:  # guard: avoid IndexError when nothing matches
    print(query_docs[0])


# TODO: MMR-based retrieval (db.max_marginal_relevance_search) not yet implemented.