from langchain_community.document_loaders import TextLoader  # load plain-text documents
from langchain_text_splitters import RecursiveCharacterTextSplitter  # chunk text for embedding
from langchain_ollama import OllamaEmbeddings  # embedding model served by local Ollama
from langchain_chroma import Chroma  # persist vectors in a Chroma store
import os


def saveToVectorDB(filePath):
    """Load a UTF-8 text file, split it into overlapping chunks, embed the
    chunks with a local Ollama model, and persist them into a Chroma store.

    Args:
        filePath: Path to a UTF-8 encoded text/markdown file.

    Side effects:
        Creates or appends to the ``./vector_db`` directory on disk and
        prints the loaded documents, the chunks, and the final vector count.
    """
    loader = TextLoader(filePath, encoding="utf-8")
    documents = loader.load()
    print(documents)

    # Split on paragraph/line boundaries first, then Chinese/ASCII sentence
    # and clause punctuation; the 50-char overlap preserves context that
    # would otherwise be cut at a chunk border.
    splitter = RecursiveCharacterTextSplitter(
        separators=["\n\n", "\r\n", "\n", "。", ".", "！", "!", "，", ",", "、", " ", ""],
        chunk_size=150,
        chunk_overlap=50,
    )
    chunks = splitter.split_documents(documents)
    print(chunks)

    # Chinese sentence-embedding model served by a local Ollama instance.
    embeddings = OllamaEmbeddings(
        model="autumnzsd/nlp_gte_sentence-embedding_chinese-large:latest"
    )

    # Append to an existing persisted store when one is present; otherwise
    # build a fresh one. Cosine distance for the HNSW index in both cases.
    persist_dir = "./vector_db"
    if os.path.exists(persist_dir):
        db = Chroma(
            embedding_function=embeddings,
            persist_directory=persist_dir,
            collection_metadata={"hnsw:space": "cosine"},
        )
        db.add_documents(documents=chunks)
    else:
        db = Chroma.from_documents(
            documents=chunks,
            embedding=embeddings,
            persist_directory=persist_dir,
            collection_metadata={"hnsw:space": "cosine"},
        )
    # NOTE(review): `_collection` is a private attribute of the Chroma
    # wrapper — used here only as a quick sanity check of the vector count.
    print("count=", db._collection.count())


if __name__ == "__main__":
    # Guard so importing this module does not trigger ingestion.
    # os.path.join replaces the hard-coded "\\" separators so the script
    # also runs on non-Windows hosts.
    docs_dir = os.path.join(os.getcwd(), "docs")
    for doc_name in (
        "断网离线部署方法.txt",
        "公司产品手册.md",
        "公司员工手册(AI生成).md",
    ):
        saveToVectorDB(os.path.join(docs_dir, doc_name))
