from langchain_community.document_loaders import TextLoader
from customize.embedding import get_embedding  # 这里假设使用HuggingFace的嵌入模型，你可根据实际替换
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma




def load_and_vectorize_text(
    file_path,
    model_path="F:/models/BAAI/bge-large-zh-v1.5",
    persist_directory="db",
    batch_size=100,
):
    """Load a UTF-8 text file, split it into chunks, embed the chunks and
    persist them into a Chroma vector store.

    Args:
        file_path: Path to the UTF-8 text file to ingest.
        model_path: Local path of the embedding model passed to
            ``get_embedding`` (defaults to the bge-large-zh-v1.5 checkpoint).
        persist_directory: Directory where Chroma persists the collection.
        batch_size: Number of chunks sent to Chroma per ``add_documents``
            call; batching avoids one embedding round-trip per chunk.

    Returns:
        The populated ``Chroma`` vector store instance.
    """
    # Load the raw text file.
    print("加载文本……")
    loader = TextLoader(file_path, encoding="utf-8")
    documents = loader.load()

    # Split the text into overlapping chunks; tune chunk_size/chunk_overlap
    # for your corpus as needed.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50, length_function=len)
    texts = text_splitter.split_documents(documents)
    print(len(texts))

    # Load the embedding model (HuggingFace-style local checkpoint).
    print("导入embedding模型……")
    embeddings = get_embedding(model_path)

    # Create the persistent Chroma collection. hnsw:batch_size raises the
    # index's internal batching limit so large ingests are efficient.
    print("向量化并保存至数据库……")
    vectorstore = Chroma(
        embedding_function=embeddings,
        collection_name="jpjd",
        collection_metadata={"hnsw:batch_size": 10000},
        persist_directory=persist_directory,
    )

    # Insert chunks in batches instead of one call per document: each
    # add_documents call triggers an embedding pass, so batching is far
    # faster for large files. IDs remain the chunk's global index as a
    # string, matching the previous one-at-a-time behavior.
    for start in range(0, len(texts), batch_size):
        batch = texts[start:start + batch_size]
        ids = [str(start + offset) for offset in range(len(batch))]
        print(f"adding chunks {start}-{start + len(batch) - 1}")
        vectorstore.add_documents(batch, ids=ids)
    return vectorstore

if __name__ == "__main__":
    # Path of the source text file (only used when re-ingesting).
    file_path = "resources/jpjd.txt"  # 替换为你的5M文本文件的实际路径
    # vectorstore = load_and_vectorize_text(file_path)

    # Re-open the previously persisted collection instead of re-ingesting.
    embedding_fn = get_embedding("F:/models/BAAI/bge-large-zh-v1.5")
    vectorstore = Chroma(
        embedding_function=embedding_fn,
        collection_name="jpjd",
        persist_directory="db",
    )
    retri = vectorstore.as_retriever()

    # Run a sample similarity query against the store.
    query = "林三和肖青璇是什么关系？"
    print(query)
    results = vectorstore.similarity_search(query)
    print(results)