"""
完整的RAG流程，
# 1.加载文本文件
# 2.构建分割器和文档分割   RecursiveCharacterTextSplitter  和 text_splitter.split_documents(docs)
# 3.文本嵌入模型
# 4.构建词向量数据库 vector_db.as_retriever
# 5.转成检索器
# 6.构建 RetrievalQA 返回BaseRetrievalQA对象
"""

from langchain.chains.retrieval_qa.base import RetrievalQA
from langchain_chroma import Chroma
from langchain_text_splitters import RecursiveCharacterTextSplitter
from model_utils import getEmd, getLLM
from langchain_community.document_loaders import TextLoader

# 1. Load the source text file into a list of Document objects.
docs = TextLoader(
    "/root/project/Code/sshcode/lc/character.txt",
    encoding="utf-8",
).load()

# 2. Build the splitter and split the documents into overlapping chunks.
# FIX: the original passed the lookbehind pattern "(?<=\。！？ )" while
# is_separator_regex defaulted to False, so the separator was treated as the
# literal string "(?<=\。！？ )" and never matched sentence boundaries.  We
# enable regex separators and use a character class so the text splits right
# *after* any CJK sentence-ending punctuation mark.
text_splitter = RecursiveCharacterTextSplitter(
    # Try paragraph breaks first, then line breaks, then sentence-ending
    # punctuation (。！？), then spaces, then fall back to any character.
    separators=["\n\n", "\n", "(?<=[。！？])", " ", ""],
    # Maximum number of characters per chunk.
    chunk_size=200,
    # Characters of overlap between consecutive chunks (context continuity).
    chunk_overlap=50,
    # Chunk length is measured in characters.
    length_function=len,
    # Interpret the separators above as regular expressions.
    is_separator_regex=True,
)
# Split the loaded documents into chunk-sized Document objects.
doc_list = text_splitter.split_documents(docs)

# 3. Text-embedding model used to vectorize each chunk.
embedding_model = getEmd()

# 4. Build a persistent Chroma vector store from the chunks;
# persist_directory keeps the database on disk between runs.
vector_db = Chroma.from_documents(
    doc_list,
    embedding_model,
    persist_directory="./chromadb_2",
)

# NOTE: an existing on-disk store could instead be reopened with
# Chroma(embedding_function=embedding_model, persist_directory="./chromadb")
# and extended later via vector_db.add_documents().

# 5. Expose the vector store as a retriever returning the top-10 matches.
retriever = vector_db.as_retriever(search_kwargs=dict(k=10))

# 6. Build the RetrievalQA chain.  (RetrievalQA is already imported at the
# top of the file; the duplicate mid-file import was removed.)
QA = RetrievalQA.from_chain_type(
    llm=getLLM(),
    retriever=retriever,
    # Only the answer is needed; do not echo the retrieved chunks.
    return_source_documents=False,
)

# Run one question through the chain and print the resulting answer dict.
r = QA.invoke("林浅夏的父亲是干什么的")

print(r)