"""
1.document对象
2.得到bge-large模型
3.生成一个向量数据   chroma doc model
4.生成检索器 RunnableLambda 将函数转runnable
5.生成大模型chain对象
6.chain推理用户问题
"""

from langchain_community.document_loaders import TextLoader
from nltk.data import retrieve

from model_utils import getEmd, getLLM
from langchain_chroma import Chroma

# 1. Load the raw corpus as LangChain Document objects.
loader = TextLoader(
    "/root/project/Code/sshcode/lc/character.txt",
    encoding="utf-8",
)
docs = loader.load()  # -> List[Document]

# 2. Embedding model (bge-large) supplied by the project helper.
model = getEmd()

from langchain_text_splitters import RecursiveCharacterTextSplitter

# 3. Split the documents and index them in a Chroma vector store.
# chunk_size is measured in characters (~100 Chinese characters per chunk);
# the 20-character overlap keeps sentences cut at a chunk boundary recoverable.
_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)
split_doc = _splitter.split_documents(documents=docs)

# Index the *split* chunks, not the whole documents — smaller chunks give
# more precise similarity hits.
vector_db = Chroma.from_documents(split_doc, model)

# 5. The chat LLM used to generate the final answer (project helper).
llm = getLLM()

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Human-turn template: the user's question followed by the retrieved
# reference passages ({question} / {ref} are filled by the chain).
_prompts = """
回答问题：
{question}

参考内容：
{ref}

"""

# System instruction + human turn combined into one chat prompt.
prompt_template = ChatPromptTemplate(
    [
        ("system", "根据问题和参考内容回答问题"),
        ("human", _prompts),
    ]
)

# Extract the model's reply as a plain string.
parser = StrOutputParser()

from langchain_core.runnables import RunnableLambda, RunnablePassthrough

# 4. Retriever: wrap the vector-store search in a Runnable, fixed at top-5 hits.
# Use similarity_search (returns List[Document]) rather than
# similarity_search_with_score — the score variant returns (Document, score)
# tuples whose raw reprs (scores, metadata) would leak into the prompt text.
retriever = RunnableLambda(vector_db.similarity_search).bind(k=5)


def _format_docs(retrieved):
    """Join the retrieved chunks into one plain-text reference block."""
    return "\n\n".join(d.page_content for d in retrieved)


# 6. Chain: fan the question into {question} and the cleaned-up retrieval
# results into {ref}, then prompt -> llm -> string parser.
chain = (
    {
        "question": RunnablePassthrough(),
        "ref": retriever | RunnableLambda(_format_docs),
    }
    | prompt_template
    | llm
    | parser
)

r = chain.invoke("林浅夏的父亲是干什么的")
print(r)