from langchain_community.document_loaders import PyPDFLoader
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_text_splitters import RecursiveCharacterTextSplitter
# 加载 .env 到环境变量
from dotenv import load_dotenv, find_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from a .env file.
_ = load_dotenv(find_dotenv())

# Load the source document.
# NOTE(review): load_and_split() already chunks the pages with a default
# splitter before we re-split below; loader.load() would give true per-page
# documents — confirm which granularity is intended.
loader = PyPDFLoader("llama2.pdf")
pages = loader.load_and_split()

# Split the document into overlapping chunks for embedding.
# chunk_overlap must be strictly smaller than chunk_size, otherwise the
# splitter cannot advance; a 25% overlap preserves context across chunks.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=200,
    chunk_overlap=50,
    length_function=len,
    add_start_index=True,
)
texts = text_splitter.create_documents(
    [page.page_content for page in pages[:4]]
)

# Embed the chunks and index them in an in-memory Chroma vector store.
embeddings = OpenAIEmbeddings()
db = Chroma.from_documents(texts, embeddings)

# Retrieve the top-2 most similar chunks for each query.
retriever = db.as_retriever(search_kwargs={"k": 2})

# Prompt template: answer strictly from the retrieved context.
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# Deterministic chat model (temperature=0) for reproducible answers.
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

# RAG chain: the raw question is passed through as {question}, and the
# retriever fills {context}; the model's reply is parsed to a plain string.
rag_chain = (
    {"question": RunnablePassthrough(), "context": retriever}
    | prompt
    | model
    | StrOutputParser()
)

res = rag_chain.invoke("Llama 2有多少参数")
print(res)
