from dotenv import load_dotenv, find_dotenv
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.prompts import ChatPromptTemplate

# Load environment variables (e.g. OPENAI_API_KEY) from the nearest .env file.
_ = load_dotenv(find_dotenv())

# Read the PDF and split it into one Document per page.
pdf_loader = PyMuPDFLoader("llama2.pdf")
pages = pdf_loader.load_and_split()

# Re-chunk pages into small overlapping pieces for retrieval.
# chunk_overlap keeps sentences that straddle a boundary from being cut off;
# add_start_index records each chunk's character offset in its metadata.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,
    chunk_overlap=100,
    length_function=len,
    add_start_index=True,
)

# Use split_documents (not create_documents on raw strings) so each chunk
# keeps its source Document's metadata (file path, page number) — the
# original dropped all metadata by passing bare page_content strings.
texts = text_splitter.split_documents(pages[:4])

# Embed every chunk with OpenAI's ada-002 model and index the vectors in an
# in-memory Chroma store.
embeddings = OpenAIEmbeddings(model="text-embedding-ada-002")
db = Chroma.from_documents(texts, embeddings)

# Similarity-search retriever returning the top-2 matching chunks per query.
retriever = db.as_retriever(search_kwargs={"k": 2})

# Prompt template: constrains the model to answer strictly from the
# retrieved context rather than from its own knowledge.
template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# Chat model used for the answer-generation step of the chain.
llm = ChatOpenAI(model="gpt-4o")

def _format_docs(docs):
    """Join retrieved Document chunks into one plain-text context block."""
    return "\n\n".join(doc.page_content for doc in docs)


# LCEL pipeline: the input question is fed both straight through to the
# prompt and into the retriever. The retriever output is formatted into
# plain text first — without _format_docs the prompt would receive the
# repr of a list[Document] (metadata noise included) as {context}.
rag_chain = (
    {"question": RunnablePassthrough(), "context": retriever | _format_docs}
    | prompt
    | llm
    | StrOutputParser()
)

print(rag_chain.invoke("Llama 2有多少参数"))