import os

from langchain_community.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate, ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

# Copy provider-specific credentials onto the standard OpenAI env var names
# that the langchain/openai clients read. The *_ZHIHU variables presumably
# point at a relay/proxy account — confirm with the deployment config.
# Raises KeyError immediately if either source variable is unset.
for target, source in (
    ("OPENAI_API_KEY", "OPENAI_API_KEY_ZHIHU"),
    ("OPENAI_API_BASE", "OPENAI_API_BASE_ZHIHU"),
):
    os.environ[target] = os.environ[source]

# --- Load the PDF and build the vector index ---

# load_and_split() loads the PDF and already performs a first splitting pass
# with the loader's default splitter, yielding one Document per chunk/page.
pdfloader = PyMuPDFLoader("./langchain_docs/llama2.pdf")
pages = pdfloader.load_and_split()

# Re-chunk the text into overlapping windows sized in characters
# (length_function=len); add_start_index records each chunk's character
# offset within its source text in the chunk metadata.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,
    chunk_overlap=100,
    length_function=len,
    add_start_index=True,
)

# NOTE(review): create_documents() on raw page_content discards per-page
# metadata (source path, page number); splitter.split_documents(pages)
# would preserve it — confirm whether metadata matters downstream.
texts = splitter.create_documents([page.page_content for page in pages])

# Embed every chunk and build an in-memory FAISS similarity index.
embedding = OpenAIEmbeddings(model="text-embedding-ada-002")
db = FAISS.from_documents(texts, embedding)

# BUG FIX: the original passed `search_kwags` (typo). as_retriever() accepts
# arbitrary kwargs, so the typo was silently ignored and the retriever fell
# back to its default of k=4. The correct keyword is `search_kwargs`.
retriever = db.as_retriever(search_kwargs={"k": 3})

# Prompt template for the RAG chain. Fixes to the original string:
# "base on" -> "based on" (grammar), removed the stray trailing full-width
# period after ".", and normalized the "User" label's full-width colon so
# all three role labels (System:/User:/AI:) match.
prompt_template = """
System:
Please answer the user question based on the info in the context.

Context:
{context}

User:
{question}

AI:
"""

# NOTE(review): temperature=0.9 is quite high for factual retrieval-grounded
# answers; 0-0.3 is the usual choice — confirm the creativity is intended.
llm = ChatOpenAI(model="gpt-4o", temperature=0.9)
prompt = ChatPromptTemplate.from_template(prompt_template)

# LCEL pipeline: the retriever receives the invoke() input and fills
# {context} with matching documents, RunnablePassthrough forwards the raw
# question to {question}, then prompt -> LLM -> plain answer string.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

print(chain.invoke("llama2有多少参数"))