from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import Chroma
from dotenv import load_dotenv,find_dotenv
from langchain.chat_models import ChatOpenAI
"""
嵌套调用
"""
_=load_dotenv(find_dotenv())


# Chat model that answers the question (temperature 0 for deterministic output).
model = ChatOpenAI(temperature=0)

# Seed an in-memory Chroma vector store with a few facts (embedded via OpenAI).
facts = [
    "Sam Altman 是OpenAI的CEO",
    "Sam ALtman 被解雇了",
    "Sam ALTman 被复职了",
]
vectorstore = Chroma.from_texts(facts, embedding=OpenAIEmbeddings())

# Expose the store through the retriever interface so the chain can query it.
retriever = vectorstore.as_retriever()

# Prompt template: instruct the model to answer strictly from the retrieved
# context. Fixed typos in the instruction ("base only" -> "based only",
# "follwing" -> "following") so the model receives a well-formed instruction.
template = """
Answer the question based only on the following context:{context}
Question:{question}
"""

prompt = ChatPromptTemplate.from_template(template)

# chain
# Nested invocation: the retriever fills {context} from the vector store,
# while RunnablePassthrough() forwards the raw input string into {question}.
# BUG FIX: the original passed the RunnablePassthrough *class* instead of an
# instance; LCEL requires a Runnable instance here, so it must be called.
retriever_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

# Invoke the chain and surface the answer (the original discarded the result).
print(retriever_chain.invoke("OpenAI的CEO是谁"))