### 通过 LCEL，还可以实现
# 1. 配置运行时变量：https://python.langchain.com/docs/expression_language/how_to/configure
# 2. 故障回退：https://python.langchain.com/docs/expression_language/how_to/fallbacks
# 3. 并行调用：https://python.langchain.com/docs/expression_language/how_to/map
# 4. 逻辑分支：https://python.langchain.com/docs/expression_language/how_to/routing
# 5. 调用自定义流式函数：https://python.langchain.com/docs/expression_language/how_to/generators
# 6. 链接外部Memory：https://python.langchain.com/docs/expression_language/how_to/message_history
#
# 更多例子：https://python.langchain.com/docs/expression_language/cookbook/
from langchain_community.chat_models import ErnieBotChat
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
from langchain_community.vectorstores import Chroma
from dotenv import load_dotenv, find_dotenv

# Load the local .env file, which defines OPENAI_API_KEY (and any other keys).
load_dotenv(find_dotenv())

# Chat model used by the chain. Alternative backends kept for reference:
# llm = ErnieBotChat(model_name='ERNIE-Bot-4')
# llm = ChatOpenAI(temperature=0, model="gpt-4")
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")

# Seed facts for the demo knowledge base (some variants left commented out).
seed_texts = [
    "Sam Altman被复职了",
    "Sam Altman被解雇了",
    # "Sam Altman是CEO，他在OpenAI工作",
    "WAF 是OpenAI的CEO",
    # "OpenAI 发布了 GPT-3和 GPT-4",
    "OpenAI 发布了 GPT-3",
    "OpenAI 在2023年发布了 GPT-4",
    "OpenAI 将要发布 GPT-5",
]

# In-memory Chroma vector store built from the seed texts.
vectorstore = Chroma.from_texts(seed_texts, embedding=OpenAIEmbeddings())

# Retrieval interface over the store.
# Pass search_kwargs={"k": 1} to as_retriever() to cap the number of hits.
retriever = vectorstore.as_retriever()

# Prompt template for the RAG chain.
# NOTE(review): the colon after "Context" below is a full-width "：" — it is part
# of the runtime prompt string and is preserved as-is.
# Earlier instruction variants tried (kept for reference):
#   Keep the answer short and concise.
#   Answer questions based on the language used by the user.
#   If the following known information is not sufficient to answer the user's
#   question, please reply directly "I am unable to answer your question".
#   ... or reply "我无法回答你的问题" / "根据已知内容，我无法回答你的问题".
#   If the user asks in Chinese, answer in Chinese; in English, answer in English.
#   Let's think step by step.
RAG_TEMPLATE = """Answer the question based only on the following context. 
Context：{context}

Question: {question}
Answer in the following language: chinese
"""

prompt = ChatPromptTemplate.from_template(RAG_TEMPLATE)

# RunnableParallel fans the single string input out into the two prompt slots:
#   "context"  -> documents fetched by the retriever for the question
#   "question" -> the user's original question, passed through unchanged
inputs = RunnableParallel(
    {"context": retriever, "question": RunnablePassthrough()}
)

# Full RAG chain: gather inputs -> fill prompt -> call model -> extract text.
rag_chain = inputs | prompt | llm | StrOutputParser()

# Other questions tried during development:
# rag_chain.invoke("OpenAI的CEO是谁")
# rag_chain.invoke("OpenAI的CEO是谁,是男是女？")
# rag_chain.invoke("OpenAI目前发布了什么？")
# rag_chain.invoke("OpenAI的CEO是男的还是女的?")
# rag_chain.invoke("Is the CEO of OpenAI male or female?")
# rag_chain.invoke("OpenAI的CEO的年龄是多少？")
# rag_chain.invoke("What is the age of the CEO of OpenAI?")
# rag_chain.invoke("OpenAI的CEO的年龄是20还是30?")
# rag_chain.invoke("Is the CEO of OpenAI in his 20s or 30s?")
answer = rag_chain.invoke("Openai 即将发布什么?")
print(answer)
