from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableBranch
from langchain_community.chat_models import ErnieBotChat
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv, find_dotenv

# Read the local .env file, which defines OPENAI_API_KEY.
# NOTE(review): the active model below is ErnieBotChat, which reads Ernie/Qianfan
# credentials from the environment rather than OPENAI_API_KEY — confirm the .env
# contents match the model actually enabled.
_ = load_dotenv(find_dotenv())

# LLM shared by every chain below; swap the commented lines to use OpenAI instead.
llm = ErnieBotChat(model_name='ERNIE-Bot-4')
# llm = ChatOpenAI(temperature=0, model="gpt-4")
# llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")

# Topic-classification chain: labels the incoming question as `LangChain`,
# `Anthropic`, or `Other` with a single word, parsed out as a plain string.
classification_prompt = PromptTemplate.from_template(
    """Given the user question below, classify it as either being about `LangChain`, `Anthropic`, or `Other`.

Do not respond with more than one word.

<question>
{question}
</question>

Classification:"""
)
chain = classification_prompt | llm | StrOutputParser()
# Expert chain for LangChain-related questions.
_langchain_prompt = PromptTemplate.from_template(
    """You are an expert in langchain. \
Always answer questions starting with "As Harrison Chase told me". \
Respond to the following question:

Question: {question}
Answer:"""
)
langchain_chain = _langchain_prompt | llm
# Expert chain for Anthropic-related questions.
_anthropic_prompt = PromptTemplate.from_template(
    """You are an expert in anthropic. \
Always answer questions starting with "As Dario Amodei told me". \
Respond to the following question:

Question: {question}
Answer:"""
)
anthropic_chain = _anthropic_prompt | llm
# Fallback chain for questions that match neither topic.
_general_prompt = PromptTemplate.from_template(
    """Respond to the following question:

Question: {question}
Answer:"""
)
general_chain = _general_prompt | llm

# Dispatch on the "topic" value of the input dict: Anthropic questions go to
# anthropic_chain, LangChain questions to langchain_chain, anything else falls
# through to general_chain.
def _topic_contains(keyword):
    """Return a predicate checking whether *keyword* occurs in x["topic"] (case-insensitive)."""
    return lambda x: keyword in x["topic"].lower()


branch = RunnableBranch(
    (_topic_contains("anthropic"), anthropic_chain),
    (_topic_contains("langchain"), langchain_chain),
    general_chain,
)

# Full pipeline: first classify the question into a topic, then route to the
# matching expert chain, finally parse the LLM output into a plain string.
# (The dict is coerced into a RunnableParallel by the `|` with `branch`.)
_router_input = {"topic": chain, "question": lambda x: x["question"]}
full_chain = _router_input | branch | StrOutputParser()

print(full_chain.invoke({"question": "how do I use Anthropic?"}))
