from langchain_ollama import ChatOllama
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, PromptTemplate
# 添加一个字符串输出解析器，以便两个llm的输出是相同类型的
from langchain_core.output_parsers import StrOutputParser
from openaiConfigurations import openai_api_key, openai_api_base

# Prompt for the chat model: a system instruction plus a templated human
# question — the {city} placeholder is filled in at invoke time.
chat_messages = [
    ("system", "你真是一个贴心的助手，每次回复都会附上赞美之词。"),
    ("human", "为什么你喜欢{city}"),
]
chat_prompt = ChatPromptTemplate.from_messages(chat_messages)

# Deliberately broken chain: "gpt-fake" is not a real model name, so
# invoking this chain will raise an API error (used to demo fallbacks).
chat_model = ChatOpenAI(
    model="gpt-fake",
    api_key=openai_api_key,
    base_url=openai_api_base,
)
# StrOutputParser normalizes the chat model's message into a plain string,
# so both chains in this file produce the same output type.
bad_chain = chat_prompt | chat_model | StrOutputParser()


# Now build a chain that works, backed by a local Ollama model; it serves
# as the fallback when the chain above fails.
prompt_template = (
    "说明：你应该在回复中始终包含赞美之词。\n"
    "问题：你为什么喜欢{city}？"
)
prompt = PromptTemplate.from_template(prompt_template)
llm = ChatOllama(model="deepseek-r1:7b")
good_chain = prompt | llm

# Combine the two chains: when bad_chain raises, the same input is
# re-routed to good_chain and its result is returned instead.
chain = bad_chain.with_fallbacks([good_chain])
answer = chain.invoke({"city": "巴中"})
print(answer)