from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_core.runnables.utils import ConfigurableField
from langchain_community.chat_models import QianfanChatEndpoint
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.chat_models import init_chat_model
from langchain.schema import HumanMessage
import os

# --- Model setup -------------------------------------------------------------
# SECURITY: API keys must never be hardcoded in source (the previous revision
# embedded live-looking keys, which should be considered compromised and
# rotated). Read them from the environment instead; a missing variable fails
# fast with a KeyError naming the variable.
# Required: DEEPSEEK_API_KEY, OPENAI_API_KEY.

# Model 1: DeepSeek
ds_model = init_chat_model(
    "deepseek-chat",
    model_provider="deepseek",
    api_key=os.environ["DEEPSEEK_API_KEY"],
)

# Model 2: OpenAI
gpt_model = init_chat_model(
    "gpt-4o-mini",
    model_provider="openai",
    api_key=os.environ["OPENAI_API_KEY"],
)

# Expose the model choice as a configurable field ("llm") so the caller can
# pick an alternative at invocation time via .with_config(...).
model = gpt_model.configurable_alternatives(
    ConfigurableField(id="llm"),
    default_key="gpt",
    deepseek=ds_model,
    # Further alternatives (e.g. a Claude model) can be registered here as
    # additional keyword arguments: claude=claude_model.
)

# Prompt template: a single human message that forwards the raw query string.
prompt = ChatPromptTemplate.from_messages(
    [
        HumanMessagePromptTemplate.from_template("{query}"),
    ]
)

# LCEL pipeline: wrap the input as {"query": ...} -> format prompt ->
# call the selected chat model -> parse the response to a plain string.
chain = (
        {"query": RunnablePassthrough()}
        | prompt
        | model
        | StrOutputParser()
)

# Select the model at run time: "gpt" or "deepseek".
ret = chain.with_config(configurable={"llm": "gpt"}).invoke("请自我介绍")

print(ret)
