import os
from operator import itemgetter
from typing import Union

from langchain_core.output_parsers import StrOutputParser,JsonOutputToolsParser
from langchain_core.runnables import RunnablePassthrough, Runnable, RunnableLambda
from langchain_core.tools import tool
from langchain_openai import ChatOpenAI

from langchain_core.runnables.utils import ConfigurableField
from langchain_openai import ChatOpenAI
from langchain_community.chat_models import ChatZhipuAI, QianfanChatEndpoint
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import HumanMessage
import os


# --- Model setup -------------------------------------------------------------

# Model 1: ZhipuAI GLM-4.
# SECURITY FIX: the API key was previously hard-coded in source. Read it from
# the environment instead (same pattern as the Qianfan credentials below) so
# the secret never lives in version control.
chatglm_model = ChatZhipuAI(
    model="glm-4",
    api_key=os.getenv("ZHIPUAI_API_KEY"),
)

# Route the OpenAI client through the Zhihu proxy credentials.
# os.environ[...] (not .get) is deliberate: fail fast with a KeyError when the
# required variables are missing instead of failing later inside the client.
os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY_ZHIHU"]
os.environ["OPENAI_API_BASE"] = os.environ["OPENAI_API_BASE_ZHIHU"]

# Model 2: OpenAI chat model (picks up the env vars assigned above).
gpt_model = ChatOpenAI()

# Model 3: Baidu ERNIE via the Qianfan endpoint.
ernie_model = QianfanChatEndpoint(
    qianfan_ak=os.getenv('ERNIE_CLIENT_ID'),
    qianfan_sk=os.getenv('ERNIE_CLIENT_SECRET'),
)

# Expose all three models behind one runnable; the "llm" configurable field
# selects the backend at invocation time, defaulting to "gpt".
model = gpt_model.configurable_alternatives(
    ConfigurableField(id="llm"),
    default_key="gpt",
    ernie=ernie_model,
    chatglm=chatglm_model,
)

# Prompt template: a single human turn that forwards the raw query verbatim.
query_message = HumanMessagePromptTemplate.from_template("{query}")
prompt = ChatPromptTemplate.from_messages([query_message])

# LCEL pipeline: wrap the plain-string input into {"query": ...}, render the
# prompt, call the configurable model, and parse the reply down to text.
front = {"query": RunnablePassthrough()} | prompt
chain = front | model | StrOutputParser()

# Select the backend at run time via the "llm" configurable field
# ("gpt", "ernie", or "chatglm") and ask each model to introduce itself.
for backend in ("gpt", "chatglm"):
    configured_chain = chain.with_config(configurable={"llm": backend})
    print(configured_chain.invoke("介绍你自己，包括你的生产商"))