from langchain_core.prompts.chat import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser

from config.load_key import load_key

# Chat model: Qwen2.5-7B-Instruct served through SiliconFlow's
# OpenAI-compatible endpoint. Credentials/endpoint come from the
# project's key store (config.load_key), never hard-coded.
_api_key = load_key("siliconflow_api_key")
_base_url = load_key("siliconflow_base_url")
llm = ChatOpenAI(model="Qwen/Qwen2.5-7B-Instruct", api_key=_api_key, base_url=_base_url)

# LCEL: build a small translation chain (prompt -> llm -> string parser).
#
# BUG FIX: the original human message exposed only a {language} placeholder,
# so the tool derived from this chain had no input field for the text to
# translate — the model could pick a language but never pass the sentence in.
# Adding {text} puts the content to translate into the tool's arg schema.
prompt = ChatPromptTemplate.from_messages(
    [("human", "你好，请把下面的内容翻译成{language}：{text}")]
)

parser = StrOutputParser()

chain = prompt | llm | parser

# Convert the chain into a tool.
# NOTE: Runnable.as_tool is still experimental in langchain 0.3 and may change.
as_tool = chain.as_tool(name="translatetool", description="翻译任务")
print(as_tool.args)
# Dispatch table: tool name (lowercased at lookup time) -> tool instance.
all_tools = {
    "translatetool": as_tool
}

# Advertise the translate tool to the model so it can emit tool calls.
llm_with_tools = llm.bind_tools([as_tool])

query = "今天天气真冷，这句用英语怎么回答？"
messages = [query]

# Round 1: the model decides whether (and how) to call a tool.
ai_msg = llm_with_tools.invoke(messages)
messages.append(ai_msg)
print(ai_msg.tool_calls)
print(">>>>>>>>>>>>")

# Execute every requested tool call and feed each result back into the
# conversation as a tool message.
for call in ai_msg.tool_calls or []:
    requested = all_tools[call["name"].lower()]
    messages.append(requested.invoke(call))

# Round 2: the model composes the final answer from the tool output.
print(llm_with_tools.invoke(messages).content)
