"""
为聊天模型添加临时工具调用支持。这是一种调用工具的替代方法，适用于使用不原生支持工具调用的模型。
"""
from typing import Dict, Any, Optional, TypedDict, Callable

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from langchain_core.runnables import RunnableConfig, RunnablePassthrough
from langchain_core.tools import tool, render_text_description
from models import get_ds_model_client

@tool
def add(a: int, b: int) -> int:
    """add two numbers

    Args:
        a: first number
        b: second number
    """
    # NOTE: the docstring above is rendered into the LLM prompt by
    # render_text_description, so it doubles as the tool's description.
    return a + b

@tool
def multiply(a: int, b: int) -> int:
    """multiply two numbers

    Args:
        a: first number,
        b: second number
    """
    # Docstring kept verbatim: render_text_description ships it to the LLM as
    # the tool description, so its text is part of the prompt contract.
    product = a * b
    return product

# 调用工具
class ToolCallRequest(TypedDict):
    """A typed dict that shows the inputs into the invoke_tool function."""
    # Must match one of the registered tool names (here: "add" / "multiply").
    name: str
    # Keyword arguments forwarded as-is to the selected tool's invoke().
    arguments: Dict[str, Any]

# 调用函数执行
def invoke_tool(tool_call_request: ToolCallRequest, config: Optional[RunnableConfig] = None) -> Any:
    """Dispatch a model-produced tool call to the matching registered tool.

    Args:
        tool_call_request: dict carrying the tool ``name`` and its ``arguments``.
        config: optional RunnableConfig forwarded to the tool's ``invoke``.

    Returns:
        Whatever the selected tool returns.

    Raises:
        KeyError: if ``name`` does not match any registered tool.
    """
    # Relies on the module-level ``tools`` list defined below; this is fine
    # because the lookup happens at call time, but the function must not be
    # called before ``tools`` is assigned.
    # (Loop variable renamed from ``tool``, which shadowed the imported
    # ``tool`` decorator inside the comprehension.)
    name_to_tool: Dict[str, Callable] = {t.name: t for t in tools}
    name = tool_call_request["name"]
    try:
        requested_tool = name_to_tool[name]
    except KeyError:
        # Same exception type as the bare lookup, but with an actionable message.
        raise KeyError(
            f"Unknown tool {name!r}; available tools: {sorted(name_to_tool)}"
        ) from None
    return requested_tool.invoke(tool_call_request["arguments"], config=config)
# print(invoke_tool({"name": "multiply", "arguments": {"a": 3, "b": 5}}))

# Registry of tools exposed to the model; invoke_tool above dispatches against it.
tools = [add, multiply]
# Plain-text rendering (name, signature, docstring) of each tool, interpolated
# into the system prompt below.
rendered_tools  = render_text_description(tools)
# Print the tool descriptions
# print(rendered_tools)

# The prompt passes the tool descriptions to the model as context and uses
# them to have the model generate a tool call.
# NOTE(review): rendered_tools is baked in via f-string BEFORE the template is
# built; if any tool description ever contained '{' or '}',
# ChatPromptTemplate.from_template would treat it as a template variable —
# confirm descriptions stay brace-free.
system_prompt = f"""\
You are an assistant that has access to the following set of tools. 
Here are the names and descriptions for each tool:

{rendered_tools}

Given the user input, return the name and input of the tool to use. 
Return your response as a JSON blob with 'name' and 'arguments' keys.

The `arguments` should be a dictionary, with keys corresponding 
to the argument names and the values corresponding to the requested values.
"""

# Two-message prompt: system message (tool catalog + JSON instructions) plus
# the raw user input.
prompt = ChatPromptTemplate.from_messages([
    SystemMessagePromptTemplate.from_template(system_prompt),
    HumanMessagePromptTemplate.from_template("{input}")
])
llm = get_ds_model_client()

# Variant 1: parse the model output into a {'name': ..., 'arguments': ...} dict.
chain = prompt | llm | JsonOutputParser()
print(chain.invoke({"input": "what's 3 plus 1132"}))
# Prints: {'name': 'add', 'arguments': {'a': 3, 'b': 1132}}

# Variant 2: pipe the parsed call straight into invoke_tool to execute it.
# chain = prompt | llm | JsonOutputParser() | invoke_tool
# print(chain.invoke({"input": "what's 3 plus 1132"}))
# Prints: 1135

# Variant 3: keep the parsed call and attach the tool result under 'output'.
# chain = prompt | llm | JsonOutputParser() | (RunnablePassthrough.assign(output=invoke_tool))
# print(chain.invoke({"input": "what's 3 plus 1132"}))
# Prints: {'name': 'add', 'arguments': {'a': 3, 'b': 1132}, 'output': 1135}

