from langchain_core.tools import tool
#from langchain_openai import ChatOpenAI

from langchain_ollama import ChatOllama
from langchain_core.messages import HumanMessage, ToolMessage
import json
# Tool definitions: @tool wraps each function as a LangChain tool; the
# docstring becomes the tool description the model sees when deciding
# which tool to call.
@tool
def multiply(first_int: int, second_int: int) -> int:
    """Multiply first_int by second_int and return the product."""
    return first_int * second_int

@tool
def add(first_int: int, second_int: int) -> int:
    """Add first_int and second_int and return the sum."""
    return first_int + second_int

# Tools the model is allowed to call, and a name -> tool dispatch table
# for executing the model's tool_calls.
tools = [multiply, add]
tool_registry = {"add": add, "multiply": multiply}
print(tools)

# Chat model bound to the tools so it can emit structured tool calls.
# NOTE(review): base_url is a hard-coded LAN address — consider reading it
# from an environment variable (e.g. OLLAMA_HOST) for portability.
llm = ChatOllama(base_url="http://192.168.99.142:11434", model="qwen2.5-coder:latest")
llm_with_tools = llm.bind_tools(tools)

# Conversation: a single user question the model should answer by calling
# the multiply tool.
prompt = "what is 200 * 300?"
messagesLC = [HumanMessage(prompt)]

# First round trip: the model responds with tool_calls rather than a
# final answer; append its message to the history.
response = llm_with_tools.invoke(messagesLC)
messagesLC.append(response)

print("--original response--")
print(response)
print("--response--")
print(response.tool_calls)
print("--hawk--")

# Execute each requested tool call. Tool.invoke(tool_call) runs the tool
# and returns a ToolMessage carrying the matching tool_call_id, which we
# append so the model can see the tool results on the next turn.
for tool_call in response.tool_calls:
    selected_tool = tool_registry[tool_call["name"].lower()]
    tool_msg = selected_tool.invoke(tool_call)
    messagesLC.append(tool_msg)

# Second round trip: with the tool results in the history the model can
# now produce the final natural-language answer.
response = llm_with_tools.invoke(messagesLC)
messagesLC.append(response)
print("=====最终结果=====")
print(messagesLC[-1].content)
print("=====all messages=====")
for i in messagesLC:
    print(i)