from langchain_core.tools import tool

@tool
def multiply(a: int, b: int) -> int:
    """Multiply two numbers."""
    # The docstring above doubles as the tool description LangChain sends
    # to the model, so it is kept verbatim.
    product = a * b
    return product

# run tools directly with .invoke() method
# result = multiply.invoke({"a": 6, "b": 7})  # returns 42
# print("=============  run tools directly  ==============")
# print(result)  # prints 42

# run a tool with a tool_call dict; .invoke() then returns a ToolMessage
# tool_call = {
#     "type": "tool_call",
#     "id": "1",
#     "args": {"a": 42, "b": 7}
# }
# result = multiply.invoke(tool_call) # returns a ToolMessage object
# print("=============  run tools with tool message  ==============")
# print(result)  # prints a ToolMessage whose content is 294 (42 * 7)

# run tools in an agent loop with a model that can call tools
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent
from langchain_ollama import ChatOllama

# Local Ollama-served chat model used for tool calling below.
# NOTE(review): reasoning=False presumably suppresses qwen3's "thinking"
# output so responses contain only the final answer — confirm against the
# langchain_ollama ChatOllama documentation.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

# run tools with the prebuilt ReAct agent
# agent = create_react_agent(
#     model=llm,
#     tools=[multiply]
# )
# result = agent.invoke({"messages": [{"role": "user", "content": "what's 42 x 7?"}]})
# print("=============  run tools in an agent loop  ==============")
# for msg in result['messages']:
#     print(f"{msg.__class__.__name__} : {getattr(msg, 'content', '')} with tool_call={getattr(msg, 'tool_calls', None)}")
#     print(f"{msg}")

# Bind the tool schema to the model so it can emit structured tool calls,
# then execute the first requested call manually.
model_with_tools = llm.bind_tools([multiply])

response_message = model_with_tools.invoke("what's 42 x 7?")

# Guard: the model may answer directly without requesting any tool call,
# in which case tool_calls is an empty list and indexing [0] would raise
# an uninformative IndexError.
if not response_message.tool_calls:
    raise RuntimeError("model did not request a tool call")

# Each entry in tool_calls is a tool_call dict ({"name", "args", "id", ...}),
# not a ToolMessage.
tool_call = response_message.tool_calls[0]
# print(tool_call)  # prints the tool_call dict
# print("=============  run tools in an agent loop  ==============")
# print(response_message.tool_calls)  # prints the list of tool_call dicts

# Invoking a tool with a full tool_call dict returns a ToolMessage.
tool_msg = multiply.invoke(tool_call)
print(f"{tool_msg.__class__.__name__} : {tool_msg}")  # e.g. ToolMessage with content '294'
