from langchain_core.tools import tool
from langchain_ollama import ChatOllama

# llm = ChatOllama(model="qwen2.5:7b-instruct-q5_K_S",temperature=0.5)
# Local Ollama chat model used as the agent's LLM.
# NOTE(review): reasoning=False presumably suppresses qwen3's "thinking"
# output so only the final answer is returned — confirm against the
# langchain-ollama ChatOllama docs for the installed version.
llm = ChatOllama(model="qwen3:8b", temperature=0.5, reasoning=False)

# Define tools
@tool
def multiply(a: int, b: int) -> int:
    """Multiply a and b.

    Args:
        a: first int
        b: second int
    """
    # The docstring above doubles as the tool description shown to the LLM,
    # so it is kept verbatim.
    product = a * b
    return product

@tool
def add(a: int, b: int) -> int:
    """Adds a and b.

    Args:
        a: first int
        b: second int
    """
    # Docstring kept byte-for-byte: langchain surfaces it as the tool
    # description the model reads when choosing tools.
    total = a + b
    return total

@tool
def divide(a: int, b: int) -> float:
    """Divide a and b.

    Args:
        a: first int
        b: second int
    """
    # True division; raises ZeroDivisionError when b == 0, exactly as the
    # original did. Docstring preserved verbatim (it is the tool description).
    quotient = a / b
    return quotient

# Augment the LLM with tools
# The tool list handed to the agent; LangGraph uses it to build the tool node.
tools = [add, multiply, divide]

# from IPython.display import display, Image
# NOTE(review): SystemMessage and ToolMessage are imported but never used in
# this file as shown — confirm whether they are needed before removing.
from langchain_core.messages import SystemMessage, HumanMessage, ToolMessage
from langgraph.prebuilt import create_react_agent

# Pass in:
# (1) the augmented LLM with tools
# (2) the tools list (which is used to create the tool node)
# create_react_agent builds a prebuilt ReAct-style graph: the LLM picks a
# tool, the tool node runs it, and the loop repeats until a final answer.
pre_built_agent = create_react_agent(llm, tools=tools)

# Show the agent
# from IPython.display import Image, display
# display(Image(pre_built_agent.get_graph().draw_mermaid_png()))

# pre_built_agent.get_graph().draw_png("langgraph1.png")

# Save the graph as an image
# import io
# from PIL import Image as PILImage
# img_data = pre_built_agent.get_graph().draw_mermaid_png()
# img = PILImage.open(io.BytesIO(img_data))
# img.save("langgraph.png")


# Invoke
# Send a single arithmetic request through the agent and print the full
# message transcript (human message, tool calls, tool results, final answer).
messages = [HumanMessage(content="Add 6 and 8.")]
# Bind the agent's output to its own name instead of rebinding `messages`:
# the original reused `messages` for both the input list and the result
# dict, which made the variable's type change mid-script.
result = pre_built_agent.invoke({"messages": messages})
for m in result["messages"]:
    m.pretty_print()
