import os

from llama_index.core.agent import ReActAgent
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.tools import FunctionTool
from llama_index.llms.dashscope import DashScope


# Define the tool functions
def multiply(a: float, b: float) -> float:
    """Multiply two numbers and returns the product"""
    result = a * b
    # Bug fix: the original message said 加法 (addition) for a multiplication;
    # it should read 乘法 (multiplication).
    print(f"🔧 执行乘法: {a} * {b} = {result}")
    return result


def add(a: float, b: float) -> float:
    """Return the sum of *a* and *b*, logging the operation to stdout."""
    total = a + b
    print(f"🔧 执行加法: {a} + {b} = {total}")
    return total

# Wrap the plain functions as agent tools.
multiply_tool = FunctionTool.from_defaults(fn=multiply)
add_tool = FunctionTool.from_defaults(fn=add)

# Use the Qwen model via DashScope.
# SECURITY FIX: the API key was hard-coded in source; read it from the
# environment instead so the secret is never committed.
llm = DashScope(model="qwen-plus", api_key=os.environ.get("DASHSCOPE_API_KEY"))

# Conversation memory for the agent.
memory = ChatMemoryBuffer.from_defaults()

# Build the ReAct agent with the two arithmetic tools.
agent = ReActAgent(
    tools=[multiply_tool, add_tool],
    llm=llm,
    memory=memory,
    verbose=True,
)

# Inspect the agent's prompt templates.
prompt_dict = agent.get_prompts()
print(prompt_dict)

for prompt_name, prompt in prompt_dict.items():
    print(f"{prompt_name}: {prompt.template}")

# Run a single chat turn against the agent.
response = agent.chat("What is 123 * 45?Calculate step by step")
print("\n=== 查询结果 ===")
print("响应类型:", type(response))
print("响应内容:", response)

# Extract the final answer text from the response object.
if hasattr(response, 'content'):
    print("最终答案:", response.content)
else:
    print("最终答案:", str(response))