

from common import add, chat_prompt_template, llm


# Registry mapping tool names (as the model emits them) to callables.
tool_dict = dict(add=add)

# Expose the `add` tool to the model so it can emit tool calls.
llm_with_tools = llm.bind_tools(list(tool_dict.values()))

# Pipeline: prompt template -> tool-aware LLM.
chain = chat_prompt_template | llm_with_tools




# Shared request payload — defined once so the stream and invoke demos
# cannot silently diverge (was duplicated inline before).
request = {
    "role": '计算',
    "domain": '数学计算',
    "question": '使用工具计算：100+100',
}

try:
    print("=== 使用 Stream 方法（流式显示）===")
    # Streaming: surface tool-call argument fragments as they are generated.
    for chunk in chain.stream(input=request):
        if hasattr(chunk, 'tool_call_chunks'):
            for tool_chunk in chunk.tool_call_chunks:
                # Only print fragments that actually carry argument text.
                if tool_chunk.get('args'):
                    print(f"模型思考中: {tool_chunk['args']}")

    print("\n=== 使用 Invoke 方法（完整结果）===")
    # Synchronous call: get the complete response in one shot.
    resp = chain.invoke(input=request)

    # If the model requested tool calls, dispatch each one to the
    # matching callable in tool_dict and report the result.
    if hasattr(resp, 'tool_calls') and resp.tool_calls:
        for tool_call in resp.tool_calls:
            print(f"检测到工具调用: {tool_call['name']}")
            print(f"参数: {tool_call['args']}")

            # Execute the tool with the model-supplied arguments.
            tool_func = tool_dict[tool_call['name']]
            result = tool_func.invoke(tool_call['args'])
            print(f"计算结果: {result}")

except Exception as e:
    # Top-level boundary of the demo script: report and exit gracefully.
    print(f"API调用失败: {e}")