from langchain_core.tools import Tool
from app.bailian.common import chat_prompt_template, llm


"绑定自定义工具"


# 1.开发自定义工具
def fetch_weather(city: str) -> str:
    """Return the (mock) weather description for a city.

    Args:
        city: City name to look up (e.g. '北京').

    Returns:
        The weather string for a known city, or the fallback
        '天气多变' ("changeable weather") for unknown cities.
    """
    # Simulated weather-API call...
    weather_data = {
        '北京': '多云',
        '深圳': '晴朗'
    }
    # dict.get replaces the `not in weather_data.keys()` check:
    # one lookup instead of a membership test plus a second indexing.
    return weather_data.get(city, '天气多变')


# 2.将工具方法转为LangChain Tool对象
# 2. Wrap the plain Python function in a LangChain Tool object.
fetch_weather_tool = Tool.from_function(
    name='fetch_weather',  # unique tool name the model refers to
    description='获取某个城市的真实天气',  # description shown to the model
    func=fetch_weather,  # the callable executed when the tool is invoked
)
# Registry mapping tool names back to the underlying Python callables,
# used later to dispatch the model's tool calls.
tool_dict = {'fetch_weather': fetch_weather}
# 3.将大模型与Tool对象绑定
# 3. Bind the Tool object to the LLM so the model can request tool calls.
llm_with_tool = llm.bind_tools([fetch_weather_tool])
# Declare the chain: prompt template piped into the tool-bound model.
chain = chat_prompt_template | llm_with_tool
# 4. Invoke the model; the prompt template presumably expects the
#    "role"/"domain"/"question" variables — confirm against its definition.
resp = chain.invoke(input={"role": "气象学", "domain": "天气领域", "question": "深圳天气怎么样?"})
print(resp)  # content='' additional_kwargs={} response_metadata={'finish_reason': 'tool_calls', 'model_name': 'qwen-flash', 'model_provider': 'openai'} id='lc_run--22c45fc8-81dd-49dc-bdea-aa7ef5288321' tool_calls=[{'name': 'fetch_weather', 'args': {'__arg1': '深圳'}, 'id': 'call_5bea195addc242cbafc984', 'type': 'tool_call'}]
# 5.调用工具
# 5. Execute the tool calls requested by the model.
#    NOTE(review): the key '__arg1' is the positional-argument name that
#    Tool.from_function assigns to a single-parameter function — if the
#    tool gains named/multiple parameters this access will break.
for tool_calls in resp.tool_calls:
    print(tool_calls)  # {'name': 'fetch_weather', 'args': {'__arg1': '深圳'}, 'id': 'call_a74fe41dae4d4b85a69ddb', 'type': 'tool_call'}
    # Arguments the model chose for this tool call.
    args = tool_calls["args"]
    print(args)  # {'__arg1': '深圳'}
    # Tool name → look up the matching Python callable in the registry.
    func_name = tool_calls["name"]
    print(func_name)  # fetch_weather
    tool_func = tool_dict[func_name]
    # Invoke the actual Python function with the model-supplied argument.
    tool_content = tool_func(args["__arg1"])
    print(tool_content)  # 晴朗
