# -*- coding: utf-8 -*-
"""
@Time    : 2024/8/23 16:00 
@Author  : ZhangShenao 
@File    : 8.使用LLM实现工具调用.py 
@Desc    : 使用LLM实现工具调用
"""
import os

import dotenv
from langchain_core.messages import ToolMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field

from gaode_weather_tool import GaodeWeatherTool
from google_serper_tool import create_google_serper_tool


class SerperArgSchema(BaseModel):
    """Argument schema for the Google Serper search tool.

    NOTE(review): this schema is not referenced anywhere else in this
    file — presumably intended for the serper tool's args_schema; verify.
    """

    # The search query string passed to the Serper tool
    query: str = Field(description="执行搜索的查询语句")


# Load environment variables (OPENAI_API_BASE, API keys, etc.)
dotenv.load_dotenv()

# Create the LLM (temperature=0 for deterministic tool selection)
llm = ChatOpenAI(model="gpt-4o", temperature=0, openai_api_base=os.getenv("OPENAI_API_BASE"))

# Build a name -> tool registry so tool_calls can be dispatched by name later
serper_tool = create_google_serper_tool()
weather_tool = GaodeWeatherTool()
tools = {
    weather_tool.name: weather_tool,
    serper_tool.name: serper_tool,
}

# Bind the tools to the LLM; tool_choice="auto" lets the model decide
# whether (and which tool) to call.
# list(tools.values()) replaces the redundant identity comprehension.
llm_with_tools = llm.bind_tools(tools=list(tools.values()), tool_choice="auto")

# Create the prompt
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个功能强大的聊天机器人，请根据用户的提问进行准确地回答。必要时可以调用工具来解决问题。"),
    ("human", "{query}")
])

# Compose the chain: raw query -> prompt -> tool-enabled LLM
chain = {"query": RunnablePassthrough()} | prompt | llm_with_tools

# Run the chain and inspect the response
query = "我想知道2024年奥运会谁是男子乒乓球单打冠军？"
resp = chain.invoke(query)
tool_calls = resp.tool_calls
if not tool_calls:  # no tool requested — the model answered directly
    print(f"输出普通文本: {resp.content}")
else:  # the model requested one or more tool calls
    # Rebuild the conversation (System/Human), then append the AI message
    # that carries the tool_calls — required context for the follow-up call.
    messages = prompt.invoke({"query": query}).to_messages()
    messages.append(resp)

    for tool_call in tool_calls:
        # Each tool_call is a dict with "name", "args" and "id"
        tool_name = tool_call.get("name")
        args = tool_call.get("args")
        call_id = tool_call.get("id")

        tool = tools.get(tool_name)
        if tool is None:
            # Unknown tool name: still answer this tool_call id so the
            # API does not reject the message sequence for a missing
            # tool response, then move on instead of crashing.
            messages.append(ToolMessage(content=f"未找到工具: {tool_name}", tool_call_id=call_id))
            continue

        # Announce the call *before* invoking, so the progress message
        # is not printed after the work has already finished.
        print(f"正在调用工具: {tool.name}...")
        print(f"调用参数: {args}")
        call_result = tool.invoke(args)
        print(f"工具调用结果: {call_result}")
        # str() coercion: ToolMessage content must be a string
        messages.append(ToolMessage(content=str(call_result), tool_call_id=call_id))

    # Send the full message history back to the LLM for the final answer;
    # print .content for consistency with the plain-text branch above.
    final_resp = llm.invoke(messages)
    print(final_resp.content)
