import json
import os
from zhipuai import ZhipuAI
from dotenv import load_dotenv, find_dotenv

# Pull variables from a .env file (if one exists) into the process environment.
_ = load_dotenv(find_dotenv())

# Fail fast when the ZhipuAI credential is absent rather than at first API call.
api_key = os.getenv('ZHIPUAI_API_KEY')
if api_key is None:
    raise ValueError("API Key is not set in the .env file")

client = ZhipuAI(api_key=api_key)

def add_numbers(a, b):
    """Return the sum of *a* and *b* (the local tool backing the model's tool call)."""
    total = a + b
    return total

# Build and send a chat-completion request that exposes the add_numbers tool.
def create_function_calling_request(prompt):
    """Send *prompt* to GLM-4 with the ``add_numbers`` tool declared.

    Returns the raw chat-completion response; with ``tool_choice="auto"``
    the model decides whether to answer directly or emit a tool call.
    """
    # JSON-schema description of the local add_numbers function.
    add_numbers_tool = {
        "type": "function",
        "function": {
            "name": "add_numbers",
            "description": "将两数相加求和",
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "number"},
                    "b": {"type": "number"},
                },
                "required": ["a", "b"],
            },
        },
    }
    return client.chat.completions.create(
        model="glm-4",
        messages=[{"role": "user", "content": prompt}],
        tools=[add_numbers_tool],
        tool_choice="auto",
    )

# A prompt that should trigger a call to the add_numbers tool.
prompt = "计算13与24的和。"

# First round trip: send the prompt and dump the raw response for inspection.
response = create_function_calling_request(prompt)
print(f"===Debug===\n{response}\n")

# Extract the tool call the model emitted.  If the model answered directly
# (no tool call), fail with a clear message instead of an opaque TypeError.
tool_calls = response.choices[0].message.tool_calls
if not tool_calls:
    raise RuntimeError("Model response contained no tool_calls")
function_call = tool_calls[0]
function_name = function_call.function.name
function_args = json.loads(function_call.function.arguments)

if function_name == "add_numbers":
    a = function_args.get("a")
    b = function_args.get("b")
    result = add_numbers(a, b)

    # Return the locally computed result to the model.  BUG FIX: a
    # role="tool" message must carry the tool_call_id it answers;
    # without it the API cannot match this result to the pending call.
    follow_up_prompt = {
        "role": "tool",
        "tool_call_id": function_call.id,
        "content": json.dumps({"result": result})
    }
    # Conversation replay: user prompt, assistant's tool call, tool result.
    messages = [
        {"role": "user", "content": prompt},
        response.choices[0].message.model_dump(),
        follow_up_prompt
    ]

    print(f"===Debug===\nmessages=\n{messages}\n")
    # Second round trip: let the model phrase the final answer.
    final_response = client.chat.completions.create(
        model="glm-4",
        messages=messages
    )
    print(f"===Debug===\n{final_response}\n")
    # Print the model's final, natural-language answer.
    print(final_response.choices[0].message.content.strip())
else:
    print("Function not recognized.")
  