Function Calling
================

We offer a wrapper for function calling over the DashScope API and the
OpenAI API in `Qwen-Agent <https://github.com/QwenLM/Qwen-Agent>`__.
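
To run the example below, first install the package (``pip install
qwen-agent``) and make sure the API key for your chosen backend (for
example ``DASHSCOPE_API_KEY``) is available in the environment.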

Use Case
--------
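
The example below walks through one full round of tool use: the model is
given a function schema, it responds with a ``function_call``, the
corresponding local Python function is executed, and its result is sent
back so the model can produce a final, grounded answer.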

.. code:: py

    import json
    import os

    from qwen_agent.llm import get_chat_model


    # Example dummy function hard coded to return the same weather
    # In production, this could be your backend API or an external API
    def get_current_weather(location, unit='fahrenheit'):
        """Get the current weather in a given location"""
        if 'tokyo' in location.lower():
            return json.dumps({
                'location': 'Tokyo',
                'temperature': '10',
                'unit': 'celsius'
            })
        elif 'san francisco' in location.lower():
            return json.dumps({
                'location': 'San Francisco',
                'temperature': '72',
                'unit': 'fahrenheit'
            })
        elif 'paris' in location.lower():
            return json.dumps({
                'location': 'Paris',
                'temperature': '22',
                'unit': 'celsius'
            })
        else:
            return json.dumps({'location': location, 'temperature': 'unknown'})
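
    # Each branch returns a JSON *string* because the function result is
    # later passed back to the model as plain-text message content.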


    def test():
        llm = get_chat_model({
            # Use the model service provided by DashScope:
            'model': 'qwen-max',
            'model_server': 'dashscope',
            'api_key': os.getenv('DASHSCOPE_API_KEY'),

            # Use the model service provided by Together.AI:
            # 'model': 'Qwen/Qwen1.5-14B-Chat',
            # 'model_server': 'https://api.together.xyz',  # api_base
            # 'api_key': os.getenv('TOGETHER_API_KEY'),

            # Use your own model service compatible with OpenAI API:
            # 'model': 'Qwen/Qwen1.5-72B-Chat',
            # 'model_server': 'http://localhost:8000/v1',  # api_base
            # 'api_key': 'EMPTY',
        })
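
        # Whichever backend is configured above, `llm` exposes the same
        # `.chat()` interface used in the rest of this example.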

        # Step 1: send the conversation and available functions to the model
        messages = [{
            'role': 'user',
            'content': "What's the weather like in San Francisco?"
        }]
        functions = [{
            'name': 'get_current_weather',
            'description': 'Get the current weather in a given location',
            'parameters': {
                'type': 'object',
                'properties': {
                    'location': {
                        'type': 'string',
                        'description':
                            'The city and state, e.g. San Francisco, CA',
                    },
                    'unit': {
                        'type': 'string',
                        'enum': ['celsius', 'fahrenheit']
                    },
                },
                'required': ['location'],
            },
        }]
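
        # The `functions` entry above follows the OpenAI function-calling
        # format: `parameters` is a JSON Schema object describing the
        # arguments the model may generate.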

        print('# Assistant Response 1:')
        responses = []
        for responses in llm.chat(messages=messages,
                                  functions=functions,
                                  stream=True):
            print(responses)
        messages.extend(responses)  # extend conversation with assistant's reply

        # Step 2: check if the model wanted to call a function
        last_response = messages[-1]
        if last_response.get('function_call', None):

            # Step 3: call the function
            # Note: the JSON response may not always be valid; be sure to handle errors
            available_functions = {
                'get_current_weather': get_current_weather,
            }  # only one function in this example, but you can have multiple
            function_name = last_response['function_call']['name']
            function_to_call = available_functions[function_name]
            function_args = json.loads(last_response['function_call']['arguments'])
            function_response = function_to_call(
                location=function_args.get('location'),
                unit=function_args.get('unit'),
            )
            print('# Function Response:')
            print(function_response)

            # Step 4: send the info for each function call and function response to the model
            messages.append({
                'role': 'function',
                'name': function_name,
                'content': function_response,
            })  # extend conversation with function response

            print('# Assistant Response 2:')
            for responses in llm.chat(
                    messages=messages,
                    functions=functions,
                    stream=True,
            ):  # get a new response from the model where it can see the function response
                print(responses)


    if __name__ == '__main__':
        test()
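
When run with a valid API key, the first assistant response should end in a
``function_call`` asking for ``get_current_weather``, and the second
assistant response answers in natural language based on the JSON the
function returned.

If you do not need incremental output, ``chat`` can also be called with
``stream=False``. A minimal sketch, assuming the same ``llm``, ``messages``
and ``functions`` as in the example above:

.. code:: py

    # Non-streaming variant (sketch): returns the final response messages
    # directly instead of yielding incremental snapshots.
    responses = llm.chat(messages=messages, functions=functions, stream=False)
    messages.extend(responses)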