Usage From Our SDK
pip install scalegen-function-calling
from scalegen_function_calling import CustomOpenAIClient
from openai import OpenAI

# OpenAI-style tool (function) schemas the model can call.
tools = [
    {
        "type": "function",
        "function": {
            "name": "Expense",
            "description": "",
            "parameters": {
                "type": "object",
                "properties": {
                    "description": {"type": "string"},
                    "net_amount": {"type": "number"},
                    "gross_amount": {"type": "number"},
                    "tax_rate": {"type": "number"},
                    "date": {"type": "string", "format": "date-time"},
                },
                "required": [
                    "description",
                    "net_amount",
                    "gross_amount",
                    "tax_rate",
                    "date",
                ],
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "ReportTool",
            "description": "",
            "parameters": {
                "type": "object",
                "properties": {"report": {"type": "string"}},
                "required": ["report"],
            },
        },
    },
]

model_name = "ScaleGenAI/Llama3-70B-Function-Calling"
api_key = "<YOUR_API_KEY>"
# Fixed: was misspelled `api_endpint`, which made the OpenAI(...) call below
# raise NameError on `api_endpoint`.
api_endpoint = "<YOUR_API_ENDPOINT>"

messages = [
    {"role": "user", "content": 'I have spend 5$ on a coffee today please track my expense. The tax rate is 0.2. plz add to expense'}
]

# Point the standard OpenAI client at the ScaleGen-hosted endpoint.
client = OpenAI(
    api_key=api_key,
    base_url=api_endpoint,
)
custom_client = CustomOpenAIClient(client)  # patch the client

# Non-streaming chat completion; the model may respond with a tool call
# matching one of the schemas in `tools`.
response = custom_client.chat.completions.create(
    model=model_name,
    messages=messages,
    tools=tools,
    stream=False,
)
- Downloads last month
- 6
This model does not have enough activity to be deployed to the Inference API (serverless) yet. Increase its social
visibility and check back later, or deploy it to Inference Endpoints (dedicated)
instead.