import gradio as gr
import json
import requests
from typing import List, Optional
from pydantic import BaseModel, Field
from together import Together
import os
# Initialize Together API
together = Together(api_key=os.getenv("togetherai"))
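# Note: this script assumes the Together API key is stored in the "togetherai"
# environment variable; the backend base URL is read from "baseurl" further
# down in make_api_call.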
# Define the schemas for different API operations
class OrderItem(BaseModel):
    item_id: str
    quantity: int

class CreateOrderExtract(BaseModel):
    user_id: Optional[str] = Field(None, description="The user's ID, if provided; it can also be a name")
    items: List[OrderItem] = Field(..., description="List of items ordered")

class CancelOrderExtract(BaseModel):
    order_id: str = Field(..., description="The ID of the order to cancel; may be referred to as order id, order, id, or order number")

class CheckOrderStatusExtract(BaseModel):
    order_id: str = Field(..., description="The ID of the order to check; may be referred to as order id, order, id, or order number")

class CreateInvoiceExtract(BaseModel):
    order_id: str = Field(..., description="The ID of the order for which to create an invoice")
    amount: float = Field(..., description="The amount of the invoice")

class GetInvoiceDetailsExtract(BaseModel):
    invoice_id: str = Field(..., description="The ID of the invoice to get details for")

class CreatePaymentExtract(BaseModel):
    invoice_id: str = Field(..., description="The ID of the invoice to pay")
    order_id: str = Field(..., description="The ID of the order associated with the payment")
    amount: float = Field(..., description="The amount of the payment")
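# A minimal sketch of what these models are for: each Pydantic class above is
# turned into a JSON schema via model_json_schema() and passed to the model in
# extract_info below. For CancelOrderExtract the schema looks roughly like this
# (abbreviated; the exact output depends on the Pydantic version):
#   CancelOrderExtract.model_json_schema()
#   # -> {"properties": {"order_id": {"description": "...", "type": "string", ...}},
#   #     "required": ["order_id"], "title": "CancelOrderExtract", "type": "object"}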
# Function to classify user message
def classify_message(message: str) -> str:
    classify = together.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "Classify the following message into exactly one of these categories: create_order (create an order), cancel_order (cancel an order), check_order_status (check an order's status and details), create_invoice (create an invoice), get_invoice_details (get invoice details), create_payment. Respond with the category name and nothing else; even if the message is ambiguous, give your best classification.",
            },
            {
                "role": "user",
                "content": message,
            },
        ],
        model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    )
    category = classify.choices[0].message.content.strip()
    print(category)
    return category
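# Illustrative call (hypothetical message; the model's answer is expected to be
# one of the six category names):
#   classify_message("Please cancel order 42")  # -> "cancel_order"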
# Function to extract information based on classification
def extract_info(message: str, classification: str) -> dict:
    schema_map = {
        "create_order": CreateOrderExtract,
        "cancel_order": CancelOrderExtract,
        "check_order_status": CheckOrderStatusExtract,
        "create_invoice": CreateInvoiceExtract,
        "get_invoice_details": GetInvoiceDetailsExtract,
        "create_payment": CreatePaymentExtract,
    }
    schema = schema_map[classification]
    extract = together.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": f"Extract {classification} information from the following message. Respond only in JSON.",
            },
            {
                "role": "user",
                "content": message,
            },
        ],
        model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        response_format={
            "type": "json_object",
            "schema": schema.model_json_schema(),
        },
    )
    info = json.loads(extract.choices[0].message.content)
    print(info)
    return info
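# Illustrative call (hypothetical message; JSON mode constrains the output to
# the selected schema, so a cancel request should yield something like):
#   extract_info("Please cancel order 42", "cancel_order")  # -> {"order_id": "42"}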
# Function to make API call
def make_api_call(classification: str, info: dict) -> dict:
    base_url = os.getenv("baseurl")
    api_map = {
        "create_order": ("POST", f"{base_url}/orders"),
        "cancel_order": ("POST", f"{base_url}/orders/{{order_id}}/cancel"),
        "check_order_status": ("GET", f"{base_url}/orders/{{order_id}}/status"),
        "create_invoice": ("POST", f"{base_url}/invoices"),
        "get_invoice_details": ("GET", f"{base_url}/invoices/{{invoice_id}}"),
        "create_payment": ("POST", f"{base_url}/payments"),
    }
    method, url_template = api_map[classification]
    # Fill in URL placeholders (e.g. {order_id}) from the extracted info
    try:
        url = url_template.format(**info)
    except KeyError as e:
        return {"error": f"Missing required information: {str(e)}"}
    try:
        if method == "GET":
            response = requests.get(url)
        elif method == "POST":
            response = requests.post(url, json=info)
        response.raise_for_status()
        return response.json()
    except requests.RequestException as e:
        return {"error": f"API request failed: {str(e)}"}
# Function to interpret API response
def interpret_response(user_message: str, classification: str, api_response: dict) -> str:
    interpret = together.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "Combine the user's query and the automated system's API response into a coherent, natural reply that confirms the state of the user's request. Your response is shown directly to the user, so do not mention that you are reading an API response. Be natural and helpful, give the user the relevant IDs, and include every detail from the API response, even details the user did not ask for.",
            },
            {
                "role": "user",
                "content": f"User message: {user_message}\nRequest type: {classification}\nAPI response: {json.dumps(api_response)}",
            },
        ],
        model="meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
    )
    reply = interpret.choices[0].message.content
    print(reply)
    return reply
# Main function to process the user request
def process_request(user_message: str) -> str:
    # Classify the message
    classification = classify_message(user_message)
    # Extract information based on the classification
    info = extract_info(user_message, classification)
    # Make the API call
    api_response = make_api_call(classification, info)
    # Interpret the response
    interpretation = interpret_response(user_message, classification, api_response)
    return interpretation
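# End-to-end sketch (hypothetical input): process_request("Where is order 42?")
# classifies to "check_order_status", extracts {"order_id": "42"}, calls
# GET {base_url}/orders/42/status, and returns a natural-language summary of
# the API response.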
# Define the API documentation with the repository link
api_documentation = """
## Available API Methods
### 1. Create Order
- **Endpoint:** POST /orders
- **Inputs:**
- user_id: (Optional) The user's ID or name.
- items: List of ordered items with `item_id` and `quantity`.
### 2. Cancel Order
- **Endpoint:** POST /orders/{order_id}/cancel
- **Inputs:**
- order_id: The ID of the order to be canceled.
### 3. Check Order Status
- **Endpoint:** GET /orders/{order_id}/status
- **Inputs:**
- order_id: The ID of the order whose status to check.
### 4. Create Invoice
- **Endpoint:** POST /invoices
- **Inputs:**
- order_id: The ID of the order.
- amount: The invoice amount.
### 5. Get Invoice Details
- **Endpoint:** GET /invoices/{invoice_id}
- **Inputs:**
- invoice_id: The ID of the invoice to retrieve.
### 6. Create Payment
- **Endpoint:** POST /payments
- **Inputs:**
- invoice_id: The ID of the invoice to pay.
- order_id: The ID of the associated order.
- amount: The payment amount.
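For illustration (hypothetical base URL, IDs, and quantities), a create-order call might look like:
```python
import requests

base_url = "http://localhost:8000"  # assumption: your API's base URL

# POST /orders with an optional user and a list of ordered items
response = requests.post(
    f"{base_url}/orders",
    json={"user_id": "alice", "items": [{"item_id": "SKU-1", "quantity": 2}]},
)
print(response.json())
```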
You can use these examples when interacting with the system.
For more details on the API, including the routes and implementation, visit the GitHub repository:
[Order Management API Repository](https://github.com/akash-mondal/order-management-api)
"""
# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Function Calling Demo")
    gr.Markdown("""
    This is a demo of function calling against custom API endpoints, using Together AI language models. **Llama 3.1 70B** handles all three steps:
    - classifying which API to route the request to,
    - extracting the API parameters from natural language (in JSON mode), and
    - interpreting the API response back into natural language for the end user.
    """)
    with gr.Tab("User Input"):
        user_input = gr.Textbox(label="Enter your message")
        output = gr.Textbox(label="Response")
        submit_button = gr.Button("Submit")

        def handle_submit(user_message):
            return process_request(user_message)

        submit_button.click(handle_submit, inputs=user_input, outputs=output)
    with gr.Tab("API Documentation"):
        gr.Markdown(api_documentation)
# Run the demo
demo.launch()