Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 
 title = """# 🙋🏻♂️ Welcome to Tonic's Salesforce/Xlam-7B-r"""
 description = """
-Large Action Models (LAMs) are advanced large language models designed to enhance decision-making and translate user intentions into executable actions that interact with the world. LAMs autonomously plan and execute tasks to achieve specific goals, serving as the brains of AI agents. They have the potential to automate workflow processes across various domains, making them invaluable for a wide range of applications.
+🎬 Large Action Models (LAMs) are advanced large language models designed to enhance decision-making and translate user intentions into executable actions that interact with the world. LAMs autonomously plan and execute tasks to achieve specific goals, serving as the brains of AI agents. They have the potential to automate workflow processes across various domains, making them invaluable for a wide range of applications. Check out the Salesforce/xLAM models: [🤗 xLAM-1b-fc-r](https://huggingface.co/Salesforce/xLAM-1b-fc-r) | [🤗 xLAM-1b-fc-r-GGUF](https://huggingface.co/Salesforce/xLAM-1b-fc-r-gguf) | [🤗 xLAM-7b-fc-r](https://huggingface.co/Salesforce/xLAM-7b-fc-r) | [🤗 xLAM-7b-fc-r-GGUF](https://huggingface.co/Salesforce/xLAM-7b-fc-r-gguf) | [🤗 xLAM-7b-r](https://huggingface.co/Salesforce/xLAM-7b-r) | [🤗 xLAM-8x7b-r](https://huggingface.co/Salesforce/xLAM-8x7b-r) | [🤗 xLAM-8x22b-r](https://huggingface.co/Salesforce/xLAM-8x22b-r)
 ### Join us :
 🌟TeamTonic🌟 is always making cool demos! Join our active builder's 🛠️community 👻 [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/GWpVpekp) On 🤗Huggingface:[MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Tonic-AI](https://github.com/tonic-ai) & contribute to🌟 [Build Tonic](https://git.tonic-ai.com/)🤗Big thanks to Yuvi Sharma and all the folks at huggingface for the community grant 🤗
 """
@@ -31,6 +31,45 @@ tool_calls an empty list "[]".
 ```
 """.strip()
 
+# Example tools and query
+example_tools = json.dumps([
+    {
+        "name": "get_weather",
+        "description": "Get the current weather for a location",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "location": {
+                    "type": "string",
+                    "description": "The city and state, e.g. San Francisco, New York"
+                },
+                "unit": {
+                    "type": "string",
+                    "enum": ["celsius", "fahrenheit"],
+                    "description": "The unit of temperature to return"
+                }
+            },
+            "required": ["location"]
+        }
+    },
+    {
+        "name": "search",
+        "description": "Search for information on the internet",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {
+                    "type": "string",
+                    "description": "The search query, e.g. 'latest news on AI'"
+                }
+            },
+            "required": ["query"]
+        }
+    }
+], indent=2)
+
+example_query = "What's the weather like in New York in fahrenheit?"
+
 def convert_to_xlam_tool(tools):
     if isinstance(tools, dict):
         return {
@@ -88,55 +127,57 @@ def generate_response(tools_input, query):
 
     return agent_action
 
+def generate_response(tools_input, query):
+    try:
+        tools = json.loads(tools_input)
+    except json.JSONDecodeError:
+        return "Error: Invalid JSON format for tools input."
+
+    xlam_format_tools = convert_to_xlam_tool(tools)
+    content = build_prompt(task_instruction, format_instruction, xlam_format_tools, query)
+
+    messages = [
+        {'role': 'user', 'content': content}
+    ]
+
+    inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
+    outputs = model.generate(inputs, max_new_tokens=512, do_sample=False, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id)
+    agent_action = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
+
+    return agent_action
+
 # Gradio interface
-                    "properties": {
-                        "query": {
-                            "type": "string",
-                            "description": "The search query, e.g. 'latest news on AI'"
-                        }
-                    },
-                    "required": ["query"]
-                }
-            }
-        ], indent=2)
-        ),
-        gr.Textbox(label="User Query", lines=2, value="What's the weather like in New York in fahrenheit?")
-    ],
-    outputs=gr.Textbox(label="Generated Response", lines=5),
-    title=title,
-    description=description,
-)
+with gr.Blocks() as demo:
+    gr.Markdown(title)
+    gr.Markdown(description)
+
+    with gr.Row():
+        with gr.Column():
+            tools_input = gr.Code(
+                label="Available Tools (JSON format)",
+                lines=20,
+                value=example_tools,
+                language='json'
+            )
+            query_input = gr.Textbox(
+                label="User Query",
+                lines=2,
+                value=example_query
+            )
+            submit_button = gr.Button("Generate Response")
+
+        with gr.Column():
+            output = gr.Textbox(label="Generated Response", lines=10)
+
+    submit_button.click(generate_response, inputs=[tools_input, query_input], outputs=output)
+
+    gr.Examples(
+        examples=[
+            [example_tools, "What's the weather like in San Francisco in celsius?"],
+            [example_tools, "Search for the latest news on artificial intelligence"],
+        ],
+        inputs=[tools_input, query_input],
+    )
 
 if __name__ == "__main__":
+    demo.launch()