Improve code formatting, fix groupchat messages, and improve prompts to avoid Azure content filters
Files changed:
- app.py +63 -16
- autogen_utils.py +68 -19
- configs.py +6 -0
app.py
CHANGED
@@ -14,7 +14,7 @@ from autogen_utils import (
     get_retrieve_config,
     initialize_agents,
 )
-from configs import Q1, Q2, Q3, TIMEOUT, TITLE
+from configs import DEFAULT_TERMINATE_MESSAGE, Q1, Q2, Q3, TIMEOUT, TITLE
 from custom_widgets import RowAgentWidget
 from panel.chat import ChatInterface
 from panel.widgets import Button, CodeEditor, PasswordInput, Switch, TextInput
@@ -28,7 +28,7 @@ def get_description_text():
     return f"""
     # {TITLE}
 
-    This is an AutoGen playground built with [Panel](https://panel.holoviz.org/). You can use it to interact with the AutoGen agents.
+    This is an AutoGen playground built with [Panel](https://panel.holoviz.org/). You can use it to interact with the AutoGen agents. Scroll down to see the code for creating and using the agents.
 
     #### [[AutoGen](https://github.com/microsoft/autogen)] [[Discord](https://discord.gg/pAbnFJrkgZ)] [[Paper](https://arxiv.org/abs/2308.08155)] [[SourceCode](https://github.com/thinkall/autogen-demos)]
     """
@@ -42,6 +42,9 @@ txt_model = TextInput(
 pwd_openai_key = PasswordInput(
     name="OpenAI API Key", placeholder="Enter your OpenAI API Key here...", sizing_mode="stretch_width"
 )
+pwd_openai_url = PasswordInput(
+    name="OpenAI Base Url", placeholder="Enter your OpenAI Base Url here...", sizing_mode="stretch_width"
+)
 pwd_aoai_key = PasswordInput(
     name="Azure OpenAI API Key", placeholder="Enter your Azure OpenAI API Key here...", sizing_mode="stretch_width"
 )
@@ -49,7 +52,7 @@ pwd_aoai_url = PasswordInput(
     name="Azure OpenAI Base Url", placeholder="Enter your Azure OpenAI Base Url here...", sizing_mode="stretch_width"
 )
 file_cfg = pn.widgets.FileInput(filename="OAI_CONFIG_LIST", sizing_mode="stretch_width")
-template.main.append(pn.Row(txt_model, pwd_openai_key, pwd_aoai_key, pwd_aoai_url, file_cfg))
+template.main.append(pn.Row(txt_model, pwd_openai_key, pwd_openai_url, pwd_aoai_key, pwd_aoai_url, file_cfg))
 
 
 def get_config(tmpfilename="OAI_CONFIG_LIST"):
@@ -68,12 +71,18 @@ def get_config(tmpfilename="OAI_CONFIG_LIST"):
     if not config_list:
         os.environ["MODEL"] = txt_model.value
         os.environ["OPENAI_API_KEY"] = pwd_openai_key.value
+        os.environ["OPENAI_API_BASE"] = pwd_openai_url.value
         os.environ["AZURE_OPENAI_API_KEY"] = pwd_aoai_key.value
         os.environ["AZURE_OPENAI_API_BASE"] = pwd_aoai_url.value
 
         config_list = autogen.config_list_from_models(
             model_list=[os.environ.get("MODEL", "gpt-35-turbo")],
         )
+        for cfg in config_list:
+            if cfg.get("api_type", "open_ai") == "open_ai":
+                base_url = os.environ.get("OPENAI_API_BASE", "").strip()
+                if base_url:
+                    cfg["base_url"] = base_url
     if not config_list:
         config_list = [
             {
@@ -86,7 +95,7 @@ def get_config(tmpfilename="OAI_CONFIG_LIST"):
         ]
 
     llm_config = {
-        "timeout":
+        "timeout": TIMEOUT,
         "cache_seed": 42,
         "config_list": config_list,
         "temperature": 0,
@@ -112,7 +121,7 @@ column_agents = pn.Column(
     RowAgentWidget(
         value=[
            "User_Proxy",
-           "
+           "",
            "UserProxyAgent",
            "",
        ]
@@ -123,7 +132,7 @@ column_agents.append(
     RowAgentWidget(
         value=[
            "Assistant_Agent",
-           "
+           "",
            "AssistantAgent",
            "",
        ]
@@ -145,11 +154,46 @@ btn_add.on_click(add_agent)
 btn_remove.on_click(remove_agent)
 
 
-def send_messages(recipient, messages, sender, config):
+async def send_messages(recipient, messages, sender, config):
+    print(f"{sender.name} -> {recipient.name}: {messages[-1]['content']}")
     chatiface.send(messages[-1]["content"], user=sender.name, respond=False)
     return False, None  # required to ensure the agent communication flow continues
 
 
+class myGroupChatManager(autogen.GroupChatManager):
+    def _send_messages(self, message, sender, config):
+        message = self._message_to_dict(message)
+
+        if message.get("role") == "function":
+            content = message["content"]
+        else:
+            content = message.get("content")
+            if content is not None:
+                if "context" in message:
+                    content = autogen.OpenAIWrapper.instantiate(
+                        content,
+                        message["context"],
+                        self.llm_config and self.llm_config.get("allow_format_str_template", False),
+                    )
+            if "function_call" in message:
+                function_call = dict(message["function_call"])
+                content = f"Suggested function Call: {function_call.get('name', '(No function name found)')}"
+        chatiface.send(content, user=sender.name, respond=False)
+        return False, None  # required to ensure the agent communication flow continues
+
+    def _process_received_message(self, message, sender, silent):
+        message = self._message_to_dict(message)
+        # When the agent receives a message, the role of the message is "user". (If 'role' exists and is 'function', it will remain unchanged.)
+        valid = self._append_oai_message(message, "user", sender)
+        if not valid:
+            raise ValueError(
+                "Received message can't be converted into a valid ChatCompletion message. Either content or function_call must be provided."
+            )
+        if not silent:
+            self._print_received_message(message, sender)
+            self._send_messages(message, sender, None)
+
+
 def init_groupchat(event, collection_name):
     llm_config = get_config(collection_name)
     agents = []
@@ -182,9 +226,9 @@ def init_groupchat(event, collection_name):
         agents.append(agent)
     if len(agents) >= 3:
         groupchat = autogen.GroupChat(
-            agents=agents, messages=[], max_round=12, speaker_selection_method="
+            agents=agents, messages=[], max_round=12, speaker_selection_method="auto", allow_repeat_speaker=False
         )
-        manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
+        manager = myGroupChatManager(groupchat=groupchat, llm_config=llm_config)
     else:
         manager = None
     return agents, manager
@@ -231,6 +275,9 @@ async def reply_chat(contents, user, instance):
         partial(check_termination_and_human_reply, instance=instance),
         1,
     )
+    if manager is not None:
+        for agent in agents:
+            agent._reply_func_list.pop(0)
 
     if not init_sender:
         init_sender = agents[0]
@@ -312,7 +359,7 @@ def load_example(event):
         RowAgentWidget(
             value=[
                 "Senior_Python_Engineer",
-                "You are a senior python engineer.
+                f"You are a senior python engineer. {DEFAULT_TERMINATE_MESSAGE}",
                 "RetrieveAssistantAgent",
                 "",
             ]
@@ -323,7 +370,7 @@ def load_example(event):
         RowAgentWidget(
             value=[
                 "User_Proxy",
-                "
+                "",
                 "UserProxyAgent",
                 "",
             ]
@@ -333,7 +380,7 @@ def load_example(event):
         RowAgentWidget(
             value=[
                 "Assistant_Agent",
-                "
+                "",
                 "AssistantAgent",
                 "",
             ]
@@ -344,7 +391,7 @@ def load_example(event):
         RowAgentWidget(
             value=[
                 "Boss",
-                "The boss who ask questions and give tasks.
+                f"The boss who ask questions and give tasks. {DEFAULT_TERMINATE_MESSAGE}",
                 "UserProxyAgent",
                 "",
             ]
@@ -354,7 +401,7 @@ def load_example(event):
         RowAgentWidget(
             value=[
                 "Senior_Python_Engineer",
-                "You are a senior python engineer.
+                f"You are a senior python engineer. {DEFAULT_TERMINATE_MESSAGE}",
                 "AssistantAgent",
                 "",
             ]
@@ -364,7 +411,7 @@ def load_example(event):
         RowAgentWidget(
             value=[
                 "Product_Manager",
-                "You are a product manager.
+                f"You are a product manager. {DEFAULT_TERMINATE_MESSAGE}",
                 "AssistantAgent",
                 "",
            ]
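For context, a minimal sketch (not part of the commit) of how a callback like `send_messages` above gets attached, assuming AutoGen's `ConversableAgent.register_reply(trigger, reply_func, position)` API; `FakeChatUI` is a hypothetical stand-in for the Panel `ChatInterface` (`chatiface`) that app.py creates elsewhere:

```python
import autogen


class FakeChatUI:
    """Hypothetical stand-in for the Panel ChatInterface (`chatiface`) in app.py."""

    def send(self, content, user=None, respond=False):
        print(f"[{user}] {content}")


chatiface = FakeChatUI()


def send_messages(recipient, messages, sender, config):
    # Mirror the newest message into the chat UI without consuming the turn.
    chatiface.send(messages[-1]["content"], user=sender.name, respond=False)
    return False, None  # (final, reply): False lets the remaining reply functions run


assistant = autogen.AssistantAgent(name="assistant", llm_config=False)
user_proxy = autogen.UserProxyAgent(name="user", human_input_mode="NEVER", code_execution_config=False)

# Trigger on messages from any agent; the `1` mirrors the position used for
# `check_termination_and_human_reply` in app.py's reply_chat.
for agent in (assistant, user_proxy):
    agent.register_reply([autogen.Agent, None], send_messages, 1)
```

A per-agent hook alone would miss messages the manager relays in a group chat, which appears to be why the commit also overrides `_process_received_message` in `myGroupChatManager` above.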
autogen_utils.py
CHANGED
@@ -1,4 +1,6 @@
 import asyncio
+import os
+import random
 import sys
 import textwrap
 import threading
@@ -18,7 +20,15 @@ from autogen.agentchat.contrib.retrieve_assistant_agent import RetrieveAssistantAgent
 from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
 from autogen.agentchat.contrib.teachable_agent import TeachableAgent
 from autogen.code_utils import extract_code
-from configs import
+from configs import (
+    DEFAULT_AUTO_REPLY,
+    DEFAULT_SYSTEM_MESSAGE,
+    Q1,
+    Q2,
+    Q3,
+    TIMEOUT,
+    TITLE,
+)
 
 try:
     from termcolor import colored
@@ -89,7 +99,7 @@ def new_generate_oai_reply(
         else:
             msg["role"] = "assistant"
     if len(_messages) % 2 == 1:
-        _messages.append({"content":
+        _messages.append({"content": DEFAULT_AUTO_REPLY, "role": "user"})
     # print(f"messages: {_messages}")
     response = client.create(context=_context, messages=_messages)
     # print(f"{response=}")
@@ -99,20 +109,24 @@
 def initialize_agents(
     llm_config, agent_name, system_msg, agent_type, retrieve_config=None, code_execution_config=False
 ):
+    agent_name = agent_name.strip()
+    system_msg = system_msg.strip()
+
     if "RetrieveUserProxyAgent" == agent_type:
         agent = RetrieveUserProxyAgent(
             name=agent_name,
-
+            system_message=system_msg,
+            is_termination_msg=_is_termination_msg,
             human_input_mode="TERMINATE",
             max_consecutive_auto_reply=5,
             retrieve_config=retrieve_config,
             code_execution_config=code_execution_config,  # set to False if you don't want to execute the code
-            default_auto_reply=
+            default_auto_reply=DEFAULT_AUTO_REPLY,
         )
     elif "GPTAssistantAgent" == agent_type:
         agent = GPTAssistantAgent(
             name=agent_name,
-            instructions=system_msg,
+            instructions=system_msg if system_msg else DEFAULT_SYSTEM_MESSAGE,
             llm_config=llm_config,
             is_termination_msg=termination_msg,
         )
@@ -125,7 +139,7 @@ def initialize_agents(
         }
         agent = CompressibleAgent(
             name=agent_name,
-            system_message=system_msg,
+            system_message=system_msg if system_msg else DEFAULT_SYSTEM_MESSAGE,
             llm_config=llm_config,
             compress_config=compress_config,
             is_termination_msg=termination_msg,
@@ -136,7 +150,7 @@ def initialize_agents(
             is_termination_msg=termination_msg,
             human_input_mode="TERMINATE",
             system_message=system_msg,
-            default_auto_reply=
+            default_auto_reply=DEFAULT_AUTO_REPLY,
             max_consecutive_auto_reply=5,
             code_execution_config=code_execution_config,
         )
@@ -145,7 +159,7 @@ def initialize_agents(
             name=agent_name,
             is_termination_msg=termination_msg,
             human_input_mode="NEVER",
-            system_message=system_msg,
+            system_message=system_msg if system_msg else DEFAULT_SYSTEM_MESSAGE,
             llm_config=llm_config,
         )
         # if any(["ernie" in cfg["model"].lower() for cfg in llm_config["config_list"]]):
@@ -261,11 +275,26 @@ async def check_termination_and_human_reply(
     return False, None
 
 
+async def format_code(code_to_format: str) -> str:
+    """Format the code using isort and black."""
+    filename = f"temp_code_{int(time.time())}_{random.randint(10000, 99999)}.py"
+    with open(filename, "w") as file:
+        file.write(code_to_format)
+    isort.file(filename, profile="black", known_first_party=["autogen"], float_to_top=True)
+
+    formatted_code = ""
+    with open(filename, "r") as file:
+        formatted_code = file.read()
+    os.remove(filename)
+    return formatted_code
+
+
 async def generate_code(agents, manager, contents, code_editor):
     code = """import autogen
 import os
 from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
 from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent
+from autogen.code_utils import extract_code
 
 config_list = autogen.config_list_from_json(
     "OAI_CONFIG_LIST",
@@ -291,31 +320,52 @@ def termination_msg(x):
     _msg = str(x.get("content", "")).upper().strip().strip("\\n").strip(".")
     return isinstance(x, dict) and (_msg.endswith("TERMINATE") or _msg.startswith("TERMINATE"))
 
+def _is_termination_msg(message):
+    if isinstance(message, dict):
+        message = message.get("content")
+        if message is None:
+            return False
+    cb = extract_code(message)
+    contain_code = False
+    for c in cb:
+        # todo: support more languages
+        if c[0] == "python":
+            contain_code = True
+            break
+    return not contain_code
+
 agents = []
 
 """
 
     for agent in agents:
         if isinstance(agent, RetrieveUserProxyAgent):
+            _retrieve_config = agent._retrieve_config
+            _retrieve_config["client"] = 'chromadb.PersistentClient(path=".chromadb")'
             _code = f"""from autogen.agentchat.contrib.retrieve_user_proxy_agent import RetrieveUserProxyAgent
+import chromadb
 
 agent = RetrieveUserProxyAgent(
     name="{agent.name}",
-
+    system_message=\"\"\"{agent.system_message}\"\"\",
+    is_termination_msg=_is_termination_msg,
     human_input_mode="TERMINATE",
     max_consecutive_auto_reply=5,
-    retrieve_config={agent._retrieve_config},
+    retrieve_config={_retrieve_config},
     code_execution_config={agent._code_execution_config},  # set to False if you don't want to execute the code
-    default_auto_reply="
+    default_auto_reply="{DEFAULT_AUTO_REPLY}",
 )
 
 """
+            _code = _code.replace(
+                """'chromadb.PersistentClient(path=".chromadb")'""", "chromadb.PersistentClient(path='.chromadb')"
+            )
         elif isinstance(agent, GPTAssistantAgent):
            _code = f"""from auotgen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
 
 agent = GPTAssistantAgent(
     name="{agent.name}",
-    instructions
+    instructions=\"\"\"{agent.system_message}\"\"\",
     llm_config=llm_config,
     is_termination_msg=termination_msg,
 )
@@ -333,7 +383,7 @@ compress_config = {{
 
 agent = CompressibleAgent(
     name="{agent.name}",
-    system_message
+    system_message=\"\"\"{agent.system_message}\"\"\",
     llm_config=llm_config,
     compress_config=compress_config,
     is_termination_msg=termination_msg,
@@ -347,7 +397,7 @@ agent = UserProxyAgent(
     name="{agent.name}",
     is_termination_msg=termination_msg,
     human_input_mode="TERMINATE",
-    default_auto_reply="
+    default_auto_reply="{DEFAULT_AUTO_REPLY}",
     max_consecutive_auto_reply=5,
     code_execution_config={agent._code_execution_config},
 )
@@ -358,10 +408,9 @@ agent = UserProxyAgent(
 
 agent = RetrieveAssistantAgent(
     name="{agent.name}",
-    system_message
+    system_message=\"\"\"{agent.system_message}\"\"\",
     llm_config=llm_config,
     is_termination_msg=termination_msg,
-    retrieve_config={agent._retrieve_config},
 )
 
 """
@@ -370,7 +419,7 @@ agent = RetrieveAssistantAgent(
 
 agent = AssistantAgent(
     name="{agent.name}",
-    system_message
+    system_message=\"\"\"{agent.system_message}\"\"\",
     llm_config=llm_config,
     is_termination_msg=termination_msg,
 )
@@ -393,7 +442,7 @@ if not init_sender:
     if manager:
         _code = """
 groupchat = autogen.GroupChat(
-    agents=agents, messages=[], max_round=12, speaker_selection_method="
+    agents=agents, messages=[], max_round=12, speaker_selection_method="auto", allow_repeat_speaker=False
 ) # todo: auto, sometimes message has no name
 manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
 
@@ -413,4 +462,4 @@ else:
     """
     code += _code
     code = textwrap.dedent(code)
-    code_editor.value =
+    code_editor.value = await format_code(code)
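As a quick sanity check (not part of the commit), the `_is_termination_msg` heuristic that `generate_code` now embeds in the exported script stops the chat only when a reply contains no python code block. `extract_code` is autogen's helper returning `(language, code)` pairs; the expected outputs below are assumptions based on that behavior:

```python
from autogen.code_utils import extract_code


def _is_termination_msg(message):
    # Same heuristic as in the generated script: a reply with no python
    # code block left to execute is treated as a terminating message.
    if isinstance(message, dict):
        message = message.get("content")
        if message is None:
            return False
    cb = extract_code(message)
    contain_code = False
    for c in cb:
        if c[0] == "python":
            contain_code = True
            break
    return not contain_code


fence = "`" * 3  # avoids literal triple backticks inside this snippet
print(_is_termination_msg({"content": "All done. TERMINATE"}))                # expected: True
print(_is_termination_msg({"content": f"{fence}python\nprint(1)\n{fence}"}))  # expected: False
```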
configs.py
CHANGED
@@ -1,5 +1,11 @@
+import autogen
+
 TIMEOUT = 60
 TITLE = "Microsoft AutoGen Playground"
 Q1 = "What's AutoGen?"
 Q2 = "Write a python function to compute the sum of numbers."
 Q3 = "find papers on LLM applications from arxiv in the last week, create a markdown table of different domains."
+
+DEFAULT_SYSTEM_MESSAGE = autogen.AssistantAgent.DEFAULT_SYSTEM_MESSAGE
+DEFAULT_TERMINATE_MESSAGE = "Reply `TERMINATE` in the end if the task is done."
+DEFAULT_AUTO_REPLY = "Good, thank you. Reply `TERMINATE` to finish."
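For illustration (a sketch, not part of the commit), here is where the two new string constants end up, per the diffs above: DEFAULT_TERMINATE_MESSAGE is appended to the example system prompts in app.py, and DEFAULT_AUTO_REPLY pads an odd-length rebuilt history in `new_generate_oai_reply` so the request sent to the model ends on a user turn:

```python
from configs import DEFAULT_AUTO_REPLY, DEFAULT_TERMINATE_MESSAGE  # configs.py from this commit

# app.py: the terminate hint is appended to each example system prompt.
system_msg = f"You are a product manager. {DEFAULT_TERMINATE_MESSAGE}"

# autogen_utils.py (new_generate_oai_reply): pad an odd-length history with a
# closing user turn before calling client.create(...).
_messages = [{"content": "Hello! How can I help?", "role": "assistant"}]
if len(_messages) % 2 == 1:
    _messages.append({"content": DEFAULT_AUTO_REPLY, "role": "user"})
```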
|