thinkall commited on
Commit
f80dc67
1 Parent(s): e066d55

Init for hf

Browse files
Files changed (5) hide show
  1. README.md +23 -1
  2. app.py +420 -0
  3. autogen.png +0 -0
  4. human.png +0 -0
  5. requirements.txt +3 -0
README.md CHANGED
@@ -10,4 +10,26 @@ pinned: false
10
  license: mit
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  license: mit
11
  ---
12
 
13
+
14
+ # Microsoft AutoGen: Multi-Round Human Interaction Chatbot Demo
15
+
16
+ This demo shows how to build a chatbot which can handle multi-round conversations with human interactions.
17
+
18
+ ## Run app
19
+ ```
20
+ # Install dependencies
21
+ pip install -U -r requirements.txt
22
+
23
+ # Launch app
24
+ python app.py
25
+ ```
26
+
27
+ ## Run docker locally
28
+ ```
29
+ docker build -t autogen/groupchat .
30
+ docker run -it -p 7860:7860 autogen/groupchat
31
+ ```
32
+
33
+ #### [GitHub](https://github.com/microsoft/autogen) [SourceCode](https://github.com/thinkall/autogen-demos)
34
+
35
+ ![](autogen-human-input.gif)
app.py ADDED
@@ -0,0 +1,420 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from gradio import ChatInterface, Request
3
+ from gradio.helpers import special_args
4
+ import anyio
5
+ import os
6
+ import threading
7
+ import sys
8
+ from itertools import chain
9
+ import autogen
10
+ from autogen.code_utils import extract_code
11
+ from autogen import UserProxyAgent, AssistantAgent, Agent, OpenAIWrapper
12
+
13
+
14
+ LOG_LEVEL = "INFO"
15
+ TIMEOUT = 60
16
+
17
+
18
class myChatInterface(ChatInterface):
    """ChatInterface whose submit handler does NOT append fn's return value.

    The wrapped fn (``respond``) rewrites the chat history list in place with
    the whole multi-agent exchange, so appending ``[message, response]`` here
    would duplicate turns; the handler simply returns the already-updated
    history twice (once for the Chatbot, once for the saved state).
    """

    async def _submit_fn(
        self,
        message: str,
        history_with_input: list[list[str | None]],
        request: Request,
        *args,
    ) -> tuple[list[list[str | None]], list[list[str | None]]]:
        # Drop the just-typed [message, None] pair; respond() re-adds the
        # full round itself.
        history = history_with_input[:-1]
        inputs, _, _ = special_args(
            self.fn, inputs=[message, history, *args], request=request
        )

        if self.is_async:
            response = await self.fn(*inputs)
        else:
            # Sync fn: run in a worker thread so the event loop stays responsive.
            response = await anyio.to_thread.run_sync(
                self.fn, *inputs, limiter=self.limiter
            )

        # history.append([message, response])  # intentionally skipped: fn mutates history in place
        return history, history
40
+
41
+
42
+ with gr.Blocks() as demo:
43
+
44
def flatten_chain(list_of_lists):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [item for sublist in list_of_lists for item in sublist]
46
+
47
class thread_with_trace(threading.Thread):
    # Killable worker thread that also captures its target's return value.
    # https://www.geeksforgeeks.org/python-different-ways-to-kill-a-thread/
    # https://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread
    def __init__(self, *args, **keywords):
        threading.Thread.__init__(self, *args, **keywords)
        self.killed = False  # set by kill(); polled by the trace hook
        self._return = None  # target's return value, surfaced via join()

    def start(self):
        # Swap run() for the traced wrapper so the trace function is
        # installed from inside the new thread, then start normally.
        self.__run_backup = self.run
        self.run = self.__run
        threading.Thread.start(self)

    def __run(self):
        # Install the per-thread trace hook; it lets localtrace() abort the
        # thread at the next executed line after kill() was called.
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup

    def run(self):
        # Mirrors threading.Thread.run but records the return value.
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)

    def globaltrace(self, frame, event, arg):
        # Only function-call events need the line-level tracer attached.
        if event == "call":
            return self.localtrace
        else:
            return None

    def localtrace(self, frame, event, arg):
        # Once killed, raise SystemExit on the next traced line, which
        # unwinds and ends the thread.
        if self.killed:
            if event == "line":
                raise SystemExit()
        return self.localtrace

    def kill(self):
        # Cooperative kill: takes effect at the next traced Python line;
        # NOTE(review): a thread blocked inside a C call is not interrupted.
        self.killed = True

    def join(self, timeout=0):
        # NOTE(review): default timeout=0 (non-blocking) deliberately differs
        # from threading.Thread.join's default of None (block forever);
        # callers here either pass timeout=TIMEOUT or join right after kill().
        threading.Thread.join(self, timeout)
        return self._return
87
+
88
def update_agent_history(recipient, messages, sender, config):
    """Pass-through autogen reply hook.

    Looks at the most recent message but never intercepts it; kept (currently
    unregistered, see the commented register_reply calls) as a place to
    accumulate an external agent history.
    """
    config = recipient if config is None else config
    if messages is None:
        messages = recipient._oai_messages[sender]
    latest = messages[-1]
    content = latest.get("content", "")
    # config.append(content) if content is not None else None  # config can be agent_history
    # (False, None): this hook produced no final reply, keep the flow going.
    return False, None
97
+
98
def _is_termination_msg(message):
    """Decide whether the user proxy should stop auto-replying.

    The exchange terminates as soon as the message contains no python code
    block (i.e. nothing is left to execute).
    """
    if isinstance(message, dict):
        message = message.get("content")
        if message is None:
            return False
    # todo: support more languages
    code_blocks = extract_code(message)
    return not any(block[0] == "python" for block in code_blocks)
114
+
115
def initialize_agents(config_list):
    """Create the assistant/userproxy agent pair used for each chat round.

    Returns ``(assistant, userproxy)``. Both cap auto-replies at 5 so a
    runaway exchange cannot loop forever.
    """
    assistant = AssistantAgent(
        name="assistant",
        max_consecutive_auto_reply=5,
        llm_config={
            # "seed": 42,
            "timeout": TIMEOUT,
            "config_list": config_list,
        },
    )

    userproxy = UserProxyAgent(
        name="userproxy",
        human_input_mode="NEVER",  # fully automated; the human interacts via the web UI
        is_termination_msg=_is_termination_msg,
        max_consecutive_auto_reply=5,
        # code_execution_config=False,
        code_execution_config={
            "work_dir": "coding",  # generated code/outputs land here (used by "show file:")
            "use_docker": False,  # set to True or image name like "python:3" to use docker
        },
    )

    # assistant.register_reply([Agent, None], update_agent_history)
    # userproxy.register_reply([Agent, None], update_agent_history)

    return assistant, userproxy
142
+
143
def chat_to_oai_message(chat_history):
    """Convert Gradio chat history ([user, assistant] pairs) to OpenAI messages.

    The result is spliced into the assistant's system messages by
    initiate_chat to replay prior context. Fix: only string user turns are
    checked for the "exitcode" prefix — the history may also contain None
    placeholders or file tuples like ``(filepath,)`` (appended by
    initiate_chat), on which ``.startswith`` would raise AttributeError.
    """
    messages = []
    if LOG_LEVEL == "DEBUG":
        print(f"chat_to_oai_message: {chat_history}")
    for user_turn, bot_turn in chat_history:
        # Code-execution results start with "exitcode: ..."; keep only the
        # leading token so the replayed context stays small.
        if isinstance(user_turn, str) and user_turn.startswith("exitcode"):
            user_content = user_turn.split()[0]
        else:
            user_content = user_turn
        messages.append({"content": user_content, "role": "user"})
        messages.append({"content": bot_turn, "role": "assistant"})
    return messages
159
+
160
def oai_message_to_chat(oai_messages, sender):
    """Convert an OpenAI-format transcript back into Gradio chat pairs.

    Messages are consumed two at a time; an odd trailing message is paired
    with an empty assistant slot.
    """
    messages = oai_messages[sender]
    if LOG_LEVEL == "DEBUG":
        print(f"oai_message_to_chat: {messages}")
    paired = []
    for idx in range(0, len(messages), 2):
        user_part = messages[idx]["content"]
        bot_part = messages[idx + 1]["content"] if idx + 1 < len(messages) else ""
        paired.append([user_part, bot_part])
    return paired
174
+
175
def agent_history_to_chat(agent_history):
    """Convert a flat agent transcript into [user, assistant] pairs.

    An odd-length history gets ``None`` as the final assistant slot.
    """
    pairs = []
    idx = 0
    total = len(agent_history)
    while idx < total:
        user_part = agent_history[idx]
        bot_part = agent_history[idx + 1] if idx + 1 < total else None
        pairs.append([user_part, bot_part])
        idx += 2
    return pairs
186
+
187
def initiate_chat(config_list, user_message, chat_history):
    """Run one round of the userproxy<->assistant conversation.

    Mutates and returns chat_history (a list of [user, assistant] pairs).
    Special cases:
      * no API key configured -> append a prompt asking for keys;
      * "show file: <name>"  -> append the file from ./coding as a Gradio
        file tuple instead of calling the agents.

    Fixes vs. original: the filename is parsed from the original-case text
    (the lowercased copy broke case-sensitive paths), and the not-found
    message actually interpolates the filename (the f-string had no
    placeholder).
    """
    if LOG_LEVEL == "DEBUG":
        print(f"chat_history_init: {chat_history}")
    # agent_history = flatten_chain(chat_history)
    if len(config_list[0].get("api_key", "")) < 2:
        chat_history.append(
            [
                user_message,
                "Hi, nice to meet you! Please enter your API keys in below text boxes.",
            ]
        )
        return chat_history
    else:
        llm_config = {
            # "seed": 42,
            "timeout": TIMEOUT,
            "config_list": config_list,
        }
        assistant.llm_config.update(llm_config)
        # Rebuild the client so freshly entered keys/model take effect.
        assistant.client = OpenAIWrapper(**assistant.llm_config)

    stripped = user_message.strip()
    if stripped.lower().startswith("show file:"):
        # Parse the filename from the original text (not a lowercased copy)
        # so case-sensitive filesystems still find the file.
        filename = stripped[len("show file:"):].strip()
        filepath = os.path.join("coding", filename)
        if os.path.exists(filepath):
            chat_history.append([user_message, (filepath,)])
        else:
            chat_history.append([user_message, f"File {filename} not found."])
        return chat_history

    # Replay prior turns as extra system context; restored after the round.
    assistant.reset()
    oai_messages = chat_to_oai_message(chat_history)
    assistant._oai_system_message_origin = assistant._oai_system_message.copy()
    assistant._oai_system_message += oai_messages

    try:
        userproxy.initiate_chat(assistant, message=user_message)
        messages = userproxy.chat_messages
        chat_history += oai_message_to_chat(messages, assistant)
        # agent_history = flatten_chain(chat_history)
    except Exception as e:
        # Surface the failure inside the chat instead of crashing the UI.
        chat_history.append([user_message, str(e)])

    assistant._oai_system_message = assistant._oai_system_message_origin.copy()
    if LOG_LEVEL == "DEBUG":
        print(f"chat_history: {chat_history}")
    return chat_history
237
+
238
def chatbot_reply_thread(input_text, chat_history, config_list):
    """Run initiate_chat in a killable thread, enforcing TIMEOUT seconds.

    Returns the updated chat history as [user, assistant] pairs. Fix: the
    timeout branch previously returned a flat [input_text, error] list,
    which respond() would splice into the history as two bare strings;
    wrap it in the same [[user, bot]] pair shape the other branches use.
    """
    thread = thread_with_trace(
        target=initiate_chat, args=(config_list, input_text, chat_history)
    )
    thread.start()
    try:
        messages = thread.join(timeout=TIMEOUT)
        if thread.is_alive():
            # Still running past the deadline: kill cooperatively, then
            # reap (join(timeout=0) is non-blocking — see thread_with_trace).
            thread.kill()
            thread.join()
            messages = [
                [
                    input_text,
                    "Timeout Error: Please check your API keys and try again later.",
                ]
            ]
    except Exception as e:
        messages = [
            [
                input_text,
                str(e)
                if len(str(e)) > 0
                else "Invalid Request to OpenAI, please check your API keys.",
            ]
        ]
    return messages
263
+
264
def chatbot_reply_plain(input_text, chat_history, config_list):
    """Unthreaded variant of chatbot_reply_thread (no timeout guard)."""
    try:
        return initiate_chat(config_list, input_text, chat_history)
    except Exception as exc:
        reason = str(exc) or "Invalid Request to OpenAI, please check your API keys."
        return [[input_text, reason]]
278
+
279
def chatbot_reply(input_text, chat_history, config_list):
    """Dispatch one user message to the agents and return the new history.

    Delegates to the threaded variant so a hung LLM call is cut off after
    TIMEOUT seconds; chatbot_reply_plain is the unthreaded alternative.
    """
    return chatbot_reply_thread(input_text, chat_history, config_list)
282
+
283
def get_description_text():
    """Return the markdown header rendered above the chat UI."""
    return """
# Microsoft AutoGen: Multi-Round Human Interaction Chatbot Demo

This demo shows how to build a chatbot which can handle multi-round conversations with human interactions.

#### [AutoGen](https://github.com/microsoft/autogen) [Discord](https://discord.gg/pAbnFJrkgZ) [Paper](https://arxiv.org/abs/2308.08155) [SourceCode](https://github.com/thinkall/autogen-demos)
"""
291
+
292
def update_config():
    """Build the LLM config list from environment variables.

    Falls back to a placeholder Azure OpenAI entry with an empty api_key
    when no key is configured; initiate_chat detects the empty key and
    prompts the user for credentials.
    """
    model = os.environ.get("MODEL", "gpt-35-turbo")
    config_list = autogen.config_list_from_models(model_list=[model])
    if not config_list:
        config_list = [
            {
                "api_key": "",
                "base_url": "",
                "api_type": "azure",
                "api_version": "2023-07-01-preview",
                "model": "gpt-35-turbo",
            }
        ]
    return config_list
308
+
309
def set_params(model, oai_key, aoai_key, aoai_base):
    """Persist the UI-provided model/credentials into environment variables."""
    values = {
        "MODEL": model,
        "OPENAI_API_KEY": oai_key,
        "AZURE_OPENAI_API_KEY": aoai_key,
        "AZURE_OPENAI_API_BASE": aoai_base,
    }
    for env_name, env_value in values.items():
        os.environ[env_name] = env_value
314
+
315
def respond(message, chat_history, model, oai_key, aoai_key, aoai_base):
    """Gradio submit callback wired into myChatInterface.

    Stores the credentials in env vars, rebuilds the config list, and
    replaces chat_history in place with the full agent exchange. Returns ""
    so the input textbox is cleared; the Chatbot re-renders from the
    mutated chat_history.
    """
    set_params(model, oai_key, aoai_key, aoai_base)
    config_list = update_config()
    # Slice-assign so the very list object Gradio holds is updated in place.
    chat_history[:] = chatbot_reply(message, chat_history, config_list)
    if LOG_LEVEL == "DEBUG":
        print(f"return chat_history: {chat_history}")
    return ""
322
+
323
# Start with a placeholder (empty-key) config; real keys arrive via the UI.
# Fix: the original unpacked (config_list, None, None) and immediately
# overwrote assistant/userproxy — the None assignments were dead code.
config_list = [
    {
        "api_key": "",
        "base_url": "",
        "api_type": "azure",
        "api_version": "2023-07-01-preview",
        "model": "gpt-35-turbo",
    }
]
assistant, userproxy = initialize_agents(config_list)

description = gr.Markdown(get_description_text())

# Model selection and credential inputs (masked).
with gr.Row() as params:
    txt_model = gr.Dropdown(
        label="Model",
        choices=[
            "gpt-4",
            "gpt-35-turbo",
            "gpt-3.5-turbo",
        ],
        allow_custom_value=True,
        value="gpt-35-turbo",
        container=True,
    )
    txt_oai_key = gr.Textbox(
        label="OpenAI API Key",
        placeholder="Enter OpenAI API Key",
        max_lines=1,
        show_label=True,
        container=True,
        type="password",  # never echo secrets in the UI
    )
    txt_aoai_key = gr.Textbox(
        label="Azure OpenAI API Key",
        placeholder="Enter Azure OpenAI API Key",
        max_lines=1,
        show_label=True,
        container=True,
        type="password",
    )
    txt_aoai_base_url = gr.Textbox(
        label="Azure OpenAI API Base",
        placeholder="Enter Azure OpenAI Base Url",
        max_lines=1,
        show_label=True,
        container=True,
        type="password",
    )

# Chat transcript; render=False because myChatInterface mounts it itself.
chatbot = gr.Chatbot(
    [],
    elem_id="chatbot",
    bubble_full_width=False,
    avatar_images=(
        "human.png",
        (os.path.join(os.path.dirname(__file__), "autogen.png")),
    ),
    render=False,
    height=800,
)

txt_input = gr.Textbox(
    scale=4,
    show_label=False,
    placeholder="Enter text and press enter",
    container=False,
    render=False,
    autofocus=True,
)

# Fix: example prompt said "production" where "product" was meant.
chatiface = myChatInterface(
    respond,
    chatbot=chatbot,
    textbox=txt_input,
    additional_inputs=[
        txt_model,
        txt_oai_key,
        txt_aoai_key,
        txt_aoai_base_url,
    ],
    examples=[
        ["write a python function to count the sum of two numbers?"],
        ["what if the product of two numbers?"],
        [
            "Plot a chart of the last year's stock prices of Microsoft, Google and Apple and save to stock_price.png."
        ],
        ["show file: stock_price.png"],
    ],
)
417
+
418
+
419
if __name__ == "__main__":
    # share=True publishes a public Gradio link; server_name="0.0.0.0" makes
    # the app reachable from outside the container (Docker / HF Spaces).
    demo.launch(share=True, server_name="0.0.0.0")
autogen.png ADDED
human.png ADDED
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ pyautogen==0.2.0b4
2
+ gradio>=4.0.0
3
+ yfinance