harry85 committed
Commit ff0aee5
1 Parent(s): dcf3ce8

Upload 4 files

Files changed (4):
  1. README.md +10 -9
  2. app.py +34 -0
  3. multi_agent.py +90 -0
  4. requirements.txt +3 -0
README.md CHANGED
@@ -1,13 +1,14 @@
  ---
- title: AI Agent Language
- emoji: 👁
- colorFrom: blue
- colorTo: red
- sdk: streamlit
- sdk_version: 1.36.0
  app_file: app.py
- pinned: false
- license: mit
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

  ---
+ title: Multi-Agent AI - Coding
+ emoji:
+ colorFrom: yellow
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 4.36.1
  app_file: app.py
+ pinned: true
+ license: apache-2.0
+ short_description: Multi-Agent AI with Microsoft AutoGen
  ---

+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,34 @@
+ import gradio as gr
+ import datetime, os, threading
+
+ from multi_agent import run_multi_agent
+
+ lock = threading.Lock()
+
+ LLM = "gpt-4o"
+
+ def invoke(openai_api_key, task):
+     if not openai_api_key:
+         raise gr.Error("OpenAI API Key is required.")
+
+     if not task:
+         raise gr.Error("Task is required.")
+
+     raise gr.Error("Please clone space due to local code execution.")
+
+     with lock:
+         os.environ["OPENAI_API_KEY"] = openai_api_key
+         result = run_multi_agent(LLM, task)
+         del os.environ["OPENAI_API_KEY"]
+         return result
+
+ gr.close_all()
+
+ demo = gr.Interface(fn = invoke,
+                     inputs = [gr.Textbox(label = "OpenAI API Key", type = "password", lines = 1),
+                               gr.Textbox(label = "Task", value = f"Today is {datetime.date.today()}. {os.environ['INPUT']}")],
+                     outputs = [gr.Markdown(label = "Output", value = os.environ["OUTPUT"], line_breaks = True, sanitize_html = False)],
+                     title = "Multi-Agent AI: Coding",
+                     description = os.environ["DESCRIPTION"])
+
+ demo.launch()
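
Note on app.py: invoke() raises gr.Error("Please clone space due to local code execution.") unconditionally, so the hosted Space never reaches the lock-protected block that actually calls run_multi_agent; that block only runs in a clone with the guard removed. The INPUT, OUTPUT, and DESCRIPTION environment variables read at interface-construction time are Space-level settings and are not part of this commit. A minimal local-run sketch, with hypothetical placeholders for those variables:

# local_run.py -- hypothetical helper, not part of this commit.
# Assumes the Space has been cloned and the unconditional gr.Error guard in
# invoke() has been removed. Placeholder values stand in for the Space-level
# INPUT, OUTPUT, and DESCRIPTION variables that app.py reads from the environment.
import os

os.environ.setdefault("INPUT", "Write and run Python code that saves a small matplotlib chart to chart.png.")  # placeholder task
os.environ.setdefault("OUTPUT", "")  # placeholder for the prefilled output panel
os.environ.setdefault("DESCRIPTION", "Multi-Agent AI with Microsoft AutoGen")  # placeholder description

import app  # importing app.py builds the gr.Interface and calls demo.launch()
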
multi_agent.py ADDED
@@ -0,0 +1,90 @@
+ import base64, datetime, json, os
+
+ from autogen import ConversableAgent, AssistantAgent
+ from autogen.coding import LocalCommandLineCodeExecutor
+
+ def read_file(file_path: str) -> str:
+     with open(file_path, "r", encoding="utf-8") as file:
+         return file.read()
+
+ def read_image_file(image_file_path: str) -> str:
+     with open(image_file_path, "rb") as image_file:
+         image_data = image_file.read()
+     return base64.b64encode(image_data).decode("utf-8")
+
+ def generate_markdown_image(image_data: str) -> str:
+     return f"![Image](data:image/png;base64,{image_data})"
+
+ def format_as_markdown(code: str) -> str:
+     markdown_code = '```\n'
+     markdown_code += code
+     markdown_code += '\n```'
+     return markdown_code
+
+ def get_latest_file(directory, file_extension):
+     latest_file = None
+     latest_date = datetime.datetime.min
+
+     for file in os.listdir(directory):
+         if file:
+             _, file_ext = os.path.splitext(file)
+
+             if file_ext == file_extension:
+                 file_path = os.path.join(directory, file)
+                 file_date = datetime.datetime.fromtimestamp(os.path.getmtime(file_path))
+
+                 if file_date > latest_date:
+                     latest_date = file_date
+                     latest_file = file
+
+     return latest_file
+
+ def run_multi_agent(llm, task):
+     llm_config = {"model": llm}
+
+     executor = LocalCommandLineCodeExecutor(
+         timeout=60,
+         work_dir="coding",
+     )
+
+     code_executor_agent = ConversableAgent(
+         name="code_executor_agent",
+         llm_config=False,
+         code_execution_config={"executor": executor},
+         human_input_mode="NEVER",
+         default_auto_reply="TERMINATE",
+     )
+
+     code_writer_agent = AssistantAgent(
+         name="code_writer_agent",
+         llm_config=llm_config,
+         code_execution_config=False,
+         human_input_mode="NEVER",
+     )
+
+     chat_result = code_executor_agent.initiate_chat(
+         code_writer_agent,
+         message=task,
+         max_turns=10
+     )
+
+     chat = ""
+     first_message = True
+
+     for message in chat_result.chat_history:
+         if not first_message:
+             chat += f"**{message['role'].replace('assistant', 'Code Executor').replace('user', 'Code Writer')}**\n{message['content']}\n\n"
+         first_message = False
+
+     file_name_png = get_latest_file("coding", ".png")
+
+     image_data = read_image_file(f"/home/user/app/coding/{file_name_png}")
+     markdown_code_png = generate_markdown_image(image_data)
+
+     result = f"{markdown_code_png}\n\n{chat}"
+
+     print("===")
+     print(result)
+     print("===")
+
+     return result
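
Note on multi_agent.py: run_multi_agent() pairs a non-LLM code_executor_agent (which runs generated code via LocalCommandLineCodeExecutor in the coding/ work directory) with an LLM-backed code_writer_agent, lets them converse for up to 10 turns, then returns the conversation (minus the initial task message) plus the newest PNG from the work directory, rendered as Markdown. A minimal sketch of calling it directly, outside the Gradio UI; the API key is a placeholder, and the image step assumes the hardcoded Space path /home/user/app/coding/, so it may need adjusting elsewhere:

# direct_run.py -- hypothetical driver, not part of this commit.
# pyautogen executes the generated code on the local machine, so run this in a
# sandboxed environment.
import os
from multi_agent import run_multi_agent

os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; supply a real key
markdown_result = run_multi_agent(
    "gpt-4o",
    "Write and run Python code that saves a simple matplotlib chart to chart.png.",
)
print(markdown_result)
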
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ chess==1.10.0
+ markdown==3.6
+ pyautogen==0.2.25
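
Note on requirements.txt: the autogen imports in multi_agent.py are provided by the pinned pyautogen==0.2.25 distribution (the import name is autogen). A quick sanity-check sketch after installing the requirements:

# version_check.py -- small sanity check, not part of this commit.
# Confirms the autogen import resolves and that the installed version matches the pin.
import autogen

print(autogen.__version__)  # expected: 0.2.25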