init
Browse files
- .gitignore +2 -0
- Action/__init__.py +1 -0
- Action/base_action.py +51 -0
- Agent/Agent.py +243 -0
- Agent/__init__.py +1 -0
- Component/ExtraComponent.py +128 -0
- Component/PromptComponent.py +126 -0
- Component/ToolComponent.py +887 -0
- Component/__init__.py +3 -0
- Environment/__init__.py +1 -0
- Environment/base_environment.py +177 -0
- LLM/__init__.py +0 -0
- LLM/base_LLM.py +137 -0
- Memory/__init__.py +1 -0
- Memory/base_Memory.py +32 -0
- Prompt/__init__.py +1 -0
- Prompt/base_Prompts.py +84 -0
- SOP.py +291 -0
- State.py +142 -0
- app.py +398 -0
- config.json +501 -0
- design_states.py +94 -0
- evolve.py +17 -0
- gen_utils.py +59 -0
- gradio_backend.py +125 -0
- gradio_base.py +574 -0
- gradio_config.py +439 -0
- image.jpg +0 -0
- muti_prompts.py +264 -0
- requirements.txt +10 -0
- template.py +111 -0
- utils.py +482 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
__pycache__
logs

Action/__init__.py
ADDED
@@ -0,0 +1 @@
from .base_action import Action

Action/base_action.py
ADDED
@@ -0,0 +1,51 @@
from Memory import Memory
from utils import extract
import os

class Action:
    """
    The basic action unit of an agent.
    """
    def __init__(self, **kwargs):
        self.response = None
        self.is_user = False
        self.res_dict = {}
        self.name = ""
        self.role = ""
        for key, value in kwargs.items():
            setattr(self, key, value)

    def process(self):
        """
        Process the action.
        Return: memory (Memory)
        """
        response = self.response
        send_name = self.name
        send_role = self.role
        all = ""
        for res in response:
            all += res
        parse = f"{send_name}:"

        # Remove echoed third-person speaker prefixes from the dialogue.
        while parse in all:
            index = all.index(parse) + len(parse)
            all = all[index:]

        if not self.is_user:
            print(f"{send_name}({send_role}):{all}")
        # for software: if the response carries a <title> tag, write the
        # extracted <python> code block to output_code/<title>
        if "<title>" in all:
            title = extract(all, "title")
            title = "main.py" if title == "" else title
            python = extract(all, "python")
            os.makedirs("output_code", exist_ok=True)
            file_name = "output_code/" + title
            with open(file_name, "w", encoding="utf-8") as f:
                f.write(python)
        memory = Memory(send_role, send_name, all)
        return memory

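For orientation, here is a minimal, self-contained sketch of the stream-draining and prefix-stripping logic that `process` applies; the generator below is a stand-in for the real streamed LLM response, and `drain_and_strip` is a hypothetical helper, not a function from this repo:

    # Minimal sketch (hypothetical stand-ins, not the repo's classes):
    # shows how Action.process drains a token stream and strips any
    # leading "name:" speaker prefixes the model may have echoed.
    def drain_and_strip(response_stream, send_name):
        text = ""
        for chunk in response_stream:  # the response is consumed chunk by chunk
            text += chunk
        prefix = f"{send_name}:"
        while prefix in text:          # drop echoed speaker prefixes
            text = text[text.index(prefix) + len(prefix):]
        return text

    # usage: a generator stands in for the streamed LLM response
    stream = (c for c in "Alice:Hello, how can I help?")
    print(drain_and_strip(stream, "Alice"))  # -> "Hello, how can I help?"
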
Agent/Agent.py
ADDED
@@ -0,0 +1,243 @@
# coding=utf-8
# Copyright 2023 The AIWaves Inc. team.

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LLM autonomous agent"""
import os
import json
from LLM.base_LLM import *
from Component import *
from Action import Action
from Prompt import *

headers = {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache",
    "X-Accel-Buffering": "no",
}


class Agent:
    """
    Auto agent, input the JSON of SOP.
    """

    # Agent should have args: agents, states
    def __init__(self, name, agent_state_roles, **kwargs) -> None:
        self.state_roles = agent_state_roles
        self.name = name

        self.style = kwargs["style"]
        self.LLMs = kwargs["LLMs"]
        self.LLM = None
        self.is_user = kwargs["is_user"]
        self.begins = kwargs["begins"] if "begins" in kwargs else False
        self.current_role = ""
        self.long_term_memory = []
        self.short_term_memory = ""
        self.current_state = None
        self.first_speak = True
        self.environment = None

    @classmethod
    def from_config(cls, config_path):
        """
        Initialize agents based on a JSON file.
        Return:
            agents(dict) : key: agent_name; value: class(Agent)
            names_to_roles(dict) : key: state_name; value: (dict; key: agent_name, value: agent_role)
            roles_to_names(dict) : key: state_name; value: (dict; key: agent_role, value: agent_name)
        """
        with open(config_path) as f:
            config = json.load(f)

        roles_to_names = {}
        names_to_roles = {}
        agents = {}
        user_names = json.loads(os.environ["User_Names"]) if "User_Names" in os.environ else []
        for agent_name, agent_dict in config["agents"].items():
            agent_state_roles = {}
            agent_LLMs = {}
            agent_begins = {}
            for state_name, agent_role in agent_dict["roles"].items():

                agent_begins[state_name] = {}

                if state_name not in roles_to_names:
                    roles_to_names[state_name] = {}
                if state_name not in names_to_roles:
                    names_to_roles[state_name] = {}
                roles_to_names[state_name][agent_role] = agent_name
                names_to_roles[state_name][agent_name] = agent_role
                agent_state_roles[state_name] = agent_role
                current_state = config["states"][state_name]
                current_state["roles"] = list(current_state["agent_states"].keys()) if "roles" not in current_state else current_state["roles"]
                current_state_begin_role = current_state["begin_role"] if "begin_role" in current_state else current_state["roles"][0]
                agent_begins[state_name]["is_begin"] = current_state_begin_role == agent_role if "begin_role" in current_state else False
                agent_begins[state_name]["begin_query"] = current_state["begin_query"] if "begin_query" in current_state else " "
                agent_LLMs[state_name] = init_LLM("logs" + os.sep + f"{agent_name}", **current_state["agent_states"][agent_role])
            agents[agent_name] = cls(
                agent_name,
                agent_state_roles,
                LLMs=agent_LLMs,
                is_user=agent_name in user_names,
                style=agent_dict["style"],
                begins=agent_begins
            )
        assert len(config["agents"].keys()) != 2 or (roles_to_names[config["root"]][config["states"][config["root"]]["begin_role"]] not in user_names and "begin_query" in config["states"][config["root"]]), "In a single-agent scenario, there must be an opening statement and it must come from the agent"
        return agents, roles_to_names, names_to_roles

    def step(self, current_state, input=""):
        """
        Return an action based on the current state and environment.
        Return: action(Action)
        """

        current_state.chat_nums += 1
        state_begin = current_state.is_begin
        agent_begin = self.begins[current_state.name]["is_begin"]
        self.begins[current_state.name]["is_begin"] = False
        current_state.is_begin = False
        environment = self.environment

        self.current_state = current_state
        # First update the information according to the current environment

        response = " "
        res_dict = {}

        if self.is_user:
            response = f"{self.name}:{input}"
        else:
            if len(environment.shared_memory["long_term_memory"]) > 0:
                current_history = self.observe()
                self.long_term_memory.append(current_history)
            if agent_begin:
                response = (char for char in self.begins[current_state.name]["begin_query"])
            else:
                response, res_dict = self.act()

        action_dict = {
            "response": response,
            "res_dict": res_dict,
            "role": self.state_roles[current_state.name],
            "name": self.name,
            "state_begin": state_begin,
            "agent_begin": agent_begin,
            "is_user": self.is_user
        }
        return Action(**action_dict)

    def act(self):
        """
        Return a response based on the current state.
        """
        current_state = self.current_state
        chat_history = self.long_term_memory
        current_LLM = self.LLMs[current_state.name]

        system_prompt, last_prompt, res_dict = self.compile()

        response = current_LLM.get_response(
            chat_history, system_prompt, last_prompt, stream=True
        )
        return response, res_dict

    def update_memory(self, memory):
        self.long_term_memory.append(
            {"role": "assistant", "content": memory.content}
        )

        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
        environment = self.environment
        current_chat_history_idx = environment.current_chat_history_idx if environment.environment_type == "competive" else 0

        current_long_term_memory = environment.shared_memory["long_term_memory"][current_chat_history_idx:]
        last_conversation_idx = environment._get_agent_last_conversation_idx(self, current_long_term_memory)
        if len(current_long_term_memory) - last_conversation_idx >= MAX_CHAT_HISTORY:
            current_state = self.current_state
            current_role = self.state_roles[current_state.name]
            current_component_dict = current_state.components[current_role]

            # get chat history from the new conversation
            conversations = environment._get_agent_new_memory(self, current_long_term_memory)

            # get summary
            summary_prompt = (
                current_state.summary_prompt[current_role]
                if current_state.summary_prompt
                else f"""your name is {self.name},your role is {current_component_dict["style"].role},your task is {current_component_dict["task"].task}.\n"""
            )
            summary_prompt = eval(Agent_summary_system_prompt)
            summary = self.LLMs[current_state.name].get_response(None, summary_prompt, stream=False)
            self.short_term_memory = summary

    def compile(self):
        """
        Get the prompt from the state, depending on your role.
        Return:
            system_prompt: system_prompt for the agent's LLM
            last_prompt: last_prompt for the agent's LLM
            res_dict(dict): other returns from tool components, e.g. search engine results
        """
        current_state = self.current_state
        self.current_roles = self.state_roles[current_state.name]
        current_state_name = current_state.name
        self.LLM = self.LLMs[current_state_name]
        components = current_state.components[self.state_roles[current_state_name]]

        system_prompt = self.current_state.environment_prompt
        last_prompt = ""

        res_dict = {}
        for component in components.values():
            if isinstance(component, (OutputComponent, LastComponent)):
                last_prompt = last_prompt + "\n" + component.get_prompt(self)
            elif isinstance(component, PromptComponent):
                system_prompt = (
                    system_prompt + "\n" + component.get_prompt(self)
                )
            elif isinstance(component, ToolComponent):
                response = component.func(self)
                if "prompt" in response and response["prompt"]:
                    last_prompt = last_prompt + "\n" + response["prompt"]
                res_dict.update(response)

        name = self.name
        query = self.environment.shared_memory["long_term_memory"][-1] if len(self.environment.shared_memory["long_term_memory"]) else ""
        last_prompt = eval(Agent_last_prompt)
        system_prompt = eval(Agent_system_prompt)
        return system_prompt, last_prompt, res_dict

    def observe(self):
        """
        Update one's own memory according to the current environment, including
        updating short-term memory and long-term memory.
        """
        return self.environment._observe(self)

    def generate_sop(self):
        pass

    def reflection(self):
        pass

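To make the `from_config` bookkeeping concrete, here is a small self-contained sketch of how the `roles_to_names` / `names_to_roles` tables are built per state; the config dict and agent names below are hypothetical, but the shape follows the "agents" -> "roles" mapping used by config.json:

    # Toy sketch of the per-state lookup tables built in Agent.from_config
    # (hypothetical agent/state names).
    config = {
        "agents": {
            "Alice": {"roles": {"design_state": "architect"}},
            "Bob": {"roles": {"design_state": "reviewer"}},
        }
    }
    roles_to_names, names_to_roles = {}, {}
    for agent_name, agent_dict in config["agents"].items():
        for state_name, agent_role in agent_dict["roles"].items():
            roles_to_names.setdefault(state_name, {})[agent_role] = agent_name
            names_to_roles.setdefault(state_name, {})[agent_name] = agent_role

    print(roles_to_names)  # {'design_state': {'architect': 'Alice', 'reviewer': 'Bob'}}
    print(names_to_roles)  # {'design_state': {'Alice': 'architect', 'Bob': 'reviewer'}}
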
Agent/__init__.py
ADDED
@@ -0,0 +1 @@
from .Agent import Agent

Component/ExtraComponent.py
ADDED
@@ -0,0 +1,128 @@
from .ToolComponent import ToolComponent
import json
from utils import flatten_dict, get_embedding, matching_category, search_with_api, limit_keys, limit_values
import os


class CategoryRequirementsComponent(ToolComponent):
    def __init__(self, information_path):
        super().__init__()
        self.information_dataset = []
        self.leaf_name = []
        for toy_path in information_path:
            with open(toy_path, encoding="utf-8") as json_file:
                data = json.load(json_file)
            for d in data:
                if "/" in d["cat_leaf_name"]:
                    leaf_names = d["cat_leaf_name"].split("/") + [d["cat_leaf_name"]]
                else:
                    leaf_names = [d["cat_leaf_name"]]
                for name in leaf_names:
                    self.leaf_name.append(name)
                    new_d = d.copy()
                    new_d["cat_leaf_name"] = name
                    new_d["information"] = flatten_dict(new_d["information"])
                    self.information_dataset.append(new_d)

        self.target_embbeding = get_embedding(
            self.leaf_name
        )

    def search_information(self, category, information_dataset):
        knowledge = {}
        for d in information_dataset:
            if category == d["cat_leaf_name"]:
                knowledge = d["information"]
                knowledge = {
                    key: value
                    for key, value in knowledge.items()
                    if (value and key != "相关分类")
                }
                break
        return knowledge

    def func(self, agent):
        prompt = ""
        messages = agent.long_term_memory
        outputdict = {}
        functions = [
            {
                "name": "search_information",
                "description": "根据用户所需要购买商品的种类跟用户的需求去寻找用户所需要的商品",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "category": {
                            "type": "string",
                            "description": "用户现在所需要的商品类别,比如纸尿布,笔记本电脑等,注意,只能有一个",
                        },
                        "requirements": {
                            "type": "string",
                            "description": "用户现在的需求,比如说便宜,安踏品牌等等,可以有多个需求,中间以“ ”分隔",
                        },
                    },
                    "required": ["category", "requirements"],
                },
            }
        ]

        response = agent.LLM.get_response(
            messages,
            None,
            None,
            functions=functions,
            stream=False,
            function_call={"name": "search_information"},
        )
        response_message = json.loads(response["function_call"]["arguments"])
        category = (
            response_message["category"] if response_message["category"] else None
        )
        requirements = (
            response_message["requirements"]
            if response_message["requirements"]
            else category
        )
        if not (category or requirements):
            return {}

        topk_result = matching_category(
            category, self.leaf_name, None, self.target_embbeding, top_k=3
        )

        top1_score = topk_result[1][0]
        request_items, top_category = search_with_api(requirements, category)

        MIN_CATEGORY_SIM = eval(os.environ["MIN_CATEGORY_SIM"]) if "MIN_CATEGORY_SIM" in os.environ else 0.7

        if top1_score > MIN_CATEGORY_SIM:
            agent.environment.shared_memory["category"] = topk_result[0][0]
            category = topk_result[0][0]
            information = self.search_information(
                topk_result[0][0], self.information_dataset
            )
            information = limit_keys(information, 3)
            information = limit_values(information, 2)
            prompt += f"""你需要知道的是:用户目前选择的商品是{category},该商品信息为{information}。你需要根据这些商品信息来详细介绍商品,比如详细介绍商品有哪些品牌,有哪些分类等等,并且询问用户是否有更多的需求。"""
            if category in top_category:
                top_category.remove(category)

            recommend = "\n经过搜索后,推荐商品如下:\n"
            prompt += "筛选出的商品如下:\n"

            for i, request_item in enumerate(request_items):

                itemTitle = request_item["itemTitle"]
                itemPrice = request_item["itemPrice"]
                itemPicUrl = request_item["itemPicUrl"]
                recommend += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]({itemPicUrl})\n"
                prompt += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]\n"
            outputdict["recommend"] = recommend
            print(recommend)
        else:
            prompt += f"""你需要知道的是:用户目前选择的商品是{category},而我们店里没有这类商品,但是我们店里有一些近似商品,如{top_category},{topk_result[0][0]},你需要对这些近似商品进行介绍,并引导用户购买"""
        outputdict["prompt"] = prompt
        return outputdict

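This component relies on OpenAI-style function calling: the model returns its arguments as a JSON string, which is parsed before the local category search runs. A minimal sketch of that parsing step, with a hypothetical payload shaped like the response this component expects:

    import json

    # Hypothetical function_call payload (illustration only).
    response = {
        "function_call": {
            "name": "search_information",
            "arguments": '{"category": "laptop", "requirements": "cheap lightweight"}',
        }
    }
    args = json.loads(response["function_call"]["arguments"])
    category = args["category"] or None
    requirements = args["requirements"] or category
    print(category, "|", requirements)  # laptop | cheap lightweight
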
Component/PromptComponent.py
ADDED
@@ -0,0 +1,126 @@
from abc import abstractmethod


class PromptComponent:
    def __init__(self):
        pass

    @abstractmethod
    def get_prompt(self, agent):
        pass

class TaskComponent(PromptComponent):
    def __init__(self, task):
        super().__init__()
        self.task = task

    def get_prompt(self, agent):
        return f"""The task you need to execute is: {self.task}.\n"""


class OutputComponent(PromptComponent):
    def __init__(self, output):
        super().__init__()
        self.output = output

    def get_prompt(self, agent):
        return f"""Please contact the above to extract <{self.output}> and </{self.output}>, \
do not perform additional output, please output in strict accordance with the above format!\n"""


class SystemComponent(PromptComponent):
    def __init__(self, system_prompt):
        super().__init__()
        self.system_prompt = system_prompt

    def get_prompt(self, agent):
        return self.system_prompt

class LastComponent(PromptComponent):
    def __init__(self, last_prompt):
        super().__init__()
        self.last_prompt = last_prompt

    def get_prompt(self, agent):
        return self.last_prompt


class StyleComponent(PromptComponent):
    """
    Role and style component.
    """

    def __init__(self, role):
        super().__init__()
        self.role = role

    def get_prompt(self, agent):
        name = agent.name
        style = agent.style
        return f"""Now your role is:\n{self.role}, your name is:\n{name}. \
You need to follow the output style:\n{style}.\n"""


class RuleComponent(PromptComponent):
    def __init__(self, rule):
        super().__init__()
        self.rule = rule

    def get_prompt(self, agent):
        return f"""The rule you need to follow is:\n{self.rule}.\n"""


class DemonstrationComponent(PromptComponent):
    """
    Input a list of demonstration answers.
    """

    def __init__(self, demonstrations):
        super().__init__()
        self.demonstrations = demonstrations


    def get_prompt(self, agent):
        prompt = f"Here are demonstrations you can refer to:\n{self.demonstrations}"
        return prompt


class CoTComponent(PromptComponent):
    """
    Input a list of chain-of-thought examples.
    """

    def __init__(self, demonstrations):
        super().__init__()
        self.demonstrations = demonstrations

    def add_demonstration(self, demonstration):
        self.demonstrations.append(demonstration)

    def get_prompt(self, agent):
        prompt = "You need to think in detail before outputting, the thinking case is as follows:\n"
        for demonstration in self.demonstrations:
            prompt += "\n" + demonstration
        return prompt


class CustomizeComponent(PromptComponent):
    """
    Custom template component.
    template(str) : example: "i am {name}" (keyword-style placeholders, since the
        template is filled via str.format(**kwargs))
    keywords(list) : example: ["name"]
    example : agent.environment.shared_memory["name"] = "Lilong"
    The component looks up each keyword in the environment's shared memory and
    substitutes it into the template.
    Return : "i am Lilong"
    """
    def __init__(self, template, keywords) -> None:
        super().__init__()
        self.template = template
        self.keywords = keywords

    def get_prompt(self, agent):
        template_keyword = {}
        for keyword in self.keywords:
            current_keyword = agent.environment.shared_memory[keyword] if keyword in agent.environment.shared_memory else ""
            template_keyword[keyword] = current_keyword
        return self.template.format(**template_keyword)

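The template-filling pattern behind `CustomizeComponent.get_prompt` can be exercised without the rest of the framework. A self-contained sketch, with a dummy agent built from `SimpleNamespace` standing in for the real Agent/Environment objects:

    from types import SimpleNamespace

    # Stand-in for an agent whose environment carries shared memory.
    agent = SimpleNamespace(
        environment=SimpleNamespace(shared_memory={"name": "Lilong"})
    )

    template, keywords = "i am {name}", ["name"]
    filled = template.format(**{
        k: agent.environment.shared_memory.get(k, "") for k in keywords
    })
    print(filled)  # -> "i am Lilong"
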
Component/ToolComponent.py
ADDED
@@ -0,0 +1,887 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from abc import abstractmethod
|
2 |
+
import uuid
|
3 |
+
from text2vec import semantic_search
|
4 |
+
from utils import (
|
5 |
+
get_relevant_history,
|
6 |
+
load_knowledge_base_qa,
|
7 |
+
load_knowledge_base_UnstructuredFile,
|
8 |
+
get_embedding,
|
9 |
+
extract,
|
10 |
+
)
|
11 |
+
import json
|
12 |
+
from typing import Dict, List
|
13 |
+
import os
|
14 |
+
from googleapiclient.discovery import build
|
15 |
+
import requests
|
16 |
+
from selenium import webdriver
|
17 |
+
from selenium.webdriver.common.by import By
|
18 |
+
from selenium.webdriver.support.ui import WebDriverWait
|
19 |
+
from selenium.webdriver.support import expected_conditions as EC
|
20 |
+
from bs4 import BeautifulSoup
|
21 |
+
import base64
|
22 |
+
import re
|
23 |
+
from datetime import datetime, timedelta
|
24 |
+
from typing import Tuple, List, Any, Dict
|
25 |
+
from email.mime.text import MIMEText
|
26 |
+
from email.mime.multipart import MIMEMultipart
|
27 |
+
from google.auth.transport.requests import Request
|
28 |
+
from google.oauth2.credentials import Credentials
|
29 |
+
from google_auth_oauthlib.flow import InstalledAppFlow
|
30 |
+
from googleapiclient.discovery import build
|
31 |
+
from googleapiclient.errors import HttpError
|
32 |
+
from tqdm import tqdm
|
33 |
+
|
34 |
+
class ToolComponent:
|
35 |
+
def __init__(self):
|
36 |
+
pass
|
37 |
+
|
38 |
+
@abstractmethod
|
39 |
+
def func(self):
|
40 |
+
pass
|
41 |
+
|
42 |
+
class KnowledgeBaseComponent(ToolComponent):
|
43 |
+
"""
|
44 |
+
Inject knowledge base
|
45 |
+
top_k : Top_k with the highest matching degree
|
46 |
+
type : "QA" or others
|
47 |
+
knowledge_base(json_path) : knowledge_base_path
|
48 |
+
"""
|
49 |
+
def __init__(self, top_k, type, knowledge_base):
|
50 |
+
super().__init__()
|
51 |
+
self.top_k = top_k
|
52 |
+
self.type = type
|
53 |
+
self.knowledge_base = knowledge_base
|
54 |
+
|
55 |
+
if self.type == "QA":
|
56 |
+
(
|
57 |
+
self.kb_embeddings,
|
58 |
+
self.kb_questions,
|
59 |
+
self.kb_answers,
|
60 |
+
self.kb_chunks,
|
61 |
+
) = load_knowledge_base_qa(self.knowledge_base)
|
62 |
+
else:
|
63 |
+
self.kb_embeddings, self.kb_chunks = load_knowledge_base_UnstructuredFile(
|
64 |
+
self.knowledge_base
|
65 |
+
)
|
66 |
+
|
67 |
+
def func(self, agent):
|
68 |
+
query = (
|
69 |
+
agent.long_term_memory[-1]["content"]
|
70 |
+
if len(agent.long_term_memory) > 0
|
71 |
+
else ""
|
72 |
+
)
|
73 |
+
knowledge = ""
|
74 |
+
query = extract(query, "query")
|
75 |
+
query_embedding = get_embedding(query)
|
76 |
+
hits = semantic_search(query_embedding, self.kb_embeddings, top_k=50)
|
77 |
+
hits = hits[0]
|
78 |
+
temp = []
|
79 |
+
if self.type == "QA":
|
80 |
+
for hit in hits:
|
81 |
+
matching_idx = hit["corpus_id"]
|
82 |
+
if self.kb_chunks[matching_idx] in temp:
|
83 |
+
pass
|
84 |
+
else:
|
85 |
+
knowledge = (
|
86 |
+
knowledge
|
87 |
+
+ f"question:{self.kb_questions[matching_idx]},answer:{self.kb_answers[matching_idx]}\n\n"
|
88 |
+
)
|
89 |
+
temp.append(self.kb_answers[matching_idx])
|
90 |
+
if len(temp) == 1:
|
91 |
+
break
|
92 |
+
print(hits[0]["score"])
|
93 |
+
score = hits[0]["score"]
|
94 |
+
if score < 0.5:
|
95 |
+
return {"prompt": "No matching knowledge base"}
|
96 |
+
else:
|
97 |
+
return {"prompt": "The relevant content is: " + knowledge + "\n"}
|
98 |
+
else:
|
99 |
+
for hit in hits:
|
100 |
+
matching_idx = hit["corpus_id"]
|
101 |
+
if self.kb_chunks[matching_idx] in temp:
|
102 |
+
pass
|
103 |
+
else:
|
104 |
+
knowledge = knowledge + f"{self.kb_answers[matching_idx]}\n\n"
|
105 |
+
temp.append(self.kb_answers[matching_idx])
|
106 |
+
if len(temp) == self.top_k:
|
107 |
+
break
|
108 |
+
print(hits[0]["score"])
|
109 |
+
score = hits[0]["score"]
|
110 |
+
if score < 0.5:
|
111 |
+
return {"prompt": "No matching knowledge base"}
|
112 |
+
else:
|
113 |
+
print(knowledge)
|
114 |
+
return {"prompt": "The relevant content is: " + knowledge + "\n"}
|
115 |
+
|
116 |
+
|
117 |
+
class StaticComponent(ToolComponent):
|
118 |
+
"Return static response"
|
119 |
+
def __init__(self, output):
|
120 |
+
super().__init__()
|
121 |
+
self.output = output
|
122 |
+
|
123 |
+
def func(self, agent):
|
124 |
+
outputdict = {"response": self.output}
|
125 |
+
return outputdict
|
126 |
+
|
127 |
+
|
128 |
+
class ExtractComponent(ToolComponent):
|
129 |
+
"""
|
130 |
+
Extract keywords based on the current scene and store them in the environment
|
131 |
+
extract_words(list) : Keywords to be extracted
|
132 |
+
system_prompt & last_prompt : Prompt to extract keywords
|
133 |
+
"""
|
134 |
+
def __init__(
|
135 |
+
self,
|
136 |
+
extract_words,
|
137 |
+
system_prompt,
|
138 |
+
last_prompt=None,
|
139 |
+
):
|
140 |
+
super().__init__()
|
141 |
+
self.extract_words = extract_words
|
142 |
+
self.system_prompt = system_prompt
|
143 |
+
self.default_prompt = (
|
144 |
+
"Please strictly adhere to the following format for outputting:\n"
|
145 |
+
)
|
146 |
+
for extract_word in extract_words:
|
147 |
+
self.default_prompt += (
|
148 |
+
f"<{extract_word}> the content you need to extract </{extract_word}>"
|
149 |
+
)
|
150 |
+
self.last_prompt = last_prompt if last_prompt else self.default_prompt
|
151 |
+
|
152 |
+
def func(self, agent):
|
153 |
+
response = agent.LLM.get_response(
|
154 |
+
agent.long_term_memory,
|
155 |
+
self.system_prompt,
|
156 |
+
self.last_prompt,
|
157 |
+
stream=False,
|
158 |
+
)
|
159 |
+
for extract_word in self.extract_words:
|
160 |
+
key = extract(response, extract_word)
|
161 |
+
key = key if key else response
|
162 |
+
agent.environment.shared_memory[extract_word] = key
|
163 |
+
|
164 |
+
return {}
|
165 |
+
|
166 |
+
|
167 |
+
"""Search sources: chatgpt/search engines/specific search sources/can even be multimodal (if it comes to clothing)"""
|
168 |
+
|
169 |
+
|
170 |
+
class WebSearchComponent(ToolComponent):
|
171 |
+
"""search engines"""
|
172 |
+
|
173 |
+
__ENGINE_NAME__: List = ["google", "bing"]
|
174 |
+
|
175 |
+
def __init__(self, engine_name: str, api: Dict):
|
176 |
+
"""
|
177 |
+
:param engine_name: The name of the search engine used
|
178 |
+
:param api: Pass in a dictionary, such as {"bing":"key1", "google":"key2", ...}, of course each value can also be a list, or more complicated
|
179 |
+
"""
|
180 |
+
super(WebSearchComponent, self).__init__()
|
181 |
+
"""Determine whether the key and engine_name of the api are legal"""
|
182 |
+
|
183 |
+
assert engine_name in WebSearchComponent.__ENGINE_NAME__
|
184 |
+
for api_name in api:
|
185 |
+
assert api_name in WebSearchComponent.__ENGINE_NAME__
|
186 |
+
|
187 |
+
self.api = api
|
188 |
+
self.engine_name = engine_name
|
189 |
+
|
190 |
+
self.search: Dict = {"bing": self._bing_search, "google": self._google_search}
|
191 |
+
|
192 |
+
def _bing_search(self, query: str, **kwargs):
|
193 |
+
"""Initialize search hyperparameters"""
|
194 |
+
subscription_key = self.api["bing"]
|
195 |
+
search_url = "https://api.bing.microsoft.com/v7.0/search"
|
196 |
+
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
|
197 |
+
params = {
|
198 |
+
"q": query,
|
199 |
+
"textDecorations": True,
|
200 |
+
"textFormat": "HTML",
|
201 |
+
"count": 10,
|
202 |
+
}
|
203 |
+
"""start searching"""
|
204 |
+
response = requests.get(search_url, headers=headers, params=params)
|
205 |
+
response.raise_for_status()
|
206 |
+
results = response.json()["webPages"]["value"]
|
207 |
+
"""execute"""
|
208 |
+
metadata_results = []
|
209 |
+
for result in results:
|
210 |
+
metadata_result = {
|
211 |
+
"snippet": result["snippet"],
|
212 |
+
"title": result["name"],
|
213 |
+
"link": result["url"],
|
214 |
+
}
|
215 |
+
metadata_results.append(metadata_result)
|
216 |
+
return {"meta data": metadata_results}
|
217 |
+
|
218 |
+
def _google_search(self, query: str, **kwargs):
|
219 |
+
"""Initialize search hyperparameters"""
|
220 |
+
api_key = self.api[self.engine_name]["api_key"]
|
221 |
+
cse_id = self.api[self.engine_name]["cse_id"]
|
222 |
+
service = build("customsearch", "v1", developerKey=api_key)
|
223 |
+
"""start searching"""
|
224 |
+
results = (
|
225 |
+
service.cse().list(q=query, cx=cse_id, num=10, **kwargs).execute()["items"]
|
226 |
+
)
|
227 |
+
"""execute"""
|
228 |
+
metadata_results = []
|
229 |
+
for result in results:
|
230 |
+
metadata_result = {
|
231 |
+
"snippet": result["snippet"],
|
232 |
+
"title": result["title"],
|
233 |
+
"link": result["link"],
|
234 |
+
}
|
235 |
+
metadata_results.append(metadata_result)
|
236 |
+
return {"meta data": metadata_results}
|
237 |
+
|
238 |
+
def func(self, agent, **kwargs) -> Dict:
|
239 |
+
query = (
|
240 |
+
agent.long_term_memory[-1]["content"]
|
241 |
+
if len(agent.long_term_memory) > 0
|
242 |
+
else " "
|
243 |
+
)
|
244 |
+
response = agent.LLM.get_response(
|
245 |
+
None,
|
246 |
+
system_prompt=f"Please analyze the provided conversation and identify keywords that can be used for a search engine query. Format the output as <keywords>extracted keywords</keywords>:\nConversation:\n{query}",
|
247 |
+
stream=False,
|
248 |
+
)
|
249 |
+
response = extract(response, "keywords")
|
250 |
+
query = response if response else query
|
251 |
+
|
252 |
+
search_results = self.search[self.engine_name](query=query, **kwargs)
|
253 |
+
information = ""
|
254 |
+
for i in search_results["meta data"][:5]:
|
255 |
+
information += i["snippet"]
|
256 |
+
return {
|
257 |
+
"prompt": "You can refer to the following information to reply:\n"
|
258 |
+
+ information
|
259 |
+
}
|
260 |
+
|
261 |
+
def convert_search_engine_to(self, engine_name):
|
262 |
+
assert engine_name in WebSearchComponent.__ENGINE_NAME__
|
263 |
+
self.engine_name = engine_name
|
264 |
+
|
265 |
+
|
266 |
+
class WebCrawlComponent(ToolComponent):
|
267 |
+
"""Open a single web page for crawling"""
|
268 |
+
|
269 |
+
def __init__(self):
|
270 |
+
super(WebCrawlComponent, self).__init__()
|
271 |
+
|
272 |
+
def func(self, agent_dict) -> Dict:
|
273 |
+
url = agent_dict["url"]
|
274 |
+
print(f"crawling {url} ......")
|
275 |
+
content = ""
|
276 |
+
"""Crawling content from url may need to be carried out according to different websites, such as wiki, baidu, zhihu, etc."""
|
277 |
+
driver = webdriver.Chrome()
|
278 |
+
try:
|
279 |
+
"""open url"""
|
280 |
+
driver.get(url)
|
281 |
+
|
282 |
+
"""wait 20 second"""
|
283 |
+
wait = WebDriverWait(driver, 20)
|
284 |
+
wait.until(EC.presence_of_element_located((By.TAG_NAME, "body")))
|
285 |
+
|
286 |
+
"""crawl code"""
|
287 |
+
page_source = driver.page_source
|
288 |
+
|
289 |
+
"""parse"""
|
290 |
+
soup = BeautifulSoup(page_source, "html.parser")
|
291 |
+
|
292 |
+
"""concatenate"""
|
293 |
+
for paragraph in soup.find_all("p"):
|
294 |
+
content = f"{content}\n{paragraph.get_text()}"
|
295 |
+
except Exception as e:
|
296 |
+
print("Error:", e)
|
297 |
+
finally:
|
298 |
+
"""quit"""
|
299 |
+
driver.quit()
|
300 |
+
return {"content": content.strip()}
|
301 |
+
|
302 |
+
|
303 |
+
class MailComponent(ToolComponent):
|
304 |
+
__VALID_ACTION__ = ["read", "send"]
|
305 |
+
|
306 |
+
def __init__(
|
307 |
+
self, cfg_file: str, default_action: str = "read", name: str = "e-mail"
|
308 |
+
):
|
309 |
+
"""'../config/google_mail.json'"""
|
310 |
+
super(MailComponent, self).__init__(name)
|
311 |
+
self.name = name
|
312 |
+
assert (
|
313 |
+
default_action.lower() in self.__VALID_ACTION__
|
314 |
+
), f"Action `{default_action}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
|
315 |
+
self.action = default_action.lower()
|
316 |
+
self.credential = self._login(cfg_file)
|
317 |
+
|
318 |
+
def _login(self, cfg_file: str):
|
319 |
+
SCOPES = [
|
320 |
+
"https://www.googleapis.com/auth/gmail.readonly",
|
321 |
+
"https://www.googleapis.com/auth/gmail.send",
|
322 |
+
]
|
323 |
+
creds = None
|
324 |
+
if os.path.exists("token.json"):
|
325 |
+
print("Login Successfully!")
|
326 |
+
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
|
327 |
+
if not creds or not creds.valid:
|
328 |
+
print("Please authorize in an open browser.")
|
329 |
+
if creds and creds.expired and creds.refresh_token:
|
330 |
+
creds.refresh(Request())
|
331 |
+
else:
|
332 |
+
flow = InstalledAppFlow.from_client_secrets_file(cfg_file, SCOPES)
|
333 |
+
creds = flow.run_local_server(port=0)
|
334 |
+
# Save the credentials for the next run
|
335 |
+
with open("token.json", "w") as token:
|
336 |
+
token.write(creds.to_json())
|
337 |
+
return creds
|
338 |
+
|
339 |
+
def _read(self, mail_dict: dict):
|
340 |
+
credential = self.credential
|
341 |
+
state = mail_dict["state"] if "state" in mail_dict else None
|
342 |
+
time_between = (
|
343 |
+
mail_dict["time_between"] if "time_between" in mail_dict else None
|
344 |
+
)
|
345 |
+
sender_mail = mail_dict["sender_mail"] if "sender_mail" in mail_dict else None
|
346 |
+
only_both = mail_dict["only_both"] if "only_both" in mail_dict else False
|
347 |
+
order_by_time = (
|
348 |
+
mail_dict["order_by_time"] if "order_by_time" in mail_dict else "descend"
|
349 |
+
)
|
350 |
+
include_word = (
|
351 |
+
mail_dict["include_word"] if "include_word" in mail_dict else None
|
352 |
+
)
|
353 |
+
exclude_word = (
|
354 |
+
mail_dict["exclude_word"] if "exclude_word" in mail_dict else None
|
355 |
+
)
|
356 |
+
MAX_SEARCH_CNT = (
|
357 |
+
mail_dict["MAX_SEARCH_CNT"] if "MAX_SEARCH_CNT" in mail_dict else 50
|
358 |
+
)
|
359 |
+
number = mail_dict["number"] if "number" in mail_dict else 10
|
360 |
+
if state is None:
|
361 |
+
state = "all"
|
362 |
+
if time_between is not None:
|
363 |
+
assert isinstance(time_between, tuple)
|
364 |
+
assert len(time_between) == 2
|
365 |
+
assert state in ["all", "unread", "read", "sent"]
|
366 |
+
if only_both:
|
367 |
+
assert sender_mail is not None
|
368 |
+
if sender_mail is not None:
|
369 |
+
assert isinstance(sender_mail, str)
|
370 |
+
assert credential
|
371 |
+
assert order_by_time in ["descend", "ascend"]
|
372 |
+
|
373 |
+
def generate_query():
|
374 |
+
query = ""
|
375 |
+
if state in ["unread", "read"]:
|
376 |
+
query = f"is:{state}"
|
377 |
+
if state in ["sent"]:
|
378 |
+
query = f"in:{state}"
|
379 |
+
if only_both:
|
380 |
+
query = f"{query} from:{sender_mail} OR to:{sender_mail}"
|
381 |
+
if sender_mail is not None and not only_both:
|
382 |
+
query = f"{query} from:({sender_mail})"
|
383 |
+
if include_word is not None:
|
384 |
+
query = f"{query} {include_word}"
|
385 |
+
if exclude_word is not None:
|
386 |
+
query = f"{query} -{exclude_word}"
|
387 |
+
if time_between is not None:
|
388 |
+
TIME_FORMAT = "%Y/%m/%d"
|
389 |
+
t1, t2 = time_between
|
390 |
+
if t1 == "now":
|
391 |
+
t1 = datetime.now().strftime(TIME_FORMAT)
|
392 |
+
if t2 == "now":
|
393 |
+
t2 = datetime.now().strftime(TIME_FORMAT)
|
394 |
+
if isinstance(t1, str) and isinstance(t2, str):
|
395 |
+
t1 = datetime.strptime(t1, TIME_FORMAT)
|
396 |
+
t2 = datetime.strptime(t2, TIME_FORMAT)
|
397 |
+
elif isinstance(t1, str) and isinstance(t2, int):
|
398 |
+
t1 = datetime.strptime(t1, TIME_FORMAT)
|
399 |
+
t2 = t1 + timedelta(days=t2)
|
400 |
+
elif isinstance(t1, int) and isinstance(t2, str):
|
401 |
+
t2 = datetime.strptime(t2, TIME_FORMAT)
|
402 |
+
t1 = t2 + timedelta(days=t1)
|
403 |
+
else:
|
404 |
+
assert False, "invalid time"
|
405 |
+
if t1 > t2:
|
406 |
+
t1, t2 = t2, t1
|
407 |
+
query = f"{query} after:{t1.strftime(TIME_FORMAT)} before:{t2.strftime(TIME_FORMAT)}"
|
408 |
+
return query.strip()
|
409 |
+
|
410 |
+
def sort_by_time(data: List[Dict]):
|
411 |
+
if order_by_time == "descend":
|
412 |
+
reverse = True
|
413 |
+
else:
|
414 |
+
reverse = False
|
415 |
+
sorted_data = sorted(
|
416 |
+
data,
|
417 |
+
key=lambda x: datetime.strptime(x["time"], "%Y-%m-%d %H:%M:%S"),
|
418 |
+
reverse=reverse,
|
419 |
+
)
|
420 |
+
return sorted_data
|
421 |
+
|
422 |
+
try:
|
423 |
+
service = build("gmail", "v1", credentials=credential)
|
424 |
+
results = (
|
425 |
+
service.users()
|
426 |
+
.messages()
|
427 |
+
.list(userId="me", labelIds=["INBOX"], q=generate_query())
|
428 |
+
.execute()
|
429 |
+
)
|
430 |
+
|
431 |
+
messages = results.get("messages", [])
|
432 |
+
email_data = list()
|
433 |
+
|
434 |
+
if not messages:
|
435 |
+
print("No eligible emails.")
|
436 |
+
return None
|
437 |
+
else:
|
438 |
+
pbar = tqdm(total=min(MAX_SEARCH_CNT, len(messages)))
|
439 |
+
for cnt, message in enumerate(messages):
|
440 |
+
pbar.update(1)
|
441 |
+
if cnt >= MAX_SEARCH_CNT:
|
442 |
+
break
|
443 |
+
msg = (
|
444 |
+
service.users()
|
445 |
+
.messages()
|
446 |
+
.get(
|
447 |
+
userId="me",
|
448 |
+
id=message["id"],
|
449 |
+
format="full",
|
450 |
+
metadataHeaders=None,
|
451 |
+
)
|
452 |
+
.execute()
|
453 |
+
)
|
454 |
+
|
455 |
+
subject = ""
|
456 |
+
for header in msg["payload"]["headers"]:
|
457 |
+
if header["name"] == "Subject":
|
458 |
+
subject = header["value"]
|
459 |
+
break
|
460 |
+
|
461 |
+
sender = ""
|
462 |
+
for header in msg["payload"]["headers"]:
|
463 |
+
if header["name"] == "From":
|
464 |
+
sender = re.findall(
|
465 |
+
r"\b[\w\.-]+@[\w\.-]+\.\w+\b", header["value"]
|
466 |
+
)[0]
|
467 |
+
break
|
468 |
+
body = ""
|
469 |
+
if "parts" in msg["payload"]:
|
470 |
+
for part in msg["payload"]["parts"]:
|
471 |
+
if part["mimeType"] == "text/plain":
|
472 |
+
data = part["body"]["data"]
|
473 |
+
body = base64.urlsafe_b64decode(data).decode("utf-8")
|
474 |
+
break
|
475 |
+
|
476 |
+
email_info = {
|
477 |
+
"sender": sender,
|
478 |
+
"time": datetime.fromtimestamp(
|
479 |
+
int(msg["internalDate"]) / 1000
|
480 |
+
).strftime("%Y-%m-%d %H:%M:%S"),
|
481 |
+
"subject": subject,
|
482 |
+
"body": body,
|
483 |
+
}
|
484 |
+
email_data.append(email_info)
|
485 |
+
pbar.close()
|
486 |
+
email_data = sort_by_time(email_data)[0:number]
|
487 |
+
return {"results": email_data}
|
488 |
+
except Exception as e:
|
489 |
+
print(e)
|
490 |
+
return None
|
491 |
+
|
492 |
+
def _send(self, mail_dict: dict):
|
493 |
+
recipient_mail = mail_dict["recipient_mail"]
|
494 |
+
subject = mail_dict["subject"]
|
495 |
+
body = mail_dict["body"]
|
496 |
+
credential = self.credential
|
497 |
+
service = build("gmail", "v1", credentials=credential)
|
498 |
+
|
499 |
+
message = MIMEMultipart()
|
500 |
+
message["to"] = recipient_mail
|
501 |
+
message["subject"] = subject
|
502 |
+
|
503 |
+
message.attach(MIMEText(body, "plain"))
|
504 |
+
|
505 |
+
raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")
|
506 |
+
try:
|
507 |
+
message = (
|
508 |
+
service.users()
|
509 |
+
.messages()
|
510 |
+
.send(userId="me", body={"raw": raw_message})
|
511 |
+
.execute()
|
512 |
+
)
|
513 |
+
return {"state": True}
|
514 |
+
except HttpError as error:
|
515 |
+
print(error)
|
516 |
+
return {"state": False}
|
517 |
+
|
518 |
+
def func(self, mail_dict: dict):
|
519 |
+
if "action" in mail_dict:
|
520 |
+
assert mail_dict["action"].lower() in self.__VALID_ACTION__
|
521 |
+
self.action = mail_dict["action"]
|
522 |
+
functions = {"read": self._read, "send": self._send}
|
523 |
+
return functions[self.action](mail_dict)
|
524 |
+
|
525 |
+
def convert_action_to(self, action_name: str):
|
526 |
+
assert (
|
527 |
+
action_name.lower() in self.__VALID_ACTION__
|
528 |
+
), f"Action `{action_name}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
|
529 |
+
self.action = action_name.lower()
|
530 |
+
|
531 |
+
|
532 |
+
class WeatherComponet(ToolComponent):
|
533 |
+
def __init__(self, api_key, name="weather", TIME_FORMAT="%Y-%m-%d"):
|
534 |
+
super(WeatherComponet, self).__init__(name)
|
535 |
+
self.name = name
|
536 |
+
self.TIME_FORMAT = TIME_FORMAT
|
537 |
+
self.api_key = api_key
|
538 |
+
|
539 |
+
def _parse(self, data):
|
540 |
+
dict_data: dict = {}
|
541 |
+
for item in data["data"]:
|
542 |
+
date = item["datetime"]
|
543 |
+
dict_data[date] = {}
|
544 |
+
if "weather" in item:
|
545 |
+
dict_data[date]["description"] = item["weather"]["description"]
|
546 |
+
mapping = {
|
547 |
+
"temp": "temperature",
|
548 |
+
"max_temp": "max_temperature",
|
549 |
+
"min_temp": "min_temperature",
|
550 |
+
"precip": "accumulated_precipitation",
|
551 |
+
}
|
552 |
+
for key in ["temp", "max_temp", "min_temp", "precip"]:
|
553 |
+
if key in item:
|
554 |
+
dict_data[date][mapping[key]] = item[key]
|
555 |
+
return dict_data
|
556 |
+
|
557 |
+
def _query(self, city_name, country_code, start_date, end_date):
|
558 |
+
"""https://www.weatherbit.io/api/historical-weather-daily"""
|
559 |
+
# print(datetime.strftime(start_date, self.TIME_FORMAT), datetime.strftime(datetime.now(), self.TIME_FORMAT), end_date, datetime.strftime(datetime.now()+timedelta(days=1), self.TIME_FORMAT))
|
560 |
+
if start_date == datetime.strftime(
|
561 |
+
datetime.now(), self.TIME_FORMAT
|
562 |
+
) and end_date == datetime.strftime(
|
563 |
+
datetime.now() + timedelta(days=1), self.TIME_FORMAT
|
564 |
+
):
|
565 |
+
"""today"""
|
566 |
+
url = f"https://api.weatherbit.io/v2.0/current?city={city_name}&country={country_code}&key={self.api_key}"
|
567 |
+
else:
|
568 |
+
url = f"https://api.weatherbit.io/v2.0/history/daily?&city={city_name}&country={country_code}&start_date={start_date}&end_date={end_date}&key={self.api_key}"
|
569 |
+
response = requests.get(url)
|
570 |
+
data = response.json()
|
571 |
+
return self._parse(data)
|
572 |
+
|
573 |
+
def func(self, weather_dict: Dict) -> Dict:
|
574 |
+
TIME_FORMAT = self.TIME_FORMAT
|
575 |
+
# Beijing, Shanghai
|
576 |
+
city_name = weather_dict["city_name"]
|
577 |
+
# CN, US
|
578 |
+
country_code = weather_dict["country_code"]
|
579 |
+
# 2020-02-02
|
580 |
+
start_date = datetime.strftime(
|
581 |
+
datetime.strptime(weather_dict["start_date"], self.TIME_FORMAT),
|
582 |
+
self.TIME_FORMAT,
|
583 |
+
)
|
584 |
+
end_date = weather_dict["end_date"] if "end_date" in weather_dict else None
|
585 |
+
if end_date is None:
|
586 |
+
end_date = datetime.strftime(
|
587 |
+
datetime.strptime(start_date, TIME_FORMAT) + timedelta(days=-1),
|
588 |
+
TIME_FORMAT,
|
589 |
+
)
|
590 |
+
else:
|
591 |
+
end_date = datetime.strftime(
|
592 |
+
datetime.strptime(weather_dict["end_date"], self.TIME_FORMAT),
|
593 |
+
self.TIME_FORMAT,
|
594 |
+
)
|
595 |
+
if datetime.strptime(start_date, TIME_FORMAT) > datetime.strptime(
|
596 |
+
end_date, TIME_FORMAT
|
597 |
+
):
|
598 |
+
start_date, end_date = end_date, start_date
|
599 |
+
assert start_date != end_date
|
600 |
+
return self._query(city_name, country_code, start_date, end_date)
|
601 |
+
|
602 |
+
|
603 |
+
class TranslateComponent(ToolComponent):
|
604 |
+
__SUPPORT_LANGUAGE__ = [
|
605 |
+
"af",
|
606 |
+
"am",
|
607 |
+
"ar",
|
608 |
+
"as",
|
609 |
+
"az",
|
610 |
+
"ba",
|
611 |
+
"bg",
|
612 |
+
"bn",
|
613 |
+
"bo",
|
614 |
+
"bs",
|
615 |
+
"ca",
|
616 |
+
"cs",
|
617 |
+
"cy",
|
618 |
+
"da",
|
619 |
+
"de",
|
620 |
+
"dsb",
|
621 |
+
"dv",
|
622 |
+
"el",
|
623 |
+
"en",
|
624 |
+
"es",
|
625 |
+
"et",
|
626 |
+
"eu",
|
627 |
+
"fa",
|
628 |
+
"fi",
|
629 |
+
"fil",
|
630 |
+
"fj",
|
631 |
+
"fo",
|
632 |
+
"fr",
|
633 |
+
"fr-CA",
|
634 |
+
"ga",
|
635 |
+
"gl",
|
636 |
+
"gom",
|
637 |
+
"gu",
|
638 |
+
"ha",
|
639 |
+
"he",
|
640 |
+
"hi",
|
641 |
+
"hr",
|
642 |
+
"hsb",
|
643 |
+
"ht",
|
644 |
+
"hu",
|
645 |
+
"hy",
|
646 |
+
"id",
|
647 |
+
"ig",
|
648 |
+
"ikt",
|
649 |
+
"is",
|
650 |
+
"it",
|
651 |
+
"iu",
|
652 |
+
"iu-Latn",
|
653 |
+
"ja",
|
654 |
+
"ka",
|
655 |
+
"kk",
|
656 |
+
"km",
|
657 |
+
"kmr",
|
658 |
+
"kn",
|
659 |
+
"ko",
|
660 |
+
"ku",
|
661 |
+
"ky",
|
662 |
+
"ln",
|
663 |
+
"lo",
|
664 |
+
"lt",
|
665 |
+
"lug",
|
666 |
+
"lv",
|
667 |
+
"lzh",
|
668 |
+
"mai",
|
669 |
+
"mg",
|
670 |
+
"mi",
|
671 |
+
"mk",
|
672 |
+
"ml",
|
673 |
+
"mn-Cyrl",
|
674 |
+
"mn-Mong",
|
675 |
+
"mr",
|
676 |
+
"ms",
|
677 |
+
"mt",
|
678 |
+
"mww",
|
679 |
+
"my",
|
680 |
+
"nb",
|
681 |
+
"ne",
|
682 |
+
"nl",
|
683 |
+
"nso",
|
684 |
+
"nya",
|
685 |
+
"or",
|
686 |
+
"otq",
|
687 |
+
"pa",
|
688 |
+
"pl",
|
689 |
+
"prs",
|
690 |
+
"ps",
|
691 |
+
"pt",
|
692 |
+
"pt-PT",
|
693 |
+
"ro",
|
694 |
+
"ru",
|
695 |
+
"run",
|
696 |
+
"rw",
|
697 |
+
"sd",
|
698 |
+
"si",
|
699 |
+
"sk",
|
700 |
+
"sl",
|
701 |
+
"sm",
|
702 |
+
"sn",
|
703 |
+
"so",
|
704 |
+
"sq",
|
705 |
+
"sr-Cyrl",
|
706 |
+
"sr-Latn",
|
707 |
+
"st",
|
708 |
+
"sv",
|
709 |
+
"sw",
|
710 |
+
"ta",
|
711 |
+
"te",
|
712 |
+
"th",
|
713 |
+
"ti",
|
714 |
+
"tk",
|
715 |
+
"tlh-Latn",
|
716 |
+
"tlh-Piqd",
|
717 |
+
"tn",
|
718 |
+
"to",
|
719 |
+
"tr",
|
720 |
+
"tt",
|
721 |
+
"ty",
|
722 |
+
"ug",
|
723 |
+
"uk",
|
724 |
+
"ur",
|
725 |
+
"uz",
|
726 |
+
"vi",
|
727 |
+
"xh",
|
728 |
+
"yo",
|
729 |
+
"yua",
|
730 |
+
"yue",
|
731 |
+
"zh-Hans",
|
732 |
+
"zh-Hant",
|
733 |
+
"zu",
|
734 |
+
]
|
735 |
+
|
736 |
+
def __init__(
|
737 |
+
self, api_key, location, default_target_language="zh-cn", name="translate"
|
738 |
+
):
|
739 |
+
super(TranslateComponent, self).__init__(name)
|
740 |
+
self.name = name
|
741 |
+
self.api_key = api_key
|
742 |
+
self.location = location
|
743 |
+
self.default_target_language = default_target_language
|
744 |
+
|
745 |
+
def func(self, translate_dict: Dict) -> Dict:
|
746 |
+
content = translate_dict["content"]
|
747 |
+
target_language = self.default_target_language
|
748 |
+
if "target_language" in translate_dict:
|
749 |
+
target_language = translate_dict["target_language"]
|
750 |
+
assert (
|
751 |
+
target_language in self.__SUPPORT_LANGUAGE__
|
752 |
+
), f"language `{target_language}` is not supported."
|
753 |
+
|
754 |
+
endpoint = "https://api.cognitive.microsofttranslator.com"
|
755 |
+
|
756 |
+
path = "/translate"
|
757 |
+
constructed_url = endpoint + path
|
758 |
+
|
759 |
+
params = {"api-version": "3.0", "to": target_language}
|
760 |
+
|
761 |
+
headers = {
|
762 |
+
"Ocp-Apim-Subscription-Key": self.api_key,
|
763 |
+
"Ocp-Apim-Subscription-Region": self.location,
|
764 |
+
"Content-type": "application/json",
|
765 |
+
"X-ClientTraceId": str(uuid.uuid4()),
|
766 |
+
}
|
767 |
+
|
768 |
+
body = [{"text": content}]
|
769 |
+
|
770 |
+
request = requests.post(
|
771 |
+
constructed_url, params=params, headers=headers, json=body
|
772 |
+
)
|
773 |
+
response = request.json()
|
774 |
+
response = json.dumps(
|
775 |
+
response,
|
776 |
+
sort_keys=True,
|
777 |
+
ensure_ascii=False,
|
778 |
+
indent=4,
|
779 |
+
separators=(",", ": "),
|
780 |
+
)
|
781 |
+
response = eval(response)
|
782 |
+
return {"result": response[0]["translations"][0]["text"]}
|
783 |
+
|
784 |
+
|
785 |
+
class APIComponent(ToolComponent):
|
786 |
+
def __init__(self):
|
787 |
+
super(APIComponent, self).__init__()
|
788 |
+
|
789 |
+
def func(self, agent) -> Dict:
|
790 |
+
pass
|
791 |
+
|
792 |
+
|
793 |
+
class FunctionComponent(ToolComponent):
|
794 |
+
def __init__(
|
795 |
+
self,
|
796 |
+
functions,
|
797 |
+
function_call="auto",
|
798 |
+
response_type="response",
|
799 |
+
your_function=None,
|
800 |
+
):
|
801 |
+
super().__init__()
|
802 |
+
self.functions = functions
|
803 |
+
self.function_call = function_call
|
804 |
+
self.parameters = {}
|
805 |
+
self.available_functions = {}
|
806 |
+
self.response_type = response_type
|
807 |
+
if your_function:
|
808 |
+
function_name = your_function["name"]
|
809 |
+
function_content = your_function["content"]
|
810 |
+
exec(function_content)
|
811 |
+
self.available_functions[function_name] = eval(function_name)
|
812 |
+
|
813 |
+
for function in self.functions:
|
814 |
+
self.parameters[function["name"]] = list(
|
815 |
+
function["parameters"]["properties"].keys()
|
816 |
+
)
|
817 |
+
self.available_functions[function["name"]] = eval(function["name"])
|
818 |
+
|
819 |
+
def func(self, agent):
|
820 |
+
messages = agent.long_term_memory
|
821 |
+
outputdict = {}
|
822 |
+
query = agent.long_term_memory[-1].content if len(agent.long_term_memory) > 0 else " "
|
823 |
+
relevant_history = get_relevant_history(
|
824 |
+
query,
|
825 |
+
agent.long_term_memory[:-1],
|
826 |
+
agent.chat_embeddings[:-1],
|
827 |
+
)
|
828 |
+
response = agent.LLM.get_response(
|
829 |
+
messages,
|
830 |
+
None,
|
831 |
+
functions=self.functions,
|
832 |
+
stream=False,
|
833 |
+
function_call=self.function_call,
|
834 |
+
relevant_history=relevant_history,
|
835 |
+
)
|
836 |
+
response_message = response
|
837 |
+
if response_message.get("function_call"):
|
838 |
+
function_name = response_message["function_call"]["name"]
|
839 |
+
fuction_to_call = self.available_functions[function_name]
|
840 |
+
function_args = json.loads(response_message["function_call"]["arguments"])
|
841 |
+
input_args = {}
|
842 |
+
for args_name in self.parameters[function_name]:
|
843 |
+
input_args[args_name] = function_args.get(args_name)
|
844 |
+
function_response = fuction_to_call(**input_args)
|
845 |
+
if self.response_type == "response":
|
846 |
+
outputdict["response"] = function_response
|
847 |
+
elif self.response_type == "prompt":
|
848 |
+
outputdict["prompt"] = function_response
|
849 |
+
|
850 |
+
return outputdict
|
851 |
+
|
852 |
+
|
853 |
+
class CodeComponent(ToolComponent):
|
854 |
+
def __init__(self, file_name, keyword) -> None:
|
855 |
+
super().__init__()
|
856 |
+
self.file_name = file_name
|
857 |
+
self.keyword = keyword
|
858 |
+
self.system_prompt = (
|
859 |
+
"you need to extract the modified code as completely as possible."
|
860 |
+
)
|
861 |
+
self.last_prompt = (
|
862 |
+
f"Please strictly adhere to the following format for outputting: \n"
|
863 |
+
)
|
864 |
+
self.last_prompt += (
|
865 |
+
f"<{self.keyword}> the content you need to extract </{self.keyword}>"
|
866 |
+
)
|
867 |
+
|
868 |
+
def func(self, agent):
|
869 |
+
response = agent.LLM.get_response(
|
870 |
+
agent.long_term_memory,
|
871 |
+
self.system_prompt,
|
872 |
+
self.last_prompt,
|
873 |
+
stream=False,
|
874 |
+
)
|
875 |
+
code = extract(response, self.keyword)
|
876 |
+
code = code if code else response
|
877 |
+
os.makedirs("output_code", exist_ok=True)
|
878 |
+
file_name = "output_code/" + self.file_name
|
879 |
+
codes = code.split("\n")
|
880 |
+
if codes[0] == "```python":
|
881 |
+
codes.remove(codes[0])
|
882 |
+
if codes[-1] == "```":
|
883 |
+
codes.remove(codes[-1])
|
884 |
+
code = "\n".join(codes)
|
885 |
+
with open(file_name, "w", encoding="utf-8") as f:
|
886 |
+
f.write(code)
|
887 |
+
return {}
|
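A minimal sketch (not part of the commit) of the inputs `FunctionComponent` expects, assuming the OpenAI function-calling schema that `LLM.get_response` passes through; the `get_weather` name and body are hypothetical:

```python
# Hypothetical example: schema and function are illustrative only.
functions = [{
    "name": "get_weather",
    "description": "Look up the weather for a city",
    "parameters": {
        "type": "object",
        "properties": {"city": {"type": "string"}},
        "required": ["city"],
    },
}]

# `your_function` carries Python source that is exec'd and then looked up
# by name, so its "name" must match the def inside "content".
your_function = {
    "name": "get_weather",
    "content": "def get_weather(city):\n    return f'sunny in {city}'",
}

component = FunctionComponent(functions, your_function=your_function)
```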
Component/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .ExtraComponent import *
+from .PromptComponent import *
+from .ToolComponent import *
Environment/__init__.py
ADDED
@@ -0,0 +1 @@
+from .base_environment import Environment
Environment/base_environment.py
ADDED
@@ -0,0 +1,177 @@
+from utils import get_relevant_history, get_embedding
+import torch
+from LLM.base_LLM import *
+from Memory import Memory
+from Prompt import *
+import json
+class Environment:
+    """
+    The place where the agents act, responsible for storing some shared memories
+    """
+    def __init__(self, config) -> None:
+        self.shared_memory = {"long_term_memory": [], "short_term_memory": None}
+        self.agents = None
+
+        self.summary_system_prompt = {}
+        self.summary_last_prompt = {}
+        self.environment_prompt = {}
+        self.environment_type = config["environment_type"] if "environment_type" in config else "cooperative"
+        self.current_chat_history_idx = 0
+        self.LLMs = {}
+
+        # Initialize the summary method for each state
+        for state_name, state_dict in config["states"].items():
+            if state_name != "end_state":
+                self.summary_system_prompt[state_name] = (
+                    state_dict["summary_system_prompt"]
+                    if "summary_system_prompt" in state_dict
+                    else eval(Default_environment_summary_system_prompt)
+                )
+
+                self.summary_last_prompt[state_name] = (
+                    state_dict["summary_last_prompt"]
+                    if "summary_last_prompt" in state_dict
+                    else eval(Default_environment_summary_last_prompt)
+                )
+
+                self.environment_prompt[state_name] = (
+                    state_dict["environment_prompt"]
+                    if "environment_prompt" in state_dict
+                    else " "
+                )
+                self.LLMs[state_name] = init_LLM("logs"+os.sep+f"{state_name}",**state_dict)
+        self.roles_to_names = None
+        self.names_to_roles = None
+
+    @classmethod
+    def from_config(cls, config_path):
+        with open(config_path) as f:
+            config = json.load(f)
+        return cls(config)
+
+    def summary(self, current_state):
+        """
+        Summarize the situation in the current environment every once in a while
+        """
+        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
+        current_state_name = current_state.name
+
+        query = self.shared_memory["long_term_memory"][-1].content
+        if len(self.shared_memory["long_term_memory"]) > 1:
+            relevant_history = get_relevant_history(
+                query,
+                self.shared_memory["long_term_memory"][:-1],
+                self.shared_memory["chat_embeddings"][:-1],
+            )
+
+            relevant_history = Memory.get_chat_history(relevant_history)
+        else:
+            relevant_history = ""
+        chat_history = Memory.get_chat_history(
+            self.shared_memory["long_term_memory"][-MAX_CHAT_HISTORY + 1 :]
+        )
+        summary = self.shared_memory["short_term_memory"]
+
+
+        # system prompt = environment prompt + current memory + system prompt
+        # current_memory = summary + chat history + relevant history
+        current_memory = eval(Environment_summary_memory)
+        environment_prompt = self.environment_prompt[current_state_name]
+        summary_system_prompt = self.summary_system_prompt[current_state_name]
+
+        environment_summary_system_prompt = eval(Environment_summary_system_prompt)
+        response = self.LLMs[current_state_name].get_response(None, environment_summary_system_prompt, stream=False)
+        return response
+
+    def update_memory(self, memory, current_state):
+        """
+        Update the chat embeddings, the shared long-term and short-term memory,
+        and the sending agent's long-term memory
+        """
+        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
+        self.shared_memory["long_term_memory"].append(memory)
+        current_embedding = get_embedding(memory.content)
+        if "chat_embeddings" not in self.shared_memory:
+            self.shared_memory["chat_embeddings"] = current_embedding
+        else:
+            self.shared_memory["chat_embeddings"] = torch.cat(
+                [self.shared_memory["chat_embeddings"], current_embedding], dim=0
+            )
+        if len(self.shared_memory["long_term_memory"]) % MAX_CHAT_HISTORY == 0:
+            summary = self.summary(current_state)
+            self.shared_memory["short_term_memory"] = summary
+
+        self.agents[memory.send_name].update_memory(memory)
+
+
+    def _get_agent_last_conversation_idx(self,agent,current_long_term_memory):
+        last_conversation_idx = -1
+        for i, history in enumerate(current_long_term_memory):
+            if history.send_name == agent.name:
+                last_conversation_idx = i
+        return last_conversation_idx
+
+
+    def _get_agent_new_memory(self,agent,current_long_term_memory):
+        # get new conversation
+        last_conversation_idx = self._get_agent_last_conversation_idx(agent,current_long_term_memory)
+
+        if last_conversation_idx == -1:
+            new_conversation = current_long_term_memory
+        elif (
+            last_conversation_idx
+            == len(current_long_term_memory) - 1
+        ):
+            new_conversation = []
+        else:
+            new_conversation = current_long_term_memory[
+                last_conversation_idx + 1 :
+            ]
+        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
+        if len(new_conversation) > 2 * MAX_CHAT_HISTORY:
+            new_conversation = new_conversation[-2*MAX_CHAT_HISTORY+1:]
+
+        # get chat history from new conversation
+        return Memory.get_chat_history(new_conversation)
+
+
+    def _observe(self,agent):
+        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
+        current_state = agent.current_state
+        current_role = agent.state_roles[current_state.name]
+        current_component_dict = current_state.components[current_role]
+
+        # cooperative: information is shared between different states;
+        # competitive (spelled "competive" in configs): nothing is shared
+        current_chat_history_idx = self.current_chat_history_idx if self.environment_type == "competive" else 0
+        current_long_term_memory = self.shared_memory["long_term_memory"][current_chat_history_idx:]
+        current_chat_embeddings = self.shared_memory["chat_embeddings"][current_chat_history_idx:]
+
+        if len(current_long_term_memory) > 2*MAX_CHAT_HISTORY:
+            current_long_term_memory = current_long_term_memory[-2*MAX_CHAT_HISTORY+1:]
+            current_chat_embeddings = current_chat_embeddings[-2*MAX_CHAT_HISTORY+1:]
+        # relevant_memory
+        query = current_long_term_memory[-1].content
+        if len(current_long_term_memory) > 1:
+            relevant_memory = get_relevant_history(
+                query,
+                current_long_term_memory[:-2],
+                current_chat_embeddings[:-2],
+            )
+            relevant_memory = Memory.get_chat_history(relevant_memory,agent.name)
+        else:
+            relevant_memory = ""
+
+        relevant_memory = eval(Agent_observe_relevant_memory)
+        agent.relevant_memory = relevant_memory
+
+
+        # get chat history from new conversation
+        conversations = self._get_agent_new_memory(agent,current_long_term_memory)
+
+        # memory = relevant_memory + summary + history + query
+        query = current_long_term_memory[-1]
+        current_memory = eval(Agent_observe_memory)
+
+        return {"role": "user", "content": current_memory}
+
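A minimal sketch (not part of the commit) of the memory-update loop this class drives; `MAX_CHAT_HISTORY` is the env var the code reads, the config path and message are illustrative:

```python
import os
from Environment import Environment
from Memory import Memory

os.environ["MAX_CHAT_HISTORY"] = "5"          # summary() fires every 5 messages
env = Environment.from_config("config.json")  # hypothetical config path

memory = Memory("student", "Alice", "Hello!") # role, name, content
# update_memory embeds the message, appends it to the shared long-term
# memory, and refreshes the short-term summary every MAX_CHAT_HISTORY turns:
# env.update_memory(memory, current_state)
```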
LLM/__init__.py
ADDED
File without changes
LLM/base_LLM.py
ADDED
@@ -0,0 +1,137 @@
+from abc import abstractmethod
+import openai
+import os
+import time
+from Memory import Memory
+from utils import save_logs
+
+class LLM:
+    def __init__(self) -> None:
+        pass
+
+    @abstractmethod
+    def get_response(self):
+        pass
+
+
+class OpenAILLM(LLM):
+    def __init__(self,**kwargs) -> None:
+        super().__init__()
+        self.MAX_CHAT_HISTORY = eval(
+            os.environ["MAX_CHAT_HISTORY"]) if "MAX_CHAT_HISTORY" in os.environ else 10
+
+        self.model = kwargs["model"] if "model" in kwargs else "gpt-3.5-turbo-16k-0613"
+        self.temperature = kwargs["temperature"] if "temperature" in kwargs else 0.3
+        self.log_path = kwargs["log_path"].replace("/",os.sep) if "log_path" in kwargs else "logs"
+
+
+    def get_stream(self,response, log_path, messages):
+        ans = ""
+        for res in response:
+            if res:
+                r = (res.choices[0]["delta"].get("content")
+                     if res.choices[0]["delta"].get("content") else "")
+                ans += r
+                yield r
+
+        save_logs(log_path, messages, ans)
+
+
+
+    def get_response(self,
+                     chat_history,
+                     system_prompt,
+                     last_prompt=None,
+                     stream=False,
+                     functions=None,
+                     function_call="auto",
+                     WAIT_TIME=20,
+                     **kwargs):
+        """
+        return the LLM's response
+        """
+        openai.api_key = os.environ["API_KEY"]
+        if "PROXY" in os.environ:
+            assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"], "PROXY error: PROXY must be http or socks"
+            openai.proxy = os.environ["PROXY"]
+        if "API_BASE" in os.environ:
+            openai.api_base = os.environ["API_BASE"]
+        active_mode = True if ("ACTIVE_MODE" in os.environ and os.environ["ACTIVE_MODE"] == "0") else False
+        model = self.model
+        temperature = self.temperature
+
+
+        if active_mode:
+            system_prompt = system_prompt + "Please keep your reply as concise as possible."
+
+        messages = [{
+            "role": "system",
+            "content": system_prompt
+        }] if system_prompt else []
+
+        if chat_history:
+            if len(chat_history) > self.MAX_CHAT_HISTORY:
+                chat_history = chat_history[- self.MAX_CHAT_HISTORY:]
+            if isinstance(chat_history[0],dict):
+                messages += chat_history
+            elif isinstance(chat_history[0],Memory):
+                messages += [memory.get_gpt_message("user") for memory in chat_history]
+
+
+
+        if last_prompt:
+            if active_mode:
+                last_prompt = last_prompt + "Please keep your reply as concise as possible."
+            # messages += [{"role": "system", "content": f"{last_prompt}"}]
+            messages[-1]["content"] += last_prompt
+
+
+        while True:
+            try:
+                if functions:
+                    response = openai.ChatCompletion.create(
+                        model=model,
+                        messages=messages,
+                        functions=functions,
+                        function_call=function_call,
+                        temperature=temperature,
+                    )
+                else:
+                    response = openai.ChatCompletion.create(
+                        model=model,
+                        messages=messages,
+                        temperature=temperature,
+                        stream=stream)
+                break
+            except Exception as e:
+                print(e)
+                if "maximum context length is" in str(e):
+                    # Drop the oldest non-system message and retry.
+                    if len(messages) > 1:
+                        del messages[1]
+                    else:
+                        assert False, "exceed max length"
+                else:
+                    print(f"Please wait {WAIT_TIME} seconds and resend later ...")
+                    time.sleep(WAIT_TIME)
+
+        if functions:
+            save_logs(self.log_path, messages, response)
+            return response.choices[0].message
+        elif stream:
+            return self.get_stream(response, self.log_path, messages)
+        else:
+            save_logs(self.log_path, messages, response)
+            return response.choices[0].message["content"]
+
+
+def init_LLM(default_log_path,**kwargs):
+    LLM_type = kwargs["LLM_type"] if "LLM_type" in kwargs else "OpenAI"
+    log_path = kwargs["log_path"].replace("/",os.sep) if "log_path" in kwargs else default_log_path
+    if LLM_type == "OpenAI":
+        LLM = (
+            OpenAILLM(**kwargs["LLM"])
+            if "LLM" in kwargs
+            else OpenAILLM(model = "gpt-3.5-turbo-16k-0613",temperature=0.3,log_path=log_path)
+        )
+        return LLM
+
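A minimal sketch (not part of the commit) of driving `OpenAILLM` through `init_LLM`; the env vars are the ones `get_response` reads, and the key value is a placeholder:

```python
import os
from LLM.base_LLM import init_LLM

os.environ["API_KEY"] = "sk-..."   # placeholder, not a real key
os.environ["MAX_CHAT_HISTORY"] = "5"

llm = init_LLM("logs")             # falls back to gpt-3.5-turbo-16k-0613
reply = llm.get_response(
    chat_history=[{"role": "user", "content": "Say hi in one word."}],
    system_prompt="You are terse.",
    stream=False,
)
print(reply)
```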
Memory/__init__.py
ADDED
@@ -0,0 +1 @@
+from .base_Memory import Memory
Memory/base_Memory.py
ADDED
@@ -0,0 +1,32 @@
+from Prompt import *
+class Memory:
+    def __init__(self,role,name,content) -> None:
+        self.send_role = role
+        self.send_name = name
+        self.content = content
+
+    def get_gpt_message(self,role):
+        return {"role":role,"content":self.content}
+
+    @classmethod
+    def get_chat_history(cls,messages,agent_name=None):
+        """
+        Splice a list of memories into a single chat-history string
+        input :
+            messages(list) : list of memory(Memory)
+        Return :
+            chat_history(str) : the concatenated history
+        """
+        chat_history = ""
+        for message in messages:
+            name,role,content = message.send_name,message.send_role,message.content
+            if agent_name and agent_name==name:
+                name = "you"
+            chat_history += eval(Single_message)
+        chat_history = eval(Chat_total_message)
+        return chat_history
+
+    def get_query(self):
+        "Return : query(str): the last sentence"
+        name,role,content = self.send_name,self.send_role,self.content
+        return eval(Single_message)
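A quick sketch (not part of the commit) of what `get_chat_history` produces, given the `Single_message` / `Chat_total_message` templates defined in `Prompt/base_Prompts.py` below:

```python
from Memory import Memory

msgs = [
    Memory("student", "Alice", "What is an SOP?"),
    Memory("teacher", "Bob", "A standard operation procedure."),
]
print(Memory.get_chat_history(msgs))
# -> <chat history>{role: student
#     speak content : What is an SOP?; role: teacher
#     speak content : A standard operation procedure.; }</chat history>
```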
Prompt/__init__.py
ADDED
@@ -0,0 +1 @@
+from .base_Prompts import *
Prompt/base_Prompts.py
ADDED
@@ -0,0 +1,84 @@
+
+# SOP========================================================================================================
+# "environment_prompt"
+# current_state , self(sop)
+Get_environment_prompt = "f\"Here is the description of the current scenario:{self.current_state.environment_prompt};\\n\""
+
+
+# sop.transit
+#================================================================
+Transit_system_prompt = "f\"{environment_prompt};\\n{judge_system_prompt}\\n\""
+
+# transit chat message
+# "environment_prompt" comes from "Get_environment_prompt" ; "chat_history_message" is from Memory
+Transit_message = "f\"{environment_summary};\\n Here is the chat history:\\n {chat_history_message};\\nHere is the last query you especially need to pay attention to:\\n{query};\\n Here is the relevant conversation: \\n{relevant_history} \\n\\n\""
+
+
+Transit_last_prompt = "f\"{judge_last_prompt}\""
+#sop.transit================================================================
+
+# sop.call
+#================================================================
+# help the controller determine the next role to speak (the {} is the agent role): call_prompt + allocate_component
+Allocate_component = "f\"If it's currently supposed to be speaking for {role}, then output <end>{role}</end>.\\n\""
+
+# environment_prompt comes from "Get_environment_prompt" ; "chat_history_message" is from Memory
+Call_system_prompt = "f\"{environment_prompt};\\n{call_system_prompt};\\n{allocate_prompt}.\\n\""
+
+#
+Call_last_prompt = "f\"Here is the last query you especially need to pay attention to:\\n{query};\\n Here is the relevant conversation :\\n{relevant_history};\\nNow please choose the person to speak according to the following rules :{allocate_prompt};\\nNote: The person whose turn it is now cannot be the same as the person who spoke last time, so {last_name} cannot be output\\n.\""
+
+Call_message = "f\"Here is the chat history:\\n{chat_history_message};\\nHere is the name of the person who spoke last: {last_name}.\\n \""
+#sop.call================================================================
+# SOP========================================================================================================
+
+
+
+
+
+# Memory========================================================================================================
+Single_message = "f\"role: {role} \\n speak content : {content}; \""
+
+Chat_total_message = "f\"<chat history>{{{chat_history}}}</chat history>\""
+# Memory========================================================================================================
+
+
+
+
+
+# Environment========================================================================================================
+Default_environment_summary_system_prompt = "\"\\nYour task is to summarize the historical dialogue records according to the current scene, and summarize the most important information\""
+
+Default_environment_summary_last_prompt = "\"Please make a summary based on the historical chat records, the output format is history summary: {your summary content} \""
+
+Environment_summary_memory = "f\"Here is the information you need to know:\\n\\n\
+Here is the summary of the previous dialogue history:\\n{summary}.\\n\
+Here is the latest conversation record:\\n {chat_history},\\n\
+Here is the relevant chat history you may need:{relevant_history}.\\n\""
+
+Environment_summary_system_prompt = "f\"{environment_prompt};\\n{current_memory};\\n{summary_system_prompt};\\n\""
+
+
+# observe
+Agent_observe_relevant_memory = "f\"\\n{relevant_memory}. \\n\""
+
+
+Agent_observe_memory = "f\"Here's what you need to know(Remember, this is just information, Try not to repeat what's inside):\\nHere is the relevant chat history you may need:{relevant_memory};\\n\
+Here is the previous summary of chat history :\\n{agent.short_term_memory}.\\n\
+Here is the relevant memory :\\n{agent.relevant_memory}.\\n\
+Here is the new chat history:\\n {conversations};\\n\
+\""
+# Environment========================================================================================================
+
+
+
+
+# Agent========================================================================================================
+Agent_summary_system_prompt = "f\"{summary_prompt};\\n Here is the past summary:{self.short_term_memory};\\nHere is the new chat_history:\\n{conversations};\\nPlease summarize based on the above information;\\n\""
+
+Agent_last_prompt = "f\"{last_prompt};Please continue the talk based on your known information; Remember that you just represent {name}, do not speak for others, just speak as normal.\""
+
+Agent_system_prompt = "f\"{system_prompt},\""
+# Agent========================================================================================================
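These constants are f-string *sources*: callers first build the needed local variables and then `eval()` the constant so the placeholders resolve in the caller's scope. A tiny sketch (not part of the commit) of the pattern:

```python
# The stored value is the text of an f-string, not the f-string itself.
Single_message = "f\"role: {role} \\n speak content : {content}; \""

role, content = "teacher", "Welcome to class."
line = eval(Single_message)  # resolves {role}/{content} from local scope
print(line)                  # role: teacher
                             #  speak content : Welcome to class.;
```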
SOP.py
ADDED
@@ -0,0 +1,291 @@
+# coding=utf-8
+# Copyright 2023 The AIWaves Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""standard operation procedure of an LLM Autonomous agent"""
+import random
+from LLM.base_LLM import *
+from State import State
+from utils import extract, get_relevant_history
+from Memory import Memory
+from Prompt import *
+import json
+import os
+
+class SOP:
+    """
+    Responsible for managing the operational processes of all agents
+    """
+
+    # SOP should have args : "states" "relations" "root"
+
+    def __init__(self, **kwargs):
+        self.controller_dict = {}
+        self.LLM = init_LLM("logs"+os.sep+"god",**kwargs)
+
+        self.states = {}
+        self.init_states(kwargs["states"])
+        self.init_relation(kwargs["relations"])
+        for state_name, states_dict in kwargs["states"].items():
+            if state_name != "end_state" and "controller" in states_dict:
+                self.controller_dict[state_name] = states_dict["controller"]
+
+        self.user_names = kwargs["user_names"] if "user_names" in kwargs else []
+        self.root = self.states[kwargs["root"]]
+        self.current_state = self.root
+        self.finish_state_name = (
+            kwargs["finish_state_name"]
+            if "finish_state_name" in kwargs
+            else "end_state"
+        )
+        self.roles_to_names = None
+        self.names_to_roles = None
+        self.finished = False
+
+    @classmethod
+    def from_config(cls, config_path):
+        with open(config_path) as f:
+            config = json.load(f)
+        os.environ.clear()
+        for key,value in config["config"].items():
+            if value!="":
+                os.environ[key] = value
+        sop = SOP(**config)
+        return sop
+
+    def init_states(self, states_dict):
+        for state_name, state_dict in states_dict.items():
+            state_dict["name"] = state_name
+            self.states[state_name] = State(**state_dict)
+
+    def init_relation(self, relations):
+        for state_name, state_relation in relations.items():
+            for idx, next_state_name in state_relation.items():
+                self.states[state_name].next_states[idx] = self.states[next_state_name]
+
+    def transit(self, chat_history, **kwargs):
+        """
+        Determine the next state based on the current situation
+        Return :
+        next_state(State) : the next state
+        """
+        # If it is a single loop node, just keep looping
+        if len(self.current_state.next_states) == 1:
+            next_state = "0"
+
+        # Otherwise, the controller needs to determine which node to enter
+        else:
+            current_state = self.current_state
+            controller_dict = self.controller_dict[current_state.name]
+            relevant_history = kwargs["relevant_history"]
+
+            max_chat_nums = controller_dict["max_chat_nums"] if "max_chat_nums" in controller_dict else 1000
+            if current_state.chat_nums >= max_chat_nums:
+                return self.current_state.next_states["1"]
+
+
+            # Otherwise, let the controller judge whether to end
+            judge_system_prompt = controller_dict["judge_system_prompt"] if "judge_system_prompt" in controller_dict else ""
+            environment_prompt = eval(Get_environment_prompt) if current_state.environment_prompt else ""
+            transit_system_prompt = eval(Transit_system_prompt)
+
+            judge_last_prompt = controller_dict["judge_last_prompt"] if "judge_last_prompt" in controller_dict else ""
+            transit_last_prompt = eval(Transit_last_prompt)
+
+
+
+            environment = kwargs["environment"]
+            environment_summary = environment.shared_memory["short_term_memory"]
+            chat_history_message = Memory.get_chat_history(chat_history)
+            query = chat_history[-1].get_query()
+
+            chat_messages = [
+                {
+                    "role": "user",
+                    "content": eval(Transit_message)
+                }
+            ]
+
+            extract_words = controller_dict["judge_extract_words"] if "judge_extract_words" in controller_dict else "end"
+
+
+            response = self.LLM.get_response(
+                chat_messages, transit_system_prompt, transit_last_prompt, stream=False, **kwargs
+            )
+            next_state = (
+                response if response.isdigit() else extract(response, extract_words)
+            )
+
+            # If nothing can be parsed, continue looping
+            if not next_state.isdigit():
+                next_state = "0"
+
+        next_state = self.current_state.next_states[next_state]
+        return next_state
+
+
+    def route(self, chat_history, **kwargs):
+        """
+        Determine the role that needs to act based on the current situation
+        Return :
+        current_agent(Agent) : the next agent to act
+        """
+
+        agents = kwargs["agents"]
+
+        # Start assigning roles after knowing which state has been entered.
+        # If there is only one role in that state, assign it directly.
+        if len(self.current_state.roles) == 1:
+            next_role = self.current_state.roles[0]
+
+
+
+        # Otherwise the controller decides
+        else:
+            relevant_history = kwargs["relevant_history"]
+            controller_type = (
+                self.controller_dict[self.current_state.name]["controller_type"]
+                if "controller_type" in self.controller_dict[self.current_state.name]
+                else "order"
+            )
+
+
+            # If the controller type is "rule", the LLM assigns the role
+            if controller_type == "rule":
+                controller_dict = self.controller_dict[self.current_state.name]
+
+                call_last_prompt = controller_dict["call_last_prompt"] if "call_last_prompt" in controller_dict else ""
+
+                allocate_prompt = ""
+                roles = list(set(self.current_state.roles))
+                for role in roles:
+                    allocate_prompt += eval(Allocate_component)
+
+                call_system_prompt = controller_dict["call_system_prompt"] if "call_system_prompt" in controller_dict else ""
+                environment_prompt = eval(Get_environment_prompt) if self.current_state.environment_prompt else ""
+                # call_system_prompt + environment + allocate_prompt
+                call_system_prompt = eval(Call_system_prompt)
+
+                query = chat_history[-1].get_query()
+                last_name = chat_history[-1].send_name
+                # last_prompt: note + last_prompt + query
+                call_last_prompt = eval(Call_last_prompt)
+
+
+                chat_history_message = Memory.get_chat_history(chat_history)
+                # Intermediate historical conversation records
+                chat_messages = [
+                    {
+                        "role": "user",
+                        "content": eval(Call_message),
+                    }
+                ]
+
+                extract_words = controller_dict["call_extract_words"] if "call_extract_words" in controller_dict else "end"
+
+                response = self.LLM.get_response(
+                    chat_messages, call_system_prompt, call_last_prompt, stream=False, **kwargs
+                )
+
+                # get next role
+                next_role = extract(response, extract_words)
+
+            # Speak in order
+            elif controller_type == "order":
+                # If there is no begin role, give the turn to the first role
+                if not self.current_state.current_role:
+                    next_role = self.current_state.roles[0]
+                # otherwise move to the next role in order
+                else:
+                    self.current_state.index += 1
+                    self.current_state.index = (self.current_state.index) % len(self.current_state.roles)
+                    next_role = self.current_state.roles[self.current_state.index]
+            # Speak at random
+            elif controller_type == "random":
+                next_role = random.choice(self.current_state.roles)
+
+        # If the next role is not available, pick one at random
+        if next_role not in self.current_state.roles:
+            next_role = random.choice(self.current_state.roles)
+
+        self.current_state.current_role = next_role
+
+        next_agent = agents[self.roles_to_names[self.current_state.name][next_role]]
+
+        return next_agent
+
+    def next(self, environment, agents):
+        """
+        Determine the next state and the agent that needs to act based on the current situation
+        """
+
+        # If it is the first time to enter this state
+
+        if self.current_state.is_begin:
+            agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
+            agent = agents[agent_name]
+            return self.current_state,agent
+
+
+        # get relevant history
+        query = environment.shared_memory["long_term_memory"][-1].content
+        relevant_history = get_relevant_history(
+            query,
+            environment.shared_memory["long_term_memory"][:-1],
+            environment.shared_memory["chat_embeddings"][:-1],
+        )
+        relevant_history = Memory.get_chat_history(relevant_history)
+
+
+
+        next_state = self.transit(
+            chat_history=environment.shared_memory["long_term_memory"][
+                environment.current_chat_history_idx :
+            ],
+            relevant_history=relevant_history,
+            environment=environment,
+        )
+        # If the termination node is entered, terminate directly
+        if next_state.name == self.finish_state_name:
+            self.finished = True
+            return None, None
+
+        self.current_state = next_state
+
+        # If it is the first time to enter the state and there is a begin query,
+        # it is assigned directly to the begin role.
+        if self.current_state.is_begin and self.current_state.begin_role:
+            agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
+            agent = agents[agent_name]
+            return self.current_state,agent
+
+
+        next_agent = self.route(
+            chat_history=environment.shared_memory["long_term_memory"][
+                environment.current_chat_history_idx :
+            ],
+            agents = agents,
+            relevant_history=relevant_history,
+        )
+
+        return self.current_state, next_agent
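A minimal sketch (not part of the commit) of the pieces `SOP.__init__` expects, mirroring the `root`/`relations`/`states` keys used above; the state names, controller prompts, and tasks are illustrative:

```python
sop_config = {
    "config": {"API_KEY": "sk-...", "MAX_CHAT_HISTORY": "5"},  # copied into os.environ by from_config
    "root": "design",
    "relations": {
        # per state: answer index -> next state ("0" usually loops back)
        "design": {"0": "design", "1": "coding"},
        "coding": {"0": "coding", "1": "end_state"},
    },
    "states": {
        "design": {
            "roles": ["architect"],
            "controller": {"max_chat_nums": 4,
                           "judge_system_prompt": "Decide whether the design is done.",
                           "judge_extract_words": "end"},
            "agent_states": {"architect": {"task": {"task": "Sketch the design."}}},
        },
        "coding": {
            "roles": ["coder"],
            "controller": {"max_chat_nums": 4,
                           "judge_system_prompt": "Decide whether the code is done.",
                           "judge_extract_words": "end"},
            "agent_states": {"coder": {"task": {"task": "Write the code."}}},
        },
        "end_state": {"agent_states": {}},
    },
}
```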
State.py
ADDED
@@ -0,0 +1,142 @@
+from Component import *
+
+
+class State:
+    """
+    Sub-scenes of role activities, responsible for storing the tasks that each role needs to do
+    """
+    def __init__(self, **kwargs):
+        self.next_states = {}
+        self.name = kwargs["name"]
+
+        self.environment_prompt = (
+            kwargs["environment_prompt"] if "environment_prompt" in kwargs else ""
+        )
+
+        self.roles = kwargs["roles"] if "roles" in kwargs else (list(kwargs["agent_states"].keys()) if "agent_states" in kwargs else [0])
+        if len(self.roles) == 0:
+            self.roles = [0]
+        self.begin_role = (
+            kwargs["begin_role"] if "begin_role" in kwargs else self.roles[0]
+        )
+        self.begin_query = kwargs["begin_query"] if "begin_query" in kwargs else None
+
+        self.is_begin = True
+
+        self.summary_prompt = (
+            kwargs["summary_prompt"] if "summary_prompt" in kwargs else None
+        )
+        self.current_role = self.begin_role
+        self.components = (
+            self.init_components(kwargs["agent_states"])
+            if "agent_states" in kwargs
+            else {}
+        )
+        self.index = (
+            self.roles.index(self.begin_role) if self.begin_role in self.roles else 0
+        )
+        self.chat_nums = 0
+
+    def init_components(self, agent_states_dict: dict):
+        agent_states = {}
+        for role, components in agent_states_dict.items():
+            component_dict = {}
+            for component, component_args in components.items():
+                if component:
+                    # "style": "role"
+                    if component == "style":
+                        component_dict["style"] = StyleComponent(component_args["role"])
+
+                    # "task"
+                    elif component == "task":
+                        component_dict["task"] = TaskComponent(component_args["task"])
+
+                    # "rule"
+                    elif component == "rule":
+                        component_dict["rule"] = RuleComponent(component_args["rule"])
+
+                    # "demonstrations"
+                    elif component == "demonstrations":
+                        component_dict["demonstrations"] = DemonstrationComponent(
+                            component_args["demonstrations"]
+                        )
+
+                    # "output"
+                    elif component == "output":
+                        component_dict["output"] = OutputComponent(
+                            component_args["output"]
+                        )
+
+                    elif component == "last":
+                        component_dict["last"] = LastComponent(
+                            component_args["last_prompt"]
+                        )
+
+                    # "cot"
+                    elif component == "cot":
+                        component_dict["cot"] = CoTComponent(
+                            component_args["demonstrations"]
+                        )
+                    elif component == "CustomizeComponent":
+                        component_dict["CustomizeComponent"] = CustomizeComponent(
+                            component_args["template"], component_args["keywords"]
+                        )
+
+                    elif component == "system" :
+                        component_dict["system"] = SystemComponent(
+                            component_args["system_prompt"]
+                        )
+
+                    # =================================================================================#
+
+                    # "StaticComponent"
+                    elif component == "StaticComponent":
+                        component_dict["StaticComponent"] = StaticComponent(
+                            component_args["output"]
+                        )
+
+                    # "top_k" "type" "knowledge_base" "system_prompt" "last_prompt"
+                    elif component == "KnowledgeBaseComponent":
+                        component_dict["tool"] = KnowledgeBaseComponent(
+                            component_args["top_k"],
+                            component_args["type"],
+                            component_args["knowledge_path"],
+                        )
+
+                    elif component == "CategoryRequirementsComponent":
+                        component_dict[
+                            "CategoryRequirementsComponent"
+                        ] = CategoryRequirementsComponent(
+                            component_args["information_path"]
+                        )
+
+                    elif component == "FunctionComponent":
+                        component_dict["FunctionComponent"] = FunctionComponent(component_args[""])
+                    # "short_memory_extract_words" "long_memory_extract_words" "system_prompt" "last_prompt"
+                    elif component == "ExtractComponent":
+                        component_dict["ExtractComponent"] = ExtractComponent(
+                            component_args["extract_words"],
+                            component_args["system_prompt"],
+                            component_args["last_prompt"],
+                        )
+                    elif component == "WebSearchComponent":
+                        component_dict["WebSearchComponent"] = WebSearchComponent(
+                            component_args["engine_name"], component_args["api"]
+                        )
+                    elif component == "WebCrawlComponent":
+                        component_dict["WebCrawlComponent"] = WebCrawlComponent(
+                            component_args["name"]
+                        )
+
+                    elif component == "CodeComponent":
+                        component_dict["CodeComponent"] = CodeComponent(
+                            component_args["file_name"], component_args["keyword"]
+                        )
+
+                    # ====================================================
+                    else:
+                        continue
+
+            agent_states[role] = component_dict
+
+        return agent_states
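A small sketch (not part of the commit) of an `agent_states` block that `init_components` can consume; the role name and prompt texts are illustrative:

```python
agent_states = {
    "coder": {
        "task": {"task": "Write the requested module."},
        "rule": {"rule": "Output the code only."},
        "CodeComponent": {"file_name": "main.py", "keyword": "python"},
    },
}

state = State(name="coding", roles=["coder"], agent_states=agent_states)
# state.components["coder"] now maps component names to the initialized
# TaskComponent / RuleComponent / CodeComponent instances.
```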
app.py
ADDED
@@ -0,0 +1,398 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
sys.path.append("../../agents")
|
3 |
+
import os
|
4 |
+
from gradio_base import WebUI, UIHelper, PORT, HOST, Client
|
5 |
+
from gradio_config import GradioConfig as gc
|
6 |
+
from typing import List, Tuple, Any
|
7 |
+
import gradio as gr
|
8 |
+
from Agent import Agent
|
9 |
+
import time
|
10 |
+
import json
|
11 |
+
from utils import cos_sim
|
12 |
+
from design_states import get_desgin_states, get_cot_result, gen_coder_task
|
13 |
+
from gen_utils import *
|
14 |
+
import openai
|
15 |
+
import torch
|
16 |
+
|
17 |
+
def get_embedding(sentence,api_key):
|
18 |
+
openai.api_key = api_key
|
19 |
+
embedding_model = openai.Embedding
|
20 |
+
embed = embedding_model.create(
|
21 |
+
model="text-embedding-ada-002",
|
22 |
+
input=sentence
|
23 |
+
)
|
24 |
+
embed = embed["data"][0]["embedding"]
|
25 |
+
embed = torch.tensor(embed,dtype=torch.float32)
|
26 |
+
if len(embed.shape)==1:
|
27 |
+
embed = embed.unsqueeze(0)
|
28 |
+
return embed
|
29 |
+
|
30 |
+
class CodeUI(WebUI):
|
31 |
+
|
32 |
+
def render_and_register_ui(self):
|
33 |
+
self.agent_name:list = [self.cache["agents_name"]] if isinstance(self.cache["agents_name"], str) else self.cache['agents_name']
|
34 |
+
gc.add_agent(self.agent_name)
|
35 |
+
|
36 |
+
def __init__(
|
37 |
+
self,
|
38 |
+
client_cmd: list,
|
39 |
+
socket_host: str = HOST,
|
40 |
+
socket_port: int = PORT,
|
41 |
+
bufsize: int = 1024,
|
42 |
+
ui_name: str = "CodeUI"
|
43 |
+
):
|
44 |
+
super(CodeUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
|
45 |
+
self.first_recieve_from_client()
|
46 |
+
self.data_history = list()
|
47 |
+
self.caller = 0
|
48 |
+
|
49 |
+
def load_sop_fn(self,sop):
|
50 |
+
return sop.name
|
51 |
+
|
52 |
+
|
53 |
+
def generate_sop_stage_1(self,api_key,proxy,target):
|
54 |
+
os.environ["API_KEY"] = api_key
|
55 |
+
# os.environ["PROXY"] = proxy
|
56 |
+
self.software = "You are a software,aim to write a snake game with python"
|
57 |
+
self.debate = "Simulate a debate competition"
|
58 |
+
self.ecological_environment = "Simulate the interactions and competition among different organisms within an ecosystem"
|
59 |
+
self.software = get_embedding(self.software,api_key)
|
60 |
+
self.debate = get_embedding(self.debate,api_key)
|
61 |
+
self.ecological_environment = get_embedding(self.ecological_environment,api_key)
|
62 |
+
self.embeddings = torch.cat([self.software,self.debate,self.ecological_environment],dim = 0)
|
63 |
+
self.SOP["config"]["API_KEY"] = api_key
|
64 |
+
# self.SOP["config"]["PROXY"] = proxy
|
65 |
+
target_tensor = get_embedding(target,api_key)
|
66 |
+
sim_scores = cos_sim(target_tensor, self.embeddings)[0]
|
67 |
+
top_k_score, top_k_idx = torch.topk(sim_scores,k = 1)
|
68 |
+
if top_k_score > 0.7:
|
69 |
+
self.index = top_k_idx
|
70 |
+
else:
|
71 |
+
self.index = 0
|
72 |
+
target_processed = get_cot_result(target)
|
73 |
+
print("finished!!!!")
|
74 |
+
return target_processed,self.target_finish_flag.update(visible = True)
|
75 |
+
|
76 |
+
def generate_sop_stage_2(self,target):
|
77 |
+
design_states = get_desgin_states(target,self.index)
|
78 |
+
root = design_states[0]["state_name"]
|
79 |
+
self.SOP["root"] = root
|
80 |
+
return design_states,self.state_finish_flag.update(visible = True)
|
81 |
+
|
82 |
+
def generate_sop_stage_3(self,design_states):
|
83 |
+
agents = get_agents(design_states,self.index)
|
84 |
+
relations = get_relations(design_states)
|
85 |
+
self.SOP["relations"] = relations
|
86 |
+
self.SOP["agents"] = agents
|
87 |
+
return agents, self.agent_relation_finish_flag.update(visible = True),self.reminder.update(visible = True)
|
88 |
+
|
89 |
+
def generate_sop_stage_4(self,agents, need_coder,design_states):
|
90 |
+
states = gen_states(design_states,self.index)
|
91 |
+
if "Coder" in need_coder:
|
92 |
+
agents["coder"] = {"style": "professional", "roles": {}}
|
93 |
+
for state_name, state in states.items():
|
94 |
+
if state_name != "end_state":
|
95 |
+
agents["coder"]["roles"][state_name] = "coder"
|
96 |
+
state["roles"].append("coder")
|
97 |
+
task = gen_coder_task(state["environment_prompt"])
|
98 |
+
now_coder = self.coder.copy()
|
99 |
+
now_coder["task"]["task"] = task
|
100 |
+
state["agent_states"]["coder"] = now_coder
|
101 |
+
state["controller"]["max_chat_nums"] = str(
|
102 |
+
int(state["controller"]["max_chat_nums"])+2)
|
103 |
+
for name, agent in state["agent_states"].items():
|
104 |
+
if name != "coder":
|
105 |
+
agent["rule"]["rule"] += "\nEvaluate the code of the coder and provide feedback and response as concise as possible.It is best not to exceed 100 words"
|
106 |
+
agent["task"]["task"] += "\nEvaluate the code of the coder and provide feedback."
|
107 |
+
self.SOP["states"] = states
|
108 |
+
# 将字典写入JSON文件
|
109 |
+
file_name = 'generated_sop.json'
|
110 |
+
with open(file_name, "w") as json_file:
|
111 |
+
json.dump(self.SOP, json_file ,indent=4,ensure_ascii=False)
|
112 |
+
return file_name
|
113 |
+
|
114 |
+
def construct_ui(self):
|
115 |
+
with gr.Blocks(css=gc.CSS) as demo:
|
116 |
+
with gr.Tab(label="SOP generation") as tab1:
|
117 |
+
self.coder = {
|
118 |
+
"task": {
|
119 |
+
"task": ""
|
120 |
+
},
|
121 |
+
"rule": {"rule": "1.write code that conforms to standards like PEP8, is modular, easy to read, and maintainable.\n 2.The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>\n3.Please carefully modify the code based on feedback from others.\n4.Output the code only."},
|
122 |
+
"last": {
|
123 |
+
"last_prompt": "The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python>,Output the code only."
|
124 |
+
}
|
125 |
+
}
|
126 |
+
self.SOP = {
|
127 |
+
"config": {
|
128 |
+
"API_KEY": "sk-*********",
|
129 |
+
"MAX_CHAT_HISTORY": "5",
|
130 |
+
"User_Names": '["User"]',
|
131 |
+
},
|
132 |
+
"root": "state1",
|
133 |
+
"relations": {
|
134 |
+
"state1": {"0": "state1", "1": "state2"},
|
135 |
+
"state2": {"0": "state2", "1": "end_state"},
|
136 |
+
},
|
137 |
+
"agents": None,
|
138 |
+
"states": None,
|
139 |
+
}
|
140 |
+
gr.Markdown("""# Generate Agent""")
|
141 |
+
with gr.Row():
|
142 |
+
api_key = gr.Textbox(label="api_key")
|
143 |
+
proxy = gr.Textbox(label="proxy",visible=False)
|
144 |
+
with gr.Row():
|
145 |
+
requirement = gr.Textbox(value ="A software company aim to write a mine sweeping game",label="requirement")
|
146 |
+
with gr.Row():
|
147 |
+
need_coder = gr.CheckboxGroup(["Coder"],label="Demand for coder")
|
148 |
+
with gr.Row():
|
149 |
+
self.target_finish_flag = gr.Label(value = "The process of completing requirement handling is finished.",visible=False)
|
150 |
+
with gr.Row():
|
151 |
+
self.state_finish_flag = gr.Label(value = "The process of determining the state is completed.",visible=False)
|
152 |
+
with gr.Row():
|
153 |
+
self.agent_relation_finish_flag = gr.Label(value = "The process of initializing the agent and relation is completed.",visible=False)
|
154 |
+
with gr.Row():
|
155 |
+
self.reminder = gr.Markdown("""Generating SOP...""",visible=False)
|
156 |
+
generated_sop = gr.File(label="generated_file")
|
157 |
+
generate_button = gr.Button(label="Generate")
|
158 |
+
target_processed = gr.State()
|
159 |
+
design_states = gr.State()
|
160 |
+
agents = gr.State()
|
161 |
+
generate_button.click(self.generate_sop_stage_1,[api_key,proxy,requirement],[target_processed,self.target_finish_flag]).then(
|
162 |
+
self.generate_sop_stage_2, [target_processed], [design_states,self.state_finish_flag]).then(
|
163 |
+
self.generate_sop_stage_3, [design_states], [agents,self.agent_relation_finish_flag,self.reminder]).then(
|
164 |
+
self.generate_sop_stage_4, [agents, need_coder,design_states], [generated_sop])
|
165 |
+
with gr.Tab(label="Chat") as tab2:
|
166 |
+
uploaded_sop = gr.State()
|
167 |
+
with gr.Row():
|
168 |
+
sop = gr.File(label="upload your custmized SOP")
|
169 |
+
load_sop_btn = gr.Button(value="Load SOP")
|
170 |
+
load_sop_btn.click(self.load_sop_fn, sop,uploaded_sop)
|
171 |
+
with gr.Row():
|
172 |
+
with gr.Column():
|
173 |
+
self.text_api = gr.Textbox(
|
174 |
+
value = self.cache["api_key"],
|
175 |
+
placeholder="openai key",
|
176 |
+
label="Please input valid openai key for gpt-3.5-turbo-16k."
|
177 |
+
)
|
178 |
+
self.radio_mode = gr.Radio(
|
179 |
+
[Client.SINGLE_MODE],
|
180 |
+
value=Client.SINGLE_MODE,
|
181 |
+
interactive=True,
|
182 |
+
label = Client.MODE_LABEL,
|
183 |
+
info = Client.MODE_INFO
|
184 |
+
)
|
185 |
+
self.chatbot = gr.Chatbot(
|
186 |
+
elem_id="chatbot1"
|
187 |
+
)
|
188 |
+
self.btn_next = gr.Button(
|
189 |
+
value="Next Agent",
|
190 |
+
visible=False, elem_id="btn"
|
191 |
+
)
|
192 |
+
with gr.Row():
|
193 |
+
self.text_requirement = gr.Textbox(
|
194 |
+
value=self.cache['requirement'],
|
195 |
+
placeholder="Please enter your content",
|
196 |
+
scale=9,
|
197 |
+
)
|
198 |
+
self.btn_start = gr.Button(
|
199 |
+
value="Start!",
|
200 |
+
scale=1
|
201 |
+
)
|
202 |
+
self.btn_reset = gr.Button(
|
203 |
+
value="Restart",
|
204 |
+
visible=False
|
205 |
+
)
|
206 |
+
|
207 |
+
with gr.Column():
|
208 |
+
self.file = gr.File(visible=False)
|
209 |
+
self.chat_code_show = gr.Chatbot(
|
210 |
+
elem_id="chatbot1",
|
211 |
+
visible=False
|
212 |
+
)
|
213 |
+
|
214 |
+
self.btn_start.click(
|
215 |
+
fn=self.btn_send_when_click,
|
216 |
+
inputs=[self.chatbot, self.text_requirement, self.radio_mode, self.text_api,uploaded_sop],
|
217 |
+
outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
|
218 |
+
).then(
|
219 |
+
fn=self.btn_send_after_click,
|
220 |
+
inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
|
221 |
+
outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
|
222 |
+
)
|
223 |
+
self.text_requirement.submit(
|
224 |
+
fn=self.btn_send_when_click,
|
225 |
+
inputs=[self.chatbot, self.text_requirement, self.text_api,uploaded_sop],
|
226 |
+
outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
|
227 |
+
).then(
|
228 |
+
fn=self.btn_send_after_click,
|
229 |
+
inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
|
230 |
+
outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
|
231 |
+
)
|
232 |
+
self.btn_reset.click(
|
233 |
+
fn=self.btn_reset_when_click,
|
234 |
+
inputs=[],
|
235 |
+
outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
|
236 |
+
).then(
|
237 |
+
fn=self.btn_reset_after_click,
|
238 |
+
inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
|
239 |
+
outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
|
240 |
+
)
|
241 |
+
            self.file.select(
                fn=self.file_when_select,
                inputs=[self.file],
                outputs=[self.chat_code_show]
            )
            self.btn_next.click(
                fn=self.btn_next_when_click,
                inputs=[],
                outputs=[self.btn_next]
            ).then(
                fn=self.btn_send_after_click,
                inputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement],
                outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
            )

        self.demo = demo

    def handle_message(self, history: list, state, agent_name, token, node_name):
        if state % 10 == 0:
            self.data_history.append({agent_name: token})
        elif state % 10 == 1:
            # Same state: append the token to the current bubble.
            self.data_history[-1][agent_name] += token
        elif state % 10 == 2:
            # New state: start a new bubble.
            history.append([None, ""])
            self.data_history.clear()
            self.data_history.append({agent_name: token})
        else:
            assert False, "Invalid state."
        render_data = self.render_bubble(history, self.data_history, node_name, render_node_name=True)
        return render_data

    def btn_send_when_click(self, chatbot, text_requirement, mode, api, sop):
        """
        inputs=[self.chatbot, self.text_requirement, radio, text_api],
        outputs=[self.chatbot, self.btn_start, self.text_requirement, self.btn_reset]
        """
        chatbot = [[UIHelper.wrap_css(content=text_requirement, name="User"), None]]
        yield chatbot,\
            gr.Button.update(visible=True, interactive=False, value="Running"),\
            gr.Textbox.update(visible=True, interactive=False, value=""),\
            gr.Button.update(visible=False, interactive=False)
        self.send_start_cmd({'requirement': text_requirement, "mode": mode, "api_key": api, "uploaded_sop": sop})
        agents, roles_to_names, names_to_roles = Agent.from_config(str(sop))
        agents_name = []
        for i in names_to_roles:
            for j in names_to_roles[i]:
                agents_name.append(j + "(" + names_to_roles[i][j] + ")")
        self.new_render_and_register_ui(agents_name)
        return

    def new_render_and_register_ui(self, agent_names):
        gc.add_agent(agent_names, 0)

    def btn_send_after_click(
        self,
        file,
        history,
        show_code,
        btn_send,
        btn_reset,
        text_requirement
    ):
        """
        outputs=[self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
        """
        if self.caller == 0:
            self.data_history = list()
        self.caller = 0
        receive_server = self.receive_server
        while True:
            data_list: List = receive_server.send(None)
            for item in data_list:
                data = eval(item)
                assert isinstance(data, list)
                state, agent_name, token, node_name = data
                assert isinstance(state, int)
                assert state in [10, 11, 12, 99, 98]
                if state == 99:
                    # finished: expose the generated files for download
                    fs = [self.cache['pwd'] + '/output_code/' + _ for _ in os.listdir(self.cache['pwd'] + '/output_code')]
                    yield gr.File.update(value=fs, visible=True, interactive=True),\
                        history, \
                        gr.Chatbot.update(visible=True),\
                        gr.Button.update(visible=True, interactive=True, value="Start"),\
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Textbox.update(visible=True, interactive=True, placeholder="Please input your requirement", value=""),\
                        gr.Button.update(visible=False)
                    return
                elif state == 98:
                    # single mode: wait for the user to trigger the next agent
                    yield gr.File.update(visible=False),\
                        history, \
                        gr.Chatbot.update(visible=False),\
                        gr.Button.update(visible=True, interactive=False),\
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Textbox.update(visible=True, interactive=False),\
                        gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
                    return
                history = self.handle_message(history, state, agent_name, token, node_name)
                yield gr.File.update(visible=False),\
                    history, \
                    gr.Chatbot.update(visible=False),\
                    gr.Button.update(visible=True, interactive=False),\
                    gr.Button.update(visible=False, interactive=False),\
                    gr.Textbox.update(visible=True, interactive=False),\
                    gr.Button.update(visible=False)

    def btn_reset_when_click(self):
        """
        inputs = []
        outputs = [self.file, self.chatbot, self.chat_code_show, self.btn_start, self.btn_reset, self.text_requirement, self.btn_next]
        """
        return gr.File.update(visible=False),\
            None, None, gr.Button.update(value="Restarting...", interactive=False),\
            gr.Button.update(value="Restarting...", interactive=False),\
            gr.Textbox.update(value="Restarting", interactive=False),\
            gr.Button.update(visible=False)

    def btn_reset_after_click(
        self,
        file,
        chatbot,
        show_code,
        btn_send,
        btn_reset,
        text_requirement
    ):
        self.reset()
        self.first_recieve_from_client(reset_mode=True)
        return gr.File.update(value=None, visible=False),\
            gr.Chatbot.update(value=None, visible=True),\
            gr.Chatbot.update(value=None, visible=False),\
            gr.Button.update(value="Start", visible=True, interactive=True),\
            gr.Button.update(value="Restart", interactive=False, visible=False),\
            gr.Textbox.update(value=self.cache['requirement'], interactive=True, visible=True),\
            gr.Button.update(visible=False)

    def file_when_select(self, file):
        CODE_PREFIX = "```python\n{}\n```"
        with open(file.name, "r", encoding='utf-8') as f:
            contents = f.readlines()
        codes = "".join(contents)
        return [[CODE_PREFIX.format(codes), None]]

    def btn_next_when_click(self):
        self.caller = 1  # keep the accumulated value in self.data_history
        self.send_message("nothing")
        time.sleep(0.5)
        yield gr.Button.update(visible=False)
        return


if __name__ == '__main__':
    ui = CodeUI(client_cmd=["python3", "gradio_backend.py"])
    ui.construct_ui()
    ui.run(share=True)
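Aside: `btn_send_after_click` above decodes messages of the form `str([state, agent_name, token, node_name])` streamed from the backend, where 99 means the run finished, 98 means single mode is waiting for the user, and 10/11/12 control how tokens are grouped into chat bubbles. The following standalone sketch (not part of the repo; the sample messages are made up) replays that protocol without Gradio:

from typing import List

def replay(messages: List[str]) -> None:
    bubbles: List[List[str]] = []                 # one inner list per chat bubble
    for item in messages:
        state, agent, token, node = eval(item)    # the backend sends str([...])
        if state == 99:                           # whole run finished
            print("done:", bubbles)
            return
        if state == 98:                           # single mode: wait for the user
            print(f"Next Agent: {agent} | Next Node: {node}")
            return
        if state % 10 == 2 or not bubbles:        # new SOP state: open a new bubble
            bubbles.append([f"{agent}: {token}"])
        elif state % 10 == 0:                     # new utterance inside the current bubble
            bubbles[-1].append(f"{agent}: {token}")
        else:                                     # state % 10 == 1: stream a token
            bubbles[-1][-1] += token

replay([
    "[12, 'Bob(Architect_1)', 'Here is ', 'design_state']",
    "[11, 'Bob(Architect_1)', 'the framework.', 'design_state']",
    "[99, ' ', ' ', 'done']",
])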
config.json
ADDED
@@ -0,0 +1,501 @@
{
  "config": {
    "API_KEY": "sk-************",
    "MAX_CHAT_HISTORY": "3",
    "TOP_K": "0"
  },
  "LLM_type": "OpenAI",
  "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/god"},
  "root": "design_state",
  "relations": {
    "design_state": {"0": "design_state", "1": "develop_state"},
    "develop_state": {"0": "develop_state", "1": "debug_state"},
    "debug_state": {"0": "debug_state", "1": "end_state"}
  },
  "agents": {
    "Alice": {"style": "august", "roles": {"design_state": "Boss", "develop_state": "Boss", "debug_state": "Boss"}},
    "Bob": {"style": "professional", "roles": {"design_state": "Architect_1"}},
    "Belle": {"style": "professional", "roles": {"design_state": "Architect_2"}},
    "Candy": {"style": "professional", "roles": {"develop_state": "Developer_1", "debug_state": "Developer_1"}},
    "Carl": {"style": "professional", "roles": {"develop_state": "Developer_2", "debug_state": "Developer_2"}},
    "David": {"style": "professional", "roles": {"debug_state": "Debugger"}},
    "Eva": {"style": "professional", "roles": {"debug_state": "Coder"}},
    "Michael": {"style": "professional", "roles": {"design_state": "Leader", "develop_state": "Leader", "debug_state": "Leader"}}
  },
  "states": {
    "end_state": {"agent_states": {}},
    "design_state": {
      "LLM_type": "OpenAI",
      "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/god"},
      "roles": ["Boss", "Architect_1", "Leader", "Architect_2"],
      "controller": {"controller_type": "order", "max_chat_nums": 8, "judge_system_prompt": "", "judge_last_prompt": "", "judge_extract_words": "end"},
      "environment_prompt": "Imagine a scenario where the boss has presented a requirement. The architect is tasked with proposing a framework based on this requirement. The leader's role is to provide feedback on the architect's proposal, and another architect will finalize the framework based on the leader's comments. The target program is: <target>a snake game with python</target>",
      "begin_role": "Boss",
      "begin_query": "Please write code for the target game",
      "agent_states": {
        "Boss": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Boss"},
          "style": {"role": "Boss"},
          "task": {"task": "Present the project requirements to the team and articulate the project's objectives clearly."},
          "rule": {"rule": "It's crucial to communicate the project's objectives, key deliverables, and any specific requirements comprehensively. This ensures that the entire team understands the project's significance and direction."},
          "demonstrations": {"demonstrations": "Prepare a comprehensive project overview that encompasses the project's scope, objectives, any constraints, and desired outcomes. This document should outline the required features, performance goals, and budget constraints, for example."}
        },
        "Architect_1": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Architect"},
          "style": {"role": "Architect", "style": "professional"},
          "task": {"task": "Propose a Python framework based on the BOSS's requirements."},
          "rule": {"rule": "Thoroughly analyze the project requirements, evaluate potential technologies, and select suitable design principles to meet the project's needs."},
          "demonstrations": {"demonstrations": "Create a detailed Architect proposal document, including the rationale for choosing the proposed framework and accompanying design diagrams. For instance, provide an Architect diagram outlining the framework's high-level structure and a detailed explanation of why this architecture was selected."}
        },
        "Leader": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Leader"},
          "style": {"role": "Leader", "style": "professional"},
          "task": {"task": "Evaluate the architecture proposal and provide specific feedback for improvement."},
          "rule": {"rule": "Offer constructive feedback aligned with the project's objectives to enhance the proposed framework."},
          "demonstrations": {"demonstrations": "Review Architect1's proposal meticulously and provide written feedback. Ensure the feedback is specific and includes actionable suggestions for improvement. For instance, you can point out areas that need improvement and explain how suggested changes align with project goals."}
        },
        "Architect_2": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Architect"},
          "style": {"role": "Architect_2", "style": "professional"},
          "task": {"task": "Finalize the Python framework based on Leader's feedback."},
          "rule": {"rule": "Integrate Leader's feedback into the Architect proposal and make necessary adjustments to refine the framework."},
          "demonstrations": {"demonstrations": "Revise the Architect proposal document to reflect the improvements suggested by Leader. Provide clear documentation of the changes made, including any revised design diagrams and explanations for incorporating Leader's feedback."},
          "ExtractComponent": {"extract_words": ["system"], "system_prompt": "Please extract the modified system as completely as possible.", "last_prompt": ""}
        }
      }
    },
    "develop_state": {
      "LLM_type": "OpenAI",
      "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/god"},
      "roles": ["Boss", "Developer_1", "Leader", "Developer_2"],
      "controller": {"controller_type": "order", "max_chat_nums": 8, "judge_system_prompt": "", "judge_last_prompt": "", "judge_extract_words": "end"},
      "environment_prompt": "In this scenario, the boss has provided a requirement. The developer's task is to write code based on the architecture proposed by the architect. The leader evaluates the written code for elegance, readability, and functionality, providing feedback. Another developer makes necessary modifications to the code. The target program is: <target>a snake game with python</target>",
      "begin_role": "Boss",
      "begin_query": "Please write code for the target game",
      "agent_states": {
        "Boss": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Boss"},
          "style": {"role": "Boss", "style": "august"},
          "task": {"task": "Communicate the project requirements and vision to the team."},
          "rule": {"rule": "Clearly define the project's objectives, functionality, and any specific requirements."},
          "demonstrations": {"demonstrations": "Create a detailed project brief that outlines the scope, objectives, and specific features required for the project in a clear and concise manner. This document should provide a comprehensive understanding of what the project aims to achieve."}
        },
        "Developer_1": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Developer"},
          "style": {"role": "Developer", "style": "professional"},
          "task": {"task": "Write elegant, readable, extensible, and efficient code."},
          "rule": {"rule": "1. Write code that conforms to standards like PEP8, is modular, easy to read, and maintainable. 2. Output the code only; ensure that the code adheres to the Architect guidelines, coding standards, and best practices. 3. The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"},
          "demonstrations": {"demonstrations": "Follow the Architect proposal closely while writing code. Document the code adequately, use meaningful variable names, and maintain proper code structure. For example, provide code snippets that demonstrate adherence to coding standards and Architect design. Output the code only."},
          "CustomizeComponent": {"template": "You need to write code based on the following framework: {system}", "keywords": ["system"]},
          "last": {"last_prompt": "The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"}
        },
        "Leader": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Leader"},
          "style": {"role": "Leader", "style": "professional"},
          "task": {"task": "Evaluate the written code for elegance, readability, and functionality."},
          "rule": {"rule": "Provide constructive feedback that helps improve code quality and alignment with project goals."},
          "demonstrations": {"demonstrations": "Thoroughly review the code written by Developer1. Offer feedback on code organization, naming conventions, code efficiency, and any functional improvements needed. For instance, provide specific examples of code sections that require refinement and explain how these changes enhance code quality."}
        },
        "Developer_2": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Developer"},
          "style": {"role": "Developer", "style": "professional"},
          "task": {"task": "Make necessary modifications to the code based on Leader's feedback."},
          "rule": {"rule": "1. Make code modifications that conform to standards like PEP8, are modular, easy to read, and maintainable. 2. Output the code only; incorporate Leader's feedback into the code and address any issues or improvements identified. 3. The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"},
          "demonstrations": {"demonstrations": "Review the feedback provided by Leader and apply the suggested modifications to the code. Document the changes made and ensure that the updated code aligns with the project's goals and Architect guidelines. Provide examples of code segments before and after the modifications to illustrate the improvements."},
          "last": {"last_prompt": "The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"},
          "ExtractComponent": {"extract_words": ["code"], "system_prompt": "Please extract the code as completely as possible.", "last_prompt": ""}
        }
      }
    },
    "debug_state": {
      "LLM_type": "OpenAI",
      "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/god"},
      "roles": ["Boss", "Debugger", "Developer_1", "Leader", "Developer_2"],
      "controller": {"controller_type": "order", "max_chat_nums": 10, "judge_system_prompt": "", "judge_last_prompt": "", "judge_extract_words": "end"},
      "environment_prompt": "In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file. The target program is: <target>a snake game with python</target>",
      "begin_role": "Boss",
      "begin_query": "Please make the code both runnable and more efficient.",
      "agent_states": {
        "Boss": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Boss"},
          "style": {"role": "Boss", "style": "august"},
          "task": {"task": "Communicate the project requirements and vision to the team."},
          "rule": {"rule": "Clearly define the project's objectives, functionality, and any specific requirements."},
          "demonstrations": {"demonstrations": "Create a detailed project brief that outlines the scope, objectives, and specific features required for the project in a clear and concise manner. This document should provide a comprehensive understanding of what the project aims to achieve."}
        },
        "Debugger": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Debugger"},
          "style": {"role": "Debugger", "style": "professional"},
          "task": {"task": "Simulate a compiler to determine whether the code is runnable and provide feedback."},
          "rule": {"rule": "Thoroughly test the code for syntax errors, logical issues, and other potential problems. Offer detailed feedback that helps the developer understand and resolve any issues. Please pay special attention to logic bugs in the game, such as whether the game can run normally."},
          "demonstrations": {"demonstrations": "Run the code provided by Developer1 through a simulated compiler or debugger. Document any errors, warnings, or issues encountered during the process. Provide feedback that includes specific examples of code problems and suggested solutions."},
          "CustomizeComponent": {"template": "You need to run the following code: {code}, through a simulated compiler or debugger. Document any errors, warnings, or issues encountered during the process. Provide feedback that includes specific examples of code problems and suggested solutions.", "keywords": ["code"]}
        },
        "Developer_1": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Developer"},
          "style": {"role": "Developer", "style": "professional"},
          "task": {"task": "Write elegant, readable, extensible, and efficient code based on the debugger's feedback."},
          "rule": {"rule": "1. Write code that conforms to standards like PEP8, is modular, easy to read, and maintainable.\n2. Address the issues identified by the Debugger and ensure that the code meets the project's requirements.\n3. The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"},
          "demonstrations": {"demonstrations": "Review the feedback provided by the Debugger and make the necessary modifications to the code. Document the changes made and ensure that the code is free of errors and warnings. Provide examples of code segments before and after the modifications. Output the code only."},
          "last": {"last_prompt": "The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"}
        },
        "Leader": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Leader"},
          "style": {"role": "Leader", "style": "professional"},
          "task": {"task": "Evaluate whether the final code meets the boss's requirements and provide feedback for further modifications."},
          "rule": {"rule": "Assess the code's alignment with the project's objectives, functionality, and quality standards. Offer constructive feedback to enhance the code's overall quality."},
          "demonstrations": {"demonstrations": "Carefully review the code provided by Developer1 after addressing Debugger's feedback. Offer feedback on code organization, readability, and any functional improvements needed. Provide specific examples of code sections that require further refinement and explain how these changes enhance the code's quality."}
        },
        "Developer_2": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Developer"},
          "style": {"role": "Developer", "style": "professional"},
          "task": {"task": "Make further modifications to the code based on Leader's feedback."},
          "rule": {"rule": "1. Incorporate Leader's feedback into the code and address any issues or improvements identified; make code modifications that conform to standards like PEP8, are modular, easy to read, and maintainable. 2. The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"},
          "demonstrations": {"demonstrations": "Review the feedback provided by Leader and apply the suggested modifications to the code. Document the changes made and ensure that the updated code aligns with the project's goals and quality standards. Provide examples of code segments before and after the modifications. Output the code only."},
          "last": {"last_prompt": "The output strictly follows the following format: <title>{the file name}</title>\n<python>{the target code}</python>"},
          "ExtractComponent": {"extract_words": ["code"], "system_prompt": "Please extract the code for the target game; it must be fully operational.", "last_prompt": ""}
        },
        "Coder": {
          "LLM_type": "OpenAI",
          "LLM": {"temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/Coder"},
          "style": {"role": "Coder", "style": "professional"},
          "CodeComponent": {"file_name": "rps_game.py", "keyword": "code"}
        }
      }
    }
  }
}
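The "relations" table above is the SOP transition graph: starting from "root", each state maps the controller's judgement ("0" = stay in the state, "1" = advance) to a successor, terminating at "end_state". A minimal sketch (not part of the repo) of walking that chain, assuming config.json is in the working directory:

import json

with open("config.json", "r", encoding="utf-8") as f:
    config = json.load(f)

state = config["root"]                     # "design_state"
while state != "end_state":
    nxt = config["relations"][state]["1"]  # follow the "advance" edge
    print(state, "->", nxt)
    state = nxt
# design_state -> develop_state
# develop_state -> debug_state
# debug_state -> end_state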
design_states.py
ADDED
@@ -0,0 +1,94 @@
import sys
sys.path.append("../")
import re
from LLM.base_LLM import *
from utils import extract
from muti_prompts import *

llm = OpenAILLM()
# design state

def gen_coder_task(environment_prompt):
    chat_history = [{"role": "user", "content": f"<target>{environment_prompt}</target>"}]
    response = llm.get_response(chat_history, gen_coder_task_system_prompt)
    response = extract(response, "task")
    print(f"coder_task = {response}")
    return response


def get_cot_result(target):
    chat_history = [{"role": "user", "content": f"<target>{target}</target>"}]
    response = llm.get_response(chat_history, design_states_cot_system_prompt)
    print(response)
    return response

def get_desgin_states(target, index):
    chat_history = [{"role": "user", "content": f"<target>{target}</target>"}]
    design_state_system_prompt = get_design_state_system_prompt(index)
    response = llm.get_response(chat_history, system_prompt=design_state_system_prompt)
    print(response)
    # Extract the <state> blocks with a regular expression
    pattern = r'<state>(.*?)<\/state>'
    states = re.findall(pattern, response, re.DOTALL)

    # Build a list of dictionaries, one per state
    result_list = []
    for state in states:
        state_name = extract(state, "state_name")
        roles = extract(state, "roles")
        environment_prompt = extract(state, "describe")

        # Create the dictionary and append it to the result list
        state_dict = {
            "state_name": state_name,
            "environment_prompt": environment_prompt,
            "roles": roles.split(" ")
        }
        result_list.append(state_dict)

    # Print the results
    print("design states")
    for item in result_list:
        print(item)
    return result_list

def gen_agent_style(agents, design_states, index):
    agents_styles = {}
    scene = ""
    design_agents_style_system_prompt = get_design_agents_style_system_prompt(index)
    for design_state in design_states:
        scene += design_state["environment_prompt"] + "\n"
    for agent in agents:
        chat_history = [{"role": "user", "content": f"<scene>{scene}</scene>,<target>{agent}</target>"}]
        style = llm.get_response(chat_history, design_agents_style_system_prompt)
        style = extract(style, "style")
        agents_styles[agent] = style
    print(agents_styles)
    return agents_styles


def gen_agent_state(agent, environment_prompt, index):
    design_agent_state_system_prompt = get_design_agent_state_system_prompt(index)
    agent_state = {}
    chat_history = [{"role": "user", "content": f"<scene>{environment_prompt}</scene>,<target>{agent}</target>"}]
    response = llm.get_response(chat_history, design_agent_state_system_prompt)
    role = extract(response, "role")
    task = extract(response, "task")
    rule = extract(response, "rule")
    demonstrations = extract(response, "demonstrations")
    agent_state["style"] = {"role": role}
    agent_state["task"] = {"task": task}
    agent_state["rule"] = {"rule": rule}
    agent_state["demonstrations"] = {"demonstrations": demonstrations}
    print(agent_state)
    return agent_state

def gen_begin_role_query(environment_prompt, roles, index):
    roles = " ".join(roles)
    design_begin_role_query_system_prompt = get_design_begin_role_query_system_prompt(index)
    chat_history = [{"role": "user", "content": f"<scene>{environment_prompt}</scene>\n<roles>{roles}</roles>"}]
    response = llm.get_response(chat_history, design_begin_role_query_system_prompt)
    begin_role = extract(response, "begin_role")
    begin_query = extract(response, "begin_query")
    print(f"{begin_role}:{begin_query}")
    return begin_role, begin_query
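For illustration, here is how the <state> parsing in get_desgin_states behaves on a hand-written sample response; `extract` below is a simplified stand-in for `utils.extract`, and the response text is made up:

import re

response = (
    "<state><state_name>design_state</state_name>"
    "<roles>Boss Architect_1</roles>"
    "<describe>The boss presents a requirement.</describe></state>"
)

def extract(text, tag):  # simplified stand-in for utils.extract
    m = re.search(rf"<{tag}>(.*?)</{tag}>", text, re.DOTALL)
    return m.group(1) if m else ""

for state in re.findall(r"<state>(.*?)</state>", response, re.DOTALL):
    print({
        "state_name": extract(state, "state_name"),
        "roles": extract(state, "roles").split(" "),
        "environment_prompt": extract(state, "describe"),
    })
# {'state_name': 'design_state', 'roles': ['Boss', 'Architect_1'],
#  'environment_prompt': 'The boss presents a requirement.'}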
evolve.py
ADDED
@@ -0,0 +1,17 @@
# coding=utf-8
# Copyright 2023 The AIWaves Inc. team.

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Self-evolution of an LLM autonomous agent."""
gen_utils.py
ADDED
@@ -0,0 +1,59 @@
from design_states import gen_agent_style, gen_agent_state, gen_begin_role_query

def get_agent_names(design_states):
    agents_name = set()
    for design_state in design_states:
        for role in design_state["roles"]:
            agents_name.add(role)
    return list(agents_name)

def get_final_agents(agents, design_states):
    final_agents = {}
    for agent, style in agents.items():
        final_agents[agent] = {"style": "", "roles": {}}
        final_agents[agent]["style"] = style
        for design_state in design_states:
            if agent in design_state["roles"]:
                final_agents[agent]["roles"][design_state["state_name"]] = agent
    return final_agents

def get_agents(design_states, index):
    agents = get_agent_names(design_states)
    agents = gen_agent_style(agents, design_states, index)
    agents = get_final_agents(agents, design_states)
    return agents

def get_relations(design_states):
    relations = {}
    n = len(design_states)
    for i in range(n):
        relations[design_states[i]["state_name"]] = {}
        relations[design_states[i]["state_name"]]["0"] = design_states[i]["state_name"]
        relations[design_states[i]["state_name"]]["1"] = design_states[i + 1]["state_name"] if i != n - 1 else "end_state"
    return relations


def gen_states(design_states, index):
    states = {"end_state": {
        "agent_states": {}
    }}
    for design_state in design_states:
        state_name = design_state["state_name"]
        environment_prompt = design_state["environment_prompt"]
        roles = design_state["roles"]
        max_chat_nums = 1 if len(roles) == 1 else len(roles) * 2
        states[state_name] = {"controller": {"controller_type": "order", "max_chat_nums": max_chat_nums}, "environment_prompt": environment_prompt, "roles": roles}
        agent_state = {}
        for role in roles:
            agent_state[role] = gen_agent_state(role, environment_prompt, index)
        states[state_name]["agent_states"] = agent_state
        begin_role, begin_query = gen_begin_role_query(environment_prompt, roles, index)
        begin_role = "_".join(begin_role.split(" "))
        print(begin_role)
        if begin_role not in roles:
            begin_role = begin_role + "_1"
        if begin_role in roles:
            states[state_name]["begin_role"] = begin_role
            states[state_name]["begin_query"] = begin_query
    return states
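As a usage sketch, here is the mapping get_relations produces for a hypothetical two-state design: every state loops to itself on "0" and advances on "1", with the last state feeding into "end_state". The helper below mirrors get_relations on state names only, so the snippet runs without importing the module (design_states.py instantiates an LLM client at import time):

def relations_of(names):  # illustrative mirror of get_relations
    return {
        name: {"0": name, "1": names[i + 1] if i + 1 < len(names) else "end_state"}
        for i, name in enumerate(names)
    }

assert relations_of(["design_state", "develop_state"]) == {
    "design_state": {"0": "design_state", "1": "develop_state"},
    "develop_state": {"0": "develop_state", "1": "end_state"},
}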
gradio_backend.py
ADDED
@@ -0,0 +1,125 @@
import os
import argparse
import sys
sys.path.append("../../agents")
from utils import extract
from SOP import SOP
from Agent import Agent
from Environment import Environment
from Memory import Memory
from gradio_base import Client, convert2list4agentname

def process(action):
    response = action.response
    send_name = action.name
    send_role = action.role
    if not action.is_user:
        print(f"{send_name}({send_role}):{response}")
    memory = Memory(send_role, send_name, response)
    return memory

def gradio_process(action, current_state):
    response = action.response
    all = ""
    for i, res in enumerate(response):
        all += res
        state = 10
        if action.is_user:
            state = 30
        elif action.state_begin:
            state = 12
            action.state_begin = False
        elif i > 0:
            state = 11
        send_name = f"{action.name}({action.role})"
        Client.send_server(str([state, send_name, res, current_state.name]))
        if state == 30:
            # print("client: waiting for user input")
            data: list = next(Client.receive_server)
            content = ""
            for item in data:
                if item.startswith("<USER>"):
                    content = item.split("<USER>")[1]
                    break
            # print(f"client: received `{content}` from server.")
            action.response = content
            break
    else:
        action.response = all

def init(config):
    if not os.path.exists("logs"):
        os.mkdir("logs")
    sop = SOP.from_config(config)
    agents, roles_to_names, names_to_roles = Agent.from_config(config)
    environment = Environment.from_config(config)
    environment.agents = agents
    environment.roles_to_names, environment.names_to_roles = roles_to_names, names_to_roles
    sop.roles_to_names, sop.names_to_roles = roles_to_names, names_to_roles
    for name, agent in agents.items():
        agent.environment = environment
    return agents, sop, environment

def block_when_next(current_agent, current_state):
    if Client.LAST_USER:
        assert not current_agent.is_user
        Client.LAST_USER = False
        return
    if current_agent.is_user:
        # if the next turn is the user's, we don't handle it here
        Client.LAST_USER = True
        return
    if Client.FIRST_RUN:
        Client.FIRST_RUN = False
    else:
        # block the current process
        if Client.mode == Client.SINGLE_MODE:
            Client.send_server(str([98, f"{current_agent.name}({current_agent.state_roles[current_state.name]})", " ", current_state.name]))
            data: list = next(Client.receive_server)

def run(agents, sop, environment):
    while True:
        current_state, current_agent = sop.next(environment, agents)
        if sop.finished:
            print("finished!")
            Client.send_server(str([99, ' ', ' ', 'done']))
            os.environ.clear()
            break
        block_when_next(current_agent, current_state)
        action = current_agent.step(current_state)  # component_dict = current_state[self.role[current_node.name]]; current_agent.compile(component_dict)
        gradio_process(action, current_state)
        memory = process(action)
        environment.update_memory(memory, current_state)

def prepare(agents, sop, environment):
    client = Client()
    Client.send_server = client.send_message

    requirement = "Let's start!!"
    client.send_message(
        {
            "requirement": requirement,
            "agents_name": convert2list4agentname(sop)[0],
            # "only_name": DebateUI.convert2list4agentname(sop)[1],
            "only_name": convert2list4agentname(sop)[0],
            "default_cos_play_id": -1,
            "api_key": os.environ["API_KEY"]
        }
    )
    client.listening_for_start_()
    client.mode = Client.mode = client.cache["mode"]
    os.environ["API_KEY"] = client.cache["api_key"]
    uploaded_sop = Client.cache['uploaded_sop']
    agents, sop, environment = init(uploaded_sop)
    run(agents, sop, environment)

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='A demo of chatbot')
    parser.add_argument('--agent', type=str, help='path to SOP json', default="config.json")
    args = parser.parse_args()

    agents, sop, environment = init(args.agent)
    # add================================
    prepare(agents, sop, environment)
    # ===================================
    # run(agents,sop,environment)
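For reference, gradio_process above tags each streamed chunk with a state code that the frontend's btn_send_after_click decodes. A condensed, illustrative mapping (the function name is hypothetical and not part of the repo):

def chunk_state(is_user: bool, state_begin: bool, chunk_index: int) -> int:
    if is_user:
        return 30      # the frontend must collect user input
    if state_begin:
        return 12      # first chunk of a new SOP state: start a new bubble
    if chunk_index > 0:
        return 11      # continuation chunk: append to the current line
    return 10          # first chunk of this utterance

assert chunk_state(False, True, 0) == 12
assert chunk_state(False, False, 3) == 11
assert chunk_state(True, False, 0) == 30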
gradio_base.py
ADDED
@@ -0,0 +1,574 @@
# coding=utf-8
# Copyright 2023 The AIWaves Inc. team.

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Emoji comes from this website:
# https://emojipedia.org/
import subprocess
from gradio_config import GradioConfig as gc
import gradio as gr
from typing import List, Tuple, Any
import time
import socket
import psutil
import os
from abc import abstractmethod
import openai

def test_apikey_connection(api_key=None, model="gpt-3.5-turbo"):
    openai.api_key = api_key if api_key is not None else os.environ["API_KEY"]
    if "PROXY" in os.environ:
        openai.proxy = os.environ["PROXY"]
    messages = [{"role": "user", "content": "what's your name?"}]
    try:
        response = openai.ChatCompletion.create(
            model=model,
            messages=messages,
        )
        return True
    except:
        return False

def convert2list4agentname(sop):
    """
    Extract the agent names of all states.
    return:
        only_name: [name1, name2, ...]
        agent_name: [name1(role1), name2(role2), ...]
    """
    only_name = []
    agent_name = []
    roles_to_names = sop.roles_to_names
    for state_name, roles_names in roles_to_names.items():
        for role, name in roles_names.items():
            agent_name.append(f"{name}({role})")
            only_name.append(name)
    agent_name = list(set(agent_name))
    agent_name.sort()
    return agent_name, only_name

def is_port_in_use(port):
    """Check whether the port is already in use."""
    for conn in psutil.net_connections():
        if conn.laddr.port == port:
            return True
    return False

def check_port(port):
    """Determine an available port."""
    if os.path.isfile("PORT.txt"):
        port = int(open("PORT.txt", "r", encoding='utf-8').readlines()[0])
    else:
        for i in range(10):
            if not is_port_in_use(port + i):
                port += i
                break
        with open("PORT.txt", "w") as f:
            f.writelines(str(port))
    return port

# Special markers used to frame inter-process messages
SPECIAL_SIGN = {
    "START": "<START>",
    "SPLIT": "<SELFDEFINESEP>",
    "END": "<ENDSEP>"
}
HOST = "127.0.0.1"
# The starting port number for the search.
PORT = 15000
PORT = check_port(PORT)

def print_log(message: str):
    print(f"[{time.ctime()}]{message}")

global_dialog = {
    "user": [],
    "agent": {},
    "system": []
}

class UIHelper:
    """Static class"""

    @classmethod
    def wrap_css(cls, content, name) -> str:
        """
        Description:
            Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
        Input:
            content: Output content
            name: Whose output it is
        Output:
            HTML
        """
        assert name in gc.OBJECT_INFO, \
            f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
        output = ""
        info = gc.OBJECT_INFO[name]
        if info["id"] == "USER":
            output = gc.BUBBLE_CSS["USER"].format(
                info["bubble_color"],  # Background color
                info["text_color"],    # Color of the agent's name
                name,                  # Agent name
                info["text_color"],    # Font color
                info["font_size"],     # Font size
                content,               # Content
                info["head_url"]       # URL of the avatar
            )
        elif info["id"] == "SYSTEM":
            output = gc.BUBBLE_CSS["SYSTEM"].format(
                info["bubble_color"],  # Background color
                info["font_size"],     # Font size
                info["text_color"],    # Font color
                name,                  # Agent name
                content                # Content
            )
        elif info["id"] == "AGENT":
            output = gc.BUBBLE_CSS["AGENT"].format(
                info["head_url"],      # URL of the avatar
                info["bubble_color"],  # Background color
                info["text_color"],    # Font color
                name,                  # Agent name
                info["text_color"],    # Font color
                info["font_size"],     # Font size
                content,               # Content
            )
        else:
            assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
        return output

    @classmethod
    def novel_filter(cls, content, agent_name):

        """For example, <CONTENT>...</CONTENT> should be rendered as CONTENT: ..."""
        IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
        if IS_RECORDER:
            BOLD_FORMAT = """<div style="color: #000000; display:inline">
<b>{}</b>
</div>
<span style="color: black;">
"""
        else:
            BOLD_FORMAT = "<b>{}</b>"
        CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
<b>{}</b>
</div>
"""
        START_FORMAT = "<{}>"
        END_FORMAT = "</{}>"
        mapping = {
            "TARGET": "🎯 Current Target: ",
            "NUMBER": "🍖 Required Number: ",
            "THOUGHT": "🤔 Overall Thought: ",
            "FIRST NAME": "⚪ First Name: ",
            "LAST NAME": "⚪ Last Name: ",
            "ROLE": "🤠 Character Properties: ",
            "RATIONALES": "🤔 Design Rationale: ",
            "BACKGROUND": "🚊 Character Background: ",
            "ID": "🔴 ID: ",
            "TITLE": "🧩 Chapter Title: ",
            "ABSTRACT": "🎬 Abstract: ",
            "CHARACTER INVOLVED": "☃️ Character Involved: ",
            "ADVICE": "💬 Advice:",
            "NAME": "📛 Name: ",
            "GENDER": "👩👩👦👦 Gender: ",
            "AGE": "⏲️ Age: ",
            "WORK": "👨🔧 Work: ",
            "PERSONALITY": "🧲 Character Personality: ",
            "SPEECH STYLE": "🗣️ Speaking Style: ",
            "RELATION": "🏠 Relation with Others: ",
            "WORD COUNT": "🎰 Word Count: ",
            "CHARACTER DESIGN": "📈 Character Design: ",
            "CHARACTER REQUIRE": "📈 Character Require: ",
            "CHARACTER NAME": "📈 Character Naming Analysis: ",
            "CHARACTER NOW": "📈 Character Now: ",
            "OUTLINE DESIGN": "📈 Outline Design: ",
            "OUTLINE REQUIRE": "📈 Outline Require: ",
            "OUTLINE NOW": "📈 Outline Now: ",
            "SUB TASK": "🎯 Current Sub Task: ",
            "CHARACTER ADVICE": "💬 Character Design Advice: ",
            "OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
            "OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
            "OUTLINE ADVICE": "💬 Outline Advice: ",
            "NEXT": "➡️ Next Advice: ",
            "TOTAL NUMBER": "🔢 Total Number: "
        }
        for i in range(1, 10):
            mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
            mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
        for key in mapping:
            if key in [f"CHARACTER {i}" for i in range(1, 10)] \
                    or key in [f"SECTION {i}" for i in range(1, 10)]:
                content = content.replace(
                    START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
                )
            elif key in ["TOTAL NUMBER"]:
                content = content.replace(
                    START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
                )
                content = content.replace(
                    END_FORMAT.format(key), "</span>"
                )
            else:
                content = content.replace(
                    START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
                )

                content = content.replace(
                    END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
                )
        return content

    @classmethod
    def singleagent_filter(cls, content, agent_name):
        return content

    @classmethod
    def debate_filter(cls, content, agent_name):
        return content

    @classmethod
    def code_filter(cls, content, agent_name):
        # return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
        return content

    @classmethod
    def general_filter(cls, content, agent_name):
        return content

    @classmethod
    def filter(cls, content: str, agent_name: str, ui_name: str):
        """
        Description:
            Make certain modifications to the output content to enhance its aesthetics when the content is shown in Gradio.
        Input:
            content: output content
            agent_name: Whose output it is
            ui_name: Which UI is currently launching
        Output:
            Modified content
        """
        mapping = {
            "SingleAgentUI": cls.singleagent_filter,
            "DebateUI": cls.debate_filter,
            "NovelUI": cls.novel_filter,
            "CodeUI": cls.code_filter,
            "GeneralUI": cls.general_filter
        }
        if ui_name in mapping:
            return mapping[ui_name](content, agent_name)
        else:
            return content
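# Aside (illustrative sketch, NOT part of gradio_base.py): a standalone
# mini-version of the tag substitution performed by UIHelper.novel_filter,
# written separately so it runs without the module's Gradio dependencies.
def render_tags(content: str) -> str:
    mapping = {"TARGET": "🎯 Current Target: ", "THOUGHT": "🤔 Overall Thought: "}
    for key, label in mapping.items():
        # structured tags emitted by the agents become bold, emoji-prefixed labels
        content = content.replace(f"<{key}>", f"<b>{label}</b>").replace(f"</{key}>", "")
    return content

assert render_tags("<TARGET>finish chapter 1</TARGET>") == "<b>🎯 Current Target: </b>finish chapter 1"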
class Client:
    """
    For inter-process communication, this is the client.
    `gradio_backend.py` serves as the backend, while `run_gradio` is the frontend.
    Communication between the frontend and backend is accomplished using sockets.
    """
    # =======================Radio Const String======================
    SINGLE_MODE = "Single Mode"
    AUTO_MODE = "Auto Mode"
    MODE_LABEL = "Select the execution mode"
    MODE_INFO = "Single mode: when the current agent finishes its output, execution pauses until you click to continue. Auto mode: once you complete your input, all agents keep producing output until the task ends."
    # ===============================================================
    mode = AUTO_MODE
    FIRST_RUN: bool = True
    # if the last agent is the user, the next agent is executed automatically rather than by clicking the button
    LAST_USER: bool = False

    receive_server = None
    send_server = None
    current_node = None
    cache = {}

    def __init__(self, host=HOST, port=PORT, bufsize=1024):
        assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
        self.SIGN = SPECIAL_SIGN
        self.bufsize = bufsize
        assert bufsize > 0
        self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.client_socket.connect((host, port))
        while True:
            data = self.client_socket.recv(self.bufsize).decode('utf-8')
            if data == "hi":
                self.client_socket.send("hello agent".encode('utf-8'))
                time.sleep(1)
            elif data == "check":
                break
        print_log("Client: connected successfully......")

    def start_server(self):
        while True:
            message = yield
            if message == 'exit':
                break
            self.send_message(message=message)

    def send_message(self, message):
        """Send the message to the server."""
        if isinstance(message, list) or isinstance(message, dict):
            message = str(message)
        assert isinstance(message, str)
        message = message + self.SIGN["SPLIT"]
        self.client_socket.send(message.encode('utf-8'))

    def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
        """Receive messages from the server; this blocks the process. Supports receiving long text."""
        remaining = ""
        while True:
            # receive message
            dataset = self.client_socket.recv(self.bufsize)
            try:
                # If decoding fails, the current transmission is a long text split across packets.
                dataset = dataset.decode('utf-8')
            except UnicodeDecodeError:
                if not isinstance(remaining, bytes):
                    remaining = remaining.encode('utf-8')
                assert isinstance(dataset, bytes)
                remaining += dataset
                try:
                    dataset = remaining.decode('utf-8')
                    remaining = ""
                except UnicodeDecodeError:
                    continue
            assert isinstance(remaining, str)
            dataset = remaining + dataset
            list_dataset = dataset.split(split_identifier)
            if len(list_dataset) == 1:
                # If the split yields only one piece, the current sequence has not yet ended.
                remaining = list_dataset[0]
                continue
            else:
                remaining = list_dataset[-1]
            # Received successfully
            list_dataset = list_dataset[:-1]
            return_value = []
            for item in list_dataset:
                if end_identifier is not None and item == end_identifier:
                    break
                return_value.append(item)
            identifier = yield return_value
            if identifier is not None:
                end_identifier, split_identifier = identifier

    def listening_for_start_(self):
        """
        When the server starts, the client is automatically launched.
        At this point, process synchronization is required,
        such as sending client data to the server for rendering,
        then the server sending the modified data back to the client,
        and simultaneously sending a startup command.
        Once the client receives the data, it will start running.
        """
        Client.receive_server = self.receive_message()
        # Waiting for information from the server.
        data: list = next(Client.receive_server)
        assert len(data) == 1
        data = eval(data[0])
        assert isinstance(data, dict)
        Client.cache.update(data)
        # Waiting for the start command from the server.
        data: list = Client.receive_server.send(None)
        assert len(data) == 1
        assert data[0] == "<START>"
389 |
+
class WebUI:
    """
    The base class for the frontend, which encapsulates functions for
    process-information synchronization. To create a new frontend, inherit
    from this class, implement the `construct_ui()` method, set up event
    listeners, and finally execute `run()` to load it.
    """

    def receive_message(
        self,
        end_identifier: str = None,
        split_identifier: str = SPECIAL_SIGN["SPLIT"]
    ) -> List:
        """Same as in the Client class, except that it yields "hello" once
        so the caller can confirm the generator is primed."""
        yield "hello"
        remaining = ""
        while True:
            dataset = self.client_socket.recv(self.bufsize)
            try:
                dataset = dataset.decode('utf-8')
            except UnicodeDecodeError:
                if not isinstance(remaining, bytes):
                    remaining = remaining.encode('utf-8')
                assert isinstance(dataset, bytes)
                remaining += dataset
                try:
                    dataset = remaining.decode('utf-8')
                    remaining = ""
                except UnicodeDecodeError:
                    continue
            assert isinstance(remaining, str)
            dataset = remaining + dataset
            list_dataset = dataset.split(split_identifier)
            if len(list_dataset) == 1:
                remaining = list_dataset[0]
                continue
            else:
                remaining = list_dataset[-1]
            list_dataset = list_dataset[:-1]
            return_value = []
            for item in list_dataset:
                if end_identifier is not None and item == end_identifier:
                    break
                return_value.append(item)
            identifier = yield return_value
            if identifier is not None:
                end_identifier, split_identifier = identifier

    def send_message(self, message: str):
        """Send a message to the client."""
        SEP = self.SIGN["SPLIT"]
        self.client_socket.send(
            (message + SEP).encode("utf-8")
        )

    def _connect(self):
        # Close any previous connection.
        if self.server_socket:
            self.server_socket.close()
        assert not os.path.isfile("PORT.txt")
        self.socket_port = check_port(PORT)
        # Step 1. Initialize the socket.
        self.server_socket = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM
        )
        # Step 2. Bind the IP and port.
        self.server_socket.bind((self.socket_host, self.socket_port))
        # Step 3. Launch the client process.
        self._start_client()

        # Step 4. Listen for a connection.
        self.server_socket.listen(1)

        # Step 5. Test the connection.
        client_socket, client_address = self.server_socket.accept()
        print_log("server: establishing connection......")
        self.client_socket = client_socket
        while True:
            client_socket.send("hi".encode('utf-8'))
            time.sleep(1)
            data = client_socket.recv(self.bufsize).decode('utf-8')
            if data == "hello agent":
                client_socket.send("check".encode('utf-8'))
                print_log("server: connected successfully")
                break
        assert os.path.isfile("PORT.txt")
        os.remove("PORT.txt")
        if self.receive_server:
            del self.receive_server
        self.receive_server = self.receive_message()
        assert next(self.receive_server) == "hello"

    @abstractmethod
    def render_and_register_ui(self):
        # You need to implement this function. Its purpose is to bind each
        # agent name to an avatar image. The agent names are stored in
        # `self.cache`, and the binding function is the `add_agent` method of
        # the `GradioConfig` class in `gradio_config.py`.
        # This function is executed inside `self.first_recieve_from_client()`.
        pass

    def first_recieve_from_client(self, reset_mode: bool = False):
        """
        Receive information from the client; typically executed during the
        initialization of the class. If `reset_mode` is False, it also binds
        each agent name to an image.
        """
        self.FIRST_RECIEVE_FROM_CLIENT = True
        data_list: List = self.receive_server.send(None)
        assert len(data_list) == 1
        data = eval(data_list[0])
        assert isinstance(data, dict)
        self.cache.update(data)
        if not reset_mode:
            self.render_and_register_ui()
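
A minimal sketch of a concrete subclass, assuming `GradioConfig` is imported from gradio_config.py and that the client's first message stored the agent names under a hypothetical "agents_name" key (the real key depends on what the client sends):

class DemoUI(WebUI):
    def render_and_register_ui(self):
        # Bind each received agent name to an avatar and bubble color.
        GradioConfig.add_agent(self.cache["agents_name"])  # "agents_name" is assumed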
    def _second_send(self, message: dict):
        # Send the modified message.
        # Executed automatically inside `self.send_start_cmd()`.
        self.send_message(str(message))

    def _third_send(self):
        # Send the start command.
        # Executed automatically inside `self.send_start_cmd()`.
        self.send_message(self.SIGN['START'])

    def send_start_cmd(self, message: dict = {"hello": "hello"}):
        # If you have no message to send, you can ignore the `message` arg.
        assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
        self._second_send(message=message)
        time.sleep(1)
        self._third_send()
        self.FIRST_RECIEVE_FROM_CLIENT = False
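
Taken together, the server-side startup sequence is: receive and cache the client's initial data, render the UI, then push the (possibly modified) data back followed by the `<START>` command. A usage sketch, assuming `ui` is an instance of a `WebUI` subclass:

ui.first_recieve_from_client()         # step 1: receive and cache client data
ui.send_start_cmd({"hello": "hello"})  # steps 2-3: send data back, then <START>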
    def __init__(
        self,
        client_cmd: list,  # e.g. ['python', 'test.py', '--a', 'b', '--c', 'd']
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = ""
    ):
        self.ui_name = ui_name
        self.server_socket = None
        self.SIGN = SPECIAL_SIGN
        self.socket_host = socket_host
        self.socket_port = socket_port
        self.bufsize = bufsize
        self.client_cmd = client_cmd

        self.receive_server = None
        self.cache = {}
        assert self.bufsize > 0
        self._connect()

    def _start_client(self):
        print(f"server: executing `{' '.join(self.client_cmd)}` ...")
        self.backend = subprocess.Popen(self.client_cmd)

    def _close_client(self):
        print(f"server: killing `{' '.join(self.client_cmd)}` ...")
        self.backend.terminate()

    def reset(self):
        print("server: restarting ...")
        self._close_client()
        time.sleep(1)
        self._connect()

    def render_bubble(self, rendered_data, agent_response, node_name, render_node_name: bool = True):
        # Render bubbles (HTML format) for gradio output.
        output = f"**{node_name}**<br>" if render_node_name else ""
        for item in agent_response:
            for agent_name in item:
                content = item[agent_name].replace("\n", "<br>")
                content = UIHelper.filter(content, agent_name, self.ui_name)
                output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
        rendered_data[-1] = [rendered_data[-1][0], output]
        return rendered_data

    def run(self, share: bool = True):
        self.demo.queue()
        self.demo.launch(share=share)


if __name__ == '__main__':
    pass
gradio_config.py
ADDED
@@ -0,0 +1,439 @@
# coding=utf-8
# Copyright 2023 The AIWaves Inc. team.

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from PIL import Image
import requests
from typing import List, Tuple

class GradioConfig:
    # How many avatars are currently registered.
    POINTER = 0

    # Avatar images. You can add to or replace this list.
    AGENT_HEAD_URL = [
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687579617434043.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687592097408547.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561699613.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561275758.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021090300/ry5k31wt33c.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021090300/0ls2gmwhrf5.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/03/202303271679886128550253.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711344407060.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711345834296.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311194291520.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311196958993.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/vr0bkov0dwl.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/auqx5zfsv5g.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/llofpivtwls.jpg",
        "https://img.touxiangwu.com/uploads/allimg/2021082612/3j2sdot3ye0.jpg",
        "https://img.touxiangwu.com/2020/3/nQfYf2.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068774532.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068289945.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918069785183.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561292003.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561578616.jpg",
        "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726564597524.jpg"
    ]
    USER_HEAD_URL = "https://img.touxiangwu.com/zb_users/upload/2023/05/202305301685407468585486.jpg"

    # The CSS style of gradio.Chatbot.
    CSS = """
    #chatbot1 .user {
        background-color:transparent;
        border-color:transparent;
    }
    #chatbot1 .bot {
        background-color:transparent;
        border-color:transparent;
    }
    #btn {color: red; border-color: red;}
    """

    ID = ["USER", "AGENT", "SYSTEM"]

    # Bubble templates.
    BUBBLE_CSS = {
        # Background-color Name-color Name-content Font-color Font-size Content Avatar-URL
        "USER": """
        <div style="display: flex; align-items: flex-start; justify-content: flex-end;">
            <div style="background-color: {}; border-radius: 20px 0px 20px 20px; padding: 15px; min-width: 100px; max-width: 300px;">
                <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
                <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
            </div>
            <img src="{}" alt="USER" style="width: 50px; height: 50px; border-radius: 50%; margin-left: 10px;">
        </div>
        """,

        # Avatar-URL Background-color Name-color Name-content Font-color Font-size Content
        "AGENT": """
        <div style="display: flex; align-items: flex-start;">
            <img src="{}" alt="AGENT" style="width: 50px; height: 50px; border-radius: 50%; margin-right: 10px;">
            <div style="background-color: {}; border-radius: 0px 20px 20px 20px; padding: 15px; min-width: 100px; max-width: 600px;">
                <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
                <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
            </div>
        </div>
        """,

        # Background-color Font-size Font-color Name Content
        "SYSTEM": """
        <div style="display: flex; align-items: center; justify-content: center;">
            <div style="background-color: {}; border-radius: 20px; padding: 1px; min-width: 200px; max-width: 1000px;">
                <p style="margin: 0; padding: 0; text-align: center; font-size: {}px; font-weight: bold; font-family: '微软雅黑', sans-serif; color: {};">{}:{}</p>
            </div>
        </div>
        """
    }

    ROLE_2_NAME = {}

    OBJECT_INFO = {

        "User": {
            # https://img-blog.csdnimg.cn/img_convert/7c20bc39ac69b6972a22e18762d02db3.jpeg
            "head_url": USER_HEAD_URL,
            "bubble_color": "#95EC69",
            "text_color": "#000000",
            "font_size": 0,
            "id": "USER"
        },

        "System": {
            # https://img-blog.csdnimg.cn/img_convert/e7e5887cfff67df8c2205c2ef0e5e7fa.png
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/03/202303141678768524747045.jpg",
            "bubble_color": "#7F7F7F",  # #FFFFFF
            "text_color": "#FFFFFF",    # #000000
            "font_size": 0,
            "id": "SYSTEM"
        },

        "wait": {
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2022/12/202212011669881536145501.jpg",
            "bubble_color": "#E7CBA6",
            "text_color": "#000000",
            "font_size": 0,
            "id": "AGENT"
        },

        "Recorder": {
            "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
            "bubble_color": "#F7F7F7",
            "text_color": "#000000",
            "font_size": 0,
            "id": "AGENT"
        }
    }

    @classmethod
    def color_for_img(cls, url):
        """
        Extract the dominant color from the picture, use it as the background
        color, and then choose a matching text color.
        """

        def get_main_color(image):
            image = image.convert("RGB")
            width, height = image.size
            pixels = image.getcolors(width * height)
            most_common_pixel = max(pixels, key=lambda item: item[0])
            return most_common_pixel[1]

        def is_dark_color(rgb_color):
            r, g, b = rgb_color
            luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
            return luminance < 0.5

        def download_image(url):
            print(f"binding: {url}")
            response = requests.get(url)
            if response.status_code == 200:
                with open('image.jpg', 'wb') as f:
                    f.write(response.content)

        def rgb_to_hex(color):
            return "#{:02X}{:02X}{:02X}".format(color[0], color[1], color[2])

        def get_color(image_url):
            download_image(image_url)

            image = Image.open("image.jpg")
            main_color = get_main_color(image)
            is_dark = is_dark_color(main_color)

            if is_dark:
                font_color = "#FFFFFF"
            else:
                font_color = "#000000"

            return rgb_to_hex(main_color), font_color

        return get_color(url)
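
`color_for_img` returns a `(background_hex, font_hex)` pair: the font flips to white whenever the Rec. 601 luminance of the dominant pixel falls below 0.5. A usage sketch (network access to the avatar URL is assumed):

bubble_color, text_color = GradioConfig.color_for_img(GradioConfig.AGENT_HEAD_URL[0])
print(bubble_color, text_color)  # e.g. "#1A2B3C" "#FFFFFF" for a dark avatar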
    @classmethod
    def init(cls, JSON):
        # Deprecated
        with open(JSON) as f:
            sop = json.load(f)
        cnt = 0
        FIRST_NODE = True
        first_node_roles = []
        for node_name in sop['nodes']:
            node_info = sop['nodes'][node_name]
            agent_states = node_info['agent_states']
            for agent_role in agent_states:
                name = agent_states[agent_role]['style']['name']
                cls.ROLE_2_NAME[agent_role] = name
                if FIRST_NODE:
                    first_node_roles.append(agent_role)
                bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cnt])
                cls.OBJECT_INFO[name] = {
                    "head_url": f"{cls.AGENT_HEAD_URL[cnt]}",
                    "bubble_color": bubble_color,
                    "text_color": text_color,
                    "font_size": 0,
                    "id": "AGENT"
                }
                cnt += 1
            if FIRST_NODE:
                FIRST_NODE = False
        print(cls.OBJECT_INFO)
        for usr_name in cls.OBJECT_INFO:
            if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
                cls.OBJECT_INFO[usr_name]["font_size"] = 12
            elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
                cls.OBJECT_INFO[usr_name]["font_size"] = 16
            else:
                assert False
        return first_node_roles

    @classmethod
    def add_agent(cls, agents_name: List, p: int = None):
        if p is not None:
            cls.POINTER = p
        for name in agents_name:
            bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cls.POINTER])
            cls.OBJECT_INFO[name] = {
                "head_url": f"{cls.AGENT_HEAD_URL[cls.POINTER]}",
                "bubble_color": bubble_color,
                "text_color": text_color,
                "font_size": 0,
                "id": "AGENT"
            }
            cls.POINTER += 1
        for usr_name in cls.OBJECT_INFO:
            if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
                cls.OBJECT_INFO[usr_name]["font_size"] = 12
            elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
                cls.OBJECT_INFO[usr_name]["font_size"] = 16
            else:
                assert False


class StateConfig:
    """UI configuration for the step progress bar (indicating the current node)."""

    CSS = """
:root {
  --gradient-start: 100%;
  --gradient-end: 0%;
}
.container.progress-bar-container {
  position: relative;
  display: flex;
  align-items: flex-end;
  width: 100%;
  overflow-x: auto;
  padding-bottom: 30px;
  padding-top: 20px
}
.container.progress-bar-container::-webkit-scrollbar {
  width: 8px;
  background-color: transparent;
}

.container.progress-bar-container::-webkit-scrollbar-thumb {
  background-color: transparent;
}

.progress-bar-container .progressbar {
  counter-reset: step;
  white-space: nowrap;
}
.progress-bar-container .progressbar li {
  list-style: none;
  display: inline-block;
  width: 200px;
  position: relative;
  text-align: center;
  cursor: pointer;
  white-space: normal;
}
.progress-bar-container .progressbar li:before {
  content: counter(step);
  counter-increment: step;
  width: 30px;
  height: 30px;
  line-height: 30px;
  border: 1px solid #ddd;
  border-radius: 100%;
  display: block;
  text-align: center;
  margin: 0 auto 10px auto;
  background-color: #ffffff;
}
.progress-bar-container .progressbar li:after {
  content: attr(data-content);
  position: absolute;
  width: 87%;
  height: 2px;
  background-color: #dddddd;
  top: 15px;
  left: -45%;
}
.progress-bar-container .progressbar li:first-child:after {
  content: none;
}
.progress-bar-container .progressbar li.active {
  color: green;
}
.progress-bar-container .progressbar li.active:before {
  border-color: green;
  background-color: green;
  color: white;
}
.progress-bar-container .progressbar li.active + li:after {
  background: linear-gradient(to right, green var(--gradient-start), lightgray var(--gradient-end));
}
.progress-bar-container .small-element {
  transform: scale(0.8);
}
.progress-bar-container .progressbar li span {
  position: absolute;
  top: 40px;
  left: 0;
  width: 100%;
  text-align: center;
}
.progress-bar-container .progressbar li .data-content {
  position: absolute;
  width: 100%;
  top: -10px;
  left: -100px;
  text-align: center;
}
"""

    FORMAT = """
<html>
<head>
<style>
{}
</style>
</head>
<body>
<br>
<center>
<div class="container progress-bar-container">
  <ul class="progressbar">
    {}
  </ul>
</div>
</center>
</body>
</html>
"""

    STATES_NAME: List[str] = None

    @classmethod
    def _generate_template(cls, types: str) -> str:
        # normal:           a state with no execution.
        # active-show-up:   active state, content displayed above the horizontal line.
        # active-show-down: active state, content displayed below the horizontal line.
        # active-show-both: active state, content displayed both above and below the line.
        # active-show-none: active state, no content displayed around the line.

        assert types.lower() in ["normal", "active-show-up", "active-show-down", "active-show-both", "active", "active-show-none"]
        both_templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
        <div class="data-content">
            <center>
                <p style="line-height: 1px;"></p>
                {}
                <p>
                    {}
                </p>
            </center>
        </div>
        <span>{}</span>
    </li>"""

        if types.lower() == "normal":
            templates = "<li><span>{}</span></li>"
        elif types.lower() == "active":
            templates = """<li class="active"><span>{}</span></li>"""
        elif types.lower() == "active-show-up":
            templates = both_templates.format("{}", "{}", "{}", "", "{}")
        elif types.lower() == "active-show-down":
            templates = both_templates.format("{}", "{}", "", "{}", "{}")
        elif types.lower() == "active-show-both":
            templates = both_templates
        elif types.lower() == "active-show-none":
            templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
        <span>{}</span>
    </li>"""
        else:
            assert False
        return templates

    @classmethod
    def update_states(cls, current_states: List[int], current_templates: List[str], show_content: List[Tuple[str]]) -> str:
        assert len(current_states) == len(current_templates)
        # You can dynamically change the number of states.
        # assert len(current_states) == len(cls.STATES_NAME)
        css_code = []
        for idx in range(len(current_states)):
            if idx == 0:
                if current_states[idx] != 0:
                    css_code = [f"{cls._generate_template('active').format(cls.STATES_NAME[idx])}"]
                else:
                    css_code = [f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"]
                continue
            if current_states[idx - 1] == 0:
                # new_code = f"{cls._generate_template('normal').format(*(show_content[idx]))}"
                new_code = f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"
            else:
                new_code = f"{cls._generate_template(current_templates[idx]).format(current_states[idx - 1], 100 - current_states[idx - 1], *(show_content[idx - 1]), cls.STATES_NAME[idx])}"
            if current_states[idx - 1] != 100 or (current_states[idx] == 0 and current_states[idx - 1] == 100):
                new_code = new_code.replace("""li class="active" """, """li """)
            css_code.append(new_code)
        return "\n".join(css_code)

    @classmethod
    def create_states(cls, states_name: List[str], manual_create_end_nodes: bool = False):
        # Create the states. Note that this mutates `states_name` when
        # `manual_create_end_nodes` is True.
        if manual_create_end_nodes:
            states_name.append("Done")
        css_code = ""
        cls.STATES_NAME: List[str] = states_name
        for name in states_name:
            css_code = f"{css_code}\n{cls._generate_template('normal').format(name)}"
        return css_code


if __name__ == '__main__':
    pass
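
A minimal sketch of how the progress bar is driven: `create_states` registers the node names and renders them all as `normal`, after which `update_states` re-renders with per-node progress percentages. The `show_content` tuples are empty here because the `active-show-none` template displays no extra content above or below the line:

html_items = StateConfig.create_states(["design", "develop", "debug"])
page = StateConfig.FORMAT.format(StateConfig.CSS, html_items)

# First node finished (100%), second node 40% done, third not started.
html_items = StateConfig.update_states(
    current_states=[100, 40, 0],
    current_templates=["active", "active-show-none", "active-show-none"],
    show_content=[(), (), ()],
)
page = StateConfig.FORMAT.format(StateConfig.CSS, html_items)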
image.jpg
ADDED
muti_prompts.py
ADDED
@@ -0,0 +1,264 @@
def get_design_state_system_prompt(index):
    software = """input:<target>You are a software company, aiming to write a snake game with python</target>
output:
<state>
<state_name>design_state</state_name>
<roles>Boss Architect_1 Leader Architect_2</roles>
<describe>In this scenario, the boss has presented a requirement. The architect is tasked with proposing a Python framework based on this requirement. The leader's role is to provide feedback on the architect's proposal, and another architect will finalize the framework based on the leader's comments. The target is: <target>a snake game with python</target>
</describe>
</state>

<state>
<state_name>develop_state</state_name>
<roles>Boss Developer_1 Leader Developer_2</roles>
<describe>In this scenario, the boss has provided a requirement. The developer's task is to write code based on the architecture proposed by the architect. The leader evaluates the written code for elegance, readability, and functionality, providing feedback. Another developer makes necessary modifications to the code. The target game is: <game>a snake game with python</game>
</describe>
</state>

<state>
<state_name>debug_state</state_name>
<roles>Boss Debugger Developer_1 Leader Developer_2 Coder</roles>
<describe>In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file. The target game is: <game>a snake game with python</game>
</describe>
</state>"""
    debate = """input:<target>Simulate a debate competition and debate based on the provided questions</target>
output:
<state>
<state_name>Affirmative_Task_Allocation_state</state_name>
<roles>Affirmative_Debate_organizer Affirmative_First Affirmative_Second Affirmative_Third</roles>
<describe>It is currently the debate stage, where the affirmative side is assigning tasks. Affirmative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.
</describe>
</state>

<state>
<state_name>Negative_Task_Allocation_state</state_name>
<roles>Affirmative_Debate_organizer Negative_First Negative_Second Negative_Third</roles>
<describe>It is currently the debate stage, where the negative side is assigning tasks. The debate organizer sets the stage for the competition, explaining the debate process and rules. Debaters are called upon to allocate tasks for each speech, ensuring an equal distribution of responsibilities. Negative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.
</describe>
</state>

<state>
<state_name>Debate_Order_state</state_name>
<roles>Debate_Judge Affirmative_First Negative_First Affirmative_Second Negative_Second Affirmative_Third Negative_Third</roles>
<describe>Now that we've started the sequential debating phase, each debater needs to present their own viewpoints.
</describe>
</state>

<state>
<state_name>Debate_Random_state</state_name>
<roles>Debate_Judge Affirmative_First Negative_First Affirmative_Second Negative_Second Affirmative_Third Negative_Third</roles>
<describe>We are now in the open debate phase, where each debater has the freedom to speak as they wish.
</describe>
</state>

<state>
<state_name>Judge_state</state_name>
<roles>Debate_Judge</roles>
<describe>The judge needs to determine who the winner is.
</describe>
</state>"""

    ecosystem = """input:
<target>Simulate the interactions and competition among different organisms within an ecosystem.</target>
output:
<state>
<state_name>Forest Morning</state_name>
<roles>Squirrel_A Sammy Ant_Colony Queen_Penelope Heron_Henry Rabbit_Family Matriarch_Olivia Fox_Felix</roles>
<describe>In this state, we are introduced to the lush and vibrant forest, where various organisms coexist. Sammy, the playful squirrel, gathers acorns. Queen Penelope leads the diligent ant colony in collecting food. Heron Henry patiently waits for the perfect moment to catch his prey. Matriarch Olivia ensures the safety of her rabbit family. Fox Felix competes with Sammy for a ripe berry bush.
</describe>
</state>
<state>
<state_name>Competition for Resources</state_name>
<roles>Squirrel_A Sammy Fox_Felix Ant_Colony Queen_Penelope Beetles Heron_Henry Otter_Oliver</roles>
<describe>In this state, the competition for resources becomes apparent. Sammy and Felix compete for the ripe berry bush. The ant colony, led by Queen Penelope, battles with persistent beetles for control of a fallen fruit source. Heron Henry catches a fish, but otter Oliver tries to snatch it away.
</describe>
</state>
<state>
<state_name>Delicate Balance</state_name>
<roles>Squirrel_A Sammy Ant_Colony Queen_Penelope Heron_Henry Rabbit_Family Matriarch_Olivia Fox_Felix Beetles Otter_Oliver</roles>
<describe>In this state, the delicate balance of life in the forest is emphasized. Each organism plays its unique role in the ecosystem. Sammy, Queen Penelope, Heron Henry, Matriarch Olivia, Fox Felix, Beetles, and Otter Oliver continue to interact and compete, shaping the intricate dance of survival and coexistence.
</describe>
</state>"""


    if index == 0:
        example = software
    elif index == 1:
        example = debate
    elif index == 2:
        example = ecosystem
    else:
        example = debate

    return """You are a scene description master. You need to plan several different states based on the overall task given to you so that the task is completed progressively. You must ensure that each state is simple and clear enough.
input:<target>{{Task}}</target>
output:
<state>
<state_name>{{the name of the state}}</state_name>
<roles>{{the roles of the state (identities rather than names); for example: coder, developer, boss...}}</roles>
<describe>{{the description of the current state}}</describe>
</state>

For example:
{}
Note:
1.Role names must be joined with an underscore: the output cannot be "team leader", it must be "team_Leader"; it cannot be "project manager", it must be "project_Manager".
2.Descriptions must be concise and clear.
3.You must fill in enough detail to make the entire process reasonable, not a streamlined account.
4.The above is just an example; you don't have to imitate it, and the content should be as different as possible while keeping the format correct.
5.There must be at least two roles in a state.
6.If it's a software company, someone must be responsible for writing the code.
7.A role must refer to a person's identity rather than their name; for example: coder, developer, boss... it cannot be Mary, Mike...
""".format(example)


def get_design_agents_style_system_prompt(index):
    software = """input:
<scene>In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file. The target program: <target>a snake game with python</target></scene>
<target>Debugger</target>
output:
<style>professional</style>"""

    debate = """input:
<scene>It is currently the debate stage, where the affirmative side is assigning tasks. Affirmative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.</scene>
<target>Affirmative_First</target>
output:
<style>professional</style>"""

    ecosystem = """input:
<scene>In this state, we are introduced to the lush and vibrant forest, where various organisms coexist. Sammy, the playful squirrel, gathers acorns. Queen Penelope leads the diligent ant colony in collecting food. Heron Henry patiently waits for the perfect moment to catch his prey. Matriarch Olivia ensures the safety of her rabbit family. Fox Felix competes with Sammy for a ripe berry bush.</scene>
<target>Sammy</target>
output:
<style>Playful and energetic</style>
"""


    if index == 0:
        example = software
    elif index == 1:
        example = debate
    elif index == 2:
        example = ecosystem
    else:
        example = debate

    return """Please output what personality you think the following characters should have and what style they should speak in.
For example:
input:
{}
Please strictly follow the output format: <style>{{your output}}</style>""".format(example)


def get_design_agent_state_system_prompt(index):
    software = """input:
<scene>In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file. The target program: <target>a snake game with python</target></scene>
<target>Developer_1</target>
output:
<role>Programmer responsible for checking code bugs</role>
<task>write elegant, readable, extensible, and efficient code based on the debugger's feedback.</task>
<rule>1.write code that conforms to standards like PEP8, is modular, easy to read, and maintainable.\n2.Address the issues identified by the Debugger and ensure that the code meets the project's requirements.\n3.The output strictly follows the following format:<title>{the file name}</title>\n<python>{the target code}</python></rule>
<demonstrations>Example: Debugging a Null Reference Exception
Issue: The code encounters a null reference exception, causing it to crash.
Debugging: By utilizing a debugger, we can pinpoint the exact line of code where the null reference exception occurs. We then analyze the code to identify the object or variable that is null when it shouldn't be. Once identified, we can rectify the issue, either by ensuring proper initialization or by adding null-checks to handle the situation gracefully, preventing the crash.</demonstrations>"""

    debate = """input:
<scene>It is currently the debate stage, where the affirmative side is assigning tasks. Affirmative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.</scene>
<target>Affirmative_First</target>
output:
<role>Opening Advocate for the Affirmative</role>
<task>1.Present arguments and main points.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic.</task>
<rule>1.Organize clear facts and logic to firmly support the stance. Introduce main points succinctly in the opening statement, laying a solid foundation for the debate.\n2.Explore ways to structure the opening statement for maximum impact and clarity. Consider using attention-grabbing statistics or quotes to engage the audience.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively identify flaws in other people's arguments as well.\n5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks.</rule>
<demonstrations>Issue: How to establish the importance of the debate topic and engage the audience effectively?
In this role as the Affirmative First, the speaker can open their argument by sharing a compelling quote or a relevant, attention-grabbing fact related to the debate topic. For instance, if the debate topic is about the urgency of addressing climate change, they could start with a quote from a renowned climate scientist or a startling statistic about the rising global temperatures. This approach not only captures the audience's interest but also immediately establishes the significance of the issue at hand, setting the stage for a persuasive argument.</demonstrations>
"""

    ecosystem = """input:
<scene>In this state, we are introduced to the lush and vibrant forest, where various organisms coexist. Sammy, the playful squirrel, gathers acorns. Queen Penelope leads the diligent ant colony in collecting food. Heron Henry patiently waits for the perfect moment to catch his prey. Matriarch Olivia ensures the safety of her rabbit family. Fox Felix competes with Sammy for a ripe berry bush.</scene>
<target>Queen_Penelope</target>
output:
<role>Leader of the ant colony responsible for collecting food</role>
<task>Lead the diligent ant colony in collecting food</task>
<rule>1. Organize and coordinate the ant colony to efficiently gather food.\n2. Assign specific tasks to different groups of ants, such as foraging, carrying, and storing food.\n3. Ensure that the ants follow the most efficient paths to food sources and back to the colony.\n4. Implement effective communication methods to relay information and instructions to the ant colony.\n5. Prioritize the collection of essential food items and distribute tasks accordingly.\n6. Monitor the progress and productivity of the ant colony and make adjustments as necessary.</rule>
<demonstrations>Example: Organizing Food Collection\n1. Assign a group of ants to scout for food sources in the surrounding area.\n2. Once a food source is found, communicate the location and type of food to the rest of the ant colony.\n3. Divide the remaining ants into foraging and carrying groups.\n4. Foraging ants collect food from the source and bring it back to the colony.\n5. Carrying ants transport the collected food to the storage area within the colony.\n6. Regularly assess the food supply and adjust the number of ants assigned to each task based on the colony's needs.</demonstrations>
"""

    if index == 0:
        example = software
    elif index == 1:
        example = debate
    elif index == 2:
        example = ecosystem
    else:
        example = debate

    return """Please analyze the task, rule and demonstration of the target character according to the scene description. The output format is:
<role>{{The role of the character in this scene}}</role>
<task>{{Task of the target character in this scene}}</task>
<rule>{{How the target character can better complete the task in this scenario, and the rules and techniques that need to be followed}}</rule>
<demonstrations>{{Examples that help the target character better understand their task}}</demonstrations>
for example:
{}
Note:
1.Descriptions must be concise and clear.
2.You must fill in enough detail to make the entire process reasonable, not a streamlined account.
3.The above is just an example; you don't have to imitate it, and the content should be as different as possible while keeping the format correct.
4.If the target character needs to program, add the following rule to its <rule>: Your output should strictly follow the format: <title>{{file name}}</title>,<python>{{output code}}</python> (very important!). For example, the debugger needs to program, so add to its rule: <rule>0.Your output should strictly follow the format: <title>{{file name}}</title>,<python>{{output code}}</python></rule>""".format(example)


def get_design_begin_role_query_system_prompt(index):
    software = """input:
<scene>In this scenario, the boss has provided a requirement. The debugger simulates a compiler to determine whether the code is runnable and provides feedback. The developer writes code based on the debugger's feedback. The leader evaluates whether the final code meets the boss's requirements and provides feedback for further modifications. The coder writes the final code to a file. The target program: <target>a snake game with python</target></scene>
<roles>Boss Debugger Leader Developer</roles>
output:
<begin_role>Boss</begin_role>
<begin_query>Please make the code both runnable and more efficient.</begin_query>"""

    debate = """input:
<scene>It is currently the debate stage, where the affirmative side is assigning tasks. Affirmative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.<debate topic>\nShould AI Replace Humans in Creative Fields? Affirmative viewpoint: AI should replace humans in creative fields because it can produce art and content efficiently, reduce costs, and eliminate human bias. Negative viewpoint: AI should not replace humans in creative fields as it lacks true creativity, emotions, and the ability to understand complex human experiences.\n</debate topic></scene>
<roles>Affirmative_Debate_organizer Affirmative_First Affirmative_Second Affirmative_Third</roles>
output:
<begin_role>Affirmative_Debate_organizer</begin_role>
<begin_query>The debate topic is as follows: \n<debate topic>\nShould AI Replace Humans in Creative Fields? Affirmative viewpoint: AI should replace humans in creative fields because it can produce art and content efficiently, reduce costs, and eliminate human bias. Negative viewpoint: AI should not replace humans in creative fields as it lacks true creativity, emotions, and the ability to understand complex human experiences.\n</debate topic>\nNow, begin to discuss!</begin_query>"""

    ecosystem = """input:
<scene>In this state, we are introduced to the lush and vibrant forest, where various organisms coexist. Sammy, the playful squirrel, gathers acorns. Queen Penelope leads the diligent ant colony in collecting food. Heron Henry patiently waits for the perfect moment to catch his prey. Matriarch Olivia ensures the safety of her rabbit family. Fox Felix competes with Sammy for a ripe berry bush.</scene>
<roles>Squirrel_A Sammy Ant_Colony Queen_Penelope Heron_Henry Rabbit_Family Matriarch_Olivia Fox_Felix</roles>
output:
<begin_role>Squirrel_A</begin_role>
<begin_query>Look at all these delicious acorns! I can't wait to gather them all.</begin_query>"""
    if index == 0:
        example = software
    elif index == 1:
        example = debate
    elif index == 2:
        example = ecosystem
    else:
        example = debate

    return """Please analyze, based on the scene description, which character should deliver the opening remarks and what those remarks are (the character must be selected from the provided roles). The output format is:
<begin_role>{{The first character to speak}}</begin_role>
<begin_query>{{The first thing they say}}</begin_query>

for example:
{}
""".format(example)


design_states_cot_system_prompt = """You are a scene description master. Please translate the <target> into a more reasonable expression and enrich the details (such as describing the characters' personalities more accurately, making the scene more plausible, and designing more reasonable steps so that the scene can proceed normally). Think carefully, step by step!"""


gen_coder_task_system_prompt = """
You are a task description master. Given a target, you need to output the coder's tasks under that target.
Input format:
<target>{the description of the event}</target>
Output format:
<task>{your output task}</task>
For example:
Input:
<target>In this scenario, the boss has provided a requirement. The developer's task is to write code based on the architecture proposed by the architect. The leader evaluates the written code for efficiency, readability, and functionality, providing feedback. Another developer makes necessary modifications to the code. The target program is: <target>a snake game with Python</target></target>
Output:
<task>
1. Write elegant, readable, extensible, and effective code
2. Follow the architect's proposal closely while writing code
</task>
"""
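
These prompt builders are meant to be paired with the tag extractor in utils.py: the model is asked to wrap each field in XML-style tags, and `extract` pulls them back out. A minimal sketch, with a hand-written stand-in for the LLM response:

from utils import extract

system_prompt = get_design_state_system_prompt(0)  # 0 selects the software example
# response = llm(system_prompt, "<target>...</target>")  # assumed LLM call
response = "<state><state_name>design_state</state_name><roles>Boss Architect_1</roles></state>"
print(extract(response, "state_name"))  # -> design_state
print(extract(response, "roles"))       # -> Boss Architect_1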
requirements.txt
ADDED
@@ -0,0 +1,10 @@
openai
torch
text2vec
langchain
sentence_transformers
selenium
tqdm
google-api-python-client
beautifulsoup4
ai-agents
template.py
ADDED
@@ -0,0 +1,111 @@
## default: { "temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613", "log_path": "logs/{your name}" }
LLM = {
    "temperature": 0.0,
    "model": "gpt-3.5-turbo-16k-0613",
    "log_path": "logs/god"
}


Agents = {
    "Lilong": {
        "style": "professional",
        "roles": {
            "company": "coder",
            "state2": "role2",
        },
    },
    "name2": {
        "style": "professional",
        "roles": {
            "company": "coder",
            "state2": "role2",
        },
    }
}

# indispensable parameter: "controller_type" ("order", "random", "rule")
# default extract words: "end". You can choose not to fill in this parameter.
controller = {
    "controller_type": "order",
    "max_chat_nums": 12,
    "judge_system_prompt": "",
    "judge_last_prompt": "",
    "judge_extract_words": "end",
    "call_system_prompt": "",
    "call_last_prompt": "",
    "call_extract_words": ""
}

#
Agent_state = {
    "role": {
        "LLM_type": "OpenAI",
        "LLM": LLM,
        "style": {
            "role": "Opening Advocate for the Affirmative",
            "style": "professional"
        },
        "task": {
            "task": ""
        },
        "rule": {
            "rule": ""
        }
    },
}


# indispensable parameters: "agent_states", "controller"
# "roles" determines the speaking order when the rule is "order". If not set, the default order is used.
# "begin_query" & "begin_role" determine the first speaker, which often sets the direction of the following speech. If not set, the first agent is used by default.
# "environment_prompt": responsible for setting the scene for the current environment.
State = {
    "controller": controller,
    "begin_role": "",
    "begin_query": "",
    "environment_prompt": "",
    "roles": ["role1", "role2"],
    "LLM_type": "OpenAI",
    "LLM": LLM,
    "agent_state": Agent_state,
}



States = {
    "end_state": {
        "agent_states": {}
    },
    "state1": State
}


# default finish_state_name is "end_state"
# "environment_type": "competive" means different states do not share memory; "cooperative" means different states share memory.
SOP = {
    "config": {
        "API_KEY": "Your key",
        "PROXY": "Your PROXY",
        "MAX_CHAT_HISTORY": "5",
        "User_Names": "[\"alexander\"]"
    },
    "environment_type": "competive",
    "LLM_type": "OpenAI",
    "LLM": LLM,
    "root": "state1",
    "finish_state_name": "end_state",
    "relations": {
        "state1": {
            "0": "state1",
            "1": "state2"
        },
        "state2": {
            "0": "state2",
            "1": "end_state"
        }
    },
    "agents": Agents,
    "states": States,
}
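
The template above mirrors the shape of config.json in this repo. A sketch of serializing it, assuming the placeholder fields ("Your key", the role names, and so on) are filled in first:

import json

with open("my_config.json", "w", encoding="utf-8") as f:
    json.dump(SOP, f, ensure_ascii=False, indent=4)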
utils.py
ADDED
@@ -0,0 +1,482 @@
# coding=utf-8
# Copyright 2023 The AIWaves Inc. team.

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for an LLM autonomous agent."""
import csv
import random
import json
import pandas
import numpy as np
import requests
import torch
from tqdm import tqdm
import re
import datetime
import string
import os
import openai
from text2vec import semantic_search
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer

embed_model_name = os.environ["Embed_Model"] if "Embed_Model" in os.environ else "text-embedding-ada-002"
if embed_model_name in ["text-embedding-ada-002"]:
    # The OpenAI embedding endpoint is used lazily inside get_embedding().
    pass
else:
    embedding_model = SentenceTransformer(
        embed_model_name, device=torch.device("cpu")
    )

def get_embedding(sentence):
|
47 |
+
if embed_model_name in ["text-embedding-ada-002"]:
|
48 |
+
openai.api_key = os.environ["API_KEY"]
|
49 |
+
if "PROXY" in os.environ:
|
50 |
+
assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"],"PROXY error,PROXY must be http or socks"
|
51 |
+
openai.proxy = os.environ["PROXY"]
|
52 |
+
if "API_BASE" in os.environ:
|
53 |
+
openai.api_base = os.environ["API_BASE"]
|
54 |
+
embedding_model = openai.Embedding
|
55 |
+
embed = embedding_model.create(
|
56 |
+
model=embed_model_name,
|
57 |
+
input=sentence
|
58 |
+
)
|
59 |
+
embed = embed["data"][0]["embedding"]
|
60 |
+
embed = torch.tensor(embed,dtype=torch.float32)
|
61 |
+
else:
|
62 |
+
embed = embedding_model.encode(sentence,convert_to_tensor=True)
|
63 |
+
if len(embed.shape)==1:
|
64 |
+
embed = embed.unsqueeze(0)
|
65 |
+
return embed
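
# Usage sketch (illustrative, not part of the original file): with API_KEY set
# and the default "text-embedding-ada-002" model, get_embedding returns a 2-D
# float32 tensor with one row per input, e.g. shape [1, 1536]:
#     emb = get_embedding("hello world")
#     print(emb.shape)  # torch.Size([1, 1536])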


def get_code():
    """Return a random 8-character alphanumeric identifier."""
    return "".join(random.sample(string.ascii_letters + string.digits, 8))


def get_content_between_a_b(start_tag, end_tag, text):
    """
    Args:
        start_tag (str): start_tag
        end_tag (str): end_tag
        text (str): complete sentence

    Returns:
        str: the content between start_tag and end_tag
    """
    extracted_text = ""
    start_index = text.find(start_tag)
    while start_index != -1:
        end_index = text.find(end_tag, start_index + len(start_tag))
        if end_index != -1:
            extracted_text += text[start_index +
                                   len(start_tag):end_index] + " "
            start_index = text.find(start_tag, end_index + len(end_tag))
        else:
            break

    return extracted_text.strip()


def extract(text, type):
    """extract the content between <type></type>

    Args:
        text (str): complete sentence
        type (str): tag

    Returns:
        str: content between <type></type>
    """
    target_str = get_content_between_a_b(f"<{type}>", f"</{type}>", text)
    return target_str
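
# Usage sketch (illustrative, not part of the original file):
#     extract("<title>main.py</title><python>print(1)</python>", "title")
#     # -> 'main.py'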


def count_files_in_directory(directory):
    # Count the files in the given directory
    file_count = len([f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))])
    return file_count


def delete_oldest_files(directory, num_to_delete):
    # List the files in the directory with their modification times and
    # sort them oldest-first
    files = [(f, os.path.getmtime(os.path.join(directory, f))) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
    files.sort(key=lambda item: item[1])

    # Delete the oldest num_to_delete files
    for i in range(min(num_to_delete, len(files))):
        file_to_delete = os.path.join(directory, files[i][0])
        os.remove(file_to_delete)


def delete_files_if_exceed_threshold(directory, threshold, num_to_keep):
    # If the directory holds more than `threshold` files, delete the oldest
    # ones so that only `num_to_keep` remain
    file_count = count_files_in_directory(directory)
    if file_count > threshold:
        delete_count = file_count - num_to_keep
        delete_oldest_files(directory, delete_count)


def save_logs(log_path, messages, response):
    log_path = log_path if log_path else "logs"
    os.makedirs(log_path, exist_ok=True)
    delete_files_if_exceed_threshold(log_path, 20, 10)
    log = {}
    log["input"] = messages
    log["output"] = response
    log_file = os.path.join(
        log_path,
        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + ".json")
    with open(log_file, "w", encoding="utf-8") as f:
        json.dump(log, f, ensure_ascii=False, indent=2)
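
# Usage sketch (illustrative, not part of the original file): each call writes
# one timestamped JSON file and keeps the directory bounded; once more than
# 20 log files accumulate, only the 10 newest are kept:
#     save_logs("logs", [{"role": "user", "content": "hi"}], "hello!")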


def semantic_search_word2vec(query_embedding, kb_embeddings, top_k):
    return semantic_search(query_embedding, kb_embeddings, top_k=top_k)


def cut_sent(para):
    # Split the paragraph at Chinese sentence-ending punctuation (and the
    # closing quotation marks that may follow it), then regroup the
    # sentences into chunks of three.
    para = re.sub("([。!?\?])([^”’])", r"\1\n\2", para)
    para = re.sub("(\.{6})([^”’])", r"\1\n\2", para)
    para = re.sub("(\…{2})([^”’])", r"\1\n\2", para)
    para = re.sub("([。!?\?][”’])([^,。!?\?])", r"\1\n\2", para)
    para = para.rstrip()
    pieces = [i for i in para.split("\n") if i]
    batch_size = 3
    chunks = [
        " ".join(pieces[i:i + batch_size])
        for i in range(0, len(pieces), batch_size)
    ]
    return chunks
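
# Usage sketch (illustrative, not part of the original file):
#     cut_sent("第一句。第二句!第三句?第四句。")
#     # -> ['第一句。 第二句! 第三句?', '第四句。']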


def process_document(file_path):
    """
    Convert a document into an embedded knowledge base saved as JSON.

    Args:
        file_path: path to the source document; a ".csv" file is treated as
            a Q&A table with "question" and "answer" columns, anything else
            is loaded as an unstructured file and split into chunks.

    Json format:
        Dict[num, Dict[q: str, a: str, chunk: str, emb: List[float]]]
    """
    final_dict = {}
    count = 0
    if file_path.endswith(".csv"):
        dataset = pandas.read_csv(file_path)
        questions = dataset["question"]
        answers = dataset["answer"]
        # embedding q+chunk
        for q, a in zip(questions, answers):
            for text in cut_sent(a):
                temp_dict = {}
                temp_dict["q"] = q
                temp_dict["a"] = a
                temp_dict["chunk"] = text
                temp_dict["emb"] = get_embedding(q + text).tolist()
                final_dict[count] = temp_dict
                count += 1
        # embedding chunk
        for q, a in zip(questions, answers):
            for text in cut_sent(a):
                temp_dict = {}
                temp_dict["q"] = q
                temp_dict["a"] = a
                temp_dict["chunk"] = text
                temp_dict["emb"] = get_embedding(text).tolist()
                final_dict[count] = temp_dict
                count += 1
        # embedding q
        for q, a in zip(questions, answers):
            temp_dict = {}
            temp_dict["q"] = q
            temp_dict["a"] = a
            temp_dict["chunk"] = a
            temp_dict["emb"] = get_embedding(q).tolist()
            final_dict[count] = temp_dict
            count += 1
        # embedding q+a
        for q, a in zip(questions, answers):
            temp_dict = {}
            temp_dict["q"] = q
            temp_dict["a"] = a
            temp_dict["chunk"] = a
            temp_dict["emb"] = get_embedding(q + a).tolist()
            final_dict[count] = temp_dict
            count += 1
        # embedding a
        for q, a in zip(questions, answers):
            temp_dict = {}
            temp_dict["q"] = q
            temp_dict["a"] = a
            temp_dict["chunk"] = a
            temp_dict["emb"] = get_embedding(a).tolist()
            final_dict[count] = temp_dict
            count += 1
        print(f"finish updating {len(final_dict)} data!")
        os.makedirs("temp_database", exist_ok=True)
        save_path = os.path.join(
            "temp_database/",
            file_path.split("/")[-1].replace("." + file_path.split(".")[1],
                                             ".json"),
        )
        print(save_path)
        with open(save_path, "w") as f:
            json.dump(final_dict, f, ensure_ascii=False, indent=2)
        return {"knowledge_base": save_path, "type": "QA"}
    else:
        loader = UnstructuredFileLoader(file_path)
        docs = loader.load()
        text_splitter = CharacterTextSplitter(chunk_size=200,
                                              chunk_overlap=100)
        docs = text_splitter.split_text(docs[0].page_content)
        os.makedirs("temp_database", exist_ok=True)
        save_path = os.path.join(
            "temp_database/",
            file_path.replace("." + file_path.split(".")[1], ".json"))
        final_dict = {}
        count = 0
        for c in tqdm(docs):
            temp_dict = {}
            temp_dict["chunk"] = c
            temp_dict["emb"] = get_embedding(c).tolist()
            final_dict[count] = temp_dict
            count += 1
        print(f"finish updating {len(final_dict)} data!")
        with open(save_path, "w") as f:
            json.dump(final_dict, f, ensure_ascii=False, indent=2)
        return {"knowledge_base": save_path, "type": "UnstructuredFile"}


def load_knowledge_base_qa(path):
    """
    Load json format knowledge base.
    """
    print("path", path)
    with open(path, "r") as f:
        data = json.load(f)
    embeddings = []
    questions = []
    answers = []
    chunks = []
    for idx in range(len(data.keys())):
        embeddings.append(data[str(idx)]["emb"])
        questions.append(data[str(idx)]["q"])
        answers.append(data[str(idx)]["a"])
        chunks.append(data[str(idx)]["chunk"])
    embeddings = np.array(embeddings, dtype=np.float32)
    embeddings = torch.from_numpy(embeddings).squeeze()
    return embeddings, questions, answers, chunks


def load_knowledge_base_UnstructuredFile(path):
    """
    Load json format knowledge base.
    """
    with open(path, "r") as f:
        data = json.load(f)
    embeddings = []
    chunks = []
    for idx in range(len(data.keys())):
        embeddings.append(data[str(idx)]["emb"])
        chunks.append(data[str(idx)]["chunk"])
    embeddings = np.array(embeddings, dtype=np.float32)
    embeddings = torch.from_numpy(embeddings).squeeze()
    return embeddings, chunks


def cos_sim(a: torch.Tensor, b: torch.Tensor):
    """
    Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
    :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
    """
    if not isinstance(a, torch.Tensor):
        a = torch.tensor(a)

    if not isinstance(b, torch.Tensor):
        b = torch.tensor(b)

    if len(a.shape) == 1:
        a = a.unsqueeze(0)

    if len(b.shape) == 1:
        b = b.unsqueeze(0)

    a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
    b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
    return torch.mm(a_norm, b_norm.transpose(0, 1))
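
# Usage sketch (illustrative, not part of the original file):
#     cos_sim(torch.tensor([1., 0.]), torch.tensor([[1., 0.], [0., 1.]]))
#     # -> tensor([[1., 0.]])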


def matching_a_b(a, b, requirements=None):
    a_embedder = get_embedding(a)
    # get the embedding of b
    b_embedder = get_embedding(b)
    sim_scores = cos_sim(a_embedder, b_embedder)[0]
    return sim_scores


def matching_category(inputtext,
                      forest_name,
                      requirements=None,
                      cat_embedder=None,
                      top_k=3):
    """
    Args:
        inputtext: the category name to be matched
        forest_name: names of the categories in the search tree
        requirements: optional space-separated keywords; when given, their
            averaged similarity scores replace the inputtext scores
        top_k: the default three highest scoring results

    Return:
        topk matching_result. List[List] [[top1_name,top2_name,top3_name],[top1_score,top2_score,top3_score]]
    """

    sim_scores = torch.zeros([100])
    if inputtext:
        input_embedder = get_embedding(inputtext)
        sim_scores = cos_sim(input_embedder, cat_embedder)[0]

    if requirements:
        requirements = requirements.split(" ")
        requirements_embedder = get_embedding(requirements)
        req_scores = cos_sim(requirements_embedder, cat_embedder)
        req_scores = torch.mean(req_scores, dim=0)
        total_scores = req_scores
    else:
        total_scores = sim_scores

    top_k_cat = torch.topk(total_scores, k=top_k)
    top_k_score, top_k_idx = top_k_cat[0], top_k_cat[1]
    top_k_name = [forest_name[top_k_idx[i]] for i in range(0, top_k)]

    return [top_k_name, top_k_score.tolist(), top_k_idx]
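
# Usage sketch (illustrative, not part of the original file), assuming
# cat_embedder stacks one embedding row per entry of forest_name:
#     names, scores, idx = matching_category(
#         "sneakers", forest_name, cat_embedder=cat_embedder, top_k=3)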


def sample_with_order_preserved(lst, num):
    """Randomly sample from the list while maintaining the original order."""
    indices = list(range(len(lst)))
    sampled_indices = random.sample(indices, num)
    sampled_indices.sort()  # keep the original order
    return [lst[i] for i in sampled_indices]


def limit_values(data, max_values):
    """Reduce each key-value list in the dictionary to the specified size, keeping the order of the original list unchanged."""
    for key, values in data.items():
        if len(values) > max_values:
            data[key] = sample_with_order_preserved(values, max_values)
    return data


def limit_keys(data, max_keys):
    """Reduce the dictionary to the specified number of keys."""
    keys = list(data.keys())
    if len(keys) > max_keys:
        keys = sample_with_order_preserved(keys, max_keys)
    data = {key: data[key] for key in keys}
    return data
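
# Usage sketch (illustrative, not part of the original file): cap a history
# dict at 2 keys and at most 2 values per key (sampling is random, so the
# surviving entries vary):
#     data = {"a": [1, 2, 3], "b": [4], "c": [5, 6]}
#     limit_values(limit_keys(data, 2), 2)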


def flatten_dict(nested_dict):
    """
    Flatten the dictionary: lift values out of nested dictionaries so that
    all leaf key-value pairs sit at the top level.
    """
    flattened_dict = {}
    for key, value in nested_dict.items():
        if isinstance(value, dict):
            flattened_subdict = flatten_dict(value)
            flattened_dict.update(flattened_subdict)
        else:
            flattened_dict[key] = value
    return flattened_dict
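
# Usage sketch (illustrative, not part of the original file):
#     flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     # -> {"a": 1, "c": 2, "e": 3}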


def merge_list(list1, list2):
    """Append the elements of list2 that are not already in list1."""
    for l in list2:
        if l not in list1:
            list1.append(l)
    return list1


def Search_Engines(req):
    """Query the shopping search endpoint given by the SHOPPING_SEARCH
    environment variable and return the matched items and top categories."""
    FETSIZE = int(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5

    new_dict = {"keyword": req, "catLeafName": "", "fetchSize": FETSIZE}
    url = os.environ["SHOPPING_SEARCH"]
    res = requests.post(
        url=url,
        json=new_dict,
    )
    user_dict = json.loads(res.text)
    if "data" in user_dict.keys():
        request_items = user_dict["data"]["items"]  # JSON of the matched products
        top_category = user_dict["data"]["topCategories"]
        return request_items, top_category
    else:
        return [], []


def search_with_api(requirements, categery):

    FETSIZE = int(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5

    request_items = []
    all_req_list = requirements.split(" ")
    count = 0

    # Drop the leading keyword one at a time until enough items are found.
    while len(request_items) < FETSIZE and len(all_req_list) > 0:
        if count:
            all_req_list.pop(0)
        all_req = (" ").join(all_req_list)
        if categery not in all_req_list:
            all_req = all_req + " " + categery
        now_request_items, top_category = Search_Engines(all_req)
        request_items = merge_list(request_items, now_request_items)
        count += 1
    new_top = []
    for category in top_category:
        # Skip the catch-all "other" categories (其它/其他)
        if "其它" in category or "其他" in category:
            continue
        else:
            new_top.append(category)
    if len(request_items) > FETSIZE:
        request_items = request_items[:FETSIZE]
    return request_items, new_top
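
# Usage sketch (illustrative, not part of the original file), assuming the
# SHOPPING_SEARCH endpoint is configured in the environment:
#     items, categories = search_with_api("red running shoes", "shoes")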


def get_relevant_history(query, history, embeddings):
    """
    Retrieve a list of key history entries based on a query using semantic search.

    Args:
        query (str): The input query for which key history is to be retrieved.
        history (list): A list of historical key entries.
        embeddings (numpy.ndarray): An array of embedding vectors for historical entries.

    Returns:
        list: A list of key history entries most similar to the query.
    """
    TOP_K = int(os.environ["TOP_K"]) if "TOP_K" in os.environ else 2
    relevant_history = []
    query_embedding = get_embedding(query)
    hits = semantic_search(query_embedding, embeddings, top_k=min(TOP_K, embeddings.shape[0]))
    hits = hits[0]
    for hit in hits:
        matching_idx = hit["corpus_id"]
        try:
            relevant_history.append(history[matching_idx])
        except IndexError:
            return []
    return relevant_history
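
# Usage sketch (illustrative, not part of the original file), assuming each
# history entry was embedded with get_embedding:
#     history = ["user asked about pricing", "user asked about shipping"]
#     embeddings = torch.cat([get_embedding(h) for h in history], dim=0)
#     get_relevant_history("how much does it cost?", history, embeddings)
#     # -> up to TOP_K most similar entries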