Spaces: Runtime error

update

- app.py +353 -0
- config.json +516 -0
- gradio_backend.py +138 -0
- gradio_base.py +559 -0
- gradio_config.py +437 -0
app.py
ADDED
@@ -0,0 +1,353 @@
import sys

from gradio_base import UIHelper, WebUI
import os
from gradio_base import WebUI, UIHelper, PORT, HOST, Client
from gradio_config import GradioConfig as gc
from typing import List, Tuple, Any
import gradio as gr
import time

class DebateUI(WebUI):
    FORMAT = "{}\n<debate topic>\n{}\nAffirmative viewpoint:{}\nNegative viewpoint:{}\n<debate topic>{}"
    AUDIENCE = "Audience"
    cache = {}
    all_agents_name = []
    receive_server = None

    @classmethod
    def extract(cls, content):
        topic = content.split("<debate topic>")[1].split("Affirmative viewpoint:")[0]
        positive = content.split("<debate topic>")[1].split("Affirmative viewpoint:")[1].split("negative viewpoint:")[0]
        negative = content.split("<debate topic>")[1].split("Affirmative viewpoint:")[1].split("negative viewpoint:")[1]
        return topic.strip(), positive.strip(), negative.strip()

    @classmethod
    def merge(cls, theme, positive, negative, origin_content) -> str:
        return cls.FORMAT.format(
            origin_content.split("<debate topic>")[0],
            theme, positive, negative,
            origin_content.split("<debate topic>")[-1]
        )

    @classmethod
    def convert2list4agentname(cls, sop):
        only_name = []
        agent_name = []
        roles_to_names = sop.roles_to_names
        for state_name, roles_names in roles_to_names.items():
            for role, name in roles_names.items():
                agent_name.append(f"{name}({role})")
                only_name.append(name)
        agent_name.append(cls.AUDIENCE)
        agent_name = list(set(agent_name))
        agent_name.sort()
        return agent_name, only_name

    def render_and_register_ui(self):
        gc.add_agent(self.cache["only_name"])

    def __init__(
        self,
        client_cmd: list,
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = "DebateUI"
    ):
        super(DebateUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
        self.first_recieve_from_client()
        self.data_history = list()
        self.caller = 0

    def handle_message(self, history: list,
                       state, agent_name, token, node_name):
        if state % 10 == 0:
            self.data_history.append({agent_name: token})
        elif state % 10 == 1:
            # Same state. Append the new token to the current bubble.
            self.data_history[-1][agent_name] += token
        elif state % 10 == 2:
            # New state. Need to add a new bubble.
            history.append([None, ""])
            self.data_history.clear()
            self.data_history.append({agent_name: token})
        else:
            assert False, "Invalid state."
        render_data = self.render_bubble(history, self.data_history, node_name, render_node_name=True or state % 10 == 2)
        return render_data

    def start_button_when_click(self, theme, positive, negative, choose, mode):
        """
        inputs=[self.text_theme, self.text_positive, self.text_negative, self.radio_choose],
        outputs=[self.chatbot, self.btn_send]
        """
        cosplay = None if choose == self.AUDIENCE else choose.split("(")[0]
        message = dict(theme=theme, positive=positive, negative=negative, cosplay=cosplay, mode=mode)
        self.send_start_cmd(message=message)
        return gr.Chatbot.update(
            visible=True
        ), gr.Button.update(visible=False)

    def start_button_after_click(self, history):
        """
        inputs=[self.chatbot],
        outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
        """
        if self.caller == 0:
            # not single mode
            self.data_history = list()
        self.caller = 0
        receive_server = self.receive_server
        while True:
            data_list: List = receive_server.send(None)
            for item in data_list:
                data = eval(item)
                assert isinstance(data, list)
                state, agent_name, token, node_name = data
                assert isinstance(state, int)
                if state == 30:
                    # user input
                    yield history,\
                        gr.Textbox.update(visible=True, interactive=True), \
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Button.update(visible=True, interactive=True),\
                        gr.Button.update(visible=False)
                    return
                elif state == 99:
                    # finish
                    yield history, gr.Textbox.update(visible=True, interactive=False, value="finish!"), \
                        gr.Button.update(visible=True, interactive=False, value="finish!"), gr.Button.update(visible=True, interactive=True),\
                        gr.Button.update(visible=False)
                elif state == 98:
                    yield history, \
                        gr.Textbox.update(visible=False, interactive=False), \
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
                    return
                else:
                    history = self.handle_message(history, state, agent_name, token, node_name)
                    yield history, \
                        gr.Textbox.update(visible=False, interactive=False), \
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=False, interactive=False),\
                        gr.Button.update(visible=False)

    def send_button_when_click(self, text_user, history: list):
        """
        inputs=[self.text_user, self.chatbot],
        outputs=[self.text_user, self.btn_send, self.chatbot]
        """
        history.append(
            [UIHelper.wrap_css(text_user, "User"), None]
        )
        # print(f"server: send {text_user} to client")
        self.send_message("<USER>"+text_user+self.SIGN["SPLIT"])
        return gr.Textbox.update(value="", visible=False),\
            gr.Button.update(visible=False), \
            history,\
            gr.Button.update(visible=False)

    def reset_button_when_click(self, history, text_positive, text_negative, text_theme, text_user, btn_send, btn_start, btn_reset):
        """
        self.chatbot,
        self.text_positive,
        self.text_negative,
        self.text_theme,
        self.text_user,
        self.btn_send,
        self.btn_start,
        self.btn_reset
        self.btn_next
        """
        self.caller = 0
        return None, \
            "", \
            "", \
            "", \
            "", \
            gr.Button.update(value="Restarting...", interactive=False, visible=True),\
            gr.Button.update(value="Restarting...", interactive=False, visible=True),\
            gr.Button.update(value="Restarting...", interactive=False, visible=True),\
            gr.Button.update(value="Restarting...", interactive=False, visible=False)

    def reset_button_after_click(self, history, text_positive, text_negative, text_theme, text_user, btn_send, btn_start, btn_reset):
        self.reset()
        self.first_recieve_from_client(reset_mode=True)
        return gr.Chatbot.update(value=None, visible=False),\
            gr.Textbox.update(value=f"{self.cache['positive']}", interactive=True, visible=True),\
            gr.Textbox.update(value=f"{self.cache['negative']}", interactive=True, visible=True),\
            gr.Textbox.update(value=f"{self.cache['theme']}", interactive=True, visible=True),\
            gr.Textbox.update(value=f"", interactive=True, visible=False),\
            gr.Button.update(interactive=True, visible=False, value="Send"),\
            gr.Button.update(interactive=True, visible=True, value="Start"),\
            gr.Button.update(interactive=False, visible=False, value="Restart"),\
            gr.Button.update(interactive=True, visible=False, value="Next Agent")

    def btn_next_when_click(self):
        yield gr.Button.update(visible=False)
        self.send_message("nothing")
        self.caller = 1  # will not clear self.data_history
        time.sleep(0.5)
        return

    def construct_ui(
        self,
        theme: str = None,
        positive: str = None,
        negative: str = None,
        agents_name: List = None,
        default_cos_play_id: int = None
    ):
        theme = self.cache["theme"] if theme is None else theme
        positive = self.cache["positive"] if positive is None else positive
        negative = self.cache["negative"] if negative is None else negative
        agents_name = self.cache["agents_name"] if agents_name is None else agents_name
        default_cos_play_id = self.cache["default_cos_play_id"] if default_cos_play_id is None else default_cos_play_id

        with gr.Blocks(css=gc.CSS) as demo:
            with gr.Row():
                with gr.Column():
                    self.radio_mode = gr.Radio(
                        [Client.AUTO_MODE, Client.SINGLE_MODE],
                        value=Client.AUTO_MODE,
                        interactive=True,
                        label=Client.MODE_LABEL,
                        info=Client.MODE_INFO
                    )
                    self.text_theme = gr.Textbox(
                        label="Debate Topic:",
                        value=theme,
                        placeholder="Please input the Debate Topic"
                    )
                    self.text_positive = gr.Textbox(
                        label="Affirmative viewpoint:",
                        value=positive,
                        placeholder="Please input the Affirmative viewpoint"
                    )
                    self.text_negative = gr.Textbox(
                        label="Negative viewpoint:",
                        value=negative,
                        placeholder="Please input the Negative viewpoint"
                    )
                    self.radio_choose = gr.Radio(
                        agents_name,
                        value=agents_name[default_cos_play_id],
                        label="User'agent",
                        interactive=True
                    )
                    self.btn_start = gr.Button(
                        value="run"
                    )
                VISIBLE = False
                with gr.Column():
                    self.chatbot = gr.Chatbot(
                        height=650,
                        elem_id="chatbot1",
                        label="Dialog",
                        visible=VISIBLE
                    )
                    self.btn_next = gr.Button(
                        value="Next Agent Start",
                        visible=False
                    )
                    self.text_user = gr.Textbox(
                        label="Input",
                        placeholder="Input here",
                        visible=VISIBLE
                    )
                    self.btn_send = gr.Button(
                        value="Send",
                        visible=VISIBLE
                    )
                    self.btn_reset = gr.Button(
                        value="Restart",
                        visible=VISIBLE
                    )

            self.btn_start.click(
                fn=self.start_button_when_click,
                inputs=[self.text_theme, self.text_positive, self.text_negative, self.radio_choose, self.radio_mode],
                outputs=[self.chatbot, self.btn_start]
            ).then(
                fn=self.start_button_after_click,
                inputs=[self.chatbot],
                outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
            )

            self.btn_send.click(
                fn=self.send_button_when_click,
                inputs=[self.text_user, self.chatbot],
                outputs=[self.text_user, self.btn_send, self.chatbot, self.btn_reset]
            ).then(
                fn=self.start_button_after_click,
                inputs=[self.chatbot],
                outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
            )

            self.btn_reset.click(
                fn=self.reset_button_when_click,
                inputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset
                ],
                outputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset,
                    self.btn_next
                ]
            ).then(
                fn=self.reset_button_after_click,
                inputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset
                ],
                outputs=[
                    self.chatbot,
                    self.text_positive,
                    self.text_negative,
                    self.text_theme,
                    self.text_user,
                    self.btn_send,
                    self.btn_start,
                    self.btn_reset,
                    self.btn_next
                ]
            )

            self.btn_next.click(
                fn=self.btn_next_when_click,
                inputs=[],
                outputs=[self.btn_next]
            ).then(
                fn=self.start_button_after_click,
                inputs=[self.chatbot],
                outputs=[self.chatbot, self.text_user, self.btn_send, self.btn_reset, self.btn_next]
            )

        self.demo = demo


if __name__ == '__main__':
    ui = DebateUI(client_cmd=["python", "gradio_backend.py"])
    ui.construct_ui()
    ui.run()
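A quick orientation on the wire format used above, with a minimal sketch that is not part of the committed files: the backend serializes every update as str([state, agent_name, token, node_name]) and start_button_after_click() recovers it with eval(). States 10/11/12 stream tokens into chat bubbles (first token of a turn, follow-up token, first token of a new node), 30 asks the UI for user input, 98 pauses before the next agent in single mode, and 99 ends the run. The concrete values below are invented for illustration.

# Illustration only: the message framing exchanged between gradio_backend.py and app.py.
message = str([11, "Mary(Affirmative_First)", " token", "Debate_Order_state"])
state, agent_name, token, node_name = eval(message)  # parsed the same way in start_button_after_click()
assert state % 10 == 1  # 0: first token of a turn, 1: append to the current bubble, 2: start a new bubble
print(agent_name, token, node_name)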
config.json
ADDED
@@ -0,0 +1,516 @@
{
  "config": {
    "API_KEY": "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",
    "PROXY": "",
    "MAX_CHAT_HISTORY": "5",
    "TOP_K": "1",
    "ACTIVE_MODE": "0",
    "GRADIO": "0",
    "User_Names": "[]"
  },
  "environment_type": "competive",
  "LLM_type": "OpenAI",
  "LLM": {
    "temperature": 0.0,
    "model": "gpt-3.5-turbo-16k-0613",
    "log_path": "logs/god"
  },
  "agents": {
    "John": {
      "style": "professional",
      "roles": {
        "Affirmative_Task_Allocation_state": "Affirmative_Debate_organizer"
      }
    },
    "Mary": {
      "style": "professional",
      "roles": {
        "Affirmative_Task_Allocation_state": "Affirmative_First",
        "Debate_Order_state": "Affirmative_First",
        "Debate_Random_state": "Affirmative_First"
      }
    },
    "James": {
      "style": "professional",
      "roles": {
        "Affirmative_Task_Allocation_state": "Affirmative_Second",
        "Debate_Order_state": "Affirmative_Second",
        "Debate_Random_state": "Affirmative_Second"
      }
    },
    "Jennifer": {
      "style": "professional",
      "roles": {
        "Affirmative_Task_Allocation_state": "Affirmative_Third",
        "Debate_Order_state": "Affirmative_Third",
        "Debate_Random_state": "Affirmative_Third"
      }
    },
    "Michael": {
      "style": "professional",
      "roles": {
        "Negative_Task_Allocation_state": "Negative_Debate_organizer"
      }
    },
    "Emily": {
      "style": "professional",
      "roles": {
        "Negative_Task_Allocation_state": "Negative_First",
        "Debate_Order_state": "Negative_First",
        "Debate_Random_state": "Negative_First"
      }
    },
    "William": {
      "style": "professional",
      "roles": {
        "Negative_Task_Allocation_state": "Negative_Second",
        "Debate_Order_state": "Negative_Second",
        "Debate_Random_state": "Negative_Second"
      }
    },
    "Sarah": {
      "style": "professional",
      "roles": {
        "Negative_Task_Allocation_state": "Negative_Third",
        "Debate_Order_state": "Negative_Third",
        "Debate_Random_state": "Negative_Third"
      }
    },
    "David": {
      "style": "professional",
      "roles": {
        "Debate_Order_state": "Debate_Judge",
        "Debate_Random_state": "Debate_Judge",
        "Judge_state": "Debate_Judge"
      }
    }
  },
  "root": "Affirmative_Task_Allocation_state",
  "relations": {
    "Affirmative_Task_Allocation_state": {
      "0": "Affirmative_Task_Allocation_state",
      "1": "Negative_Task_Allocation_state"
    },
    "Negative_Task_Allocation_state": {
      "0": "Negative_Task_Allocation_state",
      "1": "Debate_Order_state"
    },
    "Debate_Order_state": {
      "0": "Debate_Order_state",
      "1": "Debate_Random_state"
    },
    "Debate_Random_state": {
      "0": "Debate_Random_state",
      "1": "Judge_state"
    },
    "Judge_state": {
      "0": "end_state"
    }
  },
  "states": {
    "end_state": {
      "agent_states": {}
    },
    "Affirmative_Task_Allocation_state": {
      "controller": {
        "controller_type": "order",
        "max_chat_nums": 12,
        "judge_system_prompt": "",
        "judge_last_prompt": "",
        "judge_extract_words": "end"
      },
      "begin_role": "Affirmative_Debate_organizer",
      "begin_query": "The debate topic is as follows: \n<debate topic>\nShould AI Replace Humans in Creative Fields?? Affirmative viewpoint: AI should replace humans in creative fields because it can produce art and content efficiently, reduce costs, and eliminate human bias. negative viewpoint: AI should not replace humans in creative fields as it lacks true creativity, emotions, and the ability to understand complex human experiences.\n<debate topic>\n, now , begin to discuss!",
      "environment_prompt": "It is currently the debate stage, where the positive side is assigning tasks.Affirmative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.",
      "roles": [
        "Affirmative_Debate_organizer",
        "Affirmative_First",
        "Affirmative_Second",
        "Affirmative_Third"
      ],
      "LLM_type": "OpenAI",
      "LLM": {
        "temperature": 1.0,
        "model": "gpt-3.5-turbo-16k-0613",
        "log_path": "logs/Affirmative_Task_Allocation_state"
      },
      "agent_states": {
        "Affirmative_First": {
          "style": {
            "role": "Opening Advocate for the Affirmative"
          },
          "task": {
            "task": "1.Present arguments and main points.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Organize clear facts and logic to firmly support the stance. Introduce main points succinctly in the opening statement, laying a solid foundation for the debate.\n2.Exploring ways to structure the opening statement for maximum impact and clarity. Consider using attention-grabbing statistics or quotes to engage the audience.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        },
        "Affirmative_Second": {
          "style": {
            "role": "Compelling Evidence Presenter"
          },
          "task": {
            "task": "1.Elaborate on arguments, provide evidence.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Elaborate on the points raised by the First Affirmative, present specific examples, data, and expert opinions to support the claims. Address potential counterarguments, emphasize strengths, and ensure coherent and persuasive reasoning.\n2.Discuss strategies for integrating real-world examples that resonate with the audience's experiences. Brainstorm ways to preemptively address common counterarguments and provide solid rebuttals.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        },
        "Affirmative_Third": {
          "style": {
            "role": "Counterpoint Master"
          },
          "task": {
            "task": "1.Counter negative arguments, summarize affirmative stance.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Counter negative arguments by identifying logical flaws or weaknesses. Highlight the advantages of the affirmative stance, emphasize points already presented, and deliver a concise yet strong summary of the affirmative position.\n2.Exchange ideas on how to structure counterarguments effectively. Emphasize the importance of maintaining a respectful tone while refuting negative arguments.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        },
        "Affirmative_Debate_organizer": {
          "style": {
            "role": "Debate Organizer"
          },
          "task": {
            "task": "1.Manage debate proceedings and provide suggestions to help improve their arguments.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Introduce the topic and rules before the debate starts, ensure each speaker adheres to their allotted time. Guide the questioning and answer phase, prevent excessive arguing. Summarize the debate outcome, providing a clear conclusion for judges and the audience.\n2.Brainstorm ways to facilitate smooth transitions between speakers and manage the flow of the debate. Discuss techniques for managing heated exchanges and encouraging respectful discourse.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        }
      }
    },
    "Negative_Task_Allocation_state": {
      "controller": {
        "controller_type": "order",
        "max_chat_nums": 12,
        "judge_system_prompt": "",
        "judge_last_prompt": "",
        "judge_extract_words": "end"
      },
      "begin_role": "Negative_Debate_organizer",
      "begin_query": "The debate topic is as follows: \n<debate topic>\nShould AI Replace Humans in Creative Fields?? Affirmative viewpoint: AI should replace humans in creative fields because it can produce art and content efficiently, reduce costs, and eliminate human bias. negative viewpoint: AI should not replace humans in creative fields as it lacks true creativity, emotions, and the ability to understand complex human experiences.\n<debate topic>\n, now , begin to discuss!",
      "environment_prompt": "It is currently the debate stage, where the Negative side is assigning tasks.The debate organizer sets the stage for the competition, explaining the debate process and rules. Debaters are called upon to allocate tasks for each speech, ensuring an equal distribution of responsibilities.Negative debaters gather to assign tasks, meticulously plan their speeches, and identify key arguments and evidence to support their viewpoint.",
      "roles": [
        "Negative_Debate_organizer",
        "Negative_First",
        "Negative_Second",
        "Negative_Third"
      ],
      "LLM_type": "OpenAI",
      "LLM": {
        "temperature": 1.0,
        "model": "gpt-3.5-turbo-16k-0613",
        "log_path": "logs/Negative_Task_Allocation_state"
      },
      "agent_states": {
        "Negative_First": {
          "style": {
            "role": "Opening Advocate for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "1.Present arguments and main points.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Organize clear facts and logic to firmly support the stance. Introduce main points succinctly in the opening statement, laying a solid foundation for the debate.\n2.Exploring ways to structure the opening statement for maximum impact and clarity. Consider using attention-grabbing statistics or quotes to engage the audience.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        },
        "Negative_Second": {
          "style": {
            "role": "Compelling Evidence Presenter for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "1.Elaborate on arguments, provide evidence.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Elaborate on the points raised by the First Negative, present specific examples, data, and expert opinions to support the claims. Address potential counterarguments, emphasize strengths, and ensure coherent and persuasive reasoning.\n2.Discuss strategies for integrating real-world examples that resonate with the audience's experiences. Brainstorm ways to preemptively address common counterarguments and provide solid rebuttals.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        },
        "Negative_Third": {
          "style": {
            "role": "Counterpoint Master for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "1.Counter affirmative arguments, summarize Negative stance.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Counter affirmative arguments by identifying logical flaws or weaknesses. Highlight the advantages of the Negative stance, emphasize points already presented, and deliver a concise yet strong summary of the Negative position.\n2.Exchange ideas on how to structure counterarguments effectively. Emphasize the importance of maintaining a respectful tone while refuting negative arguments.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        },
        "Negative_Debate_organizer": {
          "style": {
            "role": "Debate Organizer for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "1.Manage debate proceedings and provide suggestions to help improve their arguments.\n2.Summarize and analyze other people's opinions so that you can better complete tasks and actively provide opinions to others.\n3.Please try to focus the discussion around the topic."
          },
          "rule": {
            "rule": "1.Introduce the topic and rules before the debate starts, ensure each speaker adheres to their allotted time. Guide the questioning and answer phase, prevent excessive arguing. Summarize the debate outcome, providing a clear conclusion for judges and the audience.\n2.Brainstorm ways to facilitate smooth transitions between speakers and manage the flow of the debate. Discuss techniques for managing heated exchanges and encouraging respectful discourse.\n3.Actively discuss and express opinions with others and assist others in improving their arguments.\n4.Actively discuss and express opinions with others and assist others in improving their arguments And actively identify flaws in other people's arguments as well. 5.Don't reiterate your own tasks repeatedly; offer more suggestions for others' tasks."
          }
        }
      }
    },
    "Debate_Order_state": {
      "controller": {
        "controller_type": "order",
        "max_chat_nums": 7,
        "judge_system_prompt": "",
        "judge_last_prompt": "",
        "judge_extract_words": "end"
      },
      "roles": [
        "Debate_Judge",
        "Affirmative_First",
        "Negative_First",
        "Affirmative_Second",
        "Negative_Second",
        "Affirmative_Third",
        "Negative_Third"
      ],
      "LLM_type": "OpenAI",
      "LLM": {
        "temperature": 1.0,
        "model": "gpt-3.5-turbo-16k-0613",
        "log_path": "logs/Debate_state"
      },
      "begin_role": "Debate_Judge",
      "begin_query": "Now please proceed with your sequential debate according to your chosen topic.",
      "environment_prompt": "Now that we've started the sequential debating phase, each debater needs to present their own viewpoints.",
      "agent_states": {
        "Affirmative_First": {
          "style": {
            "role": "Opening Advocate for the Affirmative",
            "style": "professional"
          },
          "task": {
            "task": "Deliver the first speech for the affirmative side, elaborating on the main arguments and providing additional evidence.\n"
          },
          "rule": {
            "rule": "Expand on the affirmative points from the previous speech, address any challenges raised by the negative side, and present solid evidence to support your claims."
          }
        },
        "Negative_First": {
          "style": {
            "role": "Opening Advocate for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "Deliver the first speech for the negative side, countering the affirmative arguments and presenting your own points with supporting evidence.\n"
          },
          "rule": {
            "rule": "Effectively counter the affirmative arguments made in the first speech, present well-reasoned arguments for the negative side, and provide strong evidence to back your stance."
          }
        },
        "Affirmative_Second": {
          "style": {
            "role": "Compelling Evidence Presenter for the Affirmative",
            "style": "professional"
          },
          "task": {
            "task": "Deliver the second speech for the affirmative side, further strengthening the affirmative arguments and addressing any challenges posed by the negative side.\n"
          },
          "rule": {
            "rule": "Build upon the affirmative case, respond to the negative's counterarguments, and reinforce your points with compelling evidence.\n"
          }
        },
        "Negative_Second": {
          "style": {
            "role": "Compelling Evidence Presenter for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "Deliver the second speech for the negative side, further countering the affirmative arguments and providing additional evidence to support your stance.\n"
          },
          "rule": {
            "rule": "Continue to challenge the affirmative side's points, present new arguments or counterarguments, and substantiate your position with strong evidence.\n"
          }
        },
        "Affirmative_Third": {
          "style": {
            "role": "Counterpoint Master for the Affirmative",
            "style": "professional"
          },
          "task": {
            "task": "Deliver the third speech for the affirmative side, summarizing the key points and providing a persuasive conclusion.\n"
          },
          "rule": {
            "rule": "Summarize the affirmative case, reinforce the main arguments, respond to the negative side's challenges, and make a strong closing statement.\n"
          }
        },
        "Negative_Third": {
          "style": {
            "role": "Counterpoint Master for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "Deliver the third speech for the negative side, summarizing the key points and providing a persuasive conclusion.\n"
          },
          "rule": {
            "rule": "Summarize the negative case, highlight the main counterarguments, respond to the affirmative side's points, and deliver a compelling closing statement.\n"
          }
        },
        "Debate_Judge": {
          "style": {
            "role": "Debate Judge",
            "style": "professional"
          },
          "task": {
            "task": "Evaluate the quality of the debate presentations and arguments made by both sides.\n"
          },
          "rule": {
            "rule": "Listen carefully to the speeches, consider the strength of the arguments, the evidence presented, and the overall persuasiveness. Provide a fair and objective assessment of each side's performance.\n"
          }
        }
      }
    },
    "Debate_Random_state": {
      "controller": {
        "controller_type": "rule",
        "max_chat_nums": 12,
        "judge_system_prompt": "",
        "judge_last_prompt": "",
        "judge_extract_words": "end",
        "call_system_prompt": "Observe the ongoing discussion and decide who should speak next based on the current context.Please carefully analyze the flow of the discussion, identify which side needs to respond or present new points, and ensure fairness in allocating speaking opportunities.",
        "call_last_prompt": "Please strictly adhere to the following format for outputting:",
        "call_extract_words": "end"
      },
      "roles": [
        "Debate_Judge",
        "Affirmative_First",
        "Negative_First",
        "Affirmative_Second",
        "Negative_Second",
        "Affirmative_Third",
        "Negative_Third"
      ],
      "LLM_type": "OpenAI",
      "LLM": {
        "temperature": 1.0,
        "model": "gpt-3.5-turbo-16k-0613",
        "log_path": "logs/Debate_Random_state"
      },
      "begin_role": "Debate_Judge",
      "begin_query": "Now, please engage in a free debate based on your chosen topic.",
      "environment_prompt": "We are now in the open debate phase, where each debater has the freedom to speak as they wish.",
      "agent_states": {
        "Affirmative_First": {
          "style": {
            "role": "Opening Advocate for the Affirmative",
            "style": "professional"
          },
          "task": {
            "task": "Present your viewpoints and arguments for the affirmative side in the ongoing discussion.\n"
          },
          "rule": {
            "rule": "Effectively counter the negative arguments, present your own points, and engage in constructive debate."
          }
        },
        "Negative_First": {
          "style": {
            "role": "Opening Advocate for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "Present your viewpoints and arguments for the negative side in the ongoing discussion.\n"
          },
          "rule": {
            "rule": "Effectively counter the affirmative arguments, present your own points, and engage in constructive debate."
          }
        },
        "Affirmative_Second": {
          "style": {
            "role": "Compelling Evidence Presenter for the Affirmative",
            "style": "professional"
          },
          "task": {
            "task": "Present your viewpoints and arguments for the affirmative side in the ongoing discussion.\n"
          },
          "rule": {
            "rule": "Effectively counter the negative arguments, present your own points, and engage in constructive debate."
          }
        },
        "Negative_Second": {
          "style": {
            "role": "Compelling Evidence Presenter for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "Present your viewpoints and arguments for the negative side in the ongoing discussion.\n"
          },
          "rule": {
            "rule": "Effectively counter the affirmative arguments, present your own points, and engage in constructive debate."
          }
        },
        "Affirmative_Third": {
          "style": {
            "role": "Counterpoint Master for the Affirmative",
            "style": "professional"
          },
          "task": {
            "task": "Present your viewpoints and arguments for the affirmative side in the ongoing discussion.\n"
          },
          "rule": {
            "rule": "Effectively counter the negative arguments, present your own points, and engage in constructive debate."
          }
        },
        "Negative_Third": {
          "style": {
            "role": "Counterpoint Master for the Negative",
            "style": "professional"
          },
          "task": {
            "task": "Present your viewpoints and arguments for the negative side in the ongoing discussion.\n"
          },
          "rule": {
            "rule": "Effectively counter the affirmative arguments, present your own points, and engage in constructive debate."
          }
        },
        "Debate_Judge": {
          "style": {
            "role": "Debate Judge",
            "style": "professional"
          },
          "task": {
            "task": "Evaluate the quality of the debate presentations and arguments made by both sides.\n"
          },
          "rule": {
            "rule": "Listen carefully to the speeches, consider the strength of the arguments, the evidence presented, and the overall persuasiveness. Provide a fair and objective assessment of each side's performance.\n"
          }
        }
      }
    },
    "Judge_state": {
      "roles": [
        "Debate_Judge"
      ],
      "LLM_type": "OpenAI",
      "LLM": {
        "temperature": 1.0,
        "model": "gpt-3.5-turbo-16k-0613",
        "log_path": "logs/Judge_state"
      },
      "agent_states": {
        "Debate_Judge": {
          "style": {
            "role": "Debate Judge",
            "style": "professional"
          },
          "task": {
            "task": "Determine the current debate's winner.\n"
          },
          "rule": {
            "rule": " After evaluating the speeches, arguments, evidence, and overall persuasiveness, provide a clear and decisive judgment. If the affirmative side wins, your response should be 'winner is Affirmative'. If the negative side wins, your response should be 'winner is Negative'. Your decision is required. Respond in a concise and certain manner, leaving no room for ambiguity."
          },
          "last": {
            "last_prompt": "Determine the current debate's winner, whether it's the affirmative side or the negative side. Please strictly adhere to the following format for output: If the affirmative side wins, output 'winner is Affirmative'. Otherwise, output'winner is Negative'.You must choose a winner and not waver.!"
          }
        }
      }
    }
  }
}
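The flow that this configuration encodes is easiest to read off "root" and "relations": each state either repeats ("0") or advances ("1") until Judge_state hands over to end_state. Below is a minimal sketch, not part of the commit, that prints that forward path; treating "1" as "advance" (and Judge_state's lone "0" as "finish") is an assumption inferred from the layout above.

# Illustration only: walk the state graph defined in config.json.
import json

with open("config.json", encoding="utf-8") as f:
    cfg = json.load(f)

state = cfg["root"]
path = [state]
while state != "end_state":
    hops = cfg["relations"][state]
    state = hops.get("1", hops["0"])  # most states loop on "0" and advance on "1"; Judge_state's only hop leads to end_state
    path.append(state)
print(" -> ".join(path))
# Affirmative_Task_Allocation_state -> Negative_Task_Allocation_state -> Debate_Order_state
#   -> Debate_Random_state -> Judge_state -> end_state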
gradio_backend.py
ADDED
@@ -0,0 +1,138 @@
import yaml
import os
import argparse
import sys
sys.path.append("../../../src/agents")
sys.path.append("../../Gradio_Config")
from agents.SOP import SOP
from agents.Agent import Agent
from agents.Environment import Environment
from agents.Memory import Memory
from gradio_base import Client
from app import DebateUI

def process(action):
    response = action.response
    send_name = action.name
    send_role = action.role
    if not action.is_user:
        print(f"{send_name}({send_role}):{response}")
    memory = Memory(send_role, send_name, response)
    return memory

def gradio_process(action, current_state):
    response = action.response
    all = ""
    for i, res in enumerate(response):
        all += res
        state = 10
        if action.is_user:
            state = 30
        elif action.state_begin:
            state = 12
            action.state_begin = False
        elif i > 0:
            state = 11
        send_name = f"{action.name}({action.role})"
        Client.send_server(str([state, send_name, res, current_state.name]))
        if state == 30:
            # print("client: waiting for input.")
            data: list = next(Client.receive_server)
            content = ""
            for item in data:
                if item.startswith("<USER>"):
                    content = item.split("<USER>")[1]
                    break
            # print(f"client: recieved `{content}` from server")
            action.response = content
            break
    else:
        action.response = all

def block_when_next(current_agent, current_state):
    if Client.LAST_USER:
        assert not current_agent.is_user
        Client.LAST_USER = False
        return
    if current_agent.is_user:
        # if next turn is user, we don't handle it here
        Client.LAST_USER = True
        return
    if Client.FIRST_RUN:
        Client.FIRST_RUN = False
    else:
        # block current process
        if Client.mode == Client.SINGLE_MODE:
            Client.send_server(str([98, f"{current_agent.name}({current_agent.state_roles[current_state.name]})", " ", current_state.name]))
            data: list = next(Client.receive_server)


def init(config):
    if not os.path.exists("logs"):
        os.mkdir("logs")
    sop = SOP.from_config(config)
    agents, roles_to_names, names_to_roles = Agent.from_config(config)
    environment = Environment.from_config(config)
    environment.agents = agents
    environment.roles_to_names, environment.names_to_roles = roles_to_names, names_to_roles
    sop.roles_to_names, sop.names_to_roles = roles_to_names, names_to_roles
    for name, agent in agents.items():
        agent.environment = environment
    return agents, sop, environment

def run(agents, sop, environment):
    while True:
        current_state, current_agent = sop.next(environment, agents)
        block_when_next(current_agent, current_state)
        if sop.finished:
            print("finished!")
            Client.send_server(str([99, ' ', ' ', current_state.name]))
            os.environ.clear()
            break
        action = current_agent.step(current_state, "")  # component_dict = current_state[self.role[current_node.name]] current_agent.compile(component_dict)
        gradio_process(action, current_state)
        memory = process(action)
        environment.update_memory(memory, current_state)


def prepare(agents, sop, environment):
    client = Client()
    Client.send_server = client.send_message
    content = sop.states['Affirmative_Task_Allocation_state'].begin_query
    parse_data = DebateUI.extract(content)
    client.send_message(
        {
            "theme": f"{parse_data[0]}",
            "positive": f"{parse_data[1]}",
            "negative": f"{parse_data[2]}",
            "agents_name": DebateUI.convert2list4agentname(sop)[0],
            "only_name": DebateUI.convert2list4agentname(sop)[0],
            "default_cos_play_id": -1
        }
    )
    client.listening_for_start_()
    client.mode = Client.mode = client.cache["mode"]
    # cover config and then start
    if Client.cache["cosplay"] is not None:
        agents[Client.cache["cosplay"]].is_user = True
    sop.states['Negative_Task_Allocation_state'] = sop.states['Affirmative_Task_Allocation_state'].begin_query = \
        DebateUI.merge(
            theme=Client.cache["theme"], positive=Client.cache["positive"], negative=Client.cache["negative"],
            origin_content=sop.states['Affirmative_Task_Allocation_state'].begin_query
        )


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='A demo of chatbot')
    parser.add_argument('--agent', type=str, help='path to SOP json', default="config.json")
    args = parser.parse_args()

    agents, sop, environment = init(args.agent)

    # add ==============================
    prepare(agents, sop, environment)
    # ==================================

    run(agents, sop, environment)
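How the pieces above fit together, as far as this diff shows: app.py is the entry point, its DebateUI spawns this script as a child process via client_cmd=["python", "gradio_backend.py"], and the two processes exchange the str()-encoded lists over the local socket configured in gradio_base.py (HOST plus the port picked by check_port() and persisted to PORT.txt). The sketch below only approximates that wiring, since the WebUI base class that actually launches the client is not included in this diff; the subprocess call is an assumption.

# Illustration only: roughly what happens when the Space starts.
import subprocess

# Assumed launch mechanism; the real logic lives in the WebUI base class, which is not shown here.
backend = subprocess.Popen(["python", "gradio_backend.py", "--agent", "config.json"])
# The Gradio UI (app.py) then connects over HOST:PORT and streams the backend's updates into the chatbot.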
gradio_base.py
ADDED
@@ -0,0 +1,559 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The AIWaves Inc. team.
|
3 |
+
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
|
17 |
+
# Emoji comes from this website:
|
18 |
+
# https://emojipedia.org/
|
19 |
+
import subprocess
|
20 |
+
from gradio_config import GradioConfig as gc
|
21 |
+
import gradio as gr
|
22 |
+
from typing import List, Tuple, Any
|
23 |
+
import time
|
24 |
+
import socket
|
25 |
+
import psutil
|
26 |
+
import os
|
27 |
+
from abc import abstractmethod
|
28 |
+
|
29 |
+
def convert2list4agentname(sop):
|
30 |
+
"""
|
31 |
+
Extract the agent names of all states
|
32 |
+
return:
|
33 |
+
only name: [name1, name2, ...]
|
34 |
+
agent_name: [name1(role1), name2(role2), ...]
|
35 |
+
"""
|
36 |
+
only_name = []
|
37 |
+
agent_name = []
|
38 |
+
roles_to_names = sop.roles_to_names
|
39 |
+
for state_name,roles_names in roles_to_names.items():
|
40 |
+
for role,name in roles_names.items():
|
41 |
+
agent_name.append(f"{name}({role})")
|
42 |
+
only_name.append(name)
|
43 |
+
agent_name = list(set(agent_name))
|
44 |
+
agent_name.sort()
|
45 |
+
return agent_name, only_name
|
46 |
+
|
47 |
+
def is_port_in_use(port):
|
48 |
+
"""Check if the port is available"""
|
49 |
+
for conn in psutil.net_connections():
|
50 |
+
if conn.laddr.port == port:
|
51 |
+
return True
|
52 |
+
return False
|
53 |
+
|
54 |
+
def check_port(port):
|
55 |
+
"""Determine available ports"""
|
56 |
+
if os.path.isfile("PORT.txt"):
|
57 |
+
port = int(open("PORT.txt","r",encoding='utf-8').readlines()[0])
|
58 |
+
else:
|
59 |
+
for i in range(10):
|
60 |
+
if is_port_in_use(port+i) == False:
|
61 |
+
port += i
|
62 |
+
break
|
63 |
+
with open("PORT.txt", "w") as f:
|
64 |
+
f.writelines(str(port))
|
65 |
+
return port
|
66 |
+
|
67 |
+
# Determine some heads
|
68 |
+
SPECIAL_SIGN = {
|
69 |
+
"START": "<START>",
|
70 |
+
"SPLIT": "<SELFDEFINESEP>",
|
71 |
+
"END": "<ENDSEP>"
|
72 |
+
}
|
73 |
+
HOST = "127.0.0.1"
|
74 |
+
# The starting port number for the search.
|
75 |
+
PORT = 15000
|
76 |
+
PORT = check_port(PORT)
|
77 |
+
|
78 |
+
def print_log(message:str):
|
79 |
+
print(f"[{time.ctime()}]{message}")
|
80 |
+
|
81 |
+
global_dialog = {
|
82 |
+
"user": [],
|
83 |
+
"agent": {},
|
84 |
+
"system": []
|
85 |
+
}
|
86 |
+
|
87 |
+
class UIHelper:
|
88 |
+
"""Static Class"""
|
89 |
+
|
90 |
+
@classmethod
|
91 |
+
def wrap_css(cls, content, name) -> str:
|
92 |
+
"""
|
93 |
+
Description:
|
94 |
+
Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
|
95 |
+
Input:
|
96 |
+
content: Output content
|
97 |
+
name: Whose output is it
|
98 |
+
Output:
|
99 |
+
HTML
|
100 |
+
"""
|
101 |
+
assert name in gc.OBJECT_INFO, \
|
102 |
+
f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
|
103 |
+
output = ""
|
104 |
+
info = gc.OBJECT_INFO[name]
|
105 |
+
if info["id"] == "USER":
|
106 |
+
output = gc.BUBBLE_CSS["USER"].format(
|
107 |
+
info["bubble_color"], # Background-color
|
108 |
+
info["text_color"], # Color of the agent's name
|
109 |
+
name, # Agent name
|
110 |
+
info["text_color"], # Font color
|
111 |
+
info["font_size"], # Font size
|
112 |
+
content, # Content
|
113 |
+
info["head_url"] # URL of the avatar
|
114 |
+
)
|
115 |
+
elif info["id"] == "SYSTEM":
|
116 |
+
output = gc.BUBBLE_CSS["SYSTEM"].format(
|
117 |
+
info["bubble_color"], # Background-color
|
118 |
+
info["font_size"], # Font size
|
119 |
+
info["text_color"], # Font color
|
120 |
+
name, # Agent name
|
121 |
+
content # Content
|
122 |
+
)
|
123 |
+
elif info["id"] == "AGENT":
|
124 |
+
output = gc.BUBBLE_CSS["AGENT"].format(
|
125 |
+
info["head_url"], # URL of the avatar
|
126 |
+
info["bubble_color"], # Background-color
|
127 |
+
info["text_color"], # Font color
|
128 |
+
name, # Agent name
|
129 |
+
info["text_color"], # Font color
|
130 |
+
info["font_size"], # Font size
|
131 |
+
content, # Content
|
132 |
+
)
|
133 |
+
else:
|
134 |
+
assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
|
135 |
+
return output
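A small usage sketch of wrap_css; "System" is one of the names pre-registered in GradioConfig.OBJECT_INFO, so no add_agent call is needed first:

# Wrap a plain message into an HTML chat bubble for gr.Chatbot.
html_bubble = UIHelper.wrap_css("All agents are ready.", "System")
# html_bubble is a <div> string that can be appended to the chatbot history.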
|
136 |
+
|
137 |
+
@classmethod
|
138 |
+
def novel_filter(cls, content, agent_name):
|
139 |
+
|
140 |
+
"""比如<CONTENT>...</CONTENT>,就应该输出CONTENT:..."""
|
141 |
+
IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
|
142 |
+
if IS_RECORDER:
|
143 |
+
BOLD_FORMAT = """<div style="color: #000000; display:inline">
|
144 |
+
<b>{}</b>
|
145 |
+
</div>
|
146 |
+
<span style="color: black;">
|
147 |
+
"""
|
148 |
+
else:
|
149 |
+
BOLD_FORMAT = "<b>{}</b>"
|
150 |
+
CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
|
151 |
+
<b>{}</b>
|
152 |
+
</div>
|
153 |
+
"""
|
154 |
+
START_FORMAT = "<{}>"
|
155 |
+
END_FORMAT = "</{}>"
|
156 |
+
mapping = {
|
157 |
+
"TARGET": "🎯 Current Target: ",
|
158 |
+
"NUMBER": "🍖 Required Number: ",
|
159 |
+
"THOUGHT": "🤔 Overall Thought: ",
|
160 |
+
"FIRST NAME": "⚪ First Name: ",
|
161 |
+
"LAST NAME": "⚪ Last Name: ",
|
162 |
+
"ROLE": "🤠 Character Properties: ",
|
163 |
+
"RATIONALES": "🤔 Design Rationale: ",
|
164 |
+
"BACKGROUND": "🚊 Character Background: ",
|
165 |
+
"ID": "🔴 ID: ",
|
166 |
+
"TITLE": "🧩 Chapter Title: ",
|
167 |
+
"ABSTRACT": "🎬 Abstract: ",
|
168 |
+
"CHARACTER INVOLVED": "☃️ Character Involved: ",
|
169 |
+
"ADVICE": "💬 Advice:",
|
170 |
+
"NAME": "📛 Name: ",
|
171 |
+
"GENDER": "👩👩👦👦 Gender: ",
|
172 |
+
"AGE": "⏲️ Age: ",
|
173 |
+
"WORK": "👨🔧 Work: ",
|
174 |
+
"PERSONALITY": "🧲 Character Personality: ",
|
175 |
+
"SPEECH STYLE": "🗣️ Speaking Style: ",
|
176 |
+
"RELATION": "🏠 Relation with Others: ",
|
177 |
+
"WORD COUNT": "🎰 Word Count: ",
|
178 |
+
"CHARACTER DESIGN": "📈 Character Design: ",
|
179 |
+
"CHARACTER REQUIRE": "📈 Character Require: ",
|
180 |
+
"CHARACTER NAME": "📈 Character Naming Analysis: ",
|
181 |
+
"CHARACTER NOW": "📈 Character Now: ",
|
182 |
+
"OUTLINE DESIGN": "📈 Outline Design: ",
|
183 |
+
"OUTLINE REQUIRE": "📈 Outline Require: ",
|
184 |
+
"OUTLINE NOW": "📈 Outline Now: ",
|
185 |
+
"SUB TASK": "🎯 Current Sub Task: ",
|
186 |
+
"CHARACTER ADVICE": "💬 Character Design Advice: ",
|
187 |
+
"OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
|
188 |
+
"OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
|
189 |
+
"OUTLINE ADVICE": "💬 Outline Advice: ",
|
190 |
+
"NEXT": "➡️ Next Advice: ",
|
191 |
+
"TOTAL NUMBER": "🔢 Total Number: "
|
192 |
+
}
|
193 |
+
for i in range(1, 10):
|
194 |
+
mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
|
195 |
+
mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
|
196 |
+
for key in mapping:
|
197 |
+
if key in [f"CHARACTER {i}" for i in range(1, 10)] \
|
198 |
+
or key in [f"SECTION {i}" for i in range(1, 10)] \
|
199 |
+
:
|
200 |
+
content = content.replace(
|
201 |
+
START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
|
202 |
+
)
|
203 |
+
elif key in ["TOTAL NUMBER"]:
|
204 |
+
content = content.replace(
|
205 |
+
START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
|
206 |
+
)
|
207 |
+
content = content.replace(
|
208 |
+
END_FORMAT.format(key), "</span>"
|
209 |
+
)
|
210 |
+
else:
|
211 |
+
content = content.replace(
|
212 |
+
START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
|
213 |
+
)
|
214 |
+
|
215 |
+
content = content.replace(
|
216 |
+
END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
|
217 |
+
)
|
218 |
+
return content
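For illustration, a sketch of what novel_filter does to one tagged span (the agent and text below are made up):

# A non-recorder agent's "<TARGET>Write the first chapter</TARGET>" becomes
# "<b>🎯 Current Target: </b>Write the first chapter".
preview = UIHelper.novel_filter("<TARGET>Write the first chapter</TARGET>", "Novelist")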
|
219 |
+
|
220 |
+
@classmethod
|
221 |
+
def singleagent_filter(cls, content, agent_name):
|
222 |
+
return content
|
223 |
+
|
224 |
+
@classmethod
|
225 |
+
def debate_filter(cls, content, agent_name):
|
226 |
+
return content
|
227 |
+
|
228 |
+
@classmethod
|
229 |
+
def code_filter(cls, content, agent_name):
|
230 |
+
# return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
|
231 |
+
return content
|
232 |
+
|
233 |
+
@classmethod
|
234 |
+
def general_filter(cls, content, agent_name):
|
235 |
+
return content
|
236 |
+
|
237 |
+
@classmethod
|
238 |
+
def filter(cls, content: str, agent_name: str, ui_name: str):
|
239 |
+
"""
|
240 |
+
Description:
|
241 |
+
Modify the output content so that it renders more cleanly when shown in Gradio.
|
242 |
+
Input:
|
243 |
+
content: output content
|
244 |
+
agent_name: Whose output is it
|
245 |
+
ui_name: The name of the UI that is currently running
|
246 |
+
Output:
|
247 |
+
Modified content
|
248 |
+
"""
|
249 |
+
mapping = {
|
250 |
+
"SingleAgentUI": cls.singleagent_filter,
|
251 |
+
"DebateUI": cls.debate_filter,
|
252 |
+
"NovelUI": cls.novel_filter,
|
253 |
+
"CodeUI": cls.code_filter,
|
254 |
+
"GeneralUI": cls.general_filter
|
255 |
+
}
|
256 |
+
if ui_name in mapping:
|
257 |
+
return mapping[ui_name](content, agent_name)
|
258 |
+
else:
|
259 |
+
return content
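filter only dispatches on the UI name; for example, the call below is routed to novel_filter:

styled = UIHelper.filter("<TARGET>Plan the plot</TARGET>", "Novelist", "NovelUI")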
|
260 |
+
|
261 |
+
class Client:
|
262 |
+
"""
|
263 |
+
For inter-process communication, this is the client.
|
264 |
+
`gradio_backend.py` serves as the backend, while `run_gradio` is the frontend.
|
265 |
+
Communication between the frontend and backend is accomplished using Sockets.
|
266 |
+
"""
|
267 |
+
# =======================Radio Const String======================
|
268 |
+
SINGLE_MODE = "Single Mode"
|
269 |
+
AUTO_MODE = "Auto Mode"
|
270 |
+
MODE_LABEL = "Select the execution mode"
|
271 |
+
MODE_INFO = "In single mode, execution pauses after each agent finishes its output and waits for you to click continue. In auto mode, once you finish your input, all agents keep producing output until the task ends."
|
272 |
+
# ===============================================================
|
273 |
+
mode = AUTO_MODE
|
274 |
+
FIRST_RUN:bool = True
|
275 |
+
# If the last agent was the user, the next agent is executed automatically instead of waiting for a button click.
|
276 |
+
LAST_USER:bool = False
|
277 |
+
|
278 |
+
receive_server = None
|
279 |
+
send_server = None
|
280 |
+
current_node = None
|
281 |
+
cache = {}
|
282 |
+
|
283 |
+
def __init__(self, host=HOST, port=PORT, bufsize=1024):
|
284 |
+
assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
|
285 |
+
self.SIGN = SPECIAL_SIGN
|
286 |
+
self.bufsize = bufsize
|
287 |
+
assert bufsize > 0
|
288 |
+
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
289 |
+
self.client_socket.connect((host, port))
|
290 |
+
while True:
|
291 |
+
data = self.client_socket.recv(self.bufsize).decode('utf-8')
|
292 |
+
if data == "hi":
|
293 |
+
self.client_socket.send("hello agent".encode('utf-8'))
|
294 |
+
time.sleep(1)
|
295 |
+
elif data == "check":
|
296 |
+
break
|
297 |
+
print_log("Client: connecting successfully......")
|
298 |
+
|
299 |
+
def start_server(self):
|
300 |
+
while True:
|
301 |
+
message = yield
|
302 |
+
if message == 'exit':
|
303 |
+
break
|
304 |
+
self.send_message(message=message)
|
305 |
+
|
306 |
+
def send_message(self, message):
|
307 |
+
"""Send the messaget to the server."""
|
308 |
+
if isinstance(message, list) or isinstance(message, dict):
|
309 |
+
message = str(message)
|
310 |
+
assert isinstance(message, str)
|
311 |
+
message = message + self.SIGN["SPLIT"]
|
312 |
+
self.client_socket.send(message.encode('utf-8'))
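The framing convention used by both sides is worth spelling out; this is only a sketch of what travels over the socket:

# Every payload is terminated by the SPLIT marker, so several messages can share
# one TCP stream and still be separated again on the receiving side.
stream = "first reply<SELFDEFINESEP>second reply<SELFDEFINESEP>"
messages = stream.split(SPECIAL_SIGN["SPLIT"])[:-1]   # ["first reply", "second reply"]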
|
313 |
+
|
314 |
+
def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
|
315 |
+
"""Receive messages from the server, and it will block the process. Supports receiving long text."""
|
316 |
+
remaining = ""
|
317 |
+
while True:
|
318 |
+
# receive message
|
319 |
+
dataset = self.client_socket.recv(self.bufsize)
|
320 |
+
try:
|
321 |
+
# Decoding fails when a multi-byte character is split across chunks (i.e. a long text); keep buffering until it succeeds.
|
322 |
+
dataset = dataset.decode('utf-8')
|
323 |
+
except UnicodeDecodeError:
|
324 |
+
if not isinstance(remaining, bytes):
|
325 |
+
remaining = remaining.encode('utf-8')
|
326 |
+
assert isinstance(dataset, bytes)
|
327 |
+
remaining += dataset
|
328 |
+
try:
|
329 |
+
dataset = remaining.decode('utf-8')
|
330 |
+
remaining = ""
|
331 |
+
except UnicodeDecodeError:
|
332 |
+
continue
|
333 |
+
assert isinstance(remaining, str)
|
334 |
+
dataset = remaining + dataset
|
335 |
+
list_dataset = dataset.split(split_identifier)
|
336 |
+
if len(list_dataset) == 1:
|
337 |
+
# If there is only one result from the split, it indicates that the current sequence itself has not yet ended.
|
338 |
+
remaining = list_dataset[0]
|
339 |
+
continue
|
340 |
+
else:
|
341 |
+
remaining = list_dataset[-1]
|
342 |
+
# Received successfully.
|
343 |
+
list_dataset = list_dataset[:-1]
|
344 |
+
return_value = []
|
345 |
+
for item in list_dataset:
|
346 |
+
if end_identifier is not None and item == end_identifier:
|
347 |
+
break
|
348 |
+
return_value.append(item)
|
349 |
+
identifier = yield return_value
|
350 |
+
if identifier is not None:
|
351 |
+
end_identifier, split_identifier = identifier
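Because receive_message is a generator, callers drive it explicitly; a sketch (client is a connected Client instance, assumed):

receiver = client.receive_message()
first_batch = next(receiver)       # blocks until at least one complete message arrives
next_batch = receiver.send(None)   # keep pulling further batches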
|
352 |
+
|
353 |
+
def listening_for_start_(self):
|
354 |
+
"""
|
355 |
+
When the server starts, the client is automatically launched.
|
356 |
+
At this point, process synchronization is required,
|
357 |
+
such as sending client data to the server for rendering,
|
358 |
+
then the server sending the modified data back to the client,
|
359 |
+
and simultaneously sending a startup command.
|
360 |
+
Once the client receives the data, it will start running.
|
361 |
+
"""
|
362 |
+
Client.receive_server = self.receive_message()
|
363 |
+
# Waiting for information from the server.
|
364 |
+
data: list = next(Client.receive_server)
|
365 |
+
assert len(data) == 1
|
366 |
+
data = eval(data[0])
|
367 |
+
assert isinstance(data, dict)
|
368 |
+
Client.cache.update(data)
|
369 |
+
# Waiting for start command from the server.
|
370 |
+
data:list = Client.receive_server.send(None)
|
371 |
+
assert len(data) == 1
|
372 |
+
assert data[0] == "<START>"
|
373 |
+
|
374 |
+
class WebUI:
|
375 |
+
"""
|
376 |
+
The base class for the frontend, which encapsulates some functions for process information synchronization.
|
377 |
+
When a new frontend needs to be created, you should inherit from this class,
|
378 |
+
then implement the `construct_ui()` method and set up event listeners.
|
379 |
+
Finally, execute `run()` to load it.
|
380 |
+
"""
|
381 |
+
|
382 |
+
def receive_message(
|
383 |
+
self,
|
384 |
+
end_identifier:str=None,
|
385 |
+
split_identifier:str=SPECIAL_SIGN["SPLIT"]
|
386 |
+
)->List:
|
387 |
+
"""This is the same as in Client class."""
|
388 |
+
yield "hello"
|
389 |
+
remaining = ""
|
390 |
+
while True:
|
391 |
+
dataset = self.client_socket.recv(self.bufsize)
|
392 |
+
try:
|
393 |
+
dataset = dataset.decode('utf-8')
|
394 |
+
except UnicodeDecodeError:
|
395 |
+
if not isinstance(remaining, bytes):
|
396 |
+
remaining = remaining.encode('utf-8')
|
397 |
+
assert isinstance(dataset, bytes)
|
398 |
+
remaining += dataset
|
399 |
+
try:
|
400 |
+
dataset = remaining.decode('utf-8')
|
401 |
+
remaining = ""
|
402 |
+
except UnicodeDecodeError:
|
403 |
+
continue
|
404 |
+
assert isinstance(remaining, str)
|
405 |
+
dataset = remaining + dataset
|
406 |
+
list_dataset = dataset.split(split_identifier)
|
407 |
+
if len(list_dataset) == 1:
|
408 |
+
remaining = list_dataset[0]
|
409 |
+
continue
|
410 |
+
else:
|
411 |
+
remaining = list_dataset[-1]
|
412 |
+
list_dataset = list_dataset[:-1]
|
413 |
+
return_value = []
|
414 |
+
for item in list_dataset:
|
415 |
+
if end_identifier is not None and item == end_identifier:
|
416 |
+
break
|
417 |
+
return_value.append(item)
|
418 |
+
identifier = yield return_value
|
419 |
+
if identifier is not None:
|
420 |
+
end_identifier, split_identifier = identifier
|
421 |
+
|
422 |
+
def send_message(self, message:str):
|
423 |
+
"""Send message to client."""
|
424 |
+
SEP = self.SIGN["SPLIT"]
|
425 |
+
self.client_socket.send(
|
426 |
+
(message+SEP).encode("utf-8")
|
427 |
+
)
|
428 |
+
|
429 |
+
def _connect(self):
|
430 |
+
# check
|
431 |
+
if self.server_socket:
|
432 |
+
self.server_socket.close()
|
433 |
+
assert not os.path.isfile("PORT.txt")
|
434 |
+
self.socket_port = check_port(PORT)
|
435 |
+
# Step1. initialize
|
436 |
+
self.server_socket = socket.socket(
|
437 |
+
socket.AF_INET, socket.SOCK_STREAM
|
438 |
+
)
|
439 |
+
# Step2. binding ip and port
|
440 |
+
self.server_socket.bind((self.socket_host, self.socket_port))
|
441 |
+
# Step3. run client
|
442 |
+
self._start_client()
|
443 |
+
|
444 |
+
# Step4. listening for connect
|
445 |
+
self.server_socket.listen(1)
|
446 |
+
|
447 |
+
# Step5. test connection
|
448 |
+
client_socket, client_address = self.server_socket.accept()
|
449 |
+
print_log("server: establishing connection......")
|
450 |
+
self.client_socket = client_socket
|
451 |
+
while True:
|
452 |
+
client_socket.send("hi".encode('utf-8'))
|
453 |
+
time.sleep(1)
|
454 |
+
data = client_socket.recv(self.bufsize).decode('utf-8')
|
455 |
+
if data == "hello agent":
|
456 |
+
client_socket.send("check".encode('utf-8'))
|
457 |
+
print_log("server: connect successfully")
|
458 |
+
break
|
459 |
+
assert os.path.isfile("PORT.txt")
|
460 |
+
os.remove("PORT.txt")
|
461 |
+
if self.receive_server:
|
462 |
+
del self.receive_server
|
463 |
+
self.receive_server = self.receive_message()
|
464 |
+
assert next(self.receive_server) == "hello"
|
465 |
+
|
466 |
+
@abstractmethod
|
467 |
+
def render_and_register_ui(self):
|
468 |
+
# You need to implement this function.
|
469 |
+
# The function's purpose is to bind the name of the agent with an image.
|
470 |
+
# The name of the agent is stored in `self.cache[]`,
|
471 |
+
# and the function for binding is in the method `add_agent` of the class `GradioConfig` in `Gradio_Config/gradio_config.py`.
|
472 |
+
# This function will be executed in `self.first_recieve_from_client()`
|
473 |
+
pass
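A minimal sketch of the hook a subclass might provide (the cache key is an assumption; each bundled UI stores its own):

class MyUI(WebUI):
    def render_and_register_ui(self):
        # Bind every agent name received from the backend to an avatar and colour.
        gc.add_agent(self.cache["agents_name"])   # "agents_name" is a hypothetical key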
|
474 |
+
|
475 |
+
def first_recieve_from_client(self, reset_mode:bool=False):
|
476 |
+
"""
|
477 |
+
This function is used to receive information from the client and is typically executed during the initialization of the class.
|
478 |
+
If `reset_mode` is False, it will bind the name of the agent with an image.
|
479 |
+
"""
|
480 |
+
self.FIRST_RECIEVE_FROM_CLIENT = True
|
481 |
+
data_list:List = self.receive_server.send(None)
|
482 |
+
assert len(data_list) == 1
|
483 |
+
data = eval(data_list[0])
|
484 |
+
assert isinstance(data, dict)
|
485 |
+
self.cache.update(data)
|
486 |
+
if not reset_mode:
|
487 |
+
self.render_and_register_ui()
|
488 |
+
|
489 |
+
def _second_send(self, message:dict):
|
490 |
+
# Send the modified message.
|
491 |
+
# It will be executed in `self.send_start_cmd()` automatically.
|
492 |
+
self.send_message(str(message))
|
493 |
+
|
494 |
+
def _third_send(self):
|
495 |
+
# Send start command.
|
496 |
+
# It will be executed in `self.send_start_cmd()` automtically.
|
497 |
+
self.send_message(self.SIGN['START'])
|
498 |
+
|
499 |
+
def send_start_cmd(self, message:dict={"hello":"hello"}):
|
500 |
+
# If you have no message to send, you can ignore the `message` argument.
|
501 |
+
assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
|
502 |
+
self._second_send(message=message)
|
503 |
+
time.sleep(1)
|
504 |
+
self._third_send()
|
505 |
+
self.FIRST_RECIEVE_FROM_CLIENT = False
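Putting the pieces together, a typical (sketched) startup sequence for a subclass such as the MyUI sketch above:

ui = MyUI(client_cmd=["python", "gradio_backend.py"], ui_name="MyUI")  # __init__ also runs _connect()
ui.first_recieve_from_client()         # step 1: receive the backend's config and bind avatars
ui.send_start_cmd({"theme": "..."})    # steps 2-3: send the (possibly edited) config, then <START>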
|
506 |
+
|
507 |
+
def __init__(
|
508 |
+
self,
|
509 |
+
client_cmd: list, # ['python','test.py','--a','b','--c','d']
|
510 |
+
socket_host: str = HOST,
|
511 |
+
socket_port: int = PORT,
|
512 |
+
bufsize: int = 1024,
|
513 |
+
ui_name: str = ""
|
514 |
+
):
|
515 |
+
self.ui_name = ui_name
|
516 |
+
self.server_socket = None
|
517 |
+
self.SIGN = SPECIAL_SIGN
|
518 |
+
self.socket_host = socket_host
|
519 |
+
self.socket_port = socket_port
|
520 |
+
self.bufsize = bufsize
|
521 |
+
self.client_cmd = client_cmd
|
522 |
+
|
523 |
+
self.receive_server = None
|
524 |
+
self.cache = {}
|
525 |
+
assert self.bufsize > 0
|
526 |
+
self._connect()
|
527 |
+
|
528 |
+
def _start_client(self):
|
529 |
+
print(f"server: excuting `{' '.join(self.client_cmd)}` ...")
|
530 |
+
self.backend = subprocess.Popen(self.client_cmd)
|
531 |
+
|
532 |
+
def _close_client(self):
|
533 |
+
print(f"server: killing `{' '.join(self.client_cmd)}` ...")
|
534 |
+
self.backend.terminate()
|
535 |
+
|
536 |
+
def reset(self):
|
537 |
+
print("server: restarting ...")
|
538 |
+
self._close_client()
|
539 |
+
time.sleep(1)
|
540 |
+
self._connect()
|
541 |
+
|
542 |
+
def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
|
543 |
+
# Rendered bubbles (HTML format) are used for gradio output.
|
544 |
+
output = f"**{node_name}**<br>" if render_node_name else ""
|
545 |
+
for item in agent_response:
|
546 |
+
for agent_name in item:
|
547 |
+
content = item[agent_name].replace("\n", "<br>")
|
548 |
+
content = UIHelper.filter(content, agent_name, self.ui_name)
|
549 |
+
output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
|
550 |
+
rendered_data[-1] = [rendered_data[-1][0], output]
|
551 |
+
return rendered_data
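A sketch of how a Gradio callback typically uses render_bubble (history is the gr.Chatbot message list; the agent payload is illustrative):

history.append(["Summarise chapter 1", None])                  # placeholder row to fill in
agent_output = [{"Recorder": "<TITLE>Chapter 1</TITLE> ..."}]  # one dict per responding agent
history = self.render_bubble(history, agent_output, node_name="Writing", render_node_name=True)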
|
552 |
+
|
553 |
+
def run(self, share: bool = True):
|
554 |
+
self.demo.queue()
|
555 |
+
self.demo.launch(share=share)
|
556 |
+
|
557 |
+
|
558 |
+
if __name__ == '__main__':
|
559 |
+
pass
|
gradio_config.py
ADDED
@@ -0,0 +1,437 @@
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2023 The AIWaves Inc. team.
|
3 |
+
|
4 |
+
#
|
5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6 |
+
# you may not use this file except in compliance with the License.
|
7 |
+
# You may obtain a copy of the License at
|
8 |
+
#
|
9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
10 |
+
#
|
11 |
+
# Unless required by applicable law or agreed to in writing, software
|
12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14 |
+
# See the License for the specific language governing permissions and
|
15 |
+
# limitations under the License.
|
16 |
+
|
17 |
+
import json
|
18 |
+
from PIL import Image
|
19 |
+
import requests
|
20 |
+
from typing import List, Tuple
|
21 |
+
|
22 |
+
class GradioConfig:
|
23 |
+
# How many avatars are currently registered
|
24 |
+
POINTER = 0
|
25 |
+
|
26 |
+
# Avatar images. You can add more or replace them.
|
27 |
+
AGENT_HEAD_URL = [
|
28 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687579617434043.jpg",
|
29 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687592097408547.jpg",
|
30 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561699613.jpg",
|
31 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561275758.jpg",
|
32 |
+
"https://img.touxiangwu.com/uploads/allimg/2021090300/ry5k31wt33c.jpg",
|
33 |
+
"https://img.touxiangwu.com/uploads/allimg/2021090300/0ls2gmwhrf5.jpg",
|
34 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
|
35 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/03/202303271679886128550253.jpg",
|
36 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711344407060.jpg",
|
37 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711345834296.jpg",
|
38 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311194291520.jpg",
|
39 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311196958993.jpg",
|
40 |
+
"https://img.touxiangwu.com/uploads/allimg/2021082612/vr0bkov0dwl.jpg",
|
41 |
+
"https://img.touxiangwu.com/uploads/allimg/2021082612/auqx5zfsv5g.jpg",
|
42 |
+
"https://img.touxiangwu.com/uploads/allimg/2021082612/llofpivtwls.jpg",
|
43 |
+
"https://img.touxiangwu.com/uploads/allimg/2021082612/3j2sdot3ye0.jpg",
|
44 |
+
"https://img.touxiangwu.com/2020/3/nQfYf2.jpg",
|
45 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068774532.jpg",
|
46 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068289945.jpg",
|
47 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918069785183.jpg",
|
48 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561292003.jpg",
|
49 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561578616.jpg",
|
50 |
+
"https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726564597524.jpg"
|
51 |
+
]
|
52 |
+
USER_HEAD_URL = "https://img.touxiangwu.com/zb_users/upload/2023/05/202305301685407468585486.jpg"
|
53 |
+
|
54 |
+
# The CSS style of gradio.Chatbot
|
55 |
+
CSS = """
|
56 |
+
#chatbot1 .user {
|
57 |
+
background-color:transparent;
|
58 |
+
border-color:transparent;
|
59 |
+
}
|
60 |
+
#chatbot1 .bot {
|
61 |
+
background-color:transparent;
|
62 |
+
border-color:transparent;
|
63 |
+
}
|
64 |
+
#btn {color: red; border-color: red;}
|
65 |
+
"""
|
66 |
+
|
67 |
+
ID = ["USER", "AGENT", "SYSTEM"]
|
68 |
+
|
69 |
+
# Bubble template
|
70 |
+
BUBBLE_CSS = {
|
71 |
+
# Background-color Name-color Name-content Font-color Font-size Content Avatar-URL
|
72 |
+
"USER": """
|
73 |
+
<div style="display: flex; align-items: flex-start; justify-content: flex-end;">
|
74 |
+
<div style="background-color: {}; border-radius: 20px 0px 20px 20px; padding: 15px; min-width: 100px; max-width: 300px;">
|
75 |
+
<p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
|
76 |
+
<p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
|
77 |
+
</div>
|
78 |
+
<img src="{}" alt="USER" style="width: 50px; height: 50px; border-radius: 50%; margin-left: 10px;">
|
79 |
+
</div>
|
80 |
+
""",
|
81 |
+
|
82 |
+
# Avatar-URL Background-color Name-color Name-Content Font-color Font-size Content
|
83 |
+
"AGENT": """
|
84 |
+
<div style="display: flex; align-items: flex-start;">
|
85 |
+
<img src="{}" alt="AGENT" style="width: 50px; height: 50px; border-radius: 50%; margin-right: 10px;">
|
86 |
+
<div style="background-color: {}; border-radius: 0px 20px 20px 20px; padding: 15px; min-width: 100px; max-width: 600px;">
|
87 |
+
<p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
|
88 |
+
<p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
|
89 |
+
</div>
|
90 |
+
</div>
|
91 |
+
""",
|
92 |
+
|
93 |
+
# Background-color        Font-size       Font-color        Name       Content
|
94 |
+
"SYSTEM": """
|
95 |
+
<div style="display: flex; align-items: center; justify-content: center;">
|
96 |
+
<div style="background-color: {}; border-radius: 20px; padding: 1px; min-width: 200px; max-width: 1000px;">
|
97 |
+
<p style="margin: 0; padding: 0; text-align: center; font-size: {}px; font-weight: bold; font-family: '微软雅黑', sans-serif; color: {};">{}:{}</p>
|
98 |
+
</div>
|
99 |
+
</div>
|
100 |
+
"""
|
101 |
+
}
|
102 |
+
|
103 |
+
ROLE_2_NAME = {}
|
104 |
+
|
105 |
+
OBJECT_INFO = {
|
106 |
+
|
107 |
+
"User": {
|
108 |
+
# https://img-blog.csdnimg.cn/img_convert/7c20bc39ac69b6972a22e18762d02db3.jpeg
|
109 |
+
"head_url": USER_HEAD_URL,
|
110 |
+
"bubble_color": "#95EC69",
|
111 |
+
"text_color": "#000000",
|
112 |
+
"font_size": 0,
|
113 |
+
"id": "USER"
|
114 |
+
},
|
115 |
+
|
116 |
+
"System": {
|
117 |
+
# https://img-blog.csdnimg.cn/img_convert/e7e5887cfff67df8c2205c2ef0e5e7fa.png
|
118 |
+
"head_url": "https://img.touxiangwu.com/zb_users/upload/2023/03/202303141678768524747045.jpg",
|
119 |
+
"bubble_color": "#7F7F7F", ##FFFFFF
|
120 |
+
"text_color": "#FFFFFF", ##000000
|
121 |
+
"font_size": 0,
|
122 |
+
"id": "SYSTEM"
|
123 |
+
},
|
124 |
+
|
125 |
+
"wait": {
|
126 |
+
"head_url": "https://img.touxiangwu.com/zb_users/upload/2022/12/202212011669881536145501.jpg",
|
127 |
+
"bubble_color": "#E7CBA6",
|
128 |
+
"text_color": "#000000",
|
129 |
+
"font_size": 0,
|
130 |
+
"id": "AGENT"
|
131 |
+
},
|
132 |
+
|
133 |
+
"Recorder": {
|
134 |
+
"head_url": "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
|
135 |
+
"bubble_color": "#F7F7F7",
|
136 |
+
"text_color": "#000000",
|
137 |
+
"font_size": 0,
|
138 |
+
"id": "AGENT"
|
139 |
+
}
|
140 |
+
}
|
141 |
+
|
142 |
+
@classmethod
|
143 |
+
def color_for_img(cls, url):
|
144 |
+
"""
|
145 |
+
Extract the main colors from the picture and set them as the background color,
|
146 |
+
then determine the corresponding text color.
|
147 |
+
"""
|
148 |
+
|
149 |
+
def get_main_color(image):
|
150 |
+
image = image.convert("RGB")
|
151 |
+
width, height = image.size
|
152 |
+
pixels = image.getcolors(width * height)
|
153 |
+
most_common_pixel = max(pixels, key=lambda item: item[0])
|
154 |
+
return most_common_pixel[1]
|
155 |
+
|
156 |
+
def is_dark_color(rgb_color):
|
157 |
+
r, g, b = rgb_color
|
158 |
+
luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
|
159 |
+
return luminance < 0.5
|
160 |
+
|
161 |
+
def download_image(url):
|
162 |
+
print(f"binding: {url}")
|
163 |
+
response = requests.get(url)
|
164 |
+
if response.status_code == 200:
|
165 |
+
with open('image.jpg', 'wb') as f:
|
166 |
+
f.write(response.content)
|
167 |
+
|
168 |
+
def rgb_to_hex(color):
|
169 |
+
return "#{:02X}{:02X}{:02X}".format(color[0], color[1], color[2])
|
170 |
+
|
171 |
+
def get_color(image_url):
|
172 |
+
download_image(image_url)
|
173 |
+
|
174 |
+
image = Image.open("image.jpg")
|
175 |
+
main_color = get_main_color(image)
|
176 |
+
is_dark = is_dark_color(main_color)
|
177 |
+
|
178 |
+
if is_dark:
|
179 |
+
font_color = "#FFFFFF"
|
180 |
+
else:
|
181 |
+
font_color = "#000000"
|
182 |
+
|
183 |
+
return rgb_to_hex(main_color), font_color
|
184 |
+
|
185 |
+
return get_color(url)
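A hedged sketch of the call; the exact colours depend on the downloaded avatar:

bubble_color, text_color = GradioConfig.color_for_img(GradioConfig.AGENT_HEAD_URL[0])
# A dark avatar yields white text ("#FFFFFF"); a light one yields black ("#000000").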
|
186 |
+
|
187 |
+
@classmethod
|
188 |
+
def init(cls, JSON):
|
189 |
+
# Deprecated
|
190 |
+
with open(JSON) as f:
|
191 |
+
sop = json.load(f)
|
192 |
+
cnt = 0
|
193 |
+
FIRST_NODE = True
|
194 |
+
first_node_roles = []
|
195 |
+
for node_name in sop['nodes']:
|
196 |
+
node_info = sop['nodes'][node_name]
|
197 |
+
agent_states = node_info['agent_states']
|
198 |
+
for agent_role in agent_states:
|
199 |
+
name = agent_states[agent_role]['style']['name']
|
200 |
+
cls.ROLE_2_NAME[agent_role] = name
|
201 |
+
if FIRST_NODE:
|
202 |
+
first_node_roles.append(agent_role)
|
203 |
+
bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cnt])
|
204 |
+
cls.OBJECT_INFO[name] = {
|
205 |
+
"head_url": f"{cls.AGENT_HEAD_URL[cnt]}",
|
206 |
+
"bubble_color": bubble_color,
|
207 |
+
"text_color": text_color,
|
208 |
+
"font_size": 0,
|
209 |
+
"id": "AGENT"
|
210 |
+
}
|
211 |
+
cnt += 1
|
212 |
+
if FIRST_NODE:
|
213 |
+
FIRST_NODE = False
|
214 |
+
print(cls.OBJECT_INFO)
|
215 |
+
for usr_name in cls.OBJECT_INFO:
|
216 |
+
if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
|
217 |
+
cls.OBJECT_INFO[usr_name]["font_size"] = 12
|
218 |
+
elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
|
219 |
+
cls.OBJECT_INFO[usr_name]["font_size"] = 16
|
220 |
+
else:
|
221 |
+
assert False
|
222 |
+
return first_node_roles
|
223 |
+
|
224 |
+
@classmethod
|
225 |
+
def add_agent(cls, agents_name:List):
|
226 |
+
for name in agents_name:
|
227 |
+
bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cls.POINTER])
|
228 |
+
cls.OBJECT_INFO[name] = {
|
229 |
+
"head_url": f"{cls.AGENT_HEAD_URL[cls.POINTER]}",
|
230 |
+
"bubble_color": bubble_color,
|
231 |
+
"text_color": text_color,
|
232 |
+
"font_size": 0,
|
233 |
+
"id": "AGENT"
|
234 |
+
}
|
235 |
+
cls.POINTER += 1
|
236 |
+
for usr_name in cls.OBJECT_INFO:
|
237 |
+
if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
|
238 |
+
cls.OBJECT_INFO[usr_name]["font_size"] = 12
|
239 |
+
elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
|
240 |
+
cls.OBJECT_INFO[usr_name]["font_size"] = 16
|
241 |
+
else:
|
242 |
+
assert False
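A usage sketch: register two hypothetical agents before building the chat view, so that UIHelper.wrap_css (in gradio_base.py) can find their avatars and colours:

GradioConfig.add_agent(["Alice", "Bob"])        # assigns the next two free avatars
bubble = UIHelper.wrap_css("Hello!", "Alice")   # now renders with Alice's bubble colours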
|
243 |
+
|
244 |
+
|
245 |
+
class StateConfig:
|
246 |
+
"""UI configuration for the step progress bar (indicating the current node)"""
|
247 |
+
|
248 |
+
CSS = """
|
249 |
+
:root {
|
250 |
+
--gradient-start: 100%;
|
251 |
+
--gradient-end: 0%;
|
252 |
+
}
|
253 |
+
.container.progress-bar-container {
|
254 |
+
position: relative;
|
255 |
+
display: flex;
|
256 |
+
align-items: flex-end;
|
257 |
+
width: 100%;
|
258 |
+
overflow-x: auto;
|
259 |
+
padding-bottom: 30px;
|
260 |
+
padding-top: 20px
|
261 |
+
}
|
262 |
+
.container.progress-bar-container::-webkit-scrollbar {
|
263 |
+
width: 8px;
|
264 |
+
background-color: transparent;
|
265 |
+
}
|
266 |
+
|
267 |
+
.container.progress-bar-container::-webkit-scrollbar-thumb {
|
268 |
+
background-color: transparent;
|
269 |
+
}
|
270 |
+
|
271 |
+
.progress-bar-container .progressbar {
|
272 |
+
counter-reset: step;
|
273 |
+
white-space: nowrap;
|
274 |
+
}
|
275 |
+
.progress-bar-container .progressbar li {
|
276 |
+
list-style: none;
|
277 |
+
display: inline-block;
|
278 |
+
width: 200px;
|
279 |
+
position: relative;
|
280 |
+
text-align: center;
|
281 |
+
cursor: pointer;
|
282 |
+
white-space: normal;
|
283 |
+
}
|
284 |
+
.progress-bar-container .progressbar li:before {
|
285 |
+
content: counter(step);
|
286 |
+
counter-increment: step;
|
287 |
+
width: 30px;
|
288 |
+
height: 30px;
|
289 |
+
line-height: 30px;
|
290 |
+
border: 1px solid #ddd;
|
291 |
+
border-radius: 100%;
|
292 |
+
display: block;
|
293 |
+
text-align: center;
|
294 |
+
margin: 0 auto 10px auto;
|
295 |
+
background-color: #ffffff;
|
296 |
+
}
|
297 |
+
.progress-bar-container .progressbar li:after {
|
298 |
+
content: attr(data-content);
|
299 |
+
position: absolute;
|
300 |
+
width: 87%;
|
301 |
+
height: 2px;
|
302 |
+
background-color: #dddddd;
|
303 |
+
top: 15px;
|
304 |
+
left: -45%;
|
305 |
+
}
|
306 |
+
.progress-bar-container .progressbar li:first-child:after {
|
307 |
+
content: none;
|
308 |
+
}
|
309 |
+
.progress-bar-container .progressbar li.active {
|
310 |
+
color: green;
|
311 |
+
}
|
312 |
+
.progress-bar-container .progressbar li.active:before {
|
313 |
+
border-color: green;
|
314 |
+
background-color: green;
|
315 |
+
color: white;
|
316 |
+
}
|
317 |
+
.progress-bar-container .progressbar li.active + li:after {
|
318 |
+
background: linear-gradient(to right, green var(--gradient-start), lightgray var(--gradient-end));
|
319 |
+
}
|
320 |
+
.progress-bar-container .small-element {
|
321 |
+
transform: scale(0.8);
|
322 |
+
}
|
323 |
+
.progress-bar-container .progressbar li span {
|
324 |
+
position: absolute;
|
325 |
+
top: 40px;
|
326 |
+
left: 0;
|
327 |
+
width: 100%;
|
328 |
+
text-align: center;
|
329 |
+
}
|
330 |
+
.progress-bar-container .progressbar li .data-content {
|
331 |
+
position: absolute;
|
332 |
+
width: 100%;
|
333 |
+
top: -10px;
|
334 |
+
left: -100px;
|
335 |
+
text-align: center;
|
336 |
+
}
|
337 |
+
"""
|
338 |
+
|
339 |
+
FORMAT = """
|
340 |
+
<html>
|
341 |
+
<head>
|
342 |
+
<style>
|
343 |
+
{}
|
344 |
+
</style>
|
345 |
+
</head>
|
346 |
+
<body>
|
347 |
+
<br>
|
348 |
+
<center>
|
349 |
+
<div class="container progress-bar-container">
|
350 |
+
<ul class="progressbar">
|
351 |
+
{}
|
352 |
+
</ul>
|
353 |
+
</div>
|
354 |
+
</center>
|
355 |
+
</body>
|
356 |
+
</html>
|
357 |
+
"""
|
358 |
+
|
359 |
+
STATES_NAME:List[str] = None
|
360 |
+
|
361 |
+
@classmethod
|
362 |
+
def _generate_template(cls, types:str)->str:
|
363 |
+
# normal: A state with no execution.
|
364 |
+
# active-show-up: Active state, and content displayed above the horizontal line.
|
365 |
+
# active-show-down: Active state, and content displayed below the horizontal line.
|
366 |
+
# active-show-both: Active state, and content displayed both above and below the horizontal line.
|
367 |
+
# active-show-none: Active state, with no extra content displayed around the horizontal line.
|
368 |
+
|
369 |
+
assert types.lower() in ["normal","active-show-up", "active-show-down", "active-show-both", "active", "active-show-none"]
|
370 |
+
both_templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
|
371 |
+
<div class="data-content">
|
372 |
+
<center>
|
373 |
+
<p style="line-height: 1px;"></p>
|
374 |
+
{}
|
375 |
+
<p>
|
376 |
+
{}
|
377 |
+
</p>
|
378 |
+
</center>
|
379 |
+
</div>
|
380 |
+
<span>{}</span>
|
381 |
+
</li>"""
|
382 |
+
|
383 |
+
if types.lower() == "normal":
|
384 |
+
templates = "<li><span>{}</span></li>"
|
385 |
+
elif types.lower() == "active":
|
386 |
+
templates = """<li class="active"><span>{}</span></li>"""
|
387 |
+
elif types.lower() == "active-show-up":
|
388 |
+
templates = both_templates.format("{}","{}", "{}", "", "{}")
|
389 |
+
elif types.lower() == "active-show-down":
|
390 |
+
templates = both_templates.format("{}","{}", "", "{}", "{}")
|
391 |
+
elif types.lower() == "active-show-both":
|
392 |
+
templates = both_templates
|
393 |
+
elif types.lower() == "active-show-none":
|
394 |
+
templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
|
395 |
+
<span>{}</span>
|
396 |
+
</li>"""
|
397 |
+
else:
|
398 |
+
assert False
|
399 |
+
return templates
|
400 |
+
|
401 |
+
@classmethod
|
402 |
+
def update_states(cls, current_states:List[int], current_templates:List[str], show_content:List[Tuple[str]])->str:
|
403 |
+
assert len(current_states) == len(current_templates)
|
404 |
+
# You can dynamically change the number of states.
|
405 |
+
# assert len(current_states) == len(cls.STATES_NAME)
|
406 |
+
css_code = []
|
407 |
+
for idx in range(len(current_states)):
|
408 |
+
if idx == 0:
|
409 |
+
if current_states[idx] != 0:
|
410 |
+
css_code = [f"{cls._generate_template('active').format(cls.STATES_NAME[idx])}"]
|
411 |
+
else:
|
412 |
+
css_code = [f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"]
|
413 |
+
continue
|
414 |
+
if current_states[idx-1] == 0:
|
415 |
+
# new_code = f"{cls._generate_template('normal').format(*(show_content[idx]))}"
|
416 |
+
new_code = f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"
|
417 |
+
else:
|
418 |
+
new_code = f"{cls._generate_template(current_templates[idx]).format(current_states[idx-1], 100-current_states[idx-1],*(show_content[idx-1]), cls.STATES_NAME[idx])}"
|
419 |
+
if current_states[idx-1] != 100 or (current_states[idx]==0 and current_states[idx-1]==100):
|
420 |
+
new_code = new_code.replace("""li class="active" ""","""li """)
|
421 |
+
css_code.append(new_code)
|
422 |
+
return "\n".join(css_code)
|
423 |
+
|
424 |
+
@classmethod
|
425 |
+
def create_states(cls, states_name:List[str], manual_create_end_nodes:bool=False):
|
426 |
+
# Create states
|
427 |
+
if manual_create_end_nodes:
|
428 |
+
states_name.append("Done")
|
429 |
+
css_code = ""
|
430 |
+
cls.STATES_NAME: List[str] = states_name
|
431 |
+
for name in states_name:
|
432 |
+
css_code = f"{css_code}\n{cls._generate_template('normal').format(name)}"
|
433 |
+
return css_code
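A sketch of how the two class methods combine into the progress-bar HTML; state names and percentages are illustrative:

StateConfig.create_states(["Design", "Write", "Review"])
items = StateConfig.update_states(
    current_states=[100, 30, 0],       # first state finished, second 30% done
    current_templates=["active", "active-show-none", "active-show-none"],
    show_content=[(), (), ()],
)
progress_html = StateConfig.FORMAT.format(StateConfig.CSS, items)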
|
434 |
+
|
435 |
+
|
436 |
+
if __name__ == '__main__':
|
437 |
+
pass
|