callanwu committed
Commit af382f0
1 Parent(s): a2d581f
app.py ADDED
@@ -0,0 +1,284 @@
1
+ import sys
2
+ sys.path.append("../../Gradio_Config")
3
+ import os
4
+ from gradio_base import WebUI, UIHelper, PORT, HOST
5
+ from gradio_config import GradioConfig as gc
6
+ from gradio_config import StateConfig as sc
7
+ from typing import List
8
+ import gradio as gr
9
+
10
+
11
+ class NovelUI(WebUI):
12
+
13
+ node2show = {
14
+ "Node 1": "Write Character Settings and Script Outlines🖊️",
15
+ "Node 2": "Expand the first chapter<br>✍️",
16
+ "Node 3": "Expand the second chapter<br>✍️",
17
+ "Node 4": "Expand the third chapter<br>✍️",
18
+ "state1": "Perform the first plot<br>🎭",
19
+ "state2": "Perform the second plot<br>🎭",
20
+ "state3": "Perform the third plot<br>🎭",
21
+ "state4": "Perform the fourth plot<br>🎭"
22
+ }
23
+ show2node = {}
24
+
25
+ def render_and_register_ui(self):
26
+ self.agent_name:list = [self.cache["agents_name"]] if isinstance(self.cache["agents_name"], str) else self.cache['agents_name']
27
+ gc.add_agent(self.agent_name)
28
+
29
+ def handle_message(self, history:list, record:list, state, agent_name, token, node_name):
30
+ RECORDER = (state // 10 == 2)
31
+ render_data:list = record if RECORDER else history
32
+ data:list = self.data_recorder if RECORDER else self.data_history
33
+ if state % 10 == 0:
34
+ data.append({agent_name: token})
35
+ elif state % 10 == 1:
36
+ # Same turn: append this token to the agent's current bubble.
37
+ data[-1][agent_name] += token
38
+ elif state % 10 == 2:
39
+ # New state. Need to add new bubble.
40
+ render_data.append([None, ""])
41
+ data.clear()
42
+ data.append({agent_name: token})
43
+ else:
44
+ assert False, "Invalid state."
45
+ render_data = self.render_bubble(render_data, data, node_name, render_node_name=True)
46
+ if RECORDER:
47
+ record = render_data
48
+ else:
49
+ history = render_data
50
+ return history, record
51
+
52
+ def update_progress(self, node_name, node_schedule):
53
+ DONE = True
54
+ node_name = self.node2show[node_name]
55
+ for idx, name in enumerate(self.cache['nodes_name']):
56
+ name = self.node2show[name]
57
+ self.progress_manage['show_type'][idx] = "active-show-up"
58
+ self.progress_manage['show_content'][idx] = ("" if name != node_name else "💬",)
59
+ if name == node_name:
60
+ DONE = False
61
+ self.progress_manage['schedule'][idx] = node_schedule
62
+ elif DONE:
63
+ self.progress_manage['schedule'][idx] = 100
64
+ else:
65
+ self.progress_manage['schedule'][idx] = 0
66
+ if self.cache['nodes_name'].index(self.show2node[node_name]) == len(self.cache['nodes_name']) - 2 and node_schedule == 100:
67
+ self.progress_manage['schedule'][-1] = 100
68
+ return sc.FORMAT.format(
69
+ sc.CSS,
70
+ sc.update_states(
71
+ current_states=self.progress_manage["schedule"],
72
+ current_templates=self.progress_manage["show_type"],
73
+ show_content=self.progress_manage["show_content"]
74
+ )
75
+ )
76
+
77
+ def __init__(
78
+ self,
79
+ client_cmd: list,
80
+ socket_host: str = HOST,
81
+ socket_port: int = PORT,
82
+ bufsize: int = 1024,
83
+ ui_name: str = "NovelUI"
84
+ ):
85
+ super(NovelUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
86
+ self.first_recieve_from_client()
87
+ for item in ['agents_name', 'nodes_name', 'output_file_path', 'requirement']:
88
+ assert item in self.cache
89
+ self.progress_manage = {
90
+ "schedule": [None for _ in range(len(self.cache['nodes_name']))],
91
+ "show_type": [None for _ in range(len(self.cache['nodes_name']))],
92
+ "show_content": [None for _ in range(len(self.cache['nodes_name']))]
93
+ }
94
+ NovelUI.show2node = {show: node for node, show in NovelUI.node2show.items()}
95
+
96
+ def construct_ui(self):
97
+ with gr.Blocks(css=gc.CSS) as demo:
98
+ with gr.Column():
99
+ self.progress = gr.HTML(
100
+ value=sc.FORMAT.format(
101
+ sc.CSS,
102
+ sc.create_states([NovelUI.node2show[name] for name in self.cache['nodes_name']], False)
103
+ )
104
+ )
105
+ with gr.Row():
106
+ with gr.Column(scale=6):
107
+ self.chatbot = gr.Chatbot(
108
+ elem_id="chatbot1",
109
+ label="Dialog",
110
+ height=500
111
+ )
112
+ with gr.Row():
113
+ self.text_requirement = gr.Textbox(
114
+ placeholder="Requirement of the novel",
115
+ value=self.cache['requirement'],
116
+ scale=9
117
+ )
118
+ self.btn_start = gr.Button(
119
+ value="Start",
120
+ scale=1
121
+ )
122
+ self.btn_reset = gr.Button(
123
+ value="Restart",
124
+ visible=False
125
+ )
126
+ with gr.Column(scale=5):
127
+ self.chat_record = gr.Chatbot(
128
+ elem_id="chatbot1",
129
+ label="Record",
130
+ visible=False
131
+ )
132
+ self.file_show = gr.File(
133
+ value=[],
134
+ label="FileList",
135
+ visible=False
136
+ )
137
+ self.chat_show = gr.Chatbot(
138
+ elem_id="chatbot1",
139
+ label="FileRead",
140
+ visible=False
141
+ )
142
+
143
+ # ===============Event Listener===============
144
+ self.btn_start.click(
145
+ fn=self.btn_start_when_click,
146
+ inputs=[self.text_requirement],
147
+ outputs=[self.chatbot, self.chat_record, self.btn_start, self.text_requirement]
148
+ ).then(
149
+ fn=self.btn_start_after_click,
150
+ inputs=[self.chatbot, self.chat_record],
151
+ outputs=[self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
152
+ )
153
+ self.btn_reset.click(
154
+ fn=self.btn_reset_when_click,
155
+ inputs=[],
156
+ outputs=[self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
157
+ ).then(
158
+ fn=self.btn_reset_after_click,
159
+ inputs=[],
160
+ outputs=[self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
161
+ )
162
+ self.file_show.select(
163
+ fn=self.file_when_select,
164
+ inputs=[self.file_show],
165
+ outputs=[self.chat_show]
166
+ )
167
+ # ===========================================
168
+ self.demo = demo
169
+
170
+ def btn_start_when_click(self, text_requirement:str):
171
+ """
172
+ inputs=[self.text_requirement],
173
+ outputs=[self.chatbot, self.chat_record, self.btn_start, self.text_requirement]
174
+ """
175
+ history = [[UIHelper.wrap_css(content=text_requirement, name="User"), None]]
176
+ yield history,\
177
+ gr.Chatbot.update(visible=True),\
178
+ gr.Button.update(interactive=False, value="Running"),\
179
+ gr.Textbox.update(value="", interactive=False)
180
+ self.send_start_cmd({'requirement': text_requirement})
181
+ return
182
+
183
+ def btn_start_after_click(self, history:List, record):
184
+ def walk_file():
185
+ print("file:", self.cache['output_file_path'])
186
+ files = []
187
+ for _ in os.listdir(self.cache['output_file_path']):
188
+ if os.path.isfile(self.cache['output_file_path']+'/'+_):
189
+ files.append(self.cache['output_file_path']+'/'+_)
190
+
191
+ return files
192
+ """
193
+ inputs=[self.chatbot, self.chat_record],
194
+ outputs=[self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
195
+ """
196
+ self.data_recorder = list()
197
+ self.data_history = list()
198
+ receive_server = self.receive_server
199
+ while True:
200
+ data_list: List = receive_server.send(None)
201
+ for item in data_list:
202
+ data = eval(item)
203
+ assert isinstance(data, list)
204
+ state, agent_name, token, node_name, node_schedule = data
205
+ assert isinstance(state, int)
206
+ fs:List = walk_file()
207
+ # 10/11/12 -> history
208
+ # 20/21/22 -> recorder
209
+ # 99 -> finish
210
+ # 30 -> register new agent
211
+ assert state in [10, 11, 12, 20, 21, 22, 99, 30]
212
+ if state == 30:
213
+ # register new agent.
214
+ gc.add_agent(eval(agent_name))
215
+ continue
216
+ if state == 99:
217
+ # finish
218
+ yield gr.HTML.update(value=self.update_progress(node_name, node_schedule)),\
219
+ history,\
220
+ gr.Chatbot.update(visible=True, value=record),\
221
+ gr.Chatbot.update(visible=True),\
222
+ gr.Button.update(visible=True, interactive=False, value="Done"),\
223
+ gr.Button.update(visible=True, interactive=True),\
224
+ gr.Textbox.update(visible=True, interactive=False),\
225
+ gr.File.update(value=fs, visible=True, interactive=True)
226
+ return
227
+
228
+ history, record = self.handle_message(history, record, state, agent_name, token, node_name)
229
+ # [self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
230
+ yield gr.HTML.update(value=self.update_progress(node_name, node_schedule)),\
231
+ history,\
232
+ gr.Chatbot.update(visible=True, value=record),\
233
+ gr.Chatbot.update(visible=False),\
234
+ gr.Button.update(visible=True, interactive=False),\
235
+ gr.Button.update(visible=False, interactive=True),\
236
+ gr.Textbox.update(visible=True, interactive=False),\
237
+ gr.File.update(value=fs, visible=True, interactive=True)
238
+
239
+ def btn_reset_when_click(self):
240
+ """
241
+ inputs=[],
242
+ outputs=[self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
243
+ """
244
+ return gr.HTML.update(value=sc.create_states(states_name=self.cache['nodes_name'])),\
245
+ gr.Chatbot.update(value=None),\
246
+ gr.Chatbot.update(value=None, visible=False),\
247
+ gr.Chatbot.update(value=None, visible=False),\
248
+ gr.Button.update(value="Restarting...", visible=True, interactive=False),\
249
+ gr.Button.update(value="Restarting...", visible=True, interactive=False),\
250
+ gr.Textbox.update(value="Restarting...", interactive=False, visible=True),\
251
+ gr.File.update(visible=False)
252
+
253
+ def btn_reset_after_click(self):
254
+ """
255
+ inputs=[],
256
+ outputs=[self.progress, self.chatbot, self.chat_record, self.chat_show, self.btn_start, self.btn_reset, self.text_requirement, self.file_show]
257
+ """
258
+ self.reset()
259
+ self.first_recieve_from_client(reset_mode=True)
260
+ return gr.HTML.update(value=sc.create_states(states_name=self.cache['nodes_name'])),\
261
+ gr.Chatbot.update(value=None),\
262
+ gr.Chatbot.update(value=None, visible=False),\
263
+ gr.Chatbot.update(value=None, visible=False),\
264
+ gr.Button.update(value="Start", visible=True, interactive=True),\
265
+ gr.Button.update(value="Restart", visible=False, interactive=False),\
266
+ gr.Textbox.update(value="", interactive=True, visible=True),\
267
+ gr.File.update(visible=False)
268
+
269
+ def file_when_select(self, file_obj):
270
+ """
271
+ inputs=[self.file_show],
272
+ outputs=[self.chat_show]
273
+ """
274
+ CODE_PREFIX = "```json\n{}\n```"
275
+ with open(file_obj.name, "r", encoding='utf-8') as f:
276
+ contents = f.readlines()
277
+ codes = "".join(contents)
278
+ return [[CODE_PREFIX.format(codes),None]]
279
+
280
+
281
+ if __name__ == '__main__':
282
+ ui = NovelUI(client_cmd=["python","gradio_backend.py"])
283
+ ui.construct_ui()
284
+ ui.run(share=True)
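A note on the state codes used above: handle_message and btn_start_after_click assume the tens digit selects the destination pane (1x = the "Dialog" chatbot, 2x = the "Record" chatbot) and the units digit controls bubble handling (0 = first token of an agent's turn, 1 = continuation, 2 = start a new bubble), with 99 marking completion and 30 registering new agents. The sketch below is illustrative only and not part of the commit; decode_state is a hypothetical helper restating that convention.

# Hypothetical decoder for the two-digit render codes consumed by NovelUI above;
# semantics inferred from the comments in btn_start_after_click.
def decode_state(state: int):
    assert state in (10, 11, 12, 20, 21, 22), "99/30 are control codes, not render codes"
    pane = "record" if state // 10 == 2 else "history"              # tens digit: target chatbot
    mode = {0: "new agent turn", 1: "append token", 2: "new bubble"}[state % 10]
    return pane, mode

print(decode_state(12))  # ('history', 'new bubble')
print(decode_state(21))  # ('record', 'append token')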
cmd_perform.py ADDED
@@ -0,0 +1,70 @@
1
+ import yaml
2
+ import os
3
+ import argparse
4
+ import random
5
+ import sys
6
+ sys.path.append("../../../src/agents")
7
+ sys.path.append("../../Gradio_Config")
8
+ from agents.SOP import SOP
9
+ from agents.Agent import Agent
10
+ from agents.Environment import Environment
11
+ from gradio_base import Client
12
+ from agents.Memory import Memory
13
+ # from gradio_example import DebateUI
14
+
15
+ # Client.server.send(str([state, name, chunk, node_name])+"<SELFDEFINESEP>")
16
+ # Client.cache["start_agent_name"]
17
+ # state = 10, 11, 12, 30
18
+
19
+ def init(config):
20
+ if not os.path.exists("logs"):
21
+ os.mkdir("logs")
22
+ sop = SOP.from_config(config)
23
+ agents,roles_to_names,names_to_roles = Agent.from_config(config)
24
+ environment = Environment.from_config(config)
25
+ environment.agents = agents
26
+ environment.roles_to_names,environment.names_to_roles = roles_to_names,names_to_roles
27
+ sop.roles_to_names,sop.names_to_roles = roles_to_names,names_to_roles
28
+ for name,agent in agents.items():
29
+ agent.environment = environment
30
+ return agents,sop,environment
31
+
32
+ def run(agents,sop,environment):
33
+ while True:
34
+ current_state,current_agent= sop.next(environment,agents)
35
+ if sop.finished:
36
+ print("finished!")
37
+ break
38
+
39
+ if current_state.is_begin:
40
+ print("The new state has begun!")
41
+ # clear agent's long_term_memory
42
+ for agent_name, agent_class in agents.items():
43
+ agent_class.long_term_memory = []
44
+ # clear environment.shared_memory["long_term_memory"]
45
+ environment.shared_memory["long_term_memory"] = []
46
+
47
+ action = current_agent.step(current_state,"") #component_dict = current_state[self.role[current_node.name]] current_agent.compile(component_dict)
48
+ response = action.response
49
+ ans = ""
50
+ for i,res in enumerate(response):
51
+ # if res == '\n\n':
52
+ # continue
53
+ state = 10
54
+ if action.state_begin:
55
+ state = 12
56
+ action.state_begin = False
57
+ elif i>0:
58
+ state = 11
59
+ elif action.is_user:
60
+ state = 30
61
+ Client.send_server(str([state, action.name, res, current_state.name, 50]))
62
+ # Client.server.send(str([state, action["name"], res, current_state.name])+"<SELFDEFINESEP>")
63
+ ans += res
64
+ print(res)
65
+ print(ans)
66
+ environment.update_memory(Memory(action.name, action.role, ans),current_state)
67
+
68
+ if __name__ == '__main__':
69
+ agents,sop,environment = init("novel_outline.json")
70
+ run(agents,sop,environment)
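The Client.send_server(str([state, action.name, res, current_state.name, 50])) calls above rely on a simple framing convention: each message is the str() of a Python list terminated by the <SELFDEFINESEP> token defined in gradio_base.py, and the receiving side reconstructs it with eval. Below is a minimal, socket-free sketch of that round trip; frame and unframe are hypothetical names used for illustration only.

# Hypothetical helpers mirroring the framing used between cmd_perform.py and app.py.
SEP = "<SELFDEFINESEP>"

def frame(state, name, chunk, node_name, schedule=50):
    # A str()-ed list is enough because the receiver rebuilds it with eval().
    return str([state, name, chunk, node_name, schedule]) + SEP

def unframe(raw: str):
    return [eval(item) for item in raw.split(SEP) if item]

packed = frame(10, "Elmo", "Hello", "state1")
print(unframe(packed))  # [[10, 'Elmo', 'Hello', 'state1', 50]]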
create_sop.py ADDED
@@ -0,0 +1,204 @@
1
+ import os
2
+ import json
3
+
4
+ def create_sop(folder_name: str = "novel_outline", encoding: str = "utf-8", save_name: str = "novel_outline") -> None:
5
+ folder = f'./{folder_name}'
6
+ file_list = os.listdir(folder)
7
+ plot_list = []
8
+
9
+ for file in file_list:
10
+ if "character" in file:
11
+ character_file = file
12
+ # elif "chapter" in file and "plot" in file:
13
+ elif "plot" in file:
14
+ plot_list.append(file)
15
+ plot_list.sort()
16
+
17
+ with open(os.path.join(folder, character_file), 'r', encoding=encoding) as f:
18
+ character_settings = json.load(f)
19
+
20
+ plot_list_new = []
21
+ for plot_name in plot_list:
22
+ legal = True
23
+ with open(os.path.join(folder, plot_name), 'r', encoding=encoding) as f:
24
+ plot = json.load(f)
25
+ c_mentioned = plot["characters"]
26
+ for c in c_mentioned:
27
+ if c not in character_settings:
28
+ legal = False
29
+ break
30
+ if legal:
31
+ plot_list_new.append(plot_name)
32
+ plot_list = plot_list_new
33
+ plot_list.sort()
34
+
35
+
36
+ # create the SOP JSON file
37
+ sop_file = f"./{save_name}.json"
38
+ sop_dict = {
39
+ "config": {
40
+ "API_KEY": "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",
41
+ "PROXY": "",
42
+ "MAX_CHAT_HISTORY" : "100",
43
+ "TOP_K" : "1",
44
+ "ACTIVE_MODE" : "0",
45
+ "GRADIO" : "0",
46
+ "User_Names" : "[]"
47
+ },
48
+ "LLM_type": "OpenAI",
49
+ "LLM": {
50
+ "temperature": 0.0,
51
+ "model": "gpt-3.5-turbo-16k-0613",
52
+ "log_path": "logs/god"
53
+ },
54
+ "agents": {},
55
+ "root": "state1",
56
+ "relations": {},
57
+ "states": {
58
+ "end_state":{
59
+ "name":"end_state",
60
+ "agent_states":{}
61
+ }
62
+ }
63
+ }
64
+
65
+ nodes_num = len(plot_list)
66
+ # nodes_num = 4 if nodes_num > 4 else nodes_num
67
+ plot_list_new = []
68
+ for i in range(nodes_num):
69
+ plot_file = plot_list[i]
70
+ with open(os.path.join(folder, plot_file), 'r', encoding=encoding) as f:
71
+ plot = json.load(f)
72
+ plot_content = plot["plot"]
73
+ c_mentioned = plot["characters"]
74
+ if len(c_mentioned) > 1:
75
+ plot_list_new.append(plot_file)
76
+ plot_list = plot_list_new
77
+ nodes_num = len(plot_list)
78
+ nodes_num = min(nodes_num, 4)
79
+
80
+ for i in range(nodes_num):
81
+ node_name = f"state{i+1}"
82
+ plot_file = plot_list[i]
83
+ with open(os.path.join(folder, plot_file), 'r', encoding=encoding) as f:
84
+ plot = json.load(f)
85
+ c_mentioned = plot["characters"]
86
+ if "Director" not in sop_dict["agents"]:
87
+ sop_dict["agents"]["Director"] = {}
88
+ if "style" not in sop_dict["agents"]["Director"]:
89
+ sop_dict["agents"]["Director"]["style"] = "Commanding, directive"
90
+ if "roles" not in sop_dict["agents"]["Director"]:
91
+ sop_dict["agents"]["Director"]["roles"] = {}
92
+ sop_dict["agents"]["Director"]["roles"][node_name] = "Director"
93
+
94
+ for c in c_mentioned:
95
+ if c not in sop_dict["agents"]:
96
+ sop_dict["agents"][c] = {}
97
+ if "style" not in sop_dict["agents"][c]:
98
+ sop_dict["agents"][c]["style"] = character_settings[c]["speaking_style"]
99
+ if "roles" not in sop_dict["agents"][c]:
100
+ sop_dict["agents"][c]["roles"] = {}
101
+ sop_dict["agents"][c]["roles"][node_name] = c
102
+
103
+ for i in range(nodes_num):
104
+ if i == nodes_num - 1:
105
+ node_name = f"state{i+1}"
106
+ sop_dict["relations"][node_name] = {"0": node_name, "1": "end_state"}
107
+ sop_dict["relations"]["end_state"] = {"0": "end_state"}
108
+ else:
109
+ node_name = f"state{i+1}"
110
+ sop_dict["relations"][node_name] = {"0": node_name, "1": f"state{i+2}"}
111
+
112
+
113
+ for i in range(nodes_num):
114
+ node_name = f"state{i+1}"
115
+ plot_file = plot_list[i]
116
+ with open(os.path.join(folder, plot_file), 'r', encoding=encoding) as f:
117
+ plot = json.load(f)
118
+ plot_content = plot["plot"]
119
+ c_mentioned = plot["characters"]
120
+
121
+ c_string = ", ".join(c_mentioned)
122
+ sop_dict["states"][node_name] = {"begin_role" : "Director", "begin_query" : "<Director>I'm going to start posting performance instructions now, so please follow my instructions, actors and actresses.</Director>", }
123
+ sop_dict["states"][node_name]["environment_prompt"] = f"The current scene is a playing of a \"script\", with the main characters involved: Director, {c_string}. The content of the \"script\" that these characters need to play is: \"{plot_content}\". The characters have to act out the \"script\" together. One character performs in each round."
124
+ sop_dict["states"][node_name]["name"] = node_name
125
+ sop_dict["states"][node_name]["roles"] = ["Director"] + c_mentioned
126
+ sop_dict["states"][node_name]["LLM_type"] = "OpenAI"
127
+ sop_dict["states"][node_name]["LLM"] = {
128
+ "temperature": 1.0,
129
+ "model": "gpt-3.5-turbo-16k-0613",
130
+ "log_path": f"logs/{node_name}"
131
+ }
132
+ sop_dict["states"][node_name]["agent_states"] = {}
133
+ sop_dict["states"][node_name]["agent_states"]["Director"] = {
134
+ "LLM_type": "OpenAI",
135
+ "LLM": {
136
+ "temperature": 1.0,
137
+ "model": "gpt-3.5-turbo-16k-0613",
138
+ "log_path": "logs/director"
139
+ },
140
+ "style": {
141
+ "role": "Director",
142
+ "style": "Commanding, directive"
143
+ },
144
+ "task": {
145
+ "task": "You are the director of this \"script\", you need to plan the content of the \"script\" into small segments, each segment should be expanded with more detailed details, and then you need to use these subdivided segments one at a time as instructions to direct the actors to perform, you need to specify which actor or actors are to perform each time you issue instructions. Your instructions must include what the actors are going to do next, and cannot end with \"Please take a break\" or \"Prepare for the next round of performances\". You can't repeat instructions you've given in the history of the dialog! Each time you give a new instruction, it must be different from the one you gave before! When you have given all your instructions, reply with \"Show is over\" and do not repeat the same instruction! Note: You can only output in English!"
146
+ },
147
+ "rule": {
148
+ "rule": "You are only the Director, responsible for giving acting instructions, you cannot output the content of other characters."
149
+ },
150
+ "last":{
151
+ "last_prompt":"Remember, your identity is the Director, you can only output content on behalf of the Director, the output format is \n<Director>\n....\n</Director>\n Only need to output one round of dialog!"
152
+ }
153
+ }
154
+ for c in c_mentioned:
155
+ c_other = [item for item in c_mentioned if item != c]
156
+ c_other_string = ', '.join(c_other)
157
+ c_setting = character_settings[c]
158
+ sop_dict["states"][node_name]["agent_states"][c] = {
159
+ "LLM_type": "OpenAI",
160
+ "LLM": {
161
+ "temperature": 1.0,
162
+ "model": "gpt-3.5-turbo-16k-0613",
163
+ "log_path": f"logs/{c}"
164
+ },
165
+ "style": {
166
+ "role": c,
167
+ "style": c_setting["speaking_style"]
168
+ },
169
+ "task": {
170
+ "task": f"You are {c} in this \"script\" and you need to follow the Director's instructions and act with {c_other_string}."
171
+ },
172
+ "rule": {
173
+ "rule": f'Your settings are: your name is {c_setting["role_name"]}, your gender is {c_setting["gender"]}, your age is {c_setting["age"]} years old, your occupation is {c_setting["occupation"]}, your personality is {c_setting["personality"]}, your speaking style is {c_setting["speaking_style"]}, your relationship with other people is {c_setting["relation_with_others"]}, your background is {c_setting["background"]}. Your performance should match your setting, the content of the "script" and the Director\'s instructions. Note: Do not respond to the Director by saying things like "yes" or "yes, director", just follow the Director\'s instructions and interact with the other actors! You need to output NULL when the Director\'s current instruction does not specify you to perform, or when you think the Director\'s current instruction does not require you to perform; you cannot repeat what you have said in the dialog history! Each time you speak, it has to be different from previous speeches you have made! Your performances can only contain your words, actions, etc. alone, not the performances of others!'
174
+ },
175
+ "last":{
176
+ "last_prompt": f"Remember, your identity is {c} and you can only output content on behalf of {c}, the output format is \n<{c}>\n....\n</{c}>\n Just output one round of dialog!\nWhen the current instruction posted by the Director does not specify you to perform or when you think the current instruction from the Director does not require you to perform, you need to output \n<{c}>\nNULL\n</{c}>\n"
177
+ }
178
+ }
179
+
180
+ c_call_string = ""
181
+ for c in c_mentioned:
182
+ c_call_string += f", if it is {c}, then output <output>{c}</output>"
183
+ c_string_2 = ", ".join(c_mentioned)
184
+
185
+ sop_dict["states"][node_name]["controller"] = {
186
+ "controller_type": "order",
187
+ "max_chat_nums": 80,
188
+ "judge_system_prompt": f"Determine whether the Director, {c_string} have finished the \"script\", if so, output <output>1</output>; if not, output <output>0</output>. Note: If the Director says \"Show is over\" or something similar to \"Show is over\", output <output>1</output>; if the Director's instructions state that this is the last scene, you should wait until the actors have finished the scene before outputting <output>1</output>; if you find that a character has repeated the same dialog many times, output <output>1</output>.",
189
+ "judge_last_prompt": f"Depending on the current status of the performance process, determine whether the Director, {c_string} have finished the \"script\", if so, output <output>1</output>; if not, output <output>0</output>. Note: If the Director says \"Show is over\" or something similar to \"Show is over\", output <output>1</output>; if the Director's instructions state that this is the last scene, you should wait until the actors have finished the scene before outputting <output>1</output>; if you find that a character has repeated the same dialog many times, output <output>1</output>.",
190
+ "judge_extract_words": "output",
191
+ "call_system_prompt": f"You need to determine whose turn it is to output the content, if it is the Director, then output <output>Director</output>{c_call_string}, {c_string_2} are actors, you should let the Director output the performance instruction first each time, and then arrange which actor to output the content for the next round according to the specified person in the Director's instruction, you may need to arrange the actor to perform the performance for several rounds after the Director has given the instruction. When the actors' several rounds of output contents have finished the last instruction, you should let the Director continue to output the instruction.",
192
+ "call_last_prompt": f"Depending on the current status of the performance process, you need to determine whose turn it is to output the content, if it is the Director, then output <output>Director</output>{c_call_string}, {c_string_2} are actors, you should let the Director output the performance instruction first each time, and then arrange which actor to output the content for the next round according to the specified person in the Director's instruction, you may need to arrange the actor to perform the performance for several rounds after the Director has given the instruction. When the actors' several rounds of output contents have finished the last instruction, you should let the Director continue to output the instruction.",
193
+ "call_extract_words": "output"
194
+ }
195
+
196
+ # save
197
+ json_obj = json.dumps(sop_dict, ensure_ascii=False, indent=4)
198
+ with open(sop_file, 'w', encoding='utf-8') as f:
199
+ f.write(json_obj)
200
+
201
+ if __name__ == "__main__":
202
+ # create_sop(folder_name='jintian_ver1_cn', encoding='GB2312', save_name="jintian_ver1_cn")
203
+ # create_sop(folder_name='jintian', encoding='utf-8', save_name="jintian")
204
+ create_sop()
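For reference, the "relations" table that create_sop() builds is a linear chain: every state loops back to itself on transition "0" and advances on "1", with the last surviving plot state feeding end_state. A hedged example of the resulting shape for a run with two qualifying plot files (example values only):

# Expected "relations" layout for two plots; the real keys depend on how many
# plot files survive the character/length filtering above.
import json

relations = {
    "state1": {"0": "state1", "1": "state2"},
    "state2": {"0": "state2", "1": "end_state"},
    "end_state": {"0": "end_state"},
}
print(json.dumps(relations, indent=4))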
gradio_backend.py ADDED
@@ -0,0 +1,98 @@
1
+ import sys
2
+ sys.path.append("../../../src/agents")
3
+ sys.path.append("./novel-server")
4
+ sys.path.append("../../Gradio_Config")
5
+
6
+ import yaml
7
+ import os
8
+ import argparse
9
+ import random
10
+
11
+ from agents.SOP import SOP
12
+ from agents.Agent import Agent
13
+ from agents.Environment import Environment
14
+ from gradio_base import Client
15
+ from agents.Memory import Memory
16
+
17
+ from myagent import Node, MyAgent, ask_gpt
18
+ from typing import List, Tuple
19
+ from PROMPT import NOVEL_PROMPT
20
+ from myutils import print_log, new_parse
21
+ import json
22
+ from gradio_base import Client
23
+
24
+
25
+ from cmd_outline import run_node_1, run_node_2
26
+ from cmd_perform import init, run
27
+ from create_sop import create_sop
28
+
29
+ def show_in_gradio(state, name, chunk, node_name):
30
+ if state == 30:
31
+ Client.send_server(str([state, name, chunk, node_name, 50]))
32
+ return
33
+
34
+ if name.lower() in ["summary", "recorder"]:
35
+ """It is recorder"""
36
+ name = "Recorder"
37
+ if state == 0:
38
+ state = 22
39
+ else:
40
+ state = 21
41
+ else:
42
+ if Client.current_node != node_name and state == 0:
43
+ state = 12
44
+ Client.current_node = node_name
45
+ elif Client.current_node != node_name and state != 0:
46
+ assert False
47
+ else:
48
+ state = 10 + state
49
+ Client.send_server(str([state, name, chunk, node_name, 50]))
50
+
51
+
52
+ if __name__ == "__main__":
53
+ MyAgent.SIMULATION = False
54
+ MyAgent.TEMPERATURE = 0.3
55
+ stream_output = True
56
+ output_func = show_in_gradio
57
+ print("in")
58
+
59
+ if output_func is not None:
60
+ client = Client()
61
+ Client.send_server = client.send_message
62
+ client.send_message(
63
+ {
64
+ "agents_name": ['Elmo','Abby', 'Zoe', 'Ernie', 'Bert', 'Oscar'],
65
+ "nodes_name": ['Node 1','Node 2','Node 3', 'Node 4', 'state1', 'state2', 'state3', 'state4'],
66
+ "output_file_path": f"{os.getcwd()+'/novel_outline'}",
67
+ "requirement": NOVEL_PROMPT['Node 1']["task"]
68
+ }
69
+ )
70
+ client.listening_for_start_()
71
+ NOVEL_PROMPT['Node 1']['task'] = client.cache['requirement']
72
+ print("Received: ", client.cache['requirement'])
73
+ outline = run_node_1(
74
+ stream_output=stream_output,
75
+ output_func=output_func,
76
+ task_prompt=client.cache['requirement']
77
+ )
78
+ else:
79
+ outline = run_node_1(
80
+ stream_output=stream_output,
81
+ output_func=output_func
82
+ )
83
+ # pass
84
+ print(outline)
85
+ run_node_2(outline, stream_output=stream_output, output_func=output_func)
86
+ print("done")
87
+
88
+ create_sop()
89
+
90
+ with open("novel_outline.json", 'r') as f:
91
+ data = json.load(f)
92
+ name_list = list(data["agents"].keys())
93
+
94
+ show_in_gradio(30, str(name_list), " ", " ")
95
+
96
+ agents,sop,environment = init("novel_outline.json")
97
+ run(agents,sop,environment)
98
+
gradio_base.py ADDED
@@ -0,0 +1,559 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Emoji comes from this website:
18
+ # https://emojipedia.org/
19
+ import subprocess
20
+ from gradio_config import GradioConfig as gc
21
+ import gradio as gr
22
+ from typing import List, Tuple, Any
23
+ import time
24
+ import socket
25
+ import psutil
26
+ import os
27
+ from abc import abstractmethod
28
+
29
+ def convert2list4agentname(sop):
30
+ """
31
+ Extract the agent names of all states
32
+ return:
33
+ only name: [name1, name2, ...]
34
+ agent_name: [name1(role1), name2(role2), ...]
35
+ """
36
+ only_name = []
37
+ agent_name = []
38
+ roles_to_names = sop.roles_to_names
39
+ for state_name,roles_names in roles_to_names.items():
40
+ for role,name in roles_names.items():
41
+ agent_name.append(f"{name}({role})")
42
+ only_name.append(name)
43
+ agent_name = list(set(agent_name))
44
+ agent_name.sort()
45
+ return agent_name, only_name
46
+
47
+ def is_port_in_use(port):
48
+ """Check if the port is available"""
49
+ for conn in psutil.net_connections():
50
+ if conn.laddr.port == port:
51
+ return True
52
+ return False
53
+
54
+ def check_port(port):
55
+ """Pick an available port, caching the choice in PORT.txt so server and client agree."""
56
+ if os.path.isfile("PORT.txt"):
57
+ port = int(open("PORT.txt","r",encoding='utf-8').readlines()[0])
58
+ else:
59
+ for i in range(10):
60
+ if is_port_in_use(port+i) == False:
61
+ port += i
62
+ break
63
+ with open("PORT.txt", "w") as f:
64
+ f.writelines(str(port))
65
+ return port
66
+
67
+ # Determine some heads
68
+ SPECIAL_SIGN = {
69
+ "START": "<START>",
70
+ "SPLIT": "<SELFDEFINESEP>",
71
+ "END": "<ENDSEP>"
72
+ }
73
+ HOST = "127.0.0.1"
74
+ # The starting port number for the search.
75
+ PORT = 15000
76
+ PORT = check_port(PORT)
77
+
78
+ def print_log(message:str):
79
+ print(f"[{time.ctime()}]{message}")
80
+
81
+ global_dialog = {
82
+ "user": [],
83
+ "agent": {},
84
+ "system": []
85
+ }
86
+
87
+ class UIHelper:
88
+ """Static Class"""
89
+
90
+ @classmethod
91
+ def wrap_css(cls, content, name) -> str:
92
+ """
93
+ Description:
94
+ Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
95
+ Input:
96
+ content: Output content
97
+ name: Whose output is it
98
+ Output:
99
+ HTML
100
+ """
101
+ assert name in gc.OBJECT_INFO, \
102
+ f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
103
+ output = ""
104
+ info = gc.OBJECT_INFO[name]
105
+ if info["id"] == "USER":
106
+ output = gc.BUBBLE_CSS["USER"].format(
107
+ info["bubble_color"], # Background-color
108
+ info["text_color"], # Color of the agent's name
109
+ name, # Agent name
110
+ info["text_color"], # Font color
111
+ info["font_size"], # Font size
112
+ content, # Content
113
+ info["head_url"] # URL of the avatar
114
+ )
115
+ elif info["id"] == "SYSTEM":
116
+ output = gc.BUBBLE_CSS["SYSTEM"].format(
117
+ info["bubble_color"], # Background-color
118
+ info["font_size"], # Font size
119
+ info["text_color"], # Font color
120
+ name, # Agent name
121
+ content # Content
122
+ )
123
+ elif info["id"] == "AGENT":
124
+ output = gc.BUBBLE_CSS["AGENT"].format(
125
+ info["head_url"], # URL of the avatar
126
+ info["bubble_color"], # Background-color
127
+ info["text_color"], # Font color
128
+ name, # Agent name
129
+ info["text_color"], # Font color
130
+ info["font_size"], # Font size
131
+ content, # Content
132
+ )
133
+ else:
134
+ assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
135
+ return output
136
+
137
+ @classmethod
138
+ def novel_filter(cls, content, agent_name):
139
+
140
+ """For example, <CONTENT>...</CONTENT> should be rendered as "CONTENT: ..."."""
141
+ IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
142
+ if IS_RECORDER:
143
+ BOLD_FORMAT = """<div style="color: #000000; display:inline">
144
+ <b>{}</b>
145
+ </div>
146
+ <span style="color: black;">
147
+ """
148
+ else:
149
+ BOLD_FORMAT = "<b>{}</b>"
150
+ CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
151
+ <b>{}</b>
152
+ </div>
153
+ """
154
+ START_FORMAT = "<{}>"
155
+ END_FORMAT = "</{}>"
156
+ mapping = {
157
+ "TARGET": "🎯 Current Target: ",
158
+ "NUMBER": "🍖 Required Number: ",
159
+ "THOUGHT": "🤔 Overall Thought: ",
160
+ "FIRST NAME": "⚪ First Name: ",
161
+ "LAST NAME": "⚪ Last Name: ",
162
+ "ROLE": "🤠 Character Properties: ",
163
+ "RATIONALES": "🤔 Design Rationale: ",
164
+ "BACKGROUND": "🚊 Character Background: ",
165
+ "ID": "🔴 ID: ",
166
+ "TITLE": "🧩 Chapter Title: ",
167
+ "ABSTRACT": "🎬 Abstract: ",
168
+ "CHARACTER INVOLVED": "☃️ Character Involved: ",
169
+ "ADVICE": "💬 Advice:",
170
+ "NAME": "📛 Name: ",
171
+ "GENDER": "👩‍👩‍👦‍👦 Gender: ",
172
+ "AGE": "⏲️ Age: ",
173
+ "WORK": "👨‍🔧 Work: ",
174
+ "PERSONALITY": "🧲 Character Personality: ",
175
+ "SPEECH STYLE": "🗣️ Speaking Style: ",
176
+ "RELATION": "🏠 Relation with Others: ",
177
+ "WORD COUNT": "🎰 Word Count: ",
178
+ "CHARACTER DESIGN": "📈 Character Design: ",
179
+ "CHARACTER REQUIRE": "📈 Character Require: ",
180
+ "CHARACTER NAME": "📈 Character Naming Analysis: ",
181
+ "CHARACTER NOW": "📈 Character Now: ",
182
+ "OUTLINE DESIGN": "📈 Outline Design: ",
183
+ "OUTLINE REQUIRE": "📈 Outline Require: ",
184
+ "OUTLINE NOW": "📈 Outline Now: ",
185
+ "SUB TASK": "🎯 Current Sub Task: ",
186
+ "CHARACTER ADVICE": "💬 Character Design Advice: ",
187
+ "OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
188
+ "OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
189
+ "OUTLINE ADVICE": "💬 Outline Advice: ",
190
+ "NEXT": "➡️ Next Advice: ",
191
+ "TOTAL NUMBER": "🔢 Total Number: "
192
+ }
193
+ for i in range(1, 10):
194
+ mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
195
+ mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
196
+ for key in mapping:
197
+ if key in [f"CHARACTER {i}" for i in range(1, 10)] \
198
+ or key in [f"SECTION {i}" for i in range(1, 10)] \
199
+ :
200
+ content = content.replace(
201
+ START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
202
+ )
203
+ elif key in ["TOTAL NUMBER"]:
204
+ content = content.replace(
205
+ START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
206
+ )
207
+ content = content.replace(
208
+ END_FORMAT.format(key), "</span>"
209
+ )
210
+ else:
211
+ content = content.replace(
212
+ START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
213
+ )
214
+
215
+ content = content.replace(
216
+ END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
217
+ )
218
+ return content
219
+
220
+ @classmethod
221
+ def singleagent_filter(cls, content, agent_name):
222
+ return content
223
+
224
+ @classmethod
225
+ def debate_filter(cls, content, agent_name):
226
+ return content
227
+
228
+ @classmethod
229
+ def code_filter(cls, content, agent_name):
230
+ # return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
231
+ return content
232
+
233
+ @classmethod
234
+ def general_filter(cls, content, agent_name):
235
+ return content
236
+
237
+ @classmethod
238
+ def filter(cls, content: str, agent_name: str, ui_name: str):
239
+ """
240
+ Description:
241
+ Make certain modifications to the output content so that it renders more cleanly in Gradio.
242
+ Input:
243
+ content: output content
244
+ agent_name: Whose output is it
245
+ ui_name: What UI is currently launching
246
+ Output:
247
+ Modified content
248
+ """
249
+ mapping = {
250
+ "SingleAgentUI": cls.singleagent_filter,
251
+ "DebateUI": cls.debate_filter,
252
+ "NovelUI": cls.novel_filter,
253
+ "CodeUI": cls.code_filter,
254
+ "GeneralUI": cls.general_filter
255
+ }
256
+ if ui_name in mapping:
257
+ return mapping[ui_name](content, agent_name)
258
+ else:
259
+ return content
260
+
261
+ class Client:
262
+ """
263
+ For inter-process communication, this is the client.
264
+ `gradio_backend.PY` serves as the backend, while `run_gradio` is the frontend.
265
+ Communication between the frontend and backend is accomplished using Sockets.
266
+ """
267
+ # =======================Radio Const String======================
268
+ SINGLE_MODE = "Single Mode"
269
+ AUTO_MODE = "Auto Mode"
270
+ MODE_LABEL = "Select the execution mode"
271
+ MODE_INFO = "In single mode, execution pauses after each agent finishes its output until you click to continue. In auto mode, once you submit your input, all agents keep generating output until the task ends."
272
+ # ===============================================================
273
+ mode = AUTO_MODE
274
+ FIRST_RUN:bool = True
275
+ # If the last agent was the user, the next agent runs automatically instead of waiting for a button click.
276
+ LAST_USER:bool = False
277
+
278
+ receive_server = None
279
+ send_server = None
280
+ current_node = None
281
+ cache = {}
282
+
283
+ def __init__(self, host=HOST, port=PORT, bufsize=1024):
284
+ assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
285
+ self.SIGN = SPECIAL_SIGN
286
+ self.bufsize = bufsize
287
+ assert bufsize > 0
288
+ self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
289
+ self.client_socket.connect((host, port))
290
+ while True:
291
+ data = self.client_socket.recv(self.bufsize).decode('utf-8')
292
+ if data == "hi":
293
+ self.client_socket.send("hello agent".encode('utf-8'))
294
+ time.sleep(1)
295
+ elif data == "check":
296
+ break
297
+ print_log("Client: connected successfully ...")
298
+
299
+ def start_server(self):
300
+ while True:
301
+ message = yield
302
+ if message == 'exit':
303
+ break
304
+ self.send_message(message=message)
305
+
306
+ def send_message(self, message):
307
+ """Send the message to the server."""
308
+ if isinstance(message, list) or isinstance(message, dict):
309
+ message = str(message)
310
+ assert isinstance(message, str)
311
+ message = message + self.SIGN["SPLIT"]
312
+ self.client_socket.send(message.encode('utf-8'))
313
+
314
+ def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
315
+ """Receive messages from the server, and it will block the process. Supports receiving long text."""
316
+ remaining = ""
317
+ while True:
318
+ # receive message
319
+ dataset = self.client_socket.recv(self.bufsize)
320
+ try:
321
+ # If decoding fails, the read stopped mid-way through a multi-byte character (long text); keep buffering bytes.
322
+ dataset = dataset.decode('utf-8')
323
+ except UnicodeDecodeError:
324
+ if not isinstance(remaining, bytes):
325
+ remaining = remaining.encode('utf-8')
326
+ assert isinstance(dataset, bytes)
327
+ remaining += dataset
328
+ try:
329
+ dataset = remaining.decode('utf-8')
330
+ remaining = ""
331
+ except UnicodeDecodeError:
332
+ continue
333
+ assert isinstance(remaining, str)
334
+ dataset = remaining + dataset
335
+ list_dataset = dataset.split(split_identifier)
336
+ if len(list_dataset) == 1:
337
+ # If there is only one result from the split, it indicates that the current sequence itself has not yet ended.
338
+ remaining = list_dataset[0]
339
+ continue
340
+ else:
341
+ remaining = list_dataset[-1]
342
+ # Received successfully.
343
+ list_dataset = list_dataset[:-1]
344
+ return_value = []
345
+ for item in list_dataset:
346
+ if end_identifier is not None and item == end_identifier:
347
+ break
348
+ return_value.append(item)
349
+ identifier = yield return_value
350
+ if identifier is not None:
351
+ end_identifier, split_identifier = identifier
352
+
353
+ def listening_for_start_(self):
354
+ """
355
+ When the server starts, the client is automatically launched.
356
+ At this point, process synchronization is required,
357
+ such as sending client data to the server for rendering,
358
+ then the server sending the modified data back to the client,
359
+ and simultaneously sending a startup command.
360
+ Once the client receives the data, it will start running.
361
+ """
362
+ Client.receive_server = self.receive_message()
363
+ # Waiting for information from the server.
364
+ data: list = next(Client.receive_server)
365
+ assert len(data) == 1
366
+ data = eval(data[0])
367
+ assert isinstance(data, dict)
368
+ Client.cache.update(data)
369
+ # Waiting for start command from the server.
370
+ data:list = Client.receive_server.send(None)
371
+ assert len(data) == 1
372
+ assert data[0] == "<START>"
373
+
374
+ class WebUI:
375
+ """
376
+ The base class for the frontend, which encapsulates some functions for process information synchronization.
377
+ When a new frontend needs to be created, you should inherit from this class,
378
+ then implement the `construct_ui()` method and set up event listeners.
379
+ Finally, execute `run()` to load it.
380
+ """
381
+
382
+ def receive_message(
383
+ self,
384
+ end_identifier:str=None,
385
+ split_identifier:str=SPECIAL_SIGN["SPLIT"]
386
+ )->List:
387
+ """This is the same as in Client class."""
388
+ yield "hello"
389
+ remaining = ""
390
+ while True:
391
+ dataset = self.client_socket.recv(self.bufsize)
392
+ try:
393
+ dataset = dataset.decode('utf-8')
394
+ except UnicodeDecodeError:
395
+ if not isinstance(remaining, bytes):
396
+ remaining = remaining.encode('utf-8')
397
+ assert isinstance(dataset, bytes)
398
+ remaining += dataset
399
+ try:
400
+ dataset = remaining.decode('utf-8')
401
+ remaining = ""
402
+ except UnicodeDecodeError:
403
+ continue
404
+ assert isinstance(remaining, str)
405
+ dataset = remaining + dataset
406
+ list_dataset = dataset.split(split_identifier)
407
+ if len(list_dataset) == 1:
408
+ remaining = list_dataset[0]
409
+ continue
410
+ else:
411
+ remaining = list_dataset[-1]
412
+ list_dataset = list_dataset[:-1]
413
+ return_value = []
414
+ for item in list_dataset:
415
+ if end_identifier is not None and item == end_identifier:
416
+ break
417
+ return_value.append(item)
418
+ identifier = yield return_value
419
+ if identifier is not None:
420
+ end_identifier, split_identifier = identifier
421
+
422
+ def send_message(self, message:str):
423
+ """Send message to client."""
424
+ SEP = self.SIGN["SPLIT"]
425
+ self.client_socket.send(
426
+ (message+SEP).encode("utf-8")
427
+ )
428
+
429
+ def _connect(self):
430
+ # check
431
+ if self.server_socket:
432
+ self.server_socket.close()
433
+ assert not os.path.isfile("PORT.txt")
434
+ self.socket_port = check_port(PORT)
435
+ # Step1. initialize
436
+ self.server_socket = socket.socket(
437
+ socket.AF_INET, socket.SOCK_STREAM
438
+ )
439
+ # Step2. binding ip and port
440
+ self.server_socket.bind((self.socket_host, self.socket_port))
441
+ # Step3. run client
442
+ self._start_client()
443
+
444
+ # Step4. listening for connect
445
+ self.server_socket.listen(1)
446
+
447
+ # Step5. test connection
448
+ client_socket, client_address = self.server_socket.accept()
449
+ print_log("server: establishing connection......")
450
+ self.client_socket = client_socket
451
+ while True:
452
+ client_socket.send("hi".encode('utf-8'))
453
+ time.sleep(1)
454
+ data = client_socket.recv(self.bufsize).decode('utf-8')
455
+ if data == "hello agent":
456
+ client_socket.send("check".encode('utf-8'))
457
+ print_log("server: connected successfully")
458
+ break
459
+ assert os.path.isfile("PORT.txt")
460
+ os.remove("PORT.txt")
461
+ if self.receive_server:
462
+ del self.receive_server
463
+ self.receive_server = self.receive_message()
464
+ assert next(self.receive_server) == "hello"
465
+
466
+ @abstractmethod
467
+ def render_and_register_ui(self):
468
+ # You need to implement this function.
469
+ # The function's purpose is to bind the name of the agent with an image.
470
+ # The name of the agent is stored in `self.cache[]`,
471
+ # and the binding itself is done by the `add_agent` method of the `GradioConfig` class in `Gradio_Config/gradio_config.py`.
472
+ # This function will be executed in `self.first_recieve_from_client()`
473
+ pass
474
+
475
+ def first_recieve_from_client(self, reset_mode:bool=False):
476
+ """
477
+ This function is used to receive information from the client and is typically executed during the initialization of the class.
478
+ If `reset_mode` is False, it will bind the name of the agent with an image.
479
+ """
480
+ self.FIRST_RECIEVE_FROM_CLIENT = True
481
+ data_list:List = self.receive_server.send(None)
482
+ assert len(data_list) == 1
483
+ data = eval(data_list[0])
484
+ assert isinstance(data, dict)
485
+ self.cache.update(data)
486
+ if not reset_mode:
487
+ self.render_and_register_ui()
488
+
489
+ def _second_send(self, message:dict):
490
+ # Send the modified message.
491
+ # It will be executed in `self.send_start_cmd()` automatically.
492
+ self.send_message(str(message))
493
+
494
+ def _third_send(self):
495
+ # Send start command.
496
+ # It will be executed in `self.send_start_cmd()` automatically.
497
+ self.send_message(self.SIGN['START'])
498
+
499
+ def send_start_cmd(self, message:dict={"hello":"hello"}):
500
+ # If you have no message to send, you can ignore the args `message`.
501
+ assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
502
+ self._second_send(message=message)
503
+ time.sleep(1)
504
+ self._third_send()
505
+ self.FIRST_RECIEVE_FROM_CLIENT = False
506
+
507
+ def __init__(
508
+ self,
509
+ client_cmd: list, # ['python','test.py','--a','b','--c','d']
510
+ socket_host: str = HOST,
511
+ socket_port: int = PORT,
512
+ bufsize: int = 1024,
513
+ ui_name: str = ""
514
+ ):
515
+ self.ui_name = ui_name
516
+ self.server_socket = None
517
+ self.SIGN = SPECIAL_SIGN
518
+ self.socket_host = socket_host
519
+ self.socket_port = socket_port
520
+ self.bufsize = bufsize
521
+ self.client_cmd = client_cmd
522
+
523
+ self.receive_server = None
524
+ self.cache = {}
525
+ assert self.bufsize > 0
526
+ self._connect()
527
+
528
+ def _start_client(self):
529
+ print(f"server: executing `{' '.join(self.client_cmd)}` ...")
530
+ self.backend = subprocess.Popen(self.client_cmd)
531
+
532
+ def _close_client(self):
533
+ print(f"server: killing `{' '.join(self.client_cmd)}` ...")
534
+ self.backend.terminate()
535
+
536
+ def reset(self):
537
+ print("server: restarting ...")
538
+ self._close_client()
539
+ time.sleep(1)
540
+ self._connect()
541
+
542
+ def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
543
+ # Rendered bubbles (HTML format) are used for gradio output.
544
+ output = f"**{node_name}**<br>" if render_node_name else ""
545
+ for item in agent_response:
546
+ for agent_name in item:
547
+ content = item[agent_name].replace("\n", "<br>")
548
+ content = UIHelper.filter(content, agent_name, self.ui_name)
549
+ output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
550
+ rendered_data[-1] = [rendered_data[-1][0], output]
551
+ return rendered_data
552
+
553
+ def run(self,share: bool = True):
554
+ self.demo.queue()
555
+ self.demo.launch()
556
+
557
+
558
+ if __name__ == '__main__':
559
+ pass
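The two receive_message generators above implement the same wire protocol: the socket carries UTF-8 text in which complete messages are delimited by <SELFDEFINESEP>, so the receiver keeps buffering partial reads (including partially transmitted multi-byte characters) until at least one full delimiter arrives. A minimal, socket-free sketch of the buffering step follows; feed is a hypothetical helper, for illustration only.

# Delimiter-based reassembly in isolation, mirroring Client.receive_message above.
SEP = "<SELFDEFINESEP>"

def feed(buffer: str, data: str):
    """Append `data` to `buffer` and return (complete_messages, new_buffer)."""
    buffer += data
    *complete, buffer = buffer.split(SEP)
    return complete, buffer

msgs, buf = feed("", "hello" + SEP + "wor")
print(msgs, repr(buf))   # ['hello'] 'wor'
msgs, buf = feed(buf, "ld" + SEP)
print(msgs, repr(buf))   # ['world'] ''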
gradio_config.py ADDED
@@ -0,0 +1,437 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ from PIL import Image
19
+ import requests
20
+ from typing import List, Tuple
21
+
22
+ class GradioConfig:
23
+ # How many avatars are currently registered
24
+ POINTER = 0
25
+
26
+ # Avatar image. You can add or replace.
27
+ AGENT_HEAD_URL = [
28
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687579617434043.jpg",
29
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687592097408547.jpg",
30
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561699613.jpg",
31
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561275758.jpg",
32
+ "https://img.touxiangwu.com/uploads/allimg/2021090300/ry5k31wt33c.jpg",
33
+ "https://img.touxiangwu.com/uploads/allimg/2021090300/0ls2gmwhrf5.jpg",
34
+ "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
35
+ "https://img.touxiangwu.com/zb_users/upload/2023/03/202303271679886128550253.jpg",
36
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711344407060.jpg",
37
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711345834296.jpg",
38
+ "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311194291520.jpg",
39
+ "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311196958993.jpg",
40
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/vr0bkov0dwl.jpg",
41
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/auqx5zfsv5g.jpg",
42
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/llofpivtwls.jpg",
43
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/3j2sdot3ye0.jpg",
44
+ "https://img.touxiangwu.com/2020/3/nQfYf2.jpg",
45
+ "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068774532.jpg",
46
+ "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068289945.jpg",
47
+ "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918069785183.jpg",
48
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561292003.jpg",
49
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561578616.jpg",
50
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726564597524.jpg"
51
+ ]
52
+ USER_HEAD_URL = "https://img.touxiangwu.com/zb_users/upload/2023/05/202305301685407468585486.jpg"
53
+
54
+ # The css style of gradio.Chatbot
55
+ CSS = """
56
+ #chatbot1 .user {
57
+ background-color:transparent;
58
+ border-color:transparent;
59
+ }
60
+ #chatbot1 .bot {
61
+ background-color:transparent;
62
+ border-color:transparent;
63
+ }
64
+ #btn {color: red; border-color: red;}
65
+ """
66
+
67
+ ID = ["USER", "AGENT", "SYSTEM"]
68
+
69
+ # Bubble template
70
+ BUBBLE_CSS = {
71
+ # Background-color Name-color Name-content Font-color Font-size Content Avatar-URL
72
+ "USER": """
73
+ <div style="display: flex; align-items: flex-start; justify-content: flex-end;">
74
+ <div style="background-color: {}; border-radius: 20px 0px 20px 20px; padding: 15px; min-width: 100px; max-width: 300px;">
75
+ <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
76
+ <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
77
+ </div>
78
+ <img src="{}" alt="USER" style="width: 50px; height: 50px; border-radius: 50%; margin-left: 10px;">
79
+ </div>
80
+ """,
81
+
82
+ # Avatar-URL Background-color Name-color Name-Content Font-color Font-size Content
83
+ "AGENT": """
84
+ <div style="display: flex; align-items: flex-start;">
85
+ <img src="{}" alt="AGENT" style="width: 50px; height: 50px; border-radius: 50%; margin-right: 10px;">
86
+ <div style="background-color: {}; border-radius: 0px 20px 20px 20px; padding: 15px; min-width: 100px; max-width: 600px;">
87
+ <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
88
+ <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
89
+ </div>
90
+ </div>
91
+ """,
92
+
93
+ # Background-color Font-size Font-color Name Content
94
+ "SYSTEM": """
95
+ <div style="display: flex; align-items: center; justify-content: center;">
96
+ <div style="background-color: {}; border-radius: 20px; padding: 1px; min-width: 200px; max-width: 1000px;">
97
+ <p style="margin: 0; padding: 0; text-align: center; font-size: {}px; font-weight: bold; font-family: '微软雅黑', sans-serif; color: {};">{}:{}</p>
98
+ </div>
99
+ </div>
100
+ """
101
+ }
102
+
103
+ ROLE_2_NAME = {}
104
+
105
+ OBJECT_INFO = {
106
+
107
+ "User": {
108
+ # https://img-blog.csdnimg.cn/img_convert/7c20bc39ac69b6972a22e18762d02db3.jpeg
109
+ "head_url": USER_HEAD_URL,
110
+ "bubble_color": "#95EC69",
111
+ "text_color": "#000000",
112
+ "font_size": 0,
113
+ "id": "USER"
114
+ },
115
+
116
+ "System": {
117
+ # https://img-blog.csdnimg.cn/img_convert/e7e5887cfff67df8c2205c2ef0e5e7fa.png
118
+ "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/03/202303141678768524747045.jpg",
119
+ "bubble_color": "#7F7F7F", ##FFFFFF
120
+ "text_color": "#FFFFFF", ##000000
121
+ "font_size": 0,
122
+ "id": "SYSTEM"
123
+ },
124
+
125
+ "wait": {
126
+ "head_url": "https://img.touxiangwu.com/zb_users/upload/2022/12/202212011669881536145501.jpg",
127
+ "bubble_color": "#E7CBA6",
128
+ "text_color": "#000000",
129
+ "font_size": 0,
130
+ "id": "AGENT"
131
+ },
132
+
133
+ "Recorder": {
134
+ "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
135
+ "bubble_color": "#F7F7F7",
136
+ "text_color": "#000000",
137
+ "font_size": 0,
138
+ "id": "AGENT"
139
+ }
140
+ }
141
+
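A minimal sketch of how an OBJECT_INFO entry fills the AGENT bubble template above (illustrative only; it assumes the enclosing class is the GradioConfig imported elsewhere as `gc`, and the message text is hypothetical):

    # Sketch: fill BUBBLE_CSS["AGENT"] from an OBJECT_INFO entry.
    # Placeholder order follows the comment above the template:
    # Avatar-URL, Background-color, Name-color, Name-content, Font-color, Font-size, Content.
    info = gc.OBJECT_INFO["Recorder"]
    html = gc.BUBBLE_CSS["AGENT"].format(
        info["head_url"],         # avatar
        info["bubble_color"],     # bubble background
        info["text_color"],       # name color
        "Recorder",               # displayed name
        info["text_color"],       # message color
        info["font_size"] or 16,  # font size in px (0 until init/add_agent sets it)
        "Chapter summary saved.", # hypothetical message content
    )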
142
+ @classmethod
143
+ def color_for_img(cls, url):
144
+ """
145
+ Extract the main colors from the picture and set them as the background color,
146
+ then determine the corresponding text color.
147
+ """
148
+
149
+ def get_main_color(image):
150
+ image = image.convert("RGB")
151
+ width, height = image.size
152
+ pixels = image.getcolors(width * height)
153
+ most_common_pixel = max(pixels, key=lambda item: item[0])
154
+ return most_common_pixel[1]
155
+
156
+ def is_dark_color(rgb_color):
157
+ r, g, b = rgb_color
158
+ luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
159
+ return luminance < 0.5
160
+
161
+ def download_image(url):
162
+ print(f"binding: {url}")
163
+ response = requests.get(url)
164
+ if response.status_code == 200:
165
+ with open('image.jpg', 'wb') as f:
166
+ f.write(response.content)
167
+
168
+ def rgb_to_hex(color):
169
+ return "#{:02X}{:02X}{:02X}".format(color[0], color[1], color[2])
170
+
171
+ def get_color(image_url):
172
+ download_image(image_url)
173
+
174
+ image = Image.open("image.jpg")
175
+ main_color = get_main_color(image)
176
+ is_dark = is_dark_color(main_color)
177
+
178
+ if is_dark:
179
+ font_color = "#FFFFFF"
180
+ else:
181
+ font_color = "#000000"
182
+
183
+ return rgb_to_hex(main_color), font_color
184
+
185
+ return get_color(url)
186
+
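A one-line usage sketch for the helper above (assuming the enclosing class is GradioConfig and the avatar URL is reachable):

    # Sketch: pick a bubble background and a readable font color from an avatar image.
    bubble_color, text_color = GradioConfig.color_for_img(GradioConfig.AGENT_HEAD_URL[0])
    # e.g. ("#A1B2C3", "#000000"); the font color flips to "#FFFFFF" for dark avatars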
187
+ @classmethod
188
+ def init(cls, JSON):
189
+ # Deprecated
190
+ with open(JSON) as f:
191
+ sop = json.load(f)
192
+ cnt = 0
193
+ FISRT_NODE = True
194
+ fisrt_node_roles = []
195
+ for node_name in sop['nodes']:
196
+ node_info = sop['nodes'][node_name]
197
+ agent_states = node_info['agent_states']
198
+ for agent_role in agent_states:
199
+ name = agent_states[agent_role]['style']['name']
200
+ cls.ROLE_2_NAME[agent_role] = name
201
+ if FISRT_NODE:
202
+ fisrt_node_roles.append(agent_role)
203
+ bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cnt])
204
+ cls.OBJECT_INFO[name] = {
205
+ "head_url": f"{cls.AGENT_HEAD_URL[cnt]}",
206
+ "bubble_color": bubble_color,
207
+ "text_color": text_color,
208
+ "font_size": 0,
209
+ "id": "AGENT"
210
+ }
211
+ cnt += 1
212
+ if FISRT_NODE:
213
+ FISRT_NODE = False
214
+ print(cls.OBJECT_INFO)
215
+ for usr_name in cls.OBJECT_INFO:
216
+ if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
217
+ cls.OBJECT_INFO[usr_name]["font_size"] = 12
218
+ elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
219
+ cls.OBJECT_INFO[usr_name]["font_size"] = 16
220
+ else:
221
+ assert False
222
+ return fisrt_node_roles
223
+
224
+ @classmethod
225
+ def add_agent(cls, agents_name:List):
226
+ for name in agents_name:
227
+ bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cls.POINTER])
228
+ cls.OBJECT_INFO[name] = {
229
+ "head_url": f"{cls.AGENT_HEAD_URL[cls.POINTER]}",
230
+ "bubble_color": bubble_color,
231
+ "text_color": text_color,
232
+ "font_size": 0,
233
+ "id": "AGENT"
234
+ }
235
+ cls.POINTER += 1
236
+ for usr_name in cls.OBJECT_INFO:
237
+ if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
238
+ cls.OBJECT_INFO[usr_name]["font_size"] = 12
239
+ elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
240
+ cls.OBJECT_INFO[usr_name]["font_size"] = 16
241
+ else:
242
+ assert False
243
+
244
+
245
+ class StateConfig:
246
+ """UI configuration for the step progress bar (indicating the current node)"""
247
+
248
+ CSS = """
249
+ :root {
250
+ --gradient-start: 100%;
251
+ --gradient-end: 0%;
252
+ }
253
+ .container.progress-bar-container {
254
+ position: relative;
255
+ display: flex;
256
+ align-items: flex-end;
257
+ width: 100%;
258
+ overflow-x: auto;
259
+ padding-bottom: 30px;
260
+ padding-top: 20px;
261
+ }
262
+ .container.progress-bar-container::-webkit-scrollbar {
263
+ width: 8px;
264
+ background-color: transparent;
265
+ }
266
+
267
+ .container.progress-bar-container::-webkit-scrollbar-thumb {
268
+ background-color: transparent;
269
+ }
270
+
271
+ .progress-bar-container .progressbar {
272
+ counter-reset: step;
273
+ white-space: nowrap;
274
+ }
275
+ .progress-bar-container .progressbar li {
276
+ list-style: none;
277
+ display: inline-block;
278
+ width: 200px;
279
+ position: relative;
280
+ text-align: center;
281
+ cursor: pointer;
282
+ white-space: normal;
283
+ }
284
+ .progress-bar-container .progressbar li:before {
285
+ content: counter(step);
286
+ counter-increment: step;
287
+ width: 30px;
288
+ height: 30px;
289
+ line-height: 30px;
290
+ border: 1px solid #ddd;
291
+ border-radius: 100%;
292
+ display: block;
293
+ text-align: center;
294
+ margin: 0 auto 10px auto;
295
+ background-color: #ffffff;
296
+ }
297
+ .progress-bar-container .progressbar li:after {
298
+ content: attr(data-content);
299
+ position: absolute;
300
+ width: 87%;
301
+ height: 2px;
302
+ background-color: #dddddd;
303
+ top: 15px;
304
+ left: -45%;
305
+ }
306
+ .progress-bar-container .progressbar li:first-child:after {
307
+ content: none;
308
+ }
309
+ .progress-bar-container .progressbar li.active {
310
+ color: green;
311
+ }
312
+ .progress-bar-container .progressbar li.active:before {
313
+ border-color: green;
314
+ background-color: green;
315
+ color: white;
316
+ }
317
+ .progress-bar-container .progressbar li.active + li:after {
318
+ background: linear-gradient(to right, green var(--gradient-start), lightgray var(--gradient-end));
319
+ }
320
+ .progress-bar-container .small-element {
321
+ transform: scale(0.8);
322
+ }
323
+ .progress-bar-container .progressbar li span {
324
+ position: absolute;
325
+ top: 40px;
326
+ left: 0;
327
+ width: 100%;
328
+ text-align: center;
329
+ }
330
+ .progress-bar-container .progressbar li .data-content {
331
+ position: absolute;
332
+ width: 100%;
333
+ top: -10px;
334
+ left: -100px;
335
+ text-align: center;
336
+ }
337
+ """
338
+
339
+ FORMAT = """
340
+ <html>
341
+ <head>
342
+ <style>
343
+ {}
344
+ </style>
345
+ </head>
346
+ <body>
347
+ <br>
348
+ <center>
349
+ <div class="container progress-bar-container">
350
+ <ul class="progressbar">
351
+ {}
352
+ </ul>
353
+ </div>
354
+ </center>
355
+ </body>
356
+ </html>
357
+ """
358
+
359
+ STATES_NAME:List[str] = None
360
+
361
+ @classmethod
362
+ def _generate_template(cls, types:str)->str:
363
+ # normal: A state with no execution.
364
+ # active-show-up: Active state, and content displayed above the horizontal line.
365
+ # active-show-down: Active state, and content displayed below the horizontal line.
366
+ # active-show-both: Active state, and content displayed both above and below the horizontal line.
367
+ # active-show-none: Active state, with no extra content displayed above or below the horizontal line.
368
+
369
+ assert types.lower() in ["normal","active-show-up", "active-show-down", "active-show-both", "active", "active-show-none"]
370
+ both_templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
371
+ <div class="data-content">
372
+ <center>
373
+ <p style="line-height: 1px;"></p>
374
+ {}
375
+ <p>
376
+ {}
377
+ </p>
378
+ </center>
379
+ </div>
380
+ <span>{}</span>
381
+ </li>"""
382
+
383
+ if types.lower() == "normal":
384
+ templates = "<li><span>{}</span></li>"
385
+ elif types.lower() == "active":
386
+ templates = """<li class="active"><span>{}</span></li>"""
387
+ elif types.lower() == "active-show-up":
388
+ templates = both_templates.format("{}","{}", "{}", "", "{}")
389
+ elif types.lower() == "active-show-down":
390
+ templates = both_templates.format("{}","{}", "", "{}", "{}")
391
+ elif types.lower() == "active-show-both":
392
+ templates = both_templates
393
+ elif types.lower() == "active-show-none":
394
+ templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
395
+ <span>{}</span>
396
+ </li>"""
397
+ else:
398
+ assert False
399
+ return templates
400
+
401
+ @classmethod
402
+ def update_states(cls, current_states:List[int], current_templates:List[str], show_content:List[Tuple[str]])->str:
403
+ assert len(current_states) == len(current_templates)
404
+ # You can dynamically change the number of states.
405
+ # assert len(current_states) == len(cls.STATES_NAME)
406
+ css_code = []
407
+ for idx in range(len(current_states)):
408
+ if idx == 0:
409
+ if current_states[idx] != 0:
410
+ css_code = [f"{cls._generate_template('active').format(cls.STATES_NAME[idx])}"]
411
+ else:
412
+ css_code = [f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"]
413
+ continue
414
+ if current_states[idx-1] == 0:
415
+ # new_code = f"{cls._generate_template('normal').format(*(show_content[idx]))}"
416
+ new_code = f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"
417
+ else:
418
+ new_code = f"{cls._generate_template(current_templates[idx]).format(current_states[idx-1], 100-current_states[idx-1],*(show_content[idx-1]), cls.STATES_NAME[idx])}"
419
+ if current_states[idx-1] != 100 or (current_states[idx]==0 and current_states[idx-1]==100):
420
+ new_code = new_code.replace("""li class="active" ""","""li """)
421
+ css_code.append(new_code)
422
+ return "\n".join(css_code)
423
+
424
+ @classmethod
425
+ def create_states(cls, states_name:List[str], manual_create_end_nodes:bool=False):
426
+ # Create states
427
+ if manual_create_end_nodes:
428
+ states_name.append("Done")
429
+ css_code = ""
430
+ cls.STATES_NAME: List[str] = states_name
431
+ for name in states_name:
432
+ css_code = f"{css_code}\n{cls._generate_template('normal').format(name)}"
433
+ return css_code
434
+
435
+
436
+ if __name__ == '__main__':
437
+ pass
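A short usage sketch for the progress-bar helpers above (illustrative, not part of this file; it assumes the module is importable as `gradio_config`):

    # Sketch: render the step progress bar for three nodes plus the auto-added "Done"
    # node, with Node 1 finished and Node 2 at 40%.
    from gradio_config import StateConfig as sc

    sc.create_states(["Node 1", "Node 2", "Node 3"], manual_create_end_nodes=True)
    html = sc.FORMAT.format(
        sc.CSS,
        sc.update_states(
            current_states=[100, 40, 0, 0],
            current_templates=["active-show-up"] * 4,
            show_content=[("💬",)] * 4,
        ),
    )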
novel-server/PROMPT.py ADDED
@@ -0,0 +1,400 @@
1
+ NOVEL_PROMPT = {
2
+ "Node 1": {
3
+ "task": \
4
+ """
5
+ Now I need to write a script outline about modern family ethics. The outline needs about 3 chapters, and the script must be appealing. The primary characters of the script are Mike Smith and Jane Black: Mike is a programmer at an internet company, Jane is a high school chemistry teacher, and they have a child in kindergarten. In addition to the two main characters above, 2 additional secondary characters need to be designed to enrich the script. Please set up a name for each character that conforms to the `first name + last name` format, e.g. Jenny White; names like a and b are forbidden. The summary of each chapter in the outline should be about 100 words, and each chapter requires a title.
6
+ """,
7
+ "agents": {
8
+ "Elmo": {
9
+ "system": \
10
+ """
11
+ You're Elmo, you specialize in character design and first draft outline writing, and you'll be working with two other people (Abby, who rewrites based on suggestions, and Zoe, who is responsible for providing advice and controlling the overall process) to complete the following task:
12
+ {}
13
+
14
+
15
+ In addition to this, you can post your own opinions when Abby provides a rewritten character design or outline, or when Zoe gives her opinion.
16
+ You will need to first output your current assignment and then output according to the assignment, here are your output formatting requirements:
17
+ {}
18
+ Please format the output strictly according to the above.
19
+
20
+
21
+ Here are the guidelines you must follow:
22
+ 1. no apologizing or thanking each other is allowed;
23
+ 2. if someone apologizes or thanks, remind and stop them immediately;
24
+ 3. do not do anything unrelated to the task;
25
+ 4. do not say repetitive things;
26
+ 5. remind and stop someone as soon as they say something repetitive.
27
+ """,
28
+ "output": \
29
+ """
30
+ First output the current target:
31
+ <TARGET>{If there is no first draft of the character setting, then output CHARACTER DESIGN; if there is a first draft of the character setting and you have comments and the outline is not yet written, then output ADVICE CHARACTER; if you do not have comments on the character setting and the outline is not yet written, then output OUTLINE DESIGN; if there is a first draft of the outline and you have comments to make, then output ADVICE OUTLINE; if you think both the character setting and the outline are completed, then output NOTHING. And then follow the following requirements to continue output.}</TARGET>
32
+ <NUMBER>{If you are writing the character setting, output how many people need to be designed according to the requirement, including the main characters and secondary characters; if you are writing the first draft of the outline, output how many chapters need to be designed according to the requirement.}</NUMBER>
33
+ <THOUGHT>{If you are writing a character setting or giving your suggestions, output NONE; if you are writing the first draft of an outline, based on the characters, please conceptualize and output the main idea that needs to be expressed in the story, as well as how the main idea is expressed (how it develops), i.e., the impact of the various characters and interactions between characters on the plot and the main idea, with a word count of no less than 100 words.}</THOUGHT>
34
+
35
+ It is then divided into 4 conditions and formatted for output according to the corresponding conditions:
36
+
37
+ Condition 1. If TARGET is CHARACTER DESIGN:
38
+ <CHARACTER DESIGN>
39
+ <FIRST NAME>{Output the first name of the character to be designed}</FIRST NAME>
40
+ <LAST NAME>{Output the last name of the character to be designed}</LAST NAME>
41
+ <NAME>{Character Name}</NAME>
42
+ <ROLE>{Output it is a secondary or primary character}</ROLE>
43
+ <RATIONALES>{Output the effect that the design or introduction of the character has on the development of the story and the plot, and what the reader is expected to learn through the character}</RATIONALES>
44
+ <ID>{The i-th character}</ID>
45
+ <GENDER>{Gender of the character}</GENDER>
46
+ <AGE>{Age of the character}</AGE>
47
+ <WORK>{Work of the character}</WORK>
48
+ <PERSONALITY>{Personality of the character}</PERSONALITY>
49
+ <SPEECH STYLE>{Speaking style of the character}</SPEECH STYLE>
50
+ <RELATION>{Relations with other characters}</RELATION>
51
+ <BACKGROUND>{Character's background in about 50 words based on the character's job, personality and relations with other characters}</BACKGROUND>
52
+ </CHARACTER DESIGN>
53
+
54
+ Condition 2. If TARGET is OUTLINE DESIGN:
55
+ <OUTLINE DESIGN>
56
+ <RATIONALES>{Why this chapter is designed, how it connects to the previous chapter, what the chapter hopes to convey to the audience, and as much as possible, the cause and effect of the chapter}</RATIONALES>
57
+ <ID>{The i-th chapter}</ID>
58
+ <TITLE>{Chapter Title}</TITLE>
59
+ <CHARACTER INVOLVED>{Characters involved, try to make sure there are primary characters in each chapter}</CHARACTER INVOLVED>
60
+ <ABSTRACT>{Approximate plot of the chapter}</ABSTRACT>
61
+ </OUTLINE DESIGN>
62
+
63
+ Condition 3. If TARGET is ADVICE CHARACTER or ADVICE OUTLINE:
64
+ <ADVICE>
65
+ {First analyze the strengths and weaknesses of the current version (point out a specific chapter or character) without copying other people's opinions; if others have given opinions, analyze those as well. Then, based on the strengths and weaknesses, put forward detailed, concrete, non-abstract modifications, ideally with a specific direction of improvement; make the suggestions as detailed as possible and give the reasons for them}
66
+ </ADVICE>
67
+
68
+ If it has already been completed, no output is required.
69
+ """,
70
+ "query": \
71
+ """
72
+ Please provide opinions based on everyone's ideas and historical information, do not repeat yourself or others, and follow the format output below:
73
+ {}
74
+ """
75
+ },
76
+ "Abby": {
77
+ "system": \
78
+ """
79
+ You are Abby, you specialize in rewriting character designs and outlines based on suggestions, with years of relevant experience, and you will be working with two other people (Elmo, who is responsible for writing the first draft, and Zoe, who is responsible for providing suggestions and controlling the overall process, respectively) to complete the following task together:
80
+ {}
81
+ If the characters have been designed but the first draft of the outline hasn't been generated yet, the first draft of the outline is written by Elmo, so please output to have Elmo write the outline.
82
+
83
+
84
+ Here is the format of your output:
85
+ {}
86
+ Please follow the above format strictly for the output.
87
+
88
+
89
+ Here are the guidelines you must follow:
90
+ 1. no apologizing or thanking each other is allowed;
91
+ 2. if someone apologizes or thanks, remind and stop them immediately;
92
+ 3. do not do anything unrelated to the task;
93
+ 4. do not say repetitive things;
94
+ 5. remind and stop someone as soon as they say something repetitive.
95
+ """,
96
+ "output": \
97
+ """
98
+ <TARGET>{If you are rewriting the character setting, output CHARACTER DESIGN; if you are rewriting the first draft of the outline, output OUTLINE DESIGN; if you think that the current character setting and outline have been completed, and no other people have proposed modification opinions, output NOTHING.}</TARGET>
99
+ <RATIONALES>{Please analyze other people's suggestions step by step here (focusing on the comments given by <ADVICE>, <OUTLINE ADVICE> and <CHARACTER ADVICE>, first output the original comments, and then publish the details of the modification based on the comments), and write down detailed rewriting directions and ideas (don't copy the other people's comments), specific to a certain character or chapter, and pay attention to make sure that the sentences flow smoothly when rewriting, don't splice them together directly, and need to polish them up!}</RATIONALES>
100
+
101
+ Then output in different situations depending on the target:
102
+
103
+ If Zoe gives suggestions for the outline but Elmo has not yet written a first draft of the outline, no output is required. If neither Zoe nor Elmo has given any suggestions, no output is needed. No output is needed if the work has already been completed.
104
+
105
+ If the character setting is currently being discussed, please output it in the format below:
106
+ <NAME>{Character Name}</NAME>
107
+ <GENDER>{Character Gender}</GENDER>
108
+ <AGE>{Character Age}</AGE>
109
+ <WORK>{Character Work}</WORK>
110
+ <PERSONALITY>{Character Personality}</PERSONALITY>
111
+ <SPEECH STYLE>{Speaking style of the character}</SPEECH STYLE>
112
+ <RELATION>{Relations with other characters}</RELATION>
113
+ <BACKGROUND>{Character Background}</BACKGROUND>
114
+
115
+ If the outline is currently being discussed, please output it in the format below:
116
+ <ID>{The i-th chapter}</ID>
117
+ <TITLE>{Chapter Title}</TITLE>
118
+ <RATIONALES>{Why this chapter is designed, how it connects to the previous one, what reaction the chapter expects from the audience, and as much as possible, the cause and effect of the situation}</RATIONALES>
119
+ <CHARACTER INVOLVED>{Characters involved, try to make sure there are primary characters in each chapter}</CHARACTER INVOLVED>
120
+ <WORD COUNT>{required number of words}</WORD COUNT>
121
+ <ABSTRACT>{Rewrite chapter abstracts based on previous suggestions and original content}</ABSTRACT>
122
+
123
+ """,
124
+ "query": \
125
+ """
126
+ Please rewrite it in detail based on everyone's suggestions and historical information and output it in the format below:
127
+ {}
128
+ """
129
+ },
130
+ "Zoe": {
131
+ "system": \
132
+ """
133
+ You are Zoe, and you are responsible for the overall control of the task and for providing suggestions on the outline and characters. Together with two other people (Elmo, who writes the first draft, and Abby, who rewrites it based on suggestions), you will complete the following tasks:
134
+ {}
135
+ Note that no more than three rounds may be spent discussing character settings.
136
+
137
+ Your output is formatted as:
138
+ {}
139
+ Please follow the above format strictly for the output.
140
+
141
+ Here are the guidelines you must follow:
142
+ 1. no apologizing or thanking each other is allowed;
143
+ 2. if someone apologizes or thanks, remind and stop them immediately;
144
+ 3. do not do anything unrelated to the task;
145
+ 4. do not say repetitive things;
146
+ 5. remind and stop someone as soon as they say something repetitive;
147
+ 6. as soon as someone deviates from the topic, please correct them immediately.
148
+ """,
149
+ "output": \
150
+ """
151
+ <CHARACTER DESIGN>{If done, output DONE, otherwise output DOING}</CHARACTER DESIGN>
152
+ <CHARACTER REQUIRE>{Output character requirements based on the task, e.g. number of characters, necessary characters, etc.}</CHARACTER REQUIRE>
153
+ <CHARACTER NAME>{Analyze the composition of the names of existing characters one by one, and analyze whether they are legal or not. `first name + last name` is the legal format of name, and output `name (composition, legal or not legal)`, e.g., Doctor Smith (occupation + last name, not legal), Little Jack (nickname, not legal), Bob Green (first name + last name, legal)}</CHARACTER NAME>
154
+ <CHARACTER NOW>{According to the CHARACTER NAME field, point out the illegal names, and according to the existing characters, output the current number of characters, character names, etc., and compare the CHARACTER REQUIRE field, analyze and output whether it meets the requirements, such as the number of characters and character naming (whether it meets the first name + last name), etc., and point out if the name naming doesn't meet this format.}</CHARACTER NOW>
155
+ <OUTLINE DESIGN>{If complete, output DONE; if the character design is not yet finalized, output TODO; if the character design is complete and outline writing is underway, output DOING}</OUTLINE DESIGN>
156
+ <OUTLINE REQUIRE>{Output outline requirements based on the task, such as the number of chapters, word count requirements, chapter content requirements, etc.}</OUTLINE REQUIRE>
157
+ <OUTLINE NOW>{Based on the existing outline, output the number of chapters in the current outline, the number of words and the requirements of the chapters, etc., and compare the OUTLINE REQUIRE field to determine whether the requirements are met. If there is no outline yet, then output None}</OUTLINE NOW>
158
+ <SUB TASK>{The current subtask that needs to be completed; output CHARACTER, OUTLINE or None.}</SUB TASK>
159
+ <CHARACTER ADVICE>
160
+ {If the current task is CHARACTER, give suggestions for modification in separate lines according to the CHARACTER NOW and CHARACTER NAME fields. If the number of characters is not satisfied, you can add characters, **but don't exceed the required number** (don't add extra characters); if you are not satisfied with a certain character, you can modify the character's name, occupation, etc. Note that the suggestions need to be detailed and give reasons; besides content suggestions, also point out if the number of characters does not meet the requirements or fields are missing, and make sure character names follow the `first name + last name` format (other formats do not meet the requirements). If the current task is OUTLINE, then output None; if you believe that the current character design has been completed, output DONE}
161
+ </CHARACTER ADVICE>
162
+ <OUTLINE ADVANTAGE>{Analyze and output the benefits of the current outline; if the current task is not outline, output None}</OUTLINE ADVANTAGE>
163
+ <OUTLINE DISADVANTAGE>{Analyze and output the disadvantages of the current outline in detail (including, but not limited to, whether or not the outline contains all of the characters mentioned), and try to make sure that each chapter has primary characters involved; if the current task is not outline, then output None}</OUTLINE DISADVANTAGE>
164
+ <OUTLINE ADVICE>
165
+ {If the current task is CHARACTER, then output None; if the current task is OUTLINE, according to the advantages and disadvantages and the suggestions of others, output a detailed proposal in separate lists, and give the reasons, the content of the proposal needs to include the chapter and the content of the proposal, in addition to suggestions on the content, if the number of chapters is not in accordance with the requirements or lacking fields, you need to propose; if the current task is None, then output None}
166
+ </OUTLINE ADVICE>
167
+ <NEXT>{If you think there are no more modifications to the current character, output: "Let Elmo write the first version of the outline"; if you think the current character needs to be modified and you have given your suggestions, output: "Let Abby modify the character"; if you think suggestions for modifications to the outline have been given, output: "Let Abby modify the outline"; if you think the outline and the character have been completed, output end}</NEXT>
168
+ """,
169
+ "query": \
170
+ """
171
+ Please provide suggestions or take control of the process based on the information above, and output in the format below:
172
+ {}
173
+ """
174
+ }
175
+ },
176
+ "summary": {
177
+ "system": \
178
+ """
179
+ You are a person who is good at extracting the main content from a multi-person conversation in a specified format. The task now is:
180
+ {}
181
+
182
+ I will give you a series of multiple rounds of dialogues with different characters, from which you will need to extract as required, and for content, please try to extract as much as you can from the dialogues as they are, rather than summarizing them.
183
+ Your output format is:
184
+ {}
185
+ Please follow the above format strictly for the output.
186
+ """,
187
+ "output": \
188
+ """
189
+
190
+ <CHARACTERS>
191
+ <TOTAL NUMBER>{Total number of characters}</TOTAL NUMBER>
192
+ <CHARACTER i>
193
+ <NAME>{Name of the i-th character}</NAME>
194
+ <GENDER>{Gender of the i-th character}</GENDER>
195
+ <WORK>{Work of the i-th character}</WORK>
196
+ <AGE>{Age of the i-th character}</AGE>
197
+ <PERSONALITY>{Personality of the i-th character}</PERSONALITY>
198
+ <SPEECH STYLE>{Speaking style of the i-th character}</SPEECH STYLE>
199
+ <RELATION>{Relations with others of the i-th character}</RELATION>
200
+ <BACKGROUND>{Background of the i-th character}</BACKGROUND>
201
+ </CHARACTER i>
202
+ ...
203
+ </CHARACTERS>
204
+
205
+ <OUTLINE>
206
+ <TOTAL NUMBER>{Total number of chapters in the outline}</TOTAL NUMBER>
207
+ <SECTION i>
208
+ <TITLE>{Title of chapter i}</TITLE>
209
+ <CHARACTER INVOLVED>{Characters mentioned in chapter i}</CHARACTER INVOLVED>
210
+ <ABSTRACT>{Abstract of chapter i}</ABSTRACT>
211
+ <RATIONALES>{Function of chapter i in the whole story, desired audience response}</RATIONALES>
212
+ </SECTION i>
213
+ ...
214
+ </OUTLINE>
215
+
216
+ """,
217
+ "query": \
218
+ """
219
+ The following multiple conversations discuss the outline and character settings of the first version, so please try to extract as much as you can from the conversations as they are, rather than summarizing them:
220
+ {}
221
+ """
222
+ }
223
+ },
224
+
225
+ "Node 2": {
226
+ "task": \
227
+ """
228
+ Below are the character settings and outline of a script:
229
+ <VERSION 1>
230
+ {}
231
+ </VERSION 1>
232
+ {}
233
+ """,
234
+ "agents": {
235
+ "Ernie": {
236
+ "system": \
237
+ """
238
+ You are Ernie, who is responsible for the initial expansion of the outline and making suggestions (mainly from the perspective of character and story diversity), and you will be working with two other people (Bert, who expands and rewrites the outline based on suggestions, and Oscar, who takes control of the overall process and provides suggestions) on the following tasks:
239
+ {}
240
+
241
+ In addition to this, you will need to work with Oscar to provide comments or suggestions on Bert's rewritten outline when there is an expanded version of a particular chapter.
242
+ When outputting, you first need to output the current task, and then choose different output formats according to the task:
243
+ {}
244
+
245
+ Here are the guidelines you must follow:
246
+ 1. no apologizing or thanking each other is allowed;
247
+ 2. if someone apologizes or thanks, remind and stop them immediately;
248
+ 3. do not do anything unrelated to the task;
249
+ 4. do not say repetitive things;
250
+ 5. remind and stop someone as soon as they say something repetitive;
251
+ 6. the outline expansion should be story-rich and not empty.
252
+ """,
253
+ "output": \
254
+ """
255
+ <TARGET>{Output CHAPTER i if your current task is to expand the content of chapter i; Output ADVICE CHAPTER i if your current task is to advise on the content of chapter i, and advise on the following}</TARGET>
256
+ Then output in different situations according to the target:
257
+ If modifications are currently being discussed, please follow the format below for output:
258
+ <ANALYZE>{Compare the latest rewrite with Oscar's previous suggestions, and then analyze in detail whether all of the rewrites are in accordance with the rewrite's requirements (it is recommended to mention the expanded content and previous suggestions). Next, analyze whether the characters are related, continuous, etc. in the various plots, and assess whether the stories in the plots are coherent and engaging, and whether the individual stories are detailed, and if not, please point out that}</ANALYZE>
259
+ <EXTENSION ADVICE>
260
+ {Based on the latest rewrite and the analysis above, give suggestions for modifications to the diversity of the characters and the plot, one for each, which should be detailed and reasonable. }
261
+ </EXTENSION ADVICE>
262
+
263
+ If you currently need to expand a particular chapter, please follow the format below for output according to the chapter corresponding to the outline:
264
+ <TITLE>{Title of chapter i}</TITLE>
265
+ <ABSTRACT>{Abstract of chapter i}</ABSTRACT>
266
+ <CHARACTER INVOLVED>{The names of the characters mentioned in the i-th chapter}</CHARACTER INVOLVED>
267
+ <ROLE>{Output the function of the current chapter in the whole text, including the function for the theme, the function for the audience}</ROLE>
268
+ <THINK>{Based on the full text outline, the abstract of the current chapter, and what has been expanded, think about how many plots (at least 3) the i-th chapter needs to be divided into, the spatial and temporal relationships that need to exist between the plots, and briefly conceptualize what each plot will be about. Ensure that there are compelling beginnings, goals and conflicts, climaxes, suspense, and emotional elements interspersed with each other}</THINK>
269
+ <PLOT NUMBER>{Total number of plots expanded in the current chapter}</PLOT NUMBER>
270
+ <CONTENT>
271
+ <PLOT i>
272
+ <CHARACTER INVOLVED>{Characters involved in the i-th plot}</CHARACTER INVOLVED>
273
+ <DESCRIPTION>{Detailed description of the i-th plot, with at least one small, detailed piece of storytelling, noting the need to take into account previous plots, outlines, and expansions, to ensure spatial and temporal continuity and logic, and to ensure smooth flow and storytelling}</DESCRIPTION>
274
+ </PLOT i>
275
+ ...
276
+ </CONTENT>
277
+
278
+ If it is considered that there is no current need for modification and is fully compliant, then output None.
279
+ """,
280
+ "query": \
281
+ """
282
+ Please provide comments on the expansion of Chapter {} based on other people's comments, rewritten content, and historical information, output in the format below:
283
+ {}
284
+ Please don't repeat Oscar's words.
285
+ """,
286
+ },
287
+ "Bert": {
288
+ "system": \
289
+ """
290
+ You are Bert, you specialize in rewriting and expanding outlines based on comments and have many years of experience in this field, your writing style is beautiful and vivid, you will work with two other people (Ernie, who is in charge of expanding and suggesting outlines for the first version of the outline, and Oscar, who is in charge of controlling the overall process and providing suggestions) to complete the following tasks:
291
+ {}
292
+
293
+ Here is the format of your output:
294
+ {}
295
+ Please follow the above format strictly for the output.
296
+
297
+ Here are the guidelines you must follow:
298
+ 1. no apologizing or thanking each other is allowed;
299
+ 2. if someone apologizes or thanks, remind and stop them immediately;
300
+ 3. do not do anything unrelated to the task;
301
+ 4. do not say repetitive things;
302
+ 5. remind and stop someone as soon as they say something repetitive;
303
+ 6. the outline expansion should be story-rich and not empty.
304
+ """,
305
+ "output": \
306
+ """
307
+ <TARGET>{Output EXPANDING CHAPTER i if the comments are rewriting a specific chapter; output None if everyone is satisfied and there are no comments}</TARGET>
308
+
309
+ If a specific chapter is currently being discussed, please rewrite it based on the comments and output it in the format below:
310
+ <RATIONALES>{Please analyze here, line by line, based on others' suggestions and the original content, and write a short abstract of the rewritten content}</RATIONALES>
311
+ <TITLE>{Title of chapter i}</TITLE>
312
+ <ABSTRACT>{Summary of chapter i}</ABSTRACT>
313
+ <CHARACTER INVOLVED>{The names of the characters involved in the i-th chapter}</CHARACTER INVOLVED>
314
+ <STORY NUMBER>{Total number of plots expanded in the current chapter}</STORY NUMBER>
315
+ <CONTENT>
316
+ <PLOT i>
317
+ <CHARACTER INVOLVED>{Characters involved in the i-th plot}</CHARACTER INVOLVED>
318
+ <DESCRIPTION>{Rewrite the i-th plot based on the previous comments and your own rewriting ideas, and in conjunction with the previous content, paying attention to the need to take into account the previous plots, outlines, and expansions, to ensure spatial and temporal continuity and logic, in addition to the need to ensure that the line of the text is smooth and storytelling}</DESCRIPTION>
319
+ </PLOT i>
320
+ ...
321
+ </CONTENT>
322
+ """,
323
+ "query": \
324
+ """
325
+ Please rewrite chapter {} in detail based on everyone's suggestions and information from history, and output it in the format below:
326
+ {}
327
+ """,
328
+ },
329
+ "Oscar": {
330
+ "system": \
331
+ """
332
+ You are Oscar, and you are responsible for the overall control of the task and for providing detailed suggestions on the expanded outline (mainly from the perspective of plot, logic, conflict, and characters), and you will be working with two other people (Ernie, who is responsible for the first version of the outline, and for suggesting the outline, and Bert, who is responsible for expanding and rewriting the outline based on the suggestions), to accomplish the following tasks:
333
+ {}
334
+
335
+ Your output format is:
336
+ {}
337
+ Please follow the above format strictly for the output.
338
+
339
+ Here are the guidelines you must follow:
340
+ 1. no apologizing or thanking each other is allowed;
341
+ 2. if someone apologizes or thanks, remind and stop them immediately;
342
+ 3. do not do anything unrelated to the task;
343
+ 4. do not say repetitive things;
344
+ 5. remind and stop someone as soon as they say something repetitive;
345
+ 6. as soon as someone deviates from the topic, please correct them immediately.
346
+ """,
347
+ "output": \
348
+ """
349
+ <SCRIPT OUTLINE EXTENSION>{Determine whether the current expanded chapter is logically self-consistent and meets the requirements, if so then output DONE, otherwise output DOING}</SCRIPT OUTLINE EXTENSION>
350
+ <EXTENSION REQUIREMENT>{Output the word count requirements for the current expanded chapter based on the task requirements}</EXTENSION REQUIREMENT>
351
+ <EXTENSION NOW>{Output the number of chapters, word count, etc. that have been expanded according to the latest expanded content}</EXTENSION NOW>
352
+ <SUB TASK>{Output the title of the chapter that is currently being expanded, output CHAPTER i, and the following suggestions are also made for this chapter}</SUB TASK>
353
+ <ADVANTAGE>{Output the strengths of the current latest expansion, line by line, including but not limited to whether the plot, textual presentation, logic is self-explanatory, fields are missing, whether there is a strong conflict, unexpected, whether the characters involved in the current chapter (not the characters of the entire outline) are present in all the plots of the current chapter, whether the story is described in detail, etc., being different from the opinions of other people}</ADVANTAGE>
354
+ <DISADVANTAGE>{Output the current disadvantages of the latest expansion content line by line (do not repeat Ernie's words), including but not limited to whether the plot, textual presentation, logic is self-consistent, fields are missing, whether there is a strong conflict, unexpected, whether the characters involved in the current chapter (not the characters of the entire outline) appear in all the plots of the current chapter, whether the story is described in detail, etc., and be different from other people's opinions}</DISADVANTAGE>
355
+ <HISTORY ADVICE>{Output Ernie's historical suggestions, line by line.}</HISTORY ADVICE>
356
+ <EXTENSION ADVICE>
357
+ {Based on the strengths and weaknesses, the expanded plot, and your thoughts, provide suggestions for modifying the text description, plot fluency, and story one by one. The modification suggestions need to specify which specific plot, and the suggestions should be as detailed as possible to ensure that the plot has storytelling, which should be different from Ernie's opinions}
358
+ </EXTENSION ADVICE>
359
+ Be careful not to repeat what others say.
360
+ """,
361
+ "query": \
362
+ """
363
+ Please provide feedback on the content of Chapter {} and others' opinions based on the information, outline, and previously expanded chapters above, or control the process and output in the following format:
364
+ {}
365
+ Please don't repeat Ernie's words.
366
+ """,
367
+ }
368
+ },
369
+ "summary": {
370
+ "system": \
371
+ """
372
+ You are skilled at extracting the main content from multiple conversations in a specified format. The current task is:
373
+ {}
374
+
375
+ I will give you a series of multiple rounds of dialogues with different characters, from which you will need to extract as required, and for content, please try to extract as much as you can from the dialogues as they are, rather than summarizing them.
376
+ Your output format is:
377
+ {}
378
+ Please follow the above format strictly for the output.
379
+ """,
380
+ "output": \
381
+ """
382
+ # Chapter {i} {Title of chapter i}
383
+ > There are a total of {n} plots
384
+
385
+ ## Plot j
386
+ - Characters involved: {names of the characters involved in the j-th plot of chapter i}
387
+ - Specifics: {the specifics of the expanded j-th plot of chapter i. Don't rewrite or abbreviate, just take it directly from someone else}
388
+ """,
389
+ "query": \
390
+ """
391
+ The following multiple conversations discuss expanding the plots of chapter {} based on the outline; please try to extract the plots of chapter {} from the conversations as they are, rather than summarizing them:
392
+ {}
393
+ """
394
+ }
395
+ }
396
+ }
397
+
398
+
399
+
400
+
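Because the node runners in cmd_outline.py format these strings positionally (system with the task and output, query with the output), a small structural sanity check can catch missing keys early; a sketch, assuming PROMPT.py is importable as it is below:

    # Sketch: verify every node defines task/agents/summary and every agent
    # defines the system/output/query triple that the node runners expect.
    from PROMPT import NOVEL_PROMPT

    for node, spec in NOVEL_PROMPT.items():
        assert {"task", "agents", "summary"} <= spec.keys(), node
        for agent, prompts in spec["agents"].items():
            assert {"system", "output", "query"} <= prompts.keys(), (node, agent)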
novel-server/cmd_outline.py ADDED
@@ -0,0 +1,474 @@
1
+ import copy
2
+ import os
3
+ from myagent import Node, MyAgent, ask_gpt, Client
4
+ from typing import List, Tuple
5
+ from PROMPT import NOVEL_PROMPT
6
+ from myutils import print_log, new_parse
7
+ import json
8
+
9
+ class FirstNode(Node):
10
+ def __init__(
11
+ self,
12
+ name: str,
13
+ agents: List[MyAgent],
14
+ start_agent_name: str,
15
+ start_agent_query: str,
16
+ summary_agent: MyAgent = None,
17
+ save: bool=True,
18
+ stream_output: bool=True,
19
+ output_func=None,
20
+ ):
21
+ super(FirstNode, self).__init__(agents, summary_agent, save)
22
+ self.name = name
23
+ self.output_func = output_func
24
+ self.stream_output = stream_output
25
+ self.start_agent_name = start_agent_name
26
+ self.start_agent_query = start_agent_query
27
+ if self.stream_output:
28
+ print_log("stream output ......")
29
+ assert self.start_agent_name in self.agents, \
30
+ f"invalid agent name `{self.start_agent_name}`"
31
+
32
+ def start(self):
33
+ self.agents[self.start_agent_name].prepare_message(
34
+ self.start_agent_query+"\n"+self.agents[self.start_agent_name].query
35
+ )
36
+ self.agents[self.start_agent_name].output_message(
37
+ recorder=self.recorder, stream=self.stream_output, output_func=self.output_func,
38
+ node_name=self.name
39
+ )
40
+ # if self.stream_output:
41
+ # print(f"【{self.start_agent_name}】 ", end="")
42
+ # for chunk in self.agents[self.start_agent_name].send_message(recorder=self.recorder, stream=self.stream_output):
43
+ # if chunk is not None:
44
+ # print(chunk, end="")
45
+ # print()
46
+ # else:
47
+ # self.agents[self.start_agent_name].send_message(recorder=self.recorder, stream=self.stream_output)
48
+ # self.print(agent_name=self.start_agent_name)
49
+
50
+ def communicate(self):
51
+ for turn in range(2):
52
+ for agent_name in ["Zoe", "Abby", "Elmo"]:
53
+ history = self.recorder.prepare(
54
+ agent_name=agent_name,
55
+ agents=self.agents,
56
+ return_dict=False
57
+ )
58
+ query = self.agents[agent_name].query
59
+ if isinstance(history, str):
60
+ history = f"The following are the conversations of others, with what someone says wrapped in `<name>...</name>`: \n{history}\n{query}"
61
+ elif isinstance(history, list):
62
+ history.append({"role": "user", "content": query})
63
+ self.agents[agent_name].prepare_message(
64
+ history
65
+ )
66
+ self.agents[agent_name].output_message(
67
+ recorder=self.recorder, stream=self.stream_output,
68
+ output_func=self.output_func, node_name=self.name
69
+ )
70
+ # if not self.stream_output:
71
+ # self.agents[agent_name].send_message(
72
+ # recorder=self.recorder, stream=self.stream_output)
73
+ # self.print(agent_name=agent_name)
74
+ # else:
75
+ # print(f"【{agent_name}】 ", end="")
76
+ # for chunk in self.agents[agent_name].send_message(
77
+ # recorder=self.recorder, stream=self.stream_output):
78
+ # if chunk is not None:
79
+ # print(chunk, end="")
80
+ # print()
81
+
82
+ def end(self):
83
+ temperature_copy = MyAgent.TEMPERATURE
84
+ MyAgent.TEMPERATURE = 0
85
+ message: str = self.summary_agent.query.format(
86
+ self.recorder.prepare(agent_name="all", agents=self.agents, return_dict=False)
87
+ )
88
+ self.summary_agent.prepare_message(message)
89
+ self.summary_agent.output_message(
90
+ recorder=self.recorder, stream=self.stream_output,
91
+ output_func=self.output_func, node_name=self.name
92
+ )
93
+ # if not self.stream_output:
94
+ # self.summary_agent.send_message(recorder=self.recorder, stream=self.stream_output)
95
+ # print(f"【summary】{self.summary_agent.get_message(index=-1)}")
96
+ # else:
97
+ # print("【summary】 ", end="")
98
+ # for
99
+ MyAgent.TEMPERATURE = temperature_copy
100
+ content = self.summary_agent.get_message(index=-1)
101
+ # start to parse ====================================================
102
+ data_dict = new_parse(content, labels=[], return_dict=True)
103
+ # 1. parse character
104
+ characters_card: dict = {}
105
+ for key in data_dict['CHARACTERS']:
106
+ """default success"""
107
+ if 'CHARACTER' in key:
108
+ characters_card[
109
+ data_dict['CHARACTERS'][key]['NAME']
110
+ ] = {
111
+ "role_name": data_dict['CHARACTERS'][key]['NAME'],
112
+ "gender": data_dict['CHARACTERS'][key]['GENDER'],
113
+ "age": data_dict['CHARACTERS'][key]['AGE'],
114
+ "occupation": data_dict['CHARACTERS'][key]['WORK'],
115
+ "personality": data_dict['CHARACTERS'][key]['PERSONALITY'],
116
+ "speaking_style": data_dict['CHARACTERS'][key]['SPEECH STYLE'],
117
+ "relation_with_others": data_dict['CHARACTERS'][key]['RELATION'],
118
+ "background": data_dict['CHARACTERS'][key]['BACKGROUND']
119
+ }
120
+ # file_name = "character_settings.json"
121
+ try:
122
+ os.mkdir("novel_outline")
123
+ except FileExistsError:
124
+ pass
125
+ file_name = "./novel_outline/character_settings.json"
126
+ with open(file_name, "w") as json_file:
127
+ json.dump(characters_card, json_file, ensure_ascii=False)
128
+ print("Save Successfully")
129
+ # 2. converted to markdown
130
+ return_content = "<CHARACTERS>\n# Character\n> There are a total of {} characters\n\n".format(
131
+ data_dict['CHARACTERS']['TOTAL NUMBER'],
132
+ )
133
+ cnt = 1
134
+ for key in data_dict['CHARACTERS']:
135
+ """default success"""
136
+ if 'CHARACTER' in key:
137
+ return_content += "## Character{}\n- Gender: {}\n- Name: {}\n- Age: {}\n- Work: {}\n- Personality: {}\n- Speaking Style: {}\n- Relation with Others: {}\n- Background: {}\n\n".format(
138
+ cnt, data_dict['CHARACTERS'][key]['GENDER'], data_dict['CHARACTERS'][key]['NAME'],
139
+ data_dict['CHARACTERS'][key]['AGE'],
140
+ data_dict['CHARACTERS'][key]['WORK'],
141
+ data_dict['CHARACTERS'][key]['PERSONALITY'], data_dict['CHARACTERS'][key]['SPEECH STYLE'],
142
+ data_dict['CHARACTERS'][key]['RELATION'],
143
+ data_dict['CHARACTERS'][key]['BACKGROUND']
144
+ )
145
+ cnt += 1
146
+ return_content += "</CHARACTERS>\n\n<OUTLINE>\n# outline\n> There are a total of {} chapters\n\n".format(
147
+ data_dict['OUTLINE']['TOTAL NUMBER']
148
+ )
149
+ cnt = 1
150
+ for key in data_dict['OUTLINE']:
151
+ if 'SECTION' in key:
152
+ return_content += "## Chapter {} {}\n- Characters Involved: {}\n- Story Summary: {}\n\n".format(
153
+ cnt, data_dict['OUTLINE'][key]['TITLE'], data_dict['OUTLINE'][key]['CHARACTER INVOLVED'],
154
+ data_dict['OUTLINE'][key]['ABSTRACT']
155
+ )
156
+ cnt += 1
157
+ return_content += "</OUTLINE>"
158
+ return return_content
159
+
160
+ def print(self, agent_name):
161
+ print(f"【{agent_name}】{self.agents[agent_name].get_message(index=-1)}")
162
+
163
+ class SecondNode(Node):
164
+ def __init__(
165
+ self,
166
+ name: str,
167
+ agents: List[MyAgent],
168
+ start_agent_name: str,
169
+ start_agent_query: str,
170
+ summary_agent: MyAgent = None,
171
+ save: bool=True,
172
+ stream_output:bool=True,
173
+ output_func=None
174
+ ):
175
+ super(SecondNode, self).__init__(agents, summary_agent, save)
176
+ self.name = name
177
+ self.stream_output = stream_output
178
+ self.start_agent_name = start_agent_name
179
+ self.start_agent_query = start_agent_query
180
+ self.output_func = output_func
181
+ assert self.start_agent_name in self.agents, \
182
+ f"invalid agent name `{self.start_agent_name}`"
183
+ self.temperature = [0.3, 0.3, 0.3]
184
+ if self.stream_output:
185
+ print("streaming output ......")
186
+
187
+ def start(self):
188
+ MyAgent.TEMPERATURE = self.temperature[0]
189
+ self.agents[self.start_agent_name].prepare_message(
190
+ self.start_agent_query+"\n"+self.agents[self.start_agent_name].query
191
+ )
192
+ self.agents[self.start_agent_name].output_message(
193
+ recorder=self.recorder, stream=self.stream_output, output_func=self.output_func,
194
+ node_name=self.name
195
+ )
196
+ # self.agents[self.start_agent_name].send_message(recorder=self.recorder)
197
+ # print(f"【{self.start_agent_name}】{self.agents[self.start_agent_name].get_message(index=-1)}")
198
+ # self.print(agent_name=self.start_agent_name)
199
+
200
+ def communicate(self):
201
+ MyAgent.TEMPERATURE = self.temperature[1]
202
+ """to store output,e.g. Chapter i"""
203
+ self.output_memory = []
204
+ for turn in range(2):
205
+ for agent_name in ["Oscar", "Bert", "Ernie"]:
206
+ if agent_name == "Bert":
207
+ MyAgent.TEMPERATURE = 0.3
208
+ else:
209
+ MyAgent.TEMPERATURE = turn*0.1 + 0.5
210
+ history = self.recorder.prepare(
211
+ agent_name=agent_name,
212
+ agents=self.agents,
213
+ return_dict=False
214
+ )
215
+
216
+ # print(f"===========START {agent_name}==========")
217
+ # print(history)
218
+ # print("================END===============")
219
+ query = self.agents[agent_name].query
220
+ if isinstance(history, str):
221
+ history = f"The following are the conversations of others, with what someone says wrapped in `<name>...</name>`: \n{history}\n{query}"
222
+ elif isinstance(history, list):
223
+ history.append({"role": "user", "content": query})
224
+ self.agents[agent_name].prepare_message(
225
+ history
226
+ )
227
+ self.agents[agent_name].output_message(
228
+ recorder=self.recorder, stream=self.stream_output,
229
+ output_func=self.output_func, node_name=self.name
230
+ )
231
+ # self.agents[agent_name].send_message(recorder=self.recorder)
232
+ # self.print(agent_name=agent_name)
233
+
234
+ def end(self):
235
+ MyAgent.TEMPERATURE = self.temperature[2]
236
+
237
+ content = self.agents["Bert"].get_message(-1)
238
+ index = -1
239
+ while "none" in content.lower():
240
+ index -= 1
241
+ content = self.agents["Bert"].get_message(index)
242
+ try:
243
+ def save(data: dict):
244
+ for idx, (key, value) in enumerate(data.items()):
245
+ # file_name = f"{self.name.replace(' ', '')}-plot-{idx+1}.json"
246
+ try:
247
+ os.mkdir("novel_outline")
248
+ except FileExistsError:
249
+ pass
250
+ file_name = f"./novel_outline/{self.name.replace(' ', '')}-plot-{idx+1}.json"
251
+ _characters:str = value["CHARACTER INVOLVED"]
252
+ characters = ask_gpt(
253
+ system_prompt="You are very good at structuring text, please make sure you structure your output as follows, with no other extra chars, in addition, extract from the given text as much as possible, rather than summarizing.",
254
+ input=f"""Here is a sentence: "{_characters}"\n What are the names of the people in the sentence above? Please use a semicolon to separate the names, e.g. "name 1; name 2", taking care not to add any words before or after."""
255
+ ).split(";")
256
+ characters = [c.strip() for c in characters]
257
+ print("parsed characters:", characters)
258
+ output = {
259
+ "plot": value["DESCRIPTION"],
260
+ "characters": characters #value["CHARACTER INVOLVED"]
261
+ }
262
+ with open(file_name, "w") as json_file:
263
+ json.dump(output, json_file, ensure_ascii=False)
264
+ # json_file.writelines(output)
265
+ # data_dict = parse(copy.deepcopy(content), labels=["CONTENT"], return_dict=True)["CONTENT"]
266
+ data_dict = new_parse(content, labels=["CONTENT"], return_dict=True)["CONTENT"]
267
+ if data_dict is None or len(data_dict) == 0:
268
+ assert False
269
+ save(data_dict)
270
+ data_str = new_parse(content, labels=["CONTENT"], return_dict=False)
271
+ if len(data_str) == 0:
272
+ assert False
273
+ print_log("Save successfully")
274
+ print(data_str)
275
+
276
+ if self.output_func:
277
+ self.output_func(0, "Recorder", data_str[0], self.name)
278
+ self.output_func(21, "Recorder", data_str[1:], self.name)
279
+ return data_str
280
+ except Exception as e:
281
+ raise e
282
+
283
+
284
+ def print(self, agent_name):
285
+ print(f"【{agent_name}】{self.agents[agent_name].get_message(index=-1)}")
286
+
287
+ def generate_first_agents(task_prompt:str=None) -> Tuple[List[MyAgent], MyAgent]:
288
+ prompts_set = NOVEL_PROMPT["Node 1"]
289
+ if task_prompt is not None:
290
+ print_log("The default task prompt has been replaced!")
291
+ NOVEL_PROMPT["Node 1"]["task"] = task_prompt
292
+ prompts_task = prompts_set["task"]
293
+ prompts_agents = prompts_set["agents"]
294
+ agents_list = []
295
+ for agent_name in prompts_agents:
296
+ agents_list.append(
297
+ MyAgent(
298
+ name=agent_name,
299
+ SYSTEM_PROMPT=prompts_agents[agent_name]["system"].format(
300
+ prompts_task, prompts_agents[agent_name]["output"]
301
+ ),
302
+ query=prompts_agents[agent_name]["query"].format(
303
+ prompts_agents[agent_name]["output"]
304
+ )
305
+ )
306
+ )
307
+ summary_agent = MyAgent(
308
+ name="summary",
309
+ SYSTEM_PROMPT=prompts_set["summary"]["system"].format(
310
+ prompts_task, prompts_set["summary"]["output"]
311
+ ),
312
+ query=prompts_set["summary"]["query"]
313
+ )
314
+
315
+ return agents_list, summary_agent
316
+
317
+ def generate_second_agents() -> Tuple[List[MyAgent], MyAgent]:
318
+ prompts_set = NOVEL_PROMPT["Node 2"]
319
+ prompts_task = prompts_set["task"] # .format(outline, other)
320
+ prompts_agents = prompts_set["agents"]
321
+ agents_list = []
322
+ for agent_name in prompts_agents:
323
+ agents_list.append(
324
+ MyAgent(
325
+ name=agent_name,
326
+ SYSTEM_PROMPT=prompts_agents[agent_name]["system"].format(
327
+ prompts_task, prompts_agents[agent_name]["output"]
328
+ ),
329
+ query=prompts_agents[agent_name]["query"].format(
330
+ prompts_agents[agent_name]["output"]
331
+ )
332
+ )
333
+ )
334
+ summary_agent = MyAgent(
335
+ name="summary",
336
+ SYSTEM_PROMPT=prompts_set["summary"]["system"].format(
337
+ prompts_task, prompts_set["summary"]["output"]
338
+ ),
339
+ query=prompts_set["summary"]["query"]
340
+ )
341
+
342
+ return agents_list, summary_agent
343
+
344
+ def run_node_1(stream_output:bool=False, output_func=None,
345
+ start_agent_name:str="Elmo", start_agent_query:str="Let's start by writing a first draft of the character settings.",
346
+ task_prompt=None):
347
+ print("node 1 start ...")
348
+ first_agents, first_summary = generate_first_agents(task_prompt=task_prompt)
349
+ first_node = FirstNode(
350
+ name="Node 1",
351
+ agents=first_agents,
352
+ summary_agent=first_summary,
353
+ save=True,
354
+ start_agent_name=start_agent_name,
355
+ start_agent_query=start_agent_query,
356
+ stream_output=stream_output,
357
+ output_func=output_func,
358
+ )
359
+ output = first_node.run()
360
+ print("node 1 done ...")
361
+ return output
362
+
363
+ def run_node_2(outline, node_start_index=2, stream_output:bool=False, output_func=None):
364
+ num2cn = ["ONE","TWO","THREE","FOUR","FIVE","SIX","SEVEN","EIGHT","NINE","TEN","ELEVEN","TWELVE"]
365
+ def generate_task_end_prompt(memory: list) -> str:
366
+ if len(memory) == 0:
367
+ return "\nThere are 5 chapters in total; please enrich the plot of the first chapter according to the outline above. The content should be story-driven and logical, written mainly in the third person, without dialogue descriptions and without empty words. The plot of the first chapter is at least 800 words."
368
+ else:
369
+ start_prompt = f"\nThe following are the contents of chapter {', '.join(num2cn[0:len(memory)])}, which have been expanded: \n<EXPANDED>\n"
370
+ for i in range(len(memory)):
371
+ start_prompt = f"\n{start_prompt}<CHAPTER {i+1}>\n{memory[i]}\n</CHAPTER {i+1}>"
372
+ start_prompt += "\n</EXPANDED>\n"
373
+ end_prompt = f"\nPlease enrich the plot of chapter {num2cn[len(memory)]} based on the outline above and the already expanded content of chapter {', '.join(num2cn[0:len(memory)])}. " \
374
+ f"Content is noted to be storytelling, logical, and in the third person point of view, not involving descriptions of dialog, and without empty words. The plot of chapter {num2cn[len(memory)]} is at least 800 words."
375
+ return start_prompt + end_prompt
376
+
377
+ output_memory = []
378
+ start_agent_names = ["Ernie", "Ernie", "Ernie", "Ernie", "Ernie"]
379
+ start_agent_queries = [f"Let's start by expanding on chapter {num2cn[i]} as required" for i in range(5)]
380
+ ORIGIN_TASK_PROMPT = NOVEL_PROMPT["Node 2"]["task"]
381
+ ORIGIN_QUERY_PROMPT = {}
382
+ ORIGIN_SUMMARY_PROMPT = NOVEL_PROMPT["Node 2"]["summary"]["query"]
383
+ for idx in range(3):
384
+ node_idx = idx + node_start_index
385
+
386
+ NOVEL_PROMPT["Node 2"]["task"] = ORIGIN_TASK_PROMPT.format(
387
+ outline,
388
+ generate_task_end_prompt(output_memory)
389
+ )
390
+
391
+ for agent_name in NOVEL_PROMPT["Node 2"]["agents"]:
392
+ if agent_name not in ORIGIN_QUERY_PROMPT:
393
+ ORIGIN_QUERY_PROMPT[agent_name] = NOVEL_PROMPT["Node 2"]["agents"][agent_name]["query"]
394
+ NOVEL_PROMPT["Node 2"]["agents"][agent_name]["query"] = ORIGIN_QUERY_PROMPT[agent_name].format(
395
+ num2cn[idx], "{}"
396
+ )
397
+ NOVEL_PROMPT["Node 2"]["summary"]["query"] = ORIGIN_SUMMARY_PROMPT.format(num2cn[idx], num2cn[idx], "{}")
398
+ start_agent_name = start_agent_names[idx]
399
+ start_agent_query = start_agent_queries[idx]
400
+
401
+ print(f"node {node_idx} starting ......")
402
+ second_agents, second_summary = generate_second_agents()
403
+ second_node = SecondNode(
404
+ name=f"Node {node_idx}",
405
+ agents=second_agents,
406
+ summary_agent=second_summary,
407
+ save=True,
408
+ start_agent_name=start_agent_name,
409
+ start_agent_query=start_agent_query,
410
+ stream_output=stream_output,
411
+ output_func=output_func
412
+ )
413
+
414
+ output_memory.append(
415
+ second_node.run()
416
+ )
417
+
418
+
419
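+ # Re-encode the raw streaming state before forwarding it to the Gradio front end:
+ # state 30 is passed through unchanged (a control/progress signal); otherwise the
+ # tens digit marks the sender (1 = regular agent, 2 = Recorder/summary) and the
+ # units digit the chunk position (0 = first chunk of a message, 1 = continuation,
+ # 2 = first chunk of a new node/state).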
+ def show_in_gradio(state, name, chunk, node_name):
420
+
421
+ if state == 30:
422
+ Client.server.send(str([state, name, chunk, node_name])+"<SELFDEFINESEP>")
423
+ return
424
+
425
+ if name.lower() in ["summary", "recorder"]:
426
+ """It is recorder"""
427
+ name = "Recorder"
428
+ if state == 0:
429
+ state = 22
430
+ else:
431
+ state = 21
432
+ else:
433
+ if Client.current_node != node_name and state == 0:
434
+ state = 12
435
+ Client.current_node = node_name
436
+ elif Client.current_node != node_name and state != 0:
437
+ assert False, "node changed in the middle of a message stream"
438
+ else:
439
+ state = 10 + state
440
+ Client.server.send(str([state, name, chunk, node_name])+"<SELFDEFINESEP>")
441
+
442
+
443
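+ # Entry point. Set output_func = show_in_gradio to stream through the socket
+ # client to the Gradio UI (the task prompt and opening query then come from
+ # Client.cache); leave it as None for plain console output.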
+ if __name__ == '__main__':
444
+
445
+ MyAgent.SIMULATION = False
446
+ MyAgent.TEMPERATURE = 1.0
447
+ stream_output = True
448
+ output_func = show_in_gradio
449
+ output_func = None
450
+
451
+ if output_func is not None:
452
+ global client
453
+ client = Client()
454
+
455
+ client.listening_for_start()
456
+ Client.server = client.start_server()
457
+ next(Client.server)
458
+
459
+ outline = run_node_1(
460
+ stream_output=stream_output,
461
+ output_func=output_func,
462
+ start_agent_name=Client.cache["start_agent_name"],
463
+ start_agent_query=Client.cache["start_agent_query"],
464
+ task_prompt=Client.cache["task"]
465
+ )
466
+ else:
467
+ outline = run_node_1(
468
+ stream_output=stream_output,
469
+ output_func=output_func
470
+ )
471
+ print(outline)
472
+ # assert False
473
+ run_node_2(outline, stream_output=stream_output, output_func=output_func)
474
+ print("done")
novel-server/config.ini ADDED
@@ -0,0 +1,2 @@
1
+ [Section]
2
+ prod = True
novel-server/myagent.py ADDED
@@ -0,0 +1,375 @@
1
+ import sys
2
+ sys.path.append('../../../../src/agents')
3
+ from agents.Agent import Agent
4
+ from agents.State import State
5
+ import os
6
+ import copy
7
+ import time
8
+ from typing import List, Dict, Any
9
+ import openai
10
+ from myutils import print_log, simulation
11
+ import abc
12
+ import json
13
+ import socket
14
+
15
+ PROXY = os.environ["PROXY"]
16
+ openai.proxy = PROXY
17
+
18
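+ # Socket client used when the script is driven from the Gradio UI:
+ # listening_for_start() blocks until the front end sends "<START>", collecting any
+ # dict payloads (separated by <SELFDEFINESEP>) into Client.cache, and
+ # start_server() is a generator that forwards messages back over the socket
+ # until it is sent "exit".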
+ class Client:
19
+
20
+ server = None
21
+ current_node = None
22
+ cache = {}
23
+
24
+ def __init__(self, host='127.0.0.1', port=9999, bufsize=1024):
25
+ self.bufsize = bufsize
26
+ assert bufsize > 0
27
+ self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
28
+ self.client_socket.connect((host, port))
29
+ self.client_socket.send("hello agent".encode('utf-8'))
30
+ print_log("client: Connected successfully......")
31
+
32
+ def start_server(self):
33
+ while True:
34
+ message = yield
35
+ if message == 'exit':
36
+ break
37
+ self.client_socket.send(message.encode('utf-8'))
38
+
39
+ def listening_for_start(self):
40
+
41
+ remaining = ""
42
+ while True:
43
+
44
+ dataset = self.client_socket.recv(self.bufsize)
45
+ try:
46
+ # if isinstance(remaining, bytes):
47
+ # raise UnicodeDecodeError
48
+ dataset = dataset.decode('utf-8')
49
+ except UnicodeDecodeError:
50
+
51
+ if not isinstance(remaining, bytes):
52
+
53
+ remaining = remaining.encode('utf-8')
54
+ assert isinstance(dataset, bytes)
55
+ remaining += dataset
56
+ try:
57
+ dataset = remaining.decode('utf-8')  # decode the accumulated buffer; previously assigned to an unused `response`, leaving dataset as bytes
58
+ remaining = ""
59
+ except:
60
+ continue
61
+ assert isinstance(remaining, str)
62
+
63
+ dataset = remaining + dataset
64
+ if dataset == "<START>":
65
+ break
66
+ list_dataset = dataset.split("<SELFDEFINESEP>")
67
+ if len(list_dataset) == 1:
68
+
69
+ remaining = list_dataset[0]
70
+
71
+ continue
72
+ else:
73
+
74
+ remaining = list_dataset[-1]
75
+
76
+ list_dataset = list_dataset[:-1]
77
+ print(list_dataset)
78
+ for data in list_dataset:
79
+ data = eval(data)  # note: eval on socket data is only safe with a trusted local front end
80
+ if isinstance(data, dict):
81
+ Client.cache.update(data)
82
+ else:
83
+ assert False
84
+
85
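+ # Thin wrapper around the base Agent: self.messages is the working context sent
+ # to the API (and may be trimmed by reduce_message() when it grows too long),
+ # while self.messages_copy keeps the full history for the Recorder and for
+ # saving; send_message() streams completions and retries on API errors.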
+ class MyAgent(Agent):
86
+ API_KEY: str = os.environ["API_KEY"]
87
+ WAIT_TIME: int = 20
88
+ DEFAULT_MODEL: int = "gpt-3.5-turbo-16k-0613"
89
+ TEMPERATURE: int = 0.3
90
+ SIMULATION: bool = False
91
+ __REDUCE_MODE__: list = ["cut", "summary"]
92
+
93
+ def __init__(
94
+ self,
95
+ name: str,
96
+ SYSTEM_PROMPT: str,
97
+ query: str
98
+ ):
99
+ self.name = name
100
+ self.SYSTEM_PROMPT = SYSTEM_PROMPT
101
+ self.messages: list = list()
102
+ self.messages.append(
103
+ {"role": "system", "content": self.SYSTEM_PROMPT}
104
+ )
105
+ self.messages_copy: list = copy.deepcopy(self.messages)
106
+ self.query = query
107
+
108
+ self.summary_pointer = 1
109
+ openai.api_key = MyAgent.API_KEY
110
+
111
+
112
+
113
+ def send_message(self, recorder=None, mode="cut", stream=True):
114
+ # print("sending...")
115
+ assert self.messages[-1]["role"] in ["user", "system"], \
116
+ "please make sure the last role is user or system!"
117
+ while True:
118
+ try:
119
+ # copy_message = copy.deepcopy(self.messages)
120
+ # print(self.messages)
121
+ if not MyAgent.SIMULATION:
122
+ completion = openai.ChatCompletion.create(
123
+ model=MyAgent.DEFAULT_MODEL,
124
+ messages=self.messages,
125
+ temperature=MyAgent.TEMPERATURE,
126
+ stream=stream
127
+ )
128
+ else:
129
+ completion = simulation()
130
+ if not stream:
131
+ if completion["choices"][0]["finish_reason"] == "length":
132
+
133
+ print("Length exceeded, deleted")
134
+ self.reduce_message(mode=mode, N=2)
135
+ continue
136
+ self.messages.append(
137
+ self._parse_response(completion)
138
+ )
139
+ self.messages_copy.append(
140
+ copy.deepcopy(self.messages[-1])
141
+ )
142
+ else:
143
+
144
+ complete_response = ""
145
+ for chunk in completion:
146
+ # print(chunk)
147
+ if "content" in chunk["choices"][0]["delta"]:
148
+ complete_response = f"""{complete_response}{chunk["choices"][0]["delta"]["content"]}"""
149
+ yield chunk["choices"][0]["delta"]["content"]
150
+ yield None
151
+ self.messages.append(
152
+ self._parse_response(complete_response)
153
+ )
154
+ self.messages_copy.append(
155
+ copy.deepcopy(self.messages[-1])
156
+ )
157
+ if recorder is not None:
158
+ recorder.add(
159
+ agent_name=self.name,
160
+ new_message_index=len(self.messages) - 1
161
+ )
162
+ break
163
+ except Exception as e:
164
+ # raise e  # debugging leftover: re-raising here would make the retry/trim logic below unreachable
165
+ print_log(e)
166
+ if "maximum context length is" in str(e):
167
+ print_log("maximum length exceeded! skip!")
168
+ self.reduce_message(mode=mode, N=2)
169
+ else:
170
+ print_log(f"Please wait {MyAgent.WAIT_TIME} seconds and resend later ...")
171
+ time.sleep(MyAgent.WAIT_TIME)
172
+
173
+
174
+
175
+ def prepare_message(self, message):
176
+ if isinstance(message, str):
177
+ self.messages.append(
178
+ {"role": "user", "content": message}
179
+ )
180
+ self.messages_copy.append(
181
+ {"role": "user", "content": message}
182
+ )
183
+ elif isinstance(message, list):
184
+ self.messages.extend(message)
185
+ self.messages_copy.extend(message)
186
+ else:
187
+ assert False
188
+
189
+ def _parse_response(self, completion, check_name: bool = True) -> dict:
190
+ if isinstance(completion, dict):
191
+ js = completion["choices"][0]["message"]
192
+ elif isinstance(completion, str):
193
+ js = {"content": completion, "role": "assistant"}
194
+ else:
195
+ assert False, \
196
+ "invalid completion."
197
+ if check_name:
198
+ js["content"] = js["content"].replace(f"<{self.name}>", "").replace(f"</{self.name}>", "")
199
+ return {"role": js["role"], "content": js["content"]}
200
+
201
+
202
+
203
+ def get_message(self, index: int, function=None, source: str = "copy", **kwargs) -> str:
204
+ assert source in ["copy", "origin"]
205
+ assert len(self.messages) > index
206
+ if function:
207
+ if source == "copy":
208
+ return function(self.messages_copy[index]["content"], kwargs)
209
+ elif source == "origin":
210
+ return function(self.messages[index]["content"], kwargs)
211
+ else:
212
+ if source == "copy":
213
+ return self.messages_copy[index]["content"]
214
+ elif source == "origin":
215
+ return self.messages[index]["content"]
216
+
217
+
218
+
219
+ def reduce_message(self, mode: str = "cut", N: int = 1, summary_agent=None):
220
+ assert mode in MyAgent.__REDUCE_MODE__, \
221
+ f"mode `{mode}` is invalid."
222
+ if mode == "cut":
223
+
224
+ """system | user | assistant | user | assistant"""
225
+ for i in range(N):
226
+ self.messages.pop(1)
227
+ assert self.messages[-1]["role"] in ["user", "system"], \
228
+ "please make sure the last role is user or system!"
229
+ elif mode == "summary":
230
+ assert isinstance(summary_agent, MyAgent), \
231
+ "the summary agent is not class MyAgent."
232
+
233
+ # summary_agent.prepare_message()
234
+
235
+
236
+
237
+ def output_message(self, recorder=None, mode="cut", stream=True, output_func=None, node_name:str=None):
238
+ if stream:
239
+ print(f"【{self.name}】 ", end="")
240
+ complete_response = ""
241
+ FIRST = True
242
+ for chunk in self.send_message(recorder=recorder, stream=stream, mode=mode):
243
+ if chunk is not None:
244
+ complete_response = f"{complete_response}{chunk}"
245
+ if output_func is None:
246
+ print(chunk, end="")
247
+ else:
248
+ # print(chunk, end="")
249
+ if FIRST:
250
+ output_func(0, self.name, chunk, node_name)
251
+ FIRST = False
252
+ else:
253
+ output_func(1, self.name, chunk, node_name)
254
+ # yield complete_response, self.name
255
+ else:
256
+ next(self.send_message(recorder=recorder, stream=stream, mode=mode), None)
257
+ if output_func is None:
258
+ print(f"【{self.name}】{self.get_message(index=-1)}")
259
+ else:
260
+ output_func(None, self.name, self.get_message(index=-1), node_name)
261
+
262
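+ # Ordered log of (agent_name, message_index) pairs. prepare() rebuilds everything
+ # said since the given agent last spoke (or the whole log for "all"), either as
+ # chat-format dicts or as an XML-tagged transcript.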
+ class Recorder:
263
+ def __init__(self, agents: Dict[str, MyAgent]):
264
+
265
+ self.recorder: List = list()
266
+ self.__AGENTS_NAME__ = []
267
+ # Record the index at which each agent last spoke, so we don't have to walk the whole record every time
268
+ self.__AGENTS_SPEAK_TIME__ = {}
269
+ self.agents: Dict[str, MyAgent] = agents
270
+ self._register()
271
+
272
+
273
+
274
+ def _register(self):
275
+ for agent_name in self.agents:
276
+ self.__AGENTS_NAME__.append(agent_name)
277
+ self.__AGENTS_SPEAK_TIME__[agent_name] = 0
278
+
279
+ def add(self, agent_name: str, new_message_index: int):
280
+ self.recorder.append(
281
+ [agent_name, new_message_index]
282
+ )
283
+ if agent_name not in self.__AGENTS_NAME__:
284
+ self.__AGENTS_NAME__.append(agent_name)
285
+ self.__AGENTS_SPEAK_TIME__[agent_name] = len(self.recorder)
286
+
287
+ def clear(self):
288
+ self.recorder.clear()
289
+
290
+
291
+
292
+ def prepare(self, agent_name: str, agents: Dict[str, MyAgent], return_dict: bool = False):
293
+ if agent_name.lower() != "all":
294
+ assert agent_name in self.__AGENTS_NAME__, \
295
+ f"There is no `MyAgent {agent_name}` in Recorder!"
296
+
297
+ history = ""
298
+ history_dict = []
299
+ start_index = self.__AGENTS_SPEAK_TIME__[agent_name] if agent_name.lower() != "all" else 0
300
+ for i in range(
301
+ start_index, len(self.recorder)
302
+ ):
303
+ his_ag_name, his_ag_index = self.recorder[i]
304
+ history_dict.append(
305
+ {"role": "user", "content": agents[his_ag_name].get_message(his_ag_index, source="copy")}
306
+ )
307
+ history = f"{history}\n<{his_ag_name.upper()}>\n{agents[his_ag_name].get_message(his_ag_index, source='copy')}\n</{his_ag_name.upper()}>\n"
308
+ # history = f"{history}\n{agent_name}: {agents[his_ag_name].get_message(his_ag_index, source='copy')}\n"
309
+ if return_dict:
310
+ return history_dict
311
+ else:
312
+ return history.strip()
313
+
314
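+ # Abstract conversation node: subclasses implement start/communicate/end, and
+ # run() executes them in order, optionally dumping every agent's full message
+ # history to JSON afterwards.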
+ class Node(State):
315
+
316
+ def __init__(
317
+ self,
318
+ agents: List[MyAgent],
319
+ summary_agent: MyAgent = None,
320
+ save: bool = False
321
+ ):
322
+ self.agents = {}
323
+ for agent in agents:
324
+ self.agents[agent.name] = agent
325
+
326
+ self.summary_agent: MyAgent = summary_agent
327
+ self.recorder = Recorder(agents=self.agents)
328
+ self.save = save
329
+
330
+ @abc.abstractmethod
331
+ def start(self):
332
+ raise NotImplementedError()
333
+
334
+ @abc.abstractmethod
335
+ def communicate(self):
336
+ raise NotImplementedError
337
+
338
+ @abc.abstractmethod
339
+ def end(self):
340
+ raise NotImplementedError()
341
+
342
+ def run(self):
343
+ self.start()
344
+ self.communicate()
345
+ response = self.end()
346
+ if self.save:
347
+ self.save_history()
348
+ return response
349
+
350
+ def save_history(self, save_path=None):
351
+ if save_path is None:
352
+ save_path = f"./Node2.json"
353
+ results = []
354
+ for agent_name in self.agents:
355
+ results.append(
356
+ self.agents[agent_name].messages_copy
357
+ )
358
+ json.dump(
359
+ results,
360
+ open(save_path, "w")
361
+ )
362
+
363
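+ # One-shot helper: spins up a temporary MyAgent at temperature 0, sends a single
+ # prompt, and returns the reply (used for parsing-style tasks).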
+ def ask_gpt(system_prompt="", input="", name="parser"):
364
+
365
+ temperature = MyAgent.TEMPERATURE
366
+ MyAgent.TEMPERATURE = 0
367
+ agent = MyAgent(
368
+ name=name, SYSTEM_PROMPT=system_prompt, query=""
369
+ )
370
+ agent.prepare_message(message=input)
371
+ # agent.temp_send_message()
372
+ agent.output_message(stream=False)
373
+ MyAgent.TEMPERATURE = temperature
374
+ return agent.get_message(index=-1)
375
+
novel-server/myutils.py ADDED
@@ -0,0 +1,71 @@
1
+ # -*- coding: utf-8 -*-
2
+ import copy
3
+ import time
4
+ import re
5
+ from tree import construct_tree, tree2xml, tree2dict
6
+ import json
7
+
8
+ def extract_tag_names(text):
9
+
10
+ pattern = r'<([^<>]+)>'
11
+
12
+
13
+ matches = re.findall(pattern, text)
14
+
15
+
16
+ stack = []
17
+ answer = []
18
+ for item in matches:
19
+ if item[0] != '/':
20
+ stack.append(item)
21
+ else:
22
+
23
+ if item[1:] in stack:
24
+ while stack[-1] != item[1:]:
25
+ stack.pop()
26
+ answer.append(stack.pop())
27
+ return answer
28
+
29
+ def print_log(message: str):
30
+ print(f"[{time.ctime()}] {message}")
31
+
32
+ def simulation():
33
+ content = ""
34
+ for i in range(5000):
35
+ content = f"{content} hello"
36
+ return {
37
+ 'id': 'chatcmpl-6p9XYPYSTTRi0xEviKjjilqrWU2Ve',
38
+ 'object': 'chat.completion',
39
+ 'created': 1677649420,
40
+ 'model': 'gpt-3.5-turbo',
41
+ 'usage': {'prompt_tokens': 56, 'completion_tokens': 31, 'total_tokens': 87},
42
+ 'choices': [
43
+ {
44
+ 'message': {
45
+ 'role': 'assistant',
46
+ 'content': content
47
+ },
48
+ 'finish_reason': 'stop',
49
+ 'index': 0
50
+ }
51
+ ]
52
+ }
53
+
54
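+ # Parse the XML-style tags in an agent reply into a dict or a trimmed XML string;
+ # `labels` keeps only the listed tags (tree "remain" mode). Illustrative example:
+ #   new_parse("<title>AI</title>\n<outline>...</outline>", ["title"], return_dict=True)
+ #   -> {"title": "AI"}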
+ def new_parse(content:str, labels: list, return_dict:bool=False):
55
+
56
+ tree = construct_tree(content, add_root_label=True)
57
+ tree.first_label = False
58
+ if len(labels) == 0 or labels is None:
59
+ if return_dict:
60
+ return tree2dict(tree)['root']
61
+ else:
62
+
63
+ return "\n".join(tree2xml(tree).split('\n')[1:-1])
64
+ else:
65
+ if return_dict:
66
+ tree_dict = tree2dict(tree, filter=labels, mode="remain")['root']
67
+ return tree_dict
68
+ else:
69
+ tree_xml = tree2xml(tree, filter=labels, mode="remain")
70
+ return "\n".join(tree_xml.split('\n')[1:-1])
71
+
novel-server/tree.py ADDED
@@ -0,0 +1,234 @@
1
+ # -*- coding: utf-8 -*-
2
+ import copy
3
+ from typing import List, Tuple, Any
4
+ import re
5
+
6
+ class Item:
7
+ def __init__(self, value, start, end):
8
+ self.value = value
9
+ self.start = start
10
+ self.end = end
11
+
12
+ class TreeNode:
13
+ def __init__(self, item:Item):
14
+ self.item = item
15
+ self.state = 0
16
+ self.sons = []
17
+ self.parent = None
18
+
19
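+ # Interval tree over tag spans: each node is a tag whose span contains the spans
+ # of its children. build_dict()/build_xml() walk it to rebuild the tagged content,
+ # with mode "filter" dropping the listed tags and mode "remain" keeping only the
+ # listed tags and their subtrees.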
+ class Tree:
20
+ def __init__(self, item: Item, text:str):
21
+ self.root = TreeNode(item)
22
+ self.text = text
23
+ self.first_label = True
24
+
25
+ def isNodeIn(self, node1:TreeNode, node2:TreeNode):
26
+
27
+ if node1.item.start > node2.item.start and node1.item.end < node2.item.end:
28
+ return True
29
+ return False
30
+
31
+ def insert(self, new_node: TreeNode, current_node: TreeNode):
32
+ if len(current_node.sons) == 0:
33
+
34
+ if self.isNodeIn(new_node, current_node):
35
+
36
+ current_node.sons.append(new_node)
37
+ new_node.parent = current_node
38
+ return True
39
+ else:
40
+
41
+ return False
42
+ for son in current_node.sons:
43
+
44
+ done = self.insert(new_node, son)
45
+
46
+ if done:
47
+ return True
48
+ if self.isNodeIn(new_node, current_node):
49
+ current_node.sons.append(new_node)
50
+ new_node.parent = current_node
51
+ return True
52
+ else:
53
+ return False
54
+
55
+ def node_count(self):
56
+ cnt = 0
57
+ if self.root is not None:
58
+ cnt = 1
59
+ sons = self.root.sons
60
+ while len(sons) > 0:
61
+ current: TreeNode = sons.pop()
62
+ cnt += 1
63
+ if len(current.sons) > 0:
64
+ sons.extend(current.sons)
65
+ return cnt
66
+
67
+ def reset_state(self, reset_value, current_node:TreeNode=None):
68
+ if current_node is None:
69
+ current_node = self.root
70
+ current_node.state = reset_value
71
+ for i in range(len(current_node.sons)):
72
+ self.reset_state(reset_value, current_node=current_node.sons[i])
73
+
74
+ def get_node_content(self, node:TreeNode):
75
+ value_length = len(node.item.value)
76
+ start = node.item.start
77
+ end = node.item.end
78
+ return self.text[
79
+ start+value_length+1:end-1
80
+ ]
81
+
82
+ def build_dict(self, current_dict:dict, current_root:TreeNode, filter_value:list=None, mode:str="filter"):
83
+ assert mode.lower() in ["filter", "remain"], \
84
+ f"mode `{mode}` is not in ['filter', 'remain']"
85
+
86
+ if len(current_root.sons) == 0:
87
+
88
+ if filter_value is None or (mode.lower() == "remain" and current_root.state == 1):
89
+ return {current_root.item.value: self.get_node_content(current_root)}
90
+ if mode.lower() == "filter" and current_root.item.value in filter_value:
91
+ return None
92
+ elif mode.lower() == "filter" and current_root.item.value not in filter_value:
93
+ return {current_root.item.value: self.get_node_content(current_root)}
94
+ elif mode.lower() == "remain" and current_root.item.value in filter_value:
95
+ return {current_root.item.value: self.get_node_content(current_root)}
96
+ elif mode.lower() == "remain" and current_root.item.value not in filter_value:
97
+ return None
98
+ else:
99
+ if filter_value is not None:
100
+ if mode.lower() == "filter" and current_root.item.value in filter_value:
101
+ return None
102
+ if self.first_label:
103
+ if mode.lower() == "remain" and current_root.item.value not in filter_value and current_root.item.value != "root" and current_root.state==0:
104
+ return None
105
+ current_dict[current_root.item.value] = {}
106
+ for i in range(len(current_root.sons)):
107
+
108
+ if mode.lower() == "remain":
109
+ if current_root.parent is not None and current_root.parent.state == 1:
110
+
111
+ current_root.state = 1
112
+ if current_root.item.value in filter_value:
113
+
114
+ current_root.state = 1
115
+ current_root.sons[i].state = 1
116
+ if current_root.state == 1:
117
+
118
+ current_root.sons[i].state = 1
119
+ if current_root.sons[i].item.value in filter_value:
120
+ current_root.sons[i].state = 1
121
+ item = self.build_dict(current_dict[current_root.item.value], current_root.sons[i], filter_value, mode)
122
+ if isinstance(item, dict):
123
+ current_dict[current_root.item.value].update(item)
124
+
125
+ def build_xml(self, current_item: list, current_root:TreeNode, filter_value:list=None, mode:str="filter"):
126
+ assert mode.lower() in ["filter", "remain"], \
127
+ f"mode `{mode}` is not in ['filter', 'remain']"
128
+ if len(current_root.sons) == 0:
129
+
130
+ if filter_value is None or (mode.lower() == "remain" and current_root.state == 1):
131
+ return f"<{current_root.item.value}>{self.get_node_content(current_root)}</{current_root.item.value}>"
132
+ if mode.lower() == "filter" and current_root.item.value in filter_value:
133
+ return None
134
+ elif mode.lower() == "filter" and current_root.item.value not in filter_value:
135
+ return f"<{current_root.item.value}>{self.get_node_content(current_root)}</{current_root.item.value}>"
136
+ elif mode.lower() == "remain" and current_root.item.value in filter_value:
137
+ return f"<{current_root.item.value}>{self.get_node_content(current_root)}</{current_root.item.value}>"
138
+ elif mode.lower() == "remain" and current_root.item.value not in filter_value:
139
+ return None
140
+ else:
141
+ if filter_value is not None:
142
+ if mode.lower() == "filter" and current_root.item.value in filter_value:
143
+ return None
144
+ if self.first_label:
145
+ if mode.lower() == "remain" and current_root.item.value not in filter_value and current_root.item.value != "root" and current_root.state==0:
146
+ return None
147
+ current_item.append(f"<{current_root.item.value}>")
148
+ for i in range(len(current_root.sons)):
149
+
150
+ if mode.lower() == "remain":
151
+ if current_root.parent is not None and current_root.parent.state == 1:
152
+
153
+ current_root.state = 1
154
+ if current_root.item.value in filter_value:
155
+
156
+ current_root.state = 1
157
+ current_root.sons[i].state = 1
158
+ if current_root.state == 1:
159
+
160
+ current_root.sons[i].state = 1
161
+ if current_root.sons[i].item.value in filter_value:
162
+ current_root.sons[i].state = 1
163
+ item = self.build_xml(current_item, current_root.sons[i], filter_value, mode)
164
+ if isinstance(item, str):
165
+ current_item.append(f"{item}")
166
+ current_item.append(f"</{current_root.item.value}>")
167
+
168
+ def extract_tag_names(text: str, sort:bool=True)->List[Tuple[str, int, int]]:
169
+
170
+ pattern = r'<([^<>]+)>'
171
+
172
+
173
+ matches = re.findall(pattern, text)
174
+
175
+ pos = []
176
+ start = 0
177
+ for item in matches:
178
+ pos.append(
179
+ text[start:].find(item)+start
180
+ )
181
+ start = text[start:].find(item)+start + len(item)
182
+
183
+
184
+ stack_item = []
185
+ stack_pos = []
186
+ answer = []
187
+ for idx, item in enumerate(matches):
188
+ if item[0] != '/':
189
+ stack_item.append(item)
190
+ stack_pos.append(pos[idx])
191
+ else:
192
+ end_pos = pos[idx]
193
+
194
+ if item[1:] in stack_item:
195
+ while stack_item[-1] != item[1:]:
196
+ stack_item.pop()
197
+ stack_pos.pop()
198
+
199
+ answer.append((stack_item.pop(), stack_pos.pop(), end_pos))
200
+ if sort:
201
+ return sorted(answer, key=lambda x: x[1])
202
+ return answer
203
+
204
+ def construct_tree(text, add_root_label:bool=True):
205
+ if add_root_label:
206
+ print("root label is added!")
207
+ text = f"<root>\n{text}\n</root>"
208
+ data = extract_tag_names(text)
209
+ tree = Tree(Item(*data[0]), text)
210
+ nodes_list = []
211
+ for d in data[1:]:
212
+ new_node = TreeNode(
213
+ Item(*d)
214
+ )
215
+ nodes_list.append(new_node)
216
+ for i in range(len(nodes_list)):
217
+ tree.insert(
218
+ new_node=nodes_list[i],
219
+ current_node=tree.root
220
+ )
221
+ return tree
222
+
223
+ def tree2dict(tree:Tree, filter:list=None, mode="filter"):
224
+ answer = {}
225
+ tree.reset_state(0)
226
+ tree.build_dict(answer, tree.root, filter, mode)
227
+ return answer
228
+
229
+ def tree2xml(tree, filter:list=None, mode="filter"):
230
+ answer = []
231
+ tree.reset_state(0)
232
+ tree.build_xml(answer, tree.root, filter, mode)
233
+ return "\n".join(answer)
234
+
novel_outline/character_settings.json ADDED
@@ -0,0 +1 @@
1
+ {"Mike Smith": {"role_name": "Mike Smith", "gender": "Male", "age": "35", "occupation": "Internet company programmer", "personality": "Hardworking, dedicated, tech-savvy", "speaking_style": "Clear and concise", "relation_with_others": "Married to Jane Black, father of their child", "background": "Mike Smith is a skilled programmer who works long hours at an internet company. He is dedicated to his job but struggles to find a balance between work and family. He loves his wife, Jane, and their child, and wants to provide them with a good life. However, his demanding job often takes precedence, causing tension in his marriage and affecting his relationship with his child."}, "Jane Black": {"role_name": "Jane Black", "gender": "Female", "age": "32", "occupation": "High school chemistry teacher", "personality": "Intelligent, caring, organized", "speaking_style": "Articulate and informative", "relation_with_others": "Married to Mike Smith, mother of their child", "background": "Jane Black is a dedicated high school chemistry teacher who is passionate about education. She loves her family but often feels overwhelmed by the demands of her job and motherhood. Jane strives to be a positive role model for her child and hopes to inspire her students to pursue their dreams. She relies on her strong partnership with Mike to navigate the challenges of modern family life."}, "Emily Smith": {"role_name": "Emily Smith", "gender": "Female", "age": "5", "occupation": "Kindergarten student", "personality": "Playful, inquisitive, imaginative", "speaking_style": "Childlike and enthusiastic", "relation_with_others": "Daughter of Mike and Jane", "background": "Emily Smith is a bright and energetic kindergartener who loves to explore the world around her. She brings joy and laughter to her parents' lives and serves as a reminder of the importance of family. Emily's innocence and curiosity often lead to humorous and heartwarming moments in the story, as her parents navigate the challenges of raising a young child."}, "Alex Johnson": {"role_name": "Alex Johnson", "gender": "Male", "age": "38", "occupation": "Internet company programmer", "personality": "Ambitious, witty, loyal", "speaking_style": "Sarcastic and humorous", "relation_with_others": "Friend and coworker of Mike", "background": "Alex Johnson is a talented programmer who shares a close friendship with Mike. He is ambitious and driven, always pushing himself to succeed in the competitive tech industry. Alex provides a source of comic relief in the story, often using humor to lighten the mood during challenging times. His friendship and support for Mike play a significant role in the development of the plot."}}