voidDescriptor committed
Commit 3451f44
Parent: 193dc4a

Upload app.py

Files changed (1):
  app.py +306 -0
app.py ADDED
@@ -0,0 +1,306 @@
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
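
# Note (editor's assumption, not pinned by this diff): load_in_8bit=True in
# load_model below also requires the `accelerate` and `bitsandbytes` packages
# at runtime, in addition to gradio, torch, and transformers.
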
class ConversationManager:
    def __init__(self):
        self.models = {}
        self.conversation = []
        self.delay = 3
        self.is_paused = False
        self.current_model = None
        self.initial_prompt = ""
        self.task_complete = False

    def load_model(self, model_name):
        if not model_name:
            print("Error: Empty model name provided")
            return None

        if model_name in self.models:
            return self.models[model_name]

        try:
            print(f"Attempting to load model: {model_name}")
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
            self.models[model_name] = (model, tokenizer)
            print(f"Successfully loaded model: {model_name}")
            return self.models[model_name]
        except Exception as e:
            print(f"Failed to load model {model_name}: {type(e).__name__}: {e}")
            return None

    def generate_response(self, model_name, prompt):
        loaded = self.load_model(model_name)
        if loaded is None:
            # load_model returns None on failure; unpacking it would raise a TypeError.
            return f"Error: model {model_name} could not be loaded."
        model, tokenizer = loaded

        if "llama" in model_name.lower():
            formatted_prompt = self.format_llama2_prompt(prompt)
        else:
            formatted_prompt = self.format_general_prompt(prompt)

        inputs = tokenizer(formatted_prompt, return_tensors="pt", max_length=1024, truncation=True)
        inputs = inputs.to(model.device)  # keep input tensors on the model's device
        with torch.no_grad():
            # max_new_tokens bounds only the continuation; the original
            # max_length=200 could fail whenever the prompt alone exceeded 200 tokens.
            outputs = model.generate(**inputs, max_new_tokens=200, num_return_sequences=1, do_sample=True)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

    def format_llama2_prompt(self, prompt):
        # Llama-2 chat template: [INST] <<SYS>> system <</SYS>> user [/INST]
        B_INST, E_INST = "[INST]", "[/INST]"
        B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
        system_prompt = "You are a helpful AI assistant. Please provide a concise and relevant response."
        return f"{B_INST} {B_SYS}{system_prompt}{E_SYS}{prompt.strip()} {E_INST}"

    def format_general_prompt(self, prompt):
        return f"Human: {prompt.strip()}\n\nAssistant:"
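
    # For example, format_llama2_prompt("Hi") produces:
    #   [INST] <<SYS>>
    #   You are a helpful AI assistant. Please provide a concise and relevant response.
    #   <</SYS>>
    #
    #   Hi [/INST]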

    def add_to_conversation(self, model_name, response):
        self.conversation.append((model_name, response))
        if "task complete?" in response.lower():
            self.task_complete = True

    def get_conversation_history(self):
        return "\n".join(f"{model}: {msg}" for model, msg in self.conversation)

    def clear_conversation(self):
        self.conversation = []
        self.initial_prompt = ""
        self.models = {}
        self.current_model = None
        self.task_complete = False

    def rewind_conversation(self, steps):
        if steps > 0:  # guard: conversation[:-0] would wipe the whole history
            self.conversation = self.conversation[:-steps]
        self.task_complete = False

    def rewind_and_insert(self, steps, inserted_response):
        if steps > 0:
            self.conversation = self.conversation[:-steps]
        if inserted_response.strip():
            last_model = self.conversation[-1][0] if self.conversation else "User"
            next_model = "Model 1" if last_model in ("Model 2", "User") else "Model 2"
            self.conversation.append((next_model, inserted_response))
            self.current_model = last_model
        self.task_complete = False


manager = ConversationManager()


def get_model(dropdown, custom):
    # A non-empty custom model name overrides the dropdown selection.
    return custom if custom and custom.strip() else dropdown


def chat(model1, model2, custom1, custom2, user_input, history, inserted_response=""):
    """Generator that alternates between the two models and streams updates."""
    try:
        # The custom-model textboxes are passed in as inputs: reading
        # model1_custom.value at call time would only return the component's
        # initial value, never what the user actually typed.
        model1 = get_model(model1, custom1)
        model2 = get_model(model2, custom2)
        print(f"Starting chat with models: {model1}, {model2}")
        print(f"User input: {user_input}")

        starting_fresh = not manager.conversation
        if starting_fresh:
            # Clear before loading: clear_conversation() also empties the model
            # cache, which previously discarded the models loaded just above it.
            manager.clear_conversation()

        if not manager.load_model(model1) or not manager.load_model(model2):
            yield "Error: Failed to load one or both models. Please check the model names and try again.", ""
            return

        if starting_fresh:
            manager.initial_prompt = user_input
            manager.add_to_conversation("User", user_input)

        models = [model1, model2]
        # Model 1 speaks first on a fresh conversation or after Model 2 / the user.
        current_model_index = 0 if manager.current_model in (None, "User", "Model 2") else 1

        while not manager.task_complete:
            if manager.is_paused:
                yield history, "Conversation paused."
                return

            model = models[current_model_index]
            manager.current_model = model

            if inserted_response and current_model_index == 0:
                response = inserted_response
                inserted_response = ""
            else:
                conversation_history = manager.get_conversation_history()
                prompt = f"{conversation_history}\n\nPlease continue the conversation. If you believe the task is complete, end your response with 'Task complete?'"
                response = manager.generate_response(model, prompt)

            manager.add_to_conversation(model, response)
            history = manager.get_conversation_history()

            # Countdown so the user can pause or intervene between turns.
            for i in range(manager.delay, 0, -1):
                yield history, f"{model} is writing... {i}"
                time.sleep(1)

            yield history, ""

            if manager.task_complete:
                yield history, "Models believe the task is complete. Are you satisfied with the result? (Yes/No)"
                return

            current_model_index = (current_model_index + 1) % 2

        yield history, "Conversation completed."
    except Exception as e:
        print(f"Error in chat function: {type(e).__name__}: {e}")
        # yield, not return: plain return values from a generator never reach the UI.
        yield f"An error occurred: {e}", ""


def user_satisfaction(satisfied, history):
    if satisfied.lower() == "yes":
        return history, "Task completed successfully."
    else:
        manager.task_complete = False
        return history, "Continuing the conversation..."


def pause_conversation():
    manager.is_paused = True
    return "Conversation paused. Press Resume to continue."


def resume_conversation():
    manager.is_paused = False
    return "Conversation resumed."


def edit_response(edited_text):
    if manager.conversation:
        manager.conversation[-1] = (manager.current_model, edited_text)
        manager.task_complete = False
    return manager.get_conversation_history()


def restart_conversation(model1, model2, custom1, custom2, user_input):
    manager.clear_conversation()
    # Delegate to the chat generator so a restart streams updates too;
    # returning the generator object itself would never reach the UI.
    yield from chat(model1, model2, custom1, custom2, user_input, "")


def rewind_and_insert(steps, inserted_response, history):
    manager.rewind_and_insert(int(steps), inserted_response)
    return manager.get_conversation_history(), ""


open_source_models = [
    "meta-llama/Llama-2-7b-chat-hf",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-70b-chat-hf",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "bigcode/starcoder2-15b",
    "bigcode/starcoder2-3b",
    "tiiuae/falcon-7b",
    "tiiuae/falcon-40b",
    "EleutherAI/gpt-neox-20b",
    "google/flan-ul2",  # encoder-decoder model; AutoModelForCausalLM will not load it
    "stabilityai/stablelm-zephyr-3b",
    "HuggingFaceH4/zephyr-7b-beta",
    "microsoft/phi-2",
    "google/gemma-7b-it",
]


with gr.Blocks() as demo:
    gr.Markdown("# ConversAI Playground")

    with gr.Row():
        with gr.Column(scale=1):
            model1_dropdown = gr.Dropdown(choices=open_source_models, label="Model 1")
            model1_custom = gr.Textbox(label="Custom Model 1")
        with gr.Column(scale=1):
            model2_dropdown = gr.Dropdown(choices=open_source_models, label="Model 2")
            model2_custom = gr.Textbox(label="Custom Model 2")

    user_input = gr.Textbox(label="Initial prompt", lines=2)
    chat_history = gr.Textbox(label="Conversation", lines=20)
    current_response = gr.Textbox(label="Current model response", lines=3)

    with gr.Row():
        pause_btn = gr.Button("Pause")
        edit_btn = gr.Button("Edit")
        rewind_btn = gr.Button("Rewind")
        resume_btn = gr.Button("Resume")
        restart_btn = gr.Button("Restart")
        clear_btn = gr.Button("Clear")

    with gr.Row():
        rewind_steps = gr.Slider(0, 10, 1, label="Steps to rewind")
        inserted_response = gr.Textbox(label="Insert response after rewind", lines=2)

    delay_slider = gr.Slider(0, 10, 3, label="Response Delay (seconds)")

    user_satisfaction_input = gr.Textbox(label="Are you satisfied with the result? (Yes/No)", visible=False)

    gr.Markdown("""
## Button Descriptions
- **Pause**: Temporarily stops the conversation. The current model will finish its response.
- **Edit**: Allows you to modify the last response in the conversation.
- **Rewind**: Removes the specified number of responses from the end of the conversation.
- **Resume**: Continues the conversation from where it was paused.
- **Restart**: Begins a new conversation with the same or different models, keeping the initial prompt.
- **Clear**: Resets everything, including loaded models, conversation history, and the initial prompt.
""")

    def on_chat_update(history, response):
        # Reveal the satisfaction prompt once the models declare the task done.
        if response and "Models believe the task is complete" in response:
            return gr.update(visible=True), gr.update(visible=False)
        return gr.update(visible=False), gr.update(visible=True)

    start_btn = gr.Button("Start Conversation")
    chat_output = start_btn.click(
        chat,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input, chat_history],
        outputs=[chat_history, current_response],
    )

    chat_output.then(
        on_chat_update,
        inputs=[chat_history, current_response],
        outputs=[user_satisfaction_input, start_btn],
    )

    user_satisfaction_input.submit(
        user_satisfaction,
        inputs=[user_satisfaction_input, chat_history],
        outputs=[chat_history, current_response],
    ).then(
        chat,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input, chat_history],
        outputs=[chat_history, current_response],
    )

    pause_btn.click(pause_conversation, outputs=[current_response])
    # Resume must clear the pause flag before re-entering chat; calling chat
    # directly (as the original did) bailed out immediately with "paused".
    resume_btn.click(resume_conversation, outputs=[current_response]).then(
        chat,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input, chat_history, inserted_response],
        outputs=[chat_history, current_response],
    )
    edit_btn.click(edit_response, inputs=[current_response], outputs=[chat_history])
    rewind_btn.click(rewind_and_insert, inputs=[rewind_steps, inserted_response, chat_history], outputs=[chat_history, current_response])
    restart_btn.click(
        restart_conversation,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input],
        outputs=[chat_history, current_response],
    )

    def clear_all():
        # clear_conversation() returns None, so wrap it to hand the three
        # output components explicit empty values.
        manager.clear_conversation()
        return "", "", ""

    clear_btn.click(clear_all, outputs=[chat_history, current_response, user_input])
    delay_slider.change(lambda x: setattr(manager, "delay", x), inputs=[delay_slider])

if __name__ == "__main__":
    demo.launch()
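
As a quick sanity check, ConversationManager can be exercised without loading any model weights, since prompt formatting and the completion marker are pure string logic. A minimal sketch, assuming the dependencies are installed and app.py is on the import path (importing it builds the Blocks UI but does not launch it):

from app import ConversationManager

mgr = ConversationManager()
# No GPU or weights needed: format_llama2_prompt is pure string work.
print(mgr.format_llama2_prompt("Summarize the plot of Hamlet."))
# add_to_conversation detects the "Task complete?" marker case-insensitively.
mgr.add_to_conversation("Model 1", "Here is the summary... Task complete?")
print(mgr.task_complete)                 # True
print(mgr.get_conversation_history())    # "User: ..." / "Model 1: ..." transcript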