voidDescriptor committed
Commit 183d24b
1 Parent(s): 041e17c

Upload app.py

Files changed (1)
  app.py +309 -0
app.py ADDED
@@ -0,0 +1,309 @@
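"""ConversAI Playground: a Gradio app in which two causal language models from
the Hugging Face Hub converse with each other, with pause/resume, rewind, edit,
restart, and clear controls coordinated by a single ConversationManager."""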
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import bitsandbytes as bnb

print(f"bitsandbytes version: {bnb.__version__}")
print(f"CUDA is available: {torch.cuda.is_available()}")
print(f"CUDA device count: {torch.cuda.device_count()}")
if torch.cuda.is_available():
    print(f"Current CUDA device: {torch.cuda.current_device()}")
    print(f"CUDA device name: {torch.cuda.get_device_name(0)}")

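# Environment assumption: load_in_8bit=True (used in load_model below) needs
# bitsandbytes plus the `accelerate` package and a CUDA GPU. On a CPU-only
# machine the 8-bit load typically fails at load time, which is exactly the
# case the full-precision fallback below is meant to catch.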
class ConversationManager:
    def __init__(self):
        self.models = {}           # cache of loaded (model, tokenizer) pairs
        self.conversation = []     # list of (speaker, text) tuples
        self.delay = 3             # seconds counted down between turns
        self.is_paused = False
        self.current_model = None
        self.initial_prompt = ""
        self.task_complete = False

    def load_model(self, model_name):
        if not model_name:
            print("Error: Empty model name provided")
            return None

        if model_name in self.models:
            return self.models[model_name]

        try:
            print(f"Attempting to load model: {model_name}")
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            # Try to load the model with 8-bit quantization
            model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True)
        except RuntimeError as e:
            print(f"8-bit quantization not available, falling back to full precision: {e}")
            if torch.cuda.is_available():
                model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
            else:
                model = AutoModelForCausalLM.from_pretrained(model_name)
        except Exception as e:
            print(f"Failed to load model {model_name}: {e} (type: {type(e).__name__})")
            return None

        self.models[model_name] = (model, tokenizer)
        print(f"Successfully loaded model: {model_name}")
        return self.models[model_name]

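    # Side note (not used above): recent transformers releases prefer an
    # explicit quantization config over the bare load_in_8bit flag. An
    # equivalent call would look like:
    #
    #   from transformers import BitsAndBytesConfig
    #   model = AutoModelForCausalLM.from_pretrained(
    #       model_name,
    #       device_map="auto",
    #       quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    #   )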
    def generate_response(self, model_name, prompt):
        loaded = self.load_model(model_name)
        if loaded is None:
            return f"[Error: could not load {model_name}]"
        model, tokenizer = loaded

        formatted_prompt = f"Human: {prompt.strip()}\n\nAssistant:"

        inputs = tokenizer(formatted_prompt, return_tensors="pt", max_length=1024, truncation=True)
        inputs = inputs.to(model.device)  # keep tensors on the model's device
        with torch.no_grad():
            # max_new_tokens bounds the completion itself; max_length would
            # also count the (up to 1024-token) prompt and could leave no
            # room to generate anything new
            outputs = model.generate(**inputs, max_new_tokens=200, num_return_sequences=1, do_sample=True)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)

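    # Note: decode() returns prompt and completion together, so the log will
    # repeat earlier turns. A minimal trim (illustrative, not part of the
    # original logic) would be roughly:
    #
    #   full = tokenizer.decode(outputs[0], skip_special_tokens=True)
    #   completion = full[len(formatted_prompt):].strip()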
    def add_to_conversation(self, model_name, response):
        self.conversation.append((model_name, response))
        if "task complete?" in response.lower():
            self.task_complete = True

    def get_conversation_history(self):
        return "\n".join([f"{model}: {msg}" for model, msg in self.conversation])

    def clear_conversation(self):
        self.conversation = []
        self.initial_prompt = ""
        self.models = {}
        self.current_model = None
        self.task_complete = False

    def rewind_conversation(self, steps):
        if steps > 0:  # guard: a [:-0] slice would clear the whole list
            self.conversation = self.conversation[:-steps]
        self.task_complete = False

    def rewind_and_insert(self, steps, inserted_response):
        if steps > 0:
            self.conversation = self.conversation[:-steps]
        if inserted_response.strip():
            last_model = self.conversation[-1][0] if self.conversation else "User"
            # Inserted turns are attributed to the generic labels "Model 1"/"Model 2"
            next_model = "Model 1" if last_model in ("Model 2", "User") else "Model 2"
            self.conversation.append((next_model, inserted_response))
            self.current_model = last_model
        self.task_complete = False

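# Illustrative behavior of the manager's rewind bookkeeping (assumes nothing
# beyond the class above):
#
#   m = ConversationManager()
#   m.add_to_conversation("User", "Write a haiku")
#   m.add_to_conversation("Model 1", "Here is one...")
#   m.add_to_conversation("Model 2", "Task complete?")  # sets task_complete
#   m.rewind_conversation(2)  # drops both model turns, clears task_complete
#   m.get_conversation_history()  # -> "User: Write a haiku"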
manager = ConversationManager()

def get_model(dropdown, custom):
    # A non-empty custom model id overrides the dropdown selection
    return custom if custom and custom.strip() else dropdown

def chat(model1, model2, custom1, custom2, user_input, history, inserted_response=""):
    try:
        print(f"Starting chat with models: {model1}, {model2}")
        print(f"User input: {user_input}")

        # The custom-model textbox values arrive as event inputs; reading
        # model1_custom.value here would only ever see the initial value.
        model1 = get_model(model1, custom1)
        model2 = get_model(model2, custom2)

        print(f"Selected models: {model1}, {model2}")

        if not manager.conversation:
            # Reset first: clear_conversation() wipes initial_prompt and the
            # model cache, so it must run before the prompt is recorded and
            # before the models are loaded
            manager.clear_conversation()
            manager.initial_prompt = user_input
            manager.add_to_conversation("User", user_input)

        if not manager.load_model(model1) or not manager.load_model(model2):
            yield "Error: Failed to load one or both models. Please check the model names and try again.", ""
            return

        models = [model1, model2]
        # Model 1 speaks first on a fresh start, after the user, or after Model 2
        current_model_index = 0 if manager.current_model in (None, "User", "Model 2", model2) else 1

        while not manager.task_complete:
            if manager.is_paused:
                yield history, "Conversation paused."
                return

            model = models[current_model_index]
            manager.current_model = model

            if inserted_response and current_model_index == 0:
                response = inserted_response
                inserted_response = ""
            else:
                conversation_history = manager.get_conversation_history()
                prompt = f"{conversation_history}\n\nPlease continue the conversation. If you believe the task is complete, end your response with 'Task complete?'"
                response = manager.generate_response(model, prompt)

            manager.add_to_conversation(model, response)
            history = manager.get_conversation_history()

            for i in range(manager.delay, 0, -1):
                yield history, f"{model} is writing... {i}"
                time.sleep(1)

            yield history, ""

            if manager.task_complete:
                yield history, "Models believe the task is complete. Are you satisfied with the result? (Yes/No)"
                return

            current_model_index = (current_model_index + 1) % 2

        yield history, "Conversation completed."
    except Exception as e:
        print(f"Error in chat function: {e} (type: {type(e).__name__})")
        # chat is a generator: a bare `return value` would be swallowed, so
        # errors must be yielded for Gradio to display them
        yield f"An error occurred: {e}", ""

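# Because chat() is a generator, Gradio streams each yielded (history, status)
# pair into the two output textboxes; this is what makes the per-second
# "X is writing..." countdown visible in the UI.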
def user_satisfaction(satisfied, history):
    if satisfied.strip().lower() == 'yes':
        return history, "Task completed successfully."
    else:
        manager.task_complete = False
        return history, "Continuing the conversation..."

def pause_conversation():
    manager.is_paused = True
    return "Conversation paused. Press Resume to continue."

def resume_conversation(model1, model2, custom1, custom2, user_input, history, inserted_response):
    # Clear the pause flag before re-entering the chat loop; calling chat()
    # directly would bail out immediately while is_paused is still True
    manager.is_paused = False
    yield from chat(model1, model2, custom1, custom2, user_input, history, inserted_response)

def edit_response(edited_text):
    if manager.conversation:
        manager.conversation[-1] = (manager.current_model, edited_text)
        manager.task_complete = False
    return manager.get_conversation_history()

def restart_conversation(model1, model2, custom1, custom2, user_input):
    manager.clear_conversation()
    # Delegate to the chat generator so its streamed updates reach the UI;
    # returning the generator object itself would never be iterated
    yield from chat(model1, model2, custom1, custom2, user_input, "")

def rewind_and_insert(steps, inserted_response, history):
    manager.rewind_and_insert(int(steps), inserted_response)
    return manager.get_conversation_history(), ""

open_source_models = [
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "bigcode/starcoder2-15b",
    "bigcode/starcoder2-3b",
    "tiiuae/falcon-7b",
    "EleutherAI/gpt-neox-20b",
    "google/flan-ul2",
    "stabilityai/stablelm-zephyr-3b",
    "HuggingFaceH4/zephyr-7b-beta",
    "microsoft/phi-2",
    "google/gemma-7b-it",
    "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    "mosaicml/mpt-7b-chat",
    "databricks/dolly-v2-12b",
    "thebloke/Wizard-Vicuna-13B-Uncensored-HF",
    "bigscience/bloom-560m",
]

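# Caveat: not every entry above is a decoder-only model. google/flan-ul2, for
# example, is an encoder-decoder (T5-style) checkpoint that
# AutoModelForCausalLM will refuse to load; load_model would then log the
# error and return None.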
with gr.Blocks() as demo:
    gr.Markdown("# ConversAI Playground")

    with gr.Row():
        with gr.Column(scale=1):
            model1_dropdown = gr.Dropdown(choices=open_source_models, label="Model 1")
            model1_custom = gr.Textbox(label="Custom Model 1")
        with gr.Column(scale=1):
            model2_dropdown = gr.Dropdown(choices=open_source_models, label="Model 2")
            model2_custom = gr.Textbox(label="Custom Model 2")

    user_input = gr.Textbox(label="Initial prompt", lines=2)
    chat_history = gr.Textbox(label="Conversation", lines=20)
    current_response = gr.Textbox(label="Current model response", lines=3)

    with gr.Row():
        pause_btn = gr.Button("Pause")
        edit_btn = gr.Button("Edit")
        rewind_btn = gr.Button("Rewind")
        resume_btn = gr.Button("Resume")
        restart_btn = gr.Button("Restart")
        clear_btn = gr.Button("Clear")

    with gr.Row():
        rewind_steps = gr.Slider(0, 10, 1, label="Steps to rewind")
        inserted_response = gr.Textbox(label="Insert response after rewind", lines=2)

    delay_slider = gr.Slider(0, 10, 3, label="Response Delay (seconds)")

    user_satisfaction_input = gr.Textbox(label="Are you satisfied with the result? (Yes/No)", visible=False)

    gr.Markdown("""
    ## Button Descriptions
    - **Pause**: Temporarily stops the conversation. The current model will finish its response.
    - **Edit**: Allows you to modify the last response in the conversation.
    - **Rewind**: Removes the specified number of last responses from the conversation.
    - **Resume**: Continues the conversation from where it was paused.
    - **Restart**: Begins a new conversation with the same or different models, keeping the initial prompt.
    - **Clear**: Resets everything, including loaded models, conversation history, and initial prompt.
    """)

    def on_chat_update(history, response):
        # Reveal the satisfaction prompt (and hide Start) once both models
        # report the task as done
        if response and "Models believe the task is complete" in response:
            return gr.update(visible=True), gr.update(visible=False)
        return gr.update(visible=False), gr.update(visible=True)

    start_btn = gr.Button("Start Conversation")
    chat_output = start_btn.click(
        chat,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input, chat_history],
        outputs=[chat_history, current_response]
    )

    chat_output.then(
        on_chat_update,
        inputs=[chat_history, current_response],
        outputs=[user_satisfaction_input, start_btn]
    )

    user_satisfaction_input.submit(
        user_satisfaction,
        inputs=[user_satisfaction_input, chat_history],
        outputs=[chat_history, current_response]
    ).then(
        chat,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input, chat_history],
        outputs=[chat_history, current_response]
    )

    pause_btn.click(pause_conversation, outputs=[current_response])
    resume_btn.click(
        resume_conversation,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input, chat_history, inserted_response],
        outputs=[chat_history, current_response]
    )
    edit_btn.click(edit_response, inputs=[current_response], outputs=[chat_history])
    rewind_btn.click(rewind_and_insert, inputs=[rewind_steps, inserted_response, chat_history], outputs=[chat_history, current_response])
    restart_btn.click(
        restart_conversation,
        inputs=[model1_dropdown, model2_dropdown, model1_custom, model2_custom, user_input],
        outputs=[chat_history, current_response]
    )

    def clear_all():
        # ConversationManager.clear_conversation returns None, so wrap it to
        # also blank the three textboxes wired as outputs
        manager.clear_conversation()
        return "", "", ""

    clear_btn.click(clear_all, outputs=[chat_history, current_response, user_input])
    delay_slider.change(lambda x: setattr(manager, 'delay', int(x)), inputs=[delay_slider])

if __name__ == "__main__":
    demo.queue()  # generator callbacks need the queue to stream updates (default on Gradio 4.x, required on 3.x)
    demo.launch()
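# Usage note: `python app.py` serves the UI on http://127.0.0.1:7860 by
# default; passing share=True to demo.launch() would additionally create a
# temporary public link, e.g. for testing from another machine.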