winglian committed
Commit e49e33e
Parent: 93cc712

da big reveal

use getter for default dict
state fixes, reveal fixes
add USER stop token for instruct models
attribute typo

Files changed (1)
  1. app.py +60 -40
app.py CHANGED
@@ -2,10 +2,12 @@ import concurrent
 import functools
 import logging
 import os
+import random
 import re
 import traceback
 import uuid
 import datetime
+from collections import defaultdict
 from time import sleep
 
 import boto3
@@ -34,7 +36,7 @@ class Pipeline:
         "seed": -1,
         "batch_size": 8,
         "threads": -1,
-        "stop": ["</s>"],
+        "stop": ["</s>", "USER:"],
     }
 
     def __call__(self, prompt):
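The new `"USER:"` entry stops generation before an instruct-tuned model can invent the next user turn. For illustration only (this helper is hypothetical, not part of the commit), the same guard can be applied client-side in case a backend echoes text past a stop string:

```python
# Hypothetical client-side mirror of the "stop" parameter above: trim the
# completion at the first stop sequence that appears in the text.
def truncate_at_stop(text: str, stops=("</s>", "USER:")) -> str:
    for stop in stops:
        idx = text.find(stop)
        if idx != -1:
            text = text[:idx]
    return text

print(truncate_at_stop("Paris.USER: and Spain?"))  # -> "Paris."
```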
@@ -77,8 +79,18 @@ class Pipeline:
         sleep(3)
 
 
-model_hermes = Pipeline("l96d2cswh431qj", "hermes-13b")
-model_manticore = Pipeline("jifr1oczbrmr3n", "manticore-13b")
+AVAILABLE_MODELS = {
+    "hermes-13b": "l96d2cswh431qj",
+    "manticore-13b": "jifr1oczbrmr3n",
+}
+
+_memoized_models = defaultdict()
+
+
+def get_model_pipeline(model_name):
+    if not _memoized_models.get(model_name):
+        _memoized_models[model_name] = Pipeline(AVAILABLE_MODELS[model_name], model_name)
+    return _memoized_models.get(model_name)
 
 start_message = """- The Assistant is helpful and transparent.
 - The Assistant will never refuse to answer anything asked by the user.
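Note on the getter: `defaultdict()` without a `default_factory` behaves like a plain `dict`, so the lazy construction here comes entirely from the explicit `.get()` check. Since `functools` is already imported, an equivalent sketch of the same memoization (an alternative formulation, not what the commit ships) would be:

```python
import functools

# Assumes Pipeline and AVAILABLE_MODELS as defined in the diff above.
@functools.lru_cache(maxsize=None)
def get_model_pipeline(model_name: str) -> "Pipeline":
    # Constructed once per model name, then served from the cache.
    return Pipeline(AVAILABLE_MODELS[model_name], model_name)
```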
@@ -90,16 +102,6 @@ def generate_responses(system_prompt, user_query, model_choice):
     # Prepare the prompt
     prompt = system_prompt + " " + user_query
 
-    # Generate responses from the selected models
-    if model_choice == 'Both':
-        response1 = model_hermes(prompt)[0]['generated_text']
-        response2 = model_manticore(prompt)[0]['generated_text']
-    else:
-        model = model_hermes if model_choice == 'Model 1' else model_manticore
-        response1 = model(prompt)[0]['generated_text']
-        response2 = model(prompt)[0]['generated_text']
-
-    return response1, response2
 
 
 def user(message, nudge_msg, history1, history2):
@@ -127,10 +129,14 @@ def chat(history1, history2, system_msg):
     messages1 = messages1.rstrip()
     messages2 = messages2.rstrip()
 
+    random_battle = random.sample(AVAILABLE_MODELS.keys(), 2)
+    model1 = get_model_pipeline(random_battle[0])
+    model2 = get_model_pipeline(random_battle[1])
+
     with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
         futures = []
-        futures.append(executor.submit(model_hermes, messages1))
-        futures.append(executor.submit(model_manticore, messages2))
+        futures.append(executor.submit(model1, messages1))
+        futures.append(executor.submit(model2, messages2))
 
         # Wait for all threads to finish...
         for future in concurrent.futures.as_completed(futures):
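One caveat on the pairing added above: `random.sample()` expects a sequence, and passing a `dict_keys` view is deprecated since Python 3.9 and raises `TypeError` on 3.11+. A version-safe variant of that line:

```python
import random

AVAILABLE_MODELS = {"hermes-13b": "l96d2cswh431qj", "manticore-13b": "jifr1oczbrmr3n"}

# Wrapping the keys view in list() makes the population an explicit sequence.
random_battle = random.sample(list(AVAILABLE_MODELS.keys()), 2)
```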
@@ -139,24 +145,24 @@ def chat(history1, history2, system_msg):
             print('Exception: {}'.format(future.exception()))
             traceback.print_exception(type(future.exception()), future.exception(), future.exception().__traceback__)
 
-    tokens_hermes = re.findall(r'\s*\S+\s*', futures[0].result()[0]['generated_text'])
-    tokens_manticore = re.findall(r'\s*\S+\s*', futures[1].result()[0]['generated_text'])
-    len_tokens_hermes = len(tokens_hermes)
-    len_tokens_manticore = len(tokens_manticore)
-    max_tokens = max(len_tokens_hermes, len_tokens_manticore)
+    tokens_model1 = re.findall(r'\s*\S+\s*', futures[0].result()[0]['generated_text'])
+    tokens_model2 = re.findall(r'\s*\S+\s*', futures[1].result()[0]['generated_text'])
+    len_tokens_model1 = len(tokens_model1)
+    len_tokens_model2 = len(tokens_model2)
+    max_tokens = max(len_tokens_model1, len_tokens_model2)
     for i in range(0, max_tokens):
-        if i <= len_tokens_hermes:
-            answer1 = tokens_hermes[i]
+        if i <= len_tokens_model1:
+            answer1 = tokens_model1[i]
             history1[-1][1] += answer1
-        if i <= len_tokens_manticore:
-            answer2 = tokens_manticore[i]
+        if i <= len_tokens_model2:
+            answer2 = tokens_model2[i]
             history2[-1][1] += answer2
         # stream the response
-        yield history1, history2, ""
+        yield history1, history2, "", gr.update(value=random_battle[0]), gr.update(value=random_battle[1]), {"models": [model1.name, model2.name]}
         sleep(0.15)
 
 
-def chosen_one(label, choice0_history, choice1_history, system_msg, nudge_msg, rlhf_persona):
+def chosen_one(label, choice0_history, choice1_history, system_msg, nudge_msg, rlhf_persona, state):
     # Generate a uuid for each submission
     arena_battle_id = str(uuid.uuid4())
 
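The renamed loop keeps the original `i <= len_tokens_model1` bound, which can raise `IndexError` once `i` reaches the length of the shorter token list. A bounds-safe sketch of the streaming step (simplified: the committed version also yields the reveal and state updates):

```python
from time import sleep

def stream_tokens(history1, history2, tokens_model1, tokens_model2):
    # "<" rather than "<=" keeps every index in range for both token lists.
    max_tokens = max(len(tokens_model1), len(tokens_model2))
    for i in range(max_tokens):
        if i < len(tokens_model1):
            history1[-1][1] += tokens_model1[i]
        if i < len(tokens_model2):
            history2[-1][1] += tokens_model2[i]
        yield history1, history2, ""  # simplified yield tuple
        sleep(0.15)
```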
@@ -170,9 +176,9 @@ def chosen_one(label, choice0_history, choice1_history, system_msg, nudge_msg, r
         'timestamp': timestamp,
         'system_msg': system_msg,
         'nudge_prefix': nudge_msg,
-        'choice0_name': model_hermes.name,
+        'choice0_name': state["models"][0],
         'choice0': choice0_history,
-        'choice1_name': model_manticore.name,
+        'choice1_name': state["models"][1],
         'choice1': choice1_history,
         'label': label,
         'rlhf_persona': rlhf_persona,
@@ -191,7 +197,6 @@ with gr.Blocks() as arena:
     - Due to limitations of Runpod Serverless, it cannot stream responses immediately
     - Responses WILL take AT LEAST 30 seconds to respond, probably longer
     - For now, this is single turn only
-    - For now, Hermes 13B on the left, Manticore on the right.
     """)
     with gr.Tab("Chatbot"):
         with gr.Row():
@@ -202,12 +207,17 @@ with gr.Blocks() as arena:
         with gr.Row():
             choose1 = gr.Button(value="Prefer left", variant="secondary", visible=False).style(full_width=True)
             choose2 = gr.Button(value="Prefer right", variant="secondary", visible=False).style(full_width=True)
+        with gr.Row():
+            reveal1 = gr.Textbox(label="Model Name", value="", interactive=False, visible=False).style(full_width=True)
+            reveal2 = gr.Textbox(label="Model Name", value="", interactive=False, visible=False).style(full_width=True)
+        with gr.Row():
+            dismiss_reveal = gr.Button(value="Dismiss & Continue", variant="secondary", visible=False).style(full_width=True)
         with gr.Row():
             with gr.Column():
                 rlhf_persona = gr.Textbox(
-                    "", label="Persona Tags", interactive=True, visible=True, placeholder="Tell us about how you are judging the quality. like #SFW #NSFW #helpful #ethical #creativity", lines=1)
+                    "", label="Persona Tags", interactive=True, visible=True, placeholder="Tell us about how you are judging the quality. ex: #SFW #NSFW #helpful #ethical #creativity", lines=1)
                 message = gr.Textbox(
-                    label="What do you want to chat about?",
+                    label="What do you want to ask?",
                     placeholder="Ask me anything.",
                     lines=3,
                 )
@@ -226,6 +236,7 @@ with gr.Blocks() as arena:
     ### TBD
     - This is very much a work-in-progress, if you'd like to help build this out, join us on [Discord](https://discord.gg/QYF8QrtEUm)
     """)
+    state = gr.State({})
 
     clear.click(lambda: None, None, chatbot1, queue=False)
     clear.click(lambda: None, None, chatbot2, queue=False)
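`gr.State` gives each browser session its own value, which is how the anonymized pairing chosen in `chat` reaches the `chosen_one_*` logging calls. A minimal, self-contained sketch of the pattern (component names here are illustrative, not from the app):

```python
import gradio as gr

with gr.Blocks() as demo:
    state = gr.State({})             # per-session dict, like the arena's `state`
    status = gr.Textbox(label="Status")
    save = gr.Button("Save")

    def remember(session):
        # One handler writes into the session dict; later events read it
        # back by listing `state` in their inputs.
        session["models"] = ["model-a", "model-b"]
        return session, f"stored {session['models']}"

    save.click(remember, inputs=[state], outputs=[state, status])
```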
@@ -242,7 +253,7 @@ with gr.Blocks() as arena:
     ).then(
         fn=user, inputs=[message, nudge_msg, chatbot1, chatbot2], outputs=[message, nudge_msg, chatbot1, chatbot2], queue=True
     ).then(
-        fn=chat, inputs=[chatbot1, chatbot2, system_msg], outputs=[chatbot1, chatbot2, message], queue=True
+        fn=chat, inputs=[chatbot1, chatbot2, system_msg], outputs=[chatbot1, chatbot2, message, reveal1, reveal2, state], queue=True
     ).then(
         lambda *args: (
             gr.update(visible=False, interactive=False),
@@ -255,34 +266,43 @@ with gr.Blocks() as arena:
     )
 
     choose1_click_event = choose1.click(
-        fn=chosen_one_first, inputs=[chatbot1, chatbot2, system_msg, nudge_msg, rlhf_persona], outputs=[], queue=True
+        fn=chosen_one_first, inputs=[chatbot1, chatbot2, system_msg, nudge_msg, rlhf_persona, state], outputs=[], queue=True
     ).then(
         lambda *args: (
-            gr.update(visible=True, interactive=True),
             gr.update(visible=False),
             gr.update(visible=False),
             gr.update(visible=True),
             gr.update(visible=True),
-            None,
-            None,
+            gr.update(visible=True),
         ),
-        inputs=[], outputs=[message, choose1, choose2, clear, submit, chatbot1, chatbot2], queue=True
+        inputs=[], outputs=[choose1, choose2, dismiss_reveal, reveal1, reveal2], queue=True
     )
 
     choose2_click_event = choose2.click(
-        fn=chosen_one_second, inputs=[chatbot1, chatbot2, system_msg, nudge_msg, rlhf_persona], outputs=[], queue=True
+        fn=chosen_one_second, inputs=[chatbot1, chatbot2, system_msg, nudge_msg, rlhf_persona, state], outputs=[], queue=True
     ).then(
+        lambda *args: (
+            gr.update(visible=False),
+            gr.update(visible=False),
+            gr.update(visible=True),
+            gr.update(visible=True),
+            gr.update(visible=True),
+        ),
+        inputs=[], outputs=[choose1, choose2, dismiss_reveal, reveal1, reveal2], queue=True
+    )
+
+    dismiss_click_event = dismiss_reveal.click(
         lambda *args: (
             gr.update(visible=True, interactive=True),
             gr.update(visible=False),
             gr.update(visible=False),
+            gr.update(visible=False),
             gr.update(visible=True),
             gr.update(visible=True),
             None,
             None,
         ),
-        inputs=[], outputs=[message, choose1, choose2, clear, submit, chatbot1, chatbot2], queue=True
+        inputs=[], outputs=[message, choose1, choose2, dismiss_reveal, clear, submit, chatbot1, chatbot2], queue=True
     )
 
-
 arena.queue(concurrency_count=5, max_size=16).launch(debug=True, server_name="0.0.0.0", server_port=7860)
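Taken together, the wiring above encodes a three-step review flow: vote, reveal the model names, then dismiss to start a fresh round. A sketch of the visibility transitions the lambdas implement (named functions and comments added for readability; this mirrors the diff rather than replacing it):

```python
def on_choose():
    # After a vote: hide the vote buttons, show the reveal boxes and dismiss button.
    return (
        gr.update(visible=False),  # choose1
        gr.update(visible=False),  # choose2
        gr.update(visible=True),   # dismiss_reveal
        gr.update(visible=True),   # reveal1
        gr.update(visible=True),   # reveal2
    )

def on_dismiss():
    # After dismissal: re-enable input, hide the reveal UI, clear both chats.
    return (
        gr.update(visible=True, interactive=True),  # message
        gr.update(visible=False),                   # choose1
        gr.update(visible=False),                   # choose2
        gr.update(visible=False),                   # dismiss_reveal
        gr.update(visible=True),                    # clear
        gr.update(visible=True),                    # submit
        None,                                       # chatbot1
        None,                                       # chatbot2
    )
```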