ffreemt committed on
Commit
b590c0b
1 Parent(s): 7bf25b0
Files changed (1)
  1. app.py +18 -9
app.py CHANGED
@@ -30,10 +30,12 @@ def predict0(prompt, bot):
             print(word, end="", flush=True)
         print("")
         response = word
+        logger.debug(f"{response=}")
     except Exception as exc:
         logger.error(exc)
         response = f"{exc=}"
-    bot = {"inputs": [response]}
+    # bot = {"inputs": [response]}
+    bot = [(prompt, response)]
 
     return prompt, bot
 
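The bot change above reflects the format gr.Chatbot expects: its value is a list of (user_message, assistant_message) pairs, not an arbitrary dict, which is why {"inputs": [response]} becomes [(prompt, response)]. A minimal, hypothetical sketch of that contract (not code from this repo, assuming the Gradio 3.x tuple-based Chatbot format):

import gradio as gr

def echo(prompt, bot):
    # Build a one-turn history, mirroring bot = [(prompt, response)] in the diff above.
    response = f"echo: {prompt}"
    bot = [(prompt, response)]  # gr.Chatbot takes a list of (user, assistant) tuples
    return prompt, bot

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    msg.submit(echo, inputs=[msg, chatbot], outputs=[msg, chatbot])

# demo.launch()

Note that bot = [(prompt, response)] resets the history to a single turn; appending with bot + [(prompt, response)] would preserve earlier turns.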
@@ -244,19 +246,26 @@ generation_config = GenerationConfig(
 user_prefix = "[user]: "
 assistant_prefix = "[assistant]: "
 
+css = """
+    .disclaimer {font-variant-caps: all-small-caps; font-size: xx-small;}
+    .intro {font-size: x-small;}
+"""
+
 with gr.Blocks(
     theme=gr.themes.Soft(),
-    css=".disclaimer {font-variant-caps: all-small-caps; font-size: small;}",
+    css=css,
 ) as demo:
-    gr.Markdown(
-        """<h1><center>MosaicML MPT-30B-Chat</center></h1>
+    with gr.Accordion("🎈 Info", open=False):
+        gr.Markdown(
+            """<h4><center>mosaicml mpt-30b-chat</center></h4>
 
-        This demo is of [MPT-30B-Chat](https://huggingface.co/mosaicml/mpt-30b-chat). It is based on [MPT-30B](https://huggingface.co/mosaicml/mpt-30b) fine-tuned on approximately 300,000 turns of high-quality conversations, and is powered by [MosaicML Inference](https://www.mosaicml.com/inference).
+            This demo is of [MPT-30B-Chat](https://huggingface.co/mosaicml/mpt-30b-chat). It is based on [MPT-30B](https://huggingface.co/mosaicml/mpt-30b) fine-tuned on approximately 300,000 turns of high-quality conversations, and is powered by [MosaicML Inference](https://www.mosaicml.com/inference).
 
-        If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs, [sign up](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-30b) for MosaicML platform.
+            If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs, [sign up](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-30b) for MosaicML platform.
 
-        """
-    )
+            """,
+            elem_classes="intro"
+        )
     conversation = Chat()
     chatbot = gr.Chatbot().style(height=200)  # 500
     with gr.Row():
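The CSS change above separates styling from wiring: gr.Blocks takes page-level CSS through its css argument, and elem_classes attaches one of those classes to a component, so the .intro rule now styles the Markdown block inside the collapsed Accordion. A minimal, hypothetical sketch (not this app's code, assuming Gradio 3.x):

import gradio as gr

# Class rules injected into the page; .intro is referenced by elem_classes below.
css = """
.intro {font-size: x-small;}
"""

with gr.Blocks(css=css) as demo:
    with gr.Accordion("Info", open=False):  # collapsed until the user expands it
        gr.Markdown("Intro text styled by the .intro rule.", elem_classes="intro")

# demo.launch()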
@@ -354,7 +363,7 @@ with gr.Blocks(
         fn=predict0,
         inputs=[msg, chatbot],
         outputs=[msg, chatbot],
-        queue=False,
+        queue=True,
     )
 
 demo.queue(max_size=36, concurrency_count=14).launch(debug=True)
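The queue change ties the submit event to the app-level queue: with queue=True the event is processed through demo.queue(...), where max_size caps waiting requests and concurrency_count sets how many run in parallel (Gradio 3.x API). A minimal, hypothetical sketch with illustrative values:

import gradio as gr

with gr.Blocks() as demo:
    msg = gr.Textbox()
    out = gr.Textbox()
    # queue=True routes this event through the queue configured below.
    msg.submit(lambda s: s.upper(), inputs=msg, outputs=out, queue=True)

# Allow up to 36 waiting requests and run up to 14 at once.
demo.queue(max_size=36, concurrency_count=14)
# demo.launch(debug=True)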