ysharma HF staff committed on
Commit
b791fd8
1 Parent(s): 4a799d6
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -49,9 +49,13 @@ def text_generate(problem, template, prompt):
49
  {
50
  "top_p": 0.9,
51
  "temperature": 1.1,
52
- "max_new_tokens": 250,
53
- "return_full_text": True
54
- }}
 
 
 
 
55
  response = requests.post(API_URL, headers=headers, json=json_)
56
  print(f"Response is : {response}")
57
  output = response.json()
@@ -69,7 +73,7 @@ demo = gr.Blocks()
69
  with demo:
70
  gr.Markdown("<h1><center>Step By Step With Bloom</center></h1>")
71
  gr.Markdown(
72
- """ APOLOGIES WIP FIXING SOMETHING. [BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability of 'Chain-of-thought reasoning'. A group of amazing researchers( [Jason Wei et al.](https://arxiv.org/abs/2205.11916)) recently found out that by adding **Lets think step by step** it improves the model's zero-shot performance. Some might say — You can get good results out of LLMs if you know how to speak to them. This space is an attempt at inspecting this LLM behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model. \n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) for EuroPython 2022 Demo.\nThis Space might sometime fail due to inference queue being full and logs would end up showing error as *queue full, try again later*, don't despair and try again after some time. I would try and improve the app as well over next couple days."""
73
  )
74
  with gr.Row():
75
 
 
49
  {
50
  "top_p": 0.9,
51
  "temperature": 1.1,
52
+ #"max_new_tokens": 250,
53
+ "return_full_text": False
54
+ }, "options":
55
+ {
56
+ "use_cache": True,
57
+ "wait_for_model":True
58
+ }}
59
  response = requests.post(API_URL, headers=headers, json=json_)
60
  print(f"Response is : {response}")
61
  output = response.json()
 
73
  with demo:
74
  gr.Markdown("<h1><center>Step By Step With Bloom</center></h1>")
75
  gr.Markdown(
76
+ """[BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability of 'Chain-of-thought reasoning'. A group of amazing researchers( [Jason Wei et al.](https://arxiv.org/abs/2205.11916)) recently found out that by adding **Lets think step by step** it improves the model's zero-shot performance. Some might say — You can get good results out of LLMs if you know how to speak to them. This space is an attempt at inspecting this LLM behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model. \n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) for EuroPython 2022 Demo.\nThis Space might sometime fail due to inference queue being full and logs would end up showing error as *queue full, try again later*, don't despair and try again after some time. I would try and improve the app as well over next couple days."""
77
  )
78
  with gr.Row():
79