ysharma HF staff committed on
Commit c3d5815
1 Parent(s): ab6dafa
Files changed (1)
  1. app.py +13 -9
app.py CHANGED
@@ -31,15 +31,18 @@ A: Let’s think step by step.
 """
 
 
-def text_generate(problem, template): #prompt
+def text_generate(problem, template, prompt):
 
   #prints to debug
   print(f"*****Inside text_generate function******")
-  #print(f"Prompt is :{prompt}")
   print(f"Problem is :{problem}")
   print(f"Template is :{template}")
+  print(f"Prompt is :{prompt}")
 
-  p = problem + "A: " + template #+ "\n"
+  if len(problem) == 0 and len(template) == 0:
+    p = prompt
+  else:
+    p = problem + "A: " + template #+ "\n"
   print(f"Final prompt is : {p}")
   json_ = {"inputs": p,
            "parameters":
@@ -66,21 +69,22 @@ demo = gr.Blocks()
 with demo:
   gr.Markdown("<h1><center>Step By Step With Bloom</center></h1>")
   gr.Markdown(
-    """ [BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability for 'Chain-of-thought reasoning'. A group of amazing researchers ([Jason Wei et al.](https://arxiv.org/abs/2205.11916)) recently found that adding **Let's think step by step** improves a model's zero-shot performance. Some might say you can get good results out of LLMs if you know how to speak to them. This Space is an attempt at inspecting this LLM behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model. \n\nThis Space was created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) as a demo for EuroPython 2022.\nThis Space might sometimes fail when the inference queue is full; the logs will then show the error *queue full, try again later*. Don't despair, just try again after some time. I will keep improving the app over the next couple of days."""
+    """ APOLOGIES WIP FIXING SOMETHING. [BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability for 'Chain-of-thought reasoning'. A group of amazing researchers ([Jason Wei et al.](https://arxiv.org/abs/2205.11916)) recently found that adding **Let's think step by step** improves a model's zero-shot performance. Some might say you can get good results out of LLMs if you know how to speak to them. This Space is an attempt at inspecting this LLM behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model. \n\nThis Space was created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) as a demo for EuroPython 2022.\nThis Space might sometimes fail when the inference queue is full; the logs will then show the error *queue full, try again later*. Don't despair, just try again after some time. I will keep improving the app over the next couple of days."""
   )
   with gr.Row():
 
     #example_prompt = gr.Radio( ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: Let’s think step by step.\n", "Q: Roger has 5 tennis balls already. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\nA: Let’s think step by step.\n", "Q: On an average Joe throws 25 punches per minute. His fight lasts 5 rounds of 3 minutes each. How many punches did he throw?\nA: Let’s think about this logically.\n"], label= "Choose a sample Prompt")
 
-    example_problem = gr.Radio( ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\n", "Q: Roger has 5 tennis balls already. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\n", "Q: On an average Joe throws 25 punches per minute. His fight lasts 5 rounds of 3 minutes each. How many punches did he throw?\n"], label= "Choose a sample Problem")
+    example_problem = gr.Radio( ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\n", "Q: Roger has 5 tennis balls already. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\n", "Q: On an average Joe throws 25 punches per minute. His fight lasts 5 rounds of 3 minutes each. How many punches did he throw?\n"], label= "Choose a sample Problem and corresponding Template for Zero-Shot CoT:")
 
-    example_template = gr.Radio( ["Let’s think step by step.\n"," First, ", " Let’s think about this logically.\n", "Let’s solve this problem by splitting it into steps.\n", " Let’s be realistic and think step by step.\n", "Let’s think like a detective step by step.\n", "Let’s think ", "Before we dive into the answer, ", "The answer is after the proof.\n"], label= "Choose a sample Template for Zero-Shot CoT")
+    example_template = gr.Radio( ["Let’s think step by step.\n"," First, ", " Let’s think about this logically.\n", "Let’s solve this problem by splitting it into steps.\n", " Let’s be realistic and think step by step.\n", "Let’s think like a detective step by step.\n", "Let’s think ", "Before we dive into the answer, ", "The answer is after the proof.\n"], label= "Choose a sample Problem and corresponding Template for Zero-Shot CoT:")
 
-    #input_word = gr.Textbox(placeholder="Enter a word here to generate text ...")
     generated_txt = gr.Textbox(lines=10)
-
+
+  with gr.Row():
+    input_prompt = gr.Textbox(placeholder="Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: Let’s think step by step.\n", label="Or don't select from the examples above; just enter your own prompt, drawing on them, and submit... ")
 
   b1 = gr.Button("Generate Text")
-  b1.click(text_generate, inputs=[example_problem, example_template], outputs=generated_txt) #example_prompt
+  b1.click(text_generate, inputs=[example_problem, example_template, input_prompt], outputs=generated_txt) #example_prompt
 
 demo.launch(enable_queue=True, debug=True)
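Both hunks stop at the "parameters": key of the request payload, so the rest of the inference call is not part of this commit. A minimal sketch, assuming the Space posts this payload to the hosted BLOOM inference endpoint; the URL, token handling, parameter values, and response parsing below are assumptions for illustration, not code from this commit:

# Hypothetical sketch (not part of this commit): one way the truncated
# request started in text_generate could be completed.
import requests

API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"  # assumed endpoint

def query_bloom(p, hf_token=None):
    # Payload mirrors the shape started in the diff: {"inputs": p, "parameters": {...}}
    json_ = {"inputs": p,
             "parameters": {"max_new_tokens": 64,      # assumed generation settings
                            "temperature": 0.7,
                            "return_full_text": True}}
    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token else {}
    response = requests.post(API_URL, headers=headers, json=json_)
    response.raise_for_status()
    # The hosted text-generation task returns a list of {"generated_text": ...} objects
    return response.json()[0]["generated_text"]

Under that assumption, text_generate would end with something like return query_bloom(p), whose result Gradio routes into generated_txt via the b1.click wiring shown above.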