import gradio as gr
import requests
import os

# Bloom inference API endpoint
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
# Tested various prompts initially
prompt1 = """
Q: Joy has 5 balls. He buys 2 more cans of balls. Each can has 3 balls. How many balls does he have now?
A: Joy had 5 balls. 2 cans of 3 balls each is 6 balls. 5 + 6 = 11. The answer is 11.
Q: Jane has 16 balls. Half of the balls are golf balls, and half of the golf balls are red. How many red golf balls are there?
A: """

prompt2 = """Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?
A: Let’s think step by step.
"""
def text_generate(problem, template, prompt):
    # Prints to debug
    print(f"*****Inside text_generate function******")
    print(f"Problem is :{problem}")
    print(f"Template is :{template}")
    print(f"Prompt is :{prompt}")
    # If no sample problem/template was selected, fall back to the free-form prompt
    if len(problem) == 0 and len(template) == 0:
        p = prompt
    else:
        p = problem + "A: " + template  # + "\n"
    print(f"Final prompt is : {p}")
| json_ = {"inputs": p, | |
| "parameters": | |
| { | |
| "top_p": 0.9, | |
| "temperature": 1.1, | |
| "max_new_tokens": 64, | |
| "return_full_text": True | |
| }, "options": | |
| { | |
| "use_cache": True, | |
| "wait_for_model":True | |
| },} | |
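    # POST to the hosted Inference API; wait_for_model=True makes the request wait
    # while the model loads instead of failing immediately.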
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    # Keep only the answer to the current question: cut the generation off at the next "Q:"
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")
    return solution

demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>Step By Step With Bloom</center></h1>")
    gr.Markdown(
        """[BigScienceW Bloom](https://twitter.com/BigscienceW) \n\n Large language models have demonstrated a capability for 'chain-of-thought' reasoning. A group of amazing researchers ([Kojima et al.](https://arxiv.org/abs/2205.11916)) recently found that simply adding **Let's think step by step** to a prompt improves a model's zero-shot performance. Some might say you can get good results out of LLMs if you know how to speak to them. This Space is an attempt at inspecting this LLM behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model. \n\nThis Space was created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) as a demo for EuroPython 2022.\nThis Space might sometimes fail when the inference queue is full; the logs will then show an error like *queue full, try again later*. Don't despair and try again after some time. I will also keep improving the app over the next couple of days."""
    )
    with gr.Row():
        #example_prompt = gr.Radio( ["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: Let’s think step by step.\n", "Q: Roger has 5 tennis balls already. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\nA: Let’s think step by step.\n", "Q: On an average Joe throws 25 punches per minute. His fight lasts 5 rounds of 3 minutes each. How many punches did he throw?\nA: Let’s think about this logically.\n"], label= "Choose a sample Prompt")
        example_problem = gr.Radio(["Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\n", "Q: Roger has 5 tennis balls already. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now?\n", "Q: On average, Joe throws 25 punches per minute. His fight lasts 5 rounds of 3 minutes each. How many punches did he throw?\n"], label="Choose a sample Problem:")
        example_template = gr.Radio(["Let’s think step by step.\n", " First, ", " Let’s think about this logically.\n", "Let’s solve this problem by splitting it into steps.\n", " Let’s be realistic and think step by step.\n", "Let’s think like a detective step by step.\n", "Let’s think ", "Before we dive into the answer, ", "The answer is after the proof.\n"], label="Choose a corresponding Template for Zero-Shot CoT:")
        generated_txt = gr.Textbox(lines=10)
    with gr.Row():
        input_prompt = gr.Textbox(value="Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?\nA: Let’s think step by step.\n", label="Or skip the examples above, enter your own prompt modeled on them, and submit...")
        b1 = gr.Button("Generate Text")
    b1.click(text_generate, inputs=[example_problem, example_template, input_prompt], outputs=generated_txt)  # example_prompt
    with gr.Row():
        gr.Markdown("")

demo.launch(enable_queue=True, debug=True)