import gradio as gr
import requests
import os
# BLOOM Inference API endpoint
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
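# In a Hugging Face Space, HF_TOKEN is typically stored as a repository secret,
# which the platform exposes to the app as an environment variable; note that
# os.environ raises a KeyError if it is missing.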
def text_generate(prompt):
    """Send the few-shot prompt to BLOOM and return the generated solution."""
    print(f"Prompt is: {prompt}")
    # Append the cue so the model continues with a solution rather than a new question.
    p = prompt + " Solution: "
    print(f"Final prompt is: {p}")
    json_ = {
        "inputs": p,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            "max_new_tokens": 250,
            "return_full_text": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is: {response}")
    output = response.json()
    print(f"Output is: {output}")
    if isinstance(output, dict) and "error" in output:
        # The Inference API returns an error payload (e.g. while the model loads).
        return f"Error: {output['error']}"
    output_tmp = output[0]["generated_text"]
    print(f"output_tmp is: {output_tmp}")
    # Trim the continuation if the model starts hallucinating a follow-up question.
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")
    return solution
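
# A minimal sketch of calling the function directly, outside the UI
# (hypothetical one-shot prompt; the completion depends on the sampling settings):
#
#   example = ("Question: The coin is heads up. (1) Then Amy flips. "
#              "Is the coin still heads up?")
#   print(text_generate(example))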
demo = gr.Blocks()

with demo:
    gr.Markdown("<h1><center>Length Generalization (LG) with BLOOM 🌸</center></h1>")
    gr.Markdown(
        """
This Space is a work in progress; BLOOM does not support inference on long sequences, so you may need to try shorter ones. \n
We will examine large language models' ability to extrapolate to longer problems! \n
Length generalization (LG) is important: long examples are often rare and intrinsically more difficult, yet they are the ones we care about most. \n
The recent paper [Exploring Length Generalization in Large Language Models](https://arxiv.org/pdf/2207.04901) found that few-shot [scratchpad](https://arxiv.org/abs/2112.00114) prompting, a combination behind many strong LLM results (e.g. Minerva), \n
leads to **substantial improvements in length generalization!** \n
In-context learning enables variable-length pattern matching, producing solutions of the correct length. \n
This Space is an attempt at inspecting this LLM behavior/capability in the new HuggingFace BigScienceW [Bloom](https://huggingface.co/bigscience/bloom) model. \n
This Space was created by [Muhtasham Oblokulov](https://twitter.com/muhtahsam9) for the EuroPython 2022 demo. \n
"""
    )
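    # The default prompt below follows the few-shot scratchpad format: steps are
    # numbered in countdown order and the worked solution mirrors that numbering,
    # which is what lets the model match the solution length to the question length.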
    with gr.Row():
        input_prompt = gr.Textbox(
            value="Question: The coin is heads up. (3) Then Kara flips. (2) Then Luke doesn't flip. (1) Then Austin flips. Is the coin still heads up? Solution: Coin is initially heads up. (3) After Kara flips, coin becomes tails. (2) Luke doesn't flip, so coin stays tails. (1) After Austin flips, coin turns to heads.\nQuestion: The coin is heads up. (20) Then Anthony doesn't flip. (19) Then Eric flips. (18) Then Elizabeth doesn't flip. (17) Then Adam doesn't flip. (16) Then Melissa flips. (15) Then Kevin flips. (14) Then Steven flips. (13) Then Thomas flips. (12) ... (5) Then Jeffrey flips. (4) Then Amy flips. (3) Then Crystal flips. (2) Then Michelle doesn't flip. (1) Then Jeremy flips. Is the coin still heads up?",
            label="Enter your few-shot examples followed by your query:",
        )
    generated_txt = gr.Textbox(lines=10, label="Generated Solution:")
    b1 = gr.Button("Generate Text")
    b1.click(text_generate, inputs=[input_prompt], outputs=[generated_txt])
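
# Queueing keeps long-running BLOOM generations from hitting the default
# request timeout while the app waits on the Inference API.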
demo.launch(enable_queue=True, debug=True)