# testing_llm / app.py
import gradio as gr
import requests
import os
from PIL import Image, ImageDraw, ImageFont

## BLOOM inference API setup
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
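# Note: HF_TOKEN must be available in the environment (e.g. as a Space secret);
# os.environ["HF_TOKEN"] raises a KeyError if it is not set.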
prompt1 = """
word: risk
poem using word: And then the day came,
when the risk
to remain tight
in a bud
was more painful
than the risk
it took
to blossom.
word: """
prompt2 = """
Q: Joy has 5 balls. He buys 2 more cans of balls. Each can has 3 balls. How many balls does he have now?
A: Joy had 5 balls. 2 cans of 3 balls each is 6 balls. 5 + 6 = 11. The answer is 11.
Q: Jane has 16 balls. Half of the balls are golf balls, and half of the golf balls are red. How many red golf balls are there?
A: """
prompt3 = """Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?
A: Let’s think step by step.
"""
# Complete the sentence below in a fun way.
prompt4 = """Distracted from: hubble
by: james webb
Distracted from: homework
by: side project
Distracted from: goals
by: new goals
Distracted from:
"""
def write_on_image(final_solution):
    """Draw the meme captions onto the 'distracted boyfriend' template image."""
    print("************ Inside write_on_image ***********")
    image_path0 = "./distracted0.jpg"
    image0 = Image.open(image_path0)
    I1 = ImageDraw.Draw(image0)
    myfont = ImageFont.truetype('./font1.ttf', 30)
    # Work in progress: the generated text (final_solution) is not used yet;
    # the three caption positions are filled with placeholder labels.
    I1.text((613, 89), "girlfriend", font=myfont, fill=(255, 255, 255))
    I1.text((371, 223), "ME", font=myfont, fill=(255, 255, 255))
    I1.text((142, 336), "new girl", font=myfont, fill=(255, 255, 255))
    return image0
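# A possible next step (a sketch, not wired into the app): parse the model's
# "Distracted from: X / by: Y" completion and draw it onto the meme instead of
# the hardcoded placeholders above. The helper name and the parsing rules are
# assumptions; the image path, font path and text coordinates reuse the ones above.
def write_solution_on_image(final_solution):
    image0 = Image.open("./distracted0.jpg")
    draw = ImageDraw.Draw(image0)
    myfont = ImageFont.truetype('./font1.ttf', 30)
    # Keep the last "Distracted from:" / "by:" values seen, so that with
    # return_full_text=True the newly generated pair wins over the echoed examples.
    distracted_from, distracted_by = "girlfriend", "new girl"  # fallback labels
    for line in final_solution.splitlines():
        lowered = line.lower()
        if lowered.startswith("distracted from:") and line.split(":", 1)[1].strip():
            distracted_from = line.split(":", 1)[1].strip()
        elif lowered.startswith("by:") and line.split(":", 1)[1].strip():
            distracted_by = line.split(":", 1)[1].strip()
    draw.text((613, 89), distracted_from, font=myfont, fill=(255, 255, 255))
    draw.text((371, 223), "ME", font=myfont, fill=(255, 255, 255))
    draw.text((142, 336), distracted_by, font=myfont, fill=(255, 255, 255))
    return image0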
def meme_generate(img, prompt):
    """Send the prompt to the BLOOM inference API and write the result onto the meme."""
    # Note: img (the template image shown in the UI) is currently unused here.
    print(f"*****Inside meme_generate - Prompt is :{prompt}")
    json_ = {
        "inputs": prompt,
        "parameters": {
            "top_p": 0.90,           # previously tried 0.95
            # "top_k": 0,
            "max_new_tokens": 250,
            "temperature": 1.1,
            # "num_return_sequences": 3,
            "return_full_text": True,
            "do_sample": True,
        },
        "options": {
            "use_cache": True,
            "wait_for_model": True,  # wait for BLOOM to load instead of returning an error
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    print(f"Response is : {response}")
    output = response.json()
    print(f"output is : {output}")
    output_tmp = output[0]['generated_text']
    print(f"output_tmp is: {output_tmp}")
    # Trim at the first follow-up "Q:" the model may invent (relevant for the Q&A-style prompts).
    solution = output_tmp.split("\nQ:")[0]
    print(f"Final response after splits is: {solution}")
    # Drop anything after an "Output:" marker or the first blank line.
    if '\nOutput:' in solution:
        final_solution = solution.split("\nOutput:")[0]
        print(f"Response after removing output is: {final_solution}")
    elif '\n\n' in solution:
        final_solution = solution.split("\n\n")[0]
        print(f"Response after removing new line entries is: {final_solution}")
    else:
        final_solution = solution
    meme_image = write_on_image(final_solution)
    return meme_image, final_solution
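# A possible hardening step (a sketch, not used above): on errors such as rate
# limits or oversized inputs the Inference API can return a JSON object like
# {"error": "..."} instead of a list of generations, in which case output[0]
# above would fail. The hypothetical helper below surfaces that case explicitly.
def query_bloom(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    output = response.json()
    if isinstance(output, dict) and "error" in output:
        raise RuntimeError(f"Inference API error: {output['error']}")
    return output[0]["generated_text"]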
demo = gr.Blocks()
with demo:
gr.Markdown("<h1><center>Testing</center></h1>")
gr.Markdown(
"""Work In Progress"""
)
with gr.Row():
        in_image = gr.Image(value="./distracted0.jpg")
        input_prompt = gr.Textbox(label="Write some prompt...", lines=5)
    with gr.Row():
        output_image = gr.Image()
        output_prompt = gr.Textbox(label="Text generated", lines=5)
    b1 = gr.Button("Generate")
    b1.click(meme_generate, inputs=[in_image, input_prompt], outputs=[output_image, output_prompt])
demo.launch(enable_queue=True, debug=True)
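# To run locally (assuming a Gradio 3.x environment, where enable_queue is still
# a valid launch argument):  HF_TOKEN=<your token> python app.py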