# Gradio Space: few-shot poem generation with GPT-J-6B, then text-to-image
# rendering via the multimodalart/latentdiffusion Space.
import os

import gradio as gr
import requests

# GPT-J-6B hosted inference endpoint (HuggingFace Inference API).
API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-j-6B"
# SECURITY FIX: the bearer token was previously hard-coded here — a leaked
# credential committed to source. Read it from the environment instead;
# the old token should be revoked on huggingface.co.
headers = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}
# Few-shot seed text: pre-fills the input textbox and is prepended to the
# model prompt in poem_generate.
prompt = """Oh, my life
is changing every day
Every possible way
And oh, my dreams,
it's never quite as it seems
Never quite as it seems"""
#examples = [["mind"], ["memory"], ["sleep"],["wellness"],["nutrition"]]
def poem2_generate(word):
    """Generate a short poem for *word* via the GPT-J-6B Inference API.

    Posts a prompt to ``API_URL`` and trims the completion: if the text
    contains a blank line, keep only the first paragraph; otherwise cut at
    the first period (inclusive), or failing that at the last newline.
    Question marks are removed from the result.

    Returns the trimmed poem string. Raises KeyError/IndexError if the API
    responds with an error payload instead of a generation list.
    """
    # NOTE(review): the word comes *before* the instruction here
    # ("<word>\npoem using word: "), which looks inverted — confirm intent.
    p = word.lower() + "\n" + "poem using word: "
    # BUG FIX: these Markdown debug messages were plain strings, so "{p}"
    # etc. were never interpolated; made them real f-strings.
    gr.Markdown(f"Prompt is :{p}")
    json_ = {
        "inputs": p,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            "max_new_tokens": 50,
            "return_full_text": False,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    output = response.json()
    gr.Markdown(f"error? Reason is : {output}")
    output_tmp = output[0]['generated_text']
    gr.Markdown(f"GPTJ response without splits is: {output_tmp}")
    # (Removed a dead pre-assignment of `poem`: every branch below sets it.)
    if "\n\n" not in output_tmp:
        if output_tmp.find('.') != -1:
            # Cut at the end of the first sentence, keeping the period.
            idx = output_tmp.find('.')
            poem = output_tmp[:idx + 1]
        else:
            # No sentence end: drop everything after the last line break.
            idx = output_tmp.rfind('\n')
            poem = output_tmp[:idx]
    else:
        # Keep only the first paragraph of the completion.
        poem = output_tmp.split("\n\n")[0]
    poem = poem.replace('?', '')
    gr.Markdown(f"Returned is: {poem}")
    return poem
def poem_generate(word):
    """Generate a poem for *word* using the few-shot ``prompt`` as context.

    Same as ``poem2_generate`` but prepends the module-level seed lyrics to
    the model input. The completion is trimmed identically: first paragraph
    if a blank line exists, else up to the first period, else up to the last
    newline; question marks are stripped.

    Returns the trimmed poem string. Raises KeyError/IndexError if the API
    responds with an error payload instead of a generation list.
    """
    p = prompt + word.lower() + "\n" + "poem using word: "
    # BUG FIX: debug messages were non-f-strings, so the placeholders were
    # shown literally instead of interpolated.
    gr.Markdown(f"Generate - Prompt is :{p}")
    json_ = {
        "inputs": p,
        "parameters": {
            "top_p": 0.9,
            "temperature": 1.1,
            "max_new_tokens": 50,
            "return_full_text": False,
        },
    }
    response = requests.post(API_URL, headers=headers, json=json_)
    output = response.json()
    gr.Markdown(f"error? Reason is : {output}")
    output_tmp = output[0]['generated_text']
    gr.Markdown(f"Response without splits is: {output_tmp}")
    # (Removed a dead pre-assignment of `poem`: every branch below sets it.)
    if "\n\n" not in output_tmp:
        if output_tmp.find('.') != -1:
            # Cut at the end of the first sentence, keeping the period.
            idx = output_tmp.find('.')
            poem = output_tmp[:idx + 1]
        else:
            # No sentence end: drop everything after the last line break.
            idx = output_tmp.rfind('\n')
            poem = output_tmp[:idx]
    else:
        # Keep only the first paragraph of the completion.
        poem = output_tmp.split("\n\n")[0]
    poem = poem.replace('?', '')
    gr.Markdown(f"Returned is: {poem}")
    return poem
def poem_to_image(poem):
    """Render *poem* as an image via the latentdiffusion Space; return the
    first generated image (a filepath)."""
    gr.Markdown("toimage")
    # Flatten the poem onto one line and append a painting-style hint.
    caption = " ".join(poem.split('\n')) + " oil on canvas."
    # Fixed generation settings expected by the remote Space (strings on
    # purpose — that is what the loaded interface receives).
    steps = '50'
    width = '256'
    height = '256'
    images = '1'
    diversity = 15
    space = gr.Interface().load("spaces/multimodalart/latentdiffusion")
    results = space(caption, steps, width, height, images, diversity)
    return results[0]
def set_example(example: list) -> dict:
    """Copy the clicked dataset sample's first field into the textbox."""
    selected = example[0]
    return gr.Textbox.update(value=selected)
# Assemble the UI: a seed-text input, a generated-poem textbox, an image
# output, and two buttons chaining text generation into image generation.
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Few Shot Learning Text to Word Image Search</center></h1>")
    gr.Markdown("https://huggingface.co/blog/few-shot-learning-gpt-neo-and-inference-api, https://github.com/EleutherAI/the-pile")
    with gr.Row():
        input_word = gr.Textbox(lines=7, value=prompt)
        poem_txt = gr.Textbox(lines=7)
        output_image = gr.Image(type="filepath", shape=(256, 256))
    text_btn = gr.Button("Generate Text")
    image_btn = gr.Button("Generate Image")
    text_btn.click(poem2_generate, input_word, poem_txt)
    image_btn.click(poem_to_image, poem_txt, output_image)
    # Clickable example prompts that populate the input textbox.
    samples = [["living, loving,"], ["I want to live. I want to give."], ["Ive been to Hollywood. Ive been to Redwood"]]
    example_text = gr.Dataset(components=[input_word], samples=samples)
    example_text.click(fn=set_example, inputs=example_text, outputs=example_text.components)

demo.launch(enable_queue=True, debug=True)