import gradio as gr
import requests
import os
#fn_index=1
# inputs:
# |-textbox
# |-slider
# |-slider
# |-slider
# |-slider
# outputs:
# |-gallery
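# Assumed mapping (inferred from the input/output list above, not stated elsewhere
# in the file): a call against fn_index=1 takes (prompt, samples, steps, scale, seed)
# positionally and returns the image gallery; get_sd below invokes it this way.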
#API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
HF_TOKEN = os.environ["HF_TOKEN"]
#headers = {"Authorization": f"Bearer {HF_TOKEN}"}
def get_sd(translated_txt):
    print("******** Inside get_sd ********")
    print(f"translated_txt is : {translated_txt}")
    # Load the hosted Stable Diffusion Space, authenticating with the HF_TOKEN
    # environment variable rather than a hard-coded token.
    sd_inf = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion", use_auth_token=HF_TOKEN)
    print(f"stable Diff inf is : {sd_inf}")
    # Call the Space's fn_index=1 endpoint: (prompt, samples, steps, scale, seed)
    tbox, sd_img_gallery = sd_inf(translated_txt, '4', '45', '7.5', 1024, fn_index=1)
    return sd_img_gallery[0]
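# Design note (an assumption, not part of the original code): gr.Blocks.load re-fetches
# the Space on every button click. If that proves slow, the load could be hoisted to
# module level once and reused inside get_sd, e.g.:
#   sd_space = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion", use_auth_token=HF_TOKEN)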
demo = gr.Blocks()

with demo:
    gr.Markdown("Testing Diffusion models. STILL VERY MUCH WORK IN PROGRESS !!!!!!!!")
    with gr.Row():
        in_text_prompt = gr.Textbox(label="Enter English text here")
        #out_text_chinese = gr.Textbox(label="Your Chinese language output")
        b1 = gr.Button("Generate SD")
        out_sd = gr.Image(type="pil", label="SD output for the given prompt")

    b1.click(get_sd, in_text_prompt, out_sd)  #out_gallery )

demo.launch(enable_queue=True, debug=True)