import gradio as gr
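
# Gradio demo for UniDiffuser: joint image-text generation, plus text-to-image,
# image-to-text, text-only, and image-only sampling.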
# import torch
# from diffusers import UniDiffuserPipeline
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
# pipeline = UniDiffuserPipeline.from_pretrained(
#     "dg845/unidiffuser-diffusers",
# )
# pipeline.to(device)
# def convert_to_none(s):
#     if s:
#         return s
#     else:
#         return None
# def set_mode(mode):
#     if mode == "joint":
#         pipeline.set_joint_mode()
#     elif mode == "text2img":
#         pipeline.set_text_to_image_mode()
#     elif mode == "img2text":
#         pipeline.set_image_to_text_mode()
#     elif mode == "text":
#         pipeline.set_text_mode()
#     elif mode == "img":
#         pipeline.set_image_mode()
# def sample(mode, prompt, image, num_inference_steps, guidance_scale):
#     set_mode(mode)
#     prompt = convert_to_none(prompt)
#     image = convert_to_none(image)
#     output_sample = pipeline(
#         prompt=prompt,
#         image=image,
#         num_inference_steps=num_inference_steps,
#         guidance_scale=guidance_scale,
#     )
#     sample_image = None
#     sample_text = ""
#     if output_sample.images is not None:
#         sample_image = output_sample.images[0]
#     if output_sample.text is not None:
#         sample_text = output_sample.text[0]
#     return sample_image, sample_text
# iface = gr.Interface(
#     fn=sample,
#     inputs=[
#         gr.Textbox(value="", label="Generation Task"),
#         gr.Textbox(value="", label="Conditioning prompt"),
#         gr.Image(value=None, label="Conditioning image", type="pil"),
#         gr.Number(value=20, label="Num Inference Steps", precision=0),
#         gr.Number(value=8.0, label="Guidance Scale"),
#     ],
#     outputs=[
#         gr.Image(label="Sample image"),
#         gr.Textbox(label="Sample text"),
#     ],
# )
# iface.launch()
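
# Active implementation: wrap the sampling entry point from the original
# UniDiffuser repository (unidiffuser/scripts/sample_v1.py). The commented-out
# block above is an alternative built on the diffusers UniDiffuserPipeline.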
from unidiffuser.scripts.sample_v1 import sample


def predict(mode, prompt, image, sample_steps, guidance_scale, seed):
    """Run UniDiffuser sampling and return the first generated image and caption.

    The mode string is passed straight through to sample() from the original
    UniDiffuser repository; see that repository for the accepted mode names.
    """
    output_images, output_text = sample(
        mode, prompt, image, sample_steps=sample_steps, scale=guidance_scale, seed=seed,
    )
    # The sampler may return None for outputs that the chosen mode does not produce.
    sample_image = None
    sample_text = ""
    if output_images is not None:
        sample_image = output_images[0]
    if output_text is not None:
        sample_text = output_text[0]
    return sample_image, sample_text


iface = gr.Interface(
    fn=predict,
    inputs=[
        gr.Textbox(value="", label="Generation Task"),
        gr.Textbox(value="", label="Conditioning prompt"),
        gr.Image(value=None, label="Conditioning image", type="filepath"),
        gr.Number(value=50, label="Num Inference Steps", precision=0),
        gr.Number(value=7.0, label="Guidance Scale"),
        gr.Number(value=1234, label="Seed", precision=0),
    ],
    outputs=[
        gr.Image(label="Sample image"),
        gr.Textbox(label="Sample text"),
    ],
)

iface.launch()