# Hugging Face Space (Gradio): Stable Diffusion text-to-image demo.
import gradio as gr
import model_loader
import pipeline
from transformers import CLIPTokenizer
import torch
import cv2

from download import download

# Device selection: prefer CUDA, then Apple-silicon MPS, else CPU.
# The ALLOW_* switches let you force CPU even when an accelerator exists.
DEVICE = "cpu"
ALLOW_CUDA = True
ALLOW_MPS = False

if torch.cuda.is_available() and ALLOW_CUDA:
    DEVICE = "cuda"
# torch.has_mps is deprecated (and removed in recent PyTorch releases);
# torch.backends.mps.is_available() is the supported check and the old
# `or` made it redundant anyway.
elif torch.backends.mps.is_available() and ALLOW_MPS:
    DEVICE = "mps"

print(f"Using device: {DEVICE}")
def generate_image(prompt):
    """Run the Stable Diffusion text-to-image pipeline for *prompt*.

    Returns the generated image as a numpy array after an RGB->BGR
    channel swap (see NOTE below).
    """
    # Load the tokenizer and the ~4 GB checkpoint only once and cache them
    # on the function object: the original re-loaded everything on every
    # button click, which dominated the per-request latency.
    if not hasattr(generate_image, "_cache"):
        tokenizer = CLIPTokenizer(r"./data/vocab.json", merges_file="./data/merges.txt")
        model_file = "./data/v1-5-pruned-emaonly.ckpt"
        models = model_loader.preload_models_from_standard_weights(model_file, DEVICE)
        generate_image._cache = (tokenizer, models)
    tokenizer, models = generate_image._cache

    ## TEXT TO IMAGE
    uncond_prompt = ""  # Also known as negative prompt
    do_cfg = True
    cfg_scale = 8  # classifier-free guidance strength; min: 1, max: 14

    ## SAMPLER
    sampler = "ddpm"
    num_inference_steps = 50
    seed = 42  # fixed seed -> deterministic output for a given prompt

    output_image = pipeline.generate(
        prompt=prompt,
        uncond_prompt=uncond_prompt,
        input_image=None,      # pure text-to-image: no init image
        strength=0.5,          # only relevant for image-to-image; ignored here
        do_cfg=do_cfg,
        cfg_scale=cfg_scale,
        sampler_name=sampler,
        n_inference_steps=num_inference_steps,
        seed=seed,
        models=models,
        device=DEVICE,
        idle_device="cpu",     # off-load idle sub-models to CPU to save VRAM
        tokenizer=tokenizer,
    )

    # NOTE(review): gr.Image normally expects RGB numpy arrays; this swap to
    # BGR suggests the pipeline emits BGR (or colors display inverted) —
    # confirm against pipeline.generate's output format.
    image = cv2.cvtColor(output_image, cv2.COLOR_RGB2BGR)
    return image
# Build the Gradio UI: a prompt textbox on the left, the generated image on
# the right, and a button wiring the two through generate_image.
with gr.Blocks() as demo:
    gr.Markdown('# Diffusion')
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(lines=3, label="Prompt", placeholder="Enter a prompt")
            # (removed a leftover debug `print(prompt)` that dumped the
            # component object to stdout at build time)
        with gr.Column():
            output_image = gr.Image(label="Generated Image")
    submit_button_image = gr.Button("Generate Image")
    submit_button_image.click(generate_image, inputs=prompt, outputs=output_image)

if __name__ == "__main__":
    # Fetch vocab/merges/checkpoint into ./data before serving requests.
    download()
    demo.launch()