|
import os |
|
|
|
import gradio as gr |
|
import torch |
|
from diffusers import StableDiffusionPipeline |
|
|
|
# Pick the fastest available device and load the diffusion pipeline once at
# import time so every request reuses the same weights.
if torch.cuda.is_available():
    DEVICE = "cuda"
    _DTYPE = torch.float16  # half precision on GPU for speed/VRAM
else:
    DEVICE = "cpu"
    _DTYPE = torch.float32  # CPU has no fp16 kernels; stay in full precision

PIPE = StableDiffusionPipeline.from_pretrained("model/", torch_dtype=_DTYPE).to(DEVICE)
|
|
|
|
|
def generate_image(prompt, negative_prompt, image_size, scale, steps, seed):
    """Run one text-to-image generation and return a single PIL image.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Text describing what to avoid (may be empty).
        image_size: Free-text square size in pixels; blank or invalid
            input falls back to 512.
        scale: Classifier-free guidance scale.
        steps: Number of denoising steps (slider may deliver a float).
        seed: RNG seed (slider may deliver a float).

    Returns:
        The first generated PIL.Image.
    """
    # The size comes from a free-text Textbox: strip whitespace and fall
    # back to 512 rather than crashing on non-numeric input.
    try:
        image_size = int(str(image_size).strip()) if image_size else 512
    except ValueError:
        image_size = 512
    # Gradio sliders report floats; torch.Generator.manual_seed and
    # num_inference_steps both require ints.
    generator = torch.Generator(device=DEVICE).manual_seed(int(seed))
    images = PIPE(
        prompt,
        negative_prompt=negative_prompt,
        width=image_size,
        height=image_size,
        num_inference_steps=int(steps),
        guidance_scale=scale,
        num_images_per_prompt=1,
        generator=generator,
    ).images[0]
    return images
|
|
|
|
|
# Wire up the Gradio UI: prompt/negative-prompt text, optional size,
# guidance scale, step count, and a randomized seed.
_inputs = [
    gr.Textbox(label="Prompt", lines=5, max_lines=5),
    gr.Textbox(label="Negative prompt (optional)", lines=5, max_lines=5),
    gr.Textbox(label="Image size (optional)", lines=1, max_lines=1),
    gr.Slider(minimum=1, maximum=20, value=7.5, step=0.5, label="Scale"),
    gr.Slider(minimum=1, maximum=150, value=50, label="Steps"),
    gr.Slider(minimum=1, maximum=999999999999999999, step=1, randomize=True, label="Seed"),
]

demo = gr.Interface(
    fn=generate_image,
    inputs=_inputs,
    outputs="image",
    title="Dreambooth - Powered by AutoTrain",
    description="Model:autotrain-OchoCincoDreambooth-L5QUCR07NQ-2568678396, concept prompts: concept1-> erwdb22. Tip: Switch to GPU hardware in settings to make inference superfast!",
)
demo.launch()