|
|
|
import torch |
|
import random |
|
import gradio as gr |
|
from diffusers import StableDiffusionPipeline, DDIMScheduler |
|
|
|
|
|
# Path to the locally fine-tuned Stable Diffusion weights.
model_path = "model"

# Select the GPU when available. The xformers attention enabled below and
# the CUDA generator created in generate_response() both assume the
# pipeline actually lives on the device.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the pipeline in full precision (float32).
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float32)

# Swap the default scheduler for DDIM, reusing the existing scheduler config.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

# BUG FIX: the pipeline was never moved onto the GPU, while inference code
# elsewhere in this file builds its torch.Generator on 'cuda'. Move it now.
pipe = pipe.to(device)

# Memory-efficient attention only works on CUDA; guard it so the script
# does not crash on CPU-only machines.
if device == "cuda":
    pipe.enable_xformers_memory_efficient_attention()
|
|
|
|
|
def generate_response(prompt):
    """Generate a single 512x512 image from *prompt* using the global pipeline.

    Parameters
    ----------
    prompt : str
        Free-text description of the desired image (from the Gradio textbox).

    Returns
    -------
    PIL.Image.Image
        The generated image.
    """
    # Quality-control negative prompt. BUG FIX: "desfigured" -> "disfigured";
    # the misspelled token weakened the negative conditioning.
    negative_prompt = (
        "bad anatomy, ugly, deformed, disfigured, distorted face, "
        "poorly drawn, blurry, low quality, low definition, lowres, "
        "out of frame, out of image, cropped, cut off, signature, watermark"
    )

    # BUG FIX: the original generated 5 images and returned only the first,
    # discarding 80% of the compute. Generate exactly the one we return.
    num_samples = 1
    guidance_scale = 7.5
    num_inference_steps = 30
    height = 512
    width = 512

    # Fresh random seed on every call (output is intentionally
    # non-deterministic). Create the generator on the pipeline's own device
    # instead of hard-coding 'cuda', so CPU-only runs also work.
    seed = random.randint(0, 2147483647)
    generator = torch.Generator(device=pipe.device).manual_seed(seed)

    # inference_mode disables autograd bookkeeping for the whole sampling loop.
    with torch.inference_mode():
        imgs = pipe(
            prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_images_per_prompt=num_samples,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images

    return imgs[0]
|
|
|
|
|
# Minimal Gradio front-end: one text box for the prompt, one image panel
# for the result, wired directly to generate_response.
gradio_ui = gr.Interface(fn=generate_response, inputs="text", outputs="image")

# NOTE(review): launch() blocks and runs at import time (no __main__ guard),
# serving on Gradio's default local port.
gradio_ui.launch()
|
|