|
from diffusers import StableDiffusionPipeline, DDIMScheduler |
|
import torch |
|
|
|
# --- Model setup ------------------------------------------------------
# Load a locally fine-tuned Stable Diffusion checkpoint with a DDIM
# scheduler. Assumes a CUDA-capable GPU is present.
device = "cuda"

# Directory holding the fine-tuned checkpoint (training step 1500).
# Was written as an f-string with no placeholders; a plain literal is correct.
model_path = "./output-models/1500/"

# DDIM scheduler using the standard Stable Diffusion beta schedule
# (scaled-linear, beta in [0.00085, 0.012]).  clip_sample=False and
# set_alpha_to_one=True match the original configuration exactly.
scheduler = DDIMScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=True,
)

pipe = StableDiffusionPipeline.from_pretrained(
    model_path,
    scheduler=scheduler,
    safety_checker=None,          # NOTE(review): NSFW filter disabled — only for trusted local use
    torch_dtype=torch.float16,    # half precision to reduce GPU memory
).to(device)

# Memory-efficient attention; raises at runtime if xformers is not installed.
pipe.enable_xformers_memory_efficient_attention()
|
|
|
# --- Generation parameters -------------------------------------------
prompt = "photo of zwx dog with Texas bluebonnet"  # "zwx" presumably the fine-tuned subject token — confirm
negative_prompt = ""

num_samples = 4            # images produced for the single prompt
guidance_scale = 7.5       # classifier-free guidance strength
num_inference_steps = 50   # DDIM denoising steps
height = width = 512       # output resolution (square)
|
|
|
# --- Inference --------------------------------------------------------
# autocast keeps ops in half precision; inference_mode skips autograd
# bookkeeping entirely (stricter than no_grad).
with torch.autocast("cuda"), torch.inference_mode():
    result = pipe(
        prompt,
        height=height,
        width=width,
        negative_prompt=negative_prompt,
        num_images_per_prompt=num_samples,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    )

# Save outputs as img-1.png, img-2.png, ... in the working directory.
for index, image in enumerate(result.images, start=1):
    image.save(f"img-{index}.png")
|
|
|
|