from diffusers import StableDiffusionPipeline, DDIMScheduler
from time import time
from PIL import Image
from einops import rearrange
import numpy as np
import torch
from torch import autocast
from torchvision.utils import make_grid

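# Fix the random seed so repeated runs produce the same images.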
torch.manual_seed(42)

prompts = ["a photograph of an astronaut riding a horse"]

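# Load the Stable Diffusion v1-3 weights in half precision; the fp16 revision
# and a Hugging Face access token (use_auth_token=True) are required for this checkpoint.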
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-3",
    revision="fp16",
    torch_dtype=torch.float16,
    use_auth_token=True,
)
pipe.to("cuda")

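# One batch per prompt; the final grid is num_columns images wide, one row per prompt.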
all_images = []
num_rows = 1
num_columns = 4

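# Generate num_columns images for each prompt in one batched call under CUDA autocast.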
for prompt in prompts:
    with autocast("cuda"):
        images = pipe(num_columns * [prompt], guidance_scale=7.5, output_type="np")["sample"]
    all_images.append(torch.from_numpy(images))

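# Stack all batches into one (n * b, c, h, w) tensor and tile it into a grid;
# make_grid's `nrow` argument is the number of images per row.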
grid = torch.stack(all_images, 0)
grid = rearrange(grid, 'n b h w c -> (n b) h w c')
grid = rearrange(grid, 'n h w c -> n c h w')
grid = make_grid(grid, nrow=num_columns)

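# Scale the [0, 1] float grid to 0-255, convert to HWC uint8, and save it as a PNG.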
grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
image = Image.fromarray(grid.astype(np.uint8))
image.save(f"../images/diffusers/batch_{round(time())}.png")
|
|