Not sure why inferencing was so slow

#10
by vionwinnie - opened

This is my code for importing your amazing model into the jupyter notebook with a 25GB GPU. I am not sure what I did wrong as my inference of 1 image for img2img takes 12 minutes to run...

import torch
from PIL import Image
from io import BytesIO
from diffusers import StableDiffusionImg2ImgPipeline, UniPCMultistepScheduler, AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",torch_dtype=torch.float16)

model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id,
                                                      vae=vae,
                                                      use_safetensors=True,
                                                      torch_dtype=torch.float16,
                                                      safety_checker=None)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()

images = pipe(prompt=prompt, 
              negative_prompt=negative_prompt,
              image=init_img, 
              steps=5,
              num_images_per_prompt=1,
              strength=0.6, 
              guidance_scale=6.5).images

Sign up or log in to comment