import sys
import time

import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

# Usage: python <script> <model_path_or_id> <use_device_map: 0 or 1>
path = sys.argv[1]
use_device_map = bool(int(sys.argv[2]))

start_time = time.time()

if use_device_map:
    # Let accelerate place the fp16 weights directly on the GPU while loading.
    print("Load directly on GPU")
    pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16, device_map="auto")
else:
    # Load the fp16 weights on the CPU first, then move the whole pipeline to the GPU.
    print("Load directly on CPU")
    pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

# Swap in the UniPC multistep scheduler so few inference steps are needed.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

prompt = "a highly realistic photo of green turtle"

print("Loading Time", time.time() - start_time)

# Fixed seed so both loading paths generate the same image.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, generator=generator, num_inference_steps=15).images[0]
print("Time", time.time() - start_time)