patrickvonplaten committed on
Commit
ea151b9
1 Parent(s): 056c912
Files changed (3) hide show
  1. bench.py +27 -0
  2. control_net.py +53 -0
  3. safetensors_bench.py +16 -0
bench.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""Benchmark Stable Diffusion pipeline load time.

Compares loading directly onto the GPU (``device_map="auto"``) against the
default CPU load followed by ``.to("cuda")``, then runs one short generation.

Usage:
    bench.py <model_path> <use_device_map: 0|1>
"""
import sys
import time

import torch

from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler


def main() -> None:
    """Load the pipeline per the CLI flags, generate one image, print timings."""
    path = sys.argv[1]
    # Any non-zero integer enables device_map="auto"; "0" disables it.
    use_device_map = bool(int(sys.argv[2]))

    start_time = time.time()

    if use_device_map:
        print("Load directly on GPU")
        pipe = StableDiffusionPipeline.from_pretrained(
            path, torch_dtype=torch.float16, device_map="auto"
        )
    else:
        print("Load directly on CPU")
        pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        pipe = pipe.to("cuda")

    # Swap in a faster multistep scheduler; config carries over from the original.
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

    prompt = "a highly realistic photo of green turtle"

    print("Loading Time", time.time() - start_time)
    # Fixed seed so repeated runs are reproducible and timings comparable.
    generator = torch.Generator(device="cuda").manual_seed(0)
    image = pipe(prompt, generator=generator, num_inference_steps=15).images[0]
    print("Time", time.time() - start_time)


if __name__ == "__main__":
    main()
control_net.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""ControlNet canny-edge demo.

Builds a Canny-edge conditioning image from a reference photo, runs a
Stable Diffusion v1.5 ControlNet pipeline on it, saves the result under
``~/images/aa.png``, and uploads the file to a Hugging Face dataset repo.
"""
import os
from pathlib import Path

import cv2
import numpy as np
import torch
from huggingface_hub import HfApi
from PIL import Image

from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image


def _make_canny_condition() -> Image.Image:
    """Download the reference image and return its 3-channel Canny edge map."""
    image = load_image(
        "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
    )
    image = np.array(image)

    low_threshold = 100
    high_threshold = 200

    edges = cv2.Canny(image, low_threshold, high_threshold)
    # Replicate the single edge channel to RGB, as the pipeline expects 3 channels.
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    return Image.fromarray(edges)


def main() -> None:
    """Generate the conditioned image, save it locally, and upload it."""
    canny_image = _make_canny_condition()

    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
    )
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
    )

    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    # Keeps sub-models on CPU and moves them to GPU only while in use.
    pipe.enable_model_cpu_offload()

    generator = torch.manual_seed(0)
    out_image = pipe(
        "futuristic-looking woman",
        num_inference_steps=20,
        generator=generator,
        image=canny_image,
    ).images[0]

    # Ensure the target directory exists; the original crashed when it did not.
    out_path = Path.home() / "images" / "aa.png"
    out_path.parent.mkdir(parents=True, exist_ok=True)
    out_image.save(out_path)

    api = HfApi()
    api.upload_file(
        path_or_fileobj=str(out_path),
        # Portable basename instead of splitting on "/" by hand.
        path_in_repo=out_path.name,
        repo_id="patrickvonplaten/images",
        repo_type="dataset",
    )
    print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")


if __name__ == "__main__":
    main()
safetensors_bench.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""Benchmark safetensors checkpoint loading.

Compares loading tensors directly onto the GPU (``device=0``) against
loading on CPU and moving every tensor to ``cuda:0`` afterwards.

Usage:
    safetensors_bench.py <direct_on_gpu: 0|1> [checkpoint_path]
"""
import sys
import time

from safetensors.torch import load_file as safe_load_file

# Single source of truth for the checkpoint; the original duplicated this
# literal in both branches. Overridable via the optional second CLI argument.
DEFAULT_CHECKPOINT = (
    "/home/patrick_huggingface_co/stable-diffusion-v1-4/unet/diffusion_pytorch_model.safetensors"
)


def main() -> None:
    """Time one of the two load strategies selected by the first CLI flag."""
    direct_on_gpu = bool(int(sys.argv[1]))
    ckpt_path = sys.argv[2] if len(sys.argv) > 2 else DEFAULT_CHECKPOINT

    if direct_on_gpu:
        start_time = time.time()
        # device=0 materializes tensors on cuda:0 without an intermediate CPU copy.
        checkpoint = safe_load_file(ckpt_path, device=0)
        print("Directly on GPU", time.time() - start_time)
    else:
        start_time = time.time()
        checkpoint = safe_load_file(ckpt_path)
        checkpoint = {k: v.to("cuda:0") for k, v in checkpoint.items()}
        print("On CPU", time.time() - start_time)


if __name__ == "__main__":
    main()