pcuenq committed
Commit
77ce6d2
1 Parent(s): 5247c29

Upload sayak_lcm_benchmark.py

Files changed (1):
  1. sayak_lcm_benchmark.py (+54, -0)
sayak_lcm_benchmark.py ADDED
import torch
import torch.utils.benchmark as benchmark
import argparse
from diffusers import DiffusionPipeline, LCMScheduler

PROMPT = "close-up photography of old man standing in the rain at night, in a street lit by lamps, leica 35mm summilux"
MODEL_ID = "stabilityai/stable-diffusion-xl-base-1.0"
LORA_ID = "latent-consistency/lcm-lora-sdxl"


def benchmark_fn(f, *args, **kwargs):
    # Time f(*args, **kwargs) with torch.utils.benchmark and return the mean
    # runtime in microseconds.
    t0 = benchmark.Timer(
        stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}
    )
    return t0.blocked_autorange().mean * 1e6


def load_pipeline(standard_sdxl=False):
    pipe = DiffusionPipeline.from_pretrained(MODEL_ID, variant="fp16")
    if not standard_sdxl:
        # Swap in the LCM scheduler and attach the LCM-LoRA weights.
        pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
        pipe.load_lora_weights(LORA_ID)

    pipe.to(device="cuda", dtype=torch.float16)
    pipe.enable_model_cpu_offload()
    return pipe


def call_pipeline(pipe, batch_size, num_inference_steps, guidance_scale):
    # Run one generation; the result is unused, only the call is timed.
    images = pipe(
        prompt=PROMPT,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=batch_size,
        guidance_scale=guidance_scale,
    ).images[0]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--standard_sdxl", action="store_true")
    args = parser.parse_args()

    pipeline = load_pipeline(args.standard_sdxl)
    if args.standard_sdxl:
        # Baseline SDXL settings: 25 steps with classifier-free guidance.
        num_inference_steps = 25
        guidance_scale = 5
    else:
        # LCM-LoRA settings: 4 steps, guidance effectively disabled
        # (diffusers skips CFG when guidance_scale <= 1).
        num_inference_steps = 4
        guidance_scale = 1

    time = benchmark_fn(call_pipeline, pipeline, args.batch_size, num_inference_steps, guidance_scale)

    print(f"Batch size: {args.batch_size} in {time/1e6:.3f} seconds")