import argparse
import gc
import json

import torch
import torch.utils.benchmark as benchmark
from diffusers import DiffusionPipeline
from optimum.quanto import freeze, qfloat8, qint4, qint8, quantize

WARM_UP_ITERS = 5
PROMPT = "ghibli style, a fantasy landscape with castles"
TORCH_DTYPES = {"fp32": torch.float32, "fp16": torch.float16, "bf16": torch.bfloat16}
QTYPES = {"fp8": qfloat8, "int8": qint8, "int4": qint4, "none": None}

PREFIXES = {
    "stabilityai/stable-diffusion-3-medium-diffusers": "sd3",
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": "pixart",
    "fal/AuraFlow": "auraflow",
}
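
# Example invocations (illustrative; the script filename and the excluded
# layer name are assumptions, not taken from the script itself):
#   python benchmark_quanto.py --qtype int8 --qte 1
#   python benchmark_quanto.py --ckpt_id "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS" \
#       --torch_dtype bf16 --qtype fp8 --fuse 1 --exclude_layers proj_out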


def flush():
    """Frees GPU memory and resets CUDA peak-memory statistics."""
    gc.collect()
    torch.cuda.empty_cache()
    # reset_max_memory_allocated is a deprecated alias of reset_peak_memory_stats
    # in recent PyTorch; calling both is harmless.
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()


def load_pipeline(ckpt_id, torch_dtype, qtype=None, exclude_layers=None, qte=False, fuse=False):
    pipe = DiffusionPipeline.from_pretrained(ckpt_id, torch_dtype=torch_dtype).to("cuda")

    if fuse:
        pipe.transformer.fuse_qkv_projections()

    if qtype:
        # Quantize the transformer weights, optionally skipping the given layers.
        quantize(pipe.transformer, weights=qtype, exclude=exclude_layers)
        freeze(pipe.transformer)
        if qte:
            # Also quantize whichever text encoders the pipeline exposes.
            quantize(pipe.text_encoder, weights=qtype)
            freeze(pipe.text_encoder)
            if hasattr(pipe, "text_encoder_2"):
                quantize(pipe.text_encoder_2, weights=qtype)
                freeze(pipe.text_encoder_2)
            if hasattr(pipe, "text_encoder_3"):
                quantize(pipe.text_encoder_3, weights=qtype)
                freeze(pipe.text_encoder_3)

    pipe.set_progress_bar_config(disable=True)
    return pipe
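
# Example (illustrative): SD3 in fp16 with an int8-quantized transformer and
# text encoders.
#   pipe = load_pipeline(
#       "stabilityai/stable-diffusion-3-medium-diffusers",
#       torch.float16,
#       qtype=qint8,
#       qte=True,
#   )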


def run_inference(pipe, batch_size=1):
    _ = pipe(
        prompt=PROMPT,
        num_images_per_prompt=batch_size,
        generator=torch.manual_seed(0),
    )


def benchmark_fn(f, *args, **kwargs):
    t0 = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"args": args, "kwargs": kwargs, "f": f},
    )
    # `blocked_autorange` runs the statement enough times for a stable
    # measurement; `.mean` is the average runtime in seconds.
    return f"{(t0.blocked_autorange().mean):.3f}"
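
# Returns the mean latency as a formatted string,
# e.g. benchmark_fn(run_inference, pipe) -> "4.321" (value illustrative).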


def bytes_to_giga_bytes(bytes):
    return f"{(bytes / 1024 / 1024 / 1024):.3f}"
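
# e.g. bytes_to_giga_bytes(2 * 1024**3) == "2.000" (GiB, three decimal places)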


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ckpt_id",
        type=str,
        default="stabilityai/stable-diffusion-3-medium-diffusers",
        choices=list(PREFIXES.keys()),
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--torch_dtype", type=str, default="fp16", choices=list(TORCH_DTYPES.keys()))
    parser.add_argument("--qtype", type=str, default="none", choices=list(QTYPES.keys()))
    parser.add_argument("--qte", type=int, default=0, help="Quantize text encoder")
    parser.add_argument("--fuse", type=int, default=0)
    parser.add_argument("--exclude_layers", metavar="N", type=str, nargs="*", default=None)
    args = parser.parse_args()

    flush()

    print(
        f"Running with ckpt_id: {args.ckpt_id}, batch_size: {args.batch_size}, "
        f"torch_dtype: {args.torch_dtype}, qtype: {args.qtype}, qte: {bool(args.qte)}, "
        f"{args.exclude_layers=}, {args.fuse=}"
    )

    pipeline = load_pipeline(
        ckpt_id=args.ckpt_id,
        torch_dtype=TORCH_DTYPES[args.torch_dtype],
        qtype=QTYPES[args.qtype],
        exclude_layers=args.exclude_layers,
        qte=args.qte,
        fuse=bool(args.fuse),
    )

    for _ in range(WARM_UP_ITERS):
        run_inference(pipeline, args.batch_size)

    time = benchmark_fn(run_inference, pipeline, args.batch_size)
    torch.cuda.empty_cache()
    memory = bytes_to_giga_bytes(torch.cuda.memory_allocated())  # in GBs.
    print(
        f"ckpt: {args.ckpt_id} batch_size: {args.batch_size}, qte: {args.qte}, {args.exclude_layers=}, "
        f"torch_dtype: {args.torch_dtype}, qtype: {args.qtype} in {time} seconds and {memory} GBs."
    )

    ckpt_id = PREFIXES[args.ckpt_id]
    img_name = (
        f"ckpt@{ckpt_id}-bs@{args.batch_size}-dtype@{args.torch_dtype}"
        f"-qtype@{args.qtype}-qte@{args.qte}-fuse@{args.fuse}"
    )
    if args.exclude_layers:
        exclude_layers = "_".join(args.exclude_layers)
        img_name += f"-exclude@{exclude_layers}"

    image = pipeline(
        prompt=PROMPT,
        num_images_per_prompt=args.batch_size,
        generator=torch.manual_seed(0),
    ).images[0]
    image.save(f"{img_name}.png")

    info = dict(
        batch_size=args.batch_size,
        memory=memory,
        time=time,
        dtype=args.torch_dtype,
        qtype=args.qtype,
        qte=args.qte,
        exclude_layers=args.exclude_layers,
        fuse=args.fuse,
    )
    info_file = f"{img_name}_info.json"
    with open(info_file, "w") as f:
        json.dump(info, f)
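
    # The dumped JSON has this shape (values illustrative):
    # {"batch_size": 1, "memory": "8.123", "time": "4.567", "dtype": "fp16",
    #  "qtype": "none", "qte": 0, "exclude_layers": null, "fuse": 0}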