radames committed
Commit 259d09b
1 Parent(s): 95efaf0
Files changed (1)
  1. app.py +19 -31
app.py CHANGED
@@ -18,7 +18,7 @@ from sfast.compilers.diffusion_pipeline_compiler import compile, CompilationConf
 BASE = "stabilityai/stable-diffusion-xl-base-1.0"
 REPO = "ByteDance/SDXL-Lightning"
 # 1-step
-CHECKPOINT = "sdxl_lightning_1step_unet_x0.safetensors"
+CHECKPOINT = "sdxl_lightning_2step_lora.safetensors"
 
 # {
 # "1-Step": ["sdxl_lightning_1step_unet_x0.safetensors", 1],
@@ -38,17 +38,18 @@ torch_dtype = torch.float16
 print(f"TORCH_COMPILE: {TORCH_COMPILE}")
 print(f"device: {device}")
 
-# Load model.
-unet = UNet2DConditionModel.from_config(BASE, subfolder="unet").to(device, torch_dtype)
-unet.load_state_dict(load_file(hf_hub_download(REPO, CHECKPOINT), device="cuda"))
+
 pipe = StableDiffusionXLPipeline.from_pretrained(
-BASE, unet=unet, torch_dtype=torch_dtype, variant="fp16"
-).to(device)
+BASE, torch_dtype=torch.float16, variant="fp16"
+).to("cuda")
+pipe.load_lora_weights(hf_hub_download(REPO, CHECKPOINT))
+pipe.fuse_lora()
 
-# Ensure sampler uses "trailing" timesteps and "sample" prediction type.
+# Ensure sampler uses "trailing" timesteps.
 pipe.scheduler = EulerDiscreteScheduler.from_config(
-pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample"
+pipe.scheduler.config, timestep_spacing="trailing"
 )
+
 pipe.set_progress_bar_config(disable=True)
 config = CompilationConfig.Default()
 try:
@@ -77,7 +78,7 @@ def predict(prompt, seed=1231231):
 results = pipe(
 prompt=prompt,
 generator=generator,
-num_inference_steps=1,
+num_inference_steps=2,
 guidance_scale=0.0,
 # width=768,
 # height=768,
@@ -110,7 +111,7 @@ with gr.Blocks(css=css) as demo:
 with gr.Column(elem_id="container"):
 gr.Markdown(
 """
-# SDXL-Lightning- Text To Image 1-Step
+# SDXL-Lightning- Text To Image 2-Steps
 **Model**: https://huggingface.co/ByteDance/SDXL-Lightning
 """,
 elem_id="intro",
@@ -132,36 +133,23 @@ with gr.Blocks(css=css) as demo:
 """## Running SDXL-Lightning with `diffusers`
 ```py
 import torch
-from diffusers import (
-StableDiffusionXLPipeline,
-UNet2DConditionModel,
-EulerDiscreteScheduler,
-)
+from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
 from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
 
 base = "stabilityai/stable-diffusion-xl-base-1.0"
 repo = "ByteDance/SDXL-Lightning"
-ckpt = "sdxl_lightning_1step_unet_x0.safetensors" # Use the correct ckpt for your step setting!
+ckpt = "sdxl_lightning_4step_lora.safetensors" # Use the correct ckpt for your step setting!
 
 # Load model.
-unet = UNet2DConditionModel.from_config(base, subfolder="unet").to(
-"cuda", torch.float16
-)
-unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
-pipe = StableDiffusionXLPipeline.from_pretrained(
-base, unet=unet, torch_dtype=torch.float16, variant="fp16"
-).to("cuda")
+pipe = StableDiffusionXLPipeline.from_pretrained(base, torch_dtype=torch.float16, variant="fp16").to("cuda")
+pipe.load_lora_weights(hf_hub_download(repo, ckpt))
+pipe.fuse_lora()
 
-# Ensure sampler uses "trailing" timesteps and "sample" prediction type.
-pipe.scheduler = EulerDiscreteScheduler.from_config(
-pipe.scheduler.config, timestep_spacing="trailing", prediction_type="sample"
-)
+# Ensure sampler uses "trailing" timesteps.
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 
 # Ensure using the same inference steps as the loaded model and CFG set to 0.
-pipe("A girl smiling", num_inference_steps=1, guidance_scale=0).images[0].save(
-"output.png"
-)
+pipe("A girl smiling", num_inference_steps=4, guidance_scale=0).images[0].save("output.png")
 ```
 """
 )
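
Note: the commented-out table above the CHECKPOINT constant hints at selecting a checkpoint per step count. Below is a minimal sketch of how that selection could be wired to the LoRA loading path this commit switches to. The `load_lightning_pipeline` helper is hypothetical, and only the 2-step and 4-step LoRA filenames are confirmed by this diff; other step counts would need to be checked against the ByteDance/SDXL-Lightning repo.

```py
# Hypothetical helper, not part of this commit: pick an SDXL-Lightning LoRA
# checkpoint by step count and build the pipeline the way app.py now does.
import torch
from diffusers import StableDiffusionXLPipeline, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download

BASE = "stabilityai/stable-diffusion-xl-base-1.0"
REPO = "ByteDance/SDXL-Lightning"

# Only these two LoRA filenames appear in this diff.
LORA_CHECKPOINTS = {
    2: "sdxl_lightning_2step_lora.safetensors",
    4: "sdxl_lightning_4step_lora.safetensors",
}


def load_lightning_pipeline(num_steps: int) -> StableDiffusionXLPipeline:
    """Build an SDXL-Lightning pipeline with the LoRA for `num_steps` fused in."""
    pipe = StableDiffusionXLPipeline.from_pretrained(
        BASE, torch_dtype=torch.float16, variant="fp16"
    ).to("cuda")
    pipe.load_lora_weights(hf_hub_download(REPO, LORA_CHECKPOINTS[num_steps]))
    pipe.fuse_lora()
    # SDXL-Lightning expects "trailing" timestep spacing.
    pipe.scheduler = EulerDiscreteScheduler.from_config(
        pipe.scheduler.config, timestep_spacing="trailing"
    )
    return pipe


# Inference steps must match the loaded checkpoint, and CFG stays at 0.
pipe = load_lightning_pipeline(2)
pipe("A girl smiling", num_inference_steps=2, guidance_scale=0.0).images[0].save("output.png")
```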
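
The second hunk stops right before the stable-fast setup (`config = CompilationConfig.Default()` followed by `try:`). For context, here is a sketch of how that block typically continues, following stable-fast's documented compile pattern; the wrapper function name is hypothetical, and which flags app.py actually enables inside its `try:` blocks is not visible in this diff.

```py
# Sketch only, assuming stable-fast's documented usage; not copied from app.py.
from sfast.compilers.diffusion_pipeline_compiler import compile, CompilationConfig


def compile_pipeline(pipe):
    config = CompilationConfig.Default()
    try:
        import xformers  # noqa: F401

        config.enable_xformers = True
    except ImportError:
        print("xformers not installed, skipping")
    try:
        import triton  # noqa: F401

        config.enable_triton = True
    except ImportError:
        print("triton not installed, skipping")
    config.enable_cuda_graph = True
    # compile() returns the optimized pipeline; run it once after the model is loaded.
    return compile(pipe, config)
```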