ameerazam08 committed on
Commit 3fb40ea • 1 Parent(s): 7b7c0d1

Update app.py

Files changed (1):
  1. app.py +68 -69
app.py CHANGED
@@ -1,5 +1,4 @@
 
-
 import os
 
 os.system("pip install -U peft")
@@ -11,166 +10,166 @@ import PIL.Image
 
 import spaces
 import torch
-from diffusers import AutoPipelineForText2Image, DPMSolverMultistepScheduler
-
-
-
-
+from diffusers import (
+    StableDiffusionXLPipeline,
+    UNet2DConditionModel,
+    EulerDiscreteScheduler,
+)
 from huggingface_hub import hf_hub_download
-from diffusers.models.attention_processor import AttnProcessor2_0
+from safetensors.torch import load_file
 
 DESCRIPTION = """
 # Res-Adapter: Domain Consistent Resolution Adapter for Diffusion Models
 **Demo by [ameer azam] - [Twitter](https://twitter.com/Ameerazam18) - [GitHub](https://github.com/AMEERAZAM08) - [Hugging Face](https://huggingface.co/ameerazam08)**
-This is a demo of https://huggingface.co/jiaxiangc/res-adapter LoRAs by ByteDance
-
+This is a demo of https://huggingface.co/jiaxiangc/res-adapter ResAdapter by ByteDance.
 
+ByteDance provides a demo of [ResAdapter](https://huggingface.co/jiaxiangc/res-adapter) with [SDXL-Lightning-Step4](https://huggingface.co/ByteDance/SDXL-Lightning) to expand the resolution range from 1024-only to 256~1024.
 """
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<h1>Running on CPU 🥶 This demo does not work on CPU.</h1>"
-
-
+    DESCRIPTION += (
+        "\n<h1>Running on CPU 🥶 This demo does not work on CPU.</h1>"
+    )
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-pipe = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', use_safetensors=True)  # torch_dtype=torch.float16, variant="safetensors"
-pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++")
-
-
-
-
-
-
-
 
+base = "stabilityai/stable-diffusion-xl-base-1.0"
+repo = "ByteDance/SDXL-Lightning"
+ckpt = "sdxl_lightning_4step_unet.safetensors"  # Use the correct ckpt for your step setting!
 
+unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
+unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
+pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16")
+pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
+pipe = pipe.to(device)
 
+# Load resadapter
 pipe.load_lora_weights(
     hf_hub_download(
-        repo_id="jiaxiangc/res-adapter",
-        subfolder="sdxl-i",
+        repo_id="jiaxiangc/res-adapter",
+        subfolder="sdxl-i",
         filename="resolution_lora.safetensors",
     ),
     adapter_name="res_adapter",
 )
-pipe.set_adapters(["res_adapter"], adapter_weights=[1.0])
-pipe = pipe.to(device)
+
+
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
-    guidance_scale_base: float = 5.0,
-    num_inference_steps_base: int = 20,
+    guidance_scale: float = 0,
+    num_inference_steps: int = 4,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
-    print(f"** Generating image for: \"{prompt}\" **")
+    print(f'** Generating image for: "{prompt}" **')
     generator = torch.Generator().manual_seed(seed)
 
     if not use_negative_prompt:
         prompt_2 = None  # type: ignore
     if not use_negative_prompt_2:
         negative_prompt_2 = None  # type: ignore
-    res_adapt = pipe(
-
 
+    pipe.set_adapters(["res_adapter"], adapter_weights=[0.0])
+    base_image = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
         prompt_2=prompt_2,
         negative_prompt_2=negative_prompt_2,
         width=width,
         height=height,
-        guidance_scale=guidance_scale_base,
-        num_inference_steps=num_inference_steps_base,
-        generator=generator,
-        output_type="pil",
+        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
+        output_type="pil",
+        generator=generator,
     ).images[0]
 
-    pipe.unet.set_attn_processor(AttnProcessor2_0())
-    base_image = pipe(
+    pipe.set_adapters(["res_adapter"], adapter_weights=[1.0])
+    res_adapt = pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
         prompt_2=prompt_2,
         negative_prompt_2=negative_prompt_2,
         width=width,
         height=height,
-        guidance_scale=guidance_scale_base,
-        num_inference_steps=num_inference_steps_base,
-
+        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
+        output_type="pil",
         generator=generator,
-        output_type="pil").images[0]
+    ).images[0]
 
-
-
-
-    return [res_adapt, base_image]
+    return [res_adapt, base_image]
 
 
-examples = [
-    "A realistic photograph of an astronaut in a jungle, cold color palette, detailed, 8k",
-    "An astronaut riding a green horse",
-    "cinematic film still, photo of a girl, cyberpunk, neonpunk, headset, city at night, sony fe 12-24mm f/2.8 gm, close up, 32k uhd, wallpaper, analog film grain, SONY headset"
-]
-
-theme = gr.themes.Base(
-    font=[gr.themes.GoogleFont('Libre Franklin'), gr.themes.GoogleFont('Public Sans'), 'system-ui', 'sans-serif'],
+examples = [
+    "A girl smiling",
+    "A boy smiling",
+]
+
+theme = gr.themes.Base(
+    font=[
+        gr.themes.GoogleFont("Libre Franklin"),
+        gr.themes.GoogleFont("Public Sans"),
+        "system-ui",
+        "sans-serif",
+    ],
 )
 with gr.Blocks(css="footer{display:none !important}", theme=theme) as demo:
     gr.Markdown(DESCRIPTION)
     # result = gr.Gallery(label="Right is Res-Adapt-LORA and Left is Base"),
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
-            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
+            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
             use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
-            use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
-
-
+            use_negative_prompt_2 = gr.Checkbox(
+                label="Use negative prompt 2", value=False
+            )
         negative_prompt = gr.Text(
             label="Negative prompt",
             max_lines=1,
-            placeholder="ugly, deformed, noisy, blurry, nsfw, low contrast, text, BadDream, 3d, cgi, render, fake, anime, open mouth, big forehead, long neck",
+            placeholder="Enter your prompt",
             visible=True,
         )
         prompt_2 = gr.Text(
             value=512,
         )
         with gr.Row():
-            guidance_scale_base = gr.Slider(
-                label="Guidance scale for base",
-                minimum=1,
+            guidance_scale = gr.Slider(
+                label="Guidance scale",
+                minimum=0,
                 maximum=20,
                 step=0.1,
-                value=9.5,
+                value=0,
             )
-            num_inference_steps_base = gr.Slider(
-                label="Number of inference steps for base",
-                minimum=10,
-                maximum=100,
+            num_inference_steps = gr.Slider(
+                label="Number of inference steps",
+                minimum=1,
+                maximum=50,
                 step=1,
-                value=25,
+                value=4,
             )
     gr.Examples(
         examples=examples,
             seed,
            width,
            height,
-            guidance_scale_base,
-            num_inference_steps_base,
        ],
+            guidance_scale,
+            num_inference_steps,
+        ],
-        outputs=gr.Gallery(label="Left is Res-Adapt-LORA and Right is Base"),
+        outputs=gr.Gallery(label="Left is ResAdapter and Right is Base"),
         api_name="run",
     )
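
For readers who want to exercise the new pipeline outside the Gradio UI, the commit's inference flow boils down to the short standalone script below. The model ids, checkpoint name, scheduler setting, LoRA source, and adapter weights are taken directly from the diff; the 512x512 resolution, fixed seed, prompt choice, and output file names are illustrative assumptions, and a CUDA GPU with diffusers, peft, safetensors, and huggingface_hub installed is assumed.

import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"
ckpt = "sdxl_lightning_4step_unet.safetensors"  # must match num_inference_steps below

# Swap the SDXL UNet for the 4-step Lightning checkpoint (as in the commit).
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cuda", torch.float16)
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cuda"))
pipe = StableDiffusionXLPipeline.from_pretrained(
    base, unet=unet, torch_dtype=torch.float16, variant="fp16"
).to("cuda")
# Lightning checkpoints expect trailing timestep spacing.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

# Attach the ResAdapter resolution LoRA (sdxl-i variant, as in the diff).
pipe.load_lora_weights(
    hf_hub_download(repo_id="jiaxiangc/res-adapter", subfolder="sdxl-i", filename="resolution_lora.safetensors"),
    adapter_name="res_adapter",
)

# Render the same prompt with the adapter off (weight 0.0) and on (1.0)
# at 512x512, below SDXL's native 1024, to see the resolution fix.
for weight, path in [(0.0, "base.png"), (1.0, "res_adapter.png")]:  # output names are hypothetical
    pipe.set_adapters(["res_adapter"], adapter_weights=[weight])
    image = pipe(
        "A girl smiling",  # one of the demo examples
        width=512,
        height=512,
        num_inference_steps=4,  # 4-step Lightning UNet
        guidance_scale=0,       # Lightning is distilled for CFG-free sampling
        generator=torch.Generator().manual_seed(0),  # reseed so both runs share noise
    ).images[0]
    image.save(path)

Toggling adapter_weights between 0.0 and 1.0 on a single pipeline object is what lets this commit drop the old AttnProcessor2_0 reset path: weight 0.0 zeroes the LoRA contribution, so the "base" image comes from plain SDXL-Lightning without reloading anything.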