amazonaws-sp committed
Commit 203acc4
1 Parent(s): 1ea0405

Update app.py

Files changed (1):
  1. app.py (+73, -71)
app.py CHANGED
```diff
@@ -2,28 +2,30 @@
 
 from __future__ import annotations
 
+import requests
 import os
 import random
 
 import gradio as gr
 import numpy as np
-import PIL.Image
 import spaces
 import torch
-from diffusers import AutoencoderKL, DiffusionPipeline
+from PIL import Image
+from io import BytesIO
+from diffusers import AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image
 
-DESCRIPTION = "# SDXL"
+DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+    DESCRIPTION += "\n<p>⚠️ This Space is running on CPU, and this demo doesn't work on CPU 😞! Run it on a GPU by duplicating this Space, or try our website free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
-ENABLE_REFINER = os.getenv("ENABLE_REFINER", "1") == "1"
 ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
 ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
+ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```
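Each optional feature is gated by an environment flag that defaults to on, so a deployment can hide individual controls. A minimal standalone sketch of the same pattern; `env_flag` is a hypothetical helper, not part of the app:

```python
# Minimal sketch of the feature-flag pattern above; env_flag is a hypothetical
# helper. Every flag defaults to "1" (enabled) and can be switched off per
# deployment via the environment.
import os

def env_flag(name: str, default: str = "1") -> bool:
    return os.getenv(name, default) == "1"

ENABLE_USE_LORA = env_flag("ENABLE_USE_LORA")
ENABLE_USE_VAE = env_flag("ENABLE_USE_VAE")
ENABLE_USE_IMG2IMG = env_flag("ENABLE_USE_IMG2IMG")
print(ENABLE_USE_LORA, ENABLE_USE_VAE, ENABLE_USE_IMG2IMG)
```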
 
```diff
@@ -46,26 +48,37 @@ def generate(
     width: int = 1024,
     height: int = 1024,
     guidance_scale_base: float = 5.0,
-    guidance_scale_refiner: float = 5.0,
     num_inference_steps_base: int = 25,
-    num_inference_steps_refiner: int = 25,
+    strength_img2img: float = 0.7,
     use_vae: bool = False,
     use_lora: bool = False,
-    apply_refiner: bool = False,
-    model = 'cagliostrolab/animagine-xl-3.0',
-    vaecall = 'stabilityai/sd-vae-ft-mse',
-    lora = 'amazonaws-la/juliette',
+    model = 'stabilityai/stable-diffusion-xl-base-1.0',
+    vaecall = 'madebyollin/sdxl-vae-fp16-fix',
+    lora = '',
     lora_scale: float = 0.7,
-) -> PIL.Image.Image:
+    use_img2img: bool = False,
+    url = '',
+):
     if torch.cuda.is_available():
 
-        if not use_vae:
-            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
-        if use_vae:
-            vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
-            pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+        if not use_img2img:
+            pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+
+        if use_img2img:
+            pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
+
+            if use_vae:
+                vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
+                pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
+
+            response = requests.get(url)
+            init_image = Image.open(BytesIO(response.content)).convert("RGB")
+            init_image = init_image.resize((width, height))
 
         if use_lora:
             pipe.load_lora_weights(lora)
             pipe.fuse_lora(lora_scale)
```
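When `use_vae` is checked, each branch above calls `from_pretrained` twice and throws the first pipeline away, and the init-image download has no timeout or error handling. A hedged sketch of an equivalent single-load setup; `build_pipe` and `fetch_init_image` are illustrative helpers, not names from the commit:

```python
# Illustrative refactor, not the commit's code: load the checkpoint once,
# attaching the optional VAE, and pick the pipeline class by mode.
import torch
import requests
from io import BytesIO
from PIL import Image
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline

def build_pipe(model: str, use_img2img: bool, use_vae: bool, vaecall: str):
    cls = AutoPipelineForImage2Image if use_img2img else DiffusionPipeline
    kwargs = {"torch_dtype": torch.float16}
    if use_vae:
        kwargs["vae"] = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
    return cls.from_pretrained(model, **kwargs)

def fetch_init_image(url: str, width: int, height: int) -> Image.Image:
    # fail fast on bad URLs instead of erroring inside the pipeline call
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return Image.open(BytesIO(response.content)).convert("RGB").resize((width, height))
```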
```diff
@@ -88,7 +101,7 @@ def generate(
     if not use_negative_prompt_2:
         negative_prompt_2 = None  # type: ignore
 
-    if not apply_refiner:
+    if not use_img2img:
         return pipe(
             prompt=prompt,
             negative_prompt=negative_prompt,
```
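Unchecked prompt fields are passed as `None`; the SDXL pipelines in diffusers accept that and fall back internally (`prompt_2` defaults to `prompt`, `negative_prompt_2` to `negative_prompt`). A tiny sketch of that normalization, with a hypothetical helper name:

```python
# Hypothetical helper mirroring the None-ing of unused prompt fields above;
# diffusers' SDXL pipelines substitute the primary prompt(s) when these are None.
def normalize_prompts(use_negative_prompt, negative_prompt, use_prompt_2, prompt_2):
    if not use_negative_prompt:
        negative_prompt = None
    if not use_prompt_2:
        prompt_2 = None
    return negative_prompt, prompt_2

print(normalize_prompts(False, "blurry", True, "a crisp photo"))
# (None, 'a crisp photo')
```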
```diff
@@ -102,8 +115,10 @@ def generate(
             output_type="pil",
         ).images[0]
     else:
-        latents = pipe(
+        images = pipe(
             prompt=prompt,
+            image=init_image,
+            strength=strength_img2img,
             negative_prompt=negative_prompt,
             prompt_2=prompt_2,
             negative_prompt_2=negative_prompt_2,
```
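In the img2img branch, `strength` controls how much of the noise schedule is applied to the input image: roughly `int(num_inference_steps * strength)` denoising steps actually run. A hedged, standalone sketch of the same call; the model ID and URL are placeholders taken from the UI hints, and a CUDA GPU is assumed:

```python
# Standalone img2img sketch with placeholder model/URL; assumes a CUDA GPU.
import torch
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image

pipe = AutoPipelineForImage2Image.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/image.png").convert("RGB").resize((1024, 1024))

# strength=0.7 keeps ~30% of the input's structure; ~int(25 * 0.7) = 17 steps run
image = pipe(
    prompt="Astronaut in a jungle, cold color palette, detailed, 8k",
    image=init_image,
    strength=0.7,
    guidance_scale=5.0,
    num_inference_steps=25,
).images[0]
image.save("img2img.png")
```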
```diff
@@ -112,75 +127,67 @@ def generate(
             guidance_scale=guidance_scale_base,
             num_inference_steps=num_inference_steps_base,
             generator=generator,
-            output_type="latent",
-        ).images
-        image = refiner(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            prompt_2=prompt_2,
-            negative_prompt_2=negative_prompt_2,
-            guidance_scale=guidance_scale_refiner,
-            num_inference_steps=num_inference_steps_refiner,
-            image=latents,
-            generator=generator,
+            output_type="pil",
         ).images[0]
-        return image
-
+        return images
 
 examples = [
     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
     "An astronaut riding a green horse",
 ]
 
-with gr.Blocks(css="style.css") as demo:
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(
-        value="Duplicate Space for private use",
-        elem_id="duplicate-button",
-        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
+    gr.HTML(
+        "<p><center>📙 For any additional support, join our <a href='https://discord.gg/JprjXpjt9K'>Discord</a></center></p>"
     )
+    gr.Markdown(DESCRIPTION, elem_id="description")
     with gr.Group():
-        model = gr.Text(label='Model')
-        vaecall = gr.Text(label='VAE')
-        lora = gr.Text(label='LoRA')
+        model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
+        vaecall = gr.Text(label='VAE', placeholder='e.g. madebyollin/sdxl-vae-fp16-fix')
+        lora = gr.Text(label='LoRA', placeholder='e.g. nerijs/pixel-art-xl')
         lora_scale = gr.Slider(
+            info="The closer to 1, the more the output will resemble the LoRA, though artifacts may appear.",
             label="Lora Scale",
             minimum=0.01,
             maximum=1,
             step=0.01,
             value=0.7,
         )
+        url = gr.Text(label='URL (Img2Img)', placeholder='e.g. https://example.com/image.png')
         with gr.Row():
             prompt = gr.Text(
+                placeholder="Input prompt",
                 label="Prompt",
                 show_label=False,
                 max_lines=1,
-                placeholder="Enter your prompt",
                 container=False,
             )
             run_button = gr.Button("Run", scale=0)
         result = gr.Image(label="Result", show_label=False)
     with gr.Accordion("Advanced options", open=False):
         with gr.Row():
+            use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
+            use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
+            use_lora = gr.Checkbox(label='Use Lora', value=False, visible=ENABLE_USE_LORA)
             use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
             use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
             use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
         negative_prompt = gr.Text(
+            placeholder="Input Negative Prompt",
             label="Negative prompt",
             max_lines=1,
-            placeholder="Enter a negative prompt",
             visible=False,
         )
         prompt_2 = gr.Text(
+            placeholder="Input Prompt 2",
             label="Prompt 2",
             max_lines=1,
-            placeholder="Enter your prompt",
            visible=False,
         )
         negative_prompt_2 = gr.Text(
+            placeholder="Input Negative Prompt 2",
             label="Negative prompt 2",
             max_lines=1,
-            placeholder="Enter a negative prompt",
             visible=False,
         )
```
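The Model, VAE, and LoRA textboxes feed `from_pretrained`, and the LoRA path goes through `load_lora_weights` plus `fuse_lora`. A sketch of what those boxes drive, using the UI's own placeholder values as stand-ins:

```python
# Sketch of the LoRA path, using the placeholder repo IDs shown in the UI.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.load_lora_weights("nerijs/pixel-art-xl")
pipe.fuse_lora(lora_scale=0.7)  # bake the LoRA into the base weights at the slider's scale
# pipe.unfuse_lora() restores the base weights before loading a different LoRA
```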
 
```diff
@@ -207,38 +214,33 @@ with gr.Blocks(css="style.css") as demo:
                 step=32,
                 value=1024,
             )
-        use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
-        use_lora = gr.Checkbox(label='Use Lora', value=False, visible=ENABLE_USE_LORA)
-        apply_refiner = gr.Checkbox(label="Apply refiner", value=False, visible=ENABLE_REFINER)
+
         with gr.Row():
             guidance_scale_base = gr.Slider(
-                label="Guidance scale for base",
+                info="Scale for classifier-free guidance",
+                label="Guidance scale",
                 minimum=1,
                 maximum=20,
                 step=0.1,
                 value=5.0,
             )
+        with gr.Row():
             num_inference_steps_base = gr.Slider(
-                label="Number of inference steps for base",
+                info="Number of denoising steps",
+                label="Number of inference steps",
                 minimum=10,
                 maximum=100,
                 step=1,
                 value=25,
             )
-        with gr.Row(visible=False) as refiner_params:
-            guidance_scale_refiner = gr.Slider(
-                label="Guidance scale for refiner",
-                minimum=1,
-                maximum=20,
-                step=0.1,
-                value=5.0,
-            )
-            num_inference_steps_refiner = gr.Slider(
-                label="Number of inference steps for refiner",
-                minimum=10,
-                maximum=100,
-                step=1,
-                value=25,
+        with gr.Row():
+            strength_img2img = gr.Slider(
+                info="Strength for Img2Img",
+                label="Strength",
+                minimum=0,
+                maximum=1,
+                step=0.01,
+                value=0.7,
             )
 
     gr.Examples(
```
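With the defaults above (strength 0.7, 25 steps), the img2img pass runs roughly `int(25 * 0.7) = 17` denoising steps, since diffusers skips the first `1 - strength` fraction of the schedule. The arithmetic, as an approximation rather than an exact API contract:

```python
# Back-of-the-envelope: how the Strength and inference-steps sliders interact
# in diffusers' img2img pipelines (approximate scheduler behaviour).
num_inference_steps = 25
strength = 0.7
effective_steps = int(num_inference_steps * strength)
print(effective_steps)  # 17
```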
```diff
@@ -284,10 +286,10 @@ with gr.Blocks(css="style.css") as demo:
         queue=False,
         api_name=False,
     )
-    apply_refiner.change(
+    use_img2img.change(
         fn=lambda x: gr.update(visible=x),
-        inputs=apply_refiner,
-        outputs=refiner_params,
+        inputs=use_img2img,
+        outputs=url,
         queue=False,
         api_name=False,
     )
```
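The handler reuses the show/hide pattern that previously drove the refiner row, now pointing the Img2Img checkbox at the URL textbox. A self-contained sketch of that pattern (this sketch hides the textbox initially, whereas the app itself creates it visible):

```python
# Self-contained show/hide sketch: a checkbox toggles the URL textbox.
import gradio as gr

with gr.Blocks() as demo:
    use_img2img = gr.Checkbox(label="Use Img2Img", value=False)
    url = gr.Text(label="URL (Img2Img)", visible=False)
    use_img2img.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_img2img,
        outputs=url,
    )

demo.launch()
```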
```diff
@@ -319,16 +321,16 @@ with gr.Blocks(css="style.css") as demo:
             width,
             height,
             guidance_scale_base,
-            guidance_scale_refiner,
             num_inference_steps_base,
-            num_inference_steps_refiner,
+            strength_img2img,
             use_vae,
             use_lora,
-            apply_refiner,
             model,
             vaecall,
             lora,
             lora_scale,
+            use_img2img,
+            url,
         ],
         outputs=result,
         api_name="run",
```
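Gradio passes `inputs` to the callback positionally, so this list must stay aligned with `generate`'s parameter order after the refiner arguments were dropped and `strength_img2img`, `use_img2img`, and `url` were appended. A toy illustration of the same rule:

```python
# Toy illustration: the inputs list binds to parameters by position, not label.
import gradio as gr

def f(a, b):
    return f"a={a}, b={b}"

with gr.Blocks() as demo:
    a_box = gr.Number(value=1, label="a")
    b_box = gr.Number(value=2, label="b")
    out = gr.Textbox()
    # order here decides which box binds to which parameter, not the labels
    gr.Button("Run").click(f, inputs=[a_box, b_box], outputs=out)

demo.launch()
```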
 