adamelliotfields committed
Commit 5f7707a
1 Parent(s): 22a0476

Remove upscaler

Files changed (6):
  1. README.md +0 -5
  2. app.py +0 -14
  3. cli.py +0 -2
  4. generate.py +4 -25
  5. requirements.txt +0 -1
  6. usage.md +0 -4
README.md CHANGED
@@ -14,7 +14,6 @@ pinned: true
 header: mini
 license: apache-2.0
 models:
-- fal/AuraSR-v2
 - fluently/Fluently-v4
 - Linaqruf/anything-v3-1
 - Lykon/dreamshaper-8
@@ -22,9 +21,6 @@ models:
 - runwayml/stable-diffusion-v1-5
 - SG161222/Realistic_Vision_V5.1_noVAE
 preload_from_hub:
-- >-
-  fal/AuraSR-v2
-  config.json,model.safetensors
 - >-
   fluently/Fluently-v4
   text_encoder/model.fp16.safetensors,unet/diffusion_pytorch_model.fp16.safetensors,vae/diffusion_pytorch_model.fp16.safetensors
@@ -52,7 +48,6 @@ Gradio app for Stable Diffusion 1.5 including:
 * multiple samplers with Karras schedule
 * Compel prompting
 * 100+ styles from sdxl_prompt_styler
-* AuraSR GAN
 * DeepCache and ToMe
 * optional TAESD
app.py CHANGED
@@ -169,13 +169,6 @@ with gr.Blocks(
                 value=True,
                 scale=1,
             )
-            upscale_4x = gr.Checkbox(
-                interactive=cfg.NUM_IMAGES == 1,
-                elem_classes=["checkbox"],
-                label="Upscale 4x",
-                value=False,
-                scale=3,
-            )

         with gr.TabItem("🛠️ Advanced"):
             with gr.Group():
@@ -270,12 +263,6 @@ with gr.Blocks(
         js=seed_js,
     )

-    num_images.change(
-        lambda n, upscale: gr.Checkbox(interactive=n == 1, value=upscale if n == 1 else False),
-        inputs=[num_images, upscale_4x],
-        outputs=[upscale_4x],
-    )
-
     file_format.change(
         lambda f: gr.Gallery(format=f),
         inputs=[file_format],
@@ -307,7 +294,6 @@ with gr.Blocks(
             increment_seed,
             deepcache_interval,
             tome_ratio,
-            upscale_4x,
         ],
     )
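For reference, the removed wiring follows a common Gradio gating pattern: the checkbox stays interactive only while a single image is requested, and resets to False otherwise. A minimal self-contained sketch of that pattern (a hypothetical demo; app.py's cfg.NUM_IMAGES is replaced by a slider here):

import gradio as gr

with gr.Blocks() as demo:
    num_images = gr.Slider(1, 4, value=1, step=1, label="Images")
    upscale_4x = gr.Checkbox(label="Upscale 4x", value=False)

    # mirrors the removed handler: interactive only for single-image runs,
    # forcing the value back to False when more images are requested
    num_images.change(
        lambda n, upscale: gr.Checkbox(interactive=n == 1, value=upscale if n == 1 else False),
        inputs=[num_images, upscale_4x],
        outputs=[upscale_4x],
    )

if __name__ == "__main__":
    demo.launch()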
 
 
cli.py CHANGED
@@ -33,7 +33,6 @@ def main():
     parser.add_argument("--clip-skip", action="store_true")
     parser.add_argument("--truncate", action="store_true")
     parser.add_argument("--karras", action="store_true")
-    parser.add_argument("--upscale", action="store_true")
     parser.add_argument("--no-increment", action="store_false")
     # fmt: on

@@ -57,7 +56,6 @@ def main():
         args.no_increment,
         args.deepcache,
         args.tome,
-        args.upscale,
     )
     save_images(images, args.filename)
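Worth noting: args.upscale was passed to generate() positionally, so this file has to change in lockstep with the signature change in generate.py below; otherwise the value meant for upscale would silently bind to the next parameter. A tiny sketch with hypothetical trimmed-down signatures, for illustration only:

def generate_old(prompt, deepcache_interval=1, tome_ratio=0, upscale=False, log=None):
    return log

def generate_new(prompt, deepcache_interval=1, tome_ratio=0, log=None):
    return log

# the old positional call against the new signature shifts every later
# argument: False (meant for upscale) lands in log instead
assert generate_old("p", 1, 0.5, False, print) is print
assert generate_new("p", 1, 0.5, False) is False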
 
 
generate.py CHANGED
@@ -10,7 +10,6 @@ from typing import Callable
 import spaces
 import tomesd
 import torch
-from aura_sr import AuraSR
 from compel import Compel, DiffusersTextualInversionManager, ReturnedEmbeddingsType
 from compel.prompt_parser import PromptParser
 from DeepCache import DeepCacheSDHelper
@@ -56,7 +55,6 @@ class Loader:
     def __new__(cls):
         if cls._instance is None:
             cls._instance = super(Loader, cls).__new__(cls)
-            cls._instance.gan = None
             cls._instance.pipe = None
         return cls._instance

@@ -102,7 +100,7 @@ class Loader:
             model=model,
         )

-    def load(self, model, scheduler, karras, taesd, deepcache_interval, upscale, dtype, device):
+    def load(self, model, scheduler, karras, taesd, deepcache_interval, dtype, device):
         model_lower = model.lower()

         schedulers = {
@@ -156,10 +154,6 @@ class Loader:
             or self.pipe.scheduler.config.use_karras_sigmas == karras
         )

-        if upscale and not self.gan:
-            print("Loading fal/AuraSR-v2...")
-            self.gan = AuraSR.from_pretrained("fal/AuraSR-v2")
-
         if same_model:
             if not same_scheduler:
                 print(f"Switching to {scheduler}...")
@@ -169,7 +163,7 @@ class Loader:
             self.pipe.scheduler = schedulers[scheduler](**scheduler_kwargs)
             self._load_vae(model_lower, taesd, variant)
             self._load_deepcache(interval=deepcache_interval)
-            return self.pipe, self.gan
+            return self.pipe
         else:
             print(f"Unloading {model_name.lower()}...")
             self.pipe = None
@@ -186,16 +180,8 @@ class Loader:
         self._load_vae(model_lower, taesd, variant)
         self._load_deepcache(interval=deepcache_interval)

-        if upscale and self.gan is None:
-            print("Loading fal/AuraSR-v2...")
-            self.gan = AuraSR.from_pretrained("fal/AuraSR-v2")
-
-        if not upscale and self.gan is not None:
-            print("Unloading fal/AuraSR-v2...")
-            self.gan = None
-
         torch.cuda.empty_cache()
-        return self.pipe, self.gan
+        return self.pipe


 # applies tome to the pipeline
@@ -261,7 +247,6 @@ def generate(
     increment_seed=True,
     deepcache_interval=1,
     tome_ratio=0,
-    upscale=False,
     log: Callable[[str], None] = None,
     Error=Exception,
 ):
@@ -289,13 +274,12 @@ def generate(
     with torch.inference_mode():
         start = time.perf_counter()
         loader = Loader()
-        pipe, gan = loader.load(
+        pipe = loader.load(
             model,
             scheduler,
             karras,
             taesd,
             deepcache_interval,
-            upscale,
             DTYPE,
             DEVICE,
         )
@@ -347,10 +331,6 @@ def generate(
                 height=height,
                 width=width,
             ).images[0]
-            if upscale:
-                print("Upscaling image...")
-                batch_size = 12 if ZERO_GPU else 4  # smaller batch to fit in 8GB
-                image = gan.upscale_4x_overlapped(image, max_batch_size=batch_size)
             images.append((image, str(current_seed)))
         finally:
             if not ZERO_GPU:
@@ -362,7 +342,6 @@ def generate(
         if ZERO_GPU:
             # spaces always start fresh
             loader.pipe = None
-            loader.gan = None

         diff = time.perf_counter() - start
         if log:
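The upscaler itself still exists upstream, so the deleted path can be reproduced standalone if ever needed. A hedged sketch using only the calls that appear in the removed lines (AuraSR.from_pretrained and upscale_4x_overlapped); it assumes the now-removed aura-sr package is reinstalled, and input.png is a hypothetical local file:

from aura_sr import AuraSR
from PIL import Image

# same model and non-ZeroGPU batch size as the removed code path
gan = AuraSR.from_pretrained("fal/AuraSR-v2")
image = Image.open("input.png")
upscaled = gan.upscale_4x_overlapped(image, max_batch_size=4)
upscaled.save("output_4x.png")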
 
requirements.txt CHANGED
@@ -1,5 +1,4 @@
 accelerate
-aura-sr==0.0.4
 compel
 deepcache==0.1.1
 diffusers
 
usage.md CHANGED
@@ -55,10 +55,6 @@ Optionally, the [Karras](https://arxiv.org/abs/2206.00364) noise schedule can be
 * [LMS](https://huggingface.co/docs/diffusers/api/schedulers/lms_discrete)
 * [PNDM](https://huggingface.co/docs/diffusers/api/schedulers/pndm)

-#### Upscaler
-
-[AuraSR](https://huggingface.co/fal/AuraSR-v2) from [fal.ai](https://fal.ai) can be enabled to upscale your image 4x. It's disabled if `Images` is greater than **1**, so use it once you've finalized your parameters and found a seed.
-
 ### Advanced

 #### DeepCache
 