Manjushri committed on
Commit a1c1647
1 Parent(s): a1cecdb

Update app.py

Files changed (1):
  1. app.py +8 -8
app.py CHANGED
@@ -322,11 +322,11 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
     if upscale == "Yes":
         torch.cuda.empty_cache()
         torch.cuda.max_memory_allocated(device=device)
-        pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
-        pipe.enable_xformers_memory_efficient_attention()
-        pipe = pipe.to(device)
+        upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+        upscaler.enable_xformers_memory_efficient_attention()
+        upscaler = upscaler.to(device)
         torch.cuda.empty_cache()
-        upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+        upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
         torch.cuda.empty_cache()
         return upscaled
     else:
@@ -338,11 +338,11 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
         torch.cuda.empty_cache()
         torch.cuda.max_memory_allocated(device=device)

-        pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
-        pipe.enable_xformers_memory_efficient_attention()
-        pipe = pipe.to(device)
+        upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+        upscaler.enable_xformers_memory_efficient_attention()
+        upscaler = upscaler.to(device)
         torch.cuda.empty_cache()
-        upscaled = pipe(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+        upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
         torch.cuda.empty_cache()
         return upscaled
     else:
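
For context, here is a minimal, self-contained sketch of the pattern this commit renames: loading the stabilityai/sd-x2-latent-upscaler pipeline with diffusers and running it over the output of a base text-to-image pass. The base model choice, prompt, and output file name below are illustrative assumptions and are not taken from app.py; only the upscaler load and call mirror the lines shown in the diff. It assumes diffusers, torch, and (for the memory-efficient attention call) xformers are installed, and that a CUDA GPU is available.

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Hypothetical base pipeline; the app selects its base model elsewhere.
base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=dtype, use_safetensors=True
).to(device)
prompt = "a photo of a lighthouse at dusk"
image = base(prompt=prompt, num_inference_steps=25).images[0]

# 2x latent upscaler, loaded and invoked the same way the diff does.
upscaler = DiffusionPipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=dtype, use_safetensors=True
)
if device == "cuda":
    upscaler.enable_xformers_memory_efficient_attention()  # requires xformers
upscaler = upscaler.to(device)

torch.cuda.empty_cache()
upscaled = upscaler(
    prompt=prompt,
    negative_prompt="blurry, low quality",
    image=image,                 # output image from the base pass
    num_inference_steps=15,
    guidance_scale=0,
).images[0]
torch.cuda.empty_cache()
upscaled.save("upscaled.png")

Setting guidance_scale=0 disables classifier-free guidance for the upscaling pass, matching the call in the diff.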