Manjushri committed on
Commit b9c2637
1 Parent(s): c73951b

Update app.py

Files changed (1)
  1. app.py +3 -6
app.py CHANGED
@@ -255,10 +255,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
         return image

     if Model == "SDXL 1.0":
-        #from diffusers import StableCascadeCombinedPipeline
-
-        #sdxl = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16)
-
+
         torch.cuda.empty_cache()
         torch.cuda.max_memory_allocated(device=device)
         sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
@@ -269,7 +266,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
         if refine == "Yes":
             torch.cuda.max_memory_allocated(device=device)
             torch.cuda.empty_cache()
-            image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=10, prior_num_inference_steps=20, prior_guidance_scale=3.0, output_type="latent").images
+            image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
             torch.cuda.empty_cache()
             sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
             sdxl.enable_xformers_memory_efficient_attention()
@@ -301,7 +298,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
             return upscaled
         else:

-            image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=10, prior_num_inference_steps=20, guidance_scale=3).images[0]
+            image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
             torch.cuda.empty_cache()

             return image
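
The edit drops the leftover Stable Cascade parameters (prior_num_inference_steps, prior_guidance_scale) and the hard-coded step/scale values, so the SDXL calls now honor the steps and scale arguments passed into genie(). Below is a minimal sketch of the base-to-refiner handoff those two changed calls perform, lifted out of the app's genie() wrapper; the prompt, size, and slider values are illustrative stand-ins for the Gradio inputs, not values from the app.

import torch
from diffusers import DiffusionPipeline

# Assumes a CUDA GPU, as app.py does (it calls torch.cuda.empty_cache()).
device = "cuda"

# Illustrative stand-ins for the Gradio inputs (Prompt, negative_prompt, steps, scale).
prompt, negative, steps, scale = "a lighthouse at dusk, photo", "blurry", 25, 7.0

# Base pipeline: same checkpoint and loading options as the sdxl call above.
base = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
).to(device)

# refine == "Yes" path: keep the base output as latents so the refiner can
# finish denoising it instead of round-tripping through pixel space.
latents = base(
    prompt, negative_prompt=negative, height=768, width=768,
    num_inference_steps=steps, guidance_scale=scale,
    output_type="latent",
).images

refiner = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",
    torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
).to(device)

image = refiner(
    prompt, negative_prompt=negative, image=latents,
    num_inference_steps=steps,
).images[0]
image.save("refined.png")

The refine == "No" branch is the same base call without output_type="latent", taking .images[0] directly, exactly as the second changed line shows.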