Manjushri committed on
Commit
a3f33c1
1 Parent(s): 15a23dd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -84,7 +84,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
84
  semi.enable_xformers_memory_efficient_attention()
85
  torch.cuda.empty_cache()
86
  if upscale == "Yes":
87
- int_image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
88
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
89
  torch.cuda.empty_cache()
90
  return image
@@ -99,7 +99,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
99
  animagine.enable_xformers_memory_efficient_attention()
100
  torch.cuda.empty_cache()
101
  if upscale == "Yes":
102
- int_image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
103
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
104
  torch.cuda.empty_cache()
105
  return image
@@ -115,7 +115,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, up
115
  sdxl.enable_xformers_memory_efficient_attention()
116
  torch.cuda.empty_cache()
117
  if upscale == "Yes":
118
- int_image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
119
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
120
  torch.cuda.empty_cache()
121
  return image
 
84
  semi.enable_xformers_memory_efficient_attention()
85
  torch.cuda.empty_cache()
86
  if upscale == "Yes":
87
+ int_image = semi(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
88
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
89
  torch.cuda.empty_cache()
90
  return image
 
99
  animagine.enable_xformers_memory_efficient_attention()
100
  torch.cuda.empty_cache()
101
  if upscale == "Yes":
102
+ int_image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
103
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
104
  torch.cuda.empty_cache()
105
  return image
 
115
  sdxl.enable_xformers_memory_efficient_attention()
116
  torch.cuda.empty_cache()
117
  if upscale == "Yes":
118
+ int_image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
119
  image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
120
  torch.cuda.empty_cache()
121
  return image