Manjushri committed
Commit 25e7c41
Parent(s): 5511722

Update app.py

Files changed (1): app.py (+2 -1)

app.py CHANGED
@@ -37,6 +37,7 @@ def genie (prompt, negative_prompt, scale, steps, seed, upscaler):
         pipe.enable_xformers_memory_efficient_attention()
         upscaled = pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
         torch.cuda.empty_cache()
+        return (image, upscaled)
     else:
         torch.cuda.empty_cache()
         torch.cuda.max_memory_allocated(device=device)
@@ -45,7 +46,7 @@ def genie (prompt, negative_prompt, scale, steps, seed, upscaler):
         pipe.enable_xformers_memory_efficient_attention()
         image = pipe(prompt=prompt, image=int_image).images[0]
         torch.cuda.empty_cache()
-    return (image, upscaled)
+        return image
 
 gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
                                gr.Textbox(label='What you Do Not want the AI to generate.'),
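
For context, the previous version ended genie() with a single return (image, upscaled) after the if/else, so the non-upscaler path would raise a NameError, since upscaled is only assigned in the upscaler branch; this commit moves a return into each branch instead. Below is a minimal, runnable sketch of that control flow with the diffusers pipeline calls replaced by stubs; only the placement of the two return statements comes from the diff, and the truthy check on upscaler is an assumption.

# Minimal, self-contained sketch of the control-flow fix in this commit.
# _base() and _upscale() are hypothetical stand-ins for the app's pipelines.
def _base(prompt):
    # stub for the base/refiner pass
    return f"image for {prompt!r}"

def _upscale(image):
    # stub for the upscaler pass
    return f"upscaled {image}"

def genie(prompt, negative_prompt, scale, steps, seed, upscaler):
    image = _base(prompt)
    if upscaler:                  # assumed truthy check on the upscaler flag
        upscaled = _upscale(image)
        return (image, upscaled)  # added: return inside the upscaler branch
    else:
        return image              # added: non-upscaler path returns a single image

# Before this commit, a single `return (image, upscaled)` after the if/else
# failed on the non-upscaler path, where `upscaled` is never defined.
print(genie("a cat", "", 7, 25, 0, upscaler=False))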