Manjushri committed
Commit 2f00ac5
Parent: a1c1647

Update app.py

Files changed (1): app.py +8 -8
app.py CHANGED
@@ -232,7 +232,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  animagine.enable_xformers_memory_efficient_attention()
  animagine = animagine.to(device)
  torch.cuda.empty_cache()
- upscaled = animagine(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+ upscaled = animagine(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
  torch.cuda.empty_cache()
  return upscaled
  else:
@@ -245,7 +245,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  upscaler.enable_xformers_memory_efficient_attention()
  upscaler = upscaler.to(device)
  torch.cuda.empty_cache()
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
  torch.cuda.empty_cache()
  return upscaled
  else:
@@ -280,7 +280,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  sdxl.enable_xformers_memory_efficient_attention()
  sdxl = sdxl.to(device)
  torch.cuda.empty_cache()
- upscaled = sdxl(prompt=Prompt, negative_prompt=negative_prompt, image=refined, num_inference_steps=15, guidance_scale=0).images[0]
+ upscaled = sdxl(prompt=Prompt, negative_prompt=negative_prompt, image=refined, num_inference_steps=5, guidance_scale=0).images[0]
  torch.cuda.empty_cache()
  return upscaled
  else:
@@ -294,7 +294,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  upscaler.enable_xformers_memory_efficient_attention()
  upscaler = upscaler.to(device)
  torch.cuda.empty_cache()
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
  torch.cuda.empty_cache()
  return upscaled
  else:
@@ -326,7 +326,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  upscaler.enable_xformers_memory_efficient_attention()
  upscaler = upscaler.to(device)
  torch.cuda.empty_cache()
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
  torch.cuda.empty_cache()
  return upscaled
  else:
@@ -342,7 +342,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  upscaler.enable_xformers_memory_efficient_attention()
  upscaler = upscaler.to(device)
  torch.cuda.empty_cache()
- upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+ upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
  torch.cuda.empty_cache()
  return upscaled
  else:
@@ -355,8 +355,8 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
  gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryBook', 'SemiReal', 'Animagine XL 3.0', 'SDXL 1.0', 'FusionXL'], value='PhotoReal', label='Choose Model'),
  gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
  gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
- gr.Slider(512, 1536, 768, step=128, label='Height'),
- gr.Slider(512, 1536, 768, step=128, label='Width'),
+ gr.Slider(512, 1280, 768, step=128, label='Height'),
+ gr.Slider(512, 1280, 768, step=128, label='Width'),
  gr.Slider(1, maximum=15, value=5, step=.25, label='Guidance Scale'),
  gr.Slider(5, maximum=100, value=50, step=5, label='Number of Iterations'),
  gr.Slider(minimum=0, step=1, maximum=9999999999999999, randomize=True, label='Seed: 0 is Random'),
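
For context, every code hunk makes the same change: the secondary upscale/refine pass drops from 15 denoising steps to 5 while keeping guidance_scale=0, and the Height/Width sliders are capped at 1280 instead of 1536, presumably to reduce generation time and memory use. Below is a minimal sketch of that pass as it reads after this commit. It assumes the upscaler is diffusers' StableDiffusionLatentUpscalePipeline loaded from the stabilityai/sd-x2-latent-upscaler checkpoint (the pipeline construction is outside the visible hunks), and the upscale_pass() wrapper and its arguments are purely illustrative.

import torch
from diffusers import StableDiffusionLatentUpscalePipeline  # assumed pipeline class; not shown in the hunks

def upscale_pass(image, prompt, negative_prompt=""):
    # Illustrative wrapper around the pass this commit tunes.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler",  # assumed checkpoint
        torch_dtype=torch.float16,
    )
    # Mirrors the diff: xformers attention and CUDA cache clears assume a GPU.
    upscaler.enable_xformers_memory_efficient_attention()
    upscaler = upscaler.to(device)
    torch.cuda.empty_cache()
    # guidance_scale=0 effectively disables prompt guidance, so this pass only
    # refines the already-generated image; the commit lowers its step count
    # from 15 to 5.
    upscaled = upscaler(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=image,
        num_inference_steps=5,
        guidance_scale=0,
    ).images[0]
    torch.cuda.empty_cache()
    return upscaled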