Manjushri committed
Commit 8024d12
Parent: b9c2637

Update app.py

Files changed (1):
  app.py  +41 -2
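
The hunk below adds a 'Test' option to genie(): it loads the circulus/canvers-fusionXL-v1 pipeline and, depending on the refine and upscale flags, chains the SDXL 1.0 refiner and the 2x latent upscaler before returning the image. For reference, here is a minimal standalone sketch of the refine-plus-upscale path, assuming a CUDA GPU with diffusers and torch installed; the prompt strings, the 0.8 hand-off value standing in for the app's high_noise_frac, and the latent output_type hand-off to the refiner are illustrative assumptions, not values taken from the commit.

    # Standalone sketch of the new 'Test' path with refine and upscale both enabled.
    # Assumptions (not from the commit): CUDA is available, high_noise_frac = 0.8,
    # the example prompts, and the latent hand-off from base to refiner.
    import torch
    from diffusers import DiffusionPipeline

    device = "cuda"
    prompt = "a lighthouse on a cliff at sunset"      # placeholder prompt
    negative_prompt = "blurry, low quality"           # placeholder negative prompt
    high_noise_frac = 0.8                             # assumed hand-off fraction

    # Base model used by the 'Test' branch.
    base = DiffusionPipeline.from_pretrained(
        "circulus/canvers-fusionXL-v1", torch_dtype=torch.float16
    ).to(device)

    # SDXL refiner continues denoising from the base output.
    refiner = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        torch_dtype=torch.float16, variant="fp16", use_safetensors=True,
    ).to(device)

    latents = base(
        prompt, negative_prompt=negative_prompt, height=768, width=768,
        num_inference_steps=25, guidance_scale=7,
        denoising_end=high_noise_frac, output_type="latent",
    ).images
    image = refiner(
        prompt, negative_prompt=negative_prompt, image=latents,
        denoising_start=high_noise_frac,
    ).images[0]

    # Optional 2x latent upscaler, mirroring the upscale == "Yes" branch.
    upscaler = DiffusionPipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler",
        torch_dtype=torch.float16, use_safetensors=True,
    ).to(device)
    upscaled = upscaler(
        prompt=prompt, negative_prompt=negative_prompt, image=image,
        num_inference_steps=15, guidance_scale=0,
    ).images[0]
    upscaled.save("test_branch_output.png")

As in the commit, guidance_scale=0 on the upscaler keeps that step purely image-driven rather than prompt-driven.
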
app.py CHANGED
@@ -300,10 +300,49 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
 
         image = sdxl(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
         torch.cuda.empty_cache()
-
+
+    if model == 'Test':
+
+        pipe = DiffusionPipeline.from_pretrained("circulus/canvers-fusionXL-v1", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("circulus/canvers-real-v3.8.1")
+        pipe.enable_xformers_memory_efficient_attention()
+        pipe = pipe.to(device)
+        torch.cuda.empty_cache()
+        if refine == "Yes":
+            refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
+            refiner.enable_xformers_memory_efficient_attention()
+            refiner = refiner.to(device)
+            torch.cuda.empty_cache()
+            int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
+            image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
+            torch.cuda.empty_cache()
+            if upscale == "Yes":
+                refiner = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+                refiner.enable_xformers_memory_efficient_attention()
+                refiner = refiner.to(device)
+                torch.cuda.empty_cache()
+                upscaled = refiner(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+                torch.cuda.empty_cache()
+                return upscaled
+            else:
+                return image
+        else:
+            if upscale == "Yes":
+                image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+                upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
+                upscaler.enable_xformers_memory_efficient_attention()
+                upscaler = upscaler.to(device)
+                torch.cuda.empty_cache()
+                upscaled = upscaler(prompt=Prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=15, guidance_scale=0).images[0]
+                torch.cuda.empty_cache()
+                return upscaled
+            else:
+                image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+                torch.cuda.empty_cache()
+                return image
+
     return image
 
-gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryBook', 'SemiReal', 'Animagine XL 3.0', 'SDXL 1.0'], value='PhotoReal', label='Choose Model'),
+gr.Interface(fn=genie, inputs=[gr.Radio(['PhotoReal', 'Anime', 'Disney', 'StoryBook', 'SemiReal', 'Animagine XL 3.0', 'SDXL 1.0', 'Test'], value='PhotoReal', label='Choose Model'),
     gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
     gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
     gr.Slider(512, 1024, 768, step=128, label='Height'),