Manjushri committed on
Commit
9a96d60
1 Parent(s): 8d78d08

Update app.py

Browse files

Testing upscaler

Files changed (1) hide show
  1. app.py +29 -11
app.py CHANGED
@@ -11,30 +11,48 @@ from diffusers.models import AutoencoderKL
11
  login(token=os.environ.get('HF_KEY'))
12
 
13
  device = "cuda" if torch.cuda.is_available() else "cpu"
14
- torch.cuda.max_memory_allocated(device=device)
15
  vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float16)
16
  torch.cuda.empty_cache()
17
 
18
- def genie (prompt, negative_prompt, scale, steps, seed):
 
19
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, vae=vae)
20
  pipe = pipe.to(device)
21
- #pipe.enable_xformers_memory_efficient_attention()
22
  torch.cuda.empty_cache()
23
  generator = torch.Generator(device=device).manual_seed(seed)
24
  int_image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, num_images_per_prompt=1, generator=generator).images
25
  torch.cuda.empty_cache()
26
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, vae=vae)
27
- pipe = pipe.to(device)
28
- #pipe.enable_xformers_memory_efficient_attention()
29
- torch.cuda.empty_cache()
30
- image = pipe(prompt=prompt, image=int_image).images[0]
31
- return image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
34
  gr.Textbox(label='What you Do Not want the AI to generate.'),
35
  gr.Slider(1, 15, 10), gr.Slider(25, maximum=100, value=50, step=1),
36
- gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True)],
37
- outputs='image',
 
38
  title="Stable Diffusion XL 0.9 GPU",
39
  description="SDXL 0.9 GPU. <b>WARNING:</b> Capable of producing NSFW images.",
40
  article = "Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>").launch(debug=True, max_threads=80)
 
11
  login(token=os.environ.get('HF_KEY'))
12
 
13
  device = "cuda" if torch.cuda.is_available() else "cpu"
14
+ torch.cuda.max_memory_allocated(device='cuda')
15
  vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", torch_dtype=torch.float16)
16
  torch.cuda.empty_cache()
17
 
18
def genie(prompt, negative_prompt, scale, steps, seed, upscale):
    """Generate an image with SDXL 0.9 base, refine it, and optionally upscale it.

    Args:
        prompt: Positive text prompt (77-token limit per the UI).
        negative_prompt: Text describing what to avoid.
        scale: Classifier-free guidance scale for the base pass.
        steps: Number of inference steps for the base pass.
        seed: Integer seed for the torch generator (reproducibility).
        upscale: 'Yes' to run the 2x latent upscaler after refining, 'No' to skip it.

    Returns:
        A (refined_image, upscaled_image) tuple; the second element is None
        when upscaling was not requested.
    """
    if torch.cuda.is_available():
        torch.cuda.max_memory_allocated(device=device)

    # Base pass: SDXL 0.9 base generates the initial image from the prompt.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, vae=vae)
    pipe = pipe.to(device)
    pipe.enable_xformers_memory_efficient_attention()
    torch.cuda.empty_cache()
    generator = torch.Generator(device=device).manual_seed(seed)
    int_image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, num_images_per_prompt=1, generator=generator).images
    torch.cuda.empty_cache()

    # Refiner pass is run in both branches; only the upscaler step is optional.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True, vae=vae)
    pipe = pipe.to(device)
    pipe.enable_xformers_memory_efficient_attention()
    image = pipe(prompt=prompt, image=int_image).images[0]
    torch.cuda.empty_cache()

    upscaled = None  # BUGFIX: was undefined in the 'No' branch, raising NameError at return
    if upscale == 'Yes':  # BUGFIX: original tested the non-existent name 'upscaler'
        if torch.cuda.is_available():
            torch.cuda.max_memory_allocated(device=device)
        # 2x latent upscaler on the refined image; few steps and zero guidance
        # per the model card's recommended usage.
        pipe = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
        pipe.to(device)  # was hardcoded "cuda"; respect the module-level device
        pipe.enable_xformers_memory_efficient_attention()
        upscaled = pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
        torch.cuda.empty_cache()

    return (image, upscaled)
49
 
50
# Wire the generator into a simple Gradio UI: two text prompts, guidance/steps
# sliders, a randomized seed slider, and an upscale toggle; outputs are the
# refined image and the (optional) upscaled image.
gr.Interface(
    fn=genie,
    inputs=[
        gr.Textbox(label='What you want the AI to generate. 77 Token Limit.'),
        gr.Textbox(label='What you Do Not want the AI to generate.'),
        gr.Slider(1, 15, 10),
        gr.Slider(25, maximum=100, value=50, step=1),
        gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True),
        gr.Radio(['Yes', 'No'], label='Upscale?'),
    ],
    outputs=['image', 'image'],
    title="Stable Diffusion XL 0.9 GPU",
    description="SDXL 0.9 GPU. <b>WARNING:</b> Capable of producing NSFW images.",
    article="Code Monkey: <a href=\"https://huggingface.co/Manjushri\">Manjushri</a>",
).launch(debug=True, max_threads=80)