Manjushri committed on
Commit
7c1a22d
1 Parent(s): 8656444

Update app.py

Browse files

Added:
n_steps = 40
high_noise_frac = 0.8

And

pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
upscaler.unet = torch.compile(upscaler.unet, mode="reduce-overhead", fullgraph=True)

Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -16,23 +16,31 @@ if torch.cuda.is_available():
16
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
17
  pipe.enable_xformers_memory_efficient_attention()
18
  pipe = pipe.to(device)
 
19
  torch.cuda.empty_cache()
20
 
21
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
22
  refiner.enable_xformers_memory_efficient_attention()
23
  refiner = refiner.to(device)
 
24
  torch.cuda.empty_cache()
25
 
26
  upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
27
  upscaler.enable_xformers_memory_efficient_attention()
28
  upscaler = upscaler.to(device)
 
29
  torch.cuda.empty_cache()
30
  else:
31
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True)
32
  pipe = pipe.to(device)
 
33
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
34
  refiner = refiner.to(device)
35
-
 
 
 
 
36
  def genie (prompt, negative_prompt, height, width, scale, steps, seed, upscaling, prompt_2, negative_prompt_2):
37
  generator = torch.Generator(device=device).manual_seed(seed)
38
  int_image = pipe(prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
@@ -45,7 +53,7 @@ def genie (prompt, negative_prompt, height, width, scale, steps, seed, upscaling
45
  image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image).images[0]
46
  torch.cuda.empty_cache()
47
  return (image, image)
48
-
49
  gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit. A Token is Any Word, Number, Symbol, or Punctuation. Everything Over 77 Will Be Truncated!'),
50
  gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
51
  gr.Slider(512, 1024, 768, step=128, label='Height'),
 
16
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
17
  pipe.enable_xformers_memory_efficient_attention()
18
  pipe = pipe.to(device)
19
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
20
  torch.cuda.empty_cache()
21
 
22
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
23
  refiner.enable_xformers_memory_efficient_attention()
24
  refiner = refiner.to(device)
25
+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
26
  torch.cuda.empty_cache()
27
 
28
  upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
29
  upscaler.enable_xformers_memory_efficient_attention()
30
  upscaler = upscaler.to(device)
31
+ upscaler.unet = torch.compile(upscaler.unet, mode="reduce-overhead", fullgraph=True)
32
  torch.cuda.empty_cache()
33
  else:
34
  pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True)
35
  pipe = pipe.to(device)
36
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
37
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
38
  refiner = refiner.to(device)
39
+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
40
+
41
+ n_steps = 40
42
+ high_noise_frac = 0.8
43
+
44
  def genie (prompt, negative_prompt, height, width, scale, steps, seed, upscaling, prompt_2, negative_prompt_2):
45
  generator = torch.Generator(device=device).manual_seed(seed)
46
  int_image = pipe(prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
 
53
  image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image).images[0]
54
  torch.cuda.empty_cache()
55
  return (image, image)
56
+
57
  gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit. A Token is Any Word, Number, Symbol, or Punctuation. Everything Over 77 Will Be Truncated!'),
58
  gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
59
  gr.Slider(512, 1024, 768, step=128, label='Height'),