Manjushri committed
Commit e54a503
1 Parent(s): d82c469

Update app.py

Files changed (1):
  1. app.py +3 -3
app.py CHANGED
@@ -14,20 +14,20 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 torch.cuda.max_memory_allocated(device=device)
 pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
 pipe = pipe.to(device)
-pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+#pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 pipe.enable_xformers_memory_efficient_attention()
 torch.cuda.empty_cache()
 
 refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
 refiner = refiner.to(device)
-refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
+#refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
 refiner.enable_xformers_memory_efficient_attention()
 torch.cuda.empty_cache()
 
 def genie (prompt, negative_prompt, scale, steps, seed):
     torch.cuda.empty_cache()
     generator = torch.Generator(device=device).manual_seed(seed)
-    int_image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, num_images_per_prompt=1, generator=generator, width=768, height=768, output_type="latent").images
+    int_image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=steps, guidance_scale=scale, num_images_per_prompt=1, generator=generator, width=512, height=512, output_type="latent").images
     torch.cuda.empty_cache()
     image = refiner(prompt=prompt, image=int_image).images[0]
     return image
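
Net effect of the commit: both torch.compile calls are commented out and the base pass drops from 768×768 to 512×512, which reads as a trade of peak quality for lower startup time and VRAM use on the Space's GPU. As a minimal usage sketch, assuming the definitions from app.py above are in scope, the updated genie function could be invoked like this (every argument value here is hypothetical, chosen only for illustration, not taken from the commit):

# Hedged sketch: assumes pipe, refiner, and genie from app.py are already defined.
# All values below are hypothetical examples.
image = genie(
    "a watercolor lighthouse at dawn",   # prompt
    "blurry, low quality",               # negative_prompt
    7.5,                                 # scale, passed as guidance_scale to the base pass
    25,                                  # steps, passed as num_inference_steps
    12345,                               # seed for the torch.Generator
)
image.save("output.png")                 # the refiner returns PIL images, so .save() works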