Manjushri committed
Commit 8d3fd7c · verified · 1 Parent(s): f7cf7f1

Update app.py

Files changed (1)
  1. app.py +8 -29
app.py CHANGED
@@ -18,41 +18,20 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed):
         pipe.enable_xformers_memory_efficient_attention()
         pipe = pipe.to(device)
         torch.cuda.empty_cache()
-        if refine == "Yes":
-            refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
-            refiner.enable_xformers_memory_efficient_attention()
-            refiner = refiner.to(device)
-            torch.cuda.empty_cache()
-            int_image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images
-            image = refiner(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
-            torch.cuda.empty_cache()
-            return image
-        else:
-            image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
-            torch.cuda.empty_cache()
-            return image
+
+        image = pipe(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+        torch.cuda.empty_cache()
+        return image
 
     if Model == "Animagine XL 3.0":
         animagine = DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16, safety_checker=None) if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
         animagine.enable_xformers_memory_efficient_attention()
         animagine = animagine.to(device)
         torch.cuda.empty_cache()
-        if refine == "Yes":
-            torch.cuda.empty_cache()
-            torch.cuda.max_memory_allocated(device=device)
-            int_image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale, output_type="latent").images
-            torch.cuda.empty_cache()
-            animagine = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16") if torch.cuda.is_available() else DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0")
-            animagine.enable_xformers_memory_efficient_attention()
-            animagine = animagine.to(device)
-            torch.cuda.empty_cache()
-            image = animagine(Prompt, negative_prompt=negative_prompt, image=int_image, denoising_start=high_noise_frac).images[0]
-            torch.cuda.empty_cache()
-            return image
-        else:
-            image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
-            torch.cuda.empty_cache()
-            return image
+
+        image = animagine(Prompt, negative_prompt=negative_prompt, height=height, width=width, num_inference_steps=steps, guidance_scale=scale).images[0]
+        torch.cuda.empty_cache()
+        return image
 
 
     return image
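
For reference, the single-stage path this commit keeps is the standard diffusers text-to-image call. Below is a minimal standalone sketch of that pattern, assuming a device check, an example prompt, and a save step that are not part of app.py; only the from_pretrained / enable_xformers / pipe(...) sequence mirrors the "+" lines in the hunk above.

import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the base pipeline (model id taken from the hunk above); fp16 only on GPU.
pipe = (
    DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0", torch_dtype=torch.float16)
    if device == "cuda"
    else DiffusionPipeline.from_pretrained("cagliostrolab/animagine-xl-3.0")
)
if device == "cuda":
    pipe.enable_xformers_memory_efficient_attention()  # requires the xformers package
pipe = pipe.to(device)
torch.cuda.empty_cache()  # no-op when CUDA is unavailable

# Single-stage generation, as in the added lines: no refiner, no latent hand-off.
image = pipe(
    "1girl, solo, looking at viewer",        # example Prompt (hypothetical)
    negative_prompt="lowres, bad anatomy",   # example negative_prompt (hypothetical)
    height=1024,
    width=1024,
    num_inference_steps=25,
    guidance_scale=7.0,
).images[0]
torch.cuda.empty_cache()
image.save("output.png")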