arthur-qiu committed on
Commit
72763fe
1 Parent(s): c8dda36
Files changed (1) hide show
  1. app.py +37 -5
app.py CHANGED
@@ -9,10 +9,41 @@ from pipeline_freescale import StableDiffusionXLPipeline
9
  from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
10
 
11
  @spaces.GPU(duration=120)
12
- def infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale):
13
  pipe = pipe.to("cuda")
14
  generator = torch.Generator(device='cuda')
15
  generator = generator.manual_seed(seed)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
17
  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
18
  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
@@ -26,27 +57,28 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
26
  if output_size == "2048 x 2048":
27
  resolutions_list = [[1024, 1024],
28
  [2048, 2048]]
 
29
  elif output_size == "2048 x 4096":
30
  resolutions_list = [[512, 1024],
31
  [1024, 2048],
32
  [2048, 4096]]
 
33
  elif output_size == "4096 x 2048":
34
  resolutions_list = [[1024, 512],
35
  [2048, 1024],
36
  [4096, 2048]]
 
37
  elif output_size == "4096 x 4096":
38
  resolutions_list = [[1024, 1024],
39
  [2048, 2048],
40
  [4096, 4096]]
 
41
 
42
  model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
43
  pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
44
- if not disable_freeu:
45
- register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
46
- register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
47
 
48
  print('GPU starts')
49
- result = infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale)
50
  print('GPU ends')
51
 
52
  image = result.images[0]
 
9
  from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
10
 
11
@spaces.GPU(duration=120)
def infer_gpu_fast(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
    """Run FreeScale SDXL inference in the short (120 s) ZeroGPU slot.

    Moves the pipeline onto CUDA, seeds a CUDA RNG, optionally installs the
    FreeU up-block patches, and runs the multi-resolution sampling pass.

    Args:
        pipe: StableDiffusionXLPipeline instance; relocated to CUDA here.
        seed: integer seed for the CUDA ``torch.Generator``.
        prompt: positive text prompt.
        negative_prompt: negative text prompt.
        ddim_steps: number of denoising steps.
        guidance_scale: classifier-free guidance weight.
        resolutions_list: resolution pairs for the progressive stages
            (callers pass e.g. ``[[1024, 1024], [2048, 2048]]``).
        fast_mode: FreeScale fast-mode flag, forwarded to the pipeline.
        cosine_scale: FreeScale cosine scale, forwarded to the pipeline.
        disable_freeu: when False, FreeU feature re-weighting is registered
            on the up blocks before sampling.

    Returns:
        The pipeline output object (its ``images`` attribute holds the result).
    """
    pipe = pipe.to("cuda")
    rng = torch.Generator(device="cuda")
    generator = rng.manual_seed(seed)
    if not disable_freeu:
        # FreeU: re-weight backbone/skip features in the decoder blocks.
        register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
        register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
    result = pipe(
        prompt,
        negative_prompt=negative_prompt,
        generator=generator,
        num_inference_steps=ddim_steps,
        guidance_scale=guidance_scale,
        resolutions_list=resolutions_list,
        fast_mode=fast_mode,
        cosine_scale=cosine_scale,
    )
    return result
24
+
25
@spaces.GPU(duration=240)
def infer_gpu_mid(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
    """Run FreeScale SDXL inference in the medium (240 s) ZeroGPU slot.

    Identical behavior to ``infer_gpu_fast`` apart from the longer GPU
    quota: used by the caller for the three-stage 2048x4096 / 4096x2048
    resolution schedules.

    Args:
        pipe: StableDiffusionXLPipeline instance; relocated to CUDA here.
        seed: integer seed for the CUDA ``torch.Generator``.
        prompt: positive text prompt.
        negative_prompt: negative text prompt.
        ddim_steps: number of denoising steps.
        guidance_scale: classifier-free guidance weight.
        resolutions_list: resolution pairs for the progressive stages.
        fast_mode: FreeScale fast-mode flag, forwarded to the pipeline.
        cosine_scale: FreeScale cosine scale, forwarded to the pipeline.
        disable_freeu: when False, FreeU feature re-weighting is registered
            on the up blocks before sampling.

    Returns:
        The pipeline output object (its ``images`` attribute holds the result).
    """
    pipe = pipe.to("cuda")
    rng = torch.Generator(device="cuda")
    generator = rng.manual_seed(seed)
    if not disable_freeu:
        # FreeU: re-weight backbone/skip features in the decoder blocks.
        register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
        register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
    result = pipe(
        prompt,
        negative_prompt=negative_prompt,
        generator=generator,
        num_inference_steps=ddim_steps,
        guidance_scale=guidance_scale,
        resolutions_list=resolutions_list,
        fast_mode=fast_mode,
        cosine_scale=cosine_scale,
    )
    return result
38
+
39
+ @spaces.GPU(duration=360)
40
+ def infer_gpu_slow(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
41
+ pipe = pipe.to("cuda")
42
+ generator = torch.Generator(device='cuda')
43
+ generator = generator.manual_seed(seed)
44
+ if not disable_freeu:
45
+ register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
46
+ register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
47
  result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
48
  num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
49
  resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
 
57
  if output_size == "2048 x 2048":
58
  resolutions_list = [[1024, 1024],
59
  [2048, 2048]]
60
+ infer_gpu_part = infer_gpu_fast
61
  elif output_size == "2048 x 4096":
62
  resolutions_list = [[512, 1024],
63
  [1024, 2048],
64
  [2048, 4096]]
65
+ infer_gpu_part = infer_gpu_mid
66
  elif output_size == "4096 x 2048":
67
  resolutions_list = [[1024, 512],
68
  [2048, 1024],
69
  [4096, 2048]]
70
+ infer_gpu_part = infer_gpu_mid
71
  elif output_size == "4096 x 4096":
72
  resolutions_list = [[1024, 1024],
73
  [2048, 2048],
74
  [4096, 4096]]
75
+ infer_gpu_part = infer_gpu_slow
76
 
77
  model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
78
  pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
 
 
 
79
 
80
  print('GPU starts')
81
+ result = infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu)
82
  print('GPU ends')
83
 
84
  image = result.images[0]