arthur-qiu committed
Commit 24519bd
1 Parent(s): 401a78c
Files changed (1)
  1. app.py +8 -9
app.py CHANGED
@@ -8,19 +8,20 @@ from PIL import Image
 from pipeline_freescale import StableDiffusionXLPipeline
 from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
 
-
-def infer_gpu_part(pipe, generator, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
+@spaces.GPU(duration=120)
+def infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
+    pipe = pipe.to("cuda")
+    generator = torch.Generator(device='cuda')
+    generator = generator.manual_seed(seed)
     if not disable_freeu:
         register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
         register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
-    generator = generator.to("cuda")
     result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
                   num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
                   resolutions_list=resolutions_list, fast_mode=fast_mode, cosine_scale=cosine_scale,
                   )
     return result
 
-@spaces.GPU(duration=120)
 def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt):
 
     disable_freeu = 'Disable FreeU' in options
@@ -43,12 +44,10 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
 
     model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
     pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
-    pipe = pipe.to("cuda")
-
-    generator = torch.Generator()
-    generator = generator.manual_seed(seed)
 
-    result = infer_gpu_part(pipe, generator, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu)
+    print('GPU starts')
+    result = infer_gpu_part(pipe, seed, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu)
+    print('GPU ends')
 
     image = result.images[0]
     save_path = 'output.png'
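
The change follows the usual ZeroGPU pattern for Spaces: the model is loaded on CPU at startup, and everything that touches CUDA (moving the pipeline, seeding the generator, running inference) happens inside a single function wrapped with @spaces.GPU, so a GPU is only attached for the duration of that call. Below is a minimal sketch of that pattern, assuming the repository's pipeline_freescale module is importable and the Space runs on ZeroGPU hardware; the helper name run_on_gpu and its default arguments are illustrative, not part of this commit.

    import spaces
    import torch
    from pipeline_freescale import StableDiffusionXLPipeline

    # Load on CPU at startup; ZeroGPU only attaches a GPU while a
    # function decorated with @spaces.GPU is executing.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16)

    @spaces.GPU(duration=120)  # request a GPU for at most 120 seconds
    def run_on_gpu(prompt, seed, ddim_steps=50, guidance_scale=7.5):
        # All CUDA work stays inside the decorated function: move the
        # pipeline over, seed a generator on the same device, then sample.
        pipe.to("cuda")
        generator = torch.Generator(device="cuda").manual_seed(seed)
        result = pipe(prompt, num_inference_steps=ddim_steps,
                      guidance_scale=guidance_scale, generator=generator)
        return result.images[0]

Since seed is a local of infer, it has to be forwarded into the decorated helper explicitly; creating the generator directly on the CUDA device keeps the seeded noise on the same device the pipeline runs on.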