arthur-qiu committed on
Commit 401a78c
1 Parent(s): fd2f5c4
Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -8,12 +8,11 @@ from PIL import Image
 from pipeline_freescale import StableDiffusionXLPipeline
 from free_lunch_utils import register_free_upblock2d, register_free_crossattn_upblock2d
 
-@spaces.GPU(duration=120)
+
 def infer_gpu_part(pipe, generator, prompt, negative_prompt, ddim_steps, guidance_scale, resolutions_list, fast_mode, cosine_scale, disable_freeu):
     if not disable_freeu:
         register_free_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
         register_free_crossattn_upblock2d(pipe, b1=1.1, b2=1.2, s1=0.6, s2=0.4)
-    pipe = pipe.to("cuda")
     generator = generator.to("cuda")
     result = pipe(prompt, negative_prompt=negative_prompt, generator=generator,
         num_inference_steps=ddim_steps, guidance_scale=guidance_scale,
@@ -21,6 +20,7 @@ def infer_gpu_part(pipe, generator, prompt, negative_prompt, ddim_steps, guidanc
         )
     return result
 
+@spaces.GPU(duration=120)
 def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, options, negative_prompt):
 
     disable_freeu = 'Disable FreeU' in options
@@ -43,6 +43,7 @@ def infer(prompt, output_size, ddim_steps, guidance_scale, cosine_scale, seed, o
 
     model_ckpt = "stabilityai/stable-diffusion-xl-base-1.0"
     pipe = StableDiffusionXLPipeline.from_pretrained(model_ckpt, torch_dtype=torch.float16)
+    pipe = pipe.to("cuda")
 
     generator = torch.Generator()
     generator = generator.manual_seed(seed)
@@ -161,7 +162,7 @@ with gr.Blocks(css=css) as demo:
     """
     )
 
-    prompt_in = gr.Textbox(label="Prompt", placeholder="A chihuahua in astronaut suit floating in space, cinematic lighting, glow effect")
+    prompt_in = gr.Textbox(label="Prompt", placeholder="A panda walking and munching bamboo in a bamboo forest.")
 
     with gr.Row():
        with gr.Accordion('FreeScale Parameters (feel free to adjust these parameters based on your prompt): ', open=False):
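
For context on the decorator and device-placement changes above: on Hugging Face ZeroGPU Spaces, @spaces.GPU marks the function that needs a GPU attached while it runs, and duration=120 extends the allocation window to roughly 120 seconds; it is normally placed on the top-level function that Gradio invokes. The sketch below shows that general pattern only, not this repo's code: the generate function, the use of diffusers' stock StableDiffusionXLPipeline instead of pipeline_freescale, and the inference settings are illustrative assumptions.

    import spaces
    import torch
    import gradio as gr
    from diffusers import StableDiffusionXLPipeline  # assumption: stock diffusers pipeline, not pipeline_freescale

    # Load the pipeline once at startup (CPU side); ZeroGPU attaches a GPU only
    # while a @spaces.GPU-decorated function is executing.
    pipe = StableDiffusionXLPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
    )

    @spaces.GPU(duration=120)  # request a GPU for up to ~120 s per call
    def generate(prompt):
        # Move to CUDA inside the decorated function, where a GPU is guaranteed to be attached.
        pipe.to("cuda")
        return pipe(prompt, num_inference_steps=30).images[0]

    demo = gr.Interface(fn=generate, inputs=gr.Textbox(label="Prompt"), outputs=gr.Image())
    demo.launch()

On hardware with a dedicated GPU the decorator is effectively a no-op, so the same code runs unchanged outside ZeroGPU.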