radames committed
Commit 3cb2c68
1 Parent(s): 9e152c1

improve perf

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -44,8 +44,10 @@ else:
     # pipe.vae = AutoencoderTiny.from_pretrained(
     #     "madebyollin/taesd", torch_dtype=torch.float16, use_safetensors=True
     # )
-    pipe.to(torch_device="cuda", torch_dtype=torch.float16)
     pipe.set_progress_bar_config(disable=True)
+    pipe.to(torch_device="cuda", torch_dtype=torch.float16)
+    pipe.unet.to(memory_format=torch.channels_last)
+    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
     user_queue_map = {}


@@ -55,7 +57,7 @@ def predict(input_image, prompt, guidance_scale=8.0, strength=0.5, seed=2159232)
     num_inference_steps = 4
     results = pipe(
         prompt=prompt,
-        generator=generator,
+        # generator=generator,
         image=input_image,
         strength=strength,
         num_inference_steps=num_inference_steps,
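
The three added lines follow the usual PyTorch/diffusers recipe for faster UNet inference: keep the fp16 pipeline on the GPU, switch the UNet to the channels_last memory format (which favors CUDA convolution kernels), and compile the UNet with torch.compile. Below is a minimal sketch of the same pattern; the pipeline class and model id (AutoPipelineForImage2Image, "stabilityai/sd-turbo") are placeholders for illustration and are not taken from this commit.

import torch
from diffusers import AutoPipelineForImage2Image

# Placeholder pipeline and model id; the Space's own pipeline setup is not shown in this hunk.
pipe = AutoPipelineForImage2Image.from_pretrained(
    "stabilityai/sd-turbo", torch_dtype=torch.float16
)
pipe.set_progress_bar_config(disable=True)

# Equivalent to the commit's pipe.to(torch_device="cuda", torch_dtype=torch.float16).
pipe.to("cuda", torch.float16)

# channels_last layout for the convolution-heavy UNet, as in the diff.
pipe.unet.to(memory_format=torch.channels_last)

# Compile the UNet; "reduce-overhead" mode uses CUDA graphs, so the first call is slow
# (compilation) and subsequent calls are faster.
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

The trade-off is warm-up cost: torch.compile only pays off when the same pipeline object serves many repeated calls, which matches the long-lived process and per-user queue (user_queue_map) this app maintains.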