radames HF staff committed on
Commit
e31d3e1
1 Parent(s): 1af368e

add reference

Browse files
app-controlnet.py CHANGED
@@ -107,8 +107,8 @@ compel_proc = Compel(
107
  truncate_long_prompts=False,
108
  )
109
  if TORCH_COMPILE:
110
- pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=False)
111
- pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=False)
112
 
113
  pipe(prompt="warmup", image=[Image.new("RGB", (768, 768))], control_image=[Image.new("RGB", (768, 768))])
114
 
 
107
  truncate_long_prompts=False,
108
  )
109
  if TORCH_COMPILE:
110
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
111
+ pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
112
 
113
  pipe(prompt="warmup", image=[Image.new("RGB", (768, 768))], control_image=[Image.new("RGB", (768, 768))])
114
 
app-img2img.py CHANGED
@@ -79,8 +79,8 @@ if psutil.virtual_memory().total < 64 * 1024**3:
79
  pipe.enable_attention_slicing()
80
 
81
  if TORCH_COMPILE:
82
- pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=False)
83
- pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=False)
84
 
85
  pipe(prompt="warmup", image=[Image.new("RGB", (512, 512))])
86
 
 
79
  pipe.enable_attention_slicing()
80
 
81
  if TORCH_COMPILE:
82
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
83
+ pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
84
 
85
  pipe(prompt="warmup", image=[Image.new("RGB", (512, 512))])
86
 
app-txt2img.py CHANGED
@@ -79,8 +79,8 @@ if psutil.virtual_memory().total < 64 * 1024**3:
79
  pipe.enable_attention_slicing()
80
 
81
  if TORCH_COMPILE:
82
- pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=False)
83
- pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=False)
84
 
85
  pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
86
 
 
79
  pipe.enable_attention_slicing()
80
 
81
  if TORCH_COMPILE:
82
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
83
+ pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
84
 
85
  pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
86
 
latent_consistency_controlnet.py CHANGED
@@ -1,3 +1,4 @@
 
1
  # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
2
  #
3
  # Licensed under the Apache License, Version 2.0 (the "License");
 
1
+ # from https://github.com/taabata/LCM_Inpaint_Outpaint_Comfy/blob/main/LCM/pipeline_cn.py
2
  # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");