Merge branch 'main' into text2igm
Browse files- app-controlnet.py +2 -4
- app-img2img.py +2 -4
- app-txt2img.py +2 -4
- latent_consistency_controlnet.py +1 -0
app-controlnet.py  (CHANGED, +2 -4)

@@ -107,10 +107,8 @@ compel_proc = Compel(
     truncate_long_prompts=False,
 )
 if TORCH_COMPILE:
-    pipe.            # (removed line truncated in this capture — original text after "pipe." not recoverable)
-    pipe.            # (removed line truncated in this capture — original text after "pipe." not recoverable)
-    pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=False)
-    pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=False)
+    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)

 pipe(prompt="warmup", image=[Image.new("RGB", (768, 768))], control_image=[Image.new("RGB", (768, 768))])
app-img2img.py  (CHANGED, +2 -4)

@@ -79,10 +79,8 @@ if psutil.virtual_memory().total < 64 * 1024**3:
     pipe.enable_attention_slicing()

 if TORCH_COMPILE:
-    pipe.            # (removed line truncated in this capture — original text after "pipe." not recoverable)
-    pipe.            # (removed line truncated in this capture — original text after "pipe." not recoverable)
-    pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=False)
-    pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=False)
+    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)

 pipe(prompt="warmup", image=[Image.new("RGB", (512, 512))])
app-txt2img.py  (CHANGED, +2 -4)

@@ -79,10 +79,8 @@ if psutil.virtual_memory().total < 64 * 1024**3:
     pipe.enable_attention_slicing()

 if TORCH_COMPILE:
-    pipe.            # (removed line truncated in this capture — original text after "pipe." not recoverable)
-    pipe.            # (removed line truncated in this capture — original text after "pipe." not recoverable)
-    pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=False)
-    pipe.vae = torch.compile(pipe.vae, mode="max-autotune", fullgraph=False)
+    pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+    pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)

 pipe(prompt="warmup", num_inference_steps=1, guidance_scale=8.0)
latent_consistency_controlnet.py  (CHANGED, +1 -0)

@@ -1,3 +1,4 @@
+# from https://github.com/taabata/LCM_Inpaint_Outpaint_Comfy/blob/main/LCM/pipeline_cn.py
 # Copyright 2023 Stanford University Team and The HuggingFace Team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");