Update app.py
app.py CHANGED

@@ -7,7 +7,7 @@ import time
 from diffusers import DiffusionPipeline, AutoencoderTiny
 from custom_pipeline import FluxWithCFGPipeline
 
-torch.
+torch.backends.cuda.matmul.allow_tf32 = True
 
 # Constants
 MAX_SEED = np.iinfo(np.int32).max
@@ -22,14 +22,9 @@ pipe = FluxWithCFGPipeline.from_pretrained(
     "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
 )
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
-# pipe.load_lora_weights("ostris/OpenFLUX.1", weight_name="openflux1-v0.1.0-fast-lora.safetensors", adapter_name="fast")
-# pipe.set_adapters("fast")
-# pipe.fuse_lora(adapter_names=["fast"], lora_scale=1.0)
 pipe.to("cuda")
-
-# pipe.transformer = torch.compile(
-#     pipe.transformer, mode="max-autotune", fullgraph=True
-# )
+pipe.enable_xformers_memory_efficient_attention()
+
 torch.cuda.empty_cache()
 
 # Inference function
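This commit makes two inference-speed changes to the FLUX.1-schnell demo: it enables TF32 matmuls (torch.backends.cuda.matmul.allow_tf32 = True) and xformers memory-efficient attention, and it drops the commented-out OpenFLUX LoRA and torch.compile experiments. Below is a minimal sketch of how the setup section reads after this commit; the dtype value and the torch/numpy imports are assumptions about the unchanged parts of app.py, and FluxWithCFGPipeline is the repo's own class from custom_pipeline.

import torch
import numpy as np
from diffusers import AutoencoderTiny
from custom_pipeline import FluxWithCFGPipeline

# Allow TF32 matmuls on Ampere-and-newer GPUs (faster, slightly lower precision)
torch.backends.cuda.matmul.allow_tf32 = True

# Constants
MAX_SEED = np.iinfo(np.int32).max

dtype = torch.bfloat16  # assumption: dtype is defined earlier in app.py
pipe = FluxWithCFGPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
)
# Tiny VAE (taef1) trades a little decode quality for much faster decoding
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
pipe.to("cuda")
# New in this commit: memory-efficient attention kernels via xformers
pipe.enable_xformers_memory_efficient_attention()
torch.cuda.empty_cache()

Note that enable_xformers_memory_efficient_attention() requires the xformers package to be installed in the Space; without it, diffusers raises an error at the call site.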