Spaces:
Running
on
Zero
Running
on
Zero
AlekseyCalvin
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -13,7 +13,7 @@ import random
|
|
13 |
import time
|
14 |
from typing import Any, Dict, List, Optional, Union
|
15 |
from huggingface_hub import hf_hub_download
|
16 |
-
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoPipelineForImage2Image, ConfigMixin, FluxTransformer2DModel
|
17 |
import safetensors.torch
|
18 |
from safetensors.torch import load_file
|
19 |
from pipeline import FluxWithCFGPipeline
|
@@ -38,8 +38,12 @@ with open('loras.json', 'r') as f:
|
|
38 |
|
39 |
dtype = torch.bfloat16
|
40 |
|
41 |
-
|
42 |
-
|
|
|
|
|
|
|
|
|
43 |
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
|
44 |
|
45 |
pipe.to("cuda")
|
@@ -57,9 +61,6 @@ pipe.text_encoder = clip_model.text_model
|
|
57 |
pipe.text_encoder.dtype = torch.bfloat16
|
58 |
torch.cuda.empty_cache()
|
59 |
|
60 |
-
num_single_layers=0
|
61 |
-
chunk_size=0
|
62 |
-
|
63 |
MAX_SEED = 2**32-1
|
64 |
|
65 |
class calculateDuration:
|
|
|
13 |
import time
|
14 |
from typing import Any, Dict, List, Optional, Union
|
15 |
from huggingface_hub import hf_hub_download
|
16 |
+
from diffusers import DiffusionPipeline, AutoencoderTiny, ModelMixin, AutoPipelineForImage2Image, ConfigMixin, FluxTransformer2DModel
|
17 |
import safetensors.torch
|
18 |
from safetensors.torch import load_file
|
19 |
from pipeline import FluxWithCFGPipeline
|
|
|
38 |
|
39 |
dtype = torch.bfloat16
|
40 |
|
41 |
+
model = FluxTransformer2DModel.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype).to("cuda")
|
42 |
+
model.num_single_layers="0"
|
43 |
+
model.chunk_size="0"
|
44 |
+
model.pooled_projections="(_, _, 1)[0]"
|
45 |
+
model.pooled_projections_dim="1"
|
46 |
+
pipe = FluxWithCFGPipeline.from_pretrained("ostris/OpenFLUX.1", torch_dtype=dtype).to("cuda")
|
47 |
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to("cuda")
|
48 |
|
49 |
pipe.to("cuda")
|
|
|
61 |
pipe.text_encoder.dtype = torch.bfloat16
|
62 |
torch.cuda.empty_cache()
|
63 |
|
|
|
|
|
|
|
64 |
MAX_SEED = 2**32-1
|
65 |
|
66 |
class calculateDuration:
|