Update app.py
app.py CHANGED
@@ -1,7 +1,4 @@
 #!/usr/bin/env python
-#patch 0.04
-#Func() Dalle Collage Moved Midjourney Space
-#Pruned DalleCollage Space
 import os
 import random
 import uuid
@@ -178,20 +175,14 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
 
 
 
-DESCRIPTION = """##
-
-Drop your best results in the community: [rb.gy/klkbs7](http://rb.gy/klkbs7), Have you tried the stable hamster space? [rb.gy/hfrm2f](http://rb.gy/hfrm2f)
+DESCRIPTION = """## CloneJourney
 """
 
-
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
-
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "lazy")
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
-USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "
-ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "1") == "1"
+ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "1") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
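Side note: the three toggles rewritten in this hunk use the usual environment-flag pattern. A minimal sketch of how the new defaults behave (names copied from the hunk; everything around them is assumed):

import os
import torch

# Both flags now default to enabled: "1" == "1" is True unless the
# environment overrides the variable with anything other than "1".
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "1") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "1") == "1"

# On a GPU host this is the non-empty, truthy string "lazy"; on CPU the
# `and` short-circuits to False, so examples are never cached there.
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "lazy")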
@@ -208,11 +199,9 @@ if torch.cuda.is_available():
         pipe.enable_model_cpu_offload()
     else:
         pipe.to(device)
-        print("Loaded on Device!")
 
     if USE_TORCH_COMPILE:
-        pipe.unet = torch.compile(pipe.unet, mode="
-        print("Model Compiled!")
+        pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True)
 
 def save_image(img, path):
     img.save(path)
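Side note: for context, a hedged sketch of the loading branch this hunk trims, assuming `pipe` is a diffusers pipeline built earlier in app.py (that code is outside the hunk):

if torch.cuda.is_available():
    if ENABLE_CPU_OFFLOAD:
        # Keep submodules on CPU and move each to the GPU only while it
        # runs: slower, but with a much smaller peak VRAM footprint.
        pipe.enable_model_cpu_offload()
    else:
        pipe.to(device)
    if USE_TORCH_COMPILE:
        # "max-autotune" benchmarks kernel choices on the first call;
        # fullgraph=True raises if the UNet cannot be captured as one
        # graph rather than silently falling back to eager execution.
        pipe.unet = torch.compile(pipe.unet, mode="max-autotune", fullgraph=True)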
@@ -232,8 +221,8 @@ def generate(
     filter_name: str = DEFAULT_FILTER_NAME,
     grid_size: str = "2x2",
     seed: int = 0,
-    width: int =
-    height: int =
+    width: int = 896,
+    height: int = 1152,
     guidance_scale: float = 3,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
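Side note: the new 896x1152 default is a portrait shape, one of the resolutions SDXL-class models are commonly run at. Together with MAX_SEED from the earlier hunk, seed handling typically looks like the sketch below; the helper name is an assumption, as the diff does not show it:

import random
import numpy as np

MAX_SEED = np.iinfo(np.int32).max  # as defined earlier in app.py

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Hypothetical helper: draw a fresh seed when the UI asks for one,
    # otherwise keep the caller's seed so runs stay reproducible.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed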