Commit e7fedfd
Parent(s): f5fe9a4
app.py CHANGED

@@ -40,7 +40,7 @@ def infer(prompt, seed=42, randomize_seed=False, aspect_ratio="4:3 landscape 115
     generator = torch.Generator().manual_seed(seed)
 
     # Load the selected LoRA weight and fuse it
-    lora_weight_path = os.path.join("
+    lora_weight_path = os.path.join("loras", lora_weight)
     # pipe.load_lora_weights(weight_path)
     # pipe.fuse_lora()
     torch.cuda.empty_cache()
@@ -52,6 +52,7 @@ def infer(prompt, seed=42, randomize_seed=False, aspect_ratio="4:3 landscape 115
         aspect_ratio,
         seed,
         num_inference_steps,
+        lora_weight,
     )
     # Generate images
     # for img in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
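The net effect of the app.py hunks is that infer now receives the selected LoRA filename and resolves it against a local loras/ directory before handing it to the backend. A minimal standalone sketch of that resolution step, assuming the same directory layout; the helper name and the existence check are illustrative additions, not part of the commit:

import os

def resolve_lora_path(lora_weight: str, lora_dir: str = "loras") -> str:
    # Mirror the os.path.join("loras", lora_weight) line added in app.py.
    path = os.path.join(lora_dir, lora_weight)
    # Fail early with a clear message if the selected weight is missing
    # (a safeguard added for illustration, not present in the commit).
    if not os.path.isfile(path):
        raise FileNotFoundError(f"LoRA weight not found: {path}")
    return path

# Example: a filename chosen in the UI becomes a loadable path.
# resolve_lora_path("my_style.safetensors")  # hypothetical filename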
python.py CHANGED

@@ -183,12 +183,6 @@ vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
 saveimage = NODE_CLASS_MAPPINGS["SaveImage"]()
 
 def model_loading():
-    # loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
-    # loraloadermodelonly_72 = loraloadermodelonly.load_lora_model_only(
-    #     lora_name=lora_weight_path,
-    #     strength_model=1,
-    #     model=get_value_at_index(unetloader_12, 0),
-    # )
     model_loaders = [dualcliploader_11, vaeloader_10, unetloader_12, loraloadermodelonly_72]
     valid_models = [
         getattr(loader[0], 'patcher', loader[0])
@@ -206,7 +200,9 @@ def generate_image(prompt,
         aspect_ratio,
         seed,
         num_inference_steps,
+        lora_weight,
 ):
+    print(seed)
     cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
     cliptextencode_6 = cliptextencode.encode(
         text=prompt,
@@ -221,6 +217,14 @@
         upscale_factor=1.5,
         batch_size=1,
     )
+    loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
+    loraloadermodelonly_72 = loraloadermodelonly.load_lora_model_only(
+        lora_name=lora_weight_path,
+        strength_model=1,
+        model=get_value_at_index(unetloader_12, 0),
+    )
+    randomnoise = NODE_CLASS_MAPPINGS["RandomNoise"]()
+    randomnoise_25 = randomnoise.get_noise(noise_seed='42')
     with torch.inference_mode():
         for q in range(1):
             modelsamplingflux_61 = modelsamplingflux.patch(
@@ -261,7 +265,7 @@ def generate_image(prompt,
     )
 
     saveimage_9 = saveimage.save_images(
-        filename_prefix="
+        filename_prefix="image", images=get_value_at_index(vaedecode_8, 0)
     )
     saved_path = f"output/{saveimage_9['ui']['images'][0]['filename']}"
     return saved_path, seed
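In python.py the commit deletes the commented-out LoRA node from model_loading() and instantiates it inside generate_image() instead, so the LoRA is applied per request using the filename threaded in from app.py. A minimal sketch of that per-call pattern, assuming a working ComfyUI install where NODE_CLASS_MAPPINGS is importable as it is in this Space; the helper name and arguments are illustrative, and lora_weight_path is built locally rather than read from app.py's scope, which generate_image() cannot see across modules:

import os
from nodes import NODE_CLASS_MAPPINGS  # ComfyUI's node registry, as used in python.py

def apply_lora_per_request(base_model, lora_weight: str, strength: float = 1.0):
    """Patch an already-loaded diffusion model with a per-request LoRA.
    Illustrative helper; the commit inlines this logic in generate_image()."""
    # Build the path locally instead of relying on app.py's lora_weight_path global.
    lora_weight_path = os.path.join("loras", lora_weight)
    loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
    # Only the diffusion model is patched; CLIP is left untouched, as in the commit.
    return loraloadermodelonly.load_lora_model_only(
        lora_name=lora_weight_path,
        strength_model=strength,
        model=base_model,
    )

One design note: stock ComfyUI declares RandomNoise's noise_seed as an integer input, so passing the function's seed argument as an int, rather than the string '42', would match the declared type and avoid hardcoding the seed.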