Drastic update to image generation to accommodate @spaces.GPU() excessive time usage
- app.py +218 -293
- utils/constants.py +1 -0
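The diff below splits the former single GPU-decorated call into two stages: `generate_image_lowmem` prepares the pipeline, LoRA weights, and generation kwargs, and a new `generate_image` runs only the denoising call inside its own `@spaces.GPU()` window, so neither stage exceeds its ZeroGPU duration budget. A minimal sketch of that pattern, with hypothetical function names, prompt, and model id (not the app's actual signatures):

```python
import spaces
import torch
from diffusers import FluxPipeline

@spaces.GPU(duration=90)   # stage 1 budget: load the model and build generation kwargs
def prepare_pipeline(model_name: str):
    # Heavy setup: weight loading and offload configuration.
    pipe = FluxPipeline.from_pretrained(model_name, torch_dtype=torch.bfloat16)
    pipe.enable_model_cpu_offload()
    generate_params = {"prompt": "a hexagonal map tile", "num_inference_steps": 30}
    return pipe, generate_params

@spaces.GPU(duration=120)  # stage 2 budget: only the denoising run holds the GPU
def run_inference(pipe, generate_params):
    image = pipe(**generate_params).images[0]
    torch.cuda.empty_cache()  # release VRAM before the allocation ends
    return image

# Hypothetical usage:
# pipe, params = prepare_pipeline("black-forest-labs/FLUX.1-schnell")
# image = run_inference(pipe, params)
```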
app.py
CHANGED
@@ -8,6 +8,7 @@ from typing import Optional, Union, List, Tuple
 
 from PIL import Image, ImageFilter
 import cv2
+
 import utils.constants as constants
 
 from haishoku.haishoku import Haishoku
@@ -91,6 +92,7 @@ from utils.version_info import (
     #release_torch_resources,
     #get_torch_info
 )
+from src.condition import Condition
 import spaces
 
 input_image_palette = []
@@ -199,113 +201,24 @@ condition_dict = {
     "fill": 9,
 }
 
-    [... removed lines 202-215 not preserved in this view ...]
-        # TODO: Add mask support
-        assert mask is None, "Mask not supported yet"
-    def get_condition(
-        self, condition_type: str, raw_img: Union[Image.Image, torch.Tensor]
-    ) -> Union[Image.Image, torch.Tensor]:
-        """
-        Returns the condition image.
-        """
-        if condition_type == "depth":
-            from transformers import pipeline
-            depth_pipe = pipeline(
-                task="depth-estimation",
-                model="LiheYoung/depth-anything-small-hf",
-                device="cuda",
-            )
-            source_image = raw_img.convert("RGB")
-            condition_img = depth_pipe(source_image)["depth"].convert("RGB")
-            return condition_img
-        elif condition_type == "canny":
-            img = np.array(raw_img)
-            edges = cv2.Canny(img, 100, 200)
-            edges = Image.fromarray(edges).convert("RGB")
-            return edges
-        elif condition_type == "subject":
-            return raw_img
-        elif condition_type == "coloring":
-            return raw_img.convert("L").convert("RGB")
-        elif condition_type == "deblurring":
-            condition_image = (
-                raw_img.convert("RGB")
-                .filter(ImageFilter.GaussianBlur(10))
-                .convert("RGB")
-            )
-            return condition_image
-        elif condition_type == "fill":
-            return raw_img.convert("RGB")
-        return self.condition
-    @property
-    def type_id(self) -> int:
-        """
-        Returns the type id of the condition.
-        """
-        return condition_dict[self.condition_type]
-    @classmethod
-    def get_type_id(cls, condition_type: str) -> int:
-        """
-        Returns the type id of the condition.
-        """
-        return condition_dict[condition_type]
-    def _encode_image(self, pipe: FluxPipeline, cond_img: Image.Image) -> torch.Tensor:
-        """
-        Encodes an image condition into tokens using the pipeline.
-        """
-        cond_img = pipe.image_processor.preprocess(cond_img)
-        cond_img = cond_img.to(pipe.device).to(pipe.dtype)
-        cond_img = pipe.vae.encode(cond_img).latent_dist.sample()
-        cond_img = (
-            cond_img - pipe.vae.config.shift_factor
-        ) * pipe.vae.config.scaling_factor
-        cond_tokens = pipe._pack_latents(cond_img, *cond_img.shape)
-        cond_ids = pipe._prepare_latent_image_ids(
-            cond_img.shape[0],
-            cond_img.shape[2]//2,
-            cond_img.shape[3]//2,
-            pipe.device,
-            pipe.dtype,
-        )
-        return cond_tokens, cond_ids
-    def encode(self, pipe: FluxPipeline) -> Tuple[torch.Tensor, torch.Tensor, int]:
-        """
-        Encodes the condition into tokens, ids and type_id.
-        """
-        if self.condition_type in [
-            "depth",
-            "canny",
-            "subject",
-            "coloring",
-            "deblurring",
-            "fill",
-        ]:
-            tokens, ids = self._encode_image(pipe, self.condition)
-        else:
-            raise NotImplementedError(
-                f"Condition type {self.condition_type} not implemented"
-            )
-        type_id = torch.ones_like(ids[:, :1]) * self.type_id
-        return tokens, ids, type_id
+@spaces.GPU(duration=120, progress=gr.Progress(track_tqdm=True))
+def generate_image(pipe, conditions, generate_params, progress=gr.Progress(track_tqdm=True)):
+    gr.Info("Generating AI image...",duration=5)
+    result = pipe(**generate_params)
+    image = result.images[0]
+    # Clean up
+    del result
+    del conditions
+    # Delete the pipeline and clear cache
+    del pipe
+    torch.cuda.empty_cache()
+    torch.cuda.ipc_collect()
+    print(torch.cuda.memory_summary(device=None, abbreviated=False))
+    return image
 
-# @spaces.GPU(duration=140, progress=gr.Progress(track_tqdm=True))
-# def generate_image(pipe, generate_params, progress=gr.Progress(track_tqdm=True)):
-#     return pipe(**generate_params)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=90)
+@torch.no_grad()
 def generate_image_lowmem(
     text,
     neg_prompt=None,
@@ -331,195 +244,205 @@ def generate_image_lowmem(
                          f"Available options: {list(PIPELINE_CLASSES.keys())}")
 
     #initialize_cuda()
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    #from src.condition import Condition
+    device = "cuda" if torch.cuda.is_available() else "cpu"
 
     print(f"device:{device}\nmodel_name:{model_name}\nlora_weights:{lora_weights}\n")
    #print(f"\n {get_torch_info()}\n")
    # Disable gradient calculations
-    [... removed lines 340-346 not preserved in this view ...]
+    #with torch.no_grad():
+    gr.Info("Initialize the pipeline inside the context manager",duration=5)
+    # Initialize the pipeline inside the context manager
+    pipe = pipeline_class.from_pretrained(
+        model_name,
+        torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32
+    ).to(device)
+    # Optionally, don't use CPU offload if not necessary
 
-    [... removed lines 348-438 not preserved in this view ...]
-                            else:
-                                print(f"Method {method_name} not found in pipe.")
-                    if 'condition_type' in config:
-                        condition_type = config['condition_type']
-                        if condition_type == "coloring":
-                            #pipe.enable_coloring()
-                            print("\nEnabled coloring.\n")
-                        elif condition_type == "deblurring":
-                            #pipe.enable_deblurring()
-                            print("\nEnabled deblurring.\n")
-                        elif condition_type == "fill":
-                            #pipe.enable_fill()
-                            print("\nEnabled fill.\n")
-                        elif condition_type == "depth":
-                            #pipe.enable_depth()
-                            print("\nEnabled depth.\n")
-                        elif condition_type == "canny":
-                            #pipe.enable_canny()
-                            print("\nEnabled canny.\n")
-                        elif condition_type == "subject":
-                            #pipe.enable_subject()
-                            print("\nEnabled subject.\n")
+    # alternative version that may be more efficient
+    # pipe.enable_sequential_cpu_offload()
+    if pipeline_name == "FluxPipeline":
+        pipe.enable_model_cpu_offload()
+        pipe.vae.enable_slicing()
+        #pipe.vae.enable_tiling()
+    else:
+        pipe.enable_model_cpu_offload()
+
+    # Access the tokenizer from the pipeline
+    tokenizer = pipe.tokenizer
+
+    # Check if add_prefix_space is set and convert to slow tokenizer if necessary
+    if getattr(tokenizer, 'add_prefix_space', False):
+        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False, device_map = 'cpu')
+        # Update the pipeline's tokenizer
+        pipe.tokenizer = tokenizer
+    pipe.to(device)
+
+    flash_attention_enabled = torch.backends.cuda.flash_sdp_enabled()
+    if flash_attention_enabled == False:
+        #Enable xFormers memory-efficient attention (optional)
+        #pipe.enable_xformers_memory_efficient_attention()
+        print("\nEnabled xFormers memory-efficient attention.\n")
+    else:
+        pipe.attn_implementation="flash_attention_2"
+        print("\nEnabled flash_attention_2.\n")
+
+    condition_type = "subject"
+    # Load LoRA weights
+    # note: does not yet handle multiple LoRA weights with different names, needs .set_adapters(["depth", "hyper-sd"], adapter_weights=[0.85, 0.125])
+    if lora_weights:
+        for lora_weight in lora_weights:
+            lora_configs = constants.LORA_DETAILS.get(lora_weight, [])
+            lora_weight_set = False
+            if lora_configs:
+                for config in lora_configs:
+                    # Load LoRA weights with optional weight_name and adapter_name
+                    if 'weight_name' in config:
+                        weight_name = config.get("weight_name")
+                        adapter_name = config.get("adapter_name")
+                        lora_collection = config.get("lora_collection")
+                        if weight_name and adapter_name and lora_collection and lora_weight_set == False:
+                            pipe.load_lora_weights(
+                                lora_collection,
+                                weight_name=weight_name,
+                                adapter_name=adapter_name,
+                                token=constants.HF_API_TOKEN
+                            )
+                            lora_weight_set = True
+                            print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
+                        elif weight_name and adapter_name==None and lora_collection and lora_weight_set == False:
+                            pipe.load_lora_weights(
+                                lora_collection,
+                                weight_name=weight_name,
+                                token=constants.HF_API_TOKEN
+                            )
+                            lora_weight_set = True
+                            print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}, lora_collection={lora_collection}\n")
+                        elif weight_name and adapter_name and lora_weight_set == False:
+                            pipe.load_lora_weights(
+                                lora_weight,
+                                weight_name=weight_name,
+                                adapter_name=adapter_name,
+                                token=constants.HF_API_TOKEN
+                            )
+                            lora_weight_set = True
+                            print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
+                        elif weight_name and adapter_name==None and lora_weight_set == False:
+                            pipe.load_lora_weights(
+                                lora_weight,
+                                weight_name=weight_name,
+                                token=constants.HF_API_TOKEN
+                            )
+                            lora_weight_set = True
+                            print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
+                        elif lora_weight_set == False:
+                            pipe.load_lora_weights(
+                                lora_weight,
+                                token=constants.HF_API_TOKEN
+                            )
+                            lora_weight_set = True
+                            print(f"\npipe.load_lora_weights({lora_weight}, weight_name={weight_name}, adapter_name={adapter_name}\n")
+                    # Apply 'pipe' configurations if present
+                    if 'pipe' in config:
+                        pipe_config = config['pipe']
+                        for method_name, params in pipe_config.items():
+                            method = getattr(pipe, method_name, None)
+                            if method:
+                                print(f"Applying pipe method: {method_name} with params: {params}")
+                                method(**params)
                             else:
-                                print(f"
-    [... removed lines 463-495 not preserved in this view ...]
+                                print(f"Method {method_name} not found in pipe.")
+                    if 'condition_type' in config:
+                        condition_type = config['condition_type']
+                        if condition_type == "coloring":
+                            #pipe.enable_coloring()
+                            print("\nEnabled coloring.\n")
+                        elif condition_type == "deblurring":
+                            #pipe.enable_deblurring()
+                            print("\nEnabled deblurring.\n")
+                        elif condition_type == "fill":
+                            #pipe.enable_fill()
+                            print("\nEnabled fill.\n")
+                        elif condition_type == "depth":
+                            #pipe.enable_depth()
+                            print("\nEnabled depth.\n")
+                        elif condition_type == "canny":
+                            #pipe.enable_canny()
+                            print("\nEnabled canny.\n")
+                        elif condition_type == "subject":
+                            #pipe.enable_subject()
+                            print("\nEnabled subject.\n")
+                        else:
+                            print(f"Condition type {condition_type} not implemented.")
+            else:
+                pipe.load_lora_weights(lora_weight, use_auth_token=constants.HF_API_TOKEN)
+    gr.Info("lora_weights are loaded",duration=5)
+    # Set the random seed for reproducibility
+    generator = torch.Generator(device=device).manual_seed(seed)
+    conditions = []
+    if conditioned_image is not None:
+        conditioned_image = crop_and_resize_image(conditioned_image, image_width, image_height)
+        condition = Condition(condition_type, conditioned_image)
+        conditions.append(condition)
+        print(f"\nAdded conditioned image.\n {conditioned_image.size}")
+        # Prepare the parameters for image generation
+        additional_parameters ={
+            "strength": strength,
+            "image": conditioned_image,
        }
-    [... removed lines 497-501 not preserved in this view ...]
-        "
-        "
-    [... removed lines 504-522 not preserved in this view ...]
+    else:
+        print("\nNo conditioned image provided.")
+        if neg_prompt!=None:
+            true_cfg_scale=1.1
+            additional_parameters ={
+                "negative_prompt": neg_prompt,
+                "true_cfg_scale": true_cfg_scale,
+            }
+    # handle long prompts by splitting them
+    if approximate_token_count(text) > 76:
+        prompt, prompt2 = split_prompt_precisely(text)
+        prompt_parameters = {
+            "prompt" : prompt,
+            "prompt_2": prompt2
+        }
+    else:
+        prompt_parameters = {
+            "prompt" :text
+        }
+    additional_parameters.update(prompt_parameters)
+    # Combine all parameters
+    generate_params = {
+        "height": image_height,
+        "width": image_width,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "generator": generator,
+    }
+    if additional_parameters:
+        generate_params.update(additional_parameters)
+    generate_params = {k: v for k, v in generate_params.items() if v is not None}
+    print(f"generate_params: {generate_params}")
+    import pickle
+
+    try:
+        pickle.dumps(pipe)
+        print("pipe is picklable.\n")
+    except pickle.PicklingError:
+        print("pipe is not picklable\n.")
+
+    try:
+        pickle.dumps(conditions)
+        print("conditions is picklable.\n")
+    except pickle.PicklingError:
+        print("conditions is not picklable.\n")
+
+    try:
+        pickle.dumps(generator)
+        print("generator is picklable.\n")
+    except pickle.PicklingError:
+        print("generator is not picklable.\n")
+
+    return pipe, conditions, generate_params
+
 
 def generate_ai_image_local (
     map_option,
@@ -565,7 +488,7 @@ def generate_ai_image_local (
     width = additional_parameters.pop('width', width)
     num_inference_steps = additional_parameters.pop('num_inference_steps', num_inference_steps)
    guidance_scale = additional_parameters.pop('guidance_scale', guidance_scale)
-    print("Generating image with the following parameters
+    print("Generating image with the following parameters:\n")
     print(f"Model: {model}")
     print(f"LoRA Weights: {lora_weights}")
     print(f"Prompt: {prompt}")
@@ -578,8 +501,8 @@ def generate_ai_image_local (
     print(f"Additional Parameters: {additional_parameters}")
     print(f"Conditioned Image: {conditioned_image}")
     print(f"Conditioned Image Strength: {strength}")
-    print(f"pipeline: {pipeline_name}")
-    [... removed line not preserved in this view ...]
+    print(f"pipeline: {pipeline_name}\n")
+    pipe, conditions, generate_params = generate_image_lowmem(
         text=prompt,
         model_name=model,
        neg_prompt=negative_prompt,
@@ -594,6 +517,7 @@ def generate_ai_image_local (
        strength=strength,
        additional_parameters=additional_parameters
    )
+    image = generate_image(pipe, conditions, **generate_params)
    with NamedTemporaryFile(delete=False, suffix=".png") as tmp:
        image.save(tmp.name, format="PNG")
        constants.temp_files.append(tmp.name)
@@ -684,7 +608,8 @@ def on_prerendered_gallery_selection(event_data: gr.SelectData):
    global current_prerendered_image
    selected_index = event_data.index
    selected_image = constants.pre_rendered_maps_paths[selected_index]
-    print(f"
+    print(f"Template Image Selected: {selected_image} ({event_data.index})\n")
+    gr.Info(f"Template Image Selected: {selected_image} ({event_data.index})",duration=5)
    current_prerendered_image.value = selected_image
    return current_prerendered_image
 
@@ -1021,11 +946,11 @@ with gr.Blocks(css_paths="style_20250128.css", title=title, theme='Surn/beeuty',
    )
 
    with gr.Row():
-        with gr.Accordion("Generate AI Image (click here)", open = False):
+        with gr.Accordion("Generate AI Image (click here for options)", open = False):
            with gr.Row():
                with gr.Column():
                    model_options = gr.Dropdown(
-                        label="Model
+                        label="Choose an AI Model*",
                        choices=constants.MODELS + constants.LORA_WEIGHTS + ["Manual Entry"],
                        value="Cossale/Frames2-Flex.1",
                        elem_classes="solid"
utils/constants.py
CHANGED
@@ -54,6 +54,7 @@ def load_env_vars(env_path):
 # os.environ['TMPDIR'] = r'e:\\TMP'
 # os.environ['XDG_CACHE_HOME'] = r'E:\\cache'
 
+USE_FLASH_ATTENTION = os.getenv("USE_FLASH_ATTENTION", "0") == "1"
 HF_API_TOKEN = os.getenv("HF_TOKEN")
 if not HF_API_TOKEN:
     raise ValueError("HF_TOKEN is not set. Please check your .env file.")
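The new `USE_FLASH_ATTENTION` constant is only defined here and is not consumed anywhere in this diff; app.py currently checks `torch.backends.cuda.flash_sdp_enabled()` directly. A hedged sketch of how the flag could gate that choice (the `configure_attention` helper is hypothetical, not part of the repo):

```python
import os
import torch

# Hypothetical consumer of the flag added above; this commit only defines it.
USE_FLASH_ATTENTION = os.getenv("USE_FLASH_ATTENTION", "0") == "1"

def configure_attention(pipe):
    # Prefer flash attention only when the env flag is set and the CUDA backend
    # reports flash scaled-dot-product attention support.
    if USE_FLASH_ATTENTION and torch.backends.cuda.flash_sdp_enabled():
        pipe.attn_implementation = "flash_attention_2"
    # Otherwise keep the pipeline's default attention path (e.g. SDPA / xFormers).
    return pipe
```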