Update handler.py
handler.py +13 -9
handler.py CHANGED
@@ -410,20 +410,24 @@ class EndpointHandler:
         if self._current_lora_model != (config.lora_model_name, config.lora_model_weight_file):
             # Unload previous LoRA if it exists and is different
             if hasattr(self.text_to_video, 'unload_lora_weights'):
+                print("Unloading LoRA weights for the text_to_video pipeline..")
                 self.text_to_video.unload_lora_weights()
 
             if support_image_prompt and hasattr(self.image_to_video, 'unload_lora_weights'):
+                print("Unloading LoRA weights for the image_to_video pipeline..")
                 self.image_to_video.unload_lora_weights()
 
             if config.lora_model_name:
                 # Load new LoRA
                 if hasattr(self.text_to_video, 'load_lora_weights'):
+                    print("Loading LoRA weights for the text_to_video pipeline..")
                     self.text_to_video.load_lora_weights(
                         config.lora_model_name,
                         weight_name=config.lora_model_weight_file if config.lora_model_weight_file else None,
                         token=hf_token,
                     )
                 if support_image_prompt and hasattr(self.image_to_video, 'load_lora_weights'):
+                    print("Loading LoRA weights for the image_to_video pipeline..")
                     self.image_to_video.load_lora_weights(
                         config.lora_model_name,
                         weight_name=config.lora_model_weight_file if config.lora_model_weight_file else None,
@@ -435,15 +439,15 @@ class EndpointHandler:
         if config.lora_model_trigger:
             generation_kwargs["prompt"] = f"{config.lora_model_trigger} {generation_kwargs['prompt']}"
 
-        enhance_a_video_config = EnhanceAVideoConfig(
-            weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
-            # doing some testing
-            num_frames_callback=lambda: (8 + 1),
-            # num_frames_callback=lambda: config.num_frames,
-            # num_frames_callback=lambda: (config.num_frames - 1),
-
-            _attention_type=1
-        )
+        #enhance_a_video_config = EnhanceAVideoConfig(
+        #    weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
+        #    # doing some testing
+        #    num_frames_callback=lambda: (8 + 1),
+        #    # num_frames_callback=lambda: config.num_frames,
+        #    # num_frames_callback=lambda: (config.num_frames - 1),
+        #
+        #    _attention_type=1
+        #)
 
         # Check if image-to-video generation is requested
         if support_image_prompt and input_image:
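
For reference, the swap logic in the first hunk follows the usual diffusers LoRA-loader pattern (load_lora_weights / unload_lora_weights, guarded with hasattr so pipelines without the LoRA mixin still work). A minimal standalone sketch of the same idea; swap_lora, pipe, current, and requested are illustrative names, not part of handler.py:

    def swap_lora(pipe, current, requested, hf_token=None):
        # current/requested are (repo_id, weight_file) tuples, mirroring the
        # (lora_model_name, lora_model_weight_file) pair used in handler.py.
        if current == requested:
            return current  # already active, nothing to do
        # Drop whatever LoRA is loaded before loading a different one.
        if hasattr(pipe, "unload_lora_weights"):
            pipe.unload_lora_weights()
        repo_id, weight_file = requested
        if repo_id and hasattr(pipe, "load_lora_weights"):
            pipe.load_lora_weights(
                repo_id,
                weight_name=weight_file or None,
                token=hf_token,
            )
        return requested  # caller stores this as the new active LoRA

The caller is still responsible for the bookkeeping handler.py does through self._current_lora_model; returning the active tuple makes that explicit.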
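The second hunk disables Enhance-A-Video rather than deleting it, keeping the in-progress num_frames_callback experiments visible in the source. Re-enabled as written, the block would read as below; the field names are taken verbatim from the diff, and the exact EnhanceAVideoConfig signature is not shown in this commit:

    enhance_a_video_config = EnhanceAVideoConfig(
        # a weight of 0.0 effectively turns the enhancement off when the flag is disabled
        weight=config.enhance_a_video_weight if config.enable_enhance_a_video else 0.0,
        # reports how many frames will be generated; (8 + 1) is the hard-coded
        # test value from the commit, with config.num_frames and
        # (config.num_frames - 1) as the alternatives being tried
        num_frames_callback=lambda: (8 + 1),
        _attention_type=1,
    )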
|