Spaces:
Runtime error
LeoXing1996 committed
Commit • 73e3005
1 Parent(s): 1d8e8f1
add debug print + add size slider
animatediff/models/motion_module.py
CHANGED
@@ -472,6 +472,7 @@ class CrossAttention(nn.Module):
         return hidden_states
 
     def _memory_efficient_attention_pt20(self, query, key, value, attention_mask):
+        print('Use PT20 Attention')
         query = query.contiguous()
         key = key.contiguous()
         value = value.contiguous()
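The added print('Use PT20 Attention') is a debug line that confirms at runtime that the PyTorch 2.0 attention branch is taken. The rest of the method body is not visible in this diff; the following is only a rough sketch, assuming the helper wraps torch.nn.functional.scaled_dot_product_attention (head reshaping and scaling handled by the surrounding CrossAttention module are omitted):

import torch
import torch.nn.functional as F

def memory_efficient_attention_pt20(query, key, value, attention_mask=None):
    # Illustrative sketch, not the repository's exact method: route through
    # the fused PyTorch >= 2.0 attention kernel on contiguous tensors.
    print('Use PT20 Attention')  # debug line added by this commit
    query = query.contiguous()
    key = key.contiguous()
    value = value.contiguous()
    return F.scaled_dot_product_attention(query, key, value,
                                          attn_mask=attention_mask)

# Example: q = k = v = torch.randn(1, 8, 16, 64) -> output has the same shape.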
app-huggingface.py
CHANGED
@@ -346,6 +346,7 @@ class AnimateController:
         style,
         with_text=False,
         text_idx=0,
+        max_size=512,
         progress=gr.Progress(),
 
     ):
@@ -374,7 +375,7 @@
         print(f'Seed: {seed}')
 
         pipeline = self.pipeline_dict[style]
-        init_img, h, w = preprocess_img(init_img)
+        init_img, h, w = preprocess_img(init_img, max_size)
         print(f'img size: {h, w}')
 
         sample = pipeline(
@@ -517,6 +518,9 @@ def ui():
                 motion_scale_silder = gr.Slider(
                     label='Motion Scale (Larger value means larger motion but less identity consistency)',
                     value=1, step=1, minimum=1, maximum=len(RANGE_LIST))
+                max_size_silder = gr.Slider(
+                    label='Max size (The long edge of the input image will be resized to this value, larger value means slower inference speed)',
+                    value=512, step=64, minimum=512, maximum=1024)
 
                 with gr.Accordion('Advance Options', open=False):
                     negative_prompt_textbox = gr.Textbox(
@@ -567,6 +571,7 @@
                 seed_textbox,
                 ip_adapter_scale,
                 style_dropdown,
+                max_size_silder,
             ],
             outputs=[result_video, download])
 
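The new max_size value travels from the max_size_silder UI component into the controller and on to preprocess_img, whose implementation is not part of this diff. According to the slider label, the long edge of the input image is resized to max_size. A hypothetical sketch of such a preprocessing step, matching the img, h, w return order used above (function body assumed, not taken from the repository):

from PIL import Image

def preprocess_img(init_img: Image.Image, max_size: int = 512):
    # Hypothetical sketch: scale so the longer edge equals max_size, then
    # snap both sides to multiples of 8, as diffusion pipelines typically expect.
    w, h = init_img.size
    scale = max_size / max(w, h)
    w, h = int(w * scale) // 8 * 8, int(h * scale) // 8 * 8
    return init_img.resize((w, h), Image.LANCZOS), h, w

Note that Gradio supplies the components listed in inputs to the callback as positional arguments (a gr.Progress() default is injected by Gradio itself rather than taken from inputs), so the slider's position in the inputs list must correspond to the position of the new max_size parameter in the controller's signature.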