Spaces:
Running
on
Zero
Running
on
Zero
Fabrice-TIERCELIN
committed on
Commit
•
84fdfe8
1
Parent(s):
e276a90
Do not touch the image if the size is good
Browse files
app.py
CHANGED
@@ -14,14 +14,11 @@ import uuid
|
|
14 |
import random
|
15 |
from huggingface_hub import hf_hub_download
|
16 |
import spaces
|
17 |
-
#gradio.helpers.CACHED_FOLDER = '/data/cache'
|
18 |
|
19 |
pipe = StableVideoDiffusionPipeline.from_pretrained(
|
20 |
"vdo/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16"
|
21 |
)
|
22 |
pipe.to("cuda")
|
23 |
-
#pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
24 |
-
#pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
|
25 |
|
26 |
max_64_bit_int = 2**63 - 1
|
27 |
|
@@ -51,7 +48,6 @@ def sample(
|
|
51 |
|
52 |
frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
|
53 |
export_to_video(frames, video_path, fps=fps_id)
|
54 |
-
torch.manual_seed(seed)
|
55 |
|
56 |
return video_path, frames, seed
|
57 |
|
@@ -60,6 +56,10 @@ def resize_image(image, output_size=(1024, 576)):
|
|
60 |
target_aspect = output_size[0] / output_size[1] # Aspect ratio of the desired size
|
61 |
image_aspect = image.width / image.height # Aspect ratio of the original image
|
62 |
|
|
|
|
|
|
|
|
|
63 |
# Resize then crop if the original image is larger
|
64 |
if image_aspect > target_aspect:
|
65 |
# Resize the image to match the target height, maintaining aspect ratio
|
@@ -107,5 +107,4 @@ with gr.Blocks() as demo:
|
|
107 |
generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, gallery, seed], api_name="video")
|
108 |
|
109 |
if __name__ == "__main__":
|
110 |
-
#demo.queue(max_size=20, api_open=False)
|
111 |
demo.launch(share=True, show_api=False)
|
|
|
14 |
import random
|
15 |
from huggingface_hub import hf_hub_download
|
16 |
import spaces
|
|
|
17 |
|
18 |
pipe = StableVideoDiffusionPipeline.from_pretrained(
|
19 |
"vdo/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, variant="fp16"
|
20 |
)
|
21 |
pipe.to("cuda")
|
|
|
|
|
22 |
|
23 |
max_64_bit_int = 2**63 - 1
|
24 |
|
|
|
48 |
|
49 |
frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
|
50 |
export_to_video(frames, video_path, fps=fps_id)
|
|
|
51 |
|
52 |
return video_path, frames, seed
|
53 |
|
|
|
56 |
target_aspect = output_size[0] / output_size[1] # Aspect ratio of the desired size
|
57 |
image_aspect = image.width / image.height # Aspect ratio of the original image
|
58 |
|
59 |
+
# Do not touch the image if the size is good
|
60 |
+
if image.width == output_size[0] and image.height == output_size[1]:
|
61 |
+
return image
|
62 |
+
|
63 |
# Resize then crop if the original image is larger
|
64 |
if image_aspect > target_aspect:
|
65 |
# Resize the image to match the target height, maintaining aspect ratio
|
|
|
107 |
generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, gallery, seed], api_name="video")
|
108 |
|
109 |
if __name__ == "__main__":
|
|
|
110 |
demo.launch(share=True, show_api=False)
|