xi0v Fabrice-TIERCELIN committed
Commit c8b4b1d
Parent: 6ca6cf4

Add more options, from the most useful to the least useful (#10)


- Add more options, from the most useful to the least useful (01ac1c081925b409a4a920d2f7e69271e14e07d1)


Co-authored-by: Fabrice TIERCELIN <Fabrice-TIERCELIN@users.noreply.huggingface.co>

Files changed (1)
app.py +11 -9
app.py CHANGED
@@ -1,5 +1,4 @@
  import gradio as gr
- #import gradio.helpers
  import torch
  import os
  from glob import glob
@@ -12,7 +11,6 @@ from PIL import Image

  import uuid
  import random
- from huggingface_hub import hf_hub_download
  import spaces

  pipe = StableVideoDiffusionPipeline.from_pretrained(
@@ -29,9 +27,9 @@ def sample(
      randomize_seed: bool = True,
      motion_bucket_id: int = 127,
      fps_id: int = 6,
+     noise_aug_strength: float = 0.1,
+     decoding_t: int = 3,
      version: str = "svd_xt",
-     cond_aug: float = 0.02,
-     decoding_t: int = 3,  # Number of frames decoded at a time! This eats most VRAM. Reduce if necessary.
      device: str = "cuda",
      output_folder: str = "outputs",
  ):
@@ -46,7 +44,7 @@ def sample(
      base_count = len(glob(os.path.join(output_folder, "*.mp4")))
      video_path = os.path.join(output_folder, f"{base_count:06d}.mp4")

-     frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
+     frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=noise_aug_strength, num_frames=25).frames[0]
      export_to_video(frames, video_path, fps=fps_id)

      return video_path, frames, seed
@@ -60,7 +58,7 @@ def resize_image(image, output_size=(1024, 576)):
      if image.width == output_size[0] and image.height == output_size[1]:
          return image

-     # Resize then crop if the original image is larger
+     # Resize if the original image is larger
      if image_aspect > target_aspect:
          # Resize the image to match the target height, maintaining aspect ratio
          new_height = output_size[1]
@@ -94,17 +92,21 @@ with gr.Blocks() as demo:
      with gr.Column():
          image = gr.Image(label="Upload your image", type="pil")
          with gr.Accordion("Advanced options", open=False):
+             fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
+             motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
+             noise_aug_strength = gr.Slider(label="Noise strength", info="The noise to add", value=0.1, minimum=0, maximum=1, step=0.1)
+             decoding_t = gr.Slider(label="Decoding", info="Number of frames decoded at a time; this eats more VRAM; reduce if necessary", value=3, minimum=1, maximum=5, step=1)
              seed = gr.Slider(label="Seed", value=42, randomize=True, minimum=0, maximum=max_64_bit_int, step=1)
              randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-             motion_bucket_id = gr.Slider(label="Motion bucket id", info="Controls how much motion to add/remove from the image", value=127, minimum=1, maximum=255)
-             fps_id = gr.Slider(label="Frames per second", info="The length of your video in seconds will be 25/fps", value=6, minimum=5, maximum=30)
+
          generate_btn = gr.Button(value="Animate", variant="primary")
+
      with gr.Column():
          video = gr.Video(label="Generated video")
          gallery = gr.Gallery(label="Generated frames")

  image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
- generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id], outputs=[video, gallery, seed], api_name="video")
+ generate_btn.click(fn=sample, inputs=[image, seed, randomize_seed, motion_bucket_id, fps_id, noise_aug_strength, decoding_t], outputs=[video, gallery, seed], api_name="video")

  if __name__ == "__main__":
      demo.launch(share=True, show_api=False)
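
For context, the two new sliders map directly onto arguments of diffusers' StableVideoDiffusionPipeline, which this Space already calls. The following is a minimal standalone sketch (not part of this commit) showing the same parameters used outside Gradio; the checkpoint id and the input/output file names are assumptions, since the Space's from_pretrained(...) call is truncated in the diff above.

    # Minimal sketch: the knobs exposed by the new sliders, passed straight to diffusers.
    # The checkpoint id below is an assumption (the Space's from_pretrained(...) call
    # is not fully visible in this diff); "input.png" is a hypothetical local image.
    import torch
    from diffusers import StableVideoDiffusionPipeline
    from diffusers.utils import load_image, export_to_video

    pipe = StableVideoDiffusionPipeline.from_pretrained(
        "stabilityai/stable-video-diffusion-img2vid-xt",  # assumed SVD-XT checkpoint
        torch_dtype=torch.float16,
        variant="fp16",
    ).to("cuda")

    image = load_image("input.png").resize((1024, 576))

    seed = 42
    motion_bucket_id = 127      # more motion as this increases (slider range 1-255)
    fps_id = 6                  # playback fps; clip length is 25 / fps_id seconds
    noise_aug_strength = 0.1    # noise added to the conditioning image (slider range 0-1)
    decoding_t = 3              # frames decoded per VAE pass; lower it to save VRAM

    generator = torch.manual_seed(seed)
    frames = pipe(
        image,
        decode_chunk_size=decoding_t,
        generator=generator,
        motion_bucket_id=motion_bucket_id,
        noise_aug_strength=noise_aug_strength,
        num_frames=25,
    ).frames[0]

    export_to_video(frames, "output.mp4", fps=fps_id)

In the Space itself these values come from the new sliders and are forwarded through sample()'s keyword parameters; per the slider hint, lowering decoding_t is the first knob to turn if the GPU runs out of memory.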