File size: 4,053 Bytes
59c3dd8
ef187eb
 
3cf95dc
0cffd40
24478b9
ef187eb
2b0f02c
24478b9
 
 
 
96fa82a
 
24478b9
eb7c9df
24478b9
fec3be6
24478b9
 
fec3be6
 
8b1e96d
3cf95dc
3599676
ec35e66
4efab5c
 
 
ec35e66
 
4efab5c
 
 
 
 
 
 
 
8b1e96d
a0f72b8
96fa82a
 
 
 
 
fec3be6
24478b9
 
 
 
 
 
 
96fa82a
24478b9
4429dd4
3cf95dc
d94350f
 
24478b9
 
 
 
96fa82a
11fa80e
24478b9
 
 
d06d30a
96fa82a
 
 
 
 
 
 
 
 
 
 
 
 
24478b9
d06d30a
24478b9
 
0cffd40
8b3ca8d
24478b9
 
 
 
8b3ca8d
0cffd40
3958ec9
8b1e96d
0cffd40
4efab5c
24478b9
 
db04c05
24478b9
 
44ee61c
db04c05
 
44ee61c
db04c05
 
 
 
 
24478b9
 
 
 
 
 
 
 
 
 
96fa82a
24478b9
 
 
 
 
 
8b3ca8d
 
24478b9
 
 
fe16630
4b5a4e3
8b3ca8d
8b1e96d
24478b9
8b1e96d
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import os
import gradio as gr
import torch
import numpy as np
import spaces
import random
from PIL import Image

from glob import glob
from pathlib import Path
from typing import Optional

from diffsynth import ModelManager, SVDVideoPipeline, HunyuanDiTImagePipeline
from diffsynth import ModelManager
from diffusers.utils import load_image, export_to_video

import uuid
from huggingface_hub import hf_hub_download


# Enable hf_transfer for faster model downloads from the Hugging Face Hub.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# Optional auth token (e.g. for gated repos); None when not configured.
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Constants
# Upper bound for user-selectable / randomly drawn seeds (fits int32).
MAX_SEED = np.iinfo(np.int32).max

# Hide Gradio's default footer in the rendered page.
CSS = """
footer {
    visibility: hidden;
}
"""

# On page load, force the dark theme by appending ?__theme=dark to the URL.
JS = """function () {
  gradioURL = window.location.href
  if (!gradioURL.endsWith('?__theme=dark')) {
    window.location.replace(gradioURL + '?__theme=dark');
  }
}"""


# Ensure model and scheduler are initialized in GPU-enabled function
# Load SVD img2vid-xt plus the ExVideo 128-frame extension in fp16 on the GPU,
# then build the video pipeline from the loaded weights.
# NOTE(review): when CUDA is unavailable, `pipe` is never defined and
# `generate` would raise NameError — confirm this app only runs on GPU spaces.
if torch.cuda.is_available():
    model_manager = ModelManager(
        torch_dtype=torch.float16, 
        device="cuda", 
        model_id_list=["stable-video-diffusion-img2vid-xt", "ExVideo-SVD-128f-v1"])
    pipe = SVDVideoPipeline.from_model_manager(model_manager)


# function source codes modified from multimodalart/stable-video-diffusion
@spaces.GPU(duration=120)
def generate(
    image: Image,
    seed: Optional[int] = -1,
    motion_bucket_id: int = 127,
    fps_id: int = 25,
    output_folder: str = "outputs",
    progress=gr.Progress(track_tqdm=True)):
    """Generate a 128-frame video from a still image with the ExVideo SVD pipeline.

    Args:
        image: Input PIL image; RGBA inputs are flattened to RGB.
        seed: RNG seed; -1 draws a random seed in [0, MAX_SEED].
        motion_bucket_id: SVD motion conditioning value (more = more motion).
        fps_id: Frames-per-second conditioning, also used when encoding the mp4.
        output_folder: Directory where the numbered .mp4 files are written.
        progress: Gradio progress tracker (tracks tqdm output of the pipeline).

    Returns:
        Tuple of (path to the written mp4, seed actually used).
    """
    # Resolve the "random" sentinel to a concrete, reportable seed.
    seed = random.randint(0, MAX_SEED) if seed == -1 else seed

    # The pipeline expects 3-channel input; drop any alpha channel.
    image = image.convert("RGB") if image.mode == "RGBA" else image

    torch.manual_seed(seed)

    # Sequentially numbered output file: 000000.mp4, 000001.mp4, ...
    os.makedirs(output_folder, exist_ok=True)
    existing_videos = glob(os.path.join(output_folder, "*.mp4"))
    video_path = os.path.join(output_folder, f"{len(existing_videos):06d}.mp4")

    frames = pipe(
        input_image=image.resize((512, 512)), 
        num_frames=128, 
        fps=fps_id, 
        height=512, 
        width=512,
        motion_bucket_id=motion_bucket_id,
        num_inference_steps=50,
        min_cfg_scale=2, 
        max_cfg_scale=2, 
        contrast_enhance_scale=1.2
    ).frames[0]

    export_to_video(frames, video_path, fps=fps_id)

    return video_path, seed


# Example images shown below the input; each is run through `generate` lazily.
examples = [
        "./train.jpg",
        "./girl.webp",
        "./robo.jpg",
    ]



# Gradio Interface

with gr.Blocks(css=CSS, js=JS, theme="soft") as demo:
    gr.HTML("<h1><center>Exvideo📽️</center></h1>")
    gr.HTML("<p><center><a href='https://huggingface.co/ECNU-CILab/ExVideo-SVD-128f-v1'>ExVideo</a> image-to-video generation<br><b>Update</b>: first version</center></p>")
    with gr.Row():
        image = gr.Image(label='Upload Image', height=600, scale=2)
        video = gr.Video(label="Generated Video", height=600, scale=2)
        with gr.Accordion("Advanced Options", open=True):
            with gr.Column(scale=1):
                seed = gr.Slider(
                    label="Seed (-1 Random)",
                    minimum=-1,
                    maximum=MAX_SEED,
                    step=1,
                    value=-1,
                )
                motion_bucket_id = gr.Slider(
                    label="Motion bucket id", 
                    info="Controls how much motion to add/remove from the image", 
                    value=127, 
                    minimum=1, 
                    maximum=255
                )
                fps_id = gr.Slider(
                    label="Frames per second", 
                    info="The length of your video in seconds will be 25/fps", 
                    value=25, 
                    minimum=5, 
                    maximum=30
                )

    submit_btn = gr.Button("Generate")
    # FIX: ClearButton's first positional parameter is `components` (what to
    # clear), not the label — the label goes through `value=`.
    clear_btn = gr.ClearButton([image, video], value="Clear")
    gr.Examples(
        examples=examples,
        inputs=image,
        outputs=[video, seed],
        fn=generate,
        cache_examples="lazy",
        examples_per_page=4,
    )

    # FIX: was `generate_btn.click(...)` — no such name exists (NameError at
    # import time); the button defined above is `submit_btn`.
    submit_btn.click(fn=generate, inputs=[image, seed, motion_bucket_id, fps_id], outputs=[video, seed], api_name="video")

demo.queue().launch()