Commit fb2478e
Parent(s): 6c2b3a4
Update app.py

app.py CHANGED
```diff
@@ -106,61 +106,13 @@ def sample(
     return video_path, seed
 
 
-def resize_image(image, output_size=(1024, 576)):
-    # Calculate aspect ratios
-    target_aspect = output_size[0] / output_size[1] # Aspect ratio of the desired size
-    image_aspect = image.width / image.height # Aspect ratio of the original image
-
-    # Resize then crop if the original image is larger
-    if image_aspect > target_aspect:
-        # Resize the image to match the target height, maintaining aspect ratio
-        new_height = output_size[1]
-        new_width = int(new_height * image_aspect)
-        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-        # Calculate coordinates for cropping
-        left = (new_width - output_size[0]) / 2
-        top = 0
-        right = (new_width + output_size[0]) / 2
-        bottom = output_size[1]
-    else:
-        # Resize the image to match the target width, maintaining aspect ratio
-        new_width = output_size[0]
-        new_height = int(new_width / image_aspect)
-        resized_image = image.resize((new_width, new_height), Image.LANCZOS)
-        # Calculate coordinates for cropping
-        left = 0
-        top = (new_height - output_size[1]) / 2
-        right = output_size[0]
-        bottom = (new_height + output_size[1]) / 2
-
-    # Crop the image
-    cropped_image = resized_image.crop((left, top, right, bottom))
-    return cropped_image
-
-
 with gr.Blocks() as demo:
-    gr.Markdown(
-        """
-        # [AnimateLCM: Accelerating the Animation of Personalized Diffusion Models and Adapters with Decoupled Consistency Learning](https://arxiv.org/abs/2402.00769)
-        Fu-Yun Wang, Zhaoyang Huang (*Corresponding Author), Xiaoyu Shi, Weikang Bian, Guanglu Song, Yu Liu, Hongsheng Li (*Corresponding Author)<br>
-
-        [arXiv Report](https://arxiv.org/abs/2402.00769) | [Project Page](https://animatelcm.github.io/) | [Github](https://github.com/G-U-N/AnimateLCM) | [Civitai](https://civitai.com/models/290375/animatelcm-fast-video-generation) | [Replicate](https://replicate.com/camenduru/animate-lcm)
-
-        Related Models:
-        [AnimateLCM-t2v](https://huggingface.co/wangfuyun/AnimateLCM): Personalized Text-to-Video Generation
-        [AnimateLCM-SVD-xt](https://huggingface.co/wangfuyun/AnimateLCM-SVD-xt): General Image-to-Video Generation
-        [AnimateLCM-i2v](https://huggingface.co/wangfuyun/AnimateLCM-I2V): Personalized Image-to-Video Generation
-        """
-    )
     with gr.Row():
         with gr.Column():
             image = gr.Image(label="Upload your image", type="pil")
             generate_btn = gr.Button("Generate")
             video = gr.Video()
-
-            safetensors_dropdown = gr.Dropdown(
-                label="Choose Safetensors", choices=get_safetensors_files()
-            )
+
             seed = gr.Slider(
                 label="Seed",
                 value=42,
@@ -184,18 +136,22 @@ with gr.Blocks() as demo:
                 minimum=5,
                 maximum=30,
             )
+            # note: we want something that is close to 16:9 (1.7777)
+            # 576 / 320 = 1.8
+            # 448 / 256 = 1.75
             width = gr.Slider(
                 label="Width of input image",
                 info="It should be divisible by 64",
-                value=
-                minimum=
+                value=576, # 256, 320, 384, 448
+                minimum=256,
                 maximum=2048,
+                step=64,
             )
             height = gr.Slider(
                 label="Height of input image",
                 info="It should be divisible by 64",
-                value=
-                minimum=
+                value=320, # 256, 320, 384, 448
+                minimum=256,
                 maximum=1152,
             )
             max_guidance_scale = gr.Slider(
@@ -221,7 +177,6 @@ with gr.Blocks() as demo:
                 step=1,
             )
 
-    image.upload(fn=resize_image, inputs=image, outputs=image, queue=False)
     generate_btn.click(
         fn=sample,
         inputs=[
@@ -239,21 +194,6 @@ with gr.Blocks() as demo:
         outputs=[video, seed],
         api_name="video",
     )
-    safetensors_dropdown.change(fn=model_select, inputs=safetensors_dropdown)
-
-    gr.Examples(
-        examples=[
-            ["test_imgs/ai-generated-8496135_1280.jpg"],
-            ["test_imgs/dog-7396912_1280.jpg"],
-            ["test_imgs/ship-7833921_1280.jpg"],
-            ["test_imgs/girl-4898696_1280.jpg"],
-            ["test_imgs/power-station-6579092_1280.jpg"]
-        ],
-        inputs=[image],
-        outputs=[video, seed],
-        fn=sample,
-        cache_examples=True,
-    )
 
 if __name__ == "__main__":
     demo.queue()
```
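
Note on the change: the commit removes the server-side `resize_image` helper and its `image.upload` hook, so uploads are no longer resized and center-cropped automatically; instead, the new sliders default to 576×320, which is close to 16:9 and divisible by 64 (enforced on width via `step=64`). For anyone preparing inputs themselves, below is a minimal sketch of the same resize-then-center-crop logic, adapted from the removed `resize_image` and retargeted at the new default size. It assumes Pillow is available; `resize_to_default` is a hypothetical helper name, not part of the committed app.py.

```python
from PIL import Image

def resize_to_default(image: Image.Image, output_size=(576, 320)) -> Image.Image:
    """Resize then center-crop an image to output_size, preserving aspect ratio.

    Hypothetical helper: mirrors the resize_image function this commit removes,
    but targets the new 576x320 default (576 / 320 = 1.8, close to 16:9).
    """
    target_aspect = output_size[0] / output_size[1]
    image_aspect = image.width / image.height

    if image_aspect > target_aspect:
        # Image is wider than the target: match the height, crop the sides.
        new_height = output_size[1]
        new_width = round(new_height * image_aspect)
        resized = image.resize((new_width, new_height), Image.LANCZOS)
        left = (new_width - output_size[0]) // 2
        box = (left, 0, left + output_size[0], output_size[1])
    else:
        # Image is taller than the target: match the width, crop top/bottom.
        new_width = output_size[0]
        new_height = round(new_width / image_aspect)
        resized = image.resize((new_width, new_height), Image.LANCZOS)
        top = (new_height - output_size[1]) // 2
        box = (0, top, output_size[0], top + output_size[1])

    return resized.crop(box)
```

If the old auto-resize behavior is wanted back, such a helper could be re-attached inside the `gr.Blocks` context with `image.upload(fn=resize_to_default, inputs=image, outputs=image, queue=False)`, mirroring the line this commit removes.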