Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import os
 import subprocess
+from share_btn import community_icon_html, loading_icon_html, share_js
 import cv2
 import numpy as np
 from moviepy.editor import VideoFileClip, concatenate_videoclips
@@ -119,7 +120,7 @@ def change_video_fps(input_path):
 
     return 'output_video.mp4'
 
-def run_inference(prompt, video_path, condition, video_length, seed, steps):
+def run_inference(prompt, video_path, condition, video_length, seed):
 
     seed = math.floor(seed)
     o_width = get_video_dimension(video_path)[0]
@@ -166,9 +167,9 @@ def run_inference(prompt, video_path, condition, video_length, seed, steps):
 
     print(f"RUNNING INFERENCE ...")
     if video_length > 12:
-        command = f"python inference.py --prompt '{prompt}' --inference_steps
+        command = f"python inference.py --prompt '{prompt}' --inference_steps 50 --condition '{condition}' --video_path '{resized}' --output_path '{output_path}' --temp_chunk_path 'result' --width {r_width} --height {r_height} --fps {target_fps} --seed {seed} --video_length {video_length} --smoother_steps 19 20 --is_long_video"
     else:
-        command = f"python inference.py --prompt '{prompt}' --inference_steps
+        command = f"python inference.py --prompt '{prompt}' --inference_steps 50 --condition '{condition}' --video_path '{resized}' --output_path '{output_path}' --temp_chunk_path 'result' --width {r_width} --height {r_height} --fps {target_fps} --seed {seed} --video_length {video_length} --smoother_steps 19 20"
 
     try:
         subprocess.run(command, shell=True)
@@ -190,11 +191,64 @@ def run_inference(prompt, video_path, condition, video_length, seed, steps):
     print(f"GEN VIDEO FPS: {gen_fps}")
     final = change_video_fps(video_path_output)
     print(f"FINISHED !")
-
+
+    return final
 
 
 css="""
 #col-container {max-width: 810px; margin-left: auto; margin-right: auto;}
+.animate-spin {
+  animation: spin 1s linear infinite;
+}
+@keyframes spin {
+  from {
+    transform: rotate(0deg);
+  }
+  to {
+    transform: rotate(360deg);
+  }
+}
+#share-btn-container {
+  display: flex;
+  padding-left: 0.5rem !important;
+  padding-right: 0.5rem !important;
+  background-color: #000000;
+  justify-content: center;
+  align-items: center;
+  border-radius: 9999px !important;
+  max-width: 13rem;
+}
+#share-btn-container:hover {
+  background-color: #060606;
+}
+#share-btn {
+  all: initial;
+  color: #ffffff;
+  font-weight: 600;
+  cursor:pointer;
+  font-family: 'IBM Plex Sans', sans-serif;
+  margin-left: 0.5rem !important;
+  padding-top: 0.5rem !important;
+  padding-bottom: 0.5rem !important;
+  right:0;
+}
+#share-btn * {
+  all: unset;
+}
+#share-btn-container div:nth-child(-n+2){
+  width: auto !important;
+  min-height: 0px !important;
+}
+#share-btn-container .wrap {
+  display: none !important;
+}
+#share-btn-container.hidden {
+  display: none!important;
+}
+img[src*='#center'] {
+  display: block;
+  margin: auto;
+}
 """
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
@@ -216,7 +270,10 @@ with gr.Blocks(css=css) as demo:
                 submit_btn = gr.Button("Submit")
             with gr.Column():
                 video_res = gr.Video(label="result", elem_id="video-out")
-
+                with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
+                    community_icon = gr.HTML(community_icon_html)
+                    loading_icon = gr.HTML(loading_icon_html)
+                    share_button = gr.Button("Share to community", elem_id="share-btn")
         gr.Examples(
             examples=[["Indiana Jones moonwalk in the Jungle", "./examples/moonwalk.mp4", 'depth', 12, 424242, 50]],
             fn=run_inference,
@@ -224,10 +281,9 @@ with gr.Blocks(css=css) as demo:
                 video_path,
                 condition,
                 video_length,
-                seed
-                inference_steps
+                seed
             ],
-            outputs=[
+            outputs=[video_res],
             cache_examples=True
         )
        video_path.change(fn=get_frame_count,
@@ -240,9 +296,8 @@ with gr.Blocks(css=css) as demo:
                 video_path,
                 condition,
                 video_length,
-                seed
-                inference_steps
+                seed
             ],
-            outputs=[
+            outputs=[video_res])
 
 demo.queue(max_size=12).launch()
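
Note on wiring: the hunks above add the share_btn import and the hidden share-button group, but the event hookup itself falls outside the ranges shown. Below is a minimal, hypothetical sketch of how such a Space is typically wired with the Gradio 3.x-era API. The component constructors, placeholder choices, and the .then()/gr.update() reveal step are assumptions, not code from this commit, and the snippet expects a share_btn.py providing the imported helpers.

# Hypothetical wiring sketch (not part of this commit's hunks); Gradio 3.x-era API.
import gradio as gr
from share_btn import community_icon_html, loading_icon_html, share_js  # assumes share_btn.py exists in the Space

def run_inference(prompt, video_path, condition, video_length, seed):
    # placeholder for the real pipeline shown in the diff
    return "output_video.mp4"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    video_path = gr.Video(label="video input")
    condition = gr.Textbox(value="depth", label="condition")                  # placeholder component
    video_length = gr.Slider(1, 24, value=12, step=1, label="video length")   # placeholder range
    seed = gr.Number(value=424242, label="seed")
    submit_btn = gr.Button("Submit")
    video_res = gr.Video(label="result", elem_id="video-out")

    # hidden share group, mirroring the layout added in the diff
    with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
        community_icon = gr.HTML(community_icon_html)
        loading_icon = gr.HTML(loading_icon_html)
        share_button = gr.Button("Share to community", elem_id="share-btn")

    # run inference with the new 5-argument signature, then reveal the share group
    submit_btn.click(
        fn=run_inference,
        inputs=[prompt, video_path, condition, video_length, seed],
        outputs=[video_res],
    ).then(lambda: gr.update(visible=True), None, share_group)

    # share_js runs entirely in the browser (Gradio 3.x `_js` hook)
    share_button.click(None, [], [], _js=share_js)

demo.queue(max_size=12).launch()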
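
The inference command in the new hunks is built as a single f-string and executed with shell=True. For readers adapting this, here is the equivalent argument-list form: a sketch under the assumption that the flags visible in the diff are the complete set; build_inference_command is a hypothetical helper, not part of the Space. Passing a list to subprocess.run sidesteps shell-quoting problems when the prompt itself contains quotes.

# Hypothetical helper, not from this commit: the same flags as the diff, as an argv list.
import subprocess

def build_inference_command(prompt, condition, resized, output_path,
                            r_width, r_height, target_fps, seed, video_length,
                            is_long_video=False):
    cmd = [
        "python", "inference.py",
        "--prompt", prompt,                 # no manual shell quoting needed
        "--inference_steps", "50",
        "--condition", condition,
        "--video_path", resized,
        "--output_path", output_path,
        "--temp_chunk_path", "result",
        "--width", str(r_width),
        "--height", str(r_height),
        "--fps", str(target_fps),
        "--seed", str(seed),
        "--video_length", str(video_length),
        "--smoother_steps", "19", "20",
    ]
    if is_long_video:                       # mirrors the `video_length > 12` branch
        cmd.append("--is_long_video")
    return cmd

# usage: subprocess.run(build_inference_command(...), check=True)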