Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -171,7 +171,7 @@ def make_video(video_path, outdir='./vis_video_depth', encoder='vits'):
|
|
171 |
global frame_selected
|
172 |
global masks
|
173 |
masks = orig_frames
|
174 |
-
return final_vid, final_zip, orig_frames, depth_frames, masks[frame_selected] #output_path
|
175 |
|
176 |
def depth_edges_mask(depth):
|
177 |
"""Returns a mask of edges in the depth map.
|
@@ -304,7 +304,7 @@ def blur_image(image, depth, blur_data):
|
|
304 |
def loadurl(url):
|
305 |
return url
|
306 |
|
307 |
-
def select_frame(v, evt: gr.SelectData):
|
308 |
global frame_selected
|
309 |
global masks
|
310 |
masks[frame_selected] = v
|
@@ -313,7 +313,7 @@ def select_frame(v, evt: gr.SelectData):
|
|
313 |
frame_selected = evt.index
|
314 |
v = masks[frame_selected]
|
315 |
#print(v)
|
316 |
-
return
|
317 |
|
318 |
|
319 |
css = """
|
@@ -361,8 +361,7 @@ with gr.Blocks(css=css) as demo:
|
|
361 |
input_url = gr.Textbox(value="./examples/streetview.mp4", label="URL")
|
362 |
input_video = gr.Video(label="Input Video", format="mp4")
|
363 |
input_url.change(fn=loadurl, inputs=[input_url], outputs=[input_video])
|
364 |
-
output_frame = gr.Gallery(label="Frames", type='numpy', preview=True, columns=8192, interactive=False)
|
365 |
-
output_depth = gr.Gallery(label="Depth", type='numpy', preview=True, columns=8192, interactive=False)
|
366 |
output_mask = gr.ImageEditor(interactive=True, transforms=(None,), eraser=gr.Eraser(), brush=gr.Brush(colors=['black', 'darkgray', 'gray', 'lightgray', 'white']), layers=True)
|
367 |
submit = gr.Button("Submit")
|
368 |
with gr.Column():
|
@@ -574,8 +573,7 @@ with gr.Blocks(css=css) as demo:
|
|
574 |
<a id='move' href='#'>move</a> <a id='rotate' href='#'>rotate</a> <a id='scale' href='#'>scale</a>
|
575 |
</pre>""")
|
576 |
selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
|
577 |
-
output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_frame, output_mask, selected], show_progress='hidden')
|
578 |
-
output_depth.select(fn=select_frame, inputs=[output_mask], outputs=[output_frame, output_mask, selected], show_progress='hidden')
|
579 |
example_coords = """[
|
580 |
{"latLng": { "lat": 50.07379596793083, "lng": 14.437146122950555 } },
|
581 |
{"latLng": { "lat": 50.073799567020004, "lng": 14.437146774240507 } },
|
@@ -729,14 +727,14 @@ with gr.Blocks(css=css) as demo:
|
|
729 |
|
730 |
return output_video_path + (locations,)
|
731 |
|
732 |
-
submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_depth, output_mask, coords])
|
733 |
render.click(partial(get_mesh), inputs=[output_frame, output_depth, blur_in, load_all], outputs=[result])
|
734 |
|
735 |
example_files = os.listdir('examples')
|
736 |
example_files.sort()
|
737 |
example_files = [os.path.join('examples', filename) for filename in example_files]
|
738 |
|
739 |
-
examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=[processed_video, processed_zip, output_frame, output_depth, output_mask, coords], fn=on_submit, cache_examples=True)
|
740 |
|
741 |
|
742 |
if __name__ == '__main__':
|
|
|
171 |
global frame_selected
|
172 |
global masks
|
173 |
masks = orig_frames
|
174 |
+
return final_vid, final_zip, np.concatenate((orig_frames, depth_frames), axis=0), masks[frame_selected] #output_path
|
175 |
|
176 |
def depth_edges_mask(depth):
|
177 |
"""Returns a mask of edges in the depth map.
|
|
|
304 |
def loadurl(url):
|
305 |
return url
|
306 |
|
307 |
+
def select_frame(v):
|
308 |
global frame_selected
|
309 |
global masks
|
310 |
masks[frame_selected] = v
|
|
|
313 |
frame_selected = evt.index
|
314 |
v = masks[frame_selected]
|
315 |
#print(v)
|
316 |
+
return v, frame_selected
|
317 |
|
318 |
|
319 |
css = """
|
|
|
361 |
input_url = gr.Textbox(value="./examples/streetview.mp4", label="URL")
|
362 |
input_video = gr.Video(label="Input Video", format="mp4")
|
363 |
input_url.change(fn=loadurl, inputs=[input_url], outputs=[input_video])
|
364 |
+
output_frame = gr.Gallery(label="Frames", type='numpy', preview=True, columns=256)
|
|
|
365 |
output_mask = gr.ImageEditor(interactive=True, transforms=(None,), eraser=gr.Eraser(), brush=gr.Brush(colors=['black', 'darkgray', 'gray', 'lightgray', 'white']), layers=True)
|
366 |
submit = gr.Button("Submit")
|
367 |
with gr.Column():
|
|
|
573 |
<a id='move' href='#'>move</a> <a id='rotate' href='#'>rotate</a> <a id='scale' href='#'>scale</a>
|
574 |
</pre>""")
|
575 |
selected = gr.Number(elem_id="fnum", value=0, minimum=0, maximum=256, interactive=False)
|
576 |
+
output_frame.select(fn=select_frame, inputs=[output_mask], outputs=[output_mask, selected], show_progress='hidden')
|
|
|
577 |
example_coords = """[
|
578 |
{"latLng": { "lat": 50.07379596793083, "lng": 14.437146122950555 } },
|
579 |
{"latLng": { "lat": 50.073799567020004, "lng": 14.437146774240507 } },
|
|
|
727 |
|
728 |
return output_video_path + (locations,)
|
729 |
|
730 |
+
submit.click(on_submit, inputs=[input_video, model_type, coords], outputs=[processed_video, processed_zip, output_frame, output_mask, coords])
|
731 |
render.click(partial(get_mesh), inputs=[output_frame, output_depth, blur_in, load_all], outputs=[result])
|
732 |
|
733 |
example_files = os.listdir('examples')
|
734 |
example_files.sort()
|
735 |
example_files = [os.path.join('examples', filename) for filename in example_files]
|
736 |
|
737 |
+
examples = gr.Examples(examples=example_files, inputs=[input_video], outputs=[processed_video, processed_zip, output_frame, output_mask, coords], fn=on_submit, cache_examples=True)
|
738 |
|
739 |
|
740 |
if __name__ == '__main__':
|