Spaces: Running on Zero
Update app
app.py CHANGED
@@ -17,7 +17,7 @@ from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
 def predict_depth(model, image):
     return model(image)
 
-def make_video(video_path, outdir='./vis_video_depth',encoder='
+def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
     # DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
     # model = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(DEVICE).eval()
     # Define path for temporary processed frames
@@ -149,7 +149,7 @@ with gr.Blocks(css=css) as demo:
 
     with gr.Row():
         input_video = gr.Video(label="Input Video")
-        model_type = gr.Dropdown(["vits", "
+        model_type = gr.Dropdown(["vits", "vitl"], type="value", label='Model Type')
         submit = gr.Button("Submit")
         processed_video = gr.Video(label="Processed Video")
 
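For readers following the diff: the new Dropdown's selected string ("vits" or "vitl") presumably has to reach make_video's encoder argument when the Submit button fires. Below is a minimal Python sketch of that wiring, assuming a submit.click handler; the lambda, the placeholder make_video body, and demo.launch() are illustrative assumptions and are not part of this commit.

# Hypothetical wiring sketch (not part of this diff): forward the selected
# model type to make_video's encoder argument when Submit is clicked.
import gradio as gr

def make_video(video_path, outdir='./vis_video_depth', encoder='vitl'):
    # Frame extraction and depth inference elided; placeholder passthrough.
    return video_path

with gr.Blocks() as demo:
    with gr.Row():
        input_video = gr.Video(label="Input Video")
        model_type = gr.Dropdown(["vits", "vitl"], type="value", label='Model Type')
        submit = gr.Button("Submit")
        processed_video = gr.Video(label="Processed Video")
    # Gradio passes component values to the handler positionally, so the lambda
    # forwards the dropdown value as the encoder keyword (outdir keeps its default).
    submit.click(
        fn=lambda video, encoder: make_video(video, encoder=encoder),
        inputs=[input_video, model_type],
        outputs=processed_video,
    )

demo.launch()

With this wiring, picking "vits" or "vitl" in the dropdown selects the corresponding Depth Anything encoder for the processed video, while the function's default of 'vitl' only applies when make_video is called directly without an encoder argument.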