Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ from transformers import pipeline
 import os
 import torch
 import torch.nn.functional as F
+from torchvision import transforms
 from torchvision.transforms import Compose
 import tempfile
 import spaces
@@ -27,6 +28,7 @@ def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
     temp_frame_dir = tempfile.mkdtemp()

     margin_width = 50
+    to_tensor_transform = transforms.ToTensor()

     DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
     DEVICE = "cuda"
@@ -91,7 +93,7 @@ def make_video(video_path, outdir='./vis_video_depth',encoder='vitl'):
     frame = torch.from_numpy(frame).unsqueeze(0).to(DEVICE)


-    depth =
+    depth = to_tensor_transform(predict_depth(depth_anything, frame_pil))

     depth = F.interpolate(depth[None], (frame_height, frame_width), mode='bilinear', align_corners=False)[0, 0]
     depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
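The substance of the commit is the single changed line in the last hunk: the depth prediction is now wrapped in torchvision's ToTensor so that the subsequent F.interpolate and min-max scaling operate on a torch tensor. A minimal sketch of that pipeline, with a hypothetical predict_depth stub standing in for the Space's real model call (the actual function takes the depth_anything model and a PIL frame):

import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms

to_tensor_transform = transforms.ToTensor()

def predict_depth(model, frame_pil):
    # Hypothetical stand-in for the Space's predict_depth():
    # returns a fake H x W float32 depth map instead of a model prediction.
    return np.random.rand(240, 320).astype(np.float32)

frame_height, frame_width = 480, 640

# ToTensor converts an H x W numpy array (or PIL image) into a 1 x H x W float tensor,
# so the interpolation and normalization below run on a torch tensor.
depth = to_tensor_transform(predict_depth(None, None))                # shape: (1, 240, 320)
depth = F.interpolate(depth[None], (frame_height, frame_width),
                      mode='bilinear', align_corners=False)[0, 0]     # shape: (480, 640)
depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0   # scale to [0, 255]
print(depth.shape, depth.min().item(), depth.max().item())

ToTensor adds the channel dimension (1 x H x W), which is why the code can index the interpolated result with [0, 0] to recover a plain H x W depth map before scaling it to the 0-255 range for visualization.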