Commit d58199d
Parent(s): 1c9eed7
Commit message: update
app.py CHANGED
@@ -55,7 +55,7 @@ from lib.eval_utils.custom_utils import load_slam_cam
 from lib.vis.run_vis2 import lookat_matrix, run_vis2_on_video, run_vis2_on_video_cam
 from lib.vis.renderer_world import Renderer
 
-@spaces.GPU(duration=200)
+# @spaces.GPU(duration=200)
 def render_reconstruction(input_video, img_focal):
     args = EasyDict()
     args.video_path = input_video
@@ -67,13 +67,23 @@ def render_reconstruction(input_video, img_focal):
 
     start_idx, end_idx, seq_folder, imgfiles = detect_track_video(args)
 
-    frame_chunks_all, img_focal = hawor_motion_estimation(args, start_idx, end_idx, seq_folder)
+    if os.path.exists(f'{seq_folder}/tracks_{start_idx}_{end_idx}/frame_chunks_all.npy'):
+        print("skip hawor motion estimation")
+        frame_chunks_all = joblib.load(f'{seq_folder}/tracks_{start_idx}_{end_idx}/frame_chunks_all.npy')
+        img_focal = args.img_focal
+    else:
+        frame_chunks_all, img_focal = hawor_motion_estimation(args, start_idx, end_idx, seq_folder)
 
     slam_path = os.path.join(seq_folder, f"SLAM/hawor_slam_w_scale_{start_idx}_{end_idx}.npz")
     if not os.path.exists(slam_path):
         hawor_slam(args, start_idx, end_idx)
     R_w2c_sla_all, t_w2c_sla_all, R_c2w_sla_all, t_c2w_sla_all = load_slam_cam(slam_path)
 
+    out_path = infiller_and_vis(args, start_idx, end_idx, frame_chunks_all, R_w2c_sla_all, t_w2c_sla_all, R_c2w_sla_all, t_c2w_sla_all, seq_folder, imgfiles)
+    return out_path
+
+@spaces.GPU(duration=80)
+def infiller_and_vis(args, start_idx, end_idx, frame_chunks_all, R_w2c_sla_all, t_w2c_sla_all, R_c2w_sla_all, t_c2w_sla_all, seq_folder, imgfiles):
     pred_trans, pred_rot, pred_hand_pose, pred_betas, pred_valid = hawor_infiller(args, start_idx, end_idx, frame_chunks_all)
 
     # vis sequence for this video
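The net effect of the app.py change: render_reconstruction is no longer held as one 200-second GPU job; cached frame chunks are reused when present, and the infill/visualization step moves into its own function with an @spaces.GPU(duration=80) reservation. Below is a minimal sketch of that ZeroGPU stage-splitting pattern; the stage names and dummy workloads are illustrative, not code from this Space.

# Minimal sketch (assumed names, dummy workloads): decorate each GPU-heavy
# stage separately instead of the whole handler, so the ZeroGPU device is
# reserved only for the declared duration of each stage.
import spaces  # Hugging Face ZeroGPU helper, available on Spaces
import torch

@spaces.GPU(duration=80)
def stage_tracking(frames: torch.Tensor) -> torch.Tensor:
    # stands in for hawor_motion_estimation / hawor_slam
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return frames.to(device).mean(dim=0).cpu()

@spaces.GPU(duration=80)
def stage_infill_and_render(track: torch.Tensor) -> torch.Tensor:
    # stands in for infiller_and_vis
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return (track.to(device) * 2).cpu()

def handler(frames: torch.Tensor) -> torch.Tensor:
    # un-decorated orchestrator: CPU-side glue that pays for a GPU only inside the stages
    return stage_infill_and_render(stage_tracking(frames))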
scripts/scripts_test_video/hawor_slam.py CHANGED
@@ -11,6 +11,7 @@ from tqdm import tqdm
 import numpy as np
 import torch
 import cv2
+import spaces
 from PIL import Image
 from glob import glob
 from pycocotools import mask as masktool
@@ -43,6 +44,7 @@ def split_list_by_interval(lst, interval=1000):
 
     return start_indices, end_indices, split_lists
 
+@spaces.GPU(duration=80)
 def hawor_slam(args, start_idx, end_idx):
     # File and folders
     file = args.video_path
scripts/scripts_test_video/hawor_video.py CHANGED
@@ -9,6 +9,7 @@ import cv2
 from tqdm import tqdm
 from glob import glob
 from natsort import natsorted
+import spaces
 
 from lib.pipeline.tools import parse_chunks, parse_chunks_hand_frame
 from lib.models.hawor import HAWOR
@@ -37,7 +38,7 @@ def load_hawor(checkpoint_path):
     return model, model_cfg
 
 
-
+@spaces.GPU(duration=80)
 def hawor_motion_estimation(args, start_idx, end_idx, seq_folder):
     model, model_cfg = load_hawor(args.checkpoint)
     device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
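Both scripts get the same two-line treatment: import spaces, then decorate the GPU entry point with @spaces.GPU(duration=80). As I understand the spaces package, the decorator only takes effect when running on a ZeroGPU Space, and because these functions already pick their device via torch.cuda.is_available(), CPU-only runs should still go through the CPU path. The snippet below is an illustrative sketch of that combination, not repo code.

# Illustrative sketch (hypothetical function, not from the repo): a ZeroGPU
# entry point that keeps the scripts' defensive device selection, so it also
# runs on machines without CUDA.
import spaces
import torch

@spaces.GPU(duration=80)
def gpu_entry_point(x: torch.Tensor) -> torch.Tensor:
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    return x.to(device).sum().cpu()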