import torch


def subsample_tensor(tensor, original_fps=30, target_fps=25):
    """Subsample a sequence of frames along dim 0 from original_fps to target_fps."""
    # Total number of original frames
    total_frames = tensor.shape[0]
    # Number of frames at the target frame rate
    new_frames = int(total_frames * (target_fps / original_fps))
    # Evenly spaced indices for subsampling
    indices = torch.linspace(0, total_frames - 1, new_frames).long()
    # Select the frames at those indices
    subsampled_tensor = tensor[indices]
    return subsampled_tensor
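

# Minimal usage sketch (assumption: a dummy motion tensor shaped (frames, joints, 3));
# 90 frames at 30 fps reduce to 75 frames at 25 fps:
#
#   motion = torch.randn(90, 24, 3)
#   clip = subsample_tensor(motion, original_fps=30, target_fps=25)
#   assert clip.shape == (75, 24, 3)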


def get_render(body_model_loaded,
               body_trans,
               body_orient, body_pose,
               output_path, text='',
               colors=[]):
    """Run the SMPL forward pass for one or two bodies and render the result to output_path."""
    from renderer.utils import run_smpl_fwd_vertices

    vertices_list = []
    # Wrap single inputs in lists so one body and a body pair share the same code path
    if not isinstance(body_trans, list):
        body_trans = [body_trans]
    if not isinstance(body_orient, list):
        body_orient = [body_orient]
    if not isinstance(body_pose, list):
        body_pose = [body_pose]

    for trans, orient, pose in zip(body_trans,
                                   body_orient,
                                   body_pose):
        vertices = run_smpl_fwd_vertices(body_model_loaded,
                                         trans,
                                         orient,
                                         pose)
        vertices = vertices.vertices
        # vertices = subsample_tensor(vertices, original_fps=30, target_fps=25)
        vertices = vertices.detach().cpu().numpy()
        vertices_list.append(vertices)

    # Initialise the renderer
    from renderer.humor import HumorRenderer
    fps = 30.0
    imw = 720  # 480
    imh = 540  # 360
    renderer = HumorRenderer(fps=fps, imw=imw, imh=imh)

    # Render a pair of bodies in one scene, or a single body
    if len(vertices_list) == 2:
        renderer(vertices_list, output_path, render_pair=True,
                 fps=fps, colors=colors)
    else:
        renderer(vertices_list[0], output_path, render_pair=False,
                 fps=fps, colors=colors)
    return output_path
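

# Usage sketch (assumptions: `body_model` is a pre-loaded SMPL body model compatible
# with run_smpl_fwd_vertices, and trans/orient/pose are per-frame tensors; pass lists
# of two tensors to render an interacting pair):
#
#   video_path = get_render(body_model,
#                           body_trans=[trans_a, trans_b],
#                           body_orient=[orient_a, orient_b],
#                           body_pose=[pose_a, pose_b],
#                           output_path='pair_render.mp4',
#                           colors=pair_colors)  # color spec in whatever format HumorRenderer expects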