# Copyright 2024 Xiao Fu, CUHK, Kuaishou Tech. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# If you find this code useful, we kindly ask you to cite our paper in your work.
# More information about the method can be found at http://fuxiao0719.github.io/projects/3dtrajmaster
# --------------------------------------------------------------------------
import os
import numpy as np
from io import BytesIO
import imageio.v2 as imageio
import open3d
import math
from tools.vis_cam import get_camera_frustum, frustums2lineset
import trimesh

def parse_matrix(matrix_str):
    # Parse a bracketed, space-separated matrix string such as "[a b c d] [e f g h] ..."
    # into a numpy array with one row per bracket group.
    rows = matrix_str.strip().split('] [')
    matrix = []
    for row in rows:
        row = row.replace('[', '').replace(']', '')
        matrix.append(list(map(float, row.split())))
    return np.array(matrix)
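
# Illustrative, hypothetical usage (the matrix string below is made up, not taken
# from the dataset): parse_matrix maps each "[...]" group to one row of the array.
#
#   mat = parse_matrix("[1 0 0 0] [0 1 0 0] [0 0 1 0] [10 20 30 1]")
#   assert mat.shape == (4, 4) and mat[3, 0] == 10.0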

def load_sceneposes(objs_file, obj_idx, obj_transl):
    # Gather the per-frame pose of object `obj_idx`, transpose each row-major matrix,
    # recenter the translation by `obj_transl`, rescale it by a factor of 100, and
    # permute the axes to the visualization convention.
    ext_poses = []
    for key in objs_file.keys():
        ext_poses.append(parse_matrix(objs_file[key][obj_idx]['matrix']))
    ext_poses = np.stack(ext_poses)
    ext_poses = np.transpose(ext_poses, (0, 2, 1))
    ext_poses[:, :3, 3] -= obj_transl
    ext_poses[:, :3, 3] /= 100.
    ext_poses = ext_poses[:, :, [1, 2, 0, 3]]
    return ext_poses
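
# Hypothetical usage sketch, assuming `objs_file` is a frame-keyed dict whose values
# are per-object lists with a 'matrix' string (the exact dataset layout may differ):
#
#   objs_file = {"frame_0000": [{"matrix": "[1 0 0 0] [0 1 0 0] [0 0 1 0] [100 0 0 1]"}]}
#   poses = load_sceneposes(objs_file, obj_idx=0, obj_transl=np.zeros(3))
#   assert poses.shape == (1, 4, 4)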

def save_images2video(images, video_name, fps):
    # Encode a sequence of HxWx3 uint8 frames as an H.264 mp4 and write it to
    # "<video_name>.mp4".
    video_stream = BytesIO()
    with imageio.get_writer(
        video_stream,
        fps=fps,
        format="mp4",
        codec="libx264",
        ffmpeg_params=["-crf", "12"],
        pixelformat="yuv420p",
    ) as writer:
        for image in images:
            writer.append_data(image)
    video_data = video_stream.getvalue()
    output_path = video_name + ".mp4"
    with open(output_path, "wb") as f:
        f.write(video_data)
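
# Hypothetical usage sketch (frame size and file name are arbitrary); mp4 encoding
# requires an ffmpeg backend such as imageio-ffmpeg to be installed:
#
#   frames = [np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8) for _ in range(16)]
#   save_images2video(frames, "demo_video", fps=8)   # writes demo_video.mp4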

def normalize(x):
    return x / np.linalg.norm(x)

def viewmatrix(z, up, pos):
    # Assemble a 3x4 camera matrix whose columns are (right, up, forward, position),
    # given a viewing direction `z`, an approximate up vector, and a position.
    vec2 = normalize(z)
    vec1_avg = up
    vec0 = normalize(np.cross(vec1_avg, vec2))
    vec1 = normalize(np.cross(vec2, vec0))
    m = np.stack([vec0, vec1, vec2, pos], 1)
    return m
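
# Hypothetical sanity check: a camera at the origin looking along +z with +y up
# yields an axis-aligned 3x4 matrix.
#
#   m = viewmatrix(z=np.array([0., 0., 1.]), up=np.array([0., 1., 0.]), pos=np.zeros(3))
#   assert m.shape == (3, 4) and np.allclose(m[:, 0], [1., 0., 0.])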

def matrix_to_euler_angles(matrix):
    # Decompose the upper-left 3x3 rotation block into x-y-z Euler angles,
    # returned in degrees.
    sy = math.sqrt(matrix[0][0] * matrix[0][0] + matrix[1][0] * matrix[1][0])
    singular = sy < 1e-6
    if not singular:
        x = math.atan2(matrix[2][1], matrix[2][2])
        y = math.atan2(-matrix[2][0], sy)
        z = math.atan2(matrix[1][0], matrix[0][0])
    else:
        # Gimbal-lock case: the decomposition is not unique, so z is fixed to 0.
        x = math.atan2(-matrix[1][2], matrix[1][1])
        y = math.atan2(-matrix[2][0], sy)
        z = 0
    return math.degrees(x), math.degrees(y), math.degrees(z)

def eul2rot(theta):
    # Recompose a rotation matrix R = Rz(theta[2]) @ Ry(theta[1]) @ Rx(theta[0])
    # from Euler angles given in radians, and return its transpose.
    sx, cx = np.sin(theta[0]), np.cos(theta[0])
    sy, cy = np.sin(theta[1]), np.cos(theta[1])
    sz, cz = np.sin(theta[2]), np.cos(theta[2])
    R = np.array([
        [cy * cz, sx * sy * cz - sz * cx, sy * cx * cz + sx * sz],
        [sz * cy, sx * sy * sz + cx * cz, sy * sz * cx - sx * cz],
        [-sy,     sx * cy,                cx * cy],
    ])
    return R.T
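
# Hypothetical round-trip sketch: matrix_to_euler_angles returns degrees, so convert
# to radians before feeding eul2rot; for the identity the round trip is exact.
#
#   angles_deg = matrix_to_euler_angles(np.eye(3))        # (0.0, 0.0, 0.0)
#   assert np.allclose(eul2rot(np.radians(angles_deg)), np.eye(3))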

def extract_location_rotation(data):
    # Rebuild a 4x4 transform for every entry: the translation comes from the last
    # row of the parsed row-major matrix, and the rotation is recovered by
    # decomposing the matrix into Euler angles and recomposing it with eul2rot.
    results = {}
    for key, value in data.items():
        matrix = parse_matrix(value)
        location = np.array([matrix[3][0], matrix[3][1], matrix[3][2]])
        # matrix_to_euler_angles returns degrees while eul2rot expects radians.
        rotation = eul2rot(np.radians(matrix_to_euler_angles(matrix)))
        transformed_matrix = np.identity(4)
        transformed_matrix[:3, 3] = location
        transformed_matrix[:3, :3] = rotation
        results[key] = transformed_matrix
    return results
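
# Hypothetical usage sketch, assuming `data` maps names to row-major matrix strings
# with the translation in the last row (the string below is illustrative only):
#
#   data = {"obj": "[1 0 0 0] [0 1 0 0] [0 0 1 0] [100 200 300 1]"}
#   transforms = extract_location_rotation(data)
#   assert np.allclose(transforms["obj"][:3, 3], [100., 200., 300.])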

def get_cam_points_vis(W, H, intrinsics, ext_pose, color, frustum_length):
    # Densely sample points along the edges of the camera frustum and return them as
    # an open3d point cloud painted with `color`.
    cam = get_camera_frustum((W, H), intrinsics, np.linalg.inv(ext_pose),
                             frustum_length=frustum_length, color=[0., 0., 1.])
    cam_points = cam[0]
    for item in cam[1]:
        cam_points = np.concatenate(
            (cam_points, np.linspace(cam[0][item[0]], cam[0][item[1]], num=1000))
        )
    cam_points[:, 0] *= -1  # mirror the x axis for the visualization frame
    cam_points = trimesh.points.PointCloud(vertices=cam_points, colors=[0, 255, 0, 255])
    cam_points_vis = open3d.geometry.PointCloud()
    cam_points_vis.points = open3d.utility.Vector3dVector(cam_points.vertices)
    cam_points_vis.paint_uniform_color(color)
    return cam_points_vis

def batch_axis_angle_to_rotation_matrix(r_batch):
    # Convert a batch of axis-angle vectors with shape (N, 3) into rotation matrices
    # with shape (N, 3, 3) via the Rodrigues formula; a zero vector maps to identity.
    rotation_matrices = []
    for r in r_batch:
        theta = np.linalg.norm(r)
        if theta == 0:
            rotation_matrices.append(np.eye(3))
        else:
            kx, ky, kz = r / theta
            K = np.array([
                [0, -kz, ky],
                [kz, 0, -kx],
                [-ky, kx, 0],
            ])
            R = np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * np.dot(K, K)
            rotation_matrices.append(R)
    return np.array(rotation_matrices)
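
# Hypothetical sanity check: a rotation of pi/2 about +z maps the x axis onto the
# y axis, and a zero axis-angle vector maps to the identity.
#
#   Rs = batch_axis_angle_to_rotation_matrix(np.array([[0., 0., np.pi / 2], [0., 0., 0.]]))
#   assert np.allclose(Rs[0] @ np.array([1., 0., 0.]), [0., 1., 0.])
#   assert np.allclose(Rs[1], np.eye(3))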