Delete utils/.ipynb_checkpoints
- utils/.ipynb_checkpoints/__init__-checkpoint.py +0 -0
- utils/.ipynb_checkpoints/camera-checkpoint.py +0 -120
- utils/.ipynb_checkpoints/depth-checkpoint.py +0 -62
- utils/.ipynb_checkpoints/general-checkpoint.py +0 -140
- utils/.ipynb_checkpoints/graphics-checkpoint.py +0 -83
- utils/.ipynb_checkpoints/image-checkpoint.py +0 -20
- utils/.ipynb_checkpoints/loss-checkpoint.py +0 -99
- utils/.ipynb_checkpoints/sh-checkpoint.py +0 -120
- utils/.ipynb_checkpoints/system-checkpoint.py +0 -29
- utils/.ipynb_checkpoints/trajectory-checkpoint.py +0 -621
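These files are Jupyter autosave copies of the modules in utils/, so deleting them drops no functionality. As an aside (not part of this commit), a small Python sketch like the following reproduces the cleanup locally before committing; the remove_checkpoint_dirs helper and the "utils" path are illustrative assumptions.

    # Hypothetical cleanup sketch (not part of this repo): remove Jupyter
    # autosave directories so they never end up in a commit.
    import shutil
    from pathlib import Path

    def remove_checkpoint_dirs(root="."):
        # .ipynb_checkpoints folders are created by Jupyter next to notebooks;
        # they are safe to delete and are normally listed in .gitignore.
        for ckpt in Path(root).rglob(".ipynb_checkpoints"):
            if ckpt.is_dir():
                shutil.rmtree(ckpt)
                print(f"removed {ckpt}")

    if __name__ == "__main__":
        remove_checkpoint_dirs("utils")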
utils/.ipynb_checkpoints/__init__-checkpoint.py
DELETED
(empty file)
utils/.ipynb_checkpoints/camera-checkpoint.py
DELETED
@@ -1,120 +0,0 @@
-#
-# Copyright (C) 2023, Inria
-# GRAPHDECO research group, https://team.inria.fr/graphdeco
-# All rights reserved.
-#
-# This software is free for non-commercial, research and evaluation use
-# under the terms of the LICENSE.md file.
-#
-# For inquiries contact george.drettakis@inria.fr
-#
-import json
-
-import numpy as np
-import torch
-
-from scene.cameras import Camera, MiniCam
-from utils.general import PILtoTorch
-from utils.graphics import fov2focal, focal2fov, getWorld2View, getProjectionMatrix
-
-
-WARNED = False
-
-
-def load_json(path, H, W):
-    cams = []
-    with open(path) as json_file:
-        contents = json.load(json_file)
-        FoVx = contents["camera_angle_x"]
-        FoVy = focal2fov(fov2focal(FoVx, W), H)
-        zfar = 100.0
-        znear = 0.01
-
-        frames = contents["frames"]
-        for idx, frame in enumerate(frames):
-            # NeRF 'transform_matrix' is a camera-to-world transform
-            c2w = np.array(frame["transform_matrix"])
-            # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
-            c2w[:3, 1:3] *= -1
-            if c2w.shape[0] == 3:
-                one = np.zeros((1, 4))
-                one[0, -1] = 1
-                c2w = np.concatenate((c2w, one), axis=0)
-
-            # get the world-to-camera transform and set R, T
-            w2c = np.linalg.inv(c2w)
-            R = np.transpose(w2c[:3, :3])  # R is stored transposed due to 'glm' in CUDA code
-            T = w2c[:3, 3]
-
-            w2c = torch.as_tensor(getWorld2View(R, T)).T.cuda()
-            proj = getProjectionMatrix(znear, zfar, FoVx, FoVy).T.cuda()
-            cams.append(MiniCam(W, H, FoVx, FoVy, znear, zfar, w2c, w2c @ proj))
-    return cams
-
-
-def loadCam(args, id, cam_info, resolution_scale):
-    orig_w, orig_h = cam_info.image.size
-
-    if args.resolution in [1, 2, 4, 8]:
-        resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
-    else:  # should be a type that converts to float
-        if args.resolution == -1:
-            if orig_w > 1600:
-                global WARNED
-                if not WARNED:
-                    print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
-                        "If this is not desired, please explicitly specify '--resolution/-r' as 1")
-                    WARNED = True
-                global_down = orig_w / 1600
-            else:
-                global_down = 1
-        else:
-            global_down = orig_w / args.resolution
-
-        scale = float(global_down) * float(resolution_scale)
-        resolution = (int(orig_w / scale), int(orig_h / scale))
-
-    resized_image_rgb = PILtoTorch(cam_info.image, resolution)
-
-    gt_image = resized_image_rgb[:3, ...]
-    loaded_mask = None
-
-    if resized_image_rgb.shape[1] == 4:
-        loaded_mask = resized_image_rgb[3:4, ...]
-
-    return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T,
-                  FoVx=cam_info.FovX, FoVy=cam_info.FovY,
-                  image=gt_image, gt_alpha_mask=loaded_mask,
-                  image_name=cam_info.image_name, uid=id, data_device=args.data_device)
-
-
-def cameraList_from_camInfos(cam_infos, resolution_scale, args):
-    camera_list = []
-
-    for id, c in enumerate(cam_infos):
-        camera_list.append(loadCam(args, id, c, resolution_scale))
-
-    return camera_list
-
-
-def camera_to_JSON(id, camera : Camera):
-    Rt = np.zeros((4, 4))
-    Rt[:3, :3] = camera.R.transpose()
-    Rt[:3, 3] = camera.T
-    Rt[3, 3] = 1.0
-
-    W2C = np.linalg.inv(Rt)
-    pos = W2C[:3, 3]
-    rot = W2C[:3, :3]
-    serializable_array_2d = [x.tolist() for x in rot]
-    camera_entry = {
-        'id' : id,
-        'img_name' : camera.image_name,
-        'width' : camera.width,
-        'height' : camera.height,
-        'position': pos.tolist(),
-        'rotation': serializable_array_2d,
-        'fy' : fov2focal(camera.FovY, camera.height),
-        'fx' : fov2focal(camera.FovX, camera.width)
-    }
-    return camera_entry
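The axis-flip step in load_json above can be sanity-checked in isolation. A minimal sketch, assuming only numpy (the scene.cameras imports are not needed for this part):

    # Standalone sketch of load_json's coordinate conversion (numpy only).
    import numpy as np

    c2w = np.eye(4)                # toy camera-to-world pose in OpenGL/Blender convention
    c2w[:3, 1:3] *= -1             # negate the Y (up) and Z (back) columns -> COLMAP's Y down, Z forward
    w2c = np.linalg.inv(c2w)       # world-to-camera, as used to extract R and T
    R = np.transpose(w2c[:3, :3])  # stored transposed, matching the 'glm' convention in the CUDA code
    T = w2c[:3, 3]
    print(R, T)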
utils/.ipynb_checkpoints/depth-checkpoint.py
DELETED
@@ -1,62 +0,0 @@
-import matplotlib
-import matplotlib.cm
-import numpy as np
-import torch
-
-
-def colorize(value, vmin=None, vmax=None, cmap='jet', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
-    """Converts a depth map to a color image.
-
-    Args:
-        value (torch.Tensor, numpy.ndarry): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
-        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.
-        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.
-        cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'.
-        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
-        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
-        background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).
-        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
-        value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
-
-    Returns:
-        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
-    """
-    if isinstance(value, torch.Tensor):
-        value = value.detach().cpu().numpy()
-
-    value = value.squeeze()
-    if invalid_mask is None:
-        invalid_mask = value == invalid_val
-    mask = np.logical_not(invalid_mask)
-
-    # normalize
-    vmin = np.percentile(value[mask],2) if vmin is None else vmin
-    vmax = np.percentile(value[mask],98) if vmax is None else vmax
-    if vmin != vmax:
-        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
-    else:
-        # Avoid 0-division
-        value = value * 0.
-
-    # squeeze last dim if it exists
-    # grey out the invalid values
-
-    value[invalid_mask] = np.nan
-    cmapper = matplotlib.cm.get_cmap(cmap)
-    if value_transform:
-        value = value_transform(value)
-        # value = value / value.max()
-    value = cmapper(value, bytes=True)  # (nxmx4)
-
-    # img = value[:, :, :]
-    img = value[...]
-    img[invalid_mask] = background_color
-
-    # return img.transpose((2, 0, 1))
-    if gamma_corrected:
-        # gamma correction
-        img = img / 255
-        img = np.power(img, 2.2)
-        img = img * 255
-        img = img.astype(np.uint8)
-    return img
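For reference, a minimal usage sketch for the deleted colorize helper, assuming a matplotlib version that still provides matplotlib.cm.get_cmap and a synthetic depth map:

    # Sketch: colorize a synthetic depth map with an invalid corner.
    import numpy as np
    depth = np.random.rand(64, 64) * 10.0
    depth[:8, :8] = -99                 # corner marked invalid (matches invalid_val)
    rgba = colorize(depth, cmap='jet')  # uint8 (64, 64, 4); invalid pixels get background_color
    print(rgba.shape, rgba.dtype)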
utils/.ipynb_checkpoints/general-checkpoint.py
DELETED
@@ -1,140 +0,0 @@
-#
-# Copyright (C) 2023, Inria
-# GRAPHDECO research group, https://team.inria.fr/graphdeco
-# All rights reserved.
-#
-# This software is free for non-commercial, research and evaluation use
-# under the terms of the LICENSE.md file.
-#
-# For inquiries contact george.drettakis@inria.fr
-#
-import sys
-import random
-from datetime import datetime
-import numpy as np
-import torch
-
-
-def inverse_sigmoid(x):
-    return torch.log(x/(1-x))
-
-
-def PILtoTorch(pil_image, resolution):
-    resized_image_PIL = pil_image.resize(resolution)
-    resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0
-    if len(resized_image.shape) == 3:
-        return resized_image.permute(2, 0, 1)
-    else:
-        return resized_image.unsqueeze(dim=-1).permute(2, 0, 1)
-
-
-def get_expon_lr_func(
-    lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
-):
-    """
-    Copied from Plenoxels
-
-    Continuous learning rate decay function. Adapted from JaxNeRF
-    The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
-    is log-linearly interpolated elsewhere (equivalent to exponential decay).
-    If lr_delay_steps>0 then the learning rate will be scaled by some smooth
-    function of lr_delay_mult, such that the initial learning rate is
-    lr_init*lr_delay_mult at the beginning of optimization but will be eased back
-    to the normal learning rate when steps>lr_delay_steps.
-    :param conf: config subtree 'lr' or similar
-    :param max_steps: int, the number of steps during optimization.
-    :return HoF which takes step as input
-    """
-
-    def helper(step):
-        if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
-            # Disable this parameter
-            return 0.0
-        if lr_delay_steps > 0:
-            # A kind of reverse cosine decay.
-            delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
-                0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
-            )
-        else:
-            delay_rate = 1.0
-        t = np.clip(step / max_steps, 0, 1)
-        log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
-        return delay_rate * log_lerp
-
-    return helper
-
-
-def strip_lowerdiag(L):
-    uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
-
-    uncertainty[:, 0] = L[:, 0, 0]
-    uncertainty[:, 1] = L[:, 0, 1]
-    uncertainty[:, 2] = L[:, 0, 2]
-    uncertainty[:, 3] = L[:, 1, 1]
-    uncertainty[:, 4] = L[:, 1, 2]
-    uncertainty[:, 5] = L[:, 2, 2]
-    return uncertainty
-
-
-def strip_symmetric(sym):
-    return strip_lowerdiag(sym)
-
-
-def build_rotation(r):
-    norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
-
-    q = r / norm[:, None]
-
-    R = torch.zeros((q.size(0), 3, 3), device='cuda')
-
-    r = q[:, 0]
-    x = q[:, 1]
-    y = q[:, 2]
-    z = q[:, 3]
-
-    R[:, 0, 0] = 1 - 2 * (y*y + z*z)
-    R[:, 0, 1] = 2 * (x*y - r*z)
-    R[:, 0, 2] = 2 * (x*z + r*y)
-    R[:, 1, 0] = 2 * (x*y + r*z)
-    R[:, 1, 1] = 1 - 2 * (x*x + z*z)
-    R[:, 1, 2] = 2 * (y*z - r*x)
-    R[:, 2, 0] = 2 * (x*z - r*y)
-    R[:, 2, 1] = 2 * (y*z + r*x)
-    R[:, 2, 2] = 1 - 2 * (x*x + y*y)
-    return R
-
-
-def build_scaling_rotation(s, r):
-    L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
-    R = build_rotation(r)
-
-    L[:,0,0] = s[:,0]
-    L[:,1,1] = s[:,1]
-    L[:,2,2] = s[:,2]
-
-    L = R @ L
-    return L
-
-
-def safe_state(silent):
-    old_f = sys.stdout
-    class F:
-        def __init__(self, silent):
-            self.silent = silent
-
-        def write(self, x):
-            if not self.silent:
-                if x.endswith("\n"):
-                    old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
-                else:
-                    old_f.write(x)
-
-        def flush(self):
-            old_f.flush()
-
-    sys.stdout = F(silent)
-
-    random.seed(0)
-    np.random.seed(0)
-    torch.manual_seed(0)
-    torch.cuda.set_device(torch.device("cuda:0"))
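The scheduler returned by get_expon_lr_func hits its endpoints exactly and is a geometric interpolation in between; a quick sketch with assumed hyperparameters:

    # Sketch: endpoint behaviour of the log-linear learning-rate schedule.
    lr_fn = get_expon_lr_func(lr_init=1.6e-4, lr_final=1.6e-6, max_steps=30000)
    print(lr_fn(0))      # 1.6e-4 exactly
    print(lr_fn(15000))  # geometric mean of the two rates, ~1.6e-5
    print(lr_fn(30000))  # 1.6e-6 exactly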
utils/.ipynb_checkpoints/graphics-checkpoint.py
DELETED
@@ -1,83 +0,0 @@
-#
-# Copyright (C) 2023, Inria
-# GRAPHDECO research group, https://team.inria.fr/graphdeco
-# All rights reserved.
-#
-# This software is free for non-commercial, research and evaluation use
-# under the terms of the LICENSE.md file.
-#
-# For inquiries contact george.drettakis@inria.fr
-#
-import math
-from typing import NamedTuple
-import numpy as np
-import torch
-
-
-class BasicPointCloud(NamedTuple):
-    points : np.array
-    colors : np.array
-    normals : np.array
-
-
-def geom_transform_points(points, transf_matrix):
-    P, _ = points.shape
-    ones = torch.ones(P, 1, dtype=points.dtype, device=points.device)
-    points_hom = torch.cat([points, ones], dim=1)
-    points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0))
-
-    denom = points_out[..., 3:] + 0.0000001
-    return (points_out[..., :3] / denom).squeeze(dim=0)
-
-
-def getWorld2View(R, t):
-    Rt = np.zeros((4, 4))
-    Rt[:3, :3] = R.transpose()
-    Rt[:3, 3] = t
-    Rt[3, 3] = 1.0
-    return np.float32(Rt)
-
-
-def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
-    Rt = np.zeros((4, 4))
-    Rt[:3, :3] = R.transpose()
-    Rt[:3, 3] = t
-    Rt[3, 3] = 1.0
-
-    C2W = np.linalg.inv(Rt)
-    cam_center = C2W[:3, 3]
-    cam_center = (cam_center + translate) * scale
-    C2W[:3, 3] = cam_center
-    Rt = np.linalg.inv(C2W)
-    return np.float32(Rt)
-
-
-def getProjectionMatrix(znear, zfar, fovX, fovY):
-    tanHalfFovY = math.tan((fovY / 2))
-    tanHalfFovX = math.tan((fovX / 2))
-
-    top = tanHalfFovY * znear
-    bottom = -top
-    right = tanHalfFovX * znear
-    left = -right
-
-    P = torch.zeros(4, 4)
-
-    z_sign = 1.0
-
-    P[0, 0] = 2.0 * znear / (right - left)
-    P[1, 1] = 2.0 * znear / (top - bottom)
-    P[0, 2] = (right + left) / (right - left)
-    P[1, 2] = (top + bottom) / (top - bottom)
-    P[3, 2] = z_sign
-    P[2, 2] = z_sign * zfar / (zfar - znear)
-    P[2, 3] = -(zfar * znear) / (zfar - znear)
-    return P
-
-
-def fov2focal(fov, pixels):
-    return pixels / (2 * math.tan(fov / 2))
-
-
-def focal2fov(focal, pixels):
-    return 2*math.atan(pixels/(2*focal))
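fov2focal and focal2fov above are exact inverses for a fixed pixel count, which is what load_json relies on to derive FoVy from FoVx; a minimal check:

    # Sketch: round-trip between field of view and focal length.
    import math
    fov = math.radians(60.0)
    focal = fov2focal(fov, 800)                    # 800 / (2 * tan(30 deg)) ~ 692.8
    assert abs(focal2fov(focal, 800) - fov) < 1e-12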
utils/.ipynb_checkpoints/image-checkpoint.py
DELETED
@@ -1,20 +0,0 @@
-#
-# Copyright (C) 2023, Inria
-# GRAPHDECO research group, https://team.inria.fr/graphdeco
-# All rights reserved.
-#
-# This software is free for non-commercial, research and evaluation use
-# under the terms of the LICENSE.md file.
-#
-# For inquiries contact george.drettakis@inria.fr
-#
-import torch
-
-
-def mse(img1, img2):
-    return (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
-
-
-def psnr(img1, img2):
-    mse = (((img1 - img2)) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
-    return 20 * torch.log10(1.0 / torch.sqrt(mse))
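A minimal sketch of the psnr helper on synthetic tensors (shapes here are assumptions; any batched image tensor works):

    # Sketch: PSNR against a slightly noisy copy of the same image.
    import torch
    img = torch.rand(1, 3, 32, 32)
    noisy = (img + 0.01 * torch.randn_like(img)).clamp(0, 1)
    print(psnr(img, noisy))  # shape (1, 1); roughly 40 dB for noise std 0.01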
utils/.ipynb_checkpoints/loss-checkpoint.py
DELETED
@@ -1,99 +0,0 @@
-#
-# Copyright (C) 2023, Inria
-# GRAPHDECO research group, https://team.inria.fr/graphdeco
-# All rights reserved.
-#
-# This software is free for non-commercial, research and evaluation use
-# under the terms of the LICENSE.md file.
-#
-# For inquiries contact george.drettakis@inria.fr
-#
-from math import exp
-
-import torch
-import torch.nn.functional as F
-from torch.autograd import Variable
-
-
-def l1_loss(network_output, gt):
-    return torch.abs((network_output - gt)).mean()
-
-
-def l2_loss(network_output, gt):
-    return ((network_output - gt) ** 2).mean()
-
-
-def gaussian(window_size, sigma):
-    gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
-    return gauss / gauss.sum()
-
-
-def create_window(window_size, channel):
-    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
-    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
-    window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
-    return window
-
-
-def ssim(img1, img2, window_size=11, size_average=True):
-    channel = img1.size(-3)
-    window = create_window(window_size, channel)
-
-    if img1.is_cuda:
-        window = window.cuda(img1.get_device())
-    window = window.type_as(img1)
-
-    return _ssim(img1, img2, window, window_size, channel, size_average)
-
-
-def _ssim(img1, img2, window, window_size, channel, size_average=True):
-    mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
-    mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
-
-    mu1_sq = mu1.pow(2)
-    mu2_sq = mu2.pow(2)
-    mu1_mu2 = mu1 * mu2
-
-    sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
-    sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
-    sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
-
-    C1 = 0.01 ** 2
-    C2 = 0.03 ** 2
-
-    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
-
-    if size_average:
-        return ssim_map.mean()
-    else:
-        return ssim_map.mean(1).mean(1).mean(1)
-
-
-import numpy as np
-import cv2
-def image2canny(image, thres1, thres2, isEdge1=True):
-    """ image: (H, W, 3)"""
-    canny_mask = torch.from_numpy(cv2.Canny((image.detach().cpu().numpy()*255.).astype(np.uint8), thres1, thres2)/255.)
-    if not isEdge1:
-        canny_mask = 1. - canny_mask
-    return canny_mask.float()
-
-with torch.no_grad():
-    kernelsize=3
-    conv = torch.nn.Conv2d(1, 1, kernel_size=kernelsize, padding=(kernelsize//2))
-    kernel = torch.tensor([[0.,1.,0.],[1.,0.,1.],[0.,1.,0.]]).reshape(1,1,kernelsize,kernelsize)
-    conv.weight.data = kernel #torch.ones((1,1,kernelsize,kernelsize))
-    conv.bias.data = torch.tensor([0.])
-    conv.requires_grad_(False)
-    conv = conv.cuda()
-
-
-def nearMean_map(array, mask, kernelsize=3):
-    """ array: (H,W) / mask: (H,W) """
-    cnt_map = torch.ones_like(array)
-
-    nearMean_map = conv((array * mask)[None,None])
-    cnt_map = conv((cnt_map * mask)[None,None])
-    nearMean_map = (nearMean_map / (cnt_map+1e-8)).squeeze()
-
-    return nearMean_map
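A short usage sketch for ssim; note that merely importing this module requires a CUDA device, because of the module-level conv = conv.cuda():

    # Sketch: SSIM of an image with itself is 1; l1_loss is plain MAE.
    import torch
    img = torch.rand(1, 3, 64, 64)
    print(ssim(img, img))           # tensor(1.) up to floating-point error
    print(l1_loss(img, img * 0.5))  # mean absolute error against a darkened copy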
utils/.ipynb_checkpoints/sh-checkpoint.py
DELETED
@@ -1,120 +0,0 @@
-# Copyright 2021 The PlenOctree Authors.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# 1. Redistributions of source code must retain the above copyright notice,
-# this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-import torch
-
-
-C0 = 0.28209479177387814
-C1 = 0.4886025119029199
-C2 = [
-    1.0925484305920792,
-    -1.0925484305920792,
-    0.31539156525252005,
-    -1.0925484305920792,
-    0.5462742152960396
-]
-C3 = [
-    -0.5900435899266435,
-    2.890611442640554,
-    -0.4570457994644658,
-    0.3731763325901154,
-    -0.4570457994644658,
-    1.445305721320277,
-    -0.5900435899266435
-]
-C4 = [
-    2.5033429417967046,
-    -1.7701307697799304,
-    0.9461746957575601,
-    -0.6690465435572892,
-    0.10578554691520431,
-    -0.6690465435572892,
-    0.47308734787878004,
-    -1.7701307697799304,
-    0.6258357354491761,
-]
-
-
-def eval_sh(deg, sh, dirs):
-    """
-    Evaluate spherical harmonics at unit directions
-    using hardcoded SH polynomials.
-    Works with torch/np/jnp.
-    ... Can be 0 or more batch dimensions.
-    Args:
-        deg: int SH deg. Currently, 0-3 supported
-        sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
-        dirs: jnp.ndarray unit directions [..., 3]
-    Returns:
-        [..., C]
-    """
-    assert deg <= 4 and deg >= 0
-    coeff = (deg + 1) ** 2
-    assert sh.shape[-1] >= coeff
-
-    result = C0 * sh[..., 0]
-    if deg > 0:
-        x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
-        result = (result -
-                  C1 * y * sh[..., 1] +
-                  C1 * z * sh[..., 2] -
-                  C1 * x * sh[..., 3])
-
-        if deg > 1:
-            xx, yy, zz = x * x, y * y, z * z
-            xy, yz, xz = x * y, y * z, x * z
-            result = (result +
-                      C2[0] * xy * sh[..., 4] +
-                      C2[1] * yz * sh[..., 5] +
-                      C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
-                      C2[3] * xz * sh[..., 7] +
-                      C2[4] * (xx - yy) * sh[..., 8])
-
-            if deg > 2:
-                result = (result +
-                          C3[0] * y * (3 * xx - yy) * sh[..., 9] +
-                          C3[1] * xy * z * sh[..., 10] +
-                          C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
-                          C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
-                          C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
-                          C3[5] * z * (xx - yy) * sh[..., 14] +
-                          C3[6] * x * (xx - 3 * yy) * sh[..., 15])
-
-                if deg > 3:
-                    result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
-                              C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
-                              C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
-                              C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
-                              C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
-                              C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
-                              C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
-                              C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
-                              C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
-    return result
-
-
-def RGB2SH(rgb):
-    return (rgb - 0.5) / C0
-
-
-def SH2RGB(sh):
-    return sh * C0 + 0.5
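At degree 0, eval_sh reduces to C0 * sh[..., 0], so it inverts RGB2SH up to the 0.5 offset; a minimal check:

    # Sketch: degree-0 SH evaluation round-trips RGB2SH (offset by 0.5).
    import torch
    rgb = torch.rand(5, 3)
    sh = RGB2SH(rgb).unsqueeze(-1)  # [5, 3, 1]: one coefficient per channel
    dirs = torch.randn(5, 3)        # directions are unused at degree 0
    out = eval_sh(0, sh, dirs)      # C0 * sh[..., 0] == rgb - 0.5
    assert torch.allclose(out + 0.5, rgb)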
utils/.ipynb_checkpoints/system-checkpoint.py
DELETED
@@ -1,29 +0,0 @@
-#
-# Copyright (C) 2023, Inria
-# GRAPHDECO research group, https://team.inria.fr/graphdeco
-# All rights reserved.
-#
-# This software is free for non-commercial, research and evaluation use
-# under the terms of the LICENSE.md file.
-#
-# For inquiries contact george.drettakis@inria.fr
-#
-from errno import EEXIST
-from os import makedirs, path
-import os
-
-
-def mkdir_p(folder_path):
-    # Creates a directory. equivalent to using mkdir -p on the command line
-    try:
-        makedirs(folder_path)
-    except OSError as exc:  # Python >2.5
-        if exc.errno == EEXIST and path.isdir(folder_path):
-            pass
-        else:
-            raise
-
-
-def searchForMaxIteration(folder):
-    saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)]
-    return max(saved_iters)
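A minimal sketch of the two system helpers together, using a temporary directory (the iteration_* naming mirrors how Gaussian-splatting checkpoints are typically saved, an assumption here):

    # Sketch: searchForMaxIteration picks the highest numeric suffix.
    import os, tempfile
    root = tempfile.mkdtemp()
    for it in (7000, 30000):
        mkdir_p(os.path.join(root, "iteration_{}".format(it)))
    print(searchForMaxIteration(root))  # 30000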
utils/.ipynb_checkpoints/trajectory-checkpoint.py
DELETED
@@ -1,621 +0,0 @@
|
|
1 |
-
# Copyright (C) 2023, Computer Vision Lab, Seoul National University, https://cv.snu.ac.kr
|
2 |
-
#
|
3 |
-
# Copyright 2023 LucidDreamer Authors
|
4 |
-
#
|
5 |
-
# Computer Vision Lab, SNU, its affiliates and licensors retain all intellectual
|
6 |
-
# property and proprietary rights in and to this material, related
|
7 |
-
# documentation and any modifications thereto. Any use, reproduction,
|
8 |
-
# disclosure or distribution of this material and related documentation
|
9 |
-
# without an express license agreement from the Computer Vision Lab, SNU or
|
10 |
-
# its affiliates is strictly prohibited.
|
11 |
-
#
|
12 |
-
# For permission requests, please contact robot0321@snu.ac.kr, esw0116@snu.ac.kr, namhj28@gmail.com, jarin.lee@gmail.com.
|
13 |
-
import os
|
14 |
-
import numpy as np
|
15 |
-
import torch
|
16 |
-
|
17 |
-
|
18 |
-
def generate_seed(scale, viewangle):
|
19 |
-
# World 2 Camera
|
20 |
-
#### rotate x,y
|
21 |
-
render_poses = [np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]])]
|
22 |
-
ang = 5
|
23 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
24 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
25 |
-
posetemp = np.zeros((3, 4))
|
26 |
-
posetemp[:3,:3] = np.matmul(np.eye(3),
|
27 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
|
28 |
-
posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
|
29 |
-
render_poses.append(posetemp)
|
30 |
-
|
31 |
-
for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
32 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
33 |
-
posetemp = np.zeros((3, 4))
|
34 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
|
35 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
|
36 |
-
posetemp[:3,3:4] = np.array([1,0,0]).reshape(3,1) # * scale # Transition vector
|
37 |
-
render_poses.append(posetemp)
|
38 |
-
|
39 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
40 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
41 |
-
posetemp = np.zeros((3, 4))
|
42 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
|
43 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
|
44 |
-
posetemp[:3,3:4] = np.array([-1,0,0]).reshape(3,1) # * scale # Transition vector
|
45 |
-
render_poses.append(posetemp)
|
46 |
-
|
47 |
-
# for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
48 |
-
# th, phi = i/180*np.pi, j/180*np.pi
|
49 |
-
# posetemp = np.zeros((3, 4))
|
50 |
-
# posetemp[:3,:3] = np.matmul(np.eye(3),
|
51 |
-
# np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
|
52 |
-
# posetemp[:3,3:4] = np.array([0,0,1]).reshape(3,1) # * scale # Transition vector
|
53 |
-
# render_poses.append(posetemp)
|
54 |
-
|
55 |
-
|
56 |
-
rot_cam=viewangle/3
|
57 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
58 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
59 |
-
posetemp = np.zeros((3, 4))
|
60 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
61 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
|
62 |
-
posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
|
63 |
-
render_poses.append(posetemp)
|
64 |
-
|
65 |
-
for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
66 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
67 |
-
posetemp = np.zeros((3, 4))
|
68 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
69 |
-
np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
|
70 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
|
71 |
-
posetemp[:3,3:4] = np.array([0,0,1]).reshape(3,1) # * scale # Transition vector
|
72 |
-
render_poses.append(posetemp)
|
73 |
-
|
74 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
75 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
76 |
-
posetemp = np.zeros((3, 4))
|
77 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
78 |
-
np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
|
79 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
|
80 |
-
posetemp[:3,3:4] = np.array([0,0,-1]).reshape(3,1) # * scale # Transition vector
|
81 |
-
render_poses.append(posetemp)
|
82 |
-
|
83 |
-
# for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
84 |
-
# th, phi = i/180*np.pi, j/180*np.pi
|
85 |
-
# posetemp = np.zeros((3, 4))
|
86 |
-
# posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
87 |
-
# np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
|
88 |
-
# posetemp[:3,3:4] = np.array([1,0,0]).reshape(3,1) # * scale # Transition vector
|
89 |
-
# render_poses.append(posetemp)
|
90 |
-
|
91 |
-
|
92 |
-
rot_cam=viewangle*2/3
|
93 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
94 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
95 |
-
posetemp = np.zeros((3, 4))
|
96 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
97 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
|
98 |
-
posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
|
99 |
-
render_poses.append(posetemp)
|
100 |
-
|
101 |
-
for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
102 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
103 |
-
posetemp = np.zeros((3, 4))
|
104 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
105 |
-
np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
|
106 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
|
107 |
-
posetemp[:3,3:4] = np.array([-1,0,0]).reshape(3,1) # * scale # Transition vector
|
108 |
-
render_poses.append(posetemp)
|
109 |
-
|
110 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
111 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
112 |
-
posetemp = np.zeros((3, 4))
|
113 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
114 |
-
np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
|
115 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
|
116 |
-
posetemp[:3,3:4] = np.array([1,0,0]).reshape(3,1) # * scale # Transition vector
|
117 |
-
render_poses.append(posetemp)
|
118 |
-
|
119 |
-
# for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
120 |
-
# th, phi = i/180*np.pi, j/180*np.pi
|
121 |
-
# posetemp = np.zeros((3, 4))
|
122 |
-
# posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
123 |
-
# np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
|
124 |
-
# posetemp[:3,3:4] = np.array([0,0,-1]).reshape(3,1) # * scale # Transition vector
|
125 |
-
# render_poses.append(posetemp)
|
126 |
-
|
127 |
-
rot_cam=viewangle
|
128 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
129 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
130 |
-
posetemp = np.zeros((3, 4))
|
131 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
132 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))) # Turn left
|
133 |
-
posetemp[:3,3:4] = np.array([0,0,0]).reshape(3,1) # * scale # Transition vector
|
134 |
-
render_poses.append(posetemp)
|
135 |
-
|
136 |
-
for i,j in zip([-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
137 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
138 |
-
posetemp = np.zeros((3, 4))
|
139 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
140 |
-
np.matmul(np.array([[np.cos(-3*ang/180*np.pi), 0, np.sin(-3*ang/180*np.pi)], [0, 1, 0], [-np.sin(-3*ang/180*np.pi), 0, np.cos(-3*ang/180*np.pi)]]),
|
141 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
|
142 |
-
posetemp[:3,3:4] = np.array([0,0,-1]).reshape(3,1) # * scale # Transition vector
|
143 |
-
render_poses.append(posetemp)
|
144 |
-
|
145 |
-
for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,ang,ang,ang,ang,0,-ang,-ang,-ang,-ang,-ang,0,0,0,0]):
|
146 |
-
th, phi = i/180*np.pi, j/180*np.pi
|
147 |
-
posetemp = np.zeros((3, 4))
|
148 |
-
posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
149 |
-
np.matmul(np.array([[np.cos(3*ang/180*np.pi), 0, np.sin(3*ang/180*np.pi)], [0, 1, 0], [-np.sin(3*ang/180*np.pi), 0, np.cos(3*ang/180*np.pi)]]),
|
150 |
-
np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]]))))
|
151 |
-
posetemp[:3,3:4] = np.array([0,0,1]).reshape(3,1) # * scale # Transition vector
|
152 |
-
render_poses.append(posetemp)
|
153 |
-
|
154 |
-
# for i,j in zip([ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,ang,2*ang,3*ang,2*ang,ang,0], [0,0,0,ang,2*ang,3*ang,2*ang,ang,0,-ang,-2*ang,-3*ang,-2*ang,-ang,0,0,0,0]):
|
155 |
-
# th, phi = i/180*np.pi, j/180*np.pi
|
156 |
-
# posetemp = np.zeros((3, 4))
|
157 |
-
# posetemp[:3,:3] = np.matmul(np.array([[np.cos(rot_cam/180*np.pi), 0, np.sin(rot_cam/180*np.pi)], [0, 1, 0], [-np.sin(rot_cam/180*np.pi), 0, np.cos(rot_cam/180*np.pi)]]),
|
158 |
-
# np.matmul(np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]]), np.array([[1, 0, 0], [0, np.cos(phi), -np.sin(phi)], [0, np.sin(phi), np.cos(phi)]])))
|
159 |
-
# posetemp[:3,3:4] = np.array([-1,0,0]).reshape(3,1) # * scale # Transition vector
|
160 |
-
# render_poses.append(posetemp)
|
161 |
-
|
162 |
-
render_poses.append(np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]]))
|
163 |
-
render_poses = np.stack(render_poses, axis=0)
|
164 |
-
|
165 |
-
return render_poses
|
166 |
-
|
167 |
-
|
168 |
-
def generate_seed_360(viewangle, n_views):
|
169 |
-
N = n_views
|
170 |
-
render_poses = np.zeros((N, 3, 4))
|
171 |
-
for i in range(N):
|
172 |
-
th = (viewangle/N)*i/180*np.pi
|
173 |
-
render_poses[i,:3,:3] = np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]])
|
174 |
-
render_poses[i,:3,3:4] = np.random.randn(3,1)*0.0 # Transition vector
|
175 |
-
|
176 |
-
return render_poses
|
177 |
-
|
178 |
-
|
179 |
-
def generate_seed_360_half(viewangle, n_views):
|
180 |
-
N = n_views // 2
|
181 |
-
halfangle = viewangle / 2
|
182 |
-
render_poses = np.zeros((N*2, 3, 4))
|
183 |
-
for i in range(N):
|
184 |
-
th = (halfangle/N)*i/180*np.pi
|
185 |
-
render_poses[i,:3,:3] = np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]])
|
186 |
-
render_poses[i,:3,3:4] = np.random.randn(3,1)*0.0 # Transition vector
|
187 |
-
for i in range(N):
|
188 |
-
th = -(halfangle/N)*i/180*np.pi
|
189 |
-
render_poses[i+N,:3,:3] = np.array([[np.cos(th), 0, np.sin(th)], [0, 1, 0], [-np.sin(th), 0, np.cos(th)]])
|
190 |
-
render_poses[i+N,:3,3:4] = np.random.randn(3,1)*0.0 # Transition vector
|
191 |
-
return render_poses
|
192 |
-
|
193 |
-
|
194 |
-
def generate_seed_preset():
|
195 |
-
degsum = 60
|
196 |
-
thlist = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:], np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:], np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
|
197 |
-
philist = np.concatenate((np.linspace(0,0,7), np.linspace(-22.5,-22.5,7), np.linspace(22.5,22.5,7)))
|
198 |
-
assert len(thlist) == len(philist)
|
199 |
-
|
200 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
201 |
-
for i in range(len(thlist)):
|
202 |
-
th = thlist[i]
|
203 |
-
phi = philist[i]
|
204 |
-
|
205 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
206 |
-
render_poses[i,:3,3:4] = np.zeros((3,1))
|
207 |
-
|
208 |
-
return render_poses
|
209 |
-
|
210 |
-
|
211 |
-
def generate_seed_newpreset():
|
212 |
-
degsum = 60
|
213 |
-
thlist = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:], np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
|
214 |
-
philist = np.concatenate((np.linspace(0,0,7), np.linspace(22.5,22.5,7)))
|
215 |
-
assert len(thlist) == len(philist)
|
216 |
-
|
217 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
218 |
-
for i in range(len(thlist)):
|
219 |
-
th = thlist[i]
|
220 |
-
phi = philist[i]
|
221 |
-
|
222 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
223 |
-
render_poses[i,:3,3:4] = np.zeros((3,1))
|
224 |
-
|
225 |
-
return render_poses
|
226 |
-
|
227 |
-
|
228 |
-
def generate_seed_horizon():
|
229 |
-
movement = np.linspace(0, 5, 11)
|
230 |
-
render_poses = np.zeros((len(movement), 3, 4))
|
231 |
-
for i in range(len(movement)):
|
232 |
-
|
233 |
-
render_poses[i,:3,:3] = np.eye(3)
|
234 |
-
render_poses[i,:3,3:4] = np.array([[-movement[i]], [0], [0]])
|
235 |
-
|
236 |
-
return render_poses
|
237 |
-
|
238 |
-
|
239 |
-
def generate_seed_backward():
|
240 |
-
movement = np.linspace(0, 5, 11)
|
241 |
-
render_poses = np.zeros((len(movement), 3, 4))
|
242 |
-
for i in range(len(movement)):
|
243 |
-
render_poses[i,:3,:3] = np.eye(3)
|
244 |
-
render_poses[i,:3,3:4] = np.array([[0], [0], [movement[i]]])
|
245 |
-
return render_poses
|
246 |
-
|
247 |
-
|
248 |
-
def generate_seed_arc():
|
249 |
-
degree = 5
|
250 |
-
# thlist = np.array([degree, 0, 0, 0, -degree])
|
251 |
-
thlist = np.arange(0, degree, 5) + np.arange(0, -degree, 5)[1:]
|
252 |
-
phi = 0
|
253 |
-
|
254 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
255 |
-
for i in range(len(thlist)):
|
256 |
-
th = thlist[i]
|
257 |
-
d = 4.3 # 얘를 조절하면 초기 자세 기준으로 앞으로 d만큼 떨어진 점을 기준으로 도는 자세가 만들어짐
|
258 |
-
|
259 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
260 |
-
render_poses[0,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), d-d*np.cos(phi/180*np.pi)]).reshape(3,1)# Transition vector
|
261 |
-
# render_poses[i,:3,3:4] = np.zeros((3,1))
|
262 |
-
|
263 |
-
return render_poses
|
264 |
-
|
265 |
-
|
266 |
-
def generate_seed_hemisphere(center_depth, degree=5):
|
267 |
-
degree = 5
|
268 |
-
thlist = np.array([degree, 0, 0, 0, -degree])
|
269 |
-
philist = np.array([0, -degree, 0, degree, 0])
|
270 |
-
assert len(thlist) == len(philist)
|
271 |
-
|
272 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
273 |
-
for i in range(len(thlist)):
|
274 |
-
th = thlist[i]
|
275 |
-
phi = philist[i]
|
276 |
-
# curr_pose = np.zeros((1, 3, 4))
|
277 |
-
d = center_depth # 얘를 조절하면 초기 자세 기준으로 앞으로 d만큼 떨어진 점을 기준으로 도는 자세가 만들어짐
|
278 |
-
|
279 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
280 |
-
render_poses[0,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), d-d*np.cos(phi/180*np.pi)]).reshape(3,1)# Transition vector
|
281 |
-
# render_poses[i,:3,3:4] = np.zeros((3,1))
|
282 |
-
|
283 |
-
return render_poses
|
284 |
-
|
285 |
-
|
286 |
-
def generate_seed_hemisphere_(degree, nviews):
|
287 |
-
# thlist = np.array([degree, 0, 0, 0, -degree])
|
288 |
-
# philist = np.array([0, -degree, 0, degree, 0])
|
289 |
-
thlist = degree * np.sin(np.linspace(0, 2*np.pi, nviews))
|
290 |
-
philist = degree * np.cos(np.linspace(0, 2*np.pi, nviews))
|
291 |
-
assert len(thlist) == len(philist)
|
292 |
-
|
293 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
294 |
-
for i in range(len(thlist)):
|
295 |
-
th = thlist[i]
|
296 |
-
phi = philist[i]
|
297 |
-
# curr_pose = np.zeros((1, 3, 4))
|
298 |
-
d = 4.3 # 얘를 조절하면 초기 자세 기준으로 앞으로 d만큼 떨어진 점을 기준으로 도는 자세가 만들어짐
|
299 |
-
|
300 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
301 |
-
render_poses[0,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), d-d*np.cos(phi/180*np.pi)]).reshape(3,1)# Transition vector
|
302 |
-
return render_poses
|
303 |
-
|
304 |
-
|
305 |
-
def generate_seed_nothing():
|
306 |
-
degree = 5
|
307 |
-
thlist = np.array([0])
|
308 |
-
philist = np.array([0])
|
309 |
-
assert len(thlist) == len(philist)
|
310 |
-
|
311 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
312 |
-
for i in range(len(thlist)):
|
313 |
-
th = thlist[i]
|
314 |
-
phi = philist[i]
|
315 |
-
# curr_pose = np.zeros((1, 3, 4))
|
316 |
-
d = 4.3 # 얘를 조절하면 초기 자세 기준으로 앞으로 d만큼 떨어진 점을 기준으로 도는 자세가 만들어짐
|
317 |
-
|
318 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
319 |
-
render_poses[0,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), d-d*np.cos(phi/180*np.pi)]).reshape(3,1)# Transition vector
|
320 |
-
# render_poses[i,:3,3:4] = np.zeros((3,1))
|
321 |
-
|
322 |
-
return render_poses
|
323 |
-
|
324 |
-
|
325 |
-
def generate_seed_lookaround():
|
326 |
-
degsum = 60
|
327 |
-
thlist = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:], np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:], np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
|
328 |
-
philist = np.concatenate((np.linspace(0,0,7), np.linspace(22.5,22.5,7), np.linspace(-22.5,-22.5,7)))
|
329 |
-
assert len(thlist) == len(philist)
|
330 |
-
|
331 |
-
render_poses = []
|
332 |
-
# up / left --> right
|
333 |
-
thlist = np.linspace(-degsum, degsum, 2*degsum+1)
|
334 |
-
for i in range(len(thlist)):
|
335 |
-
render_pose = np.zeros((3,4))
|
336 |
-
th = thlist[i]
|
337 |
-
phi = 22.5
|
338 |
-
|
339 |
-
render_pose[:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
340 |
-
render_pose[:3,3:4] = np.zeros((3,1))
|
341 |
-
render_poses.append(render_pose)
|
342 |
-
|
343 |
-
# right / up --> center
|
344 |
-
phlist = np.linspace(22.5, 0, 23)
|
345 |
-
# Exclude first frame (same as last frame before)
|
346 |
-
phlist = phlist[1:]
|
347 |
-
for i in range(len(phlist)):
|
348 |
-
render_pose = np.zeros((3,4))
|
349 |
-
th = degsum
|
350 |
-
phi = phlist[i]
|
351 |
-
|
352 |
-
render_pose[:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
353 |
-
render_pose[:3,3:4] = np.zeros((3,1))
|
354 |
-
render_poses.append(render_pose)
|
355 |
-
|
356 |
-
# center / right --> left
|
357 |
-
thlist = np.linspace(degsum, -degsum, 2*degsum+1)
|
358 |
-
thlist = thlist[1:]
|
359 |
-
for i in range(len(thlist)):
|
360 |
-
render_pose = np.zeros((3,4))
|
361 |
-
th = thlist[i]
|
362 |
-
phi = 0
|
363 |
-
|
364 |
-
render_pose[:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
365 |
-
render_pose[:3,3:4] = np.zeros((3,1))
|
366 |
-
render_poses.append(render_pose)
|
367 |
-
|
368 |
-
# left / center --> down
|
369 |
-
phlist = np.linspace(0, -22.5, 23)
|
370 |
-
phlist = phlist[1:]
|
371 |
-
for i in range(len(phlist)):
|
372 |
-
render_pose = np.zeros((3,4))
|
373 |
-
th = -degsum
|
374 |
-
phi = phlist[i]
|
375 |
-
|
376 |
-
render_pose[:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
377 |
-
render_pose[:3,3:4] = np.zeros((3,1))
|
378 |
-
render_poses.append(render_pose)
|
379 |
-
|
380 |
-
|
381 |
-
thlist = np.linspace(-degsum, degsum, 2*degsum+1)
|
382 |
-
for i in range(len(thlist)):
|
383 |
-
render_pose = np.zeros((3,4))
|
384 |
-
th = thlist[i]
|
385 |
-
phi = -22.5
|
386 |
-
|
387 |
-
render_pose[:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
388 |
-
render_pose[:3,3:4] = np.zeros((3,1))
|
389 |
-
render_poses.append(render_pose)
|
390 |
-
|
391 |
-
return render_poses
|
392 |
-
|
393 |
-
|
394 |
-
def generate_seed_lookdown():
|
395 |
-
degsum = 60
|
396 |
-
thlist = np.concatenate((np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:], np.linspace(0, degsum, 4), np.linspace(0, -degsum, 4)[1:]))
|
397 |
-
philist = np.concatenate((np.linspace(0,0,7), np.linspace(-22.5,-22.5,7)))
|
398 |
-
assert len(thlist) == len(philist)
|
399 |
-
|
400 |
-
render_poses = np.zeros((len(thlist), 3, 4))
|
401 |
-
for i in range(len(thlist)):
|
402 |
-
th = thlist[i]
|
403 |
-
phi = philist[i]
|
404 |
-
|
405 |
-
render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
|
406 |
-
render_poses[i,:3,3:4] = np.zeros((3,1))
|
407 |
-
|
408 |
-
return render_poses
|
409 |
-
|
410 |
-
|
411 |
-
def generate_seed_back():
    movement = np.linspace(0, 5, 101)
    render_poses = []  # np.zeros((len(movement), 3, 4))
    for i in range(len(movement)):
        render_pose = np.zeros((3,4))
        render_pose[:3,:3] = np.eye(3)
        render_pose[:3,3:4] = np.array([[0], [0], [movement[i]]])
        render_poses.append(render_pose)

    movement = np.linspace(5, 0, 101)
    movement = movement[1:]
    for i in range(len(movement)):
        render_pose = np.zeros((3,4))
        render_pose[:3,:3] = np.eye(3)
        render_pose[:3,3:4] = np.array([[0], [0], [movement[i]]])
        render_poses.append(render_pose)

    return render_poses

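A quick sanity check on the dolly path above (a hedged sketch, not part of the file): it should yield 101 + 100 = 201 poses with identity rotation and a z-translation running 0 → 5 → 0.

poses = generate_seed_back()
assert len(poses) == 201
assert np.allclose(poses[0][:3, :3], np.eye(3))  # rotation stays identity on this path
assert np.isclose(poses[100][2, 3], 5.0)         # turn-around point of the dolly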
def generate_seed_llff(degree, nviews, round=4, d=2.3):
    assert round%4==0
    # thlist = np.array([degree, 0, 0, 0, -degree])
    # philist = np.array([0, -degree, 0, degree, 0])
    # d = 2.3
    thlist = degree * np.sin(np.linspace(0, 2*np.pi*round, nviews))
    philist = degree * np.cos(np.linspace(0, 2*np.pi*round, nviews))
    zlist = d/15 * np.sin(np.linspace(0, 2*np.pi*round//4, nviews))
    assert len(thlist) == len(philist)

    render_poses = np.zeros((len(thlist), 3, 4))
    for i in range(len(thlist)):
        th = thlist[i]
        phi = philist[i]
        z = zlist[i]
        # curr_pose = np.zeros((1, 3, 4))
        # d = 4.3  # adjusting d makes the poses orbit around a point d units in front of the initial pose

        render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
        render_poses[i,:3,3:4] = np.array([d*np.sin(th/180*np.pi), 0, -z+d-d*np.cos(th/180*np.pi)]).reshape(3,1) + np.array([0, d*np.sin(phi/180*np.pi), -z+d-d*np.cos(phi/180*np.pi)]).reshape(3,1)  # translation vector
    return render_poses

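For orientation, a hedged usage sketch of the LLFF-style spiral above: with these arguments it traces `round` loops around a pivot `d` units in front of the start pose, one 3x4 [R|t] pose per view.

poses = generate_seed_llff(5, 400, round=4, d=2.3)
assert poses.shape == (400, 3, 4)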
def generate_seed_headbanging(maxdeg, nviews_per_round, round=3, fullround=1):
    radius = np.concatenate((np.linspace(0, maxdeg, nviews_per_round*round), maxdeg*np.ones(nviews_per_round*fullround), np.linspace(maxdeg, 0, nviews_per_round*round)))
    thlist = 2.66*radius * np.sin(np.linspace(0, 2*np.pi*(round+fullround+round), nviews_per_round*(round+fullround+round)))
    philist = radius * np.cos(np.linspace(0, 2*np.pi*(round+fullround+round), nviews_per_round*(round+fullround+round)))
    assert len(thlist) == len(philist)

    render_poses = np.zeros((len(thlist), 3, 4))
    for i in range(len(thlist)):
        th = thlist[i]
        phi = philist[i]

        render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
        render_poses[i,:3,3:4] = np.zeros((3,1))

    return render_poses

def generate_seed_headbanging_circle(maxdeg, nviews_per_round, round=3, fullround=1):
    radius = np.concatenate((np.linspace(0, maxdeg, nviews_per_round*round), maxdeg*np.ones(nviews_per_round*fullround), np.linspace(maxdeg, 0, nviews_per_round*round)))
    thlist = 2.66*radius * np.sin(np.linspace(0, 2*np.pi*(round+fullround+round), nviews_per_round*(round+fullround+round)))
    philist = radius * np.cos(np.linspace(0, 2*np.pi*(round+fullround+round), nviews_per_round*(round+fullround+round)))
    assert len(thlist) == len(philist)

    render_poses = np.zeros((len(thlist), 3, 4))
    for i in range(len(thlist)):
        th = thlist[i]
        phi = philist[i]

        render_poses[i,:3,:3] = np.matmul(np.array([[np.cos(th/180*np.pi), 0, -np.sin(th/180*np.pi)], [0, 1, 0], [np.sin(th/180*np.pi), 0, np.cos(th/180*np.pi)]]), np.array([[1, 0, 0], [0, np.cos(phi/180*np.pi), -np.sin(phi/180*np.pi)], [0, np.sin(phi/180*np.pi), np.cos(phi/180*np.pi)]]))
        render_poses[i,:3,3:4] = np.zeros((3,1))

    return render_poses

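The two headbanging generators above are line-for-line identical. A minimal deduplication sketch, assuming no behavioural difference is intended between them:

# keep both public names but a single implementation
generate_seed_headbanging_circle = generate_seed_headbanging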
def get_pcdGenPoses(pcdgenpath, argdict={}):
    if pcdgenpath == 'rotate360':
        render_poses = generate_seed_360(360, 10)
    elif pcdgenpath == 'lookaround':
        render_poses = generate_seed_preset()
    elif pcdgenpath == 'moveright':
        render_poses = generate_seed_horizon()
    elif pcdgenpath == 'moveback':
        render_poses = generate_seed_backward()
    elif pcdgenpath == 'arc':
        render_poses = generate_seed_arc()
    elif pcdgenpath == 'lookdown':
        render_poses = generate_seed_newpreset()
    elif pcdgenpath == 'hemisphere':
        render_poses = generate_seed_hemisphere(argdict['center_depth'])
    else:
        raise ValueError("Invalid pcdgenpath")
    return render_poses

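A hedged usage sketch of the dispatcher above; 'hemisphere' is the only preset that reads argdict:

poses = get_pcdGenPoses('lookaround')                        # preset seed views
hemi = get_pcdGenPoses('hemisphere', {'center_depth': 2.0})  # the value 2.0 is illustrative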
def get_camerapaths():
    preset_json = {}
    for cam_path in ["back_and_forth", "llff", "headbanging"]:
        if cam_path == 'back_and_forth':
            render_poses = generate_seed_back()
        elif cam_path == 'llff':
            render_poses = generate_seed_llff(5, 400, round=4, d=2)
        elif cam_path == 'headbanging':
            render_poses = generate_seed_headbanging(maxdeg=15, nviews_per_round=180, round=2, fullround=0)
        else:
            raise ValueError("Unknown camera path")

        yz_reverse = np.array([[1,0,0], [0,-1,0], [0,0,-1]])
        blender_train_json = {"frames": []}
        for render_pose in render_poses:
            curr_frame = {}
            ### Split the world-to-camera pose into R and T
            Rw2i = render_pose[:3,:3]
            Tw2i = render_pose[:3,3:4]

            # Transform camera to world + change the sign of the y and z axes
            Ri2w = np.matmul(yz_reverse, Rw2i).T
            Ti2w = -np.matmul(Ri2w, np.matmul(yz_reverse, Tw2i))
            Pc2w = np.concatenate((Ri2w, Ti2w), axis=1)
            Pc2w = np.concatenate((Pc2w, np.array([0,0,0,1]).reshape((1,4))), axis=0)

            curr_frame["transform_matrix"] = Pc2w.tolist()
            blender_train_json["frames"].append(curr_frame)

        preset_json[cam_path] = blender_train_json

    return preset_json

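A minimal consumption sketch (hypothetical, not in the file): each preset maps to a Blender-style dict whose frames hold 4x4 camera-to-world matrices.

presets = get_camerapaths()
first = np.array(presets['llff']['frames'][0]['transform_matrix'])
assert first.shape == (4, 4)  # homogeneous camera-to-world transform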
def main():
    cam_path = 'headbanging_circle'
    os.makedirs("poses_supplementary", exist_ok=True)

    if cam_path == 'lookaround':
        render_poses = generate_seed_lookaround()
    elif cam_path == 'back':
        render_poses = generate_seed_back()
    elif cam_path == '360':
        render_poses = generate_seed_360(360, 360)
    elif cam_path == '1440':
        render_poses = generate_seed_360(360, 1440)
    elif cam_path == 'llff':
        d = 8
        render_poses = generate_seed_llff(5, 400, round=4, d=d)
    elif cam_path == 'headbanging':
        round = 3
        render_poses = generate_seed_headbanging(maxdeg=15, nviews_per_round=180, round=round, fullround=0)
    elif cam_path == 'headbanging_circle':
        round = 2
        render_poses = generate_seed_headbanging_circle(maxdeg=5, nviews_per_round=180, round=round, fullround=0)

    yz_reverse = np.array([[1,0,0], [0,-1,0], [0,0,-1]])

    c2w_poses = []
    for render_pose in render_poses:
        ### Split the world-to-camera pose into R and T
        Rw2i = render_pose[:3,:3]
        Tw2i = render_pose[:3,3:4]

        # Transform camera to world + change the sign of the y and z axes
        Ri2w = np.matmul(yz_reverse, Rw2i).T
        Ti2w = -np.matmul(Ri2w, np.matmul(yz_reverse, Tw2i))
        Pc2w = np.concatenate((Ri2w, Ti2w), axis=1)
        # Pc2w = np.concatenate((Pc2w, np.array([[0,0,0,1]])), axis=0)

        c2w_poses.append(Pc2w)

    c2w_poses = np.stack(c2w_poses, axis=0)

    # np.save(f'poses_supplementary/{cam_path}.npy', c2w_poses)

    FX = 5.8269e+02
    W = 512
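    # FX is a hard-coded focal length in pixels; for a 512-px-wide image this
    # yields fov_x = 2*arctan(512 / (2*582.69)) ≈ 0.828 rad (about 47.4 degrees)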
    fov_x = 2*np.arctan(W / (2*FX))
    if cam_path in ['360', '1440', 'llff', 'headbanging']:
        fov_x = fov_x * 1.2
    blender_train_json = {}
    blender_train_json["camera_angle_x"] = fov_x
    blender_train_json["frames"] = []

    for render_pose in render_poses:
        curr_frame = {}
        ### Split the world-to-camera pose into R and T
        Rw2i = render_pose[:3,:3]
        Tw2i = render_pose[:3,3:4]

        # Transform camera to world + change the sign of the y and z axes
        Ri2w = np.matmul(yz_reverse, Rw2i).T
        Ti2w = -np.matmul(Ri2w, np.matmul(yz_reverse, Tw2i))
        Pc2w = np.concatenate((Ri2w, Ti2w), axis=1)

        curr_frame["transform_matrix"] = Pc2w.tolist()
        blender_train_json["frames"].append(curr_frame)

    import json
    if cam_path == 'llff':
        train_json_path = f"poses_supplementary/{cam_path}_d{d}.json"
    elif cam_path == 'headbanging':
        train_json_path = f"poses_supplementary/{cam_path}_r{round}.json"
    else:
        train_json_path = f"poses_supplementary/{cam_path}.json"

    with open(train_json_path, 'w') as outfile:
        json.dump(blender_train_json, outfile, indent=4)


if __name__ == '__main__':
    main()
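To spot-check the dumped presets after running the script, a small hedged round-trip (the file name follows the else branch, since cam_path is 'headbanging_circle'):

import json

with open("poses_supplementary/headbanging_circle.json") as f:
    meta = json.load(f)
print(meta["camera_angle_x"], len(meta["frames"]))  # fov in radians, number of poses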