# dreamgaussian4d / gaussian_model_4d.py
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
import torch
import numpy as np
from utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
from torch import nn
import os
from utils.system_utils import mkdir_p
from plyfile import PlyData, PlyElement
from random import randint
from utils.sh_utils import RGB2SH
from utils.graphics_utils import BasicPointCloud
from utils.general_utils import strip_symmetric, build_scaling_rotation
from scene.deformation import deform_network
from scene.regulation import compute_plane_smoothness
def gaussian_3d_coeff(xyzs, covs):
    # Evaluate the unnormalised 3D gaussian density exp(-0.5 * x^T Sigma^{-1} x)
    # for each offset/covariance pair.
    # xyzs: [N, 3] offsets from the gaussian centers
    # covs: [N, 6] packed upper-triangular covariance entries (xx, xy, xz, yy, yz, zz)
x, y, z = xyzs[:, 0], xyzs[:, 1], xyzs[:, 2]
a, b, c, d, e, f = covs[:, 0], covs[:, 1], covs[:, 2], covs[:, 3], covs[:, 4], covs[:, 5]
    # a tiny epsilon keeps the determinant away from zero for degenerate covariances
    inv_det = 1 / (a * d * f + 2 * e * c * b - e**2 * a - c**2 * d - b**2 * f + 1e-24)
inv_a = (d * f - e**2) * inv_det
inv_b = (e * c - b * f) * inv_det
inv_c = (e * b - c * d) * inv_det
inv_d = (a * f - c**2) * inv_det
inv_e = (b * c - e * a) * inv_det
inv_f = (a * d - b**2) * inv_det
power = -0.5 * (x**2 * inv_a + y**2 * inv_d + z**2 * inv_f) - x * y * inv_b - x * z * inv_c - y * z * inv_e
power[power > 0] = -1e10 # abnormal values... make weights 0
return torch.exp(power)
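# Illustrative sketch (not part of the original module): gaussian_3d_coeff can be
# used on its own to evaluate the soft occupancy of a single isotropic gaussian at a
# few query offsets. The 0.01 variance and the offsets below are arbitrary example
# inputs, packed in the (xx, xy, xz, yy, yz, zz) order expected above.
#
#   offsets = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.0, 0.0]])                 # [2, 3]
#   covs = torch.tensor([[0.01, 0.0, 0.0, 0.01, 0.0, 0.01]]).repeat(2, 1)      # [2, 6]
#   weights = gaussian_3d_coeff(offsets, covs)  # values in (0, 1], 1 at the center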
class GaussianModel:
def setup_functions(self):
def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
L = build_scaling_rotation(scaling_modifier * scaling, rotation)
actual_covariance = L @ L.transpose(1, 2)
symm = strip_symmetric(actual_covariance)
return symm
self.scaling_activation = torch.exp
self.scaling_inverse_activation = torch.log
self.covariance_activation = build_covariance_from_scaling_rotation
self.opacity_activation = torch.sigmoid
self.inverse_opacity_activation = inverse_sigmoid
self.rotation_activation = torch.nn.functional.normalize
def __init__(self, sh_degree : int, args):
self.active_sh_degree = 0
self.max_sh_degree = sh_degree
self._xyz = torch.empty(0)
# self._deformation = torch.empty(0)
self._deformation = deform_network(args)
# self.grid = TriPlaneGrid()
self._features_dc = torch.empty(0)
self._features_rest = torch.empty(0)
self._scaling = torch.empty(0)
self._rotation = torch.empty(0)
self._opacity = torch.empty(0)
self.max_radii2D = torch.empty(0)
self.xyz_gradient_accum = torch.empty(0)
self.denom = torch.empty(0)
self.optimizer = None
self.percent_dense = 0
self.spatial_lr_scale = 0
self._deformation_table = torch.empty(0)
self.setup_functions()
def capture(self):
return (
self.active_sh_degree,
self._xyz,
self._deformation.state_dict(),
self._deformation_table,
# self.grid,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
self.xyz_gradient_accum,
self.denom,
self.optimizer.state_dict(),
self.spatial_lr_scale,
)
def restore(self, model_args, training_args):
(self.active_sh_degree,
self._xyz,
self._deformation_table,
self._deformation,
# self.grid,
self._features_dc,
self._features_rest,
self._scaling,
self._rotation,
self._opacity,
self.max_radii2D,
xyz_gradient_accum,
denom,
opt_dict,
self.spatial_lr_scale) = model_args
self.training_setup(training_args)
self.xyz_gradient_accum = xyz_gradient_accum
self.denom = denom
self.optimizer.load_state_dict(opt_dict)
@property
def get_scaling(self):
return self.scaling_activation(self._scaling)
@property
def get_rotation(self):
return self.rotation_activation(self._rotation)
@property
def get_xyz(self):
return self._xyz
@property
def get_features(self):
features_dc = self._features_dc
features_rest = self._features_rest
return torch.cat((features_dc, features_rest), dim=1)
@property
def get_opacity(self):
return self.opacity_activation(self._opacity)
    def get_deformed_everything(self, time):
        # Query the deformation network at frame `time` and return the deformed
        # gaussian parameters; the network predicts residuals that are added to the
        # canonical values, while opacity is kept unchanged.
        means3D = self.get_xyz
        time = torch.tensor(time).to(means3D.device).repeat(means3D.shape[0], 1)
        time = ((time.float() / self.T) - 0.5) * 2  # map frame index [0, T) to roughly [-1, 1]
opacity = self._opacity
scales = self._scaling
rotations = self._rotation
deformation_point = self._deformation_table
means3D_deform, scales_deform, rotations_deform, opacity_deform = self._deformation(means3D[deformation_point], scales[deformation_point],
rotations[deformation_point], opacity[deformation_point],
time[deformation_point])
means3D_final = means3D + means3D_deform
rotations_final = rotations + rotations_deform
scales_final = scales + scales_deform
opacity_final = opacity
return means3D_final, rotations_final, scales_final, opacity_final
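    # Usage sketch (illustrative): assuming the model has been set up with
    # training_setup (which defines self.T), the deformed gaussians for frame index
    # t can be queried and activated the same way the renderer would. `model` and
    # `t` are placeholder names.
    #
    #   xyz_t, rot_t, scale_t, opa_t = model.get_deformed_everything(t)
    #   scale_t = model.scaling_activation(scale_t)
    #   rot_t = model.rotation_activation(rot_t)
    #   opa_t = model.opacity_activation(opa_t)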
@torch.no_grad()
def extract_fields_t(self, resolution=128, num_blocks=16, relax_ratio=1.5, t=0):
# resolution: resolution of field
block_size = 2 / num_blocks
        assert resolution % num_blocks == 0  # blocks must tile the resolution evenly
split_size = resolution // num_blocks
xyzs, rotation, scale, opacities = self.get_deformed_everything(t)
scale = self.scaling_activation(scale)
opacities = self.opacity_activation(opacities)
# pre-filter low opacity gaussians to save computation
mask = (opacities > 0.005).squeeze(1)
opacities = opacities[mask]
xyzs = xyzs[mask]
stds = scale[mask]
# normalize to ~ [-1, 1]
mn, mx = xyzs.amin(0), xyzs.amax(0)
self.center = (mn + mx) / 2
self.scale = 1.8 / (mx - mn).amax().item()
xyzs = (xyzs - self.center) * self.scale
stds = stds * self.scale
covs = self.covariance_activation(stds, 1, rotation[mask])
# tile
device = opacities.device
occ = torch.zeros([resolution] * 3, dtype=torch.float32, device=device)
X = torch.linspace(-1, 1, resolution).split(split_size)
Y = torch.linspace(-1, 1, resolution).split(split_size)
Z = torch.linspace(-1, 1, resolution).split(split_size)
        # loop over blocks (assumes each gaussian's extent is smaller than relax_ratio * block_size)
for xi, xs in enumerate(X):
for yi, ys in enumerate(Y):
for zi, zs in enumerate(Z):
                    xx, yy, zz = torch.meshgrid(xs, ys, zs, indexing="ij")
# sample points [M, 3]
pts = torch.cat([xx.reshape(-1, 1), yy.reshape(-1, 1), zz.reshape(-1, 1)], dim=-1).to(device)
# in-tile gaussians mask
vmin, vmax = pts.amin(0), pts.amax(0)
vmin -= block_size * relax_ratio
vmax += block_size * relax_ratio
mask = (xyzs < vmax).all(-1) & (xyzs > vmin).all(-1)
# if hit no gaussian, continue to next block
if not mask.any():
continue
mask_xyzs = xyzs[mask] # [L, 3]
mask_covs = covs[mask] # [L, 6]
mask_opas = opacities[mask].view(1, -1) # [L, 1] --> [1, L]
# query per point-gaussian pair.
g_pts = pts.unsqueeze(1).repeat(1, mask_covs.shape[0], 1) - mask_xyzs.unsqueeze(0) # [M, L, 3]
g_covs = mask_covs.unsqueeze(0).repeat(pts.shape[0], 1, 1) # [M, L, 6]
# batch on gaussian to avoid OOM
batch_g = 1024
val = 0
for start in range(0, g_covs.shape[1], batch_g):
end = min(start + batch_g, g_covs.shape[1])
w = gaussian_3d_coeff(g_pts[:, start:end].reshape(-1, 3), g_covs[:, start:end].reshape(-1, 6)).reshape(pts.shape[0], -1) # [M, l]
val += (mask_opas[:, start:end] * w).sum(-1)
# kiui.lo(val, mask_opas, w)
occ[xi * split_size: xi * split_size + len(xs),
yi * split_size: yi * split_size + len(ys),
zi * split_size: zi * split_size + len(zs)] = val.reshape(len(xs), len(ys), len(zs))
return occ
def extract_mesh_t(self, path, density_thresh=1, t=0, resolution=128, decimate_target=1e5):
from mesh import Mesh
from mesh_utils import decimate_mesh, clean_mesh
os.makedirs(os.path.dirname(path), exist_ok=True)
occ = self.extract_fields_t(resolution, t=t).detach().cpu().numpy()
import mcubes
vertices, triangles = mcubes.marching_cubes(occ, density_thresh)
vertices = vertices / (resolution - 1.0) * 2 - 1
# transform back to the original space
vertices = vertices / self.scale + self.center.detach().cpu().numpy()
vertices, triangles = clean_mesh(vertices, triangles, remesh=True, remesh_size=0.015)
if decimate_target > 0 and triangles.shape[0] > decimate_target:
vertices, triangles = decimate_mesh(vertices, triangles, decimate_target)
v = torch.from_numpy(vertices.astype(np.float32)).contiguous().cuda()
f = torch.from_numpy(triangles.astype(np.int32)).contiguous().cuda()
print(
f"[INFO] marching cubes result: {v.shape} ({v.min().item()}-{v.max().item()}), {f.shape}"
)
mesh = Mesh(v=v, f=f, device='cuda')
return mesh
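    # Usage sketch (illustrative): exporting a mesh for a single frame. The path,
    # threshold and resolution below are example values; `model` is a placeholder
    # for a trained GaussianModel instance. The returned Mesh lives on the GPU; see
    # mesh.py for export helpers.
    #
    #   mesh = model.extract_mesh_t("output/frame_000.obj", density_thresh=1, t=0, resolution=128)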
def get_covariance(self, scaling_modifier = 1):
return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
def oneupSHdegree(self):
if self.active_sh_degree < self.max_sh_degree:
self.active_sh_degree += 1
def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float, time_line: int):
from simple_knn._C import distCUDA2
self.spatial_lr_scale = spatial_lr_scale
fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
features[:, :3, 0 ] = fused_color
features[:, 3:, 1:] = 0.0
print("Number of points at initialisation : ", fused_point_cloud.shape[0])
dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
rots[:, 0] = 1
opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
self._deformation = self._deformation.to("cuda")
# self.grid = self.grid.to("cuda")
self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
self._scaling = nn.Parameter(scales.requires_grad_(True))
self._rotation = nn.Parameter(rots.requires_grad_(True))
self._opacity = nn.Parameter(opacities.requires_grad_(True))
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
self._deformation_table = torch.gt(torch.ones((self.get_xyz.shape[0]),device="cuda"),0)
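    # Usage sketch (illustrative): initialising the model from random points with
    # uniform grey colours. BasicPointCloud fields follow utils.graphics_utils; the
    # point count, learning-rate scale and time_line below are example values only.
    #
    #   pts = np.random.uniform(-0.5, 0.5, size=(5000, 3))
    #   cols = np.full((5000, 3), 0.5)
    #   pcd = BasicPointCloud(points=pts, colors=cols, normals=np.zeros((5000, 3)))
    #   model.create_from_pcd(pcd, spatial_lr_scale=1.0, time_line=14)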
def training_setup(self, training_args):
self.percent_dense = training_args.percent_dense
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self._deformation_accum = torch.zeros((self.get_xyz.shape[0],3),device="cuda")
        self.T = training_args.batch_size  # number of frames; used to normalise time in get_deformed_everything
if training_args.optimize_gaussians:
l = [
{'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
{'params': list(self._deformation.get_mlp_parameters()), 'lr': training_args.deformation_lr_init * self.spatial_lr_scale, "name": "deformation"},
{'params': list(self._deformation.get_grid_parameters()), 'lr': training_args.grid_lr_init * self.spatial_lr_scale, "name": "grid"},
{'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
{'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
{'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
{'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
{'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
]
else:
l = [
{'params': list(self._deformation.get_mlp_parameters()), 'lr': training_args.deformation_lr_init * self.spatial_lr_scale, "name": "deformation"},
{'params': list(self._deformation.get_grid_parameters()), 'lr': training_args.grid_lr_init * self.spatial_lr_scale, "name": "grid"},
]
self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
lr_final=training_args.position_lr_final*self.spatial_lr_scale,
lr_delay_mult=training_args.position_lr_delay_mult,
max_steps=training_args.position_lr_max_steps)
self.deformation_scheduler_args = get_expon_lr_func(lr_init=training_args.deformation_lr_init*self.spatial_lr_scale,
lr_final=training_args.deformation_lr_final*self.spatial_lr_scale,
lr_delay_mult=training_args.deformation_lr_delay_mult,
max_steps=training_args.position_lr_max_steps)
self.grid_scheduler_args = get_expon_lr_func(lr_init=training_args.grid_lr_init*self.spatial_lr_scale,
lr_final=training_args.grid_lr_final*self.spatial_lr_scale,
lr_delay_mult=training_args.deformation_lr_delay_mult,
max_steps=training_args.position_lr_max_steps)
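    # Training-loop sketch (illustrative): the schedulers created above are meant to
    # be stepped once per iteration before the optimizer update. `model`, `loss`,
    # `compute_loss` and `max_iterations` are placeholders for the surrounding
    # training code, not names defined in this file.
    #
    #   model.training_setup(training_args)
    #   for iteration in range(1, max_iterations + 1):
    #       model.update_learning_rate(iteration)
    #       loss = compute_loss(...)          # hypothetical rendering / SDS loss
    #       loss.backward()
    #       model.optimizer.step()
    #       model.optimizer.zero_grad(set_to_none=True)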
def update_learning_rate(self, iteration):
''' Learning rate scheduling per step '''
for param_group in self.optimizer.param_groups:
if param_group["name"] == "xyz":
lr = self.xyz_scheduler_args(iteration)
param_group['lr'] = lr
# return lr
if "grid" in param_group["name"]:
lr = self.grid_scheduler_args(iteration)
param_group['lr'] = lr
# return lr
elif param_group["name"] == "deformation":
lr = self.deformation_scheduler_args(iteration)
param_group['lr'] = lr
# return lr
def construct_list_of_attributes(self):
l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
# All channels except the 3 DC
for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
l.append('f_dc_{}'.format(i))
for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
l.append('f_rest_{}'.format(i))
l.append('opacity')
for i in range(self._scaling.shape[1]):
l.append('scale_{}'.format(i))
for i in range(self._rotation.shape[1]):
l.append('rot_{}'.format(i))
return l
    def compute_deformation(self, time):
        # NOTE: legacy helper that assumes self._deformation is a per-point tensor
        # field; in this model it is a deform_network module, so this path is unused.
        deform = self._deformation[:, :, :time].sum(dim=-1)
        xyz = self._xyz + deform
        return xyz
def load_model(self, path, name):
        print("loading model from existing {}".format(path))
weight_dict = torch.load(os.path.join(path, name+"_deformation.pth"),map_location="cuda")
self._deformation.load_state_dict(weight_dict)
self._deformation = self._deformation.to("cuda")
self._deformation_table = torch.gt(torch.ones((self.get_xyz.shape[0]),device="cuda"),0)
self._deformation_accum = torch.zeros((self.get_xyz.shape[0],3),device="cuda")
if os.path.exists(os.path.join(path, name+"_deformation_table.pth")):
self._deformation_table = torch.load(os.path.join(path, name+"_deformation_table.pth"),map_location="cuda")
if os.path.exists(os.path.join(path,name+"_deformation_accum.pth")):
self._deformation_accum = torch.load(os.path.join(path, name+"_deformation_accum.pth"),map_location="cuda")
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
def save_deformation(self, path, name):
torch.save(self._deformation.state_dict(),os.path.join(path, name+"_deformation.pth"))
torch.save(self._deformation_table,os.path.join(path, name+"_deformation_table.pth"))
torch.save(self._deformation_accum,os.path.join(path, name+"_deformation_accum.pth"))
def save_ply(self, path):
mkdir_p(os.path.dirname(path))
xyz = self._xyz.detach().cpu().numpy()
normals = np.zeros_like(xyz)
f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
opacities = self._opacity.detach().cpu().numpy()
scale = self._scaling.detach().cpu().numpy()
rotation = self._rotation.detach().cpu().numpy()
dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
elements = np.empty(xyz.shape[0], dtype=dtype_full)
attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
elements[:] = list(map(tuple, attributes))
el = PlyElement.describe(elements, 'vertex')
PlyData([el]).write(path)
def save_frame_ply(self, path, t):
mkdir_p(os.path.dirname(path))
xyzs, rotation, scale, opacities = self.get_deformed_everything(t)
xyz = xyzs.detach().cpu().numpy()
normals = np.zeros_like(xyz)
f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
opacities = opacities.detach().cpu().numpy()
scale = scale.detach().cpu().numpy()
rotation = rotation.detach().cpu().numpy()
dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
elements = np.empty(xyz.shape[0], dtype=dtype_full)
attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
elements[:] = list(map(tuple, attributes))
el = PlyElement.describe(elements, 'vertex')
PlyData([el]).write(path)
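    # Usage sketch (illustrative): exporting one PLY per frame of the deformed
    # sequence. self.T frames are assumed, matching the normalisation in
    # get_deformed_everything; the output directory is an example value.
    #
    #   for t in range(model.T):
    #       model.save_frame_ply(os.path.join("output/frames", f"frame_{t:03d}.ply"), t)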
def reset_opacity(self):
opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01))
optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity")
self._opacity = optimizable_tensors["opacity"]
def load_ply(self, path):
self.spatial_lr_scale = 1
plydata = PlyData.read(path)
xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
np.asarray(plydata.elements[0]["y"]),
np.asarray(plydata.elements[0]["z"])), axis=1)
opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]
features_dc = np.zeros((xyz.shape[0], 3, 1))
features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])
extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1]))
assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3
features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
for idx, attr_name in enumerate(extra_f_names):
features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
# Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1))
scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
scales = np.zeros((xyz.shape[0], len(scale_names)))
for idx, attr_name in enumerate(scale_names):
scales[:, idx] = np.asarray(plydata.elements[0][attr_name])
rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
rots = np.zeros((xyz.shape[0], len(rot_names)))
for idx, attr_name in enumerate(rot_names):
rots[:, idx] = np.asarray(plydata.elements[0][attr_name])
self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
self.active_sh_degree = self.max_sh_degree
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
self._deformation = self._deformation.to("cuda")
self._deformation_table = torch.gt(torch.ones((self.get_xyz.shape[0]),device="cuda"),0) # everything deformed
print(self._xyz.shape)
def replace_tensor_to_optimizer(self, tensor, name):
optimizable_tensors = {}
for group in self.optimizer.param_groups:
if group["name"] == name:
stored_state = self.optimizer.state.get(group['params'][0], None)
stored_state["exp_avg"] = torch.zeros_like(tensor)
stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
del self.optimizer.state[group['params'][0]]
group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
self.optimizer.state[group['params'][0]] = stored_state
optimizable_tensors[group["name"]] = group["params"][0]
return optimizable_tensors
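    # Note: this is the mechanism behind reset_opacity() above -- the opacity tensor
    # is swapped for a fresh nn.Parameter while the Adam moments of that group are
    # zeroed, so the optimizer state stays consistent with the replaced parameter.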
def _prune_optimizer(self, mask):
optimizable_tensors = {}
for group in self.optimizer.param_groups:
if len(group["params"]) > 1:
continue
stored_state = self.optimizer.state.get(group['params'][0], None)
if stored_state is not None:
stored_state["exp_avg"] = stored_state["exp_avg"][mask]
stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
del self.optimizer.state[group['params'][0]]
group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
self.optimizer.state[group['params'][0]] = stored_state
optimizable_tensors[group["name"]] = group["params"][0]
else:
group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
optimizable_tensors[group["name"]] = group["params"][0]
return optimizable_tensors
def prune_points(self, mask):
valid_points_mask = ~mask
optimizable_tensors = self._prune_optimizer(valid_points_mask)
self._xyz = optimizable_tensors["xyz"]
self._features_dc = optimizable_tensors["f_dc"]
self._features_rest = optimizable_tensors["f_rest"]
self._opacity = optimizable_tensors["opacity"]
self._scaling = optimizable_tensors["scaling"]
self._rotation = optimizable_tensors["rotation"]
self._deformation_accum = self._deformation_accum[valid_points_mask]
self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
self._deformation_table = self._deformation_table[valid_points_mask]
self.denom = self.denom[valid_points_mask]
self.max_radii2D = self.max_radii2D[valid_points_mask]
def cat_tensors_to_optimizer(self, tensors_dict):
optimizable_tensors = {}
for group in self.optimizer.param_groups:
            if len(group["params"]) > 1:
                continue
assert len(group["params"]) == 1
extension_tensor = tensors_dict[group["name"]]
stored_state = self.optimizer.state.get(group['params'][0], None)
if stored_state is not None:
stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)
del self.optimizer.state[group['params'][0]]
group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
self.optimizer.state[group['params'][0]] = stored_state
optimizable_tensors[group["name"]] = group["params"][0]
else:
group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
optimizable_tensors[group["name"]] = group["params"][0]
return optimizable_tensors
def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation, new_deformation_table):
d = {"xyz": new_xyz,
"f_dc": new_features_dc,
"f_rest": new_features_rest,
"opacity": new_opacities,
"scaling" : new_scaling,
"rotation" : new_rotation,
# "deformation": new_deformation
}
optimizable_tensors = self.cat_tensors_to_optimizer(d)
self._xyz = optimizable_tensors["xyz"]
self._features_dc = optimizable_tensors["f_dc"]
self._features_rest = optimizable_tensors["f_rest"]
self._opacity = optimizable_tensors["opacity"]
self._scaling = optimizable_tensors["scaling"]
self._rotation = optimizable_tensors["rotation"]
# self._deformation = optimizable_tensors["deformation"]
self._deformation_table = torch.cat([self._deformation_table,new_deformation_table],-1)
self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self._deformation_accum = torch.zeros((self.get_xyz.shape[0], 3), device="cuda")
self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
n_init_points = self.get_xyz.shape[0]
# Extract points that satisfy the gradient condition
padded_grad = torch.zeros((n_init_points), device="cuda")
padded_grad[:grads.shape[0]] = grads.squeeze()
selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)
if not selected_pts_mask.any():
return
stds = self.get_scaling[selected_pts_mask].repeat(N,1)
        means = torch.zeros((stds.size(0), 3), device="cuda")
samples = torch.normal(mean=means, std=stds)
rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
new_deformation_table = self._deformation_table[selected_pts_mask].repeat(N)
self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation, new_deformation_table)
prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
self.prune_points(prune_filter)
def densify_and_clone(self, grads, grad_threshold, scene_extent):
# Extract points that satisfy the gradient condition
selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
selected_pts_mask = torch.logical_and(selected_pts_mask,
torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
new_xyz = self._xyz[selected_pts_mask]
# - 0.001 * self._xyz.grad[selected_pts_mask]
new_features_dc = self._features_dc[selected_pts_mask]
new_features_rest = self._features_rest[selected_pts_mask]
new_opacities = self._opacity[selected_pts_mask]
new_scaling = self._scaling[selected_pts_mask]
new_rotation = self._rotation[selected_pts_mask]
new_deformation_table = self._deformation_table[selected_pts_mask]
self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation, new_deformation_table)
def prune(self, min_opacity, extent, max_screen_size):
prune_mask = (self.get_opacity < min_opacity).squeeze()
# prune_mask_2 = torch.logical_and(self.get_opacity <= inverse_sigmoid(0.101 , dtype=torch.float, device="cuda"), self.get_opacity >= inverse_sigmoid(0.999 , dtype=torch.float, device="cuda"))
# prune_mask = torch.logical_or(prune_mask, prune_mask_2)
# deformation_sum = abs(self._deformation).sum(dim=-1).mean(dim=-1)
# deformation_mask = (deformation_sum < torch.quantile(deformation_sum, torch.tensor([0.5]).to("cuda")))
# prune_mask = prune_mask & deformation_mask
if max_screen_size:
big_points_vs = self.max_radii2D > max_screen_size
big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
            prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
self.prune_points(prune_mask)
torch.cuda.empty_cache()
def densify(self, max_grad, min_opacity, extent, max_screen_size):
grads = self.xyz_gradient_accum / self.denom
grads[grads.isnan()] = 0.0
self.densify_and_clone(grads, max_grad, extent)
self.densify_and_split(grads, max_grad, extent)
def standard_constaint(self):
means3D = self._xyz.detach()
scales = self._scaling.detach()
rotations = self._rotation.detach()
opacity = self._opacity.detach()
time = torch.tensor(0).to("cuda").repeat(means3D.shape[0],1)
means3D_deform, scales_deform, rotations_deform, _ = self._deformation(means3D, scales, rotations, opacity, time)
position_error = (means3D_deform - means3D)**2
rotation_error = (rotations_deform - rotations)**2
        scaling_error = (scales_deform - scales)**2
        return position_error.mean() + rotation_error.mean() + scaling_error.mean()
def add_densification_stats(self, viewspace_point_tensor, update_filter):
self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor[update_filter,:2], dim=-1, keepdim=True)
self.denom[update_filter] += 1
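    # Usage sketch (illustrative): in the training loop the screen-space gradient
    # statistics are typically accumulated right after the backward pass, using the
    # viewspace points, radii and visibility filter returned by the rasterizer
    # (placeholder names below). Note that this variant indexes the passed tensor
    # directly, so the caller supplies the gradient tensor, not the parameter.
    #
    #   loss.backward()
    #   model.max_radii2D[visibility_filter] = torch.max(model.max_radii2D[visibility_filter],
    #                                                    radii[visibility_filter])
    #   model.add_densification_stats(viewspace_point_tensor.grad, visibility_filter)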
@torch.no_grad()
def update_deformation_table(self,threshold):
# print("origin deformation point nums:",self._deformation_table.sum())
self._deformation_table = torch.gt(self._deformation_accum.max(dim=-1).values/100,threshold)
def print_deformation_weight_grad(self):
for name, weight in self._deformation.named_parameters():
if weight.requires_grad:
if weight.grad is None:
print(name," :",weight.grad)
else:
if weight.grad.mean() != 0:
print(name," :",weight.grad.mean(), weight.grad.min(), weight.grad.max())
print("-"*50)
def _plane_regulation(self):
multi_res_grids = self._deformation.deformation_net.grid.grids
total = 0
# model.grids is 6 x [1, rank * F_dim, reso, reso]
for grids in multi_res_grids:
if len(grids) == 3:
time_grids = []
else:
                # indices 0, 1, 3 select the purely spatial planes in the 6-plane layout
                time_grids = [0, 1, 3]
for grid_id in time_grids:
total += compute_plane_smoothness(grids[grid_id])
return total
def _time_regulation(self):
multi_res_grids = self._deformation.deformation_net.grid.grids
total = 0
# model.grids is 6 x [1, rank * F_dim, reso, reso]
for grids in multi_res_grids:
if len(grids) == 3:
time_grids = []
else:
time_grids =[2, 4, 5]
for grid_id in time_grids:
total += compute_plane_smoothness(grids[grid_id])
return total
def _l1_regulation(self):
# model.grids is 6 x [1, rank * F_dim, reso, reso]
multi_res_grids = self._deformation.deformation_net.grid.grids
total = 0.0
for grids in multi_res_grids:
if len(grids) == 3:
continue
else:
# These are the spatiotemporal grids
spatiotemporal_grids = [2, 4, 5]
for grid_id in spatiotemporal_grids:
total += torch.abs(1 - grids[grid_id]).mean()
return total
def compute_regulation(self, time_smoothness_weight, l1_time_planes_weight, plane_tv_weight):
return plane_tv_weight * self._plane_regulation() + time_smoothness_weight * self._time_regulation() + l1_time_planes_weight * self._l1_regulation()
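    # Usage sketch (illustrative): the plane/time regularisers are meant to be added
    # to the main loss; the weights below are example values only.
    #
    #   reg = model.compute_regulation(time_smoothness_weight=0.01,
    #                                  l1_time_planes_weight=0.0001,
    #                                  plane_tv_weight=0.0001)
    #   loss = loss + reg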
def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
grads = self.xyz_gradient_accum / self.denom
grads[grads.isnan()] = 0.0
self.densify_and_clone(grads, max_grad, extent)
self.densify_and_split(grads, max_grad, extent)
prune_mask = (self.get_opacity < min_opacity).squeeze()
if max_screen_size:
big_points_vs = self.max_radii2D > max_screen_size
big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
self.prune_points(prune_mask)
torch.cuda.empty_cache()
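# Densification-schedule sketch (illustrative): during training, densify_and_prune is
# usually invoked periodically between a start and end iteration, with an occasional
# opacity reset. All thresholds and intervals below are example values, not settings
# prescribed by this file.
#
#   if densify_from_iter < iteration < densify_until_iter:
#       if iteration % densification_interval == 0:
#           model.densify_and_prune(max_grad=0.0002, min_opacity=0.005,
#                                   extent=scene_extent, max_screen_size=20)
#       if iteration % opacity_reset_interval == 0:
#           model.reset_opacity()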