| file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|---|---|---|---|
| stringlengths 21–202 | stringlengths 13–1.02M | int64 13–1.02M | stringclasses 9 values | float64 5.43–98.5 | int64 12–993 | float64 0.27–0.91 |
NVlabs/ACID/ACID/src/training.py | import numpy as np
from collections import defaultdict
from tqdm import tqdm
class BaseTrainer(object):
''' Base trainer class.
'''
def evaluate(self, val_loader):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
return eval_dict
def train_step(self, *args, **kwargs):
''' Performs a training step.
'''
raise NotImplementedError
def eval_step(self, *args, **kwargs):
''' Performs an evaluation step.
'''
raise NotImplementedError
def visualize(self, *args, **kwargs):
''' Performs visualization.
'''
raise NotImplementedError
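# Hypothetical usage sketch (not part of the original file): evaluate() only
# requires a subclass to return a dict of scalars from eval_step(); the values
# are then averaged per key over the whole loader.
class _DemoTrainer(BaseTrainer):
    def eval_step(self, data):
        return {'loss': float(data['x'].mean())}
# _DemoTrainer().evaluate([{'x': np.ones(4)}, {'x': np.zeros(4)}]) -> {'loss': 0.5}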
| 988 | Python | 23.724999 | 65 | 0.571862 |
NVlabs/ACID/ACID/src/common.py | # import multiprocessing
import torch
import numpy as np
import math
def compute_iou(occ1, occ2):
''' Computes the Intersection over Union (IoU) value for two sets of
occupancy values.
Args:
occ1 (tensor): first set of occupancy values
occ2 (tensor): second set of occupancy values
'''
occ1 = np.asarray(occ1)
occ2 = np.asarray(occ2)
# Put all data in second dimension
# Also works for 1-dimensional data
if occ1.ndim >= 2:
occ1 = occ1.reshape(occ1.shape[0], -1)
if occ2.ndim >= 2:
occ2 = occ2.reshape(occ2.shape[0], -1)
# Convert to boolean values
occ1 = (occ1 >= 0.5)
occ2 = (occ2 >= 0.5)
# Compute IOU
area_union = (occ1 | occ2).astype(np.float32).sum(axis=-1)
area_intersect = (occ1 & occ2).astype(np.float32).sum(axis=-1)
iou = (area_intersect / area_union)
return iou
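def _demo_compute_iou():
    # Hypothetical usage sketch (not part of the original file): two batches of
    # 4 occupancy values each; inputs are thresholded at 0.5 internally.
    occ_pred = np.array([[1, 1, 0, 0]])
    occ_gt = np.array([[1, 0, 1, 0]])
    # intersection has 1 point, union has 3 points -> IoU = 1/3
    return compute_iou(occ_pred, occ_gt)  # array([0.33333334])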
def chamfer_distance(points1, points2, give_id=False):
''' Returns the chamfer distance for the sets of points.
Args:
        points1 (tensor): first point set, shape (batch, T, 3)
        points2 (tensor): second point set, shape (batch, T, 3)
        give_id (bool): whether to return the IDs of nearest points
'''
return chamfer_distance_naive(points1, points2)
def chamfer_distance_naive(points1, points2):
''' Naive implementation of the Chamfer distance.
Args:
        points1 (tensor): first point set, shape (batch, T, 3)
        points2 (tensor): second point set, shape (batch, T, 3)
'''
assert(points1.size() == points2.size())
batch_size, T, _ = points1.size()
points1 = points1.view(batch_size, T, 1, 3)
points2 = points2.view(batch_size, 1, T, 3)
distances = (points1 - points2).pow(2).sum(-1)
chamfer1 = distances.min(dim=1)[0].mean(dim=1)
chamfer2 = distances.min(dim=2)[0].mean(dim=1)
chamfer = chamfer1 + chamfer2
return chamfer
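def _demo_chamfer_distance():
    # Hypothetical usage sketch (not part of the original file): the naive
    # implementation expects torch tensors of shape (batch, T, 3) with the same
    # number of points T in both sets.
    pts_a = torch.zeros(1, 2, 3)
    pts_b = torch.ones(1, 2, 3)
    # every nearest-neighbour squared distance is 3, so the symmetric sum is 6
    return chamfer_distance_naive(pts_a, pts_b)  # tensor([6.])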
def make_3d_grid(bb_min, bb_max, shape):
''' Makes a 3D grid.
Args:
bb_min (tuple): bounding box minimum
bb_max (tuple): bounding box maximum
shape (tuple): output shape
'''
size = shape[0] * shape[1] * shape[2]
pxs = torch.linspace(bb_min[0], bb_max[0], shape[0])
pys = torch.linspace(bb_min[1], bb_max[1], shape[1])
pzs = torch.linspace(bb_min[2], bb_max[2], shape[2])
pxs = pxs.view(-1, 1, 1).expand(*shape).contiguous().view(size)
pys = pys.view(1, -1, 1).expand(*shape).contiguous().view(size)
pzs = pzs.view(1, 1, -1).expand(*shape).contiguous().view(size)
p = torch.stack([pxs, pys, pzs], dim=1)
return p
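def _demo_make_3d_grid():
    # Hypothetical usage sketch (not part of the original file): a 2x2x2 grid
    # spanning the unit cube centred at the origin, returned as (8, 3) xyz rows.
    p = make_3d_grid((-0.5,) * 3, (0.5,) * 3, (2, 2, 2))
    return p.shape  # torch.Size([8, 3])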
def transform_points(points, transform):
''' Transforms points with regard to passed camera information.
Args:
points (tensor): points tensor
transform (tensor): transformation matrices
'''
assert(points.size(2) == 3)
assert(transform.size(1) == 3)
assert(points.size(0) == transform.size(0))
if transform.size(2) == 4:
R = transform[:, :, :3]
t = transform[:, :, 3:]
points_out = points @ R.transpose(1, 2) + t.transpose(1, 2)
elif transform.size(2) == 3:
K = transform
points_out = points @ K.transpose(1, 2)
return points_out
def b_inv(b_mat):
''' Performs batch matrix inversion.
Arguments:
b_mat: the batch of matrices that should be inverted
'''
eye = b_mat.new_ones(b_mat.size(-1)).diag().expand_as(b_mat)
    # torch.gesv was removed from recent PyTorch; torch.linalg.solve(A, B)
    # performs the equivalent batched solve of A @ X = B.
    b_inv = torch.linalg.solve(b_mat, eye)
    return b_inv
def project_to_camera(points, transform):
''' Projects points to the camera plane.
Args:
points (tensor): points tensor
transform (tensor): transformation matrices
'''
p_camera = transform_points(points, transform)
p_camera = p_camera[..., :2] / p_camera[..., 2:]
return p_camera
def fix_Rt_camera(Rt, loc, scale):
''' Fixes Rt camera matrix.
Args:
Rt (tensor): Rt camera matrix
loc (tensor): location
scale (float): scale
'''
# Rt is B x 3 x 4
# loc is B x 3 and scale is B
batch_size = Rt.size(0)
R = Rt[:, :, :3]
t = Rt[:, :, 3:]
scale = scale.view(batch_size, 1, 1)
R_new = R * scale
t_new = t + R @ loc.unsqueeze(2)
Rt_new = torch.cat([R_new, t_new], dim=2)
assert(Rt_new.size() == (batch_size, 3, 4))
return Rt_new
def normalize_coordinate(p, padding=0.1, plane='xz'):
''' Normalize coordinate to [0, 1] for unit cube experiments
Args:
p (tensor): point
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
plane (str): plane feature type, ['xz', 'xy', 'yz']
'''
if plane == 'xz':
xy = p[:, :, [0, 2]]
elif plane =='xy':
xy = p[:, :, [0, 1]]
else:
xy = p[:, :, [1, 2]]
xy_new = xy / (1 + padding + 10e-6) # (-0.5, 0.5)
xy_new = xy_new + 0.5 # range (0, 1)
    # If there are outliers out of the range
if xy_new.max() >= 1:
xy_new[xy_new >= 1] = 1 - 10e-6
if xy_new.min() < 0:
xy_new[xy_new < 0] = 0.0
return xy_new
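def _demo_normalize_coordinate():
    # Hypothetical usage sketch (not part of the original file): points from the
    # padded unit cube [-0.55, 0.55] are projected onto a plane and squashed
    # into [0, 1).
    p = torch.tensor([[[-0.55, 0.0, 0.55]]])  # (batch, n_points, 3)
    return normalize_coordinate(p, padding=0.1, plane='xz')  # ~ [[[0.0, 1.0]]]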
def normalize_3d_coordinate(p, padding=0.1):
''' Normalize coordinate to [0, 1] for unit cube experiments.
Corresponds to our 3D model
Args:
p (tensor): point
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
p_nor = p / (1 + padding + 10e-4) # (-0.5, 0.5)
p_nor = p_nor + 0.5 # range (0, 1)
    # If there are outliers out of the range
if p_nor.max() >= 1:
p_nor[p_nor >= 1] = 1 - 10e-4
if p_nor.min() < 0:
p_nor[p_nor < 0] = 0.0
return p_nor
def normalize_coord(p, vol_range, plane='xz'):
''' Normalize coordinate to [0, 1] for sliding-window experiments
Args:
p (tensor): point
vol_range (numpy array): volume boundary
plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume
'''
p[:, 0] = (p[:, 0] - vol_range[0][0]) / (vol_range[1][0] - vol_range[0][0])
p[:, 1] = (p[:, 1] - vol_range[0][1]) / (vol_range[1][1] - vol_range[0][1])
p[:, 2] = (p[:, 2] - vol_range[0][2]) / (vol_range[1][2] - vol_range[0][2])
if plane == 'xz':
x = p[:, [0, 2]]
elif plane =='xy':
x = p[:, [0, 1]]
elif plane =='yz':
x = p[:, [1, 2]]
else:
x = p
return x
def coordinate2index(x, reso, coord_type='2d'):
    ''' Converts normalized coordinates in [0, 1] into discrete indices on a
    2D plane or 3D grid at the given resolution.
Args:
x (tensor): coordinate
reso (int): defined resolution
coord_type (str): coordinate type
'''
x = (x * reso).long()
if coord_type == '2d': # plane
index = x[:, :, 0] + reso * x[:, :, 1]
elif coord_type == '3d': # grid
index = x[:, :, 0] + reso * (x[:, :, 1] + reso * x[:, :, 2])
index = index[:, None, :]
return index
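def _demo_coordinate2index():
    # Hypothetical usage sketch (not part of the original file): normalized 2D
    # plane coordinates are discretised to a reso x reso grid and flattened
    # row-major as x + reso * y.
    x = torch.tensor([[[0.5, 0.25]]])  # (batch, n_points, 2), values in [0, 1)
    return coordinate2index(x, reso=4, coord_type='2d')  # tensor([[[6]]]), i.e. 2 + 4*1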
def coord2index(p, vol_range, reso=None, plane='xz'):
    ''' Normalizes points to [0, 1] for sliding-window experiments and converts
    them into discrete plane/grid indices at the given resolution.
Args:
p (tensor): points
vol_range (numpy array): volume boundary
reso (int): defined resolution
plane (str): feature type, ['xz', 'xy', 'yz'] - canonical planes; ['grid'] - grid volume
'''
# normalize to [0, 1]
x = normalize_coord(p, vol_range, plane=plane)
if isinstance(x, np.ndarray):
x = np.floor(x * reso).astype(int)
else: #* pytorch tensor
x = (x * reso).long()
if x.shape[1] == 2:
index = x[:, 0] + reso * x[:, 1]
index[index > reso**2] = reso**2
elif x.shape[1] == 3:
index = x[:, 0] + reso * (x[:, 1] + reso * x[:, 2])
index[index > reso**3] = reso**3
return index[None]
def update_reso(reso, depth):
''' Update the defined resolution so that UNet can process.
Args:
reso (int): defined resolution
depth (int): U-Net number of layers
'''
base = 2**(int(depth) - 1)
    if not (reso / base).is_integer(): # when this is not an integer, U-Net raises a dimension error
for i in range(base):
if ((reso + i) / base).is_integer():
reso = reso + i
break
return reso
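def _demo_update_reso():
    # Hypothetical usage sketch (not part of the original file): with a 5-level
    # U-Net the resolution must be a multiple of 2**(5-1) = 16, so 50 is rounded
    # up to the next multiple, 64.
    return update_reso(50, depth=5)  # 64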
def decide_total_volume_range(query_vol_metric, recep_field, unit_size, unet_depth):
    ''' Computes the input and query volume ranges (and the corresponding resolution) so that the U-Net can process the input.
Args:
query_vol_metric (numpy array): query volume size
recep_field (int): defined the receptive field for U-Net
unit_size (float): the defined voxel size
unet_depth (int): U-Net number of layers
'''
reso = query_vol_metric / unit_size + recep_field - 1
reso = update_reso(int(reso), unet_depth) # make sure input reso can be processed by UNet
input_vol_metric = reso * unit_size
p_c = np.array([0.0, 0.0, 0.0]).astype(np.float32)
lb_input_vol, ub_input_vol = p_c - input_vol_metric/2, p_c + input_vol_metric/2
lb_query_vol, ub_query_vol = p_c - query_vol_metric/2, p_c + query_vol_metric/2
input_vol = [lb_input_vol, ub_input_vol]
query_vol = [lb_query_vol, ub_query_vol]
# handle the case when resolution is too large
if reso > 10000:
reso = 1
return input_vol, query_vol, reso
def add_key(base, new, base_name, new_name, device=None):
''' Add new keys to the given input
Args:
base (tensor): inputs
new (tensor): new info for the inputs
base_name (str): name for the input
new_name (str): name for the new info
device (device): pytorch device
'''
if (new is not None) and (isinstance(new, dict)):
if device is not None:
for key in new.keys():
new[key] = new[key].to(device)
base = {base_name: base,
new_name: new}
return base
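def _demo_add_key():
    # Hypothetical usage sketch (not part of the original file): the base input
    # and auxiliary info are wrapped into a dict under the given names.
    base = torch.rand(2, 128, 3)
    crop_info = {'loc': torch.zeros(2, 3), 'scale': torch.ones(2)}
    return add_key(base, crop_info, 'points', 'crop_info')
    # -> {'points': <(2, 128, 3) tensor>, 'crop_info': {'loc': ..., 'scale': ...}}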
class map2local(object):
    ''' Maps global points into local per-voxel coordinates and applies a positional encoding.
Args:
s (float): the defined voxel size
pos_encoding (str): method for the positional encoding, linear|sin_cos
'''
def __init__(self, s, pos_encoding='linear'):
super().__init__()
self.s = s
self.pe = positional_encoding(basis_function=pos_encoding)
def __call__(self, p):
        p = torch.remainder(p, self.s) / self.s # always positive
# p = torch.fmod(p, self.s) / self.s # same sign as input p!
p = self.pe(p)
return p
class positional_encoding(object):
''' Positional Encoding (presented in NeRF)
Args:
basis_function (str): basis function
'''
def __init__(self, basis_function='sin_cos'):
super().__init__()
self.func = basis_function
L = 10
freq_bands = 2.**(np.linspace(0, L-1, L))
self.freq_bands = freq_bands * math.pi
def __call__(self, p):
if self.func == 'sin_cos':
out = []
            p = 2.0 * p - 1.0 # change to the range [-1, 1]
for freq in self.freq_bands:
out.append(torch.sin(freq * p))
out.append(torch.cos(freq * p))
p = torch.cat(out, dim=2)
return p
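def _demo_positional_encoding():
    # Hypothetical usage sketch (not part of the original file): with the
    # 'sin_cos' basis and L=10 frequency bands, each input dimension expands
    # into 2*10 features, so (B, N, 3) points become (B, N, 60).
    pe = positional_encoding(basis_function='sin_cos')
    p = torch.rand(2, 5, 3)
    return pe(p).shape  # torch.Size([2, 5, 60])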
| 11,186 | Python | 29.399456 | 109 | 0.562846 |
NVlabs/ACID/ACID/src/config.py | import yaml
from torchvision import transforms
from src import data
from src import conv_onet
method_dict = {
'conv_onet': conv_onet
}
# General config
def load_config(path, default_path=None):
''' Loads config file.
Args:
path (str): path to config file
        default_path (str): path to the default config file, used when the config does not inherit from another one
'''
# Load configuration from file itself
with open(path, 'r') as f:
        cfg_special = yaml.load(f, Loader=yaml.FullLoader)
# Check if we should inherit from a config
inherit_from = cfg_special.get('inherit_from')
# If yes, load this config first as default
# If no, use the default_path
if inherit_from is not None:
cfg = load_config(inherit_from, default_path)
elif default_path is not None:
with open(default_path, 'r') as f:
            cfg = yaml.load(f, Loader=yaml.FullLoader)
else:
cfg = dict()
# Include main configuration
update_recursive(cfg, cfg_special)
return cfg
def update_recursive(dict1, dict2):
''' Update two config dictionaries recursively.
Args:
dict1 (dict): first dictionary to be updated
        dict2 (dict): second dictionary, whose entries should be used
'''
for k, v in dict2.items():
if k not in dict1:
dict1[k] = dict()
if isinstance(v, dict):
update_recursive(dict1[k], v)
else:
dict1[k] = v
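def _demo_update_recursive():
    # Hypothetical usage sketch (not part of the original file): nested keys of
    # the second dict override or extend the first one in place, which is how an
    # inherited config is merged with an experiment config.
    cfg = {'training': {'lr': 1e-3, 'batch_size': 32}}
    override = {'training': {'lr': 1e-4}, 'test': {'threshold': 0.5}}
    update_recursive(cfg, override)
    return cfg  # {'training': {'lr': 0.0001, 'batch_size': 32}, 'test': {'threshold': 0.5}}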
# Models
def get_model(cfg, device=None, dataset=None):
''' Returns the model instance.
Args:
cfg (dict): config dictionary
device (device): pytorch device
dataset (dataset): dataset
'''
method = cfg['method']
model = method_dict[method].config.get_model(
cfg, device=device, dataset=dataset)
return model
# Trainer
def get_trainer(model, optimizer, cfg, device):
''' Returns a trainer instance.
Args:
model (nn.Module): the model which is used
optimizer (optimizer): pytorch optimizer
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
trainer = method_dict[method].config.get_trainer(
model, optimizer, cfg, device)
return trainer
# Generator for final mesh extraction
def get_generator(model, cfg, device):
''' Returns a generator instance.
Args:
model (nn.Module): the model which is used
cfg (dict): config dictionary
device (device): pytorch device
'''
method = cfg['method']
generator = method_dict[method].config.get_generator(model, cfg, device)
return generator
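def _demo_config_workflow():
    # Hypothetical usage sketch (not part of the original file); the yaml paths
    # below are placeholders. Any config with a 'method' key (here 'conv_onet')
    # goes through the same load_config -> get_model -> get_trainer flow.
    import torch
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    cfg = load_config('configs/example.yaml', 'configs/default.yaml')
    model = get_model(cfg, device=device)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    return get_trainer(model, optimizer, cfg, device)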
| 2,573 | Python | 23.990291 | 76 | 0.624563 |
NVlabs/ACID/ACID/src/checkpoints.py | import os
import urllib
import torch
from torch.utils import model_zoo
class CheckpointIO(object):
''' CheckpointIO class.
It handles saving and loading checkpoints.
Args:
checkpoint_dir (str): path where checkpoints are saved
'''
def __init__(self, checkpoint_dir='./chkpts', **kwargs):
self.module_dict = kwargs
self.checkpoint_dir = checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
def register_modules(self, **kwargs):
''' Registers modules in current module dictionary.
'''
self.module_dict.update(kwargs)
def save(self, filename, **kwargs):
''' Saves the current module dictionary.
Args:
filename (str): name of output file
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
outdict = kwargs
for k, v in self.module_dict.items():
outdict[k] = v.state_dict()
torch.save(outdict, filename)
def load(self, filename):
'''Loads a module dictionary from local file or url.
Args:
filename (str): name of saved module dictionary
'''
if is_url(filename):
return self.load_url(filename)
else:
return self.load_file(filename)
def load_file(self, filename):
'''Loads a module dictionary from file.
Args:
filename (str): name of saved module dictionary
'''
if not os.path.isabs(filename):
filename = os.path.join(self.checkpoint_dir, filename)
if os.path.exists(filename):
print(filename)
print('=> Loading checkpoint from local file...')
state_dict = torch.load(filename)
scalars = self.parse_state_dict(state_dict)
return scalars
        else:
            # Note: a missing checkpoint is signalled with FileExistsError here;
            # callers catch this exception type when no checkpoint is found.
            raise FileExistsError
def load_url(self, url):
'''Load a module dictionary from url.
Args:
url (str): url to saved model
'''
print(url)
print('=> Loading checkpoint from url...')
state_dict = model_zoo.load_url(url, progress=True)
scalars = self.parse_state_dict(state_dict)
return scalars
def parse_state_dict(self, state_dict):
'''Parse state_dict of model and return scalars.
Args:
state_dict (dict): State dict of model
'''
for k, v in self.module_dict.items():
if k in state_dict:
v.load_state_dict(state_dict[k])
else:
print('Warning: Could not find %s in checkpoint!' % k)
scalars = {k: v for k, v in state_dict.items()
if k not in self.module_dict}
return scalars
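def _demo_checkpoint_io():
    # Hypothetical usage sketch (not part of the original file): modules
    # registered with CheckpointIO are saved/restored by name, and extra keyword
    # scalars passed to save() round-trip through load().
    model = torch.nn.Linear(3, 1)
    ckpt = CheckpointIO('./chkpts_demo', model=model)
    ckpt.save('model.pt', epoch_it=10, it=2000)
    return ckpt.load('model.pt')  # {'epoch_it': 10, 'it': 2000}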
def is_url(url):
scheme = urllib.parse.urlparse(url).scheme
    return scheme in ('http', 'https')
| 2,962 | Python | 28.63 | 70 | 0.568535 |
NVlabs/ACID/ACID/src/layers.py | import torch
import torch.nn as nn
# Resnet Blocks
class ResnetBlockFC(nn.Module):
''' Fully connected ResNet Block class.
Args:
size_in (int): input dimension
size_out (int): output dimension
size_h (int): hidden dimension
'''
def __init__(self, size_in, size_out=None, size_h=None):
super().__init__()
# Attributes
if size_out is None:
size_out = size_in
if size_h is None:
size_h = min(size_in, size_out)
self.size_in = size_in
self.size_h = size_h
self.size_out = size_out
# Submodules
self.fc_0 = nn.Linear(size_in, size_h)
self.fc_1 = nn.Linear(size_h, size_out)
self.actvn = nn.ReLU()
if size_in == size_out:
self.shortcut = None
else:
self.shortcut = nn.Linear(size_in, size_out, bias=False)
# Initialization
nn.init.zeros_(self.fc_1.weight)
def forward(self, x):
net = self.fc_0(self.actvn(x))
dx = self.fc_1(self.actvn(net))
if self.shortcut is not None:
x_s = self.shortcut(x)
else:
x_s = x
        return x_s + dx
| 1,203 | Python | 24.083333 | 68 | 0.532835 |
NVlabs/ACID/ACID/src/conv_onet/training.py | import os
import numpy as np
import torch
from torch.nn import functional as F
from src.common import compute_iou
from src.utils import common_util, plushsim_util
from src.training import BaseTrainer
from sklearn.metrics import roc_curve
from numpy import interp  # scipy.interp was an alias of numpy.interp and has been removed from SciPy
import matplotlib.pyplot as plt
from collections import defaultdict
from tqdm import tqdm
from src.utils.plushsim_util import find_nn_cpu, find_emd_cpu
class PlushTrainer(BaseTrainer):
''' Trainer object for the Occupancy Network.
Args:
model (nn.Module): Occupancy Network model
optimizer (optimizer): pytorch optimizer object
device (device): pytorch device
input_type (str): input type
vis_dir (str): visualization directory
threshold (float): threshold value
eval_sample (bool): whether to evaluate samples
'''
def __init__(self, model, optimizer, cfg, device=None, vis_dir=None, ):
self.model = model
self.optimizer = optimizer
self.device = device
self.vis_dir = vis_dir
self.threshold = cfg['test']['threshold']
self.pos_weight = torch.FloatTensor([cfg['training']['pos_weight']]).to(device)
if 'corr_dim' in cfg['model']['decoder_kwargs'] and cfg['model']['decoder_kwargs']['corr_dim'] > 0:
self.contrastive_threshold = cfg['loss']['contrastive_threshold']
self.use_geodesics = cfg['loss']['use_geodesics']
self.loss_type = cfg['loss']['type']
self.contrastive_coeff_neg = cfg['loss'].get('contrastive_coeff_neg', 1.)
self.contrastive_neg_thres = cfg['loss'].get('contrastive_neg_thres', 1.)
self.contrastive_coeff_pos = cfg['loss'].get('contrastive_coeff_pos', 1.)
self.contrastive_pos_thres= cfg['loss'].get('contrastive_pos_thres', 0.1)
self.scale_with_geodesics = cfg['loss'].get('scale_with_geodesics', False)
if vis_dir is not None and not os.path.exists(vis_dir):
os.makedirs(vis_dir)
self.max_thres = 0.2
self.discretization = 1000
self.base_fpr = np.linspace(0,1,101)
self.base_thres = np.linspace(0,self.max_thres,self.discretization)
def train_step(self, data, it):
''' Performs a training step.
Args:
data (dict): data dictionary
'''
self.model.train()
self.optimizer.zero_grad()
losses = self.compute_loss(data, it)
loss = 0
for v in losses.values():
loss += v
loss.backward()
self.optimizer.step()
return {k:v.item() for k,v in losses.items()}
def evaluate(self, val_loader):
''' Performs an evaluation.
Args:
val_loader (dataloader): pytorch dataloader
'''
eval_list = defaultdict(list)
agg_list = defaultdict(list)
for data in tqdm(val_loader):
eval_step_dict, agg_step_dict = self.eval_step(data)
for k, v in eval_step_dict.items():
eval_list[k].append(v)
for k, v in agg_step_dict.items():
agg_list[k].append(v)
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
# - shape completion ROC
figs = {}
if 'tpr' in agg_list:
figs['OCC_ROC'] = self._get_shape_completion_ROC(agg_list['tpr'])
if 'fmr_hits' in agg_list:
fmr = np.array(agg_list['fmr_hits'])
idx01 = int(0.01 * (self.discretization-1) / self.max_thres)
idx02 = int(0.02 * (self.discretization-1) / self.max_thres)
idx05 = int(0.05 * (self.discretization-1) / self.max_thres)
idx10 = int(0.10 * (self.discretization-1) / self.max_thres)
eval_dict['FMR.01m_5%'] = np.mean(fmr[:,idx01] > 0.05)
eval_dict['FMR.02m_5%'] = np.mean(fmr[:,idx02] > 0.05)
eval_dict['FMR.05m_5%'] = np.mean(fmr[:,idx05] > 0.05)
eval_dict['FMR.10m_5%'] = np.mean(fmr[:,idx10] > 0.05)
fmr_std = fmr.std(axis=0)
eval_dict['FMR.01m_5%_std'] = fmr_std[idx01]
eval_dict['FMR.02m_5%_std'] = fmr_std[idx02]
eval_dict['FMR.05m_5%_std'] = fmr_std[idx05]
eval_dict['FMR.10m_5%_std'] = fmr_std[idx10]
for tau2 in np.linspace(0.01,0.2,5):
figs[f'FMR_tau1_wrt_tau2={tau2:.3f}']= self._get_FMR_curve_tau1(fmr, tau2=tau2)
figs['FMR_tau1']= self._get_FMR_curve_tau1(fmr)
for tau1 in np.linspace(0.01,0.1,5):
figs[f'FMR_tau2_wrt_tau1={tau1:.3f}']= self._get_FMR_curve_tau2(fmr, tau1=tau1)
#ax.scatter(fpr, tpr, s=100, alpha=0.5, color="blue")
if 'pair_dist' in agg_list:
all_dists = np.concatenate(agg_list['pair_dist'])
eval_dict['pair_dist'] = all_dists.mean()
eval_dict['pair_dist_std'] = all_dists.std()
figs['dist_hist'] = self._get_pair_distance_histogram(all_dists)
return eval_dict, figs
def _get_pair_distance_histogram(self, all_dists):
fig, ax = plt.subplots(figsize=(10,7))
counts, bins, patches = ax.hist(all_dists, density=True, bins=40) # density=False would make counts
ax.set_ylabel('Density')
ax.set_xlabel('Pair Distance')
return fig
def _get_shape_completion_ROC(self, tpr):
tprs = np.array(tpr)
mean_tprs = tprs.mean(axis=0)
std = tprs.std(axis=0)
tprs_upper = np.minimum(mean_tprs + std, 1)
tprs_lower = np.maximum(mean_tprs - std, 0)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(self.base_fpr, mean_tprs, 'b')
ax.fill_between(self.base_fpr, tprs_lower, tprs_upper, color='grey', alpha=0.3)
ax.plot([0, 1], [0, 1],'r--')
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('True Positive Rate')
ax.set_xlabel('False Positive Rate')
return fig
def _get_FMR_curve_tau2(self, fmrs, tau1=0.1):
idx05 = int(tau1 * (self.discretization-1) / self.max_thres)
# fix tau 1
means = []
tau1_min = 0.001
tau1_max = 0.25
tau1_ticks = np.linspace(tau1_min, tau1_max, 1000)
for t in tau1_ticks:
means.append(np.mean(fmrs[:,idx05] > t, axis=0))
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(tau1_ticks, means, 'b')
ax.set_xlim([tau1_min, tau1_max])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('Feature Match Recall')
ax.set_xlabel('Inlier Ratio threshold')
return fig
def _get_FMR_curve_tau1(self, fmrs, tau2=0.05):
# tau2 = 0.05 is the inlier ratio
# fix tau 2
mean_fmrs = np.mean(fmrs > tau2, axis=0)
fig, ax = plt.subplots(figsize=(10,7))
ax.plot(self.base_thres, mean_fmrs, 'b')
ax.set_xlim([0.0, self.max_thres])
ax.set_ylim([0.0, 1.0])
ax.set_ylabel('Feature Match Recall')
ax.set_xlabel('Inlier Distance Threshold')
return fig
def eval_step(self, data):
''' Performs an evaluation step.
Args:
data (dict): data dictionary
'''
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
eval_dict = {}
agg = {}
idx = data['idx'].item()
# Compute iou
with torch.no_grad():
outputs = self.model(data)
gt_occ = data['sampled_occ']
B,_,N = gt_occ.shape
gt_occ = gt_occ.reshape([B*2, N])
occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
eval_dict[f'iou_{self.threshold}'] = iou
occ_iou_hat_np_2 = (outputs['occ'].probs >= 0.5).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np_2).mean()
eval_dict['iou_0.5'] = iou
intermediate = (self.threshold + 0.5) / 2
occ_iou_hat_np_3 = (outputs['occ'].probs >= intermediate).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np_3).mean()
eval_dict[f'iou_{intermediate}'] = iou
if 'flow' in outputs:
gt_flow = data['sampled_flow']
gt_flow = gt_flow.reshape([B*2, N, 3])
constant = torch.from_numpy(np.array((12.,12.,4.)) / 10. / (1.1,1.1,1.1)).float().cuda()
loss_flow = F.mse_loss(
outputs['flow'] * constant,
gt_flow * constant,
reduction='none')
eval_dict['flow_all_field'] = loss_flow.sum(-1).mean().item()
loss_flow_np = loss_flow.sum(-1).cpu().numpy()
loss_flow_pos = loss_flow_np[occ_iou_np]
# if empty scene, no flow of the object will be present
if len(loss_flow_pos) > 0:
eval_dict['flow'] = loss_flow_pos.mean()
gt_pts = data['sampled_pts'].reshape([B*2, N, 3]).cpu().numpy()
if 'flow' in outputs:
flow_vis_mean = []
for i in range(B*2):
gt_occ_pts = gt_pts[i][occ_iou_np[i]] * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
vis_idx = plushsim_util.render_points(gt_occ_pts,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
vis_pts = gt_occ_pts[vis_idx]
flow_vis_mean.append(loss_flow_np[i][occ_iou_np[i]][vis_idx].mean())
eval_dict['flow_only_vis'] = np.mean(flow_vis_mean)
if idx % 10000 == 9999:
# do expensive evaluations
# occupancy ROC curve
fpr, tpr, _ = roc_curve(occ_iou_np.flatten(),
outputs['occ'].probs.cpu().numpy().flatten())
base_fpr = np.linspace(0, 1, 101)
tpr = interp(base_fpr, fpr, tpr)
tpr[0] = 0.0
agg['tpr'] = tpr
f1 = []
for i in range(B*2):
gt_occ_pts = common_util.subsample_points(gt_pts[i][occ_iou_np[i]], return_index=False)
pred_pts = common_util.subsample_points(gt_pts[i][occ_iou_hat_np[i]], return_index=False)
f1.append(common_util.f1_score(pred_pts, gt_occ_pts))
f1 = np.array(f1)
f1score, precision, recall = f1.mean(axis=0)
eval_dict['f1'] = f1score
eval_dict['precision'] = precision
eval_dict['recall'] = recall
if 'corr' in outputs:
# data prep corr
corr_f = outputs['corr']
num_pairs = corr_f.shape[1]
gt_match = np.arange(num_pairs)
src_f = corr_f[0].cpu().numpy()
tgt_f = corr_f[1].cpu().numpy()
# data prep pts
pts = data['sampled_pts'].cpu().numpy().squeeze()
src_pts = pts[0][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
tgt_pts = pts[1][:num_pairs] * (12,12,4) / (1.1,1.1,1.1)
# normalize points to maximum length of 1.
tgt_pts = tgt_pts / np.ptp(tgt_pts, axis=0).max()
_, nn_inds_st = find_emd_cpu(src_f, tgt_f)
# doing Feature-match recall.
eval_dict['match_exact'] = np.mean(gt_match == nn_inds_st)
dist_st = np.linalg.norm(tgt_pts - tgt_pts[nn_inds_st], axis=1)
eval_dict['match_0.05'] = np.mean(dist_st < 0.05)
eval_dict['match_0.1'] = np.mean(dist_st < 0.1)
hits = np.array([np.mean(dist_st < f) for f in self.base_thres])
agg['fmr_hits'] = hits
agg['pair_dist'] = dist_st
return eval_dict, agg
def compute_loss(self, data, it):
''' Computes the loss.
Args:
data (dict): data dictionary
'''
device = self.device
for k,v in data.items():
data[k] = v.to(device)
outputs = self.model(data)
loss = {}
eval_dict = {}
# Occupancy Loss
if 'occ' in outputs:
# gt points
gt_occ = data['sampled_occ']
B,_,N = gt_occ.shape
gt_occ = gt_occ.reshape([B*2, N])
occ_iou_np = (gt_occ >= 0.5).cpu().numpy()
# pred
logits = outputs['occ'].logits
loss_i = F.binary_cross_entropy_with_logits(
logits, gt_occ, reduction='none', pos_weight=self.pos_weight)
loss['occ'] = loss_i.mean()
# eval infos
occ_iou_hat_np = (outputs['occ'].probs >= self.threshold).cpu().numpy()
iou = compute_iou(occ_iou_np, occ_iou_hat_np).mean()
eval_dict['iou'] = iou
if 'flow' in outputs :
gt_occ = data['sampled_occ']
B,_,N = gt_occ.shape
gt_occ = gt_occ.reshape([B*2, N])
mask = (gt_occ > 0.5).bool()
gt_flow = data['sampled_flow']
gt_flow = gt_flow.reshape([B*2, N, 3])
flow_gt_0 = gt_flow[~mask]
flow_gt_1 = gt_flow[mask]
flow_pred = outputs['flow']
flow_pred_0 = flow_pred[~mask]
flow_pred_1 = flow_pred[mask]
loss['flow'] = F.mse_loss(flow_pred_1, flow_gt_1) + 0.01 * F.mse_loss(flow_pred_0, flow_gt_0)
if 'corr' in outputs:
dist_vec = data['geo_dists']
corr_f = outputs['corr']
src_f = corr_f[0]
src_pos = src_f[dist_vec <= self.contrastive_threshold]
num_positive = (dist_vec <= self.contrastive_threshold).sum()
tgt_f = corr_f[1]
tgt_pos = tgt_f[dist_vec <= self.contrastive_threshold]
if self.loss_type == "contrastive":
if num_positive > 0:
src_neg = src_f[dist_vec > self.contrastive_threshold]
tgt_neg = tgt_f[dist_vec > self.contrastive_threshold]
# Positive loss
pos_loss = F.relu(((src_pos - tgt_pos).pow(2).sum(1) + 1e-4).sqrt()
- self.contrastive_pos_thres).pow(2)
pos_loss_mean = pos_loss.mean()
loss['contrastive_pos'] = self.contrastive_coeff_pos * pos_loss_mean
# Negative loss
neg_dist = (dist_vec[dist_vec > self.contrastive_threshold]
/ self.contrastive_threshold).log() + 1.
neg_dist = torch.clamp(neg_dist, max=2)
neg_loss = F.relu(neg_dist -
((src_neg - tgt_neg).pow(2).sum(1) + 1e-4).sqrt()).pow(2)
if self.scale_with_geodesics:
neg_loss = neg_loss / neg_dist
neg_loss_mean = neg_loss.mean()
loss['contrastive_neg'] = self.contrastive_coeff_neg * neg_loss_mean
return loss
| 15,474 | Python | 42.105849 | 109 | 0.511439 |
NVlabs/ACID/ACID/src/conv_onet/config.py | import os
from src.encoder import encoder_dict
from src.conv_onet import models, training
from src.conv_onet import generation
from src import data
def get_model(cfg,device=None, dataset=None, **kwargs):
if cfg['model']['type'] == 'geom':
return get_geom_model(cfg,device,dataset)
elif cfg['model']['type'] == 'combined':
return get_combined_model(cfg,device,dataset)
def get_combined_model(cfg, device=None, dataset=None, **kwargs):
''' Return the Occupancy Network model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
dim = cfg['data']['dim']
act_dim = cfg['data']['act_dim']
obj_c_dim = cfg['model']['obj_c_dim']
decoder_kwargs = cfg['model']['decoder_kwargs']
obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
padding = cfg['data']['padding']
decoder = 'combined_decoder'
encoder = 'geom_encoder'
    if 'env_c_dim' in cfg['model'] and cfg['model']['env_c_dim'] != 0:
env_c_dim = cfg['model']['env_c_dim']
env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
env_encoder = encoder_dict[encoder](
dim=dim, c_dim=env_c_dim, padding=padding,
**env_encoder_kwargs
)
else:
env_c_dim = 0
env_encoder=None
decoder = models.decoder_dict[decoder](
dim=dim,
c_per_dim=obj_c_dim+env_c_dim,
c_act_dim=obj_c_dim+env_c_dim,
padding=padding,
**decoder_kwargs
)
obj_per_encoder = encoder_dict[encoder](
dim=dim, c_dim=obj_c_dim, padding=padding,
**obj_encoder_kwargs
)
obj_act_encoder = encoder_dict[encoder](
dim=act_dim, c_dim=obj_c_dim, padding=padding,
**obj_encoder_kwargs
)
model = models.ConvImpDyn(
obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=device
)
return model
def get_geom_model(cfg, device=None, dataset=None, **kwargs):
''' Return the Occupancy Network model.
Args:
cfg (dict): imported yaml config
device (device): pytorch device
dataset (dataset): dataset
'''
dim = cfg['data']['dim']
obj_c_dim = cfg['model']['obj_c_dim']
decoder_kwargs = cfg['model']['decoder_kwargs']
obj_encoder_kwargs = cfg['model']['obj_encoder_kwargs']
padding = cfg['data']['padding']
decoder = 'geom_decoder'
encoder = 'geom_encoder'
    if 'env_c_dim' in cfg['model'] and cfg['model']['env_c_dim'] != 0:
env_c_dim = cfg['model']['env_c_dim']
env_encoder_kwargs = cfg['model']['env_encoder_kwargs']
env_encoder = encoder_dict[encoder](
dim=dim, c_dim=env_c_dim, padding=padding,
**env_encoder_kwargs
)
else:
env_c_dim = 0
env_encoder=None
decoder = models.decoder_dict[decoder](
dim=dim, c_dim=obj_c_dim+env_c_dim, padding=padding,
**decoder_kwargs
)
obj_encoder = encoder_dict[encoder](
dim=dim, c_dim=obj_c_dim, padding=padding,
**obj_encoder_kwargs
)
model = models.ConvOccGeom(
obj_encoder, env_encoder, decoder, device=device
)
return model
def get_trainer(model, optimizer, cfg, device, **kwargs):
''' Returns the trainer object.
Args:
model (nn.Module): the Occupancy Network model
optimizer (optimizer): pytorch optimizer object
cfg (dict): imported yaml config
device (device): pytorch device
'''
out_dir = cfg['training']['out_dir']
vis_dir = os.path.join(out_dir, 'vis')
trainer = training.PlushTrainer(
model, optimizer, cfg,
device=device,
vis_dir=vis_dir )
return trainer
def get_generator(model, cfg, device, **kwargs):
''' Returns the generator object.
Args:
model (nn.Module): Occupancy Network model
cfg (dict): imported yaml config
device (device): pytorch device
'''
generator = generation.Generator3D(
model,
device=device,
threshold=cfg['test']['threshold'],
resolution0=cfg['generation']['resolution_0'],
upsampling_steps=cfg['generation']['upsampling_steps'],
sample=cfg['generation']['use_sampling'],
refinement_step=cfg['generation']['refinement_step'],
simplify_nfaces=cfg['generation']['simplify_nfaces'],
padding=cfg['data']['padding'],
vol_info = None,
vol_bound = None,
)
return generator
| 4,514 | Python | 29.1 | 77 | 0.597475 |
NVlabs/ACID/ACID/src/conv_onet/__init__.py | from src.conv_onet import (
config, generation, training, models
)
__all__ = [
    'config', 'generation', 'training', 'models'
]
| 127 | Python | 14.999998 | 40 | 0.661417 |
NVlabs/ACID/ACID/src/conv_onet/generation.py | import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange, tqdm
import trimesh
from src.utils import libmcubes, common_util
from src.common import make_3d_grid, normalize_coord, add_key, coord2index
from src.utils.libmise import MISE
import time
import math
counter = 0
class Generator3D(object):
''' Generator class for Occupancy Networks.
    It provides functions to generate the final mesh as well as refining options.
Args:
model (nn.Module): trained Occupancy Network model
points_batch_size (int): batch size for points evaluation
threshold (float): threshold value
refinement_step (int): number of refinement steps
device (device): pytorch device
resolution0 (int): start resolution for MISE
        upsampling_steps (int): number of upsampling steps
with_normals (bool): whether normals should be estimated
padding (float): how much padding should be used for MISE
sample (bool): whether z should be sampled
input_type (str): type of input
        vol_info (dict): volume information
vol_bound (dict): volume boundary
simplify_nfaces (int): number of faces the mesh should be simplified to
'''
def __init__(self, model, points_batch_size=100000,
threshold=0.5, refinement_step=0, device=None,
resolution0=16, upsampling_steps=3,
with_normals=False, padding=0.1, sample=False,
input_type = None,
vol_info = None,
vol_bound = None,
simplify_nfaces=None):
self.model = model.to(device)
self.points_batch_size = points_batch_size
self.refinement_step = refinement_step
self.threshold = threshold
self.device = device
self.resolution0 = resolution0
self.upsampling_steps = upsampling_steps
self.with_normals = with_normals
self.input_type = input_type
self.padding = padding
self.sample = sample
self.simplify_nfaces = simplify_nfaces
# for pointcloud_crop
self.vol_bound = vol_bound
if vol_info is not None:
self.input_vol, _, _ = vol_info
def generate_mesh(self, data, return_stats=True):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
stats_dict = {}
t0 = time.time()
# obtain features for all crops
with torch.no_grad():
c = self.model.encode_inputs(data)
if type(c) is tuple:
for cs in c:
for k,v in cs.items():
cs[k] = v[0].unsqueeze(0)
else:
for k,v in c.items():
c[k] = v[0].unsqueeze(0)
stats_dict['time (encode inputs)'] = time.time() - t0
mesh = self.generate_from_latent(c, stats_dict=stats_dict)
if return_stats:
return mesh, stats_dict
else:
return mesh
def generate_from_latent(self, c=None, stats_dict={}, **kwargs):
''' Generates mesh from latent.
Works for shapes normalized to a unit cube
Args:
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = points / mesh_extractor.resolution
# Normalize to bounding box
pointsf = box_size * (pointsf - 0.5)
pointsf = torch.FloatTensor(pointsf).to(self.device)
# Evaluate model and update
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
mesh = self.extract_mesh(value_grid, c, stats_dict=stats_dict)
return mesh
def eval_points(self, p, c=None, vol_bound=None, **kwargs):
''' Evaluates the occupancy values for the points.
Args:
p (tensor): points
c (tensor): encoded feature volumes
'''
p_split = torch.split(p, self.points_batch_size)
occ_hats = []
for pi in p_split:
pi = pi.unsqueeze(0).to(self.device)
with torch.no_grad():
occ_hat = self.model.eval_points(pi, c, **kwargs)['occ'].logits
occ_hats.append(occ_hat.squeeze(0).detach().cpu())
occ_hat = torch.cat(occ_hats, dim=0)
return occ_hat
def extract_mesh(self, occ_hat, c=None, stats_dict=dict()):
''' Extracts the mesh from the predicted occupancy grid.
Args:
occ_hat (tensor): value grid of occupancies
c (tensor): encoded feature volumes
stats_dict (dict): stats dictionary
'''
# Some short hands
n_x, n_y, n_z = occ_hat.shape
box_size = 1 + self.padding
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
# Make sure that mesh is watertight
t0 = time.time()
occ_hat_padded = np.pad(
occ_hat, 1, 'constant', constant_values=-1e6)
vertices, triangles = libmcubes.marching_cubes(
occ_hat_padded, threshold)
stats_dict['time (marching cubes)'] = time.time() - t0
# Strange behaviour in libmcubes: vertices are shifted by 0.5
vertices -= 0.5
        # Undo padding
vertices -= 1
if self.vol_bound is not None:
# Scale the mesh back to its original metric
bb_min = self.vol_bound['query_vol'][:, 0].min(axis=0)
bb_max = self.vol_bound['query_vol'][:, 1].max(axis=0)
mc_unit = max(bb_max - bb_min) / (self.vol_bound['axis_n_crop'].max() * self.resolution0*2**self.upsampling_steps)
vertices = vertices * mc_unit + bb_min
else:
# Normalize to bounding box
vertices /= np.array([n_x-1, n_y-1, n_z-1])
vertices = box_size * (vertices - 0.5)
# Create mesh
mesh = trimesh.Trimesh(vertices / (1., 1., 3), triangles,
vertex_normals=None,
process=False)
# Directly return if mesh is empty
if vertices.shape[0] == 0:
return mesh
# TODO: normals are lost here
if self.simplify_nfaces is not None:
t0 = time.time()
from src.utils.libsimplify import simplify_mesh
mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
stats_dict['time (simplify)'] = time.time() - t0
# Refine mesh
if self.refinement_step > 0:
t0 = time.time()
self.refine_mesh(mesh, occ_hat, c)
stats_dict['time (refine)'] = time.time() - t0
return mesh
def generate_pointcloud(self, data, threshold=0.75, use_gt_occ=False):
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
stats_dict = {}
t0 = time.time()
# obtain features for all crops
with torch.no_grad():
c = self.model.encode_inputs(data)
pts = data['sampled_pts']
B,_,N,C = pts.shape
pts = pts.reshape([B*2,N,C])
p_split = torch.split(pts, self.points_batch_size, dim=-1)
occ_hats = []
features = []
flows = []
for pi in p_split:
with torch.no_grad():
outputs = self.model.eval_points(pi, c)
occ_hats.append((outputs['occ'].probs > threshold).detach().cpu())
if 'corr' in outputs:
features.append(outputs['corr'].detach().cpu())
if 'flow' in outputs:
flows.append(outputs['flow'].detach().cpu())
pts = pts.cpu().numpy()
occ_hat = torch.cat(occ_hats, dim=1).numpy()
if use_gt_occ:
occ_hat = data['sampled_occ'].reshape([B*2, N]).cpu().numpy()
pos_pts0 = pts[0][occ_hat[0] == 1.].reshape((-1,3))
pos_idx0 = common_util.subsample_points(pos_pts0, resolution=0.013)
pos_pts0 = pos_pts0[pos_idx0]
pos_pts1 = pts[1][occ_hat[1] == 1.].reshape((-1,3))
pos_idx1 = common_util.subsample_points(pos_pts1, resolution=0.013)
pos_pts1 = pos_pts1[pos_idx1]
pos_pts = np.concatenate([pos_pts0, pos_pts1], axis=0) / (1.,1.,3.)
if len(features) != 0:
feature = torch.cat(features, dim=1).numpy()
f_dim = feature.shape[-1]
pos_f0 = feature[0][occ_hat[0] == 1.].reshape((-1,f_dim))
pos_f1 = feature[1][occ_hat[1] == 1.].reshape((-1,f_dim))
pos_f0 = pos_f0[pos_idx0]
pos_f1 = pos_f1[pos_idx1]
pos_f = np.concatenate([pos_f0, pos_f1], axis=0)
if pos_f.shape[0] < 100:
pcloud_both = pos_pts
else:
tsne_result = common_util.embed_tsne(pos_f)
colors = common_util.get_color_map(tsne_result)
pcloud_both = np.concatenate([pos_pts, colors], axis=1)
else:
pcloud_both = pos_pts
pcloud0 = pcloud_both[:pos_pts0.shape[0]]
pcloud1 = pcloud_both[pos_pts0.shape[0]:]
if len(flows) != 0:
flow = torch.cat(flows, dim=1).numpy() / 10.
pos_f0 = flow[0][occ_hat[0] == 1.].reshape((-1,3))
pos_f1 = flow[1][occ_hat[1] == 1.].reshape((-1,3))
pos_f0 = pos_f0[pos_idx0]
pos_f1 = pos_f1[pos_idx1]
pcloud_unroll_0 = pcloud0.copy()
pcloud_unroll_0[:,:3] += pos_f0 / (1.,1.,3.)
pcloud_unroll_1 = pcloud1.copy()
pcloud_unroll_1[:,:3] += pos_f1 / (1.,1.,3.)
return pcloud0, pcloud1,pcloud_unroll_0,pcloud_unroll_1
return pcloud0, pcloud1
def refine_mesh(self, mesh, occ_hat, c=None):
''' Refines the predicted mesh.
Args:
mesh (trimesh object): predicted mesh
occ_hat (tensor): predicted occupancy grid
c (tensor): latent conditioned code c
'''
self.model.eval()
# Some shorthands
n_x, n_y, n_z = occ_hat.shape
assert(n_x == n_y == n_z)
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
# Vertex parameter
v0 = torch.FloatTensor(mesh.vertices).to(self.device)
v = torch.nn.Parameter(v0.clone())
# Faces of mesh
faces = torch.LongTensor(mesh.faces).to(self.device)
# Start optimization
optimizer = optim.RMSprop([v], lr=1e-4)
for it_r in trange(self.refinement_step):
optimizer.zero_grad()
# Loss
face_vertex = v[faces]
eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
eps = torch.FloatTensor(eps).to(self.device)
face_point = (face_vertex * eps[:, :, None]).sum(dim=1)
face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
face_normal = torch.cross(face_v1, face_v2)
face_normal = face_normal / \
(face_normal.norm(dim=1, keepdim=True) + 1e-10)
face_value = torch.sigmoid(
self.model.eval_points(face_point.unsqueeze(0), c)['occ'].logits
)
normal_target = -autograd.grad(
[face_value.sum()], [face_point], create_graph=True)[0]
normal_target = \
normal_target / \
(normal_target.norm(dim=1, keepdim=True) + 1e-10)
loss_target = (face_value - threshold).pow(2).mean()
loss_normal = \
(face_normal - normal_target).pow(2).sum(dim=1).mean()
loss = loss_target + 0.01 * loss_normal
# Update
loss.backward()
optimizer.step()
mesh.vertices = v.data.cpu().numpy()
return mesh
def generate_occ_grid(self, c=None, stats_dict={}, **kwargs):
''' Generates mesh from latent.
Works for shapes normalized to a unit cube
Args:
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = points / mesh_extractor.resolution
# Normalize to bounding box
pointsf = box_size * (pointsf - 0.5)
pointsf = torch.FloatTensor(pointsf).to(self.device)
# Evaluate model and update
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
return value_grid
| 14,928 | Python | 36.044665 | 126 | 0.536509 |
NVlabs/ACID/ACID/src/conv_onet/models/decoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from src.common import normalize_coordinate, normalize_3d_coordinate, map2local
class GeomDecoder(nn.Module):
''' Decoder.
    Instead of conditioning on global features, it conditions on plane/volume local features.
Args:
dim (int): input dimension
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of ResnetBlockFC blocks
leaky (bool): whether to use leaky ReLUs
sample_mode (str): sampling feature strategy, bilinear|nearest
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
def __init__(self, dim=3, c_dim=128,
corr_dim=0, corr_head=True,
hidden_size=256, n_blocks=5, leaky=False,
sample_mode='bilinear', padding=0.1):
super().__init__()
self.c_dim = c_dim
self.n_blocks = n_blocks
self.corr_dim = corr_dim
self.corr_head = corr_head
self.fc_c_occ = nn.ModuleList([
nn.Linear(c_dim, hidden_size) for i in range(n_blocks)
])
self.fc_p = nn.Linear(dim, hidden_size)
self.blocks_occ = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.fc_occ = nn.Linear(hidden_size, 1)
if self.corr_dim != 0 and corr_head:
self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.sample_mode = sample_mode
self.padding = padding
def sample_plane_feature(self, p, c, plane='xz'):
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
xy = xy[:, :, None].float()
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
return c
def forward(self, p, c_plane, **kwargs):
c = 0
c += self.sample_plane_feature(p, c_plane['xz'], plane='xz')
c += self.sample_plane_feature(p, c_plane['xy'], plane='xy')
c += self.sample_plane_feature(p, c_plane['yz'], plane='yz')
c = c.transpose(1, 2)
p = p.float()
x = self.fc_p(p)
net = x
for i in range(self.n_blocks):
net = net + self.fc_c_occ[i](c)
net = self.blocks_occ[i](net)
results = {}
if self.corr_dim != 0 and not self.corr_head:
results['corr'] = net
net = self.actvn(net)
results['occ'] = self.fc_occ(net).squeeze(-1)
if self.corr_dim != 0 and self.corr_head:
results['corr'] = self.fc_out_corr(net)
return results
class CombinedDecoder(nn.Module):
''' Decoder.
    Instead of conditioning on global features, it conditions on plane/volume local features.
Args:
dim (int): input dimension
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
        n_blocks (int): number of ResnetBlockFC blocks
leaky (bool): whether to use leaky ReLUs
sample_mode (str): sampling feature strategy, bilinear|nearest
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
def __init__(self, dim=3, c_per_dim=128, c_act_dim=128,
corr_dim=0, corr_head=True,
hidden_size=256, n_blocks=5, leaky=False,
sample_mode='bilinear', padding=0.1, fuse=True, detach=False, anneal_gradient=True):
super().__init__()
self.c_per_dim = c_per_dim
self.c_act_dim = c_act_dim
self.n_blocks = n_blocks
self.corr_dim = corr_dim
self.corr_head = corr_head
self.fuse = fuse
self.detach = detach
self.anneal_gradient = anneal_gradient
self.fc_c_per = nn.ModuleList([
nn.Linear(c_per_dim, hidden_size) for i in range(n_blocks)
])
self.fc_c_act = nn.ModuleList([
nn.Linear(c_act_dim, hidden_size) for i in range(n_blocks)
])
if self.fuse:
self.fc_c_merge = nn.ModuleList([
nn.Linear(hidden_size*2, hidden_size) for i in range(n_blocks)
])
self.fc_p_per = nn.Linear(dim, hidden_size)
self.fc_p_act = nn.Linear(dim, hidden_size)
self.blocks_per = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.blocks_act = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.fc_occ = nn.Linear(hidden_size, 1)
self.fc_flow= nn.Linear(hidden_size, 3)
if self.corr_dim != 0 and corr_head:
self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
if self.fuse:
self.fc_act_corr_merge = nn.Linear(hidden_size+corr_dim, hidden_size)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.sample_mode = sample_mode
self.padding = padding
def sample_plane_feature(self, p, c, plane='xz'):
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
xy = xy[:, :, None].float()
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
return c
def decode_perception(self, p, c_per_plane):
c_per = 0
c_per += self.sample_plane_feature(p, c_per_plane['xz'], plane='xz')
c_per += self.sample_plane_feature(p, c_per_plane['xy'], plane='xy')
c_per += self.sample_plane_feature(p, c_per_plane['yz'], plane='yz')
c_per = c_per.transpose(1, 2)
p = p.float()
net_per = self.fc_p_per(p)
features = []
for i in range(self.n_blocks):
net_per = net_per + self.fc_c_per[i](c_per)
net_per = self.blocks_per[i](net_per)
if self.detach:
features.append(net_per.detach())
else:
features.append(net_per)
net_per = self.actvn(net_per)
results = {}
results['occ'] = self.fc_occ(net_per).squeeze(-1)
if self.corr_dim != 0 and self.corr_head:
corr = self.fc_out_corr(net_per)
features.append(corr)
results['corr'] = corr
# if self.anneal_gradient:
# for i,p in enumerate(features):
# features[i] = p * 0.1 + p.detach() * 0.9
return results, features
def decode_action(self, p, c_act_plane, per_features):
c_act = 0
c_act += self.sample_plane_feature(p, c_act_plane['xz'], plane='xz')
c_act += self.sample_plane_feature(p, c_act_plane['xy'], plane='xy')
c_act += self.sample_plane_feature(p, c_act_plane['yz'], plane='yz')
c_act = c_act.transpose(1, 2)
p = p.float()
net_act = self.fc_p_act(p)
for i in range(self.n_blocks):
net_act = net_act + self.fc_c_act[i](c_act)
if self.fuse:
net_act = self.blocks_act[i](
self.fc_c_merge[i](
torch.cat( ( net_act, per_features[i]), dim=-1)))
# (net_per.detach()*0.9+net_per * 0.1)), dim=-1)))
else:
net_act = self.blocks_act[i](net_act)
net_act = self.actvn(net_act)
if self.corr_dim != 0 and self.corr_head:
if self.fuse:
net_act = self.fc_act_corr_merge(
torch.cat((net_act, per_features[-1].detach()), dim=-1))
return {'flow':self.fc_flow(net_act)}
def forward(self, p, c_per_plane, c_act_plane):
results, per_features = self.decode_perception(p, c_per_plane)
results['flow'] = self.decode_action(p, c_act_plane, per_features)['flow']
return results
| 8,333 | Python | 35.876106 | 114 | 0.554062 |
NVlabs/ACID/ACID/src/conv_onet/models/__init__.py | import torch
import numpy as np
import torch.nn as nn
from torch import distributions as dist
from src.conv_onet.models import decoder
from src.utils import plushsim_util
# Decoder dictionary
decoder_dict = {
'geom_decoder': decoder.GeomDecoder,
'combined_decoder': decoder.CombinedDecoder,
}
class ConvImpDyn(nn.Module):
def __init__(self, obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_per_encoder = obj_per_encoder.to(device)
self.obj_act_encoder = obj_act_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
def forward(self, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
#############
c_per, c_act = self.encode_inputs(inputs)
return self.decode(inputs, c_per, c_act, **kwargs)
def forward_perception(self, inputs, filter=True,):
c_per, c_env = self.encode_perception(inputs, merge_env_feature=False)
for k in c_per.keys():
env_f = self.env_upsample(c_env[k])
c_env[k] = env_f
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
# get curr observation state and features
p = inputs['sampled_pts']
if len(p.shape) > 3:
B,_,N,C = p.shape
curr_p = p.reshape([B*2,N,C])
else:
curr_p = p
curr_state, per_features = self.decoder.decode_perception(curr_p, c_per)
occ_pred = dist.Bernoulli(logits=curr_state['occ']).probs >= 0.5
curr_state['occ'] = occ_pred
if filter:
curr_p = curr_p[occ_pred]
if 'corr' in curr_state:
curr_state['corr'] = curr_state['corr'][occ_pred]
for i,p in enumerate(per_features):
per_features[i] = p[occ_pred]
return c_per, c_env, curr_p, curr_state, per_features
def rollout(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
import time
# from functools import partial
# render_pts_func = partial(plushsim_util.render_points, return_index=True)
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
act_traj = []
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
start_time = time.time()
c_act, act_partial = self.get_action_encoding(curr_pts[i], g, t, c_env)
total_time_act_render += time.time() - start_time
act_traj.append(act_partial)
start_time = time.time()
flow = self.decoder.decode_action(curr_pts[i], c_act, per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
total_time_act_decode += time.time() - start_time
all_traj.append((curr_pts.copy(), act_traj))
print("total time render: ",total_time_act_render)
print("total time decode: ",total_time_act_decode)
return all_traj
def rollout_async(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
total_async_time_act_render = 0
import time
from functools import partial
render_pts_func = partial(plushsim_util.render_points, return_index=True)
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
start_time = time.time()
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]
from multiprocessing import Pool
with Pool(16) as p:
vis_idxes = p.map(render_pts_func, points_world)
xyzs, acts = [],[]
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
# c_act, act_partial = self.get_action_encoding(
# curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
obj_xyz, obj_act = self.get_action_encoding_new(
curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
xyzs.append(obj_xyz)
acts.append(obj_act)
total_time_act_render += time.time() - start_time
n = 20
start_time = time.time()
xyz_chunks = [xyzs[i:i+n] for i in range(0, num_sequence, n)]
act_chunks = [acts[i:i+n] for i in range(0, num_sequence, n)]
c_acts = []
for xyz, act in zip(xyz_chunks, act_chunks):
obj_xyz = torch.as_tensor(np.stack(xyz).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(act).astype(np.float32)).to(self._device)
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
for chunk_i in range(len(xyz)):
c_act = {}
for k in c_act_new.keys():
c_act[k] = torch.cat([c_act_new[k][chunk_i].unsqueeze(0), c_env[k]], dim=1)
c_acts.append(c_act)
total_time_act_decode += time.time() - start_time
from src.utils import common_util
from PIL import Image
for k,v in c_acts[0].items():
v_np = v.squeeze().permute(1,2,0).cpu().numpy()
feature_plane = v_np.reshape([-1, v_np.shape[-1]])
tsne_result = common_util.embed_tsne(feature_plane)
colors = common_util.get_color_map(tsne_result)
colors = colors.reshape((128,128,-1)).astype(np.float32)
colors = (colors * 255 / np.max(colors)).astype('uint8')
img = Image.fromarray(colors)
img.save(f"act_{k}.png")
import pdb; pdb.set_trace()
for i in range(num_sequence):
flow = self.decoder.decode_action(curr_pts[i], c_acts[i], per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
all_traj.append(([p.cpu().numpy().squeeze() for p in curr_pts], xyzs))
return all_traj
def get_action_encoding_new(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
        # get visible points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
return obj_xyz, obj_act
def get_action_encoding(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
        # get visible points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
xyzs.append(obj_xyz)
acts.append(obj_act)
obj_xyz = torch.as_tensor(np.stack(xyzs).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(acts).astype(np.float32)).to(self._device)
#print("time split 2: ", time.time() - start_time)
start_time = time.time()
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
#print("time split 3: ", time.time() - start_time)
start_time = time.time()
for k in c_act_new.keys():
c_act_new[k] = torch.cat([c_act_new[k], c_env[k]], dim=1)
#print("time split 4: ", time.time() - start_time)
start_time = time.time()
return c_act_new, obj_xyz
def encode_perception(self, inputs, merge_env_feature=True):
obj_pcloud = inputs['obj_obs']
if len(obj_pcloud.shape) > 3:
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:6]
c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
if self.env_encoder is not None:
env_pcloud = inputs['env_obs'].cuda()
if len(env_pcloud.shape) > 3:
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
if merge_env_feature:
for k in c_per.keys():
env_f = self.env_upsample(env_features[k])
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
else:
return c_per, env_features
return c_per
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
obj_pcloud = inputs['obj_obs']
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb, obj_act = obj_pcloud[...,:3],obj_pcloud[...,3:6],obj_pcloud[...,6:]
c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
c_act = self.obj_act_encoder((obj_xyz, obj_act))
if self.env_encoder is not None:
env_pcloud = inputs['env_obs'].cuda()
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
for k in c_per.keys():
env_f = self.env_upsample(env_features[k])
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
c_act[k] = torch.cat([c_act[k], env_f], dim=1)
return c_per, c_act
def eval_points(self, pts, c):
outputs = self.decoder(pts, *c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
def decode(self, inputs, c1, c2, **kwargs):
        ''' Returns decoder predictions (e.g. occupancy and correspondence)
            for the sampled points.
        Args:
            inputs (dict): input batch with 'sampled_pts' and 'pair_indices'
            c1 (dict): perception feature planes
            c2 (dict): action feature planes
'''
p = inputs['sampled_pts']
B,_,N,C = p.shape
p = p.reshape([B*2,N,C])
outputs = self.decoder(p, c1, c2)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
if 'corr' in outputs:
_,N,C = outputs['corr'].shape
corr_f = outputs['corr'].reshape([B,2,N,C])
if 'skip_indexing' not in kwargs:
corr_f = torch.transpose(corr_f, 0, 1)
corr_f = torch.flatten(corr_f, 1, 2)
inds = inputs['pair_indices']
corr_f = corr_f[:,inds]
outputs['corr'] = corr_f
return outputs
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
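# Hedged sketch of the normalized-to-world mapping used in the rollout loop and
# in get_action_encoding(_new) above: object points live in a padded unit cube
# and are scaled back into simulator world coordinates before rendering.
#
#   scale = np.array([1200., 1200., 400.]) / 1.1
#   offset = np.array([0., 0., 180.])
#   pts_world = pts_norm * scale + offset        # normalized cube -> world
#   pts_norm2 = (pts_world - offset) / scale     # ... and back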
class ConvOccGeom(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
device (device): torch device
'''
def __init__(self, obj_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_encoder = obj_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
def forward(self, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
#############
c = self.encode_inputs(inputs)
return self.decode(inputs, c, **kwargs)
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
obj_pcloud = inputs['obj_obs']
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:]
obj_features = self.obj_encoder((obj_xyz, obj_rgb))
if self.env_encoder is None:
return obj_features
env_pcloud = inputs['env_obs'].cuda()
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
joint_features = {}
for k in obj_features.keys():
env_f = self.env_upsample(env_features[k])
joint_features[k] = torch.cat([obj_features[k], env_f], dim=1)
return joint_features
def eval_points(self, pts, c):
outputs = self.decoder(pts, c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
def decode(self, inputs, c, **kwargs):
        ''' Returns decoder predictions (e.g. occupancy and correspondence)
            for the sampled points.
        Args:
            inputs (dict): input batch with 'sampled_pts' and 'pair_indices'
            c (dict): latent conditioned feature planes
'''
p = inputs['sampled_pts']
B,_,N,C = p.shape
p = p.reshape([B*2,N,C])
outputs = self.decoder(p, c, **kwargs)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
if 'corr' in outputs:
_,N,C = outputs['corr'].shape
corr_f = outputs['corr'].reshape([B,2,N,C])
corr_f = torch.transpose(corr_f, 0, 1)
corr_f = torch.flatten(corr_f, 1, 2)
inds = inputs['pair_indices']
corr_f = corr_f[:,inds]
outputs['corr'] = corr_f
return outputs
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
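if __name__ == "__main__":
    # Minimal sketch (not part of the model code) of the correspondence
    # reshaping done in decode() above: after reshape/transpose/flatten, row j
    # of frame 0 and row j of frame 1 refer to the same (batch, point) pair,
    # so indexing with `pair_indices` picks matching source/target features.
    import torch
    B, N, C = 3, 4, 1
    corr = torch.arange(B * 2 * N, dtype=torch.float32).reshape(B * 2, N, C)
    corr_f = corr.reshape(B, 2, N, C)
    corr_f = torch.transpose(corr_f, 0, 1)    # (2, B, N, C)
    corr_f = torch.flatten(corr_f, 1, 2)      # (2, B*N, C)
    inds = torch.arange(B * N)                # stand-in for inputs['pair_indices']
    corr_f = corr_f[:, inds]
    print(corr_f[0, :, 0])                    # frame-0 features per (batch, point)
    print(corr_f[1, :, 0])                    # row-aligned frame-1 features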
NVlabs/ACID/ACID/src/encoder/__init__.py | from src.encoder import (
pointnet
)
encoder_dict = {
'geom_encoder': pointnet.GeomEncoder,
}
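# Hedged usage sketch: configs typically select the encoder class by name via
# this dict; the constructor arguments below are illustrative assumptions only
# (see pointnet.GeomEncoder for the actual signature).
#
#   enc_cls = encoder_dict['geom_encoder']
#   encoder = enc_cls(c_dim=32, plane_resolution=64,
#                     unet_kwargs={'depth': 4, 'merge_mode': 'concat',
#                                  'start_filts': 32})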
NVlabs/ACID/ACID/src/encoder/pointnet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from torch_scatter import scatter_mean, scatter_max
from src.common import coordinate2index, normalize_coordinate
from src.encoder.unet import UNet
class GeomEncoder(nn.Module):
''' PointNet-based encoder network with ResNet blocks for each point.
        The number of input points is fixed.
    Args:
        c_dim (int): dimension of latent code c
        dim (int): input points dimension
        f_dim (int): dimension of the per-point input features
        hidden_dim (int): hidden dimension of the network
        scatter_type (str): feature aggregation when doing local pooling ('max' or 'mean')
        unet_kwargs (dict): U-Net parameters
        plane_resolution (int): defined resolution for plane feature
        padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
        n_blocks (int): number of ResnetBlockFC layers
'''
def __init__(self, c_dim=128, dim=3, f_dim=9, hidden_dim=128, scatter_type='max',
unet_kwargs=None, plane_resolution=None, padding=0.1, n_blocks=5):
super().__init__()
self.c_dim = c_dim
self.fc_pos = nn.Linear(dim+f_dim, 2*hidden_dim)
self.blocks = nn.ModuleList([
ResnetBlockFC(2*hidden_dim, hidden_dim) for i in range(n_blocks)
])
self.fc_c = nn.Linear(hidden_dim, c_dim)
self.actvn = nn.ReLU()
self.hidden_dim = hidden_dim
self.unet = UNet(c_dim, in_channels=c_dim, **unet_kwargs)
self.reso_plane = plane_resolution
self.padding = padding
if scatter_type == 'max':
self.scatter = scatter_max
elif scatter_type == 'mean':
self.scatter = scatter_mean
else:
raise ValueError('incorrect scatter type')
def generate_plane_features(self, p, c, plane='xz'):
# acquire indices of features in plane
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
index = coordinate2index(xy, self.reso_plane)
# scatter plane features from points
fea_plane = c.new_zeros(p.size(0), self.c_dim, self.reso_plane**2)
c = c.permute(0, 2, 1) # B x 512 x T
fea_plane = scatter_mean(c, index, out=fea_plane) # B x 512 x reso^2
        fea_plane = fea_plane.reshape(p.size(0), self.c_dim, self.reso_plane, self.reso_plane) # sparse matrix (B x 512 x reso x reso)
# process the plane features with UNet
fea_plane = self.unet(fea_plane)
return fea_plane
def pool_local(self, xy, index, c):
bs, fea_dim = c.size(0), c.size(2)
keys = xy.keys()
c_out = 0
for key in keys:
# scatter plane features from points
fea = self.scatter(c.permute(0, 2, 1), index[key], dim_size=self.reso_plane**2)
if self.scatter == scatter_max:
fea = fea[0]
# gather feature back to points
fea = fea.gather(dim=2, index=index[key].expand(-1, fea_dim, -1))
c_out += fea
return c_out.permute(0, 2, 1)
def forward(self, p):
if type(p) is tuple:
p, pf = p
else:
pf = None
# acquire the index for each point
coord = {}
index = {}
coord['xz'] = normalize_coordinate(p.clone(), plane='xz', padding=self.padding)
index['xz'] = coordinate2index(coord['xz'], self.reso_plane)
coord['xy'] = normalize_coordinate(p.clone(), plane='xy', padding=self.padding)
index['xy'] = coordinate2index(coord['xy'], self.reso_plane)
coord['yz'] = normalize_coordinate(p.clone(), plane='yz', padding=self.padding)
index['yz'] = coordinate2index(coord['yz'], self.reso_plane)
net = self.fc_pos(torch.cat([p, pf],dim=-1))
net = self.blocks[0](net)
for block in self.blocks[1:]:
pooled = self.pool_local(coord, index, net)
net = torch.cat([net, pooled], dim=2)
net = block(net)
c = self.fc_c(net)
fea = {}
fea['xz'] = self.generate_plane_features(p, c, plane='xz')
fea['xy'] = self.generate_plane_features(p, c, plane='xy')
fea['yz'] = self.generate_plane_features(p, c, plane='yz')
return fea
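if __name__ == "__main__":
    # Hedged smoke test: the hyperparameters below (c_dim/hidden_dim 32, plane
    # resolution 64, a depth-4 U-Net) are illustrative assumptions, not the
    # repository's training configuration. Requires torch_scatter and the
    # normalize_coordinate/coordinate2index helpers imported above.
    encoder = GeomEncoder(
        c_dim=32, dim=3, f_dim=9, hidden_dim=32,
        unet_kwargs={'depth': 4, 'merge_mode': 'concat', 'start_filts': 32},
        plane_resolution=64)
    pts = torch.rand(2, 1024, 3) - 0.5      # points roughly in [-0.5, 0.5]^3
    feats = torch.rand(2, 1024, 9)          # per-point features (f_dim = 9)
    planes = encoder((pts, feats))
    for name, plane in planes.items():
        print(name, tuple(plane.shape))     # expected (2, 32, 64, 64) per plane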
NVlabs/ACID/ACID/src/encoder/unet.py | '''
Codes are from:
https://github.com/jaxony/unet-pytorch/blob/master/model.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np
def conv3x3(in_channels, out_channels, stride=1,
padding=1, bias=True, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=padding,
bias=bias,
groups=groups)
def upconv2x2(in_channels, out_channels, mode='transpose'):
if mode == 'transpose':
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=2,
stride=2)
else:
# out_channels is always going to be the same
# as in_channels
return nn.Sequential(
nn.Upsample(mode='bilinear', scale_factor=2),
conv1x1(in_channels, out_channels))
def conv1x1(in_channels, out_channels, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
groups=groups,
stride=1)
class DownConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels, pooling=True):
super(DownConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.conv1 = conv3x3(self.in_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
if self.pooling:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
before_pool = x
if self.pooling:
x = self.pool(x)
return x, before_pool
class UpConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 UpConvolution.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels,
merge_mode='concat', up_mode='transpose'):
super(UpConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.merge_mode = merge_mode
self.up_mode = up_mode
self.upconv = upconv2x2(self.in_channels, self.out_channels,
mode=self.up_mode)
if self.merge_mode == 'concat':
self.conv1 = conv3x3(
2*self.out_channels, self.out_channels)
else:
# num of input channels to conv2 is same
self.conv1 = conv3x3(self.out_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
def forward(self, from_down, from_up):
""" Forward pass
Arguments:
from_down: tensor from the encoder pathway
from_up: upconv'd tensor from the decoder pathway
"""
from_up = self.upconv(from_up)
if self.merge_mode == 'concat':
x = torch.cat((from_up, from_down), 1)
else:
x = from_up + from_down
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
return x
class UNet(nn.Module):
""" `UNet` class is based on https://arxiv.org/abs/1505.04597
The U-Net is a convolutional encoder-decoder neural network.
Contextual spatial information (from the decoding,
expansive pathway) about an input tensor is merged with
information representing the localization of details
(from the encoding, compressive pathway).
Modifications to the original paper:
(1) padding is used in 3x3 convolutions to prevent loss
of border pixels
(2) merging outputs does not require cropping due to (1)
(3) residual connections can be used by specifying
UNet(merge_mode='add')
(4) if non-parametric upsampling is used in the decoder
pathway (specified by upmode='upsample'), then an
additional 1x1 2d convolution occurs after upsampling
to reduce channel dimensionality by a factor of 2.
        This channel halving happens within the transpose convolution
        (specified by up_mode='transpose').
"""
def __init__(self, num_classes, in_channels=3, depth=5,
start_filts=64, up_mode='transpose',
merge_mode='concat', **kwargs):
"""
Arguments:
in_channels: int, number of channels in the input tensor.
Default is 3 for RGB images.
depth: int, number of MaxPools in the U-Net.
start_filts: int, number of convolutional filters for the
first conv.
up_mode: string, type of upconvolution. Choices: 'transpose'
for transpose convolution or 'upsample' for nearest neighbour
upsampling.
"""
super(UNet, self).__init__()
if up_mode in ('transpose', 'upsample'):
self.up_mode = up_mode
else:
raise ValueError("\"{}\" is not a valid mode for "
"upsampling. Only \"transpose\" and "
"\"upsample\" are allowed.".format(up_mode))
if merge_mode in ('concat', 'add'):
self.merge_mode = merge_mode
else:
            raise ValueError("\"{}\" is not a valid mode for "
                             "merging up and down paths. "
                             "Only \"concat\" and "
                             "\"add\" are allowed.".format(merge_mode))
# NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
if self.up_mode == 'upsample' and self.merge_mode == 'add':
raise ValueError("up_mode \"upsample\" is incompatible "
"with merge_mode \"add\" at the moment "
"because it doesn't make sense to use "
"nearest neighbour to reduce "
"depth channels (by half).")
self.num_classes = num_classes
self.in_channels = in_channels
self.start_filts = start_filts
self.depth = depth
self.down_convs = []
self.up_convs = []
# create the encoder pathway and add to a list
for i in range(depth):
ins = self.in_channels if i == 0 else outs
outs = self.start_filts*(2**i)
pooling = True if i < depth-1 else False
down_conv = DownConv(ins, outs, pooling=pooling)
self.down_convs.append(down_conv)
# create the decoder pathway and add to a list
# - careful! decoding only requires depth-1 blocks
for i in range(depth-1):
ins = outs
outs = ins // 2
up_conv = UpConv(ins, outs, up_mode=up_mode,
merge_mode=merge_mode)
self.up_convs.append(up_conv)
# add the list of modules to current module
self.down_convs = nn.ModuleList(self.down_convs)
self.up_convs = nn.ModuleList(self.up_convs)
self.conv_final = conv1x1(outs, self.num_classes)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight)
init.constant_(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
encoder_outs = []
# encoder pathway, save outputs for merging
for i, module in enumerate(self.down_convs):
x, before_pool = module(x)
encoder_outs.append(before_pool)
for i, module in enumerate(self.up_convs):
before_pool = encoder_outs[-(i+2)]
x = module(before_pool, x)
        # No softmax is used. This means you need to use
        # nn.CrossEntropyLoss in your training script,
        # as that loss already includes the softmax.
x = self.conv_final(x)
return x
if __name__ == "__main__":
"""
testing
"""
model = UNet(1, depth=5, merge_mode='concat', in_channels=1, start_filts=32)
print(model)
print(sum(p.numel() for p in model.parameters()))
reso = 176
x = np.zeros((1, 1, reso, reso))
x[:,:,int(reso/2-1), int(reso/2-1)] = np.nan
x = torch.FloatTensor(x)
out = model(x)
print('%f'%(torch.sum(torch.isnan(out)).detach().cpu().numpy()/(reso*reso)))
# loss = torch.sum(out)
# loss.backward()
NVlabs/ACID/ACID/src/utils/common_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def get_color_map(x):
colours = plt.cm.Spectral(x)
return colours[:, :3]
def embed_tsne(data):
"""
N x D np.array data
"""
tsne = TSNE(n_components=1, verbose=0, perplexity=40, n_iter=300, random_state=0)
tsne_results = tsne.fit_transform(data)
tsne_results = np.squeeze(tsne_results)
tsne_min = np.min(tsne_results)
tsne_max = np.max(tsne_results)
return (tsne_results - tsne_min) / (tsne_max - tsne_min)
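# Hedged sketch: colorize a feature set by its 1-D t-SNE embedding, which is how
# feature planes are turned into RGB images elsewhere in this repo. Sizes are
# arbitrary; t-SNE needs more samples than its perplexity (40 above).
#
#   feats = np.random.rand(500, 32)
#   colors = get_color_map(embed_tsne(feats))   # (500, 3) RGB values in [0, 1]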
########################################################################
# Viewpoint transform
########################################################################
view_to_order = {
'cam0': ('X', 'Y', 'Z'),
'cam1': ('-Z', 'Y', 'X'),
'cam2': ('Z', 'Y', '-X'),
'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
multiplier = -1 if '-' in val else 1
if "X" in val:
return x * multiplier
elif "Y" in val:
return y * multiplier
elif "Z" in val:
return z * multiplier
def world_coord_view_augmentation(view, pts):
order = view_to_order[view]
pts = pts.reshape([-1,3])
x,y,z = np.moveaxis(pts, 1, 0)
return np.array([get_axis_pt(o,x,y,z) for o in order]).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def transform_points_world_to_cam(world_points, cam_extr):
return np.transpose(
np.dot(
np.linalg.inv(
cam_extr[0:3, 0:3]),
np.transpose(world_points)
- np.tile(cam_extr[0:3, 3:], (1, world_points.shape[0]))))
def render_points_slowest(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
cam_pts_x = np.rint(cam_pts_x).astype(int)
cam_pts_y = np.rint(cam_pts_y).astype(int)
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
grouped_pts = [[*j] for i, j in itertools.groupby(
sorted_pts,
key=lambda x: (x[0] // 3, x[1] // 3))]
min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
min_idx = min_depth[:,-1]
min_depth = min_depth[:,:-1]
return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
points[:,:2] = np.rint(points[:,:2] / 2)
points = points[points[:,1].argsort()]
points = points[points[:,0].argsort(kind='mergesort')]
grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr, return_index=False):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
if return_index:
return min_idx
return world_points[min_idx]
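# Hedged usage sketch (the camera matrices live in src.utils.plushsim_util and
# world_pts is assumed to be in the simulator's world frame):
#
#   from src.utils import plushsim_util
#   vis_idx = render_points(world_pts, plushsim_util.CAM_EXTR,
#                           plushsim_util.CAM_INTR, return_index=True)
#   visible_pts = world_pts[vis_idx]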
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
W, H = depth_image.shape
if keep_dim:
world_pts = world_pts.reshape([W, H, 3])
return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
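# Hedged sketch: back-project a synthetic constant-depth image. Depth values in
# this codebase are negative (see the loaders in plushsim_util/mentalsim_util),
# and the intrinsics below mirror the CAM_INTR constants used there.
#
#   intr = np.array([[687.19, 0., 360.], [0., 687.19, 360.], [0., 0., 1.]])
#   depth = -np.ones((720, 720))
#   cam_pts = project_depth_cam_space(depth, intr, keep_dim=True)  # (720, 720, 3)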
def get_trunc_ab(mean, std, a, b):
return (a - mean) / std, (b - mean) /std
def get_trunc_ab_range(mean_min, mean_max, std, a, b):
return (a - mean_min) / std, (b - mean_max) /std
def transform_points(pointcloud, from_range, to_range):
if len(pointcloud.shape) == 1:
pointcloud = pointcloud.reshape([1,-1])
if pointcloud.shape[1] == 6:
xyz = pointcloud[:,:3]
rgb = pointcloud[:,3:]
else:
xyz = pointcloud
rgb = None
from_center = np.mean(from_range, axis=0)
from_size = np.ptp(from_range, axis=0)
to_center = np.mean(to_range, axis=0)
to_size = np.ptp(to_range, axis=0)
xyz = (xyz - from_center) / from_size * to_size + to_center
if rgb is None:
return xyz
else:
return np.concatenate([xyz, rgb], axis=-1)
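# Hedged sketch: rescale simulator-scale points into a normalized cube. The
# source range matches SCENE_RANGE in plushsim_util; the [-0.55, 0.55] target
# cube mirrors the padded unit cube used elsewhere in this repo.
#
#   sim_range = np.array([[-600., -600., -20.], [600., 600., 380.]])
#   unit_range = np.array([[-0.55, -0.55, -0.55], [0.55, 0.55, 0.55]])
#   pts_sim = np.random.uniform(sim_range[0], sim_range[1], size=(100, 3))
#   pts_unit = transform_points(pts_sim, sim_range, unit_range)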
def extent_to_cube(extent):
min_x,min_y,min_z = extent[0]
max_x,max_y,max_z = extent[1]
verts = np.array([
(max_x,max_y,max_z),
(max_x,max_y,min_z),
(max_x,min_y,max_z),
(max_x,min_y,min_z),
(min_x,max_y,max_z),
(min_x,max_y,min_z),
(min_x,min_y,max_z),
(min_x,min_y,min_z),])
faces = np.array([
(1,5,7,3),
(4,3,7,8),
(8,7,5,6),
(6,2,4,8),
(2,1,3,4),
(6,5,1,2),])
return verts, faces
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def set_background_blank(ax):
# Hide grid lines
ax.grid(False)
ax.set_axis_off()
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# First remove fill
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Now set color to white (or whatever is "invisible")
ax.xaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
ax.yaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
ax.zaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,1][viz_idx],
pts[:,2][viz_idx],
flow[:,0], flow[:,1], flow[:,2],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,1],
pts[:,2], color=col,s=0.5)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
set_axes_equal(ax)
set_background_blank(ax)
fig.tight_layout()
return fig
def write_pointcoud_as_obj(path, xyzrgb, faces=None):
with open(path, 'w') as fp:
if xyzrgb.shape[1] == 6:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
if faces is not None:
for f in faces:
f_str = " ".join([str(i) for i in f])
fp.write(f"f {f_str}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
if points.shape[1] == 6:
xyz = points[:,:3]
else:
xyz = points
if points.shape[0] == 0:
if return_index:
return np.arange(0)
return points
idx = np.unique(xyz// resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
if x.shape[0] == 0:
return 0,0,0
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
recall = float(sum(d < th for d in d2)) / float(len(d2))
precision = float(sum(d < th for d in d1)) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
    return fscore, precision, recall
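if __name__ == "__main__":
    # Hedged sanity check of the metrics above on synthetic point sets; sizes,
    # noise scale and the F-score threshold are arbitrary illustration values.
    pred = np.random.rand(2000, 3)
    gt = pred + np.random.normal(scale=0.002, size=pred.shape)
    print("chamfer distance:", chamfer_distance(pred, gt))
    print("f-score / precision / recall:", f1_score(pred, gt, th=0.01))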
NVlabs/ACID/ACID/src/utils/io.py | import os
from plyfile import PlyElement, PlyData
import numpy as np
def export_pointcloud(vertices, out_file, as_text=True):
assert(vertices.shape[1] == 3)
vertices = vertices.astype(np.float32)
vertices = np.ascontiguousarray(vertices)
vector_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertices = vertices.view(dtype=vector_dtype).flatten()
plyel = PlyElement.describe(vertices, 'vertex')
plydata = PlyData([plyel], text=as_text)
plydata.write(out_file)
def load_pointcloud(in_file):
plydata = PlyData.read(in_file)
vertices = np.stack([
plydata['vertex']['x'],
plydata['vertex']['y'],
plydata['vertex']['z']
], axis=1)
return vertices
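# Hedged round-trip sketch (the file name is an arbitrary placeholder):
#
#   import tempfile
#   pts = np.random.rand(100, 3).astype(np.float32)
#   ply_path = os.path.join(tempfile.gettempdir(), "acid_example.ply")
#   export_pointcloud(pts, ply_path)
#   assert load_pointcloud(ply_path).shape == (100, 3)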
def read_off(file):
"""
Reads vertices and faces from an off file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines]
    # Fix for ModelNet bug where 'OFF' and the number of vertices and faces
# are all in the first line.
if len(lines[0]) > 3:
assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', \
'invalid OFF file %s' % file
parts = lines[0][3:].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 1
# This is the regular case!
else:
assert lines[0] == 'OFF' or lines[0] == 'off', \
'invalid OFF file %s' % file
parts = lines[1].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 2
vertices = []
for i in range(num_vertices):
vertex = lines[start_index + i].split(' ')
vertex = [float(point.strip()) for point in vertex if point != '']
assert len(vertex) == 3
vertices.append(vertex)
faces = []
for i in range(num_faces):
face = lines[start_index + num_vertices + i].split(' ')
face = [index.strip() for index in face if index != '']
# check to be sure
for index in face:
assert index != '', \
'found empty vertex index: %s (%s)' \
% (lines[start_index + num_vertices + i], file)
face = [int(index) for index in face]
assert face[0] == len(face) - 1, \
            'face should have %d vertices but has %d (%s)' \
% (face[0], len(face) - 1, file)
assert face[0] == 3, \
'only triangular meshes supported (%s)' % file
for index in face:
assert index >= 0 and index < num_vertices, \
'vertex %d (of %d vertices) does not exist (%s)' \
% (index, num_vertices, file)
assert len(face) > 1
faces.append(face)
return vertices, faces
assert False, 'could not open %s' % file
NVlabs/ACID/ACID/src/utils/visualize.py | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import src.common as common
def visualize_data(data, data_type, out_file):
r''' Visualizes the data with regard to its type.
Args:
data (tensor): batch of data
data_type (string): data type (img, voxels or pointcloud)
out_file (string): output file
'''
if data_type == 'voxels':
visualize_voxels(data, out_file=out_file)
elif data_type == 'pointcloud':
visualize_pointcloud(data, out_file=out_file)
elif data_type is None or data_type == 'idx':
pass
else:
raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
r''' Visualizes voxel data.
Args:
voxels (tensor): voxel data
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
voxels = np.asarray(voxels)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
voxels = voxels.transpose(2, 0, 1)
ax.voxels(voxels, edgecolor='k')
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
def visualize_pointcloud(points, normals=None,
out_file=None, show=False):
r''' Visualizes point cloud data.
Args:
points (tensor): point data
normals (tensor): normal data (if existing)
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
points = np.asarray(points)
# Create plot
fig = plt.figure()
ax = fig.gca(projection=Axes3D.name)
ax.scatter(points[:, 2], points[:, 0], points[:, 1])
if normals is not None:
ax.quiver(
points[:, 2], points[:, 0], points[:, 1],
normals[:, 2], normals[:, 0], normals[:, 1],
length=0.1, color='k'
)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.set_zlim(-0.5, 0.5)
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
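# Hedged usage sketch (the output path is arbitrary): save a random point cloud
# to an image without opening a window.
#
#   pts = np.random.rand(1024, 3) - 0.5
#   visualize_pointcloud(pts, out_file='pointcloud.png', show=False)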
NVlabs/ACID/ACID/src/utils/mentalsim_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
########################################################################
# Viewpoint transform
########################################################################
view_to_order = {
'cam0': ('X', 'Y', 'Z'),
'cam1': ('-Z', 'Y', 'X'),
'cam2': ('Z', 'Y', '-X'),
'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
multiplier = -1 if '-' in val else 1
if "X" in val:
return x * multiplier
elif "Y" in val:
return y * multiplier
elif "Z" in val:
return z * multiplier
def world_coord_view_augmentation(view, pts):
order = view_to_order[view]
pts = pts.reshape([-1,3])
x,y,z = np.moveaxis(pts, 1, 0)
return np.array([get_axis_pt(o,x,y,z) for o in order]).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def transform_points_world_to_cam(world_points, cam_extr):
return np.transpose(
np.dot(
np.linalg.inv(
cam_extr[0:3, 0:3]),
np.transpose(world_points)
- np.tile(cam_extr[0:3, 3:], (1, world_points.shape[0]))))
def render_points_slowest(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
cam_pts_x = np.rint(cam_pts_x).astype(int)
cam_pts_y = np.rint(cam_pts_y).astype(int)
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
grouped_pts = [[*j] for i, j in itertools.groupby(
sorted_pts,
key=lambda x: (x[0] // 3, x[1] // 3))]
min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
min_idx = min_depth[:,-1]
min_depth = min_depth[:,:-1]
return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
points[:,:2] = np.rint(points[:,:2] / 2)
points = points[points[:,1].argsort()]
points = points[points[:,0].argsort(kind='mergesort')]
grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
W, H = depth_image.shape
if keep_dim:
world_pts = world_pts.reshape([W, H, 3])
return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
def get_trunc_ab(mean, std, a, b):
return (a - mean) / std, (b - mean) /std
########################################################################
# partial observation getter for full experiment
########################################################################
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
[0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
SCENE_RANGE = np.array([[-600, -400, 0], [600, 400, 400]])
def get_scene_partial_pointcloud(model_category, model_name, split_id, int_id, frame_id, data_root):
path = f"{data_root}/{split_id}/{model_category}/{model_name}/img/{{}}_{int_id:04d}_{frame_id:06d}.{{}}"
depth_img = path.format('depth', 'png')
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = path.format('rgb', 'jpg')
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = path.format('seg', 'jpg')
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
########################################################################
# Get geometric state (full experiment)
########################################################################
def get_object_full_points(model_category, model_name, split_id, int_id, frame_id, data_root):
path = f"{data_root}/{split_id}/{model_category}/{model_name}/geom/{int_id:04d}_{frame_id:06d}.npz"
geom_data = np.load(path)
loc = geom_data['loc']
w,x,y,z= geom_data['rot']
rot = Rotation.from_quat(np.array([x,y,z,w]))
scale = geom_data['scale']
sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
return sim_pts, vis_pts
########################################################################
# partial observation getter for teddy toy example
########################################################################
def get_teddy_partial_pointcloud(int_group, int_id, frame_id, data_root, cam_id='cam0'):
#depth_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_depth.png")[0]
depth_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_depth.png"
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
#rgb_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_rgb.png")[0]
rgb_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_rgb.png"
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
#seg_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_seg.png")[0]
seg_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_seg.png"
seg_img = np.array(Image.open(seg_img))
non_env = np.where(seg_img != 0)
ospdir= os.path.dirname
root_dir = ospdir(ospdir(ospdir(os.path.realpath(__file__))))
camera_json = os.path.join(root_dir, "metadata", "camera.json")
with open(camera_json, 'r') as fp:
cam_info = json.load(fp)
for k in cam_info.keys():
cam_extr, cam_intr = cam_info[k]
cam_info[k] = np.array(cam_extr), np.array(cam_intr)
cam_extr, cam_intr = cam_info[cam_id]
partial_points = project_depth_world_space(depth_vals, cam_intr, cam_extr, keep_dim=True)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
xyzrgb = partial_points_rgb[non_env]
xyz = xyzrgb[:,:3]
xyz = world_coord_view_augmentation(cam_id, xyz)
rgb = xyzrgb[:,3:]
return xyz/ 10. * 1.1, rgb
########################################################################
# Get meta info (teddy toy example)
########################################################################
def get_teddy_loc(int_group, int_id, frame_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
return np.array(dict(zip(int_info['frames'], int_info['teddy_loc']))[frame_id])
def get_teddy_rot(int_group, int_id, frame_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
w,x,y,z = np.array(dict(zip(int_info['frames'], int_info['teddy_rot']))[frame_id])
return np.array([x,y,z,w])
def get_action_info(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
grasp_loc = np.array(int_info['grasp'])
target_loc = np.array(int_info['target'])
return grasp_loc, target_loc
def get_release_frame(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
return json.load(fp)['release_frame']
# name = glob.glob(
# f"{data_root}/{int_group}/geom/{int_id:06d}_release_*_sim.npy")[0].split("/")[-1]
# return int(name.split("_")[-2])
def get_end_frame(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
return json.load(fp)['end_frame']
# name = glob.glob(
# f"{data_root}/{int_group}/geom/{int_id:06d}_static_*_sim.npy")[0].split("/")[-1]
# return int(name.split("_")[-2])
########################################################################
# Get geometric state (teddy toy example)
########################################################################
def get_teddy_full_points(int_group, int_id, frame_id, data_root):
#sim_data = glob.glob(f"{data_root}/{int_group}/geom/{int_id:06d}_*{frame_id:03d}_sim.npy")[0]
sim_data = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_sim.npy"
points = np.load(sim_data)
teddy_loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
teddy_rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
return ( teddy_rot.apply(points) + teddy_loc ) / 10. * 1.1
#return ( points + teddy_loc ) / 10. * 1.1
def get_teddy_vis_points(int_group, int_id, frame_id, data_root):
#sim_data = glob.glob(f"{data_root}/{int_group}/geom/{int_id:06d}_*{frame_id:03d}_vis.npy")[0]
sim_data = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_vis.npy"
points = np.load(sim_data)
teddy_loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
teddy_rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
return ( teddy_rot.apply(points) + teddy_loc ) / 10. * 1.1
#return ( points + teddy_loc ) / 10. * 1.1
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(int_group, int_id, frame_id, data_root, sample_scheme='uniform'):
if sample_scheme not in ['uniform', 'gaussian']:
raise ValueError('Unsupported sampling scheme for occupancy')
num_pts = 100000
if sample_scheme == 'uniform':
pts = np.random.rand(num_pts, 3)
pts = 1.1 * (pts - 0.5)
else:
x,y,z= get_teddy_loc(int_group, int_id, frame_id, data_root) / 10. * 1.1
std = 0.18
a, b = -0.55, 0.55
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T
teddy_sim_points = get_teddy_full_points(int_group, int_id, frame_id, data_root)
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(teddy_sim_points)
dist, ind = x_nn.kneighbors(pts)#[0].squeeze()
dist = dist.squeeze()
ind = ind.squeeze()
occ = dist < 0.01
pt_class = ind[occ != 0]
return pts, occ, pt_class
def sample_occupancies_with_flow(int_group, int_id, release_frame, end_frame, data_root, sample_scheme='uniform'):
pts, occ, ind = sample_occupancies(int_group, int_id, 0, data_root, sample_scheme)
xyz0 = get_teddy_full_points(int_group, int_id, 0, data_root)
f1 = get_teddy_full_points(int_group, int_id, release_frame, data_root) - xyz0
f2 = get_teddy_full_points(int_group, int_id, end_frame, data_root) - xyz0
return pts, occ, ind, f1[ind],f2[ind]
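# Hedged sketch of the labelling rule used above, on synthetic data: a query
# point counts as occupied when it lies within 0.01 of the simulated point set,
# and `ind` records the closest surface point (later used to index the flow).
#
#   surf = np.random.rand(5000, 3) * 0.2             # stand-in for teddy points
#   queries = 1.1 * (np.random.rand(1000, 3) - 0.5)  # uniform in the padded cube
#   nn = NearestNeighbors(n_neighbors=1, algorithm='kd_tree').fit(surf)
#   dist, ind = nn.kneighbors(queries)
#   occ = dist.squeeze() < 0.01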
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,2][viz_idx],
pts[:,1][viz_idx],
flow[:,0], flow[:,2], flow[:,1],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,2],
pts[:,1], color=col,s=0.5)
set_axes_equal(ax)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
return fig
def write_pointcoud_as_obj(xyzrgb, path):
if xyzrgb.shape[1] == 6:
with open(path, 'w') as fp:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
with open(path, 'w') as fp:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
idx = np.unique(points// resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
recall = float(sum(d < th for d in d2)) / float(len(d2))
precision = float(sum(d < th for d in d1)) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
    return fscore, precision, recall
NVlabs/ACID/ACID/src/utils/plushsim_util.py | import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from .common_util import *
########################################################################
# Some file getters
########################################################################
def get_model_dir(data_root, split_id, model_category, model_name):
return f"{data_root}/{split_id}/{model_category}/{model_name}"
def get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/interaction_info_{reset_id:04d}.npz"
def get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/geom/{reset_id:04d}_{frame_id:06d}.npz"
def get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/img/{{}}_{reset_id:04d}_{frame_id:06d}.{{}}"
def get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('rgb', 'jpg')
def get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('depth', 'png')
def get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('seg', 'jpg')
def get_flow_data_file(flow_root,split_id, model_id, reset_id, int_id):
return f"{flow_root}/{split_id}/{model_id}/{reset_id:03d}_{int_id:03d}.npz"
def get_flow_pair_data_file(pair_root,split_id, model_id, reset_id, int_id):
return f"{pair_root}/{split_id}/{model_id}/pair_{reset_id:03d}_{int_id:03d}.npz"
def get_geom_data_file(geom_root,split_id, model_id, reset_id, frame_id):
return f"{geom_root}/{split_id}/{model_id}/{reset_id:03d}_{frame_id:06d}.npz"
def get_pair_data_file(pair_root,split_id, model_id, reset_id, frame_id):
return f"{pair_root}/{split_id}/{model_id}/pair_{reset_id:03d}_{frame_id:06d}.npz"
# Getters for plan data
def get_plan_geom_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/geom/{scenario_id:04d}_{seq_str}_{frame_id}.npz"
def get_plan_interaction_info_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/interaction_info_{scenario_id:04d}_{seq_str}.npz"
def get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/img/{{}}_{scenario_id:04d}_{seq_str}_{frame_id}.{{}}"
def get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('rgb', 'jpg')
def get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('depth', 'png')
def get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('seg', 'jpg')
def get_plan_perf_file(data_root, split_id, model_category, model_name, scenario_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/perf_{scenario_id:04d}.npz"
########################################################################
# partial observation getter for full experiment
########################################################################
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
[0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
SCENE_RANGE = np.array([[-600, -600, -20], [600, 600, 380]])
def get_plan_scene_partial_pointcloud(
model_category, model_name, split_id, scenario_id, sequence_id, frame_id, data_root):
depth_img = get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
def get_scene_partial_pointcloud(model_category, model_name, split_id, reset_id, frame_id, data_root):
depth_img = get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id)
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id)
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id)
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
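# Illustrative sketch (not part of the original pipeline): both getters above return
# (N, 6) arrays of xyz + rgb points for the object and the environment; a common
# follow-up is to drop points that fall outside SCENE_RANGE before using them.
# The helper name below is hypothetical.
def _example_clip_to_scene_range(pts):
    """Keep xyz+rgb points whose xyz lies inside SCENE_RANGE (illustrative only)."""
    xyz = pts[:, :3]
    mask = np.all((xyz >= SCENE_RANGE[0]) & (xyz <= SCENE_RANGE[1]), axis=1)
    return pts[mask]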
def render_points(world_points, cam_extr=None, cam_intr=None, return_index=False, filter_in_cam=True):
if cam_extr is None:
cam_extr = CAM_EXTR
if cam_intr is None:
cam_intr = CAM_INTR
cam_points = transform_points_world_to_cam(world_points, cam_extr) / 100.
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 6) * 1000 + np.rint(cam_pts_x / 6)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
if filter_in_cam:
in_cam = np.where(np.logical_and(cam_pts_x > 0, cam_pts_y > 0))[0]
min_idx = np.intersect1d(in_cam, min_idx, assume_unique=True)
if return_index:
return min_idx
return world_points[min_idx]
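# Illustrative usage sketch: render_points() keeps one front-most point per ~6-pixel
# image cell under the fixed camera (CAM_EXTR / CAM_INTR), approximating the partial
# view of a full point cloud. `geom_path` below is a placeholder for a geometry .npz
# file readable by extract_full_points() further down; the helper itself is hypothetical.
def _example_visible_subset(geom_path):
    sim_pts, vis_pts, loc, rot, scale = extract_full_points(geom_path)
    return render_points(sim_pts, filter_in_cam=True)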
########################################################################
# Get geometric state (full experiment)
########################################################################
def extract_full_points(path):
geom_data = np.load(path)
loc = geom_data['loc']
w,x,y,z= geom_data['rot']
rot = Rotation.from_quat(np.array([x,y,z,w]))
scale = geom_data['scale']
sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
return sim_pts, vis_pts, loc, rot, scale
def get_object_full_points(model_category, model_name, split_id, reset_id, frame_id, data_root):
path = get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id)
return extract_full_points(path)
def get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root):
obj_info = get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id)
int_info = np.load(obj_info)
grasp_loc = np.array(int_info['grasp_points'][interaction_id])
target_loc = np.array(int_info['target_points'][interaction_id])
start_frame = int_info['start_frames'][interaction_id]
release_frame = int_info['release_frames'][interaction_id]
static_frame = int_info['static_frames'][interaction_id]
return grasp_loc, target_loc, start_frame, release_frame, static_frame
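# Illustrative sketch (hypothetical helper): pair an interaction's action info with
# the object geometry at its start and settled frames, using the getters defined
# above. Variable names and the (before, after) pairing are assumptions made for
# illustration, not part of the original training code.
def _example_action_with_geometry(model_category, model_name, split_id, reset_id,
                                  interaction_id, data_root):
    grasp_loc, target_loc, start_f, release_f, static_f = get_action_info(
        model_category, model_name, split_id, reset_id, interaction_id, data_root)
    before = get_object_full_points(model_category, model_name, split_id, reset_id, start_f, data_root)[0]
    after = get_object_full_points(model_category, model_name, split_id, reset_id, static_f, data_root)[0]
    return grasp_loc, target_loc, before, after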
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(full_pts, center,
sample_scheme='gaussian',
num_pts = 100000, bound=0.55,
std=0.1):
if sample_scheme not in ['uniform', 'gaussian', 'object']:
raise ValueError('Unsupported sampling scheme for occupancy')
if sample_scheme == 'uniform':
pts = np.random.rand(num_pts, 3)
pts = 1.1 * (pts - 0.5)
elif sample_scheme == 'object':
displace = full_pts[np.random.randint(full_pts.shape[0], size=num_pts)]
x_min,y_min,z_min = full_pts.min(axis=0)
x_max,y_max,z_max = full_pts.max(axis=0)
a, b = -bound, bound
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(x_min, x_max, std, a, b), loc=0, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(y_min, y_max, std, a, b), loc=0, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(z_min, z_max, std, a, b), loc=0, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T + displace
else:
x,y,z= center
a, b = -bound, bound
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T
    x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(full_pts)
    dist, ind = x_nn.kneighbors(pts)
    dist = dist.squeeze()
    ind = ind.squeeze()
    # a query point is occupied if it lies within 0.01 of the full point cloud
    occ = dist < 0.01
    # class label of each occupied point: index of its nearest neighbor in full_pts
    pt_class = ind[occ != 0]
return pts, occ, pt_class
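# Illustrative sketch: typical use of the sampler above is to draw query points around
# an object's full point cloud and keep (pts, occ) as point-based supervision for an
# implicit decoder. The 'object' scheme and defaults come from sample_occupancies();
# the helper itself is hypothetical.
def _example_occupancy_supervision(full_pts):
    center = full_pts.mean(axis=0)
    pts, occ, pt_class = sample_occupancies(full_pts, center, sample_scheme='object')
    return pts, occ.astype(np.float32)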
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
    '''Make axes of a 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc. This is one possible workaround for Matplotlib's
    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,2][viz_idx],
pts[:,1][viz_idx],
flow[:,0], flow[:,2], flow[:,1],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,2],
pts[:,1], color=col,s=0.5)
set_axes_equal(ax)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
return fig
def write_pointcoud_as_obj(xyzrgb, path):
if xyzrgb.shape[1] == 6:
with open(path, 'w') as fp:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
with open(path, 'w') as fp:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
    idx = np.unique(points // resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
def miou(x, y, th=0.01):
x = subsample_points(x, resolution=th, return_index=False) // th
y = subsample_points(y, resolution=th, return_index=False) // th
xset = set([tuple(i) for i in x])
yset = set([tuple(i) for i in y])
return len(xset & yset) / len(xset | yset)
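# Tiny worked example (illustrative): identical point sets give mIoU 1.0 and disjoint
# sets give 0.0, since miou() compares the sets of voxels (at resolution `th`) that
# each cloud occupies.
def _example_miou_sanity_check():
    x = np.random.rand(1000, 3)
    assert miou(x, x) == 1.0
    assert miou(x, x + 10.0) == 0.0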
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
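# Illustrative usage sketch: chamfer_distance() above takes two (N, 3) / (M, 3) arrays
# and returns a scalar (smaller is better); the cloud size and noise level below are
# hypothetical.
def _example_chamfer():
    gt = np.random.rand(2048, 3)
    pred = gt + np.random.normal(scale=0.005, size=gt.shape)
    return chamfer_distance(pred, gt)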
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
recall = float(sum(d < th for d in d2)) / float(len(d2))
precision = float(sum(d < th for d in d1)) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
return fscore, precision, recall
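# Illustrative sketch: f1_score() returns (fscore, precision, recall) at distance
# threshold `th`; a perfect reconstruction scores 1.0 on all three.
def _example_f1_perfect_reconstruction():
    pts = np.random.rand(1000, 3)
    return f1_score(pts, pts, th=0.01)  # -> (1.0, 1.0, 1.0)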
from scipy.spatial import cKDTree
def find_nn_cpu(feat0, feat1, return_distance=False):
feat1tree = cKDTree(feat1)
dists, nn_inds = feat1tree.query(feat0, k=1, n_jobs=-1)
if return_distance:
return nn_inds, dists
else:
return nn_inds
def find_emd_cpu(feat0, feat1, return_distance=False):
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
d = cdist(feat0, feat1)
feat0_inds, feat1_inds = linear_sum_assignment(d)
return feat0_inds, feat1_inds
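# Illustrative sketch: find_emd_cpu() returns a one-to-one matching between two point
# sets (Hungarian algorithm via linear_sum_assignment); averaging the matched distances
# gives an EMD-style cost. This helper is an assumption, not part of the original
# evaluation code.
def _example_emd_cost(pts0, pts1):
    from scipy.spatial.distance import cdist
    i0, i1 = find_emd_cpu(pts0, pts1)
    return cdist(pts0, pts1)[i0, i1].mean()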
def find_nn_cpu_symmetry_consistent(feat0, feat1, pts0, pts1, n_neighbor=10, local_radis=0.05, return_distance=False):
feat1tree = cKDTree(feat1)
dists, nn_inds = feat1tree.query(feat0, k=n_neighbor, n_jobs=-1)
if return_distance:
return nn_inds, dists
else:
return nn_inds
#################################
# ranking utilities
def overlap(list1, list2, depth):
"""Overlap which accounts for possible ties.
This isn't mentioned in the paper but should be used in the ``rbo*()``
functions below, otherwise overlap at a given depth might be > depth which
inflates the result.
    There are no guidelines in the paper as to what's a good way to calculate
    this, but a reasonable choice is agreement scaled by the minimum of the
    requested depth and the lengths of the two lists (overlap shouldn't be
    larger than the number of ranks in the shorter list, otherwise results are
    conspicuously wrong when the lists are of unequal lengths -- rbo_ext is not
    between rbo_min and rbo_min + rbo_res).
>>> overlap("abcd", "abcd", 3)
3.0
>>> overlap("abcd", "abcd", 5)
4.0
>>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 2)
2.0
>>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 3)
3.0
"""
return agreement(list1, list2, depth) * min(depth, len(list1), len(list2))
def rbo_ext(list1, list2, p=0.9):
"""RBO point estimate based on extrapolating observed overlap.
See equation (32) in paper.
NOTE: The doctests weren't verified against manual computations but seem
plausible.
>>> _round(rbo_ext("abcdefg", "abcdefg", .9))
1.0
>>> _round(rbo_ext("abcdefg", "bacdefg", .9))
0.9
"""
S, L = sorted((list1, list2), key=len)
s, l = len(S), len(L)
x_l = overlap(list1, list2, l)
x_s = overlap(list1, list2, s)
# the paper says overlap(..., d) / d, but it should be replaced by
# agreement(..., d) defined as per equation (28) so that ties are handled
# properly (otherwise values > 1 will be returned)
# sum1 = sum(p**d * overlap(list1, list2, d)[0] / d for d in range(1, l + 1))
sum1 = sum(p ** d * agreement(list1, list2, d) for d in range(1, l + 1))
sum2 = sum(p ** d * x_s * (d - s) / s / d for d in range(s + 1, l + 1))
term1 = (1 - p) / p * (sum1 + sum2)
term2 = p ** l * ((x_l - x_s) / l + x_s / s)
return term1 + term2
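# Illustrative usage sketch: rbo_ext() compares two ranked lists, with p controlling
# how heavily the top ranks are weighted. The plan identifiers below are hypothetical.
def _example_rbo():
    ranking_a = ['plan3', 'plan1', 'plan2', 'plan4']
    ranking_b = ['plan3', 'plan2', 'plan1', 'plan5']
    return rbo_ext(ranking_a, ranking_b, p=0.9)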
def set_at_depth(lst, depth):
ans = set()
for v in lst[:depth]:
if isinstance(v, set):
ans.update(v)
else:
ans.add(v)
return ans
def raw_overlap(list1, list2, depth):
"""Overlap as defined in the article.
"""
set1, set2 = set_at_depth(list1, depth), set_at_depth(list2, depth)
return len(set1.intersection(set2)), len(set1), len(set2)
def agreement(list1, list2, depth):
"""Proportion of shared values between two sorted lists at given depth.
>>> _round(agreement("abcde", "abdcf", 1))
1.0
>>> _round(agreement("abcde", "abdcf", 3))
0.667
>>> _round(agreement("abcde", "abdcf", 4))
1.0
>>> _round(agreement("abcde", "abdcf", 5))
0.8
>>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 1))
0.667
>>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 2))
1.0
"""
len_intersection, len_set1, len_set2 = raw_overlap(list1, list2, depth)
return 2 * len_intersection / (len_set1 + len_set2)
| 20,693 | Python | 43.407725 | 124 | 0.605905 |
NVlabs/ACID/ACID/src/utils/libmise/__init__.py | from .mise import MISE
__all__ = [
    'MISE'
]
| 47 | Python | 6.999999 | 22 | 0.531915 |
NVlabs/ACID/ACID/src/utils/libmise/test.py | import numpy as np
from mise import MISE
import time
t0 = time.time()
extractor = MISE(1, 2, 0.)
p = extractor.query()
i = 0
while p.shape[0] != 0:
print(i)
print(p)
v = 2 * (p.sum(axis=-1) > 2).astype(np.float64) - 1
extractor.update(p, v)
p = extractor.query()
i += 1
if (i >= 8):
break
print(extractor.to_dense())
# p, v = extractor.get_points()
# print(p)
# print(v)
print('Total time: %f' % (time.time() - t0))
| 456 | Python | 16.576922 | 55 | 0.570175 |
NVlabs/ACID/ACID/src/utils/libsimplify/__init__.py | from .simplify_mesh import (
mesh_simplify
)
import trimesh
def simplify_mesh(mesh, f_target=10000, agressiveness=7.):
vertices = mesh.vertices
faces = mesh.faces
vertices, faces = mesh_simplify(vertices, faces, f_target, agressiveness)
mesh_simplified = trimesh.Trimesh(vertices, faces, process=False)
return mesh_simplified
| 355 | Python | 21.249999 | 77 | 0.723944 |
NVlabs/ACID/ACID/src/utils/libsimplify/test.py | from simplify_mesh import mesh_simplify
import numpy as np
v = np.random.rand(100, 3)
f = np.random.choice(range(100), (50, 3))
mesh_simplify(v, f, 50) | 153 | Python | 20.999997 | 41 | 0.705882 |
NVlabs/ACID/ACID/src/utils/libsimplify/Simplify.h | /////////////////////////////////////////////
//
// Mesh Simplification Tutorial
//
// (C) by Sven Forstmann in 2014
//
// License : MIT
// http://opensource.org/licenses/MIT
//
//https://github.com/sp4cerat/Fast-Quadric-Mesh-Simplification
//
// 5/2016: Chris Rorden created minimal version for OSX/Linux/Windows compile
//#include <iostream>
//#include <stddef.h>
//#include <functional>
//#include <sys/stat.h>
//#include <stdbool.h>
#include <string.h>
//#include <ctype.h>
//#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <map>
#include <vector>
#include <string>
#include <math.h>
#include <float.h> //FLT_EPSILON, DBL_EPSILON
#define loopi(start_l,end_l) for ( int i=start_l;i<end_l;++i )
#define loopi(start_l,end_l) for ( int i=start_l;i<end_l;++i )
#define loopj(start_l,end_l) for ( int j=start_l;j<end_l;++j )
#define loopk(start_l,end_l) for ( int k=start_l;k<end_l;++k )
struct vector3
{
double x, y, z;
};
struct vec3f
{
double x, y, z;
inline vec3f( void ) {}
//inline vec3f operator =( vector3 a )
// { vec3f b ; b.x = a.x; b.y = a.y; b.z = a.z; return b;}
inline vec3f( vector3 a )
{ x = a.x; y = a.y; z = a.z; }
inline vec3f( const double X, const double Y, const double Z )
{ x = X; y = Y; z = Z; }
inline vec3f operator + ( const vec3f& a ) const
{ return vec3f( x + a.x, y + a.y, z + a.z ); }
inline vec3f operator += ( const vec3f& a ) const
{ return vec3f( x + a.x, y + a.y, z + a.z ); }
inline vec3f operator * ( const double a ) const
{ return vec3f( x * a, y * a, z * a ); }
inline vec3f operator * ( const vec3f a ) const
{ return vec3f( x * a.x, y * a.y, z * a.z ); }
inline vec3f v3 () const
{ return vec3f( x , y, z ); }
inline vec3f operator = ( const vector3 a )
{ x=a.x;y=a.y;z=a.z;return *this; }
inline vec3f operator = ( const vec3f a )
{ x=a.x;y=a.y;z=a.z;return *this; }
inline vec3f operator / ( const vec3f a ) const
{ return vec3f( x / a.x, y / a.y, z / a.z ); }
inline vec3f operator - ( const vec3f& a ) const
{ return vec3f( x - a.x, y - a.y, z - a.z ); }
inline vec3f operator / ( const double a ) const
{ return vec3f( x / a, y / a, z / a ); }
inline double dot( const vec3f& a ) const
{ return a.x*x + a.y*y + a.z*z; }
inline vec3f cross( const vec3f& a , const vec3f& b )
{
x = a.y * b.z - a.z * b.y;
y = a.z * b.x - a.x * b.z;
z = a.x * b.y - a.y * b.x;
return *this;
}
inline double angle( const vec3f& v )
{
vec3f a = v , b = *this;
double dot = v.x*x + v.y*y + v.z*z;
double len = a.length() * b.length();
if(len==0)len=0.00001f;
double input = dot / len;
if (input<-1) input=-1;
if (input>1) input=1;
return (double) acos ( input );
}
inline double angle2( const vec3f& v , const vec3f& w )
{
vec3f a = v , b= *this;
double dot = a.x*b.x + a.y*b.y + a.z*b.z;
double len = a.length() * b.length();
if(len==0)len=1;
vec3f plane; plane.cross( b,w );
if ( plane.x * a.x + plane.y * a.y + plane.z * a.z > 0 )
return (double) -acos ( dot / len );
return (double) acos ( dot / len );
}
inline vec3f rot_x( double a )
{
double yy = cos ( a ) * y + sin ( a ) * z;
double zz = cos ( a ) * z - sin ( a ) * y;
y = yy; z = zz;
return *this;
}
inline vec3f rot_y( double a )
{
double xx = cos ( -a ) * x + sin ( -a ) * z;
double zz = cos ( -a ) * z - sin ( -a ) * x;
x = xx; z = zz;
return *this;
}
inline void clamp( double min, double max )
{
if (x<min) x=min;
if (y<min) y=min;
if (z<min) z=min;
if (x>max) x=max;
if (y>max) y=max;
if (z>max) z=max;
}
inline vec3f rot_z( double a )
{
double yy = cos ( a ) * y + sin ( a ) * x;
double xx = cos ( a ) * x - sin ( a ) * y;
y = yy; x = xx;
return *this;
}
inline vec3f invert()
{
x=-x;y=-y;z=-z;return *this;
}
inline vec3f frac()
{
return vec3f(
x-double(int(x)),
y-double(int(y)),
z-double(int(z))
);
}
inline vec3f integer()
{
return vec3f(
double(int(x)),
double(int(y)),
double(int(z))
);
}
inline double length() const
{
return (double)sqrt(x*x + y*y + z*z);
}
inline vec3f normalize( double desired_length = 1 )
{
double square = sqrt(x*x + y*y + z*z);
/*
if (square <= 0.00001f )
{
x=1;y=0;z=0;
return *this;
}*/
//double len = desired_length / square;
x/=square;y/=square;z/=square;
return *this;
}
static vec3f normalize( vec3f a );
static void random_init();
static double random_double();
static vec3f random();
static int random_number;
double random_double_01(double a){
double rnf=a*14.434252+a*364.2343+a*4213.45352+a*2341.43255+a*254341.43535+a*223454341.3523534245+23453.423412;
int rni=((int)rnf)%100000;
return double(rni)/(100000.0f-1.0f);
}
vec3f random01_fxyz(){
x=(double)random_double_01(x);
y=(double)random_double_01(y);
z=(double)random_double_01(z);
return *this;
}
};
vec3f barycentric(const vec3f &p, const vec3f &a, const vec3f &b, const vec3f &c){
vec3f v0 = b-a;
vec3f v1 = c-a;
vec3f v2 = p-a;
double d00 = v0.dot(v0);
double d01 = v0.dot(v1);
double d11 = v1.dot(v1);
double d20 = v2.dot(v0);
double d21 = v2.dot(v1);
double denom = d00*d11-d01*d01;
double v = (d11 * d20 - d01 * d21) / denom;
double w = (d00 * d21 - d01 * d20) / denom;
double u = 1.0 - v - w;
return vec3f(u,v,w);
}
vec3f interpolate(const vec3f &p, const vec3f &a, const vec3f &b, const vec3f &c, const vec3f attrs[3])
{
vec3f bary = barycentric(p,a,b,c);
vec3f out = vec3f(0,0,0);
out = out + attrs[0] * bary.x;
out = out + attrs[1] * bary.y;
out = out + attrs[2] * bary.z;
return out;
}
double min(double v1, double v2) {
return fmin(v1,v2);
}
class SymetricMatrix {
public:
// Constructor
SymetricMatrix(double c=0) { loopi(0,10) m[i] = c; }
SymetricMatrix( double m11, double m12, double m13, double m14,
double m22, double m23, double m24,
double m33, double m34,
double m44) {
m[0] = m11; m[1] = m12; m[2] = m13; m[3] = m14;
m[4] = m22; m[5] = m23; m[6] = m24;
m[7] = m33; m[8] = m34;
m[9] = m44;
}
// Make plane
SymetricMatrix(double a,double b,double c,double d)
{
m[0] = a*a; m[1] = a*b; m[2] = a*c; m[3] = a*d;
m[4] = b*b; m[5] = b*c; m[6] = b*d;
m[7 ] =c*c; m[8 ] = c*d;
m[9 ] = d*d;
}
double operator[](int c) const { return m[c]; }
// Determinant
double det( int a11, int a12, int a13,
int a21, int a22, int a23,
int a31, int a32, int a33)
{
double det = m[a11]*m[a22]*m[a33] + m[a13]*m[a21]*m[a32] + m[a12]*m[a23]*m[a31]
- m[a13]*m[a22]*m[a31] - m[a11]*m[a23]*m[a32]- m[a12]*m[a21]*m[a33];
return det;
}
const SymetricMatrix operator+(const SymetricMatrix& n) const
{
return SymetricMatrix( m[0]+n[0], m[1]+n[1], m[2]+n[2], m[3]+n[3],
m[4]+n[4], m[5]+n[5], m[6]+n[6],
m[ 7]+n[ 7], m[ 8]+n[8 ],
m[ 9]+n[9 ]);
}
SymetricMatrix& operator+=(const SymetricMatrix& n)
{
m[0]+=n[0]; m[1]+=n[1]; m[2]+=n[2]; m[3]+=n[3];
m[4]+=n[4]; m[5]+=n[5]; m[6]+=n[6]; m[7]+=n[7];
m[8]+=n[8]; m[9]+=n[9];
return *this;
}
double m[10];
};
///////////////////////////////////////////
namespace Simplify
{
	// Global Variables & Structures
enum Attributes {
NONE,
NORMAL = 2,
TEXCOORD = 4,
COLOR = 8
};
struct Triangle { int v[3];double err[4];int deleted,dirty,attr;vec3f n;vec3f uvs[3];int material; };
struct Vertex { vec3f p;int tstart,tcount;SymetricMatrix q;int border;};
struct Ref { int tid,tvertex; };
std::vector<Triangle> triangles;
std::vector<Vertex> vertices;
std::vector<Ref> refs;
std::string mtllib;
std::vector<std::string> materials;
// Helper functions
double vertex_error(SymetricMatrix q, double x, double y, double z);
double calculate_error(int id_v1, int id_v2, vec3f &p_result);
bool flipped(vec3f p,int i0,int i1,Vertex &v0,Vertex &v1,std::vector<int> &deleted);
void update_uvs(int i0,const Vertex &v,const vec3f &p,std::vector<int> &deleted);
void update_triangles(int i0,Vertex &v,std::vector<int> &deleted,int &deleted_triangles);
void update_mesh(int iteration);
void compact_mesh();
//
// Main simplification function
//
// target_count : target nr. of triangles
// agressiveness : sharpness to increase the threshold.
// 5..8 are good numbers
// more iterations yield higher quality
//
void simplify_mesh(int target_count, double agressiveness=7, bool verbose=false)
{
// init
loopi(0,triangles.size())
{
triangles[i].deleted=0;
}
// main iteration loop
int deleted_triangles=0;
std::vector<int> deleted0,deleted1;
int triangle_count=triangles.size();
//int iteration = 0;
//loop(iteration,0,100)
for (int iteration = 0; iteration < 100; iteration ++)
{
if(triangle_count-deleted_triangles<=target_count)break;
// update mesh once in a while
if(iteration%5==0)
{
update_mesh(iteration);
}
// clear dirty flag
loopi(0,triangles.size()) triangles[i].dirty=0;
//
// All triangles with edges below the threshold will be removed
//
			// The following numbers work well for most models.
			// If they do not, try adjusting the 3 parameters
//
double threshold = 0.000000001*pow(double(iteration+3),agressiveness);
// target number of triangles reached ? Then break
if ((verbose) && (iteration%5==0)) {
printf("iteration %d - triangles %d threshold %g\n",iteration,triangle_count-deleted_triangles, threshold);
}
// remove vertices & mark deleted triangles
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
if(t.err[3]>threshold) continue;
if(t.deleted) continue;
if(t.dirty) continue;
loopj(0,3)if(t.err[j]<threshold)
{
int i0=t.v[ j ]; Vertex &v0 = vertices[i0];
int i1=t.v[(j+1)%3]; Vertex &v1 = vertices[i1];
// Border check
if(v0.border != v1.border) continue;
// Compute vertex to collapse to
vec3f p;
calculate_error(i0,i1,p);
deleted0.resize(v0.tcount); // normals temporarily
deleted1.resize(v1.tcount); // normals temporarily
// don't remove if flipped
if( flipped(p,i0,i1,v0,v1,deleted0) ) continue;
if( flipped(p,i1,i0,v1,v0,deleted1) ) continue;
if ( (t.attr & TEXCOORD) == TEXCOORD )
{
update_uvs(i0,v0,p,deleted0);
update_uvs(i0,v1,p,deleted1);
}
// not flipped, so remove edge
v0.p=p;
v0.q=v1.q+v0.q;
int tstart=refs.size();
update_triangles(i0,v0,deleted0,deleted_triangles);
update_triangles(i0,v1,deleted1,deleted_triangles);
int tcount=refs.size()-tstart;
if(tcount<=v0.tcount)
{
// save ram
if(tcount)memcpy(&refs[v0.tstart],&refs[tstart],tcount*sizeof(Ref));
}
else
// append
v0.tstart=tstart;
v0.tcount=tcount;
break;
}
// done?
if(triangle_count-deleted_triangles<=target_count)break;
}
}
// clean up mesh
compact_mesh();
} //simplify_mesh()
void simplify_mesh_lossless(bool verbose=false)
{
// init
loopi(0,triangles.size()) triangles[i].deleted=0;
// main iteration loop
int deleted_triangles=0;
std::vector<int> deleted0,deleted1;
int triangle_count=triangles.size();
//int iteration = 0;
//loop(iteration,0,100)
for (int iteration = 0; iteration < 9999; iteration ++)
{
// update mesh constantly
update_mesh(iteration);
// clear dirty flag
loopi(0,triangles.size()) triangles[i].dirty=0;
//
// All triangles with edges below the threshold will be removed
//
			// The following numbers work well for most models.
			// If they do not, try adjusting the 3 parameters
//
double threshold = DBL_EPSILON; //1.0E-3 EPS;
if (verbose) {
printf("lossless iteration %d\n", iteration);
}
// remove vertices & mark deleted triangles
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
if(t.err[3]>threshold) continue;
if(t.deleted) continue;
if(t.dirty) continue;
loopj(0,3)if(t.err[j]<threshold)
{
int i0=t.v[ j ]; Vertex &v0 = vertices[i0];
int i1=t.v[(j+1)%3]; Vertex &v1 = vertices[i1];
// Border check
if(v0.border != v1.border) continue;
// Compute vertex to collapse to
vec3f p;
calculate_error(i0,i1,p);
deleted0.resize(v0.tcount); // normals temporarily
deleted1.resize(v1.tcount); // normals temporarily
// don't remove if flipped
if( flipped(p,i0,i1,v0,v1,deleted0) ) continue;
if( flipped(p,i1,i0,v1,v0,deleted1) ) continue;
if ( (t.attr & TEXCOORD) == TEXCOORD )
{
update_uvs(i0,v0,p,deleted0);
update_uvs(i0,v1,p,deleted1);
}
// not flipped, so remove edge
v0.p=p;
v0.q=v1.q+v0.q;
int tstart=refs.size();
update_triangles(i0,v0,deleted0,deleted_triangles);
update_triangles(i0,v1,deleted1,deleted_triangles);
int tcount=refs.size()-tstart;
if(tcount<=v0.tcount)
{
// save ram
if(tcount)memcpy(&refs[v0.tstart],&refs[tstart],tcount*sizeof(Ref));
}
else
// append
v0.tstart=tstart;
v0.tcount=tcount;
break;
}
}
if(deleted_triangles<=0)break;
deleted_triangles=0;
} //for each iteration
// clean up mesh
compact_mesh();
} //simplify_mesh_lossless()
// Check if a triangle flips when this edge is removed
bool flipped(vec3f p,int i0,int i1,Vertex &v0,Vertex &v1,std::vector<int> &deleted)
{
loopk(0,v0.tcount)
{
Triangle &t=triangles[refs[v0.tstart+k].tid];
if(t.deleted)continue;
int s=refs[v0.tstart+k].tvertex;
int id1=t.v[(s+1)%3];
int id2=t.v[(s+2)%3];
if(id1==i1 || id2==i1) // delete ?
{
deleted[k]=1;
continue;
}
vec3f d1 = vertices[id1].p-p; d1.normalize();
vec3f d2 = vertices[id2].p-p; d2.normalize();
if(fabs(d1.dot(d2))>0.999) return true;
vec3f n;
n.cross(d1,d2);
n.normalize();
deleted[k]=0;
if(n.dot(t.n)<0.2) return true;
}
return false;
}
// update_uvs
void update_uvs(int i0,const Vertex &v,const vec3f &p,std::vector<int> &deleted)
{
loopk(0,v.tcount)
{
Ref &r=refs[v.tstart+k];
Triangle &t=triangles[r.tid];
if(t.deleted)continue;
if(deleted[k])continue;
vec3f p1=vertices[t.v[0]].p;
vec3f p2=vertices[t.v[1]].p;
vec3f p3=vertices[t.v[2]].p;
t.uvs[r.tvertex] = interpolate(p,p1,p2,p3,t.uvs);
}
}
	// Update triangle connections and edge error after an edge is collapsed
void update_triangles(int i0,Vertex &v,std::vector<int> &deleted,int &deleted_triangles)
{
vec3f p;
loopk(0,v.tcount)
{
Ref &r=refs[v.tstart+k];
Triangle &t=triangles[r.tid];
if(t.deleted)continue;
if(deleted[k])
{
t.deleted=1;
deleted_triangles++;
continue;
}
t.v[r.tvertex]=i0;
t.dirty=1;
t.err[0]=calculate_error(t.v[0],t.v[1],p);
t.err[1]=calculate_error(t.v[1],t.v[2],p);
t.err[2]=calculate_error(t.v[2],t.v[0],p);
t.err[3]=min(t.err[0],min(t.err[1],t.err[2]));
refs.push_back(r);
}
}
// compact triangles, compute edge error and build reference list
void update_mesh(int iteration)
{
if(iteration>0) // compact triangles
{
int dst=0;
loopi(0,triangles.size())
if(!triangles[i].deleted)
{
triangles[dst++]=triangles[i];
}
triangles.resize(dst);
}
//
// Init Quadrics by Plane & Edge Errors
//
// required at the beginning ( iteration == 0 )
// recomputing during the simplification is not required,
// but mostly improves the result for closed meshes
//
if( iteration == 0 )
{
loopi(0,vertices.size())
vertices[i].q=SymetricMatrix(0.0);
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
vec3f n,p[3];
loopj(0,3) p[j]=vertices[t.v[j]].p;
n.cross(p[1]-p[0],p[2]-p[0]);
n.normalize();
t.n=n;
loopj(0,3) vertices[t.v[j]].q =
vertices[t.v[j]].q+SymetricMatrix(n.x,n.y,n.z,-n.dot(p[0]));
}
loopi(0,triangles.size())
{
// Calc Edge Error
Triangle &t=triangles[i];vec3f p;
loopj(0,3) t.err[j]=calculate_error(t.v[j],t.v[(j+1)%3],p);
t.err[3]=min(t.err[0],min(t.err[1],t.err[2]));
}
}
// Init Reference ID list
loopi(0,vertices.size())
{
vertices[i].tstart=0;
vertices[i].tcount=0;
}
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
loopj(0,3) vertices[t.v[j]].tcount++;
}
int tstart=0;
loopi(0,vertices.size())
{
Vertex &v=vertices[i];
v.tstart=tstart;
tstart+=v.tcount;
v.tcount=0;
}
// Write References
refs.resize(triangles.size()*3);
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
loopj(0,3)
{
Vertex &v=vertices[t.v[j]];
refs[v.tstart+v.tcount].tid=i;
refs[v.tstart+v.tcount].tvertex=j;
v.tcount++;
}
}
// Identify boundary : vertices[].border=0,1
if( iteration == 0 )
{
std::vector<int> vcount,vids;
loopi(0,vertices.size())
vertices[i].border=0;
loopi(0,vertices.size())
{
Vertex &v=vertices[i];
vcount.clear();
vids.clear();
loopj(0,v.tcount)
{
int k=refs[v.tstart+j].tid;
Triangle &t=triangles[k];
loopk(0,3)
{
int ofs=0,id=t.v[k];
while(ofs<vcount.size())
{
if(vids[ofs]==id)break;
ofs++;
}
if(ofs==vcount.size())
{
vcount.push_back(1);
vids.push_back(id);
}
else
vcount[ofs]++;
}
}
loopj(0,vcount.size()) if(vcount[j]==1)
vertices[vids[j]].border=1;
}
}
}
// Finally compact mesh before exiting
void compact_mesh()
{
int dst=0;
loopi(0,vertices.size())
{
vertices[i].tcount=0;
}
loopi(0,triangles.size())
if(!triangles[i].deleted)
{
Triangle &t=triangles[i];
triangles[dst++]=t;
loopj(0,3)vertices[t.v[j]].tcount=1;
}
triangles.resize(dst);
dst=0;
loopi(0,vertices.size())
if(vertices[i].tcount)
{
vertices[i].tstart=dst;
vertices[dst].p=vertices[i].p;
dst++;
}
loopi(0,triangles.size())
{
Triangle &t=triangles[i];
loopj(0,3)t.v[j]=vertices[t.v[j]].tstart;
}
vertices.resize(dst);
}
// Error between vertex and Quadric
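	// (q is the per-vertex sum of plane quadrics K_p = p p^T accumulated in
	// update_mesh(); the expression below expands v^T q v for the homogeneous
	// vertex v = (x, y, z, 1), i.e. the summed squared distance of the vertex
	// to the planes of its incident triangles -- the standard quadric error metric.)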
double vertex_error(SymetricMatrix q, double x, double y, double z)
{
return q[0]*x*x + 2*q[1]*x*y + 2*q[2]*x*z + 2*q[3]*x + q[4]*y*y
+ 2*q[5]*y*z + 2*q[6]*y + q[7]*z*z + 2*q[8]*z + q[9];
}
// Error for one edge
double calculate_error(int id_v1, int id_v2, vec3f &p_result)
{
// compute interpolated vertex
SymetricMatrix q = vertices[id_v1].q + vertices[id_v2].q;
bool border = vertices[id_v1].border & vertices[id_v2].border;
double error=0;
double det = q.det(0, 1, 2, 1, 4, 5, 2, 5, 7);
if ( det != 0 && !border )
{
// q_delta is invertible
p_result.x = -1/det*(q.det(1, 2, 3, 4, 5, 6, 5, 7 , 8)); // vx = A41/det(q_delta)
p_result.y = 1/det*(q.det(0, 2, 3, 1, 5, 6, 2, 7 , 8)); // vy = A42/det(q_delta)
p_result.z = -1/det*(q.det(0, 1, 3, 1, 4, 6, 2, 5, 8)); // vz = A43/det(q_delta)
error = vertex_error(q, p_result.x, p_result.y, p_result.z);
}
else
{
// det = 0 -> try to find best result
vec3f p1=vertices[id_v1].p;
vec3f p2=vertices[id_v2].p;
vec3f p3=(p1+p2)/2;
double error1 = vertex_error(q, p1.x,p1.y,p1.z);
double error2 = vertex_error(q, p2.x,p2.y,p2.z);
double error3 = vertex_error(q, p3.x,p3.y,p3.z);
error = min(error1, min(error2, error3));
if (error1 == error) p_result=p1;
if (error2 == error) p_result=p2;
if (error3 == error) p_result=p3;
}
return error;
}
char *trimwhitespace(char *str)
{
char *end;
// Trim leading space
while(isspace((unsigned char)*str)) str++;
if(*str == 0) // All spaces?
return str;
// Trim trailing space
end = str + strlen(str) - 1;
while(end > str && isspace((unsigned char)*end)) end--;
// Write new null terminator
*(end+1) = 0;
return str;
}
	// Optional : Load OBJ
void load_obj(const char* filename, bool process_uv=false){
vertices.clear();
triangles.clear();
//printf ( "Loading Objects %s ... \n",filename);
FILE* fn;
if(filename==NULL) return ;
if((char)filename[0]==0) return ;
if ((fn = fopen(filename, "rb")) == NULL)
{
printf ( "File %s not found!\n" ,filename );
return;
}
char line[1000];
memset ( line,0,1000 );
int vertex_cnt = 0;
int material = -1;
std::map<std::string, int> material_map;
std::vector<vec3f> uvs;
std::vector<std::vector<int> > uvMap;
while(fgets( line, 1000, fn ) != NULL)
{
Vertex v;
vec3f uv;
if (strncmp(line, "mtllib", 6) == 0)
{
mtllib = trimwhitespace(&line[7]);
}
if (strncmp(line, "usemtl", 6) == 0)
{
std::string usemtl = trimwhitespace(&line[7]);
if (material_map.find(usemtl) == material_map.end())
{
material_map[usemtl] = materials.size();
materials.push_back(usemtl);
}
material = material_map[usemtl];
}
if ( line[0] == 'v' && line[1] == 't' )
{
if ( line[2] == ' ' )
if(sscanf(line,"vt %lf %lf",
&uv.x,&uv.y)==2)
{
uv.z = 0;
uvs.push_back(uv);
} else
if(sscanf(line,"vt %lf %lf %lf",
&uv.x,&uv.y,&uv.z)==3)
{
uvs.push_back(uv);
}
}
else if ( line[0] == 'v' )
{
if ( line[1] == ' ' )
if(sscanf(line,"v %lf %lf %lf",
&v.p.x, &v.p.y, &v.p.z)==3)
{
vertices.push_back(v);
}
}
int integers[9];
if ( line[0] == 'f' )
{
Triangle t;
bool tri_ok = false;
bool has_uv = false;
if(sscanf(line,"f %d %d %d",
&integers[0],&integers[1],&integers[2])==3)
{
tri_ok = true;
}else
if(sscanf(line,"f %d// %d// %d//",
&integers[0],&integers[1],&integers[2])==3)
{
tri_ok = true;
}else
if(sscanf(line,"f %d//%d %d//%d %d//%d",
&integers[0],&integers[3],
&integers[1],&integers[4],
&integers[2],&integers[5])==6)
{
tri_ok = true;
}else
if(sscanf(line,"f %d/%d/%d %d/%d/%d %d/%d/%d",
&integers[0],&integers[6],&integers[3],
&integers[1],&integers[7],&integers[4],
&integers[2],&integers[8],&integers[5])==9)
{
tri_ok = true;
has_uv = true;
}
else
{
printf("unrecognized sequence\n");
printf("%s\n",line);
while(1);
}
if ( tri_ok )
{
t.v[0] = integers[0]-1-vertex_cnt;
t.v[1] = integers[1]-1-vertex_cnt;
t.v[2] = integers[2]-1-vertex_cnt;
t.attr = 0;
if ( process_uv && has_uv )
{
std::vector<int> indices;
indices.push_back(integers[6]-1-vertex_cnt);
indices.push_back(integers[7]-1-vertex_cnt);
indices.push_back(integers[8]-1-vertex_cnt);
uvMap.push_back(indices);
t.attr |= TEXCOORD;
}
t.material = material;
//geo.triangles.push_back ( tri );
triangles.push_back(t);
//state_before = state;
//state ='f';
}
}
}
if ( process_uv && uvs.size() )
{
loopi(0,triangles.size())
{
loopj(0,3)
triangles[i].uvs[j] = uvs[uvMap[i][j]];
}
}
fclose(fn);
//printf("load_obj: vertices = %lu, triangles = %lu, uvs = %lu\n", vertices.size(), triangles.size(), uvs.size() );
} // load_obj()
// Optional : Store as OBJ
void write_obj(const char* filename)
{
FILE *file=fopen(filename, "w");
int cur_material = -1;
bool has_uv = (triangles.size() && (triangles[0].attr & TEXCOORD) == TEXCOORD);
if (!file)
{
printf("write_obj: can't write data file \"%s\".\n", filename);
exit(0);
}
if (!mtllib.empty())
{
fprintf(file, "mtllib %s\n", mtllib.c_str());
}
loopi(0,vertices.size())
{
//fprintf(file, "v %lf %lf %lf\n", vertices[i].p.x,vertices[i].p.y,vertices[i].p.z);
fprintf(file, "v %g %g %g\n", vertices[i].p.x,vertices[i].p.y,vertices[i].p.z); //more compact: remove trailing zeros
}
if (has_uv)
{
loopi(0,triangles.size()) if(!triangles[i].deleted)
{
fprintf(file, "vt %g %g\n", triangles[i].uvs[0].x, triangles[i].uvs[0].y);
fprintf(file, "vt %g %g\n", triangles[i].uvs[1].x, triangles[i].uvs[1].y);
fprintf(file, "vt %g %g\n", triangles[i].uvs[2].x, triangles[i].uvs[2].y);
}
}
int uv = 1;
loopi(0,triangles.size()) if(!triangles[i].deleted)
{
if (triangles[i].material != cur_material)
{
cur_material = triangles[i].material;
fprintf(file, "usemtl %s\n", materials[triangles[i].material].c_str());
}
if (has_uv)
{
fprintf(file, "f %d/%d %d/%d %d/%d\n", triangles[i].v[0]+1, uv, triangles[i].v[1]+1, uv+1, triangles[i].v[2]+1, uv+2);
uv += 3;
}
else
{
fprintf(file, "f %d %d %d\n", triangles[i].v[0]+1, triangles[i].v[1]+1, triangles[i].v[2]+1);
}
//fprintf(file, "f %d// %d// %d//\n", triangles[i].v[0]+1, triangles[i].v[1]+1, triangles[i].v[2]+1); //more compact: remove trailing zeros
}
fclose(file);
}
};
///////////////////////////////////////////
| 25,295 | C | 23.58309 | 142 | 0.567108 |
NVlabs/ACID/ACID/src/utils/libmcubes/pyarray_symbol.h |
#define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API
| 51 | C | 16.333328 | 49 | 0.803922 |
NVlabs/ACID/ACID/src/utils/libmcubes/README.rst | ========
PyMCubes
========
PyMCubes is an implementation of the marching cubes algorithm to extract
isosurfaces from volumetric data. The volumetric data can be given as a
three-dimensional NumPy array or as a Python function ``f(x, y, z)``. The first
option is much faster, but it requires more memory and becomes unfeasible for
very large volumes.
PyMCubes also provides a function to export the results of the marching cubes as
COLLADA ``(.dae)`` files. This requires the
`PyCollada <https://github.com/pycollada/pycollada>`_ library.
Installation
============
Just as any standard Python package, clone or download the project
and run::
$ cd path/to/PyMCubes
$ python setup.py build
$ python setup.py install
If you do not have write permission on the directory of Python packages,
install with the ``--user`` option::
$ python setup.py install --user
Example
=======
The following example creates a data volume with spherical isosurfaces and
extracts one of them (i.e., a sphere) with PyMCubes. The result is exported as
``sphere.dae``::
>>> import numpy as np
>>> import mcubes
# Create a data volume (30 x 30 x 30)
>>> X, Y, Z = np.mgrid[:30, :30, :30]
>>> u = (X-15)**2 + (Y-15)**2 + (Z-15)**2 - 8**2
# Extract the 0-isosurface
>>> vertices, triangles = mcubes.marching_cubes(u, 0)
# Export the result to sphere.dae
>>> mcubes.export_mesh(vertices, triangles, "sphere.dae", "MySphere")
The second example is very similar to the first one, but it uses a function
to represent the volume instead of a NumPy array::
>>> import numpy as np
>>> import mcubes
# Create the volume
>>> f = lambda x, y, z: x**2 + y**2 + z**2
# Extract the 16-isosurface
>>> vertices, triangles = mcubes.marching_cubes_func((-10,-10,-10), (10,10,10),
... 100, 100, 100, f, 16)
# Export the result to sphere2.dae
>>> mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere")
| 1,939 | reStructuredText | 28.846153 | 81 | 0.682826 |
NVlabs/ACID/ACID/src/utils/libmcubes/marchingcubes.h |
#ifndef _MARCHING_CUBES_H
#define _MARCHING_CUBES_H
#include <stddef.h>
#include <vector>
namespace mc
{
extern int edge_table[256];
extern int triangle_table[256][16];
namespace private_
{
double mc_isovalue_interpolation(double isovalue, double f1, double f2,
double x1, double x2);
void mc_add_vertex(double x1, double y1, double z1, double c2,
int axis, double f1, double f2, double isovalue, std::vector<double>* vertices);
}
template<typename coord_type, typename vector3, typename formula>
void marching_cubes(const vector3& lower, const vector3& upper,
int numx, int numy, int numz, formula f, double isovalue,
std::vector<double>& vertices, std::vector<size_t>& polygons)
{
using namespace private_;
// typedef decltype(lower[0]) coord_type;
// numx, numy and numz are the numbers of evaluations in each direction
--numx; --numy; --numz;
coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);
size_t* shared_indices = new size_t[2*numy*numz*3];
const int z3 = numz*3;
const int yz3 = numy*z3;
for(int i=0; i<numx; ++i)
{
coord_type x = lower[0] + dx*i + dx/2;
coord_type x_dx = lower[0] + dx*(i+1) + dx/2;
const int i_mod_2 = i % 2;
const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
for(int j=0; j<numy; ++j)
{
coord_type y = lower[1] + dy*j + dy/2;
coord_type y_dy = lower[1] + dy*(j+1) + dy/2;
for(int k=0; k<numz; ++k)
{
coord_type z = lower[2] + dz*k + dz/2;
coord_type z_dz = lower[2] + dz*(k+1) + dz/2;
double v[8];
v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);
unsigned int cubeindex = 0;
for(int m=0; m<8; ++m)
if(v[m] <= isovalue)
cubeindex |= 1<<m;
// Generate vertices AVOIDING DUPLICATES.
int edges = edge_table[cubeindex];
std::vector<size_t> indices(12, -1);
if(edges & 0x040)
{
indices[6] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
}
if(edges & 0x020)
{
indices[5] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
}
if(edges & 0x400)
{
indices[10] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
mc_add_vertex(x_dx, y+dx, z, z_dz, 2, v[2], v[6], isovalue, &vertices);
}
if(edges & 0x001)
{
if(j == 0 || k == 0)
{
indices[0] = vertices.size() / 3;
mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
}
else
indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
}
if(edges & 0x002)
{
if(k == 0)
{
indices[1] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
}
else
indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x004)
{
if(k == 0)
{
indices[2] = vertices.size() / 3;
mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
}
else
indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
}
if(edges & 0x008)
{
if(i == 0 || k == 0)
{
indices[3] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
}
else
indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x010)
{
if(j == 0)
{
indices[4] = vertices.size() / 3;
mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
}
else
indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
}
if(edges & 0x080)
{
if(i == 0)
{
indices[7] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
}
else
indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
}
if(edges & 0x100)
{
if(i == 0 || j == 0)
{
indices[8] = vertices.size() / 3;
mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
}
else
indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x200)
{
if(j == 0)
{
indices[9] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
}
else
indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x800)
{
if(i == 0)
{
indices[11] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
}
else
indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
}
int tri;
int* triangle_table_ptr = triangle_table[cubeindex];
for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
polygons.push_back(indices[tri]);
}
}
}
delete [] shared_indices;
}
template<typename coord_type, typename vector3, typename formula>
void marching_cubes2(const vector3& lower, const vector3& upper,
int numx, int numy, int numz, formula f, double isovalue,
std::vector<double>& vertices, std::vector<size_t>& polygons)
{
using namespace private_;
// typedef decltype(lower[0]) coord_type;
// numx, numy and numz are the numbers of evaluations in each direction
--numx; --numy; --numz;
coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);
size_t* shared_indices = new size_t[2*numy*numz*3];
const int z3 = numz*3;
const int yz3 = numy*z3;
for(int i=0; i<numx; ++i)
{
coord_type x = lower[0] + dx*i;
coord_type x_dx = lower[0] + dx*(i+1);
const int i_mod_2 = i % 2;
const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
for(int j=0; j<numy; ++j)
{
coord_type y = lower[1] + dy*j;
coord_type y_dy = lower[1] + dy*(j+1);
for(int k=0; k<numz; ++k)
{
coord_type z = lower[2] + dz*k;
coord_type z_dz = lower[2] + dz*(k+1);
double v[8];
v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);
unsigned int cubeindex = 0;
for(int m=0; m<8; ++m)
if(v[m] <= isovalue)
cubeindex |= 1<<m;
// Generate vertices AVOIDING DUPLICATES.
int edges = edge_table[cubeindex];
std::vector<size_t> indices(12, -1);
if(edges & 0x040)
{
indices[6] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
}
if(edges & 0x020)
{
indices[5] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
}
if(edges & 0x400)
{
indices[10] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
mc_add_vertex(x_dx, y+dx, z, z_dz, 2, v[2], v[6], isovalue, &vertices);
}
if(edges & 0x001)
{
if(j == 0 || k == 0)
{
indices[0] = vertices.size() / 3;
mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
}
else
indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
}
if(edges & 0x002)
{
if(k == 0)
{
indices[1] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
}
else
indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x004)
{
if(k == 0)
{
indices[2] = vertices.size() / 3;
mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
}
else
indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
}
if(edges & 0x008)
{
if(i == 0 || k == 0)
{
indices[3] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
}
else
indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x010)
{
if(j == 0)
{
indices[4] = vertices.size() / 3;
mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
}
else
indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
}
if(edges & 0x080)
{
if(i == 0)
{
indices[7] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
}
else
indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
}
if(edges & 0x100)
{
if(i == 0 || j == 0)
{
indices[8] = vertices.size() / 3;
mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
}
else
indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x200)
{
if(j == 0)
{
indices[9] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
}
else
indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x800)
{
if(i == 0)
{
indices[11] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
}
else
indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
}
int tri;
int* triangle_table_ptr = triangle_table[cubeindex];
for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
polygons.push_back(indices[tri]);
}
}
}
delete [] shared_indices;
}
template<typename coord_type, typename vector3, typename formula>
void marching_cubes3(const vector3& lower, const vector3& upper,
int numx, int numy, int numz, formula f, double isovalue,
std::vector<double>& vertices, std::vector<size_t>& polygons)
{
using namespace private_;
// typedef decltype(lower[0]) coord_type;
// numx, numy and numz are the numbers of evaluations in each direction
--numx; --numy; --numz;
coord_type dx = (upper[0] - lower[0])/static_cast<coord_type>(numx);
coord_type dy = (upper[1] - lower[1])/static_cast<coord_type>(numy);
coord_type dz = (upper[2] - lower[2])/static_cast<coord_type>(numz);
size_t* shared_indices = new size_t[2*numy*numz*3];
const int z3 = numz*3;
const int yz3 = numy*z3;
for(int i=0; i<numx; ++i)
{
coord_type x = lower[0] + dx*i - dx/2;
coord_type x_dx = lower[0] + dx*(i+1) - dx/2;
const int i_mod_2 = i % 2;
const int i_mod_2_inv = (i_mod_2 ? 0 : 1);
for(int j=0; j<numy; ++j)
{
coord_type y = lower[1] + dy*j - dy/2;
coord_type y_dy = lower[1] + dy*(j+1) - dy/2;
for(int k=0; k<numz; ++k)
{
coord_type z = lower[2] + dz*k - dz/2;
coord_type z_dz = lower[2] + dz*(k+1) - dz/2;
double v[8];
v[0] = f(x,y,z); v[1] = f(x_dx,y,z);
v[2] = f(x_dx,y_dy,z); v[3] = f(x, y_dy, z);
v[4] = f(x,y,z_dz); v[5] = f(x_dx,y,z_dz);
v[6] = f(x_dx,y_dy,z_dz); v[7] = f(x, y_dy, z_dz);
unsigned int cubeindex = 0;
for(int m=0; m<8; ++m)
if(v[m] <= isovalue)
cubeindex |= 1<<m;
// Generate vertices AVOIDING DUPLICATES.
int edges = edge_table[cubeindex];
std::vector<size_t> indices(12, -1);
if(edges & 0x040)
{
indices[6] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 0] = indices[6];
mc_add_vertex(x_dx, y_dy, z_dz, x, 0, v[6], v[7], isovalue, &vertices);
}
if(edges & 0x020)
{
indices[5] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 1] = indices[5];
mc_add_vertex(x_dx, y, z_dz, y_dy, 1, v[5], v[6], isovalue, &vertices);
}
if(edges & 0x400)
{
indices[10] = vertices.size() / 3;
shared_indices[i_mod_2*yz3 + j*z3 + k*3 + 2] = indices[10];
mc_add_vertex(x_dx, y+dx, z, z_dz, 2, v[2], v[6], isovalue, &vertices);
}
if(edges & 0x001)
{
if(j == 0 || k == 0)
{
indices[0] = vertices.size() / 3;
mc_add_vertex(x, y, z, x_dx, 0, v[0], v[1], isovalue, &vertices);
}
else
indices[0] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + (k-1)*3 + 0];
}
if(edges & 0x002)
{
if(k == 0)
{
indices[1] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, y_dy, 1, v[1], v[2], isovalue, &vertices);
}
else
indices[1] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x004)
{
if(k == 0)
{
indices[2] = vertices.size() / 3;
mc_add_vertex(x_dx, y_dy, z, x, 0, v[2], v[3], isovalue, &vertices);
}
else
indices[2] = shared_indices[i_mod_2*yz3 + j*z3 + (k-1)*3 + 0];
}
if(edges & 0x008)
{
if(i == 0 || k == 0)
{
indices[3] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, y, 1, v[3], v[0], isovalue, &vertices);
}
else
indices[3] = shared_indices[i_mod_2_inv*yz3 + j*z3 + (k-1)*3 + 1];
}
if(edges & 0x010)
{
if(j == 0)
{
indices[4] = vertices.size() / 3;
mc_add_vertex(x, y, z_dz, x_dx, 0, v[4], v[5], isovalue, &vertices);
}
else
indices[4] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 0];
}
if(edges & 0x080)
{
if(i == 0)
{
indices[7] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z_dz, y, 1, v[7], v[4], isovalue, &vertices);
}
else
indices[7] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 1];
}
if(edges & 0x100)
{
if(i == 0 || j == 0)
{
indices[8] = vertices.size() / 3;
mc_add_vertex(x, y, z, z_dz, 2, v[0], v[4], isovalue, &vertices);
}
else
indices[8] = shared_indices[i_mod_2_inv*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x200)
{
if(j == 0)
{
indices[9] = vertices.size() / 3;
mc_add_vertex(x_dx, y, z, z_dz, 2, v[1], v[5], isovalue, &vertices);
}
else
indices[9] = shared_indices[i_mod_2*yz3 + (j-1)*z3 + k*3 + 2];
}
if(edges & 0x800)
{
if(i == 0)
{
indices[11] = vertices.size() / 3;
mc_add_vertex(x, y_dy, z, z_dz, 2, v[3], v[7], isovalue, &vertices);
}
else
indices[11] = shared_indices[i_mod_2_inv*yz3 + j*z3 + k*3 + 2];
}
int tri;
int* triangle_table_ptr = triangle_table[cubeindex];
for(int m=0; tri = triangle_table_ptr[m], tri != -1; ++m)
polygons.push_back(indices[tri]);
}
}
}
delete [] shared_indices;
}
}
#endif // _MARCHING_CUBES_H
| 20,843 | C | 37.457565 | 92 | 0.372931 |
NVlabs/ACID/ACID/src/utils/libmcubes/pyarraymodule.h |
#ifndef _EXTMODULE_H
#define _EXTMODULE_H
#include <Python.h>
#include <stdexcept>
// #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#define PY_ARRAY_UNIQUE_SYMBOL mcubes_PyArray_API
#define NO_IMPORT_ARRAY
#include "numpy/arrayobject.h"
#include <complex>
template<class T>
struct numpy_typemap;
#define define_numpy_type(ctype, dtype) \
template<> \
struct numpy_typemap<ctype> \
{static const int type = dtype;};
define_numpy_type(bool, NPY_BOOL);
define_numpy_type(char, NPY_BYTE);
define_numpy_type(short, NPY_SHORT);
define_numpy_type(int, NPY_INT);
define_numpy_type(long, NPY_LONG);
define_numpy_type(long long, NPY_LONGLONG);
define_numpy_type(unsigned char, NPY_UBYTE);
define_numpy_type(unsigned short, NPY_USHORT);
define_numpy_type(unsigned int, NPY_UINT);
define_numpy_type(unsigned long, NPY_ULONG);
define_numpy_type(unsigned long long, NPY_ULONGLONG);
define_numpy_type(float, NPY_FLOAT);
define_numpy_type(double, NPY_DOUBLE);
define_numpy_type(long double, NPY_LONGDOUBLE);
define_numpy_type(std::complex<float>, NPY_CFLOAT);
define_numpy_type(std::complex<double>, NPY_CDOUBLE);
define_numpy_type(std::complex<long double>, NPY_CLONGDOUBLE);
template<typename T>
T PyArray_SafeGet(const PyArrayObject* aobj, const npy_intp* indaux)
{
// HORROR.
npy_intp* ind = const_cast<npy_intp*>(indaux);
void* ptr = PyArray_GetPtr(const_cast<PyArrayObject*>(aobj), ind);
switch(PyArray_TYPE(aobj))
{
case NPY_BOOL:
return static_cast<T>(*reinterpret_cast<bool*>(ptr));
case NPY_BYTE:
return static_cast<T>(*reinterpret_cast<char*>(ptr));
case NPY_SHORT:
return static_cast<T>(*reinterpret_cast<short*>(ptr));
case NPY_INT:
return static_cast<T>(*reinterpret_cast<int*>(ptr));
case NPY_LONG:
return static_cast<T>(*reinterpret_cast<long*>(ptr));
case NPY_LONGLONG:
return static_cast<T>(*reinterpret_cast<long long*>(ptr));
case NPY_UBYTE:
return static_cast<T>(*reinterpret_cast<unsigned char*>(ptr));
case NPY_USHORT:
return static_cast<T>(*reinterpret_cast<unsigned short*>(ptr));
case NPY_UINT:
return static_cast<T>(*reinterpret_cast<unsigned int*>(ptr));
case NPY_ULONG:
return static_cast<T>(*reinterpret_cast<unsigned long*>(ptr));
case NPY_ULONGLONG:
return static_cast<T>(*reinterpret_cast<unsigned long long*>(ptr));
case NPY_FLOAT:
return static_cast<T>(*reinterpret_cast<float*>(ptr));
case NPY_DOUBLE:
return static_cast<T>(*reinterpret_cast<double*>(ptr));
case NPY_LONGDOUBLE:
return static_cast<T>(*reinterpret_cast<long double*>(ptr));
default:
throw std::runtime_error("data type not supported");
}
}
template<typename T>
T PyArray_SafeSet(PyArrayObject* aobj, const npy_intp* indaux, const T& value)
{
// HORROR.
npy_intp* ind = const_cast<npy_intp*>(indaux);
void* ptr = PyArray_GetPtr(aobj, ind);
switch(PyArray_TYPE(aobj))
{
case NPY_BOOL:
*reinterpret_cast<bool*>(ptr) = static_cast<bool>(value);
break;
case NPY_BYTE:
*reinterpret_cast<char*>(ptr) = static_cast<char>(value);
break;
case NPY_SHORT:
*reinterpret_cast<short*>(ptr) = static_cast<short>(value);
break;
case NPY_INT:
*reinterpret_cast<int*>(ptr) = static_cast<int>(value);
break;
case NPY_LONG:
*reinterpret_cast<long*>(ptr) = static_cast<long>(value);
break;
case NPY_LONGLONG:
*reinterpret_cast<long long*>(ptr) = static_cast<long long>(value);
break;
case NPY_UBYTE:
*reinterpret_cast<unsigned char*>(ptr) = static_cast<unsigned char>(value);
break;
case NPY_USHORT:
*reinterpret_cast<unsigned short*>(ptr) = static_cast<unsigned short>(value);
break;
case NPY_UINT:
*reinterpret_cast<unsigned int*>(ptr) = static_cast<unsigned int>(value);
break;
case NPY_ULONG:
*reinterpret_cast<unsigned long*>(ptr) = static_cast<unsigned long>(value);
break;
case NPY_ULONGLONG:
*reinterpret_cast<unsigned long long*>(ptr) = static_cast<unsigned long long>(value);
break;
case NPY_FLOAT:
*reinterpret_cast<float*>(ptr) = static_cast<float>(value);
break;
case NPY_DOUBLE:
*reinterpret_cast<double*>(ptr) = static_cast<double>(value);
break;
case NPY_LONGDOUBLE:
*reinterpret_cast<long double*>(ptr) = static_cast<long double>(value);
break;
default:
throw std::runtime_error("data type not supported");
}
}
#endif
| 4,645 | C | 32.666666 | 93 | 0.655328 |
NVlabs/ACID/ACID/src/utils/libmcubes/__init__.py | from src.utils.libmcubes.mcubes import (
marching_cubes, marching_cubes_func
)
from src.utils.libmcubes.exporter import (
export_mesh, export_obj, export_off
)
__all__ = [
    'marching_cubes', 'marching_cubes_func',
    'export_mesh', 'export_obj', 'export_off'
]
| 265 | Python | 19.461537 | 42 | 0.70566 |
NVlabs/ACID/ACID/src/utils/libmcubes/exporter.py |
import numpy as np
def export_obj(vertices, triangles, filename):
"""
Exports a mesh in the (.obj) format.
"""
with open(filename, 'w') as fh:
for v in vertices:
fh.write("v {} {} {}\n".format(*v))
for f in triangles:
fh.write("f {} {} {}\n".format(*(f + 1)))
def export_off(vertices, triangles, filename):
"""
Exports a mesh in the (.off) format.
"""
with open(filename, 'w') as fh:
fh.write('OFF\n')
fh.write('{} {} 0\n'.format(len(vertices), len(triangles)))
for v in vertices:
fh.write("{} {} {}\n".format(*v))
for f in triangles:
fh.write("3 {} {} {}\n".format(*f))
def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"):
"""
Exports a mesh in the COLLADA (.dae) format.
Needs PyCollada (https://github.com/pycollada/pycollada).
"""
import collada
mesh = collada.Collada()
vert_src = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z'))
geom = collada.geometry.Geometry(mesh, "geometry0", mesh_name, [vert_src])
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', "#verts-array")
triset = geom.createTriangleSet(np.copy(triangles), input_list, "")
geom.primitives.append(triset)
mesh.geometries.append(geom)
geomnode = collada.scene.GeometryNode(geom, [])
node = collada.scene.Node(mesh_name, children=[geomnode])
myscene = collada.scene.Scene("mcubes_scene", [node])
mesh.scenes.append(myscene)
mesh.scene = myscene
mesh.write(filename)
| 1,697 | Python | 25.53125 | 81 | 0.570418 |
NVlabs/ACID/ACID/src/utils/libmcubes/marchingcubes.cpp |
#include "marchingcubes.h"
namespace mc
{
int edge_table[256] =
{
0x000, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x099, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x033, 0x13a, 0x636, 0x73f, 0x435, 0x53c, 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0x0aa, 0x7a6, 0x6af, 0x5a5, 0x4ac, 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x066, 0x16f, 0x265, 0x36c, 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0x0ff, 0x3f5, 0x2fc, 0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x055, 0x15c, 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0x0cc, 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, 0x0cc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, 0x15c, 0x055, 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, 0x2fc, 0x3f5, 0x0ff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c, 0x36c, 0x265, 0x16f, 0x066, 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, 0x4ac, 0x5a5, 0x6af, 0x7a6, 0x0aa, 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, 0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x033, 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, 0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x099, 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c, 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x000
};
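// edge_table[i] is a 12-bit mask: bit k is set when cube edge k is crossed by the isosurface
// for vertex-sign configuration i. triangle_table below lists, for each configuration, up to
// five output triangles as triples of intersected-edge indices, padded with -1.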
int triangle_table[256][16] =
{
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}
};
namespace private_
{
double mc_isovalue_interpolation(double isovalue, double f1, double f2,
double x1, double x2)
{
if(f2==f1)
return (x2+x1)/2;
return (x2-x1)*(isovalue-f1)/(f2-f1) + x1;
}
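    // Worked example: with isovalue = 0.5, f1 = 0.2 at x1 = 0.0 and f2 = 0.8 at x2 = 1.0,
    // the crossing lands at (1.0 - 0.0) * (0.5 - 0.2) / (0.8 - 0.2) + 0.0 = 0.5; the f1 == f2
    // branch above returns the midpoint to avoid a division by zero.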
void mc_add_vertex(double x1, double y1, double z1, double c2,
int axis, double f1, double f2, double isovalue, std::vector<double>* vertices)
{
if(axis == 0)
{
double x = mc_isovalue_interpolation(isovalue, f1, f2, x1, c2);
vertices->push_back(x);
vertices->push_back(y1);
vertices->push_back(z1);
return;
}
if(axis == 1)
{
double y = mc_isovalue_interpolation(isovalue, f1, f2, y1, c2);
vertices->push_back(x1);
vertices->push_back(y);
vertices->push_back(z1);
return;
}
if(axis == 2)
{
double z = mc_isovalue_interpolation(isovalue, f1, f2, z1, c2);
vertices->push_back(x1);
vertices->push_back(y1);
vertices->push_back(z);
return;
}
}
}
}
| 18,889 | C++ | 56.069486 | 116 | 0.339827 |
NVlabs/ACID/ACID/src/utils/libmcubes/pywrapper.cpp |
#include "pywrapper.h"
#include "marchingcubes.h"
#include <stdexcept>
struct PythonToCFunc
{
PyObject* func;
PythonToCFunc(PyObject* func) {this->func = func;}
double operator()(double x, double y, double z)
{
PyObject* res = PyObject_CallFunction(func, "(d,d,d)", x, y, z); // py::extract<double>(func(x,y,z));
if(res == NULL)
return 0.0;
double result = PyFloat_AsDouble(res);
Py_DECREF(res);
return result;
}
};
PyObject* marching_cubes_func(PyObject* lower, PyObject* upper,
int numx, int numy, int numz, PyObject* f, double isovalue)
{
std::vector<double> vertices;
std::vector<size_t> polygons;
// Copy the lower and upper coordinates to a C array.
double lower_[3];
double upper_[3];
for(int i=0; i<3; ++i)
{
PyObject* l = PySequence_GetItem(lower, i);
if(l == NULL)
throw std::runtime_error("error");
PyObject* u = PySequence_GetItem(upper, i);
if(u == NULL)
{
Py_DECREF(l);
throw std::runtime_error("error");
}
lower_[i] = PyFloat_AsDouble(l);
upper_[i] = PyFloat_AsDouble(u);
Py_DECREF(l);
Py_DECREF(u);
if(lower_[i]==-1.0 || upper_[i]==-1.0)
{
if(PyErr_Occurred())
throw std::runtime_error("error");
}
}
// Marching cubes.
mc::marching_cubes<double>(lower_, upper_, numx, numy, numz, PythonToCFunc(f), isovalue, vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
struct PyArrayToCFunc
{
PyArrayObject* arr;
PyArrayToCFunc(PyArrayObject* arr) {this->arr = arr;}
double operator()(int x, int y, int z)
{
npy_intp c[3] = {x,y,z};
return PyArray_SafeGet<double>(arr, c);
}
};
PyObject* marching_cubes(PyArrayObject* arr, double isovalue)
{
if(PyArray_NDIM(arr) != 3)
throw std::runtime_error("Only three-dimensional arrays are supported.");
// Prepare data.
npy_intp* shape = PyArray_DIMS(arr);
double lower[3] = {0,0,0};
double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
long numx = upper[0] - lower[0] + 1;
long numy = upper[1] - lower[1] + 1;
long numz = upper[2] - lower[2] + 1;
std::vector<double> vertices;
std::vector<size_t> polygons;
// Marching cubes.
mc::marching_cubes<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
PyObject* marching_cubes2(PyArrayObject* arr, double isovalue)
{
if(PyArray_NDIM(arr) != 3)
throw std::runtime_error("Only three-dimensional arrays are supported.");
// Prepare data.
npy_intp* shape = PyArray_DIMS(arr);
double lower[3] = {0,0,0};
double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
long numx = upper[0] - lower[0] + 1;
long numy = upper[1] - lower[1] + 1;
long numz = upper[2] - lower[2] + 1;
std::vector<double> vertices;
std::vector<size_t> polygons;
// Marching cubes.
mc::marching_cubes2<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
PyObject* marching_cubes3(PyArrayObject* arr, double isovalue)
{
if(PyArray_NDIM(arr) != 3)
throw std::runtime_error("Only three-dimensional arrays are supported.");
// Prepare data.
npy_intp* shape = PyArray_DIMS(arr);
double lower[3] = {0,0,0};
double upper[3] = {shape[0]-1, shape[1]-1, shape[2]-1};
long numx = upper[0] - lower[0] + 1;
long numy = upper[1] - lower[1] + 1;
long numz = upper[2] - lower[2] + 1;
std::vector<double> vertices;
std::vector<size_t> polygons;
// Marching cubes.
mc::marching_cubes3<double>(lower, upper, numx, numy, numz, PyArrayToCFunc(arr), isovalue,
vertices, polygons);
// Copy the result to two Python ndarrays.
npy_intp size_vertices = vertices.size();
npy_intp size_polygons = polygons.size();
PyArrayObject* verticesarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_vertices, PyArray_DOUBLE));
PyArrayObject* polygonsarr = reinterpret_cast<PyArrayObject*>(PyArray_SimpleNew(1, &size_polygons, PyArray_ULONG));
std::vector<double>::const_iterator it = vertices.begin();
for(int i=0; it!=vertices.end(); ++i, ++it)
*reinterpret_cast<double*>(PyArray_GETPTR1(verticesarr, i)) = *it;
std::vector<size_t>::const_iterator it2 = polygons.begin();
for(int i=0; it2!=polygons.end(); ++i, ++it2)
*reinterpret_cast<unsigned long*>(PyArray_GETPTR1(polygonsarr, i)) = *it2;
PyObject* res = Py_BuildValue("(O,O)", verticesarr, polygonsarr);
Py_XDECREF(verticesarr);
Py_XDECREF(polygonsarr);
return res;
}
| 7,565 | C++ | 35.907317 | 120 | 0.624455 |
NVlabs/ACID/ACID/src/utils/libmcubes/pywrapper.h |
#ifndef _PYWRAPPER_H
#define _PYWRAPPER_H
#include <Python.h>
#include "pyarraymodule.h"
#include <vector>
PyObject* marching_cubes(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes2(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes3(PyArrayObject* arr, double isovalue);
PyObject* marching_cubes_func(PyObject* lower, PyObject* upper,
int numx, int numy, int numz, PyObject* f, double isovalue);
#endif // _PYWRAPPER_H
| 455 | C | 25.823528 | 64 | 0.758242 |
NVlabs/ACID/ACID/src/data/__init__.py |
from src.data.core import (
PlushEnvGeom, collate_remove_none, worker_init_fn, get_plush_loader
)
from src.data.transforms import (
PointcloudNoise, SubsamplePointcloud,
SubsamplePoints,
)
__all__ = [
# Core
    'PlushEnvGeom',
    'get_plush_loader',
    'collate_remove_none',
    'worker_init_fn',
    'PointcloudNoise',
    'SubsamplePointcloud',
    'SubsamplePoints',
]
| 379 | Python | 18.999999 | 71 | 0.693931 |
NVlabs/ACID/ACID/src/data/core.py | import os
import yaml
import pickle
import torch
import logging
import numpy as np
from torch.utils import data
from torch.utils.data.dataloader import default_collate
from src.utils import plushsim_util, common_util
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
logger = logging.getLogger(__name__)
def collate_remove_none(batch):
''' Collater that puts each data field into a tensor with outer dimension
batch size.
Args:
batch: batch
'''
batch = list(filter(lambda x: x is not None, batch))
return data.dataloader.default_collate(batch)
def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
def set_num_threads(nt):
try:
import mkl; mkl.set_num_threads(nt)
except:
pass
torch.set_num_threads(1)
os.environ['IPC_ENABLE']='1'
for o in ['OPENBLAS_NUM_THREADS','NUMEXPR_NUM_THREADS','OMP_NUM_THREADS','MKL_NUM_THREADS']:
os.environ[o] = str(nt)
random_data = os.urandom(4)
base_seed = int.from_bytes(random_data, byteorder="big")
np.random.seed(base_seed + worker_id)
def collate_pair_fn(batch):
num_points = batch[0]['sampled_pts'].shape[1]
collated = {}
for key in batch[0]:
if key == 'geo_dists':
collated[key] = torch.as_tensor(np.concatenate([d[key] for d in batch]))
elif key == 'num_pairs':
indices = []
for i,d in enumerate(batch):
indices.append(np.arange(d['num_pairs']) + i * num_points)
collated["pair_indices"] = torch.as_tensor(np.concatenate(indices))
else:
collated[key] = default_collate([d[key] for d in batch])
return collated
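# Illustrative example: with num_points = 3000 and a batch whose 'num_pairs' entries are
# [2, 3], the collated 'pair_indices' become [0, 1, 3000, 3001, 3002], i.e. per-sample pair
# indices offset by i * num_points so they index into the flattened batch of sampled points.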
class PlushEnvBoth(data.Dataset):
def __init__(self, flow_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.flow_root = flow_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(flow_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(flow_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, int_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_flow_data_file(
self.flow_root,split_id, model_id, reset_id, int_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_flow_pair_data_file(
self.pair_root,split_id, model_id, reset_id, int_id))
pair_reset_id, pair_int_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_flow_data_file(
self.flow_root,split_id, model_id, pair_reset_id, pair_int_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_flow, sampled_inds = self._prepare_points(
points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
            # the sampled positive ids are used to index the pairs in the pair-info npz
# reorganize sampled_pts
            # compute the occupancy mask once so points, occupancy and flow stay aligned
            occ_mask = sampled_occ == 1.
            sampled_pts = np.concatenate([sampled_pts[occ_mask], sampled_pts[~occ_mask]])
            sampled_flow = np.concatenate([sampled_flow[occ_mask], sampled_flow[~occ_mask]])
            sampled_occ = np.concatenate([sampled_occ[occ_mask], sampled_occ[~occ_mask]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, sampled_flow2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, \
sampled_occ, sampled_occ2, \
sampled_flow, sampled_flow2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"sampled_flow":np.stack([sampled_flow,sampled_flow2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
"start_frame":int(points_dict['start_frame']),
"end_frame":int(points_dict['end_frame']),
}
return data
def _get_pts_related_info(self, points_dict):
pts = points_dict['pts'].astype(np.float32)
occs = np.unpackbits(points_dict['occ'])
inds = points_dict['ind']
flow = np.zeros((len(pts), 3), dtype=np.float32)
flow[occs != 0] = points_dict['flow'].astype(np.float32) * 10.
return pts, occs, inds, flow
def _prepare_matched_unique(self, points_dict, points_dict2):
pts1,occs1,inds1,flow1 = self._get_pts_related_info(points_dict)
pts2,occs2,inds2,flow2 = self._get_pts_related_info(points_dict2)
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2,
assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_flow_1 = flow1[occs1==1][id1[int_id1]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
sub_inds = common_util.subsample_points(unique_pts_1, resolution=0.03, return_index=True)
unique_pts_1 = unique_pts_1[sub_inds]
unique_flow_1 = unique_flow_1[sub_inds]
unique_occ_1 = unique_occ_1[sub_inds]
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
flow_others1 = flow1[sample_others1]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_flow1 = np.concatenate([unique_flow_1, flow_others1])
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_flow_2 = flow2[occs2==1][id2[int_id2]]
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
unique_pts_2 = unique_pts_2[sub_inds]
unique_flow_2 = unique_flow_2[sub_inds]
unique_occ_2 = unique_occ_2[sub_inds]
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
flow_others2 = flow2[sample_others2]
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
sampled_flow2 = np.concatenate([unique_flow_2, flow_others2])
geo_dists = geo_dists[sub_inds]
return sampled_pts1, sampled_pts2,\
sampled_occ1, sampled_occ2, \
sampled_flow1, sampled_flow2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud_obs'].astype(np.float32)
grasp_loc = common_util.transform_points(info_dict['grasp_loc'], scene_range, to_range)
target_loc = common_util.transform_points(info_dict['target_loc'], scene_range, to_range)
tiled_grasp_loc = np.tile(grasp_loc, (len(obj_pcloud), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc, (len(obj_pcloud), 1)).astype(np.float32)
obj_pcloud= np.concatenate([obj_pcloud, tiled_target_loc, obj_pcloud[:,:3] - tiled_grasp_loc], axis=-1)
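        # per-point features after this concat: the observed point features, the tiled
        # target location (3 dims), and each point's offset from the grasp location (3 dims)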
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts,occs,inds,flow = self._get_pts_related_info(points_dict)
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_flow = flow
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
sampled_flow = flow[sampled_inds]
else:
pts_chosen = pts[occs!= 0][chosen]
occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
flow_chosen = flow[occs!= 0][chosen]
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
flow_others = flow[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
sampled_flow= np.concatenate([flow_chosen, flow_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_flow, sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
        # draw samples:
        # the first H // pos_ratio points draw from their three closest candidates,
        # the remaining points draw from the farther candidates
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
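        # e.g. (illustrative) with H = 4, W = 80 and pos_ratio = 2: rows 0-1 pick one of the
        # 3 nearest candidates (columns 0-2), rows 2-3 pick a farther one (columns 3-79)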
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
def get_model_dict(self, idx):
return self.models[idx]
class PlushEnvGeom(data.Dataset):
def __init__(self, geom_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.geom_root = geom_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(geom_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(geom_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, frame_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, reset_id, frame_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_pair_data_file(
self.pair_root,split_id, model_id, reset_id, frame_id))
pair_reset_id, pair_frame_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, pair_reset_id, pair_frame_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_inds = self._prepare_points(points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
            # the sampled positive ids are used to index the pairs in the pair-info npz
# reorganize sampled_pts
sampled_pts = np.concatenate([sampled_pts[sampled_occ == 1.], sampled_pts[sampled_occ == 0.]])
sampled_occ = np.concatenate([sampled_occ[sampled_occ == 1.], sampled_occ[sampled_occ == 0.]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, sampled_occ, sampled_occ2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
}
return data
def _prepare_matched_unique(self, points_dict, points_dict2):
pts1 = points_dict['pts'].astype(np.float32)
occs1 = np.unpackbits(points_dict['occ'])
inds1 = points_dict['ind']
pts2 = points_dict2['pts'].astype(np.float32)
occs2 = np.unpackbits(points_dict2['occ'])
inds2 = points_dict2['ind']
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2, assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
return sampled_pts1, sampled_pts2, sampled_occ1, sampled_occ2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud'].astype(np.float32)
obj_pcloud += 1e-4 * np.random.randn(*obj_pcloud.shape)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts = points_dict['pts'].astype(np.float32)
occs = points_dict['occ']
occs = np.unpackbits(occs)#[:points.shape[0]]
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
else:
pts_chosen = pts[occs!= 0][chosen]
occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
        # draw samples:
        # the first H // pos_ratio points draw from their three closest candidates,
        # the remaining points draw from the farther candidates
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
def get_model_dict(self, idx):
return self.models[idx]
def build_transform_geom(cfg):
from . import transforms as tsf
from torchvision import transforms
transform = {}
transform['obj_pcloud'] = transforms.Compose([
tsf.SubsamplePointcloud(cfg['data']['pointcloud_n_obj']),
tsf.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
transform['env_pcloud'] = transforms.Compose([
tsf.SubsamplePointcloud(cfg['data']['pointcloud_n_env']),
tsf.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
return transform
def get_geom_dataset(cfg, split='train', transform='build'):
geom_root = cfg['data']['geom_path']
pair_root = cfg['data']['pair_path']
num_points = cfg['data']['points_subsample']
pos_ratio = cfg['data'].get('pos_ratio', 2)
if transform == 'build':
transform = build_transform_geom(cfg)
return PlushEnvGeom(geom_root, pair_root, num_points, split=split, transform=transform, pos_ratio=pos_ratio)
def get_combined_dataset(cfg, split='train', transform='build'):
flow_root = cfg['data']['flow_path']
pair_root = cfg['data']['pair_path']
num_points = cfg['data']['points_subsample']
pos_ratio = cfg['data'].get('pos_ratio', 2)
if transform == 'build':
transform = build_transform_geom(cfg)
return PlushEnvBoth(flow_root, pair_root, num_points, split=split, transform=transform, pos_ratio=pos_ratio)
def get_plush_loader(cfg, mode, split='train', transform='build', test_shuffle=False, num_workers=None):
if mode == 'geom':
dataset = get_geom_dataset(cfg, split, transform)
elif mode == 'combined':
dataset = get_combined_dataset(cfg, split, transform)
if split == 'train':
loader = torch.utils.data.DataLoader(
dataset, batch_size=cfg['training']['batch_size'],
num_workers=cfg['training']['n_workers'],
shuffle=True,
collate_fn=collate_pair_fn,
worker_init_fn=worker_init_fn)
else:
loader = torch.utils.data.DataLoader(
dataset, batch_size=1,
num_workers=cfg['training']['n_workers_val'] if num_workers is None else num_workers,
shuffle=test_shuffle,
collate_fn=collate_pair_fn)
return loader
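# Example (a sketch; assumes `cfg` is a fully populated config dict such as
# configs/plush_dyn_geodesics.yaml merged with configs/default.yaml):
#     import yaml
#     with open('configs/plush_dyn_geodesics.yaml') as f:
#         cfg = yaml.safe_load(f)
#     train_loader = get_plush_loader(cfg, mode='combined', split='train')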
def get_plan_loader(cfg, transform='build', category="teddy",num_workers=None):
transform = build_transform_geom(cfg)
dataset = PlushEnvPlan(cfg['data']['plan_path'], category=category, transform=transform)
loader = torch.utils.data.DataLoader(
dataset, batch_size=1,
num_workers=cfg['training']['n_workers_val'] if num_workers is None else num_workers,
shuffle=False,)
return loader
class PlushEnvPlan(data.Dataset):
def __init__(self, plan_root, category="teddy",transform={}):
# Attributes
self.plan_root = plan_root
self.transform = transform
self.category = category
import glob
self.scenarios = glob.glob(f'{plan_root}/**/*.npz', recursive=True)
self.scenarios = [x for x in self.scenarios if category in x][:-1]
self.scenarios.sort()
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.scenarios)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
# load frame and get partial observation
infos = np.load(self.scenarios[idx])
obj_pcloud_start, env_pcloud_start = self._prepare_partial_obs(infos, "start")
obj_pcloud_end, env_pcloud_end = self._prepare_partial_obs(infos, "end")
action = infos['actions'].astype(np.float32)
pts_start, occ_start, ind_start = self._get_pts_related_info(infos, 'start')
pts_end, occ_end, ind_end = self._get_pts_related_info(infos, 'end')
data = {
"obj_obs_start":obj_pcloud_start,
"env_obs_start":env_pcloud_start,
"obj_obs_end":obj_pcloud_end,
"env_obs_end":env_pcloud_end,
'gt_pts_start': infos['sim_pts_start'].astype(np.float32),
'gt_pts_end': infos['sim_pts_end'].astype(np.float32),
'sampled_pts_start': pts_start,
'sampled_occ_start': occ_start,
'sampled_ind_start': ind_start,
'sampled_pts_end': pts_end,
'sampled_occ_end': occ_end,
'sampled_ind_end': ind_end,
"actions": action,
"sequence_ids":infos['sequence_ids'],
"fname":self.scenarios[idx],
"idx":idx,
}
return data
def _prepare_partial_obs(self, info_dict, key):
# obj partial observation
obj_pcloud = info_dict[f'obj_pcloud_{key}'].astype(np.float32)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict[f'env_pcloud_{key}'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
def _get_pts_related_info(self, points_dict, key):
pts = points_dict[f'pts_{key}'].astype(np.float32)
occs = np.unpackbits(points_dict[f'occ_{key}']).astype(np.float32)
inds = points_dict[f'ind_{key}'].astype(np.int32)
        return pts, occs, inds
| 26,177 | Python | 42.557404 | 133 | 0.593154 |
NVlabs/ACID/ACID/src/data/transforms.py | import numpy as np
# Transforms
class PointcloudNoise(object):
''' Point cloud noise transformation class.
It adds noise to point cloud data.
Args:
stddev (int): standard deviation
'''
def __init__(self, stddev):
self.stddev = stddev
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
data_out = data.copy()
points = data[None]
noise = self.stddev * np.random.randn(*points.shape)
noise = noise.astype(np.float32)
data_out[None] = points + noise
return data_out
class SubsamplePointcloud(object):
''' Point cloud subsampling transformation class.
It subsamples the point cloud data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
indices = np.random.randint(data.shape[0], size=self.N)
return data[indices]
class SubsamplePoints(object):
''' Points subsampling transformation class.
It subsamples the points data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
points = data[None]
occ = data['occ']
ind = data['ind']
flow1 = data['flow1']
flow2 = data['flow2']
data_out = data.copy()
if isinstance(self.N, int):
idx = np.random.randint(points.shape[0], size=self.N)
data_out.update({
None: points[idx, :],
'occ': occ[idx],
'ind': ind[idx],
'flow1': flow1[idx],
'flow2': flow2[idx],
})
else:
Nt_out, Nt_in = self.N
occ_binary = (occ >= 0.5)
points0 = points[~occ_binary]
points1 = points[occ_binary]
ind0 = ind[~occ_binary]
ind1 = ind[occ_binary]
flow10 = flow1[~occ_binary]
flow11 = flow1[occ_binary]
flow20 = flow2[~occ_binary]
flow21 = flow2[occ_binary]
idx0 = np.random.randint(points0.shape[0], size=Nt_out)
idx1 = np.random.randint(points1.shape[0], size=Nt_in)
points0 = points0[idx0, :]
points1 = points1[idx1, :]
points = np.concatenate([points0, points1], axis=0)
ind0 = ind0[idx0]
ind1 = ind1[idx1]
ind = np.concatenate([ind0, ind1], axis=0)
flow10 = flow10[idx0]
flow11 = flow11[idx1]
flow1 = np.concatenate([flow10, flow11], axis=0)
flow20 = flow20[idx0]
flow21 = flow21[idx1]
flow2 = np.concatenate([flow20, flow21], axis=0)
occ0 = np.zeros(Nt_out, dtype=np.float32)
occ1 = np.ones(Nt_in, dtype=np.float32)
occ = np.concatenate([occ0, occ1], axis=0)
volume = occ_binary.sum() / len(occ_binary)
volume = volume.astype(np.float32)
data_out.update({
None: points,
'occ': occ,
'volume': volume,
'ind': ind,
'flow1': flow1,
'flow2': flow2,
})
return data_out
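# Usage sketch (illustrative; mirrors build_transform_geom in src/data/core.py):
#     from torchvision import transforms
#     import numpy as np
#     tsf = transforms.Compose([SubsamplePointcloud(5000), PointcloudNoise(0.005)])
#     pcl = np.random.rand(20000, 3).astype(np.float32)  # stand-in point cloud
#     pcl = tsf(pcl)  # -> (5000, 3) noisy subsample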
| 3,578 | Python | 25.708955 | 67 | 0.507546 |
NVlabs/ACID/ACID/configs/default.yaml | method: conv_onet
data:
train_split: train
val_split: val
test_split: test
dim: 3
act_dim: 6
padding: 0.1
type: geom
model:
decoder: simple
encoder: resnet18
decoder_kwargs: {}
encoder_kwargs: {}
multi_gpu: false
c_dim: 512
training:
out_dir: out/default
batch_size: 64
pos_weight: 5
print_every: 200
visualize_every: 1000
visualize_total: 15
checkpoint_every: 1000
validate_every: 2000
backup_every: 100000
eval_sample: false
model_selection_metric: loss
model_selection_mode: minimize
n_workers: 4
n_workers_val: 4
test:
threshold: 0.5
eval_mesh: true
eval_pointcloud: true
remove_wall: false
model_file: model_best.pt
generation:
batch_size: 100000
refinement_step: 0
vis_n_outputs: 30
generate_mesh: true
generate_pointcloud: true
generation_dir: generation
use_sampling: false
resolution_0: 32
upsampling_steps: 3
simplify_nfaces: null
copy_groundtruth: false
copy_input: true
latent_number: 4
latent_H: 8
latent_W: 8
latent_ny: 2
latent_nx: 2
latent_repeat: true
  sliding_window: False # added for crop generation
| 1,121 | YAML | 18.68421 | 51 | 0.702944 |
NVlabs/ACID/ACID/configs/plush_dyn_geodesics.yaml | method: conv_onet
data:
flow_path: train_data/flow
pair_path: train_data/pair
pointcloud_n_obj: 5000
pointcloud_n_env: 1000
pointcloud_noise: 0.005
points_subsample: 3000
model:
type: combined
obj_encoder_kwargs:
f_dim: 3
hidden_dim: 64
plane_resolution: 128
unet_kwargs:
depth: 4
merge_mode: concat
start_filts: 64
env_encoder_kwargs:
f_dim: 3
hidden_dim: 16
plane_resolution: 64
unet_kwargs:
depth: 2
merge_mode: concat
start_filts: 16
decoder_kwargs:
corr_dim: 32
sample_mode: bilinear # bilinear / nearest
hidden_size: 32
obj_c_dim: 64
env_c_dim: 16
loss:
type: contrastive
contrastive_threshold: 1
use_geodesics: true
scale_with_geodesics: False
training:
out_dir: result/dyn/geodesics
batch_size: 4
model_selection_metric: flow
model_selection_mode: minimize
print_every: 1
visualize_every: 4000
validate_every: 4000
checkpoint_every: 4000
backup_every: 4000
n_workers: 16
n_workers_val: 4
test:
threshold: 0.95
eval_mesh: true
eval_pointcloud: false
model_file: model_best.pt
generation:
refine: false
n_x: 128
n_z: 1
| 1,175 | YAML | 18.932203 | 46 | 0.67234 |
NVlabs/ACID/ACID/preprocess/gen_data_flow_plush.py | import numpy as np
import os
import time, datetime
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import json
from src.utils import plushsim_util
from src.utils import common_util
import glob
import tqdm
from multiprocessing import Pool
import argparse
parser = argparse.ArgumentParser("Training Flow Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--save_root", type=str, default=flow_default)
args = parser.parse_args()
data_root = args.data_root
save_root = args.save_root
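# Example invocation (a sketch; the paths below are just the argparse defaults above):
#     python preprocess/gen_data_flow_plush.py --data_root data_plush --save_root train_data/flow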
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
class_to_std = {
'teddy':0.12,
'elephant':0.15,
'octopus':0.12,
'rabbit':0.08,
'dog':0.08,
'snake':0.04,
}
def export_train_data(data_id):
# try:
# load action info
split_id, model_category, model_name, reset_id, interaction_id = data_id
grasp_loc, target_loc, f1, _, f2 = plushsim_util.get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root)
# get observations
obj_pts1, env_pts1 = plushsim_util.get_scene_partial_pointcloud(
model_category, model_name, split_id, reset_id, f1, data_root)
obj_pts1=common_util.subsample_points(
common_util.transform_points(obj_pts1, scene_range, to_range), resolution=0.005, return_index=False)
env_pts1=common_util.subsample_points(
common_util.transform_points(env_pts1, scene_range, to_range), resolution=0.020, return_index=False)
# calculate flow
sim_pts1, _, loc,_,_= plushsim_util.get_object_full_points(
model_category, model_name, split_id, reset_id, f1, data_root)
sim_pts2, _,_,_,_= plushsim_util.get_object_full_points(
model_category, model_name, split_id, reset_id, f2, data_root)
sim_pts1=common_util.transform_points(sim_pts1, scene_range, to_range)
sim_pts2=common_util.transform_points(sim_pts2, scene_range, to_range)
sim_pts_flow = sim_pts2 - sim_pts1
# sample occupancy
center =common_util.transform_points(loc, scene_range, to_range)[0]
pts, occ, pt_class = plushsim_util.sample_occupancies(sim_pts1, center,
std=class_to_std[model_category],sample_scheme='object')
# get implicit flows
flow = sim_pts_flow[pt_class]
# save
kwargs = {'sim_pts':sim_pts1.astype(np.float16),
'obj_pcloud_obs':obj_pts1.astype(np.float16),
'env_pcloud':env_pts1.astype(np.float16),
'pts':pts.astype(np.float16),
'occ':np.packbits(occ),
'ind':pt_class.astype(np.uint16),
'flow':flow.astype(np.float16),
'start_frame':f1,
'end_frame':f2,
'grasp_loc':grasp_loc,
'target_loc': target_loc}
model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
save_path = os.path.join(model_dir, f"{reset_id:03d}_{interaction_id:03d}.npz")
np.savez_compressed(save_path, **kwargs)
def get_all_data_points_flow(data_root):
good_interactions = glob.glob(f"{data_root}/*/*/*/info/good_interactions.json")
good_ints = []
for g in tqdm.tqdm(good_interactions):
split_id, model_category, model_name = g.split('/')[-5:-2]
model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
os.makedirs(model_dir, exist_ok=True)
model_dir = plushsim_util.get_model_dir(data_root, split_id, model_category, model_name)
with open(g, 'r') as fp:
good_ones = json.load(fp)
for k,v in good_ones.items():
reset_id = int(k)
for int_id in v:
good_ints.append((split_id, model_category, model_name, reset_id, int_id))
return good_ints
good_ints = get_all_data_points_flow(data_root)#[:100]
start_time = time.time()
with Pool(40) as p:
for _ in tqdm.tqdm(p.imap_unordered(export_train_data, good_ints), total=len(good_ints)):
pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}')
| 4,353 | Python | 39.691588 | 143 | 0.64668 |
NVlabs/ACID/ACID/preprocess/gen_data_contrastive_pairs_flow.py | import os
import sys
import glob
import tqdm
import random
import argparse
import numpy as np
import os.path as osp
import time
from multiprocessing import Pool
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
parser = argparse.ArgumentParser("Training Contrastive Pair Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
meta_default = osp.join(ACID_dir, "data_plush", "metadata")
flow_default = osp.join(ACID_dir, "train_data", "flow")
pair_default = osp.join(ACID_dir, "train_data", "pair")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--meta_root", type=str, default=meta_default)
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--save_root", type=str, default=pair_default)
args = parser.parse_args()
data_root = args.data_root
flow_root = args.flow_root
save_root = args.save_root
meta_root = args.meta_root
os.makedirs(save_root, exist_ok=True)
def using_complex(a):
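    # Row-wise "unique" trick: every row gets its own imaginary offset, so np.unique on the
    # flattened array only collapses values that repeat within the same row. Positions that
    # are not the first occurrence of a value are overwritten with 256, a sentinel that sorts
    # after any valid distance (this assumes the stored distances fit in 0-255).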
weight = 1j*np.linspace(0, a.shape[1], a.shape[0], endpoint=False)
b = a + weight[:, np.newaxis]
u, ind = np.unique(b, return_index=True)
b = np.zeros_like(a) + 256
np.put(b, ind, a.flat[ind])
return b
def process(pair, num_samples=320, keep=80):
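    # For each point in the source frame, sample `num_samples` random candidate points from the
    # target frame, look up their precomputed pairwise distances in dist_matrix, and keep only
    # the `keep` closest candidates (within-row duplicates are pushed to the back via
    # using_complex) as the contrastive-pair supervision saved for this frame pair.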
split_id, model_name, f,p = pair
src_file = np.load(f"{flow_root}/{split_id}/{model_name}/{f}")
tgt_file = np.load(f"{flow_root}/{split_id}/{model_name}/{p}")
src_inds = src_file['ind']
tgt_inds = tgt_file['ind']
src_inds = np.tile(src_inds, (num_samples,1)).T
tgt_samples = np.random.randint(0, high=len(tgt_inds) - 1, size=(len(src_inds), num_samples))
tgt_samples_inds = tgt_inds[tgt_samples]
dists = dist_matrix[src_inds.reshape(-1), tgt_samples_inds.reshape(-1)].reshape(*src_inds.shape)
dists_unique = using_complex(dists)
idx = np.argsort(dists_unique, axis=-1)
dists_sorted = np.take_along_axis(dists, idx, axis=-1).astype(np.uint8)[:,:keep]
tgt_samples_sorted = np.take_along_axis(tgt_samples, idx, axis=-1)[:,:keep]
if tgt_samples_sorted.max() <= np.iinfo(np.uint16).max:
tgt_samples_sorted = tgt_samples_sorted.astype(np.uint16)
else:
tgt_samples_sorted = tgt_samples_sorted.astype(np.uint32)
results = {"target_file":p, "dists":dists_sorted, "inds":tgt_samples_sorted}
np.savez_compressed(os.path.join(save_dir, f"pair_{f}"), **results)
def export_pair_data(data_id):
split_id, model_name = data_id
all_files = all_geoms[data_id]
print(split_id, model_name)
global dist_matrix
dist_matrix = np.load(f'{meta_root}/{split_id}/{model_name}_dist.npz')['arr_0']
global save_dir
save_dir = os.path.join(save_root, split_id, model_name)
os.makedirs(save_dir, exist_ok=True)
pairs = [ (split_id, model_name, f,random.choice(all_files)) for f in all_files ]
start_time = time.time()
with Pool(10) as p:
for _ in tqdm.tqdm(p.imap_unordered(process, pairs), total=len(all_files)):
pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}')
if __name__ == '__main__':
from collections import defaultdict
global all_geoms
all_geoms = defaultdict(lambda: [])
for g in glob.glob(f"{flow_root}/*/*/*"):
split_id, model_name, file_name = g.split('/')[-3:]
all_geoms[(split_id, model_name)].append(file_name)
for k in all_geoms.keys():
export_pair_data(k)
| 3,584 | Python | 35.212121 | 100 | 0.66183 |
NVlabs/ACID/ACID/preprocess/gen_data_flow_splits.py | import os
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import glob
import argparse
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser = argparse.ArgumentParser("Making training / testing splits...")
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--no_split", action="store_true", default=False)
args = parser.parse_args()
flow_root = args.flow_root
all_npz = glob.glob(f"{flow_root}/*/*/*.npz")
print(f"In total {len(all_npz)} data points...")
def filename_to_id(fname):
split_id, model_name, f = fname.split("/")[-3:]
reset_id, frame_id = (int(x) for x in os.path.splitext(f)[0].split('_'))
return split_id, model_name, reset_id, frame_id
from collections import defaultdict
total_files = defaultdict(lambda : defaultdict(lambda : []))
for fname in all_npz:
split_id, model_name, reset_id, frame_id = filename_to_id(fname)
total_files[(split_id, model_name)][reset_id].append(frame_id)
total_files = dict(total_files)
for k,v in total_files.items():
total_files[k] = dict(v)
import pickle
if args.no_split:
train = total_files
test = total_files
else:
train = {}
test = {}
for k,v in total_files.items():
split_id, model_name = k
if "teddy" in model_name:
test[k] = v
else:
train[k] = v
train_total = []
for k,v in train.items():
for x, u in v.items():
for y in u:
train_total.append((*k, x, y))
print(f"training data points: {len(train_total)}")
test_total = []
for k,v in test.items():
for x, u in v.items():
for y in u:
test_total.append((*k, x, y))
print(f"testing data points: {len(test_total)}")
with open(f"{flow_root}/train.pkl", "wb") as fp:
pickle.dump(train_total, fp)
with open(f"{flow_root}/test.pkl", "wb") as fp:
pickle.dump(test_total, fp) | 1,972 | Python | 28.447761 | 76 | 0.625761 |
erasromani/isaac-sim-python/simulate_grasp.py | import os
import argparse
from grasp.grasp_sim import GraspSimulator
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.synthetic_utils import OmniKitHelper
def main(args):
kit = OmniKitHelper(
{"renderer": "RayTracedLighting", "experience": f"{os.environ['EXP_PATH']}/isaac-sim-python.json", "width": args.width, "height": args.height}
)
_mp = _motion_planning.acquire_motion_planning_interface()
_dc = _dynamic_control.acquire_dynamic_control_interface()
if args.video: record = True
else: record = False
sim = GraspSimulator(kit, _dc, _mp, record=record)
# add object path
if args.location == 'local': from_server = False
else: from_server = True
for path in args.path:
sim.add_object_path(path, from_server=from_server)
# start simulation
sim.play()
for _ in range(args.num):
sim.add_object(position=(40, 0, 10))
sim.wait_for_drop()
sim.wait_for_loading()
evaluation = sim.execute_grasp(args.position, args.angle)
output_string = f"Grasp evaluation: {evaluation}"
print('\n' + ''.join(['#'] * len(output_string)))
print(output_string)
print(''.join(['#'] * len(output_string)) + '\n')
# Stop physics simulation
sim.stop()
if record: sim.save_video(args.video)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simulate Panda arm planar grasp execution in NVIDIA Omniverse Isaac Sim')
required = parser.add_argument_group('required arguments')
required.add_argument('-P', '--path', type=str, nargs='+', metavar='', required=True, help='path to usd file or content folder')
required.add_argument('-p', '--position', type=float, nargs=3, metavar='', required=True, help='grasp position, X Y Z')
required.add_argument('-a', '--angle', type=float, metavar='', required=True, help='grasp angle in degrees')
parser.add_argument('-l', '--location', type=str, metavar='', required=False, help='location of usd path, choices={local, nucleus_server}', choices=['local', 'nucleus_server'], default='local')
parser.add_argument('-n', '--num', type=int, metavar='', required=False, help='number of objects to spawn in the scene', default=1)
parser.add_argument('-v', '--video', type=str, metavar='', required=False, help='output filename of grasp simulation video')
parser.add_argument('-W', '--width', type=int, metavar='', required=False, help='width of the viewport and generated images', default=1024)
parser.add_argument('-H', '--height', type=int, metavar='', required=False, help='height of the viewport and generated images', default=800)
args = parser.parse_args()
print(args.path)
main(args) | 2,835 | Python | 39.514285 | 197 | 0.662434 |
erasromani/isaac-sim-python/README.md | # isaac-sim-python: Python wrapper for NVIDIA Omniverse Isaac-Sim
## Overview
This repository contains a collection of python wrappers for NVIDIA Omniverse Isaac-Sim simulations. The `grasp` package simulates planar grasp execution of a Panda arm in a scene with various rigid objects placed in a bin.
## Installation
This repository requires installation of NVIDIA Omniverse Isaac-Sim. A comprehensive setup tutorial is provided in the official [NVIDIA Omniverse Isaac-Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/setup.html) documentation. Following installation of Isaac-Sim, a conda environment must also be created that contains all the required packages for the python wrappers. Another comprehensive conda environment setup tutorial is provided in this [link](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/python_samples.html).
`ffmpeg-python` must be installed within the `isaac-sim` conda environment and can be acquired via a typical pip install:
```
conda activate isaac-sim
pip install ffmpeg-python
```
Lastly, clone the repository into the `python-samples` sub-directory within the `isaac-sim` directory.
```
git clone https://github.com/erasromani/isaac-sim-python.git
```
## Quickstart
Navigate to the `python-samples` sub-directory within the `isaac-sim` directory, source the environment variables, activate the conda environment, and run `simulate_grasp.py`.
```
source setenv.sh
conda activate isaac-sim
cd isaac-sim-python
python simulate_grasp.py -P Isaac/Props/Flip_Stack/large_corner_bracket_physics.usd Isaac/Props/Flip_Stack/screw_95_physics.usd Isaac/Props/Flip_Stack/t_connector_physics.usd -l nucleus_server -p 40 0 5 -a 45 -n 5 -v sim.mp4
```
The command above simulates grasp execution of a Panda arm in a scene with a bin and 5 objects randomly selected from the collection of USD files given. The specified grasp pose is a planar grasp with grasp position `(40, 0, 5)` and angle `45` degrees. A video of the simulation will be generated and saved as `sim.mp4`.
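If you prefer to drive the simulation from your own script instead of the command line, the `GraspSimulator` class can be used directly. The sketch below mirrors what `simulate_grasp.py` does internally; the `OmniKitHelper` settings and the USD path are illustrative and may need adjusting for your installation.
```
import os
from omni.isaac.synthetic_utils import OmniKitHelper
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.dynamic_control import _dynamic_control
from grasp.grasp_sim import GraspSimulator

# launch OmniKit (settings mirror simulate_grasp.py and may need adjusting)
kit = OmniKitHelper({"renderer": "RayTracedLighting",
                     "experience": f"{os.environ['EXP_PATH']}/isaac-sim-python.json",
                     "width": 1024, "height": 800})
mp = _motion_planning.acquire_motion_planning_interface()
dc = _dynamic_control.acquire_dynamic_control_interface()

sim = GraspSimulator(kit, dc, mp, record=True)
sim.add_object_path("Isaac/Props/Flip_Stack/screw_95_physics.usd", from_server=True)
sim.play()
sim.add_object(position=(40, 0, 10))  # spawn an object above the bin
sim.wait_for_drop()
sim.wait_for_loading()
evaluation = sim.execute_grasp(position=(40, 0, 5), angle=45)  # planar grasp
print(evaluation)  # GRASP_eval.SUCCESS or GRASP_eval.FAILURE
sim.stop()
sim.save_video("sim.mp4")
```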
## Additional Resources
- https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/overview.html
- https://docs.omniverse.nvidia.com/py/isaacsim/index.html
| 2,201 | Markdown | 56.947367 | 550 | 0.79055 |
erasromani/isaac-sim-python/grasp/grasp_sim.py | import os
import numpy as np
import tempfile
import omni.kit
from omni.isaac.synthetic_utils import SyntheticDataHelper
from grasp.utils.isaac_utils import RigidBody
from grasp.grasping_scenarios.grasp_object import GraspObject
from grasp.utils.visualize import screenshot, img2vid
default_camera_pose = {
'position': (142, -127, 56), # position given by (x, y, z)
'target': (-180, 234, -27) # target given by (x, y , z)
}
class GraspSimulator(GraspObject):
""" Defines a grasping simulation scenario
Scenarios define planar grasp execution in a scene of a Panda arm and various rigid objects
"""
def __init__(self, kit, dc, mp, dt=1/60.0, record=False, record_interval=10):
"""
Initializes grasp simulator
Args:
kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
dt (float): simulation time step in seconds
record (bool): flag for capturing screenshots throughout simulation for video recording
record_interval (int): frame intervals for capturing screenshots
"""
super().__init__(kit, dc, mp)
self.frame = 0
self.dt = dt
self.record = record
self.record_interval = record_interval
self.tmp_dir = tempfile.mkdtemp()
self.sd_helper = SyntheticDataHelper()
# create initial scene
self.create_franka()
# set camera pose
self.set_camera_pose(default_camera_pose['position'], default_camera_pose['target'])
def execute_grasp(self, position, angle):
"""
Executes a planar grasp with a panda arm.
Args:
            position (list or numpy.ndarray): grasp position array of length 3 given by [x, y, z]
            angle (float): grasp angle in degrees
        Returns:
            evaluation (GRASP_eval): grasp evaluation, either GRASP_eval.SUCCESS or GRASP_eval.FAILURE
"""
self.set_target_angle(angle)
self.set_target_position(position)
self.perform_tasks()
# start simulation
if self._kit.editor.is_playing(): previously_playing = True
else: previously_playing = False
if self.pick_and_place is not None:
while True:
self.step(0)
self.update()
if self.pick_and_place.evaluation is not None:
break
evaluation = self.pick_and_place.evaluation
self.stop_tasks()
self.step(0)
self.update()
# Stop physics simulation
if not previously_playing: self.stop()
return evaluation
def wait_for_drop(self, max_steps=2000):
"""
Waits for all objects to drop.
Args:
max_steps (int): maximum number of timesteps before aborting wait
"""
# start simulation
if self._kit.editor.is_playing(): previously_playing = True
else: previously_playing = False
if not previously_playing: self.play()
step = 0
while step < max_steps or self._kit.is_loading():
self.step(step)
self.update()
objects_speed = np.array([o.get_speed() for o in self.objects])
if np.all(objects_speed == 0): break
step +=1
# Stop physics simulation
if not previously_playing: self.stop()
def wait_for_loading(self):
"""
Waits for all scene visuals to load.
"""
while self.is_loading():
self.update()
def play(self):
"""
Starts simulation.
"""
self._kit.play()
if not hasattr(self, 'world') or not hasattr(self, 'franka_solid') or not hasattr(self, 'bin_solid') or not hasattr(self, 'pick_and_place'):
self.register_scene()
def stop(self):
"""
Stops simulation.
"""
self._kit.stop()
def update(self):
"""
Simulate one time step.
"""
if self.record and self.sd_helper is not None and self.frame % self.record_interval == 0:
screenshot(self.sd_helper, suffix=self.frame, directory=self.tmp_dir)
self._kit.update(self.dt)
self.frame += 1
def is_loading(self):
"""
Determine if all scene visuals are loaded.
Returns:
(bool): flag for whether or not all scene visuals are loaded
"""
return self._kit.is_loading()
def set_camera_pose(self, position, target):
"""
Set camera pose.
Args:
            position (list or numpy.ndarray): camera position array of length 3 given by [x, y, z]
            target (list or numpy.ndarray): target position array of length 3 given by [x, y, z]
"""
self._editor.set_camera_position("/OmniverseKit_Persp", *position, True)
self._editor.set_camera_target("/OmniverseKit_Persp", *target, True)
def save_video(self, path):
"""
Save video recording of screenshots taken throughout the simulation.
Args:
path (str): output video filename
"""
framerate = int(round(1.0 / (self.record_interval * self.dt)))
img2vid(os.path.join(self.tmp_dir, '*.png'), path, framerate=framerate)
| 5,666 | Python | 32.532544 | 148 | 0.59107 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/scenario.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import gc
import carb
import omni.usd
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from grasp.utils.isaac_utils import set_up_z_axis
class Scenario:
"""
Defines a block stacking scenario.
Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
"""
def __init__(self, editor, dc, mp):
"""
Initialize scenario.
Args:
editor (omni.kit.editor._editor.IEditor): editor object from isaac-sim simulation
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
"""
self._editor = editor # Reference to the Kit editor
self._stage = omni.usd.get_context().get_stage() # Reference to the current USD stage
self._dc = dc # Reference to the dynamic control plugin
self._mp = mp # Reference to the motion planning plugin
self._domains = [] # Contains instances of environment
        self._obstacles = []  # Contains references to any obstacles in the scenario
self._executor = None # Contains the thread pool used to run tasks
self._created = False # Is the robot created or not
self._running = False # Is the task running or not
def __del__(self):
"""
Cleanup scenario objects when deleted, force garbage collection.
"""
        self._created = False
self._domains = []
self._obstacles = []
self._executor = None
gc.collect()
def reset_blocks(self, *args):
"""
        Function called when block poses are reset.
"""
pass
def stop_tasks(self, *args):
"""
Stop tasks in the scenario if any.
"""
self._running = False
pass
def step(self, step):
"""
Step the scenario, can be used to update things in the scenario per frame.
"""
pass
def create_franka(self, *args):
"""
Create franka USD objects.
"""
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
# USD paths loaded by scenarios
self.franka_table_usd = self.asset_path + "/Samples/Leonardo/Stage/franka_block_stacking.usd"
self.franka_ghost_usd = self.asset_path + "/Samples/Leonardo/Robots/franka_ghost.usd"
self.background_usd = self.asset_path + "/Environments/Grid/gridroom_curved.usd"
self.rubiks_cube_usd = self.asset_path + "/Props/Rubiks_Cube/rubiks_cube.usd"
self.red_cube_usd = self.asset_path + "/Props/Blocks/red_block.usd"
self.yellow_cube_usd = self.asset_path + "/Props/Blocks/yellow_block.usd"
self.green_cube_usd = self.asset_path + "/Props/Blocks/green_block.usd"
self.blue_cube_usd = self.asset_path + "/Props/Blocks/blue_block.usd"
self._created = True
self._stage = omni.usd.get_context().get_stage()
set_up_z_axis(self._stage)
self.stop_tasks()
pass
def register_assets(self, *args):
"""
Connect franka controller to usd assets
"""
pass
def task(self, domain):
"""
Task to be performed for a given robot.
"""
pass
def perform_tasks(self, *args):
"""
Perform all tasks in scenario if multiple robots are present.
"""
self._running = True
pass
def is_created(self):
"""
Return if the franka was already created.
"""
return self._created
| 3,963 | Python | 32.880342 | 132 | 0.609134 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/grasp_object.py | # Credits: Starter code taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import os
import random
import numpy as np
import glob
import omni
import carb
from enum import Enum
from collections import deque
from pxr import Gf, UsdGeom
from copy import copy
from omni.physx.scripts.physicsUtils import add_ground_plane
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils._isaac_utils import math as math_utils
from omni.isaac.samples.scripts.utils.world import World
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx import _physx
from grasp.utils.isaac_utils import create_prim_from_usd, RigidBody, set_translate, set_rotate, setup_physics
from grasp.grasping_scenarios.franka import Franka, default_config
from grasp.grasping_scenarios.scenario import Scenario
statedic = {0: "orig", 1: "axis_x", 2: "axis_y", 3: "axis_z"}
class SM_events(Enum):
"""
State machine events.
"""
START = 0
WAYPOINT_REACHED = 1
GOAL_REACHED = 2
ATTACHED = 3
DETACHED = 4
TIMEOUT = 5
STOP = 6
    NONE = 7  # no event occurred, just clocks
class SM_states(Enum):
"""
State machine states.
"""
    STANDBY = 0  # Default state, does nothing unless it enters with a START event
PICKING = 1
ATTACH = 2
HOLDING = 3
GRASPING = 4
LIFTING = 5
class GRASP_eval(Enum):
"""
Grasp execution evaluation.
"""
FAILURE = 0
SUCCESS = 1
class PickAndPlaceStateMachine(object):
"""
Self-contained state machine class for Robot Behavior. Each machine state may react to different events,
and the handlers are defined as in-class functions.
"""
def __init__(self, stage, robot, ee_prim, default_position):
"""
Initialize state machine.
Args:
stage (pxr.Usd.Stage): usd stage
robot (grasp.grasping_scenarios.franka.Franka): robot controller object
ee_prim (pxr.Usd.Prim): Panda arm end effector prim
default_position (omni.isaac.dynamic_control._dynamic_control.Transform): default position of Panda arm
"""
self.robot = robot
self.dc = robot.dc
self.end_effector = ee_prim
self.end_effector_handle = None
self._stage = stage
self.start_time = 0.0
self.start = False
self._time = 0.0
self.default_timeout = 10
self.default_position = copy(default_position)
self.target_position = default_position
self.target_point = default_position.p
self.target_angle = 0 # grasp angle in degrees
self.reset = False
self.evaluation = None
self.waypoints = deque()
self.thresh = {}
# Threshold to clear waypoints/goal
# (any waypoint that is not final will be cleared with the least precision)
self.precision_thresh = [
[0.0005, 0.0025, 0.0025, 0.0025],
[0.0005, 0.005, 0.005, 0.005],
[0.05, 0.2, 0.2, 0.2],
[0.08, 0.4, 0.4, 0.4],
[0.18, 0.6, 0.6, 0.6],
]
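        # each row above is a precision level; the four columns are error tolerances indexed by
        # the statedic frame elements [orig, axis_x, axis_y, axis_z] and used in goalReached()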
self.add_object = None
# Event management variables
# Used to verify if the goal was reached due to robot moving or it had never left previous target
self._is_moving = False
self._attached = False # Used to flag the Attached/Detached events on a change of state from the end effector
self._detached = False
self.is_closed = False
self.pick_count = 0
# Define the state machine handling functions
self.sm = {}
# Make empty state machine for all events and states
for s in SM_states:
self.sm[s] = {}
for e in SM_events:
self.sm[s][e] = self._empty
self.thresh[s] = 0
# Fill in the functions to handle each event for each status
self.sm[SM_states.STANDBY][SM_events.START] = self._standby_start
self.sm[SM_states.STANDBY][SM_events.GOAL_REACHED] = self._standby_goal_reached
self.thresh[SM_states.STANDBY] = 3
self.sm[SM_states.PICKING][SM_events.GOAL_REACHED] = self._picking_goal_reached
self.thresh[SM_states.PICKING] = 1
self.sm[SM_states.GRASPING][SM_events.ATTACHED] = self._grasping_attached
self.sm[SM_states.LIFTING][SM_events.GOAL_REACHED] = self._lifting_goal_reached
for s in SM_states:
self.sm[s][SM_events.DETACHED] = self._all_detached
self.sm[s][SM_events.TIMEOUT] = self._all_timeout
self.current_state = SM_states.STANDBY
self.previous_state = -1
self._physxIFace = _physx.acquire_physx_interface()
# Auxiliary functions
def _empty(self, *args):
"""
Empty function to use on states that do not react to some specific event.
"""
pass
def change_state(self, new_state, print_state=True):
"""
Function called every time a event handling changes current state.
"""
self.current_state = new_state
self.start_time = self._time
if print_state: carb.log_warn(str(new_state))
def goalReached(self):
"""
Checks if the robot has reached a certain waypoint in the trajectory.
"""
if self._is_moving:
state = self.robot.end_effector.status.current_frame
target = self.robot.end_effector.status.current_target
error = 0
for i in [0, 2, 3]:
k = statedic[i]
state_v = state[k]
target_v = target[k]
error = np.linalg.norm(state_v - target_v)
# General Threshold is the least strict
thresh = self.precision_thresh[-1][i]
if len(self.waypoints) == 0:
thresh = self.precision_thresh[self.thresh[self.current_state]][i]
if error > thresh:
return False
self._is_moving = False
return True
return False
def get_current_state_tr(self):
"""
Gets current End Effector Transform, converted from Motion position and Rotation matrix.
"""
# Gets end effector frame
state = self.robot.end_effector.status.current_frame
orig = state["orig"] * 100.0
mat = Gf.Matrix3f(
*state["axis_x"].astype(float), *state["axis_y"].astype(float), *state["axis_z"].astype(float)
)
q = mat.ExtractRotation().GetQuaternion()
(q_x, q_y, q_z) = q.GetImaginary()
q = [q_x, q_y, q_z, q.GetReal()]
tr = _dynamic_control.Transform()
tr.p = list(orig)
tr.r = q
return tr
def lerp_to_pose(self, pose, n_waypoints=1):
"""
        Adds spherical linear interpolated (slerp) waypoints from the last pose in the waypoint list to the provided pose.
        If the waypoint list is empty, the current pose is used as the starting point.
"""
if len(self.waypoints) == 0:
start = self.get_current_state_tr()
start.p = math_utils.mul(start.p, 0.01)
else:
start = self.waypoints[-1]
if n_waypoints > 1:
for i in range(n_waypoints):
self.waypoints.append(math_utils.slerp(start, pose, (i + 1.0) / n_waypoints))
else:
self.waypoints.append(pose)
def move_to_zero(self):
self._is_moving = False
self.robot.end_effector.go_local(
orig=[], axis_x=[], axis_y=[], axis_z=[], use_default_config=True, wait_for_target=False, wait_time=5.0
)
def move_to_target(self):
"""
Move arm towards target with RMP controller.
"""
xform_attr = self.target_position
self._is_moving = True
orig = np.array([xform_attr.p.x, xform_attr.p.y, xform_attr.p.z])
axis_y = np.array(math_utils.get_basis_vector_y(xform_attr.r))
axis_z = np.array(math_utils.get_basis_vector_z(xform_attr.r))
self.robot.end_effector.go_local(
orig=orig,
axis_x=[],
axis_y=axis_y,
axis_z=axis_z,
use_default_config=True,
wait_for_target=False,
wait_time=5.0,
)
def get_target_orientation(self):
"""
        Gets target gripper orientation given target angle and a planar grasp.
"""
angle = self.target_angle * np.pi / 180
mat = Gf.Matrix3f(
-np.cos(angle), -np.sin(angle), 0, -np.sin(angle), np.cos(angle), 0, 0, 0, -1
)
q = mat.ExtractRotation().GetQuaternion()
(q_x, q_y, q_z) = q.GetImaginary()
q = [q_x, q_y, q_z, q.GetReal()]
return q
def get_target_to_point(self, offset_position=[]):
"""
Get target Panda arm pose from target position and angle.
"""
offset = _dynamic_control.Transform()
if offset_position:
offset.p.x = offset_position[0]
offset.p.y = offset_position[1]
offset.p.z = offset_position[2]
target_pose = _dynamic_control.Transform()
target_pose.p = self.target_point
target_pose.r = self.get_target_orientation()
target_pose = math_utils.mul(target_pose, offset)
target_pose.p = math_utils.mul(target_pose.p, 0.01)
return target_pose
def set_target_to_point(self, offset_position=[], n_waypoints=1, clear_waypoints=True):
"""
        Clears the waypoint list and sets a new waypoint list towards a given point in space.
"""
target_position = self.get_target_to_point(offset_position=offset_position)
# linear interpolate to target pose
if clear_waypoints:
self.waypoints.clear()
self.lerp_to_pose(target_position, n_waypoints=n_waypoints)
# Get first waypoint target
self.target_position = self.waypoints.popleft()
def step(self, timestamp, start=False, reset=False):
"""
Steps the State machine, handling which event to call.
"""
if self.current_state != self.previous_state:
self.previous_state = self.current_state
if not self.start:
self.start = start
if self.current_state in [SM_states.GRASPING, SM_states.LIFTING]:
# object grasped
if not self.robot.end_effector.gripper.is_closed(1e-1) and not self.robot.end_effector.gripper.is_moving(1e-2):
self._attached = True
# self.is_closed = False
# object not grasped
elif self.robot.end_effector.gripper.is_closed(1e-1):
self._detached = True
self.is_closed = True
# Process events
if reset:
# reset to default pose, clear waypoints, and re-initialize event handlers
self.current_state = SM_states.STANDBY
self.previous_state = -1
self.robot.end_effector.gripper.open()
self.evaluation = None
self.start = False
self._time = 0
self.start_time = self._time
self.pick_count = 0
self.waypoints.clear()
self._detached = False
self._attached = False
self.target_position = self.default_position
self.move_to_target()
elif self._detached:
self._detached = False
self.sm[self.current_state][SM_events.DETACHED]()
elif self.goalReached():
if len(self.waypoints) == 0:
self.sm[self.current_state][SM_events.GOAL_REACHED]()
else:
self.target_position = self.waypoints.popleft()
self.move_to_target()
# self.start_time = self._time
elif self.current_state == SM_states.STANDBY and self.start:
self.sm[self.current_state][SM_events.START]()
elif self._attached:
self._attached = False
self.sm[self.current_state][SM_events.ATTACHED]()
elif self._time - self.start_time > self.default_timeout:
self.sm[self.current_state][SM_events.TIMEOUT]()
else:
self.sm[self.current_state][SM_events.NONE]()
self._time += 1.0 / 60.0
# Event handling functions. Each state has its own event handler function depending on which event happened
def _standby_start(self, *args):
"""
Handles the start event when in standby mode.
Proceeds to move towards target grasp pose.
"""
# Tell motion planner controller to ignore current object as an obstacle
self.pick_count = 0
self.evaluation = None
self.lerp_to_pose(self.default_position, 1)
self.lerp_to_pose(self.default_position, 60)
self.robot.end_effector.gripper.open()
# set target above the current bin with offset of 10 cm
self.set_target_to_point(offset_position=[0.0, 0.0, -10.0], n_waypoints=90, clear_waypoints=False)
# pause before lowering to target object
self.lerp_to_pose(self.waypoints[-1], 180)
self.set_target_to_point(n_waypoints=90, clear_waypoints=False)
# start arm movement
self.move_to_target()
# Move to next state
self.change_state(SM_states.PICKING)
# NOTE: As is, this method is never executed
def _standby_goal_reached(self, *args):
"""
Reset grasp execution.
"""
self.move_to_zero()
self.start = True
def _picking_goal_reached(self, *args):
"""
        Grasp pose reached, close gripper.
"""
self.robot.end_effector.gripper.close()
self.is_closed = True
# Move to next state
self.move_to_target()
self.robot.end_effector.gripper.width_history.clear()
self.change_state(SM_states.GRASPING)
def _grasping_attached(self, *args):
"""
Object grasped, lift arm.
"""
self.waypoints.clear()
offset = _dynamic_control.Transform()
offset.p.z = -10
target_pose = math_utils.mul(self.get_current_state_tr(), offset)
target_pose.p = math_utils.mul(target_pose.p, 0.01)
self.lerp_to_pose(target_pose, n_waypoints=60)
self.lerp_to_pose(target_pose, n_waypoints=120)
# Move to next state
self.move_to_target()
self.robot.end_effector.gripper.width_history.clear()
self.change_state(SM_states.LIFTING)
def _lifting_goal_reached(self, *args):
"""
Finished executing grasp successfully, resets for next grasp execution.
"""
self.is_closed = False
self.robot.end_effector.gripper.open()
self._all_detached()
self.pick_count += 1
self.evaluation = GRASP_eval.SUCCESS
carb.log_warn(str(GRASP_eval.SUCCESS))
def _all_timeout(self, *args):
"""
Timeout reached and reset.
"""
self.change_state(SM_states.STANDBY, print_state=False)
self.robot.end_effector.gripper.open()
self.start = False
self.waypoints.clear()
self.target_position = self.default_position
self.lerp_to_pose(self.default_position, 1)
self.lerp_to_pose(self.default_position, 10)
self.lerp_to_pose(self.default_position, 60)
self.move_to_target()
self.evaluation = GRASP_eval.FAILURE
carb.log_warn(str(GRASP_eval.FAILURE))
def _all_detached(self, *args):
"""
Object detached and reset.
"""
self.change_state(SM_states.STANDBY, print_state=False)
self.start = False
self.waypoints.clear()
self.lerp_to_pose(self.target_position, 60)
self.lerp_to_pose(self.default_position, 10)
self.lerp_to_pose(self.default_position, 60)
self.move_to_target()
self.evaluation = GRASP_eval.FAILURE
carb.log_warn(str(GRASP_eval.FAILURE))
class GraspObject(Scenario):
""" Defines an obstacle avoidance scenario
Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
"""
def __init__(self, kit, dc, mp):
"""
Initialize scenario.
Args:
kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
"""
super().__init__(kit.editor, dc, mp)
self._kit = kit
self._paused = True
self._start = False
self._reset = False
self._time = 0
self._start_time = 0
self.current_state = SM_states.STANDBY
self.timeout_max = 8.0
self.pick_and_place = None
self._pending_stop = False
self._gripper_open = False
self.current_obj = 0
self.max_objs = 100
self.num_objs = 3
self.add_objects_timeout = -1
self.franka_solid = None
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
else:
self.nucleus_server = nucleus_server
def __del__(self):
"""
Cleanup scenario objects when deleted, force garbage collection.
"""
if self.franka_solid:
self.franka_solid.end_effector.gripper = None
super().__del__()
def add_object_path(self, object_path, from_server=False):
"""
Add object usd path.
"""
if from_server and hasattr(self, 'nucleus_server'):
object_path = os.path.join(self.nucleus_server, object_path)
        if not from_server and os.path.isdir(object_path): object_usd = glob.glob(os.path.join(object_path, '**/*.usd'), recursive=True)
else: object_usd = [object_path]
if hasattr(self, 'objects_usd'):
self.objects_usd.extend(object_usd)
else:
self.objects_usd = object_usd
def create_franka(self, *args):
"""
Create franka USD objects and bin USD objects.
"""
super().create_franka()
if self.asset_path is None:
return
# Load robot environment and set its transform
self.env_path = "/scene"
robot_usd = self.asset_path + "/Robots/Franka/franka.usd"
robot_path = "/scene/robot"
create_prim_from_usd(self._stage, robot_path, robot_usd, Gf.Vec3d(0, 0, 0))
bin_usd = self.asset_path + "/Props/KLT_Bin/large_KLT.usd"
bin_path = "/scene/bin"
create_prim_from_usd(self._stage, bin_path, bin_usd, Gf.Vec3d(40, 0, 4))
# Set robot end effector Target
target_path = "/scene/target"
if self._stage.GetPrimAtPath(target_path):
return
GoalPrim = self._stage.DefinePrim(target_path, "Xform")
self.default_position = _dynamic_control.Transform()
self.default_position.p = [0.4, 0.0, 0.3]
self.default_position.r = [0.0, 1.0, 0.0, 0.0] #TODO: Check values for stability
p = self.default_position.p
r = self.default_position.r
set_translate(GoalPrim, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
set_rotate(GoalPrim, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
# Setup physics simulation
add_ground_plane(self._stage, "/groundPlane", "Z", 1000.0, Gf.Vec3f(0.0), Gf.Vec3f(1.0))
setup_physics(self._stage)
def rand_position(self, bound, margin=0, z_range=None):
"""
Obtain random position contained within a specified bound.
"""
x_range = (bound[0][0] * (1 - margin), bound[1][0] * (1 - margin))
y_range = (bound[0][1] * (1 - margin), bound[1][1] * (1 - margin))
if z_range is None:
z_range = (bound[0][2] * (1 - margin), bound[1][2] * (1 - margin))
x = np.random.uniform(*x_range)
y = np.random.uniform(*y_range)
z = np.random.uniform(*z_range)
return Gf.Vec3d(x, y, z)
# combine add_object and add_and_register_object
def add_object(self, *args, register=True, position=None):
"""
Add object to scene.
"""
prim = self.create_new_objects(position=position)
if not register:
return prim
self._kit.update()
if not hasattr(self, 'objects'):
self.objects = []
self.objects.append(RigidBody(prim, self._dc))
def create_new_objects(self, *args, position=None):
"""
Randomly select and create prim of object in scene.
"""
if not hasattr(self, 'objects_usd'):
return
prim_usd_path = self.objects_usd[random.randint(0, len(self.objects_usd) - 1)]
prim_env_path = "/scene/objects/object_{}".format(self.current_obj)
if position is None:
position = self.rand_position(self.bin_solid.get_bound(), margin=0.2, z_range=(10, 10))
prim = create_prim_from_usd(self._stage, prim_env_path, prim_usd_path, position)
if hasattr(self, 'current_obj'): self.current_obj += 1
else: self.current_obj = 0
return prim
def register_objects(self, *args):
"""
Register all objects.
"""
self.objects = []
objects_path = '/scene/objects'
objects_prim = self._stage.GetPrimAtPath(objects_path)
if objects_prim.IsValid():
for object_prim in objects_prim.GetChildren():
self.objects.append(RigidBody(object_prim, self._dc))
# TODO: Delete method
def add_and_register_object(self, *args):
prim = self.create_new_objects()
self._kit.update()
if not hasattr(self, 'objects'):
self.objects = []
self.objects.append(RigidBody(prim, self._dc))
def register_scene(self, *args):
"""
Register world, panda arm, bin, and objects.
"""
self.world = World(self._dc, self._mp)
self.register_assets(args)
self.register_objects(args)
def register_assets(self, *args):
"""
Connect franka controller to usd assets.
"""
# register robot with RMP
robot_path = "/scene/robot"
self.franka_solid = Franka(
self._stage, self._stage.GetPrimAtPath(robot_path), self._dc, self._mp, self.world, default_config
)
# register bin
bin_path = "/scene/bin"
bin_prim = self._stage.GetPrimAtPath(bin_path)
self.bin_solid = RigidBody(bin_prim, self._dc)
# register stage machine
self.pick_and_place = PickAndPlaceStateMachine(
self._stage,
self.franka_solid,
self._stage.GetPrimAtPath("/scene/robot/panda_hand"),
self.default_position,
)
def perform_tasks(self, *args):
"""
Perform all tasks in scenario if multiple robots are present.
"""
self._start = True
self._paused = False
return False
def step(self, step):
"""
Step the scenario, can be used to update things in the scenario per frame.
"""
if self._editor.is_playing():
if self._pending_stop:
self.stop_tasks()
return
# Updates current references and locations for the robot.
self.world.update()
self.franka_solid.update()
target = self._stage.GetPrimAtPath("/scene/target")
xform_attr = target.GetAttribute("xformOp:transform")
if self._reset:
self._paused = False
if not self._paused:
self._time += 1.0 / 60.0
self.pick_and_place.step(self._time, self._start, self._reset)
if self._reset:
self._paused = True
self._time = 0
self._start_time = 0
p = self.default_position.p
r = self.default_position.r
set_translate(target, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
set_rotate(target, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
else:
state = self.franka_solid.end_effector.status.current_target
state_1 = self.pick_and_place.target_position
tr = state["orig"] * 100.0
set_translate(target, Gf.Vec3d(tr[0], tr[1], tr[2]))
set_rotate(target, Gf.Matrix3d(Gf.Quatd(state_1.r.w, state_1.r.x, state_1.r.y, state_1.r.z)))
self._start = False
self._reset = False
if self.add_objects_timeout > 0:
self.add_objects_timeout -= 1
if self.add_objects_timeout == 0:
self.create_new_objects()
else:
translate_attr = xform_attr.Get().GetRow3(3)
rotate_x = xform_attr.Get().GetRow3(0)
rotate_y = xform_attr.Get().GetRow3(1)
rotate_z = xform_attr.Get().GetRow3(2)
orig = np.array(translate_attr) / 100.0
axis_x = np.array(rotate_x)
axis_y = np.array(rotate_y)
axis_z = np.array(rotate_z)
self.franka_solid.end_effector.go_local(
orig=orig,
axis_x=axis_x, # TODO: consider setting this to [] for stability reasons
axis_y=axis_y,
axis_z=axis_z,
use_default_config=True,
wait_for_target=False,
wait_time=5.0,
)
def stop_tasks(self, *args):
"""
Stop tasks in the scenario if any.
"""
if self.pick_and_place is not None:
if self._editor.is_playing():
self._reset = True
self._pending_stop = False
else:
self._pending_stop = True
def pause_tasks(self, *args):
"""
Pause tasks in the scenario.
"""
self._paused = not self._paused
return self._paused
# TODO: use gripper.width == 0 as a proxy for _gripper_open == False
def actuate_gripper(self):
"""
Actuate Panda gripper.
"""
if self._gripper_open:
self.franka_solid.end_effector.gripper.close()
self._gripper_open = False
else:
self.franka_solid.end_effector.gripper.open()
self._gripper_open = True
def set_target_angle(self, angle):
"""
Set grasp angle in degrees.
"""
if self.pick_and_place is not None:
self.pick_and_place.target_angle = angle
def set_target_position(self, position):
"""
Set grasp position.
"""
if self.pick_and_place is not None:
self.pick_and_place.target_point = position
| 27,230 | Python | 35.502681 | 137 | 0.573265 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/franka.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import time
import os
import numpy as np
import carb.tokens
import omni.kit.settings
from pxr import Usd, UsdGeom, Gf
from collections import deque
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.samples.scripts.utils import math_utils
# default joint configuration
default_config = (0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75)
# Alternative default config for motion planning
alternate_config = [
(1.5356, -1.3813, -1.5151, -2.0015, -1.3937, 1.5887, 1.4597),
(-1.5356, -1.3813, 1.5151, -2.0015, 1.3937, 1.5887, 0.4314),
]
class Gripper:
"""
Gripper for franka.
"""
def __init__(self, dc, ar):
"""
Initialize gripper.
Args:
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
ar (int): articulation identifier
"""
self.dc = dc
self.ar = ar
self.finger_j1 = self.dc.find_articulation_dof(self.ar, "panda_finger_joint1")
self.finger_j2 = self.dc.find_articulation_dof(self.ar, "panda_finger_joint2")
self.width = 0
self.width_history = deque(maxlen=50)
def open(self, wait=False):
"""
Open gripper.
"""
if self.width < 0.045:
self.move(0.045, wait=True)
self.move(0.09, wait=wait)
def close(self, wait=False, force=0):
"""
Close gripper.
"""
self.move(0, wait=wait)
def move(self, width=0.03, speed=0.2, wait=False):
"""
Modify width.
"""
self.width = width
# if wait:
# time.sleep(0.5)
def update(self):
"""
Actuate gripper.
"""
self.dc.set_dof_position_target(self.finger_j1, self.width * 0.5 * 100)
self.dc.set_dof_position_target(self.finger_j2, self.width * 0.5 * 100)
self.width_history.append(self.get_width())
def get_width(self):
"""
Get current width.
"""
return sum(self.get_position())
def get_position(self):
"""
Get left and right finger local position.
"""
return self.dc.get_dof_position(self.finger_j1), self.dc.get_dof_position(self.finger_j2)
def get_velocity(self, from_articulation=True):
"""
Get left and right finger local velocity.
"""
if from_articulation:
return (self.dc.get_dof_velocity(self.finger_j1), self.dc.get_dof_velocity(self.finger_j2))
else:
leftfinger_handle = self.dc.get_rigid_body(self.dc.get_articulation_path(self.ar) + '/panda_leftfinger')
rightfinger_handle = self.dc.get_rigid_body(self.dc.get_articulation_path(self.ar) + '/panda_rightfinger')
leftfinger_velocity = np.linalg.norm(np.array(self.dc.get_rigid_body_local_linear_velocity(leftfinger_handle)))
rightfinger_velocity = np.linalg.norm(np.array(self.dc.get_rigid_body_local_linear_velocity(rightfinger_handle)))
return (leftfinger_velocity, rightfinger_velocity)
def is_moving(self, tol=1e-2):
"""
Determine if gripper fingers are moving
"""
if len(self.width_history) < self.width_history.maxlen or np.array(self.width_history).std() > tol:
return True
else:
return False
def get_state(self):
"""
Get gripper state.
"""
dof_states = self.dc.get_articulation_dof_states(self.ar, _dynamic_control.STATE_ALL)
return dof_states[-2], dof_states[-1]
def is_closed(self, tol=1e-2):
"""
Determine if gripper is closed.
"""
if self.get_width() < tol:
return True
else:
return False
class Status:
"""
Class that contains status for end effector
"""
def __init__(self, mp, rmp_handle):
"""
Initialize status object.
Args:
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
rmp_handle (int): RMP handle identifier
"""
self.mp = mp
self.rmp_handle = rmp_handle
self.orig = np.array([0, 0, 0])
self.axis_x = np.array([1, 0, 0])
self.axis_y = np.array([0, 1, 0])
self.axis_z = np.array([0, 0, 1])
self.current_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
self.target_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
self.frame = self.current_frame
def update(self):
"""
Update end effector state.
"""
state = self.mp.getRMPState(self.rmp_handle)
target = self.mp.getRMPTarget(self.rmp_handle)
self.orig = np.array([state[0].x, state[0].y, state[0].z])
self.axis_x = np.array([state[1].x, state[1].y, state[1].z])
self.axis_y = np.array([state[2].x, state[2].y, state[2].z])
self.axis_z = np.array([state[3].x, state[3].y, state[3].z])
self.current_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
self.frame = self.current_frame
self.current_target = {
"orig": np.array([target[0].x, target[0].y, target[0].z]),
"axis_x": np.array([target[1].x, target[1].y, target[1].z]),
"axis_y": np.array([target[2].x, target[2].y, target[2].z]),
"axis_z": np.array([target[3].x, target[3].y, target[3].z]),
}
class EndEffector:
"""
End effector object that controls movement.
"""
def __init__(self, dc, mp, ar, rmp_handle):
"""
Initialize end effector.
Args:
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
ar (int): articulation identifier
rmp_handle (int): RMP handle identifier
"""
self.dc = dc
self.ar = ar
self.mp = mp
self.rmp_handle = rmp_handle
self.gripper = Gripper(dc, ar)
self.status = Status(mp, rmp_handle)
self.UpRot = Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)
def freeze(self):
self.go_local(
orig=self.status.orig, axis_x=self.status.axis_x, axis_z=self.status.axis_z, wait_for_target=False
)
def go_local(
self,
target=None,
orig=[],
axis_x=[],
axis_y=[],
axis_z=[],
required_orig_err=0.01,
required_axis_x_err=0.01,
required_axis_y_err=0.01,
required_axis_z_err=0.01,
orig_thresh=None,
axis_x_thresh=None,
axis_y_thresh=None,
axis_z_thresh=None,
approach_direction=[],
approach_standoff=0.1,
approach_standoff_std_dev=0.001,
use_level_surface_orientation=False,
use_target_weight_override=True,
use_default_config=False,
wait_for_target=True,
wait_time=None,
):
self.target_weight_override_value = 10000.0
self.target_weight_override_std_dev = 0.03
if orig_thresh:
required_orig_err = orig_thresh
if axis_x_thresh:
required_axis_x_err = axis_x_thresh
if axis_y_thresh:
required_axis_y_err = axis_y_thresh
if axis_z_thresh:
required_axis_z_err = axis_z_thresh
if target:
orig = target["orig"]
if "axis_x" in target and target["axis_x"] is not None:
axis_x = target["axis_x"]
if "axis_y" in target and target["axis_y"] is not None:
axis_y = target["axis_y"]
if "axis_z" in target and target["axis_z"] is not None:
axis_z = target["axis_z"]
orig = np.array(orig)
axis_x = np.array(axis_x)
axis_y = np.array(axis_y)
axis_z = np.array(axis_z)
approach = _motion_planning.Approach((0, 0, 1), 0, 0)
if len(approach_direction) != 0:
approach = _motion_planning.Approach(approach_direction, approach_standoff, approach_standoff_std_dev)
pose_command = _motion_planning.PartialPoseCommand()
if len(orig) > 0:
pose_command.set(_motion_planning.Command(orig, approach), int(_motion_planning.FrameElement.ORIG))
if len(axis_x) > 0:
pose_command.set(_motion_planning.Command(axis_x), int(_motion_planning.FrameElement.AXIS_X))
if len(axis_y) > 0:
pose_command.set(_motion_planning.Command(axis_y), int(_motion_planning.FrameElement.AXIS_Y))
if len(axis_z) > 0:
pose_command.set(_motion_planning.Command(axis_z), int(_motion_planning.FrameElement.AXIS_Z))
self.mp.goLocal(self.rmp_handle, pose_command)
if wait_for_target and wait_time:
error = 1
future_time = time.time() + wait_time
while error > required_orig_err and time.time() < future_time:
# time.sleep(0.1)
error = self.mp.getError(self.rmp_handle)
def look_at(self, gripper_pos, target):
# Y up works for look at but sometimes flips, go_local might be a safer bet with a locked y_axis
orientation = math_utils.lookAt(gripper_pos, target, (0, 1, 0))
mat = Gf.Matrix3d(orientation).GetTranspose()
self.go_local(
orig=[gripper_pos[0], gripper_pos[1], gripper_pos[2]],
axis_x=[mat.GetColumn(0)[0], mat.GetColumn(0)[1], mat.GetColumn(0)[2]],
axis_z=[mat.GetColumn(2)[0], mat.GetColumn(2)[1], mat.GetColumn(2)[2]],
)
class Franka:
"""
    Franka object that contains implementation details for robot control.
"""
def __init__(self, stage, prim, dc, mp, world=None, group_path="", default_config=None, is_ghost=False):
"""
Initialize Franka controller.
Args:
stage (pxr.Usd.Stage): usd stage
prim (pxr.Usd.Prim): robot prim
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
world (omni.isaac.samples.scripts.utils.world.World): simulation world handler
default_config (tuple or list): default configuration for robot revolute joint drivers
is_ghost (bool): flag for turning off collision and modifying visuals for robot arm
"""
self.dc = dc
self.mp = mp
self.prim = prim
self.stage = stage
# get handle to the articulation for this franka
self.ar = self.dc.get_articulation(prim.GetPath().pathString)
self.is_ghost = is_ghost
self.base = self.dc.get_articulation_root_body(self.ar)
body_count = self.dc.get_articulation_body_count(self.ar)
for bodyIdx in range(body_count):
body = self.dc.get_articulation_body(self.ar, bodyIdx)
self.dc.set_rigid_body_disable_gravity(body, True)
exec_folder = os.path.abspath(
carb.tokens.get_tokens_interface().resolve(
f"{os.environ['ISAAC_PATH']}/exts/omni.isaac.motion_planning/resources/lula/lula_franka"
)
)
self.rmp_handle = self.mp.registerRmp(
exec_folder + "/urdf/lula_franka_gen.urdf",
exec_folder + "/config/robot_descriptor.yaml",
exec_folder + "/config/franka_rmpflow_common.yaml",
prim.GetPath().pathString,
"right_gripper",
True,
)
print("franka rmp handle", self.rmp_handle)
if world is not None:
self.world = world
self.world.rmp_handle = self.rmp_handle
self.world.register_parent(self.base, self.prim, "panda_link0")
settings = omni.kit.settings.get_settings_interface()
self.mp.setFrequency(self.rmp_handle, settings.get("/physics/timeStepsPerSecond"), True)
self.end_effector = EndEffector(self.dc, self.mp, self.ar, self.rmp_handle)
if default_config:
self.mp.setDefaultConfig(self.rmp_handle, default_config)
self.target_visibility = True
if self.is_ghost:
self.target_visibility = False
self.imageable = UsdGeom.Imageable(self.prim)
def __del__(self):
"""
Unregister RMP.
"""
self.mp.unregisterRmp(self.rmp_handle)
print(" Delete Franka")
def set_pose(self, pos, rot):
"""
Set robot pose.
"""
        self.mp.setTargetLocal(self.rmp_handle, pos, rot)
def set_speed(self, speed_level):
"""
Set robot speed.
"""
pass
def update(self):
"""
Update robot state.
"""
self.end_effector.gripper.update()
self.end_effector.status.update()
if self.imageable:
if self.target_visibility is not self.imageable.ComputeVisibility(Usd.TimeCode.Default()):
if self.target_visibility:
self.imageable.MakeVisible()
else:
self.imageable.MakeInvisible()
def send_config(self, config):
"""
Set robot default configuration.
"""
if self.is_ghost is False:
self.mp.setDefaultConfig(self.rmp_handle, config)
| 13,794 | Python | 34.371795 | 132 | 0.582935 |
erasromani/isaac-sim-python/grasp/utils/isaac_utils.py | # Credits: All code except class RigidBody and Camera is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import numpy as np
import omni.kit
from pxr import Usd, UsdGeom, Gf, PhysicsSchema, PhysxSchema
def create_prim_from_usd(stage, prim_env_path, prim_usd_path, location):
"""
Create prim from usd.
"""
envPrim = stage.DefinePrim(prim_env_path, "Xform") # create an empty Xform at the given path
envPrim.GetReferences().AddReference(prim_usd_path) # attach the USD to the given path
set_translate(envPrim, location) # set pose
return stage.GetPrimAtPath(envPrim.GetPath().pathString)
def set_up_z_axis(stage):
"""
Utility function to specify the stage with the z axis as "up".
"""
rootLayer = stage.GetRootLayer()
rootLayer.SetPermissionToEdit(True)
with Usd.EditContext(stage, rootLayer):
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
def set_translate(prim, new_loc):
"""
Specify position of a given prim, reuse any existing transform ops when possible.
"""
properties = prim.GetPropertyNames()
if "xformOp:translate" in properties:
translate_attr = prim.GetAttribute("xformOp:translate")
translate_attr.Set(new_loc)
elif "xformOp:translation" in properties:
translation_attr = prim.GetAttribute("xformOp:translate")
translation_attr.Set(new_loc)
elif "xformOp:transform" in properties:
transform_attr = prim.GetAttribute("xformOp:transform")
matrix = prim.GetAttribute("xformOp:transform").Get()
matrix.SetTranslateOnly(new_loc)
transform_attr.Set(matrix)
else:
xform = UsdGeom.Xformable(prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(Gf.Matrix4d().SetTranslate(new_loc))
def set_rotate(prim, rot_mat):
"""
Specify orientation of a given prim, reuse any existing transform ops when possible.
"""
properties = prim.GetPropertyNames()
if "xformOp:rotate" in properties:
rotate_attr = prim.GetAttribute("xformOp:rotate")
rotate_attr.Set(rot_mat)
elif "xformOp:transform" in properties:
transform_attr = prim.GetAttribute("xformOp:transform")
matrix = prim.GetAttribute("xformOp:transform").Get()
matrix.SetRotateOnly(rot_mat.ExtractRotation())
transform_attr.Set(matrix)
else:
xform = UsdGeom.Xformable(prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(Gf.Matrix4d().SetRotate(rot_mat))
def create_background(stage, background_stage):
"""
Create background stage.
"""
background_path = "/background"
if not stage.GetPrimAtPath(background_path):
backPrim = stage.DefinePrim(background_path, "Xform")
backPrim.GetReferences().AddReference(background_stage)
# Move the stage down -104cm so that the floor is below the table wheels, move in y axis to get light closer
set_translate(backPrim, Gf.Vec3d(0, -400, -104))
def setup_physics(stage):
"""
Set default physics parameters.
"""
# Specify gravity
metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage)
gravityScale = 9.81 / metersPerUnit
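    # 9.81 m/s^2 expressed in stage units (e.g. 981 when the stage is authored in centimeters)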
gravity = Gf.Vec3f(0.0, 0.0, -gravityScale)
scene = PhysicsSchema.PhysicsScene.Define(stage, "/physics/scene")
scene.CreateGravityAttr().Set(gravity)
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physics/scene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physics/scene")
physxSceneAPI.CreatePhysxSceneEnableCCDAttr(True)
physxSceneAPI.CreatePhysxSceneEnableStabilizationAttr(True)
physxSceneAPI.CreatePhysxSceneEnableGPUDynamicsAttr(False)
physxSceneAPI.CreatePhysxSceneBroadphaseTypeAttr("MBP")
physxSceneAPI.CreatePhysxSceneSolverTypeAttr("TGS")
class Camera:
"""
    Camera object that contains state information for a camera in the scene.
"""
def __init__(self, camera_path, translation, rotation):
"""
Initializes the Camera object.
Args:
camera_path (str): path of camera in stage hierarchy
translation (list or tuple): camera position
rotation (list or tuple): camera orientation described by euler angles in degrees
"""
self.prim = self._kit.create_prim(
camera_path,
"Camera",
translation=translation,
rotation=rotation,
)
self.name = self.prim.GetPrimPath().name
self.vpi = omni.kit.viewport.get_viewport_interface
def set_translate(self, position):
"""
Set camera position.
Args:
position (tuple): camera position specified by (X, Y, Z)
"""
if not isinstance(position, tuple): position = tuple(position)
translate_attr = self.prim.GetAttribute("xformOp:translate")
translate_attr.Set(position)
def set_rotate(self, rotation):
"""
        Set camera orientation.
Args:
rotation (tuple): camera orientation specified by three euler angles in degrees
"""
if not isinstance(rotation, tuple): rotation = tuple(rotation)
rotate_attr = self.prim.GetAttribute("xformOp:rotateZYX")
rotate_attr.Set(rotation)
def activate(self):
"""
Activate camera to viewport.
"""
self.vpi.get_viewport_window().set_active_camera(str(self.prim.GetPath()))
def __repr__(self):
return self.name
class RigidBody:
"""
RigidBody objects that contains state information of the rigid body.
"""
def __init__(self, prim, dc):
"""
        Initializes RigidBody object.
Args:
prim (pxr.Usd.Prim): rigid body prim
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
"""
self.prim = prim
self._dc = dc
self.name = prim.GetPrimPath().name
self.handle = self.get_rigid_body_handle()
def get_rigid_body_handle(self):
"""
Get rigid body handle.
"""
object_children = self.prim.GetChildren()
for child in object_children:
child_path = child.GetPath().pathString
body_handle = self._dc.get_rigid_body(child_path)
if body_handle != 0:
bin_path = child_path
object_handle = self._dc.get_rigid_body(bin_path)
if object_handle != 0: return object_handle
def get_linear_velocity(self):
"""
Get linear velocity of rigid body.
"""
return np.array(self._dc.get_rigid_body_linear_velocity(self.handle))
def get_angular_velocity(self):
"""
Get angular velocity of rigid body.
"""
return np.array(self._dc.get_rigid_body_angular_velocity(self.handle))
def get_speed(self):
"""
Get speed of rigid body given by the l2 norm of the velocity.
"""
velocity = self.get_linear_velocity()
speed = np.linalg.norm(velocity)
return speed
def get_pose(self):
"""
Get pose of the rigid body containing the position and orientation information.
"""
return self._dc.get_rigid_body_pose(self.handle)
def get_position(self):
"""
Get the position of the rigid body object.
"""
pose = self.get_pose()
position = np.array(pose.p)
return position
def get_orientation(self):
"""
Get orientation of the rigid body object.
"""
pose = self.get_pose()
orientation = np.array(pose.r)
return orientation
def get_bound(self):
"""
Get bounds of the rigid body object in global coordinates.
"""
bound = UsdGeom.Mesh(self.prim).ComputeWorldBound(0.0, "default").GetBox()
return [np.array(bound.GetMin()), np.array(bound.GetMax())]
def __repr__(self):
return self.name
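# --- Hedged usage sketch (not part of the original module) -------------------
# Assumes a USD stage with a rigid-body prim and an already-acquired Dynamic
# Control interface `dc`; the prim path below is purely illustrative.
def _print_rigid_body_state(stage, dc, prim_path="/World/object"):
    """Illustrative only: report basic state of a prim wrapped by RigidBody."""
    body = RigidBody(stage.GetPrimAtPath(prim_path), dc)
    print(body, body.get_position(), body.get_speed())
    bound_min, bound_max = body.get_bound()
    print("world bounds:", bound_min, bound_max)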
| 9,822 | Python | 32.640411 | 124 | 0.633069 |
erasromani/isaac-sim-python/grasp/utils/visualize.py | import os
import ffmpeg
import matplotlib.pyplot as plt
def screenshot(sd_helper, suffix="", prefix="image", directory="images/"):
"""
Take a screenshot of the current time step of a running NVIDIA Omniverse Isaac-Sim simulation.
Args:
sd_helper (omni.isaac.synthetic_utils.SyntheticDataHelper): helper class for visualizing OmniKit simulation
suffix (str or int): suffix for output filename of image screenshot of current time step of simulation
prefix (str): prefix for output filename of image screenshot of current time step of simulation
directory (str): output directory of image screenshot of current time step of simulation
"""
gt = sd_helper.get_groundtruth(
[
"rgb",
]
)
image = gt["rgb"][..., :3]
plt.imshow(image)
if suffix == "":
suffix = 0
if isinstance(suffix, int):
filename = os.path.join(directory, f'{prefix}_{suffix:05}.png')
else:
filename = os.path.join(directory, f'{prefix}_{suffix}.png')
plt.axis('off')
plt.savefig(filename)
def img2vid(input_pattern, output_fn, pattern_type='glob', framerate=25):
"""
Create video from a collection of images.
Args:
input_pattern (str): input pattern for a path of collection of images
output_fn (str): video output filename
pattern_type (str): pattern type for input pattern
framerate (int): video framerate
"""
(
ffmpeg
.input(input_pattern, pattern_type=pattern_type, framerate=framerate)
.output(output_fn)
.run(overwrite_output=True, quiet=True)
)
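# --- Hedged usage sketch (not part of the original module) -------------------
# Assumes a SyntheticDataHelper instance from a running Isaac Sim simulation;
# the directory, filename pattern, and frame count are illustrative defaults.
def _example_capture(sd_helper, num_frames=100, directory="images/"):
    """Illustrative only: screenshot each step, then stitch the frames into a video."""
    for step in range(num_frames):
        # ... advance the simulation here, then capture the current frame ...
        screenshot(sd_helper, suffix=step, prefix="image", directory=directory)
    img2vid(os.path.join(directory, "image_*.png"), os.path.join(directory, "simulation.mp4"), framerate=25)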
| 1,647 | Python | 30.692307 | 115 | 0.649059 |
pantelis-classes/omniverse-ai/README.md | # Learning in Simulated Worlds in Omniverse.
Please go to the wiki tab.
![image](https://user-images.githubusercontent.com/589439/143660504-bbcdb786-ea5f-4f74-9496-489032fa2e03.png)
https://github.com/pantelis-classes/omniverse-ai/wiki
<hr />
# Wiki Navigation
* [Home][home]
* [Isaac-Sim-SDK-Omniverse-Installation][Omniverse]
* [Synthetic-Data-Generation][SDG]
* [NVIDIA Transfer Learning Toolkit (TLT) Installation][TLT]
* [NVIDIA TAO][TAO]
* [detectnet_v2 Installation][detectnet_v2]
* [Jupyter Notebook][Jupyter-Notebook]
[home]: https://github.com/pantelis-classes/omniverse-ai/wiki
[Omniverse]: https://github.com/pantelis-classes/omniverse-ai/wiki/Isaac-Sim-SDK-Omniverse-Installation
[SDG]: https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)
[TLT]: https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation
[NTLTSD]: https://github.com/pantelis-classes/omniverse-ai/wiki/Using-NVIDIA-TLT-with-Synthetic-Data
[TAO]: https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)
[detectnet_v2]: https://github.com/pantelis-classes/omniverse-ai/wiki/detectnet_v2-Installation
[Jupyter-Notebook]: https://github.com/pantelis-classes/omniverse-ai/wiki/Jupyter-Notebook
<hr />
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true"> ![image](https://user-images.githubusercontent.com/589439/161171527-4e748031-ff4d-46ed-b1ac-b521cd8ffd3c.png)</a>
## Reports
<a href="https://docs.google.com/document/d/1jVXxrNgtOosZw_vAORzomSnmy45G3qK_mmk2B4oJtPg/edit?usp=sharing">Domain Randomization Paper</a><br>
This report provides an in-depth understanding of how Domain Randomization helps perception machine learning tasks such as object detection and/or segmentation.
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true">Final Report</a><br>
This final report contains an in-depth explanation of the hardware/software used, the methods used to collect the data, an explanation of the data collected, trained, and pruned, and the overall conclusions drawn from the trained and pruned datasets.
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true">![image](https://user-images.githubusercontent.com/589439/161171433-d2359618-b3dc-4839-b509-c938ce401f73.png)</a>
## Authors
<a href="https://github.com/dfsanchez999">Diego Sanchez</a> | <a href="https://harp.njit.edu/~jga26/">Jibran Absarulislam</a> | <a href="https://github.com/markkcruz">Mark Cruz</a> | <a href="https://github.com/sppatel2112">Sapan Patel</a>
## Supervisor
<a href="https://pantelis.github.io/">Dr. Pantelis Monogioudis</a>
## Credits
<a href="https://developer.nvidia.com/nvidia-omniverse-platform">NVIDIA Omniverse</a><br>
A platform that enables universal interoperability across different applications and 3D ecosystem vendors, providing real-time scene updates.
| 3,133 | Markdown | 57.037036 | 256 | 0.785828 |
pantelis-classes/omniverse-ai/Images/images.md | # A markdown file containing all the images in the wiki. (Saved in github's cloud)
![image](https://user-images.githubusercontent.com/589439/143155216-aad83dd6-0bc7-4c85-8c45-4696659a0ff2.png)
![image](https://user-images.githubusercontent.com/589439/143155405-5ab0c92a-10ea-4af2-bcc3-10215808025c.png)
![image](https://user-images.githubusercontent.com/589439/143155607-66dd62b1-9096-4960-af80-05c7d0560616.png)
![image](https://user-images.githubusercontent.com/589439/143155666-96fc6a9d-ca5c-4e10-bb4b-0b75c6afd331.png)
![image](https://user-images.githubusercontent.com/589439/143155774-8a7f0020-70e7-48a4-ad38-9abfda935f1b.png)
![image](https://user-images.githubusercontent.com/589439/143155905-39760d3e-ef68-4a03-8af8-8f1ea0fa7801.png)
![image](https://user-images.githubusercontent.com/589439/143155958-7fe5ce25-d447-4a07-b79b-9785ac456b9a.png)
![image](https://user-images.githubusercontent.com/589439/143155991-41cd11df-3ff9-4ca5-b112-7e63785740db.png)
![image](https://user-images.githubusercontent.com/589439/143156000-5b8dea90-b63b-4c05-90e5-da8f484070e2.png)
![image](https://user-images.githubusercontent.com/589439/143156018-cc426e4a-2785-4050-b643-dc2bee6251aa.png)
![image](https://user-images.githubusercontent.com/589439/143156108-4e4c2f1e-138b-451e-87a7-3cff9da960cb.png)
![image](https://user-images.githubusercontent.com/589439/143156160-4aef319c-3756-4ff6-b429-032d2e45513f.png)
![image](https://user-images.githubusercontent.com/589439/143156180-226269c5-ba2c-4f29-ad2e-378eaf8ee523.png)
![image](https://user-images.githubusercontent.com/589439/143156205-4d4b8afb-c334-4a22-af62-33f7174c716d.png)
![image](https://user-images.githubusercontent.com/589439/143156303-93a31da8-2dc3-49d5-b80a-0246b877dd34.png)
![image](https://user-images.githubusercontent.com/589439/143156381-07c34f94-b2f5-42ac-a61e-10fb5f27a8c9.png)
![image](https://user-images.githubusercontent.com/589439/143157449-7a86072c-0dc4-4e49-a1b3-a62c3f88187c.png)
![image](https://user-images.githubusercontent.com/589439/143157471-7b9bfc36-d505-4b77-8938-d9387e8280b1.png)
![image](https://user-images.githubusercontent.com/589439/143157512-a862401e-38f8-4334-90eb-3f597c583a48.png)
![image](https://user-images.githubusercontent.com/589439/143157553-744bfd7e-5b14-4905-bc84-cf01a245d9ff.png)
![image](https://user-images.githubusercontent.com/589439/143158851-a4f7a00b-4f25-40e0-ae2e-2fba3edef08e.png)
![image](https://user-images.githubusercontent.com/589439/143158880-17506781-abc2-4188-aca3-4546dcb475f9.png)
![image](https://user-images.githubusercontent.com/589439/143158912-97fb24ad-8b49-432e-a3d7-4badb0977714.png)
![image](https://user-images.githubusercontent.com/589439/143158967-afad1831-822f-4440-9a4b-9248c909007d.png)
![image](https://user-images.githubusercontent.com/589439/143160948-90380e23-e8cc-42b3-8933-4d88c5c9bc90.png)
![image](https://user-images.githubusercontent.com/589439/143655465-4efa6088-9bcd-4df4-92f3-d641975ece93.png)
![image](https://user-images.githubusercontent.com/589439/143655576-0ff7992c-0339-48c5-94f1-2be90b2877e5.png)
![image](https://user-images.githubusercontent.com/589439/143655623-cc957acf-a6f3-4e23-ad84-2f63762db770.png)
![image](https://user-images.githubusercontent.com/589439/143655734-92f93f94-723b-4a03-aee3-9004ebdfa931.png)
![image](https://user-images.githubusercontent.com/589439/143655803-423dddd8-398e-49e0-839f-d96a5e655441.png)
![image](https://user-images.githubusercontent.com/589439/143656306-85f1aefd-a6a8-4f07-a2e9-b7153ff175ce.png)
![image](https://user-images.githubusercontent.com/589439/143663079-a9503fd4-75f1-4bb0-bfd8-ada3bd9fa2ec.png)
![image](https://user-images.githubusercontent.com/589439/143663183-0bdb6ee0-84be-4788-bdc7-0ab23e9e5d41.png)
![image](https://user-images.githubusercontent.com/589439/143663255-907bff87-ae02-4c4d-8400-ef6a914c3aae.png)
![image](https://user-images.githubusercontent.com/589439/143663347-4ec70e43-da4d-4b97-bd26-b336586bc9d7.png)
![image](https://user-images.githubusercontent.com/589439/143663405-5323b62f-74a8-409f-80a8-c2c6ad961497.png)
![image](https://user-images.githubusercontent.com/589439/143664760-9d9bc86e-9e4a-4bf0-882a-3fa2db1d416b.png)
![image](https://user-images.githubusercontent.com/589439/143664935-4e1d2e45-b4da-4f83-922c-2e7581a65f98.png)
![image](https://user-images.githubusercontent.com/589439/143665245-9dc7ac44-78cd-45ac-992c-f3e23d1a044e.png)
![image](https://user-images.githubusercontent.com/589439/143665289-9f80a74d-f3f4-45b9-a92a-e213a6c37056.png)
![image](https://user-images.githubusercontent.com/589439/143666284-5ff41514-5c89-4cc7-afa0-b17ed9003b61.png)
![image](https://user-images.githubusercontent.com/589439/143666323-eb172e58-d0cb-4228-af31-f9f7daf43d19.png)
![image](https://user-images.githubusercontent.com/589439/143666365-9cbab570-213f-403b-bdc9-d891025fabac.png)
![image](https://user-images.githubusercontent.com/589439/143666538-47885861-2340-4fca-9507-8a1a66d82fe9.png)
![image](https://user-images.githubusercontent.com/589439/143666560-4a7dd70c-abde-4af8-a1c7-16eab5d99bf3.png)
![image](https://user-images.githubusercontent.com/589439/143666727-f7a06dbc-aba6-410f-8bd5-0aa24ecf38d3.png)
![image](https://user-images.githubusercontent.com/589439/143666820-b12aafdd-f1e1-4c46-889c-34af1c9ca929.png)
![image](https://user-images.githubusercontent.com/589439/143666829-813f9715-3a2d-49f1-9124-5a690681accc.png)
![image](https://user-images.githubusercontent.com/589439/143666852-90d659de-01a0-4685-bf36-42868e1c77d9.png)
![image](https://user-images.githubusercontent.com/589439/143666866-5896317b-1255-4e67-abe7-5f3ff03be288.png)
![image](https://user-images.githubusercontent.com/589439/143666874-a453b635-63e6-44e0-94c1-7127e1c7f729.png)
![image](https://user-images.githubusercontent.com/589439/143723668-73111ae8-0ac5-4729-b89b-481d29b25d16.png)
![image](https://user-images.githubusercontent.com/589439/143723824-968874c9-5f8e-44cc-a535-d0d336a72b78.png)
![image](https://user-images.githubusercontent.com/589439/143723906-baf552bc-e9d1-435b-8d43-553f6f0a6707.png)
![image](https://user-images.githubusercontent.com/589439/143723930-c9c8658f-339d-4693-894a-daf70dea28ae.png)
![image](https://user-images.githubusercontent.com/589439/143724110-61196b6e-7d6e-4fc5-86a4-c7234cd4d379.png)
![image](https://user-images.githubusercontent.com/589439/143724128-692a9f83-0365-4f0f-9068-e8e6af9cac15.png)
![image](https://user-images.githubusercontent.com/589439/143724159-ae6c0578-14e4-463b-8287-ef4147ff0f34.png)
![image](https://user-images.githubusercontent.com/589439/143724167-70721d41-12f2-4322-b611-3f07df92d344.png)
![image](https://user-images.githubusercontent.com/589439/143724450-3e95cb75-0dba-45da-9abf-7de026d3b8f3.png)
![image](https://user-images.githubusercontent.com/589439/143724459-afaf363f-dd92-494b-9707-5400f409d05a.png)
![image](https://user-images.githubusercontent.com/589439/143724476-77609fc2-e5a7-4773-94d9-799f2b78be6f.png)
![image](https://user-images.githubusercontent.com/589439/143724492-3036d310-3569-4820-9087-daca2bf9869f.png)
![image](https://user-images.githubusercontent.com/589439/143724167-70721d41-12f2-4322-b611-3f07df92d344.png)
![image](https://user-images.githubusercontent.com/589439/143727424-3d4930ef-647a-42cc-838a-ea7284dbda2a.png)
![image](https://user-images.githubusercontent.com/589439/143727464-598963b9-73c6-4b65-a617-5eecf454f4e9.png)
![image](https://user-images.githubusercontent.com/589439/143727479-6828fc05-4672-4c60-8a21-f1fe6e97d0ea.png)
![image](https://user-images.githubusercontent.com/589439/143727509-2313c55c-bc6d-4451-91f1-f4424fac580a.png)
![image](https://user-images.githubusercontent.com/589439/143727576-96f5f991-0493-4417-94e8-e12ab8bfd769.png)
![image](https://user-images.githubusercontent.com/589439/143727645-23bc99ea-105b-455b-b03f-1737892d3b9a.png)
![image](https://user-images.githubusercontent.com/589439/143729065-f90e8ed0-07ac-4c77-8e4b-8d4c7fcffdef.png)
![image](https://user-images.githubusercontent.com/589439/143729154-00111bfe-534d-4403-bcab-92e3adf032ee.png)
![image](https://user-images.githubusercontent.com/589439/143729162-ad6f82c6-643e-4ec0-a082-75842c237053.png)
![image](https://user-images.githubusercontent.com/589439/143729232-16e479b2-527e-4b0f-94b0-e43bd08cfba8.png)
![image](https://user-images.githubusercontent.com/589439/143729360-d16987b0-25ab-42db-a08d-66bae5576443.png)
![image](https://user-images.githubusercontent.com/589439/143729413-dffdd2dc-d0cb-40aa-8b0f-fd567b2a527c.png)
![image](https://user-images.githubusercontent.com/589439/143729441-e43fde75-76ed-489d-acef-56fea5ddf539.png)
![image](https://user-images.githubusercontent.com/589439/143729376-8e7db409-6651-4a55-90a7-8750d77d5838.png)
![image](https://user-images.githubusercontent.com/589439/143729521-c7b0fc38-baf0-4701-9032-dba324497f5e.png)
![image](https://user-images.githubusercontent.com/589439/143730115-59cf1d93-b27b-4902-a39b-522551733281.png)
![image](https://user-images.githubusercontent.com/589439/143730111-33db8027-2c8e-41e8-98a6-9e5e45984fc5.png)
![image](https://user-images.githubusercontent.com/589439/143797035-060165d6-d462-4160-b9f0-a2b31bdd3d72.png)
![image](https://user-images.githubusercontent.com/589439/143808844-e4244060-5842-41e2-868d-7a75c57a3c21.png)
![image](https://user-images.githubusercontent.com/589439/143809035-2ae69802-7929-47a6-a445-12b571cacd14.png)
![image](https://user-images.githubusercontent.com/589439/143809423-cea91ff5-916f-4c03-b7c3-e4eb625756a4.png)
![image](https://user-images.githubusercontent.com/589439/143809877-6e766d73-ff1c-405f-bd6f-600a58736b25.png)
![image](https://user-images.githubusercontent.com/589439/143809929-1e119a3b-0239-4144-bece-a1d9aa7d51bf.png)
![image](https://user-images.githubusercontent.com/589439/143809965-7997fd22-e172-4360-af13-8c0d65b83f4e.png)
![image](https://user-images.githubusercontent.com/589439/143809992-3a41471a-dd02-4a3e-acea-96b7a7c3a674.png)
![image](https://user-images.githubusercontent.com/589439/143810068-5f175928-4e4d-4820-8b14-067a31b35cd6.png)
![image](https://user-images.githubusercontent.com/589439/143810077-6bfb77d3-4643-4129-a8c4-0b4fbf196b43.png)
![image](https://user-images.githubusercontent.com/589439/143810093-f8508bb1-5728-4010-b87b-21f4aed74e73.png)
![image](https://user-images.githubusercontent.com/589439/143810115-c88787cb-3cae-433a-93c8-712a25db0c78.png)
![image](https://user-images.githubusercontent.com/589439/143810872-231209ca-eb71-4bd2-930d-3527fbaaace0.png)
![image](https://user-images.githubusercontent.com/589439/143810896-a9875ab8-b9ab-4ced-ad49-c47ea321a052.png)
![image](https://user-images.githubusercontent.com/589439/143810915-c9428405-1f00-462d-8a80-2d1467c95e7b.png)
![image](https://user-images.githubusercontent.com/589439/143810942-972f34b4-b7a4-4532-9e8d-6f6bcc01ac9f.png)
![image](https://user-images.githubusercontent.com/589439/143810970-69367200-b71e-481f-b813-3d447e154bb3.png)
![image](https://user-images.githubusercontent.com/589439/143811032-4adc40ef-fa0e-4596-88b5-2a24610cdaf3.png)
![image](https://user-images.githubusercontent.com/589439/143811081-edaa58f5-d3e6-40c6-9dab-f19e547d090e.png)
![image](https://user-images.githubusercontent.com/589439/143811255-0b946589-2679-4747-b514-3b91ac2259cd.png)
![image](https://user-images.githubusercontent.com/589439/143811275-488e15be-15bd-4341-8392-834cd68bbcad.png)
| 11,427 | Markdown | 48.90393 | 109 | 0.808874 |
pantelis-classes/omniverse-ai/Wikipages/Editing Synthetic Data Generation (Python API).md | # Synthetic Data in Omniverse from Isaac Sim
Omniverse comes with synthetic data generation samples in Python. These can be found in `~/.local/share/ov/pkg/isaac_sim-2021.2.0/python_samples`.
## Offline Dataset Generation
This example demonstrates how to generate a synthetic dataset offline, using default values, which can then be used for training deep neural networks.
From the package root folder (`~/.local/share/ov/pkg/isaac_sim-2021.2.0/`), run this command to generate synthetic data:
./python.sh standalone_examples/replicator/offline_generation.py
These are the arguments we can use:
1. --scenario: Specify the USD stage to load from omniverse server for dataset generation.
1. --num_frames: Number of frames to record.
1. --max_queue_size: Maximum size of queue to store and process synthetic data. If value of this field is less than or equal to zero, the queue size is infinite.
1. --data_dir: Location where data will be output. Default is ./output
1. --writer_mode: Specify output format - npy or kitti. Default is npy.
When KittiWriter is used with the --writer_mode kitti argument, two more arguments become available.
6. --classes: Which classes to write labels for. Defaults to all classes.
7. --train_size: Number of frames for training set. Defaults to 8.
With arguments, the above command looks like:
./python.sh standalone_examples/replicator/offline_generation.py --scenario omniverse://<server-name>/Isaac/Samples/Synthetic_Data/Stage/warehouse_with_sensors.usd --num_frames 10 --max_queue_size 500
All output data is stored in `~/.local/share/ov/pkg/isaac_sim-2021.2.0/output`.
## Offline Training with TLT
To leverage TLT, we need a dataset in the KITTI format. NVIDIA Transfer Learning Toolkit (TLT) is a Python-based AI toolkit for taking purpose-built pretrained AI models and customizing them with your own data.
### Offline Kitti Dataset Generation
For this, we add the argument --writer_mode kitti and specify the classes, as in this example (omitting an argument makes it use the default):
./python.sh standalone_examples/replicator/offline_generation.py --writer_mode kitti --classes ceiling floor --num_frames 500 --train_size 100
![image](https://user-images.githubusercontent.com/589439/143666365-9cbab570-213f-403b-bdc9-d891025fabac.png)
![image](https://user-images.githubusercontent.com/589439/143666538-47885861-2340-4fca-9507-8a1a66d82fe9.png)
![image](https://user-images.githubusercontent.com/589439/143666560-4a7dd70c-abde-4af8-a1c7-16eab5d99bf3.png)
![omniverse data gen](https://user-images.githubusercontent.com/589439/143667012-183800ff-f197-44a7-9677-d19940a06179.gif)
The Python scripts can be extensively modified to generate more customized datasets (code deep dive to come).
- The output of the synthetic data generation can be found in: `~/.local/share/ov/pkg/isaac_sim-2021.2.0/output`
![image](https://user-images.githubusercontent.com/589439/143666727-f7a06dbc-aba6-410f-8bd5-0aa24ecf38d3.png)
- The dataset is divided into two folders: a training and a test dataset. The training dataset contains **images** and **labels** of the warehouse (a small label-parsing sketch is shown below).
![image](https://user-images.githubusercontent.com/589439/143666820-b12aafdd-f1e1-4c46-889c-34af1c9ca929.png)
![image](https://user-images.githubusercontent.com/589439/143666829-813f9715-3a2d-49f1-9124-5a690681accc.png)
![image](https://user-images.githubusercontent.com/589439/143666852-90d659de-01a0-4685-bf36-42868e1c77d9.png)
![image](https://user-images.githubusercontent.com/589439/143666866-5896317b-1255-4e67-abe7-5f3ff03be288.png)
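Below is a minimal, hedged sketch of how one of the generated KITTI label files could be inspected with plain Python. The path and filename are illustrative, not part of the generated output contract; point the script at a label file from your own output folder.

    # Hypothetical example: inspect one KITTI label file produced by offline_generation.py.
    # The path below is illustrative; adjust it to a label file from your own run.
    label_path = "output/training/labels/000000.txt"

    with open(label_path) as f:
        for line in f:
            fields = line.split()
            cls = fields[0]                          # object class, e.g. "ceiling" or "floor"
            bbox = [float(v) for v in fields[4:8]]   # 2D bounding box: left, top, right, bottom
            print(cls, bbox)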
- The test dataset contains only **images**.
![image](https://user-images.githubusercontent.com/589439/143666874-a453b635-63e6-44e0-94c1-7127e1c7f729.png)
![omniversepicgen](https://user-images.githubusercontent.com/589439/143667064-d0136cd5-9b3e-4b5d-987f-c013ff08d401.gif)
| 3,918 | Markdown | 49.243589 | 218 | 0.782797 |
pantelis-classes/omniverse-ai/Wikipages/Isaac Sim SDK Omniverse Installation.md | ## Prerequisites
Ubuntu 18.04 LTS required
Nvidia drivers 470 or higher
### Installing Nvidia Drivers on Ubuntu 18.04 LTS
sudo apt-add-repository -r ppa:graphics-drivers/ppa
![image](https://user-images.githubusercontent.com/589439/143662835-6d5624b2-b78d-4ff2-acc3-efadc64d58a2.png)
sudo apt update
![image](https://user-images.githubusercontent.com/589439/143662852-f99e89cc-1c28-4039-8c25-95c470de171f.png)
sudo apt remove nvidia*
![image](https://user-images.githubusercontent.com/589439/143662863-5dbc78c5-c175-495e-bd36-5b214557774c.png)
![image](https://user-images.githubusercontent.com/589439/143662877-cd6abe58-973f-4da1-ac1c-9fe5d28a5853.png)
sudo apt autoremove
![image](https://user-images.githubusercontent.com/589439/143662895-53e3155b-e8bf-498b-9bb3-4cbe39e1354a.png)
![image](https://user-images.githubusercontent.com/589439/143662915-70024577-3531-46da-8f6e-ea2d8d230e8a.png)
sudo ubuntu-drivers autoinstall
![image](https://user-images.githubusercontent.com/589439/143662959-6b21b9f4-5462-4b9d-9a29-083fad49eafe.png)
sudo apt install nvidia-driver-470
![image](https://user-images.githubusercontent.com/589439/143662965-5e05ee0d-a48f-4161-a086-ab03bf6854bf.png)
- Restart your PC.
- Run nvidia-smi to make sure you are on the latest nvidia drivers for Isaac.
nvidia-smi
![image](https://user-images.githubusercontent.com/589439/143663079-a9503fd4-75f1-4bb0-bfd8-ada3bd9fa2ec.png)
## Omniverse and Isaac Sim installation (executable)
### 1. Create an NVIDIA developer account. This is required to access some of the downloads as well as to obtain API keys for NVIDIA NGC.
- Go to this <a href="https://developer.nvidia.com/developer-program">link</a> and create an account.
![image](https://user-images.githubusercontent.com/589439/143655734-92f93f94-723b-4a03-aee3-9004ebdfa931.png)
- Fill out your NVIDIA profile.
![image](https://user-images.githubusercontent.com/589439/143655803-423dddd8-398e-49e0-839f-d96a5e655441.png)
### 2. Go to this <a href="https://www.nvidia.com/en-us/omniverse/">Omniverse link</a> and download and install Omniverse.
![image](https://user-images.githubusercontent.com/589439/143158851-a4f7a00b-4f25-40e0-ae2e-2fba3edef08e.png)
- Fill out the form.
![image](https://user-images.githubusercontent.com/589439/143158880-17506781-abc2-4188-aca3-4546dcb475f9.png)
- Click the download link for Linux.
![image](https://user-images.githubusercontent.com/589439/143158912-97fb24ad-8b49-432e-a3d7-4badb0977714.png)
- Download and save the AppImage file to your ~/Downloads folder.
![image](https://user-images.githubusercontent.com/589439/143158967-afad1831-822f-4440-9a4b-9248c909007d.png)
- Run these commands to execute the AppImage.
cd ~/Downloads
ls
chmod +x omniverse-launcher-linux.AppImage
./omniverse-launcher-linux.AppImage
![image](https://user-images.githubusercontent.com/589439/143656306-85f1aefd-a6a8-4f07-a2e9-b7153ff175ce.png)
### 3. Login to Omniverse to install Isaac Sim 2021.
- Login with your NVIDIA credentials.
![image](https://user-images.githubusercontent.com/589439/143160948-90380e23-e8cc-42b3-8933-4d88c5c9bc90.png)
- Accept the terms of agreement.
![image](https://user-images.githubusercontent.com/589439/143161008-59913f3c-cfde-4c9f-93d4-609dc0346469.png)
- Click continue. (default paths)
![image](https://user-images.githubusercontent.com/589439/143161046-21afc550-6bf7-450c-b023-3296de59d7b4.png)
- Install cache.
![image](https://user-images.githubusercontent.com/589439/143161192-9936a489-e81d-4ccc-a2e0-caf120ce92c4.png)
### 4. Installing Isaac through Omniverse.
- Click the Exchange tab in Omniverse.
![image](https://user-images.githubusercontent.com/589439/143165080-9daa5e96-99c0-4e60-9a40-ff4f77944311.png)
- Search for Isaac and Click Isaac Sim.
![image](https://user-images.githubusercontent.com/589439/143165387-659a75bf-ba62-49e4-9bab-320b0da9eeb1.png)
- Click install.
![image](https://user-images.githubusercontent.com/589439/143165778-75f9cbea-b93b-4c0a-9661-269ec0e643f5.png)
### 5. Go to the nucleus tab and create a nucleus local server to run the Omniverse Isaac Sim Samples.
- Create your local nucleus account by clicking the Nucleus tab in Omniverse.
- Click Add Local Nucleus Service.
![image](https://user-images.githubusercontent.com/589439/143163402-c38ef3e5-64a8-437f-8a4c-7f978b37e40b.png)
- Click Next. (Default Path)
![image](https://user-images.githubusercontent.com/589439/143163446-5fa6c2bc-6437-4239-bcd7-5be8f9159de7.png)
- Create Administrator Account.
- Go to this <a href="https://developer.nvidia.com/nvidia-isaac-sim-assets-20211">link</a> and download the Isaac Sim Assets.
![image](https://user-images.githubusercontent.com/589439/143163494-95fba91c-12b3-4228-ae21-39ce639d66b4.png)
- Unzip the archive by going to your Downloads folder, right-clicking isaac-sim-assets-2021.1.1.zip, and choosing "Extract here".
![image](https://user-images.githubusercontent.com/589439/143657912-d33c71f8-1965-4ca2-b06c-3d0790ffd1e4.png)
- Log into the Nucleus Service with the credentials you created.
![image](https://user-images.githubusercontent.com/589439/143163725-d7b1a5ae-2391-4da0-9a70-f58ce063eb38.png)
- Create an Isaac Folder. (Right click localhost)
![image](https://user-images.githubusercontent.com/589439/143164075-7cfacb0b-a2e2-4e29-a63f-85316f585a5e.png)
![image](https://user-images.githubusercontent.com/589439/143164125-851ba73c-0cc8-4555-b5d8-769d54625d8d.png)
![image](https://user-images.githubusercontent.com/589439/143657335-7499d95b-d4e0-44bd-88f9-87f4d73a9de9.png)
- Drag and drop the files in the isaac-sim-assets-2021.1.1 folder into the Isaac folder in Omniverse. (NOT THE .ZIP; THE FILES IN THE FOLDER THAT WAS CREATED WHEN YOU EXTRACTED IT).
![image](https://user-images.githubusercontent.com/589439/143666284-5ff41514-5c89-4cc7-afa0-b17ed9003b61.png)
- Click upload.
![image](https://user-images.githubusercontent.com/589439/143657451-f9792fd1-e085-4850-a5b5-1ccbe9d4d4e5.png)
![image](https://user-images.githubusercontent.com/589439/143666323-eb172e58-d0cb-4228-af31-f9f7daf43d19.png)
### 6. Now launch Isaac Sim from the Library Omniverse tab within Omniverse.
- Click Launch in the Library Tab of Omniverse.
![image](https://user-images.githubusercontent.com/589439/143657605-6b09b104-698d-4eba-b5f7-e027eee033eb.png)
- Click Start with the default settings with "Isaac Sim" selected.
![image](https://user-images.githubusercontent.com/589439/143657653-c3d31131-1da7-4919-b7dd-8a9555c4aba6.png)
- Once Isaac Sim has finished loading, log in to localhost with the browser window that opened.
![image](https://user-images.githubusercontent.com/589439/143658289-5d6ed582-e15f-4ca7-b3dd-b7cd1d37a2fb.png)
![image](https://user-images.githubusercontent.com/589439/143658399-7538b399-a050-4468-842f-32cfe782bf80.png)
From here we can launch the Isaac Sim application. Currently there is no way to generate KITTI-formatted synthetic data (which is required for NVIDIA's Transfer Learning Toolkit) from the domain randomizer within the application itself.
For this, we need to use Omniverse's built-in Python environment.
## Python API Installation
1. Using the Linux command line interface (terminal), go to the package's root folder (`~/.local/share/ov/pkg/isaac_sim-2021.2.0/`).
cd ~/.local/share/ov/pkg/isaac_sim-2021.2.0/
ls
![image](https://user-images.githubusercontent.com/589439/143659975-91da9c57-e9c0-4c41-a208-c02010656a83.png)
2. Run the following command to get all the required dependencies:
./python.sh -m pip install -r requirements.txt
![image](https://user-images.githubusercontent.com/589439/143660049-8e2288b8-14c4-4503-a4d4-56fb45574849.png)
| 7,828 | Markdown | 38.741117 | 234 | 0.774527 |
pantelis-classes/omniverse-ai/Wikipages/TAO (NVIDIA Train, Adapt, and Optimize).md | All instructions stem from this <a href="https://docs.nvidia.com/tao/tao-toolkit/text/tao_toolkit_quick_start_guide.html">Nvidia Doc</a>.
# Requirements
### Hardware Requirements (Recommended)
32 GB system RAM
32 GB of GPU RAM
8 core CPU
1 NVIDIA GPU
100 GB of SSD space
### Hardware Requirements (REQUIRED)
- TAO Toolkit is supported on **A100**, **V100** and **RTX 30x0 GPUs**.
# Login to the NGC docker registry.
Login to the NGC docker registry:
Use the command
docker login nvcr.io
and enter the following credentials:
a. Username: "$oauthtoken"
b. Password: "YOUR_NGC_API_KEY"
- Where YOUR_NGC_API_KEY corresponds to the key you generated from step 3.
![image](https://user-images.githubusercontent.com/589439/143663405-5323b62f-74a8-409f-80a8-c2c6ad961497.png)
# Installing TAO Toolkit
- TAO Toolkit is a Python pip package that is hosted on the NVIDIA PyIndex. The package uses the Docker REST API under the hood to interact with the NGC Docker registry to pull and instantiate the underlying docker containers. You must have an NGC account and an API key associated with your account. See the Installation Prerequisites section for details on creating an NGC account and obtaining an API key.
## 1. Create a new virtualenv using virtualenvwrapper
- Click this <a href="https://python-guide-cn.readthedocs.io/en/latest/dev/virtualenvs.html"> link</a> to understand how virtual environments in Python work.
- Make sure you have virtualenv installed by checking its version. (Instructions are on this <a href="https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation#1-create-new-python-virtual-environment">page</a> of the wiki.)
virtualenv --version
![image](https://user-images.githubusercontent.com/589439/143723668-73111ae8-0ac5-4729-b89b-481d29b25d16.png)
## 2. Define the environment variable called VIRTUALENVWRAPPER_PYTHON.
- Run this command to see where your python is located.
which python3
![image](https://user-images.githubusercontent.com/589439/143723824-968874c9-5f8e-44cc-a535-d0d336a72b78.png)
- Define the environment variable of your Python location.
export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3
![image](https://user-images.githubusercontent.com/589439/143723906-baf552bc-e9d1-435b-8d43-553f6f0a6707.png)
- Run this command to make sure the environment variable was created. (There should be red output with the variable name.)
env | grep 'VIRTUALENVWRAPPER_PYTHON'
![image](https://user-images.githubusercontent.com/589439/143723930-c9c8658f-339d-4693-894a-daf70dea28ae.png)
- Run this command.
source `which virtualenvwrapper.sh`
- Run this command to create a virtualenv named "TAO".
mkvirtualenv TAO -p $VIRTUALENVWRAPPER_PYTHON
![image](https://user-images.githubusercontent.com/589439/143724459-afaf363f-dd92-494b-9707-5400f409d05a.png)
- You should now see (TAO) prepended to your prompt in the CLI.
![image](https://user-images.githubusercontent.com/589439/143724476-77609fc2-e5a7-4773-94d9-799f2b78be6f.png)
## Instructions on how to activate/deactivate the virtualenv.
- When you are done with your session, you may deactivate your virtualenv using the deactivate command:
deactivate
![image](https://user-images.githubusercontent.com/589439/143724159-ae6c0578-14e4-463b-8287-ef4147ff0f34.png)
- You may re-instantiate this created virtualenv env using the workon command.
workon TAO
![image](https://user-images.githubusercontent.com/589439/143724492-3036d310-3569-4820-9087-daca2bf9869f.png)
## 3. Download Jupyter Notebook.
- TAO Toolkit provides sample notebooks that walk through and prescribe the TAO workflow. These samples are hosted on NGC as a resource and can be downloaded from NGC by executing the command mentioned below.
- Run these commands to set up your notebook.
workon TAO
![image](https://user-images.githubusercontent.com/589439/143725152-cbbd609d-6d94-452c-8a48-a2bcf66dc4ab.png)
- Copy the command below and keep pressing Enter until you are in ~/cv_samples_v1.2.0.
wget --content-disposition https://api.ngc.nvidia.com/v2/resources/nvidia/tao/cv_samples/versions/v1.2.0/zip -O cv_samples_v1.2.0.zip
unzip -u cv_samples_v1.2.0.zip -d ./cv_samples_v1.2.0 && rm -rf cv_samples_v1.2.0.zip && cd ./cv_samples_v1.2.0
![image](https://user-images.githubusercontent.com/589439/143725176-02cc805c-4a98-4afe-9d49-ff17b48e171c.png)
![image](https://user-images.githubusercontent.com/589439/143725173-3c7d7cf0-c3b7-487a-9ed9-818aa5615e84.png)
![image](https://user-images.githubusercontent.com/589439/143725183-3d1caa61-125e-43fe-be67-683429c272ab.png)
## 4. Start Jupyter Notebook
- Once the notebook samples are downloaded, you may start the notebook using the below commands:
jupyter notebook --ip 0.0.0.0 --port 8888 --allow-root
![image](https://user-images.githubusercontent.com/589439/143725216-d67fe159-5f1f-47b1-8dbe-5c14a4e6a7aa.png)
- Open an internet browser on localhost and navigate to the following URL:
http://0.0.0.0:8888
![image](https://user-images.githubusercontent.com/589439/143725228-4696d70e-ec0b-485c-985b-3bffb83be6ac.png)
- Navigate to ./detectnet_v2/detectnet_v2.ipynb
![image](https://user-images.githubusercontent.com/589439/143725266-806cf049-c46f-4e22-9940-ac4e9d952117.png)
![image](https://user-images.githubusercontent.com/589439/143725290-0778740c-3b39-45b4-8d83-a254f545844c.png)
![image](https://user-images.githubusercontent.com/589439/143725306-14110acd-9a61-460a-be5d-df45a55c5b65.png)
| 5,654 | Markdown | 40.580882 | 408 | 0.759816 |
pantelis-classes/omniverse-ai/Wikipages/_Sidebar.md | # Isaac Sim in Omniverse
* [Home][home]
* [Isaac-Sim-SDK-Omniverse-Installation][Omniverse]
* [Synthetic-Data-Generation][SDG]
* [NVIDIA Transfer Learning Toolkit (TLT) Installation][TLT]
* [NVIDIA TAO][TAO]
* [detectnet_v2 Installation][detectnet_v2]
* [Jupyter Notebook][Jupyter-Notebook]
[home]: https://github.com/pantelis-classes/omniverse-ai/wiki
[Omniverse]: https://github.com/pantelis-classes/omniverse-ai/wiki/Isaac-Sim-SDK-Omniverse-Installation
[SDG]: https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)
[TLT]: https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation
[NTLTSD]: https://github.com/pantelis-classes/omniverse-ai/wiki/Using-NVIDIA-TLT-with-Synthetic-Data
[TAO]: https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)
[detectnet_v2]: https://github.com/pantelis-classes/omniverse-ai/wiki/detectnet_v2-Installation
[Jupyter-Notebook]: https://github.com/pantelis-classes/omniverse-ai/wiki/Jupyter-Notebook
| 1,061 | Markdown | 57.999997 | 112 | 0.782281 |
pantelis-classes/omniverse-ai/Wikipages/home.md | # Learning in Simulated Worlds in Omniverse.
## Wiki Navigation
* [Home][home]
* [Isaac-Sim-SDK-Omniverse-Installation][Omniverse]
* [Synthetic-Data-Generation][SDG]
* [NVIDIA Transfer Learning Toolkit (TLT) Installation][TLT]
* [NVIDIA TAO][TAO]
* [detectnet_v2 Installation][detectnet_v2]
* [Jupyter Notebook][Jupyter-Notebook]
[home]: https://github.com/pantelis-classes/omniverse-ai/wiki
[Omniverse]: https://github.com/pantelis-classes/omniverse-ai/wiki/Isaac-Sim-SDK-Omniverse-Installation
[SDG]: https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)
[TLT]: https://github.com/pantelis-classes/omniverse-ai/wiki/NVIDIA-Transfer-Learning-Toolkit-(TLT)-Installation
[NTLTSD]: https://github.com/pantelis-classes/omniverse-ai/wiki/Using-NVIDIA-TLT-with-Synthetic-Data
[TAO]: https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)
[detectnet_v2]: https://github.com/pantelis-classes/omniverse-ai/wiki/detectnet_v2-Installation
[Jupyter-Notebook]: https://github.com/pantelis-classes/omniverse-ai/wiki/Jupyter-Notebook
<hr />
## Reports
<a href="https://docs.google.com/document/d/1jVXxrNgtOosZw_vAORzomSnmy45G3qK_mmk2B4oJtPg/edit?usp=sharing">Domain Randomization Paper</a><br>
This report provides an in-depth understanding of how Domain Randomization helps perception machine learning tasks such as object detection and/or segmentation.
<a href="https://docs.google.com/document/d/1WAzdqlWE0RUns41-0P951mnsqMR7I2XV/edit?usp=sharing&ouid=112712585131518554614&rtpof=true&sd=true">Final Report</a><br>
This final report contains an in-depth explanation of the hardware/software used, the methods used to collect the data, an explanation of the data collected, trained, and pruned, and the overall conclusions drawn from the trained and pruned datasets.
pantelis-classes/omniverse-ai/Wikipages/NVIDIA Transfer Learning Toolkit (TLT) Installation.md | # Installing the Pre-requisites
## 1. Install docker-ce:
### * Set up repository:
Update apt package index and install packages.
sudo apt-get update
![image](https://user-images.githubusercontent.com/589439/143660967-37eb6626-62c0-4afa-af3a-c43a3c172e85.png)
sudo apt-get install \
ca-certificates \
curl \
gnupg \
lsb-release
- The following image shows these dependencies already installed.
![image](https://user-images.githubusercontent.com/589439/143660985-4ae4366b-8d28-4514-b1df-bd7fe03e581d.png)
Add Docker's official GPG key:
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
![image](https://user-images.githubusercontent.com/589439/143661077-2d0ce142-be2f-4ab6-ad99-a685fa709182.png)
### * Install Docker Engine:
Update the apt package index, and install the latest version of Docker Engine.
sudo apt-get update
![image](https://user-images.githubusercontent.com/589439/143661094-a2b86161-c37f-42fd-9110-34523343f65a.png)
sudo apt-get install docker-ce docker-ce-cli containerd.io
![image](https://user-images.githubusercontent.com/589439/143661447-8fa25b3b-1c79-470d-b962-88c21bd56f63.png)
Verify that Docker Engine is installed correctly by running the hello-world image.
sudo docker run hello-world
![image](https://user-images.githubusercontent.com/589439/143661433-d67e18ac-c098-4665-b7ba-127e397b0df6.png)
### * Manage Docker as a non-root user:
Create the docker group.
sudo groupadd docker
![image](https://user-images.githubusercontent.com/589439/143661491-c43c3f94-90d7-47d4-8bd4-dee974f67838.png)
Add your user to the docker group.
sudo usermod -aG docker $USER
![image](https://user-images.githubusercontent.com/589439/143661478-cff5282c-e864-4821-a084-7f1f8360b4bc.png)
Log out and log back in so that your group membership is re-evaluated.
![image](https://user-images.githubusercontent.com/589439/143661541-098c52b5-0c54-46c9-9d14-fd0250f27a1e.png)
Verify that you can run docker commands without sudo.
docker run hello-world
![image](https://user-images.githubusercontent.com/589439/143661708-6baceb75-a047-4f75-8b51-9496e6908d15.png)
- If you get the WARNING error in the above image, run these two commands. Otherwise, skip to step 2.
sudo chown "$USER":"$USER" /home/"$USER"/.docker -R
sudo chmod g+rwx "/home/$USER/.docker" -R
- Run docker run hello-world to double-check that it works now.
docker run hello-world
![image](https://user-images.githubusercontent.com/589439/143661749-52f2103f-19c5-47bb-85b3-0b5069957b87.png)
## 2. Install NVIDIA Container Toolkit:
Setup the stable repository and the GPG key:
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \
&& curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list
![image](https://user-images.githubusercontent.com/589439/143662010-9b31cc9d-bbbe-4aa7-af69-ade75e18ccc6.png)
Install the nvidia-docker2 package (and dependencies) after updating the package listing:
sudo apt-get update
sudo apt-get install -y nvidia-docker2
![image](https://user-images.githubusercontent.com/589439/143662034-8e020c83-780b-40d0-a17b-ad0cdfd4210f.png)
Restart the Docker daemon to complete the installation after setting the default runtime:
sudo systemctl restart docker
![image](https://user-images.githubusercontent.com/589439/143662068-dfcad334-8466-4c9a-9cd0-e08a23f31b66.png)
At this point, a working setup can be tested by running a base CUDA container:
sudo docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi
- This should result in a console output shown below:
![image](https://user-images.githubusercontent.com/589439/143663183-0bdb6ee0-84be-4788-bdc7-0ab23e9e5d41.png)
## 3. Get an NVIDIA NGC account and API key:
- Go to <a href="https://ngc.nvidia.com/signin">NGC</a> and click the Transfer Learning Toolkit container in the Catalog tab. This message is displayed: “Sign in to access the PULL feature of this repository”.
![image](https://user-images.githubusercontent.com/589439/143662546-8e8053f4-9aa9-40bb-bb8c-432d652db64b.png)
- Enter your Email address and click Next, or click Create an Account.
- Choose your organization when prompted for Organization/Team.
- Click Sign In.
- Once redirected to this <a href="https://catalog.ngc.nvidia.com/">page</a> with your account created, click your profile in the top-right corner and click "Setup".
![image](https://user-images.githubusercontent.com/589439/143662652-a6595488-44e6-494e-8e11-17056209a3fd.png)
- Click Get API Key.
![image](https://user-images.githubusercontent.com/589439/143662747-cda7d160-6f1f-41dc-815f-65bf13ba7bc7.png)
- Click Generate API Key.
![image](https://user-images.githubusercontent.com/589439/143662782-9bebeb67-26ec-4980-9624-1a91f0d1a6cc.png)
- Your API key and username will be shown under the Docker™ section. Copy the text with your username and API key and save it in a file somewhere.
![image](https://user-images.githubusercontent.com/589439/143663255-907bff87-ae02-4c4d-8400-ef6a914c3aae.png)
![image](https://user-images.githubusercontent.com/589439/143663347-4ec70e43-da4d-4b97-bd26-b336586bc9d7.png)
## 4. Login to the NGC docker registry:
Use the command
docker login nvcr.io
and enter the following credentials:
a. Username: "$oauthtoken"
b. Password: "YOUR_NGC_API_KEY"
- Where YOUR_NGC_API_KEY corresponds to the key you generated from step 3.
![image](https://user-images.githubusercontent.com/589439/143663405-5323b62f-74a8-409f-80a8-c2c6ad961497.png)
# Installing TLT
The Transfer Learning Toolkit (TLT) is a Python pip package that is hosted on the NVIDIA PyIndex. The package uses the Docker REST API under the hood to interact with the NGC Docker registry to pull and instantiate the underlying docker containers.
## 1. Create new Python virtual environment.
### Python virtualenv setup using virtualenvwrapper
Install via pip:
pip3 install virtualenv
![image](https://user-images.githubusercontent.com/589439/143667101-35f5e890-f96d-4a24-8f85-4db1ff95ab8f.png)
pip3 install virtualenvwrapper
![image](https://user-images.githubusercontent.com/589439/143667117-cef7ead6-5ca1-4f93-b759-4caa9c8dca76.png)
| 6,554 | Markdown | 38.727272 | 247 | 0.754043 |
pantelis-classes/omniverse-ai/Wikipages/_Footer.md | ## Authors
### <a href="https://github.com/dfsanchez999">Diego Sanchez</a> | <a href="https://harp.njit.edu/~jga26/">Jibran Absarulislam</a> | <a href="https://github.com/markkcruz">Mark Cruz</a> | <a href="https://github.com/sppatel2112">Sapan Patel</a>
## Supervisor
### <a href="https://pantelis.github.io/">Dr. Pantelis Monogioudis</a>
## Credits
### <a href="https://developer.nvidia.com/nvidia-omniverse-platform">NVIDIA Omniverse</a> | 446 | Markdown | 39.63636 | 244 | 0.686099 |
pantelis-classes/omniverse-ai/Wikipages/detectnet_v2 Installation.md | # Installing running detectnet_v2 in a jupyter notebook
## Setup File Structures.
- Run these commands to create the correct file structure.
cd ~
mkdir tao
mv cv_samples_v1.2.0/ tao
cd tao/cv_samples_v1.2.0/
rm -r detectnet_v2
![image](https://user-images.githubusercontent.com/589439/143797815-904b6033-f5db-43ac-a736-d653d4d19cfe.png)
![image](https://user-images.githubusercontent.com/589439/143797903-cd33e342-e45d-44ca-a8ac-6efb6d2cd18f.png)
- Download the detectnet_v2.zip from this <a href="https://github.com/pantelis-classes/omniverse-ai/raw/main/detectnet_v2.zip">link</a>.
![image](https://user-images.githubusercontent.com/589439/143727479-6828fc05-4672-4c60-8a21-f1fe6e97d0ea.png)
- Run this command to move the .zip from your Downloads folder to your cv_samples_v1.2.0 folder.
mv ~/Downloads/detectnet_v2.zip ~/tao/cv_samples_v1.2.0/
![image](https://user-images.githubusercontent.com/589439/143798005-a702ed00-5971-4ece-b60a-d05e14fa09b9.png)
- Run this command to unzip the folder.
unzip ~/tao/cv_samples_v1.2.0/detectnet_v2.zip -d detectnet_v2
![image](https://user-images.githubusercontent.com/589439/143798404-ae066e4a-d573-4144-a1ec-b5410db9efb7.png)
![image](https://user-images.githubusercontent.com/589439/143798434-9d14756d-2bdb-4f68-88cb-0e5610562034.png)
- Run these commands to copy your dataset to the TAO folder. (You generated this dataset in this <a href="https://github.com/pantelis-classes/omniverse-ai/wiki/Synthetic-Data-Generation-(Python-API)#offline-training-with-tlt">wiki page</a>.)
cp -r ~/.local/share/ov/pkg/isaac_sim-2021.2.0/output/testing/ ~/tao/cv_samples_v1.2.0/detectnet_v2/workspace/tao-experiment/data/
cp -r ~/.local/share/ov/pkg/isaac_sim-2021.2.0/output/training/ ~/tao/cv_samples_v1.2.0/detectnet_v2/workspace/tao-experiment/data/
![image](https://user-images.githubusercontent.com/589439/143798514-be064b8e-18e9-4f21-97b2-ef72820190a8.png)
![image](https://user-images.githubusercontent.com/589439/143798539-d7555c9c-87c3-4037-819a-ee32aca9fa44.png)
- Navigate to Home -> tao -> cv_samples_v1.2.0 -> detectnet_v2
- Open the detectnet_v2.ipynb file.
![image](https://user-images.githubusercontent.com/589439/143729232-16e479b2-527e-4b0f-94b0-e43bd08cfba8.png)
- Scroll down to section "0. Set up env variables and map drives" (Ctrl + F)
![image](https://user-images.githubusercontent.com/589439/143729413-dffdd2dc-d0cb-40aa-8b0f-fd567b2a527c.png)
- Replace "diego" with your username. (TIP: whoami in BASH)
![image](https://user-images.githubusercontent.com/589439/143729441-e43fde75-76ed-489d-acef-56fea5ddf539.png)
![image](https://user-images.githubusercontent.com/589439/143729521-c7b0fc38-baf0-4701-9032-dba324497f5e.png) | 2,798 | Markdown | 43.428571 | 240 | 0.74732 |
pantelis-classes/omniverse-ai/Wikipages/Jupyter Notebook.md | # Object Detection using TAO DetectNet_v2
- Transfer learning is the process of transferring learned features from one application to another. It is a commonly used training technique where you use a model trained on one task and re-train to use it on a different task.
- Train Adapt Optimize (TAO) Toolkit is a simple and easy-to-use Python based AI toolkit for taking purpose-built AI models and customizing them with users' own data.
## How to use the notebook.
- Please refer to the actual Jupyter notebook for more in-depth explanations of the code.
- Each cell runs some lines of code. Start from the top of the notebook and run each cell by clicking the play button or using **Shift + Enter**.
![image](https://user-images.githubusercontent.com/589439/143809035-2ae69802-7929-47a6-a445-12b571cacd14.png)
- Some of the cells may take a long time to complete. Please do not skip cells; wait for the output to finish.
## 0. Set up env variables and map drives
![image](https://user-images.githubusercontent.com/589439/143808844-e4244060-5842-41e2-868d-7a75c57a3c21.png)
![image](https://user-images.githubusercontent.com/589439/143809423-cea91ff5-916f-4c03-b7c3-e4eb625756a4.png)
- We set up the env variables by linking paths, setting the number of GPUs, and choosing an encoding style.
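As a rough illustration only (the variable names and values below are assumptions, not copied from the notebook), the environment setup in this cell amounts to something like the following Python:

    # Hypothetical sketch of the kind of values this cell sets up; the actual
    # notebook defines its own variable names, key, and paths.
    import os

    os.environ["KEY"] = "tlt_encode"    # encoding key used to protect trained models
    os.environ["NUM_GPUS"] = "1"        # number of GPUs used for training
    os.environ["LOCAL_PROJECT_DIR"] = os.path.expanduser(
        "~/tao/cv_samples_v1.2.0/detectnet_v2"  # local workspace mapped into the TAO container
    )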
## 1. Install the TAO launcher
- This step should already have been completed in the previous wiki pages. Please refer to this <a href="https://github.com/pantelis-classes/omniverse-ai/wiki/TAO-(NVIDIA-Train,-Adapt,-and-Optimize)#login-to-the-ngc-docker-registry">link</a>.
![image](https://user-images.githubusercontent.com/589439/143809877-6e766d73-ff1c-405f-bd6f-600a58736b25.png)
## 2. Prepare dataset and pre-trained model
![image](https://user-images.githubusercontent.com/589439/143809929-1e119a3b-0239-4144-bece-a1d9aa7d51bf.png)
![image](https://user-images.githubusercontent.com/589439/143809965-7997fd22-e172-4360-af13-8c0d65b83f4e.png)
![image](https://user-images.githubusercontent.com/589439/143809992-3a41471a-dd02-4a3e-acea-96b7a7c3a674.png)
![image](https://user-images.githubusercontent.com/589439/143810068-5f175928-4e4d-4820-8b14-067a31b35cd6.png)
![image](https://user-images.githubusercontent.com/589439/143810077-6bfb77d3-4643-4129-a8c4-0b4fbf196b43.png)
![image](https://user-images.githubusercontent.com/589439/143810093-f8508bb1-5728-4010-b87b-21f4aed74e73.png)
![image](https://user-images.githubusercontent.com/589439/143810115-c88787cb-3cae-433a-93c8-712a25db0c78.png)
## 3. Provide training specification
![image](https://user-images.githubusercontent.com/589439/143810872-231209ca-eb71-4bd2-930d-3527fbaaace0.png)
## 4. Run TAO training
![image](https://user-images.githubusercontent.com/589439/143810896-a9875ab8-b9ab-4ced-ad49-c47ea321a052.png)
## 5. Evaluate the trained model
![image](https://user-images.githubusercontent.com/589439/143811275-488e15be-15bd-4341-8392-834cd68bbcad.png)
## 6. Prune the trained model
![image](https://user-images.githubusercontent.com/589439/143810915-c9428405-1f00-462d-8a80-2d1467c95e7b.png)
## 7. Retrain the pruned model
![image](https://user-images.githubusercontent.com/589439/143810942-972f34b4-b7a4-4532-9e8d-6f6bcc01ac9f.png)
![image](https://user-images.githubusercontent.com/589439/143810970-69367200-b71e-481f-b813-3d447e154bb3.png)
## 8. Evaluate the retrained model
![image](https://user-images.githubusercontent.com/589439/143811255-0b946589-2679-4747-b514-3b91ac2259cd.png)
## 9. Visualize inferences
![image](https://user-images.githubusercontent.com/589439/143811032-4adc40ef-fa0e-4596-88b5-2a24610cdaf3.png)
![image](https://user-images.githubusercontent.com/589439/143811081-edaa58f5-d3e6-40c6-9dab-f19e547d090e.png) | 3,749 | Markdown | 45.874999 | 243 | 0.787143 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/CODE_OF_CONDUCT.md | ## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
| 309 | Markdown | 60.999988 | 105 | 0.789644 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/CONTRIBUTING.md | # Contributing Guidelines
Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
documentation, we greatly value feedback and contributions from our community.
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
information to effectively respond to your bug report or contribution.
## Reporting Bugs/Feature Requests
We welcome you to use the GitHub issue tracker to report bugs or suggest features.
When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
* A reproducible test case or series of steps
* The version of our code being used
* Any modifications you've made relevant to the bug
* Anything unusual about your environment or deployment
## Contributing via Pull Requests
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
1. You are working against the latest source on the *main* branch.
2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
To send us a pull request, please:
1. Fork the repository.
2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
3. Ensure local tests pass.
4. Commit to your fork using clear commit messages.
5. Send us a pull request, answering any default questions in the pull request interface.
6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
## Finding contributions to work on
Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
## Code of Conduct
This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
opensource-codeofconduct@amazon.com with any additional questions or comments.
## Security issue notifications
If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
## Licensing
See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
| 3,160 | Markdown | 51.683332 | 275 | 0.792405 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/README.md | # NVIDIA Omniverse Nucleus on Amazon EC2
NVIDIA Omniverse is a scalable, multi-GPU, real-time platform for building and operating metaverse applications, based on Pixar's Universal Scene Description (USD) and NVIDIA RTX technology. USD is a powerful, extensible 3D framework and ecosystem that enables 3D designers and developers to connect and collaborate between industry-leading 3D content creation, rendering, and simulation applications. Omniverse helps individual creators to connect and enhance their 3D artistic process, and enterprises to build and simulate large scale virtual worlds for industrial applications.
With Omniverse, everyone involved in the lifecycle of 3D data has access to high-quality visualizations, authoring, and review tools. Teams do not need additional overhead to manage complex 3D data pipelines. Instead, they can focus on their unique contributions to bring value to the market. Non-technical stakeholders do not need to subject themselves to applications with steep learning curves, nor do results need to be compromised for the sake of iteration reviews.
To support distributed Omniverse users, Nucleus should be deployed in a secure environment. With on-demand compute, storage, and networking resources, AWS infrastructure is well suited to all spatial computing workloads, including Omniverse Nucleus. This repository provides the steps and infrastructure for an Omniverse Enterprise Nucleus Server deployment on Amazon EC2.
## Contents
* [Prerequisites](#prerequisites)
* [Deployment](#deployment)
* [Architecture](#architecture)
* [Troubleshooting](#troubleshooting)
* [Getting Help](#getting-help)
* [Changelog](#changelog)
* [Security](#security)
* [License](#license)
* [References](#references)
## Prerequisites
- AWS CLI - https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
- AWS CDK - https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install
- Docker - https://www.docker.com/products/docker-desktop/
- Python 3.9 or greater - https://www.python.org
- Access to NVIDIA Enterprise Omniverse Nucleus packages - https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/quick_start_tips.html
- A Route53 Public Hosted Zone - https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html
**To learn more, reference the official documentation from NVIDIA:** https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/cloud_aws_ec2.html
## Architecture
![architecture](/diagrams/architecture.png)
## Deployment
### 1. Download Nucleus Deployment Artifacts from NVIDIA
Place them in `./src/tools/nucleusServer/stack`
For example: `./src/tools/nucleusServer/stack/nucleus-stack-2022.1.0+tag-2022.1.0.gitlab.3983146.613004ac.tar.gz`
Consult NVIDIA documentation to find the appropriate packages.
> Note: This deployment includes a templated copy of `nucleus-stack.env`, located at `./src/tools/nucleusServer/templates/nucleus-stack.env`. This template may need to be updated if NVIDIA changes the `nucleus-stack.env` file packaged with their archive.
>
> The same applies to NVIDIA's reverse proxy `nginx.conf` located at `./src/tools/reverseProxy/templates/nginx.conf`
### 2. Configure the .env file
Create `./.env` and set the following variables:
```
export APP_STACK_NAME=omni-app
export AWS_DEFAULT_REGION=us-west-2
# STACK INPUTS
export OMNIVERSE_ARTIFACTS_BUCKETNAME=example-bucket-name
export ROOT_DOMAIN=example-domain.com
export NUCLEUS_SERVER_PREFIX=nucleus
export NUCLEUS_BUILD=nucleus-stack-2022.1.0+tag-2022.1.0.gitlab.3983146.613004ac # from Step 1
export ALLOWED_CIDR_RANGE_01=cidr-range-with-public-access
export DEV_MODE=true
```
> NOTE: This deployment assumes you have a public hosted zone in Route53 for the ROOT_DOMAIN; the deployment will add a CNAME record to that hosted zone.
### 3. Run the deployment
The following script will run cdk deploy. The calling process must be authenticated with sufficient permissions to deploy AWS resources.
```
chmod +x ./deploy.sh
./deploy.sh
```
> NOTE: Deployment requires a running Docker session for building the Python Lambda functions.
> NOTE: It can take a few minutes for the instances to get up and running. After the deployment script finishes, review your EC2 instances and check that they are in a running state.
### 4. Test the connection
Test a connection to `<NUCLEUS_SERVER_PREFIX>.<ROOT_DOMAIN>` from within the ALLOWED_CIDR_RANGE set in the `.env` file. Do so by browsing to `https://<NUCLEUS_SERVER_PREFIX>.<ROOT_DOMAIN>` in your web browser.
The default admin username for the Nucleus server is 'omniverse'. You can find the password in a Secrets Manager resource via the AWS Secrets Manager Console. Alternatively, from the Omniverse WebUI, you can create a new username and password.
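If you prefer to look the password up from a script rather than the console, the same `get_secret_value` call used by the deployment's own Lambda helpers (`src/lambda/common/aws_utils/sm.py`) works from any authenticated shell. A minimal sketch, using a placeholder secret name that you should replace with the actual name or ARN shown in the Secrets Manager console:

```python
import json

import boto3

# Placeholder - copy the real secret name or ARN from the Secrets Manager console.
SECRET_ID = "omni-app-ov-main-login-secret"

sm = boto3.client("secretsmanager")
response = sm.get_secret_value(SecretId=SECRET_ID)

# The stack stores the credentials as a JSON document with a "password" key.
secret = json.loads(response["SecretString"])
print(secret["password"])
```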
## Troubleshooting
### Unable to connect to the Nucleus Server
If you are not able to connect to the Nucleus server, review the status of the Nginx service and the Nucleus docker stack. To do so, connect to your instances from the EC2 Console via Session Manager - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/session-manager.html.
- On the Nginx server, run `sudo journalctl -u nginx.service`; if this produces no output, the Nginx service is not running.
- On the Nucleus server, run `sudo docker ps`; you should see a list of running Nucleus containers.
If there are issues with either of these, it is likely there was an issue with the Lambda and/or SSM run commands that configure the instances. Browse to the Lambda Console (https://us-west-2.console.aws.amazon.com/lambda/home?region=us-west-2#/functions) and search for the respective Lambda Functions:
- STACK_NAME-ReverseProxyConfig-CustomResource
- STACK_NAME-NucleusServerConfig-CustomResource
Review the function CloudWatch Logs.
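The same check can be scripted with boto3 if you would rather not click through the console. The sketch below assumes the default Lambda log group naming (`/aws/lambda/<function name>`) and uses a placeholder function name; substitute the actual custom resource function name from your stack.

```python
import boto3

# Placeholder - replace STACK_NAME with your deployed stack's name.
FUNCTION_NAME = "STACK_NAME-NucleusServerConfig-CustomResource"
LOG_GROUP = f"/aws/lambda/{FUNCTION_NAME}"

logs = boto3.client("logs")

# Fetch the most recently written log stream and print its events.
streams = logs.describe_log_streams(
    logGroupName=LOG_GROUP, orderBy="LastEventTime", descending=True, limit=1
)
for stream in streams["logStreams"]:
    events = logs.get_log_events(
        logGroupName=LOG_GROUP, logStreamName=stream["logStreamName"], limit=50
    )
    for event in events["events"]:
        print(event["message"], end="")
```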
### No service log entries, or unable to restart nitro-enclave service
If there are issues with either of these, it is likely there was an issue with the Lambda and/or SSM run commands that configure the instances. Browse to the Lambda Console and search for the `STACK_NAME-ReverseProxyConfig-CustomResource` Lambda Function, then review the CloudWatch Logs.
At times the Reverse Proxy custom resource Lambda function does not trigger on an initial stack deployment. If the reverse proxy instance is in a running state, but there are no invocations/logs, terminate the instance and give the auto scaling group a few minutes to create another one, and then try again. Afterwards, check the CloudWatch Logs for the Lambda function: `ReverseProxyAutoScalingLifecycleLambdaFunction`
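If you want to recycle the reverse proxy instance without opening the console, the pattern used by `src/lambda/common/aws_utils/ec2.py` can be reproduced in a short script. This is a sketch with a placeholder Auto Scaling group name; look up the real name in the EC2 console for your stack before running it.

```python
import boto3

# Placeholder - use the actual Auto Scaling group name created by your stack.
ASG_NAME = "STACK_NAME-ReverseProxyAutoScalingGroup"

autoscaling = boto3.client("autoscaling")
ec2 = boto3.client("ec2")

groups = autoscaling.describe_auto_scaling_groups(AutoScalingGroupNames=[ASG_NAME])
instances = groups["AutoScalingGroups"][0]["Instances"]
instance_ids = [i["InstanceId"] for i in instances]

# Terminating the instance makes the Auto Scaling group launch a replacement,
# which re-fires the lifecycle hook and the reverse proxy config Lambda.
if instance_ids:
    ec2.terminate_instances(InstanceIds=instance_ids)
```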
### Additional Nginx Commands
View the Nginx service logs:
`sudo journalctl -u nginx.service`
View the Nginx error and access logs:
`sudo cat /var/log/nginx/error.log`
`sudo cat /var/log/nginx/access.log`
Restart Nginx:
`sudo systemctl restart nginx.service`
### Additional Nucleus server notes
Review NVIDIA's Documentation - https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/enterprise/installation/quick_start_tips.html
default base stack and config location: `/opt/ove/`
default omniverse data dir: `/var/lib/omni/nucleus-data`
Interacting with the Nucleus Server docker compose stack:
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml pull`
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml up -d`
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml down`
`sudo docker-compose --env-file ./nucleus-stack.env -f ./nucleus-stack-ssl.yml ps`
Generate new secrets
`sudo rm -fr secrets && sudo ./generate-sample-insecure-secrets.sh`
## Getting Help
If you have questions as you explore this sample project, post them to the Issues section of this repository. To report bugs, request new features, or contribute to this open source project, see [CONTRIBUTING.md](./CONTRIBUTING.md).
## Changelog
To view the history and recent changes to this repository, see [CHANGELOG.md](./CHANGELOG.md)
## Security
See [CONTRIBUTING](./CONTRIBUTING.md) for more information.
## License
This sample code is licensed under the MIT-0 License. See the [LICENSE](./LICENSE) file.
## References
### NVIDIA Omniverse
[Learn more about the NVIDIA Omniverse Platform](https://www.nvidia.com/en-us/omniverse/)
### Omniverse Nucleus
[Learn more about the NVIDIA Omniverse Nucleus](https://docs.omniverse.nvidia.com/prod_nucleus/prod_nucleus/overview.html)
| 8,456 | Markdown | 53.211538 | 581 | 0.786542 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/setup.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="Nucleus Server Tools",
version="1.0",
py_modules=[
'nst'
],
install_requires=[
"boto3",
"python-dotenv",
"Click"
],
entry_points='''
[console_scripts]
nst=nst_cli:main
'''
)
| 576 | Python | 21.192307 | 73 | 0.609375 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst_cli.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
"""
helper tools for omniverse nucleus deployment configuration
"""
# std lib modules
import os
import logging
from pathlib import Path
# 3rd party modules
import click
import nst.logger as logger
pass_config = click.make_pass_decorator(object, ensure=True)
@click.group()
@pass_config
def main(config):
pass
@main.command()
@pass_config
@click.option("--my_opt_arg")
def hello_world(config, my_opt_arg):
logger.info(f"Hello World: {my_opt_arg=}")
@main.command()
@pass_config
@click.option("--server-ip", required=True)
@click.option("--reverse-proxy-domain", required=True)
@click.option("--instance-name", required=True)
@click.option("--master-password", required=True)
@click.option("--service-password", required=True)
@click.option("--data-root", required=True)
def generate_nucleus_stack_env(
config,
server_ip,
reverse_proxy_domain,
instance_name,
master_password,
service_password,
data_root,
):
logger.info(
f"generate_nucleus_stack_env:{server_ip=},{reverse_proxy_domain=},{instance_name=},{master_password=},{service_password=},{data_root=}"
)
tools_path = "/".join(list(Path(__file__).parts[:-1]))
cur_dir_path = "."
template_name = "nucleus-stack.env"
template_path = f"{tools_path}/templates/{template_name}"
output_path = f"{cur_dir_path}/{template_name}"
    if not Path(template_path).is_file():
        raise Exception(f"File not found: {template_path}")
data = ""
with open(template_path, "r") as file:
data = file.read()
data = data.format(
SERVER_IP_OR_HOST=server_ip,
REVERSE_PROXY_DOMAIN=reverse_proxy_domain,
INSTANCE_NAME=instance_name,
MASTER_PASSWORD=master_password,
SERVICE_PASSWORD=service_password,
DATA_ROOT=data_root,
ACCEPT_EULA="1",
SECURITY_REVIEWED="1",
)
with open(f"{output_path}", "w") as file:
file.write(data)
logger.info(output_path)
| 2,391 | Python | 25.876404 | 143 | 0.677123 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/README.md | # Tools for configuring Nucleus Server
The contents of this directory are zipped and then deployed to the Nucleus server | 121 | Markdown | 39.666653 | 81 | 0.826446 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst/__init__.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
| 210 | Python | 41.199992 | 73 | 0.766667 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst/logger.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
LOG_LEVEL = os.getenv('LOG_LEVEL', 'DEBUG')
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
def info(*args):
print(*args)
def debug(*args):
print(*args)
def warning(*args):
print(*args)
def error(*args):
print(*args) | 480 | Python | 20.863635 | 73 | 0.708333 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/rpt_cli.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
"""
helper tools for reverse proxy nginx configuration
"""
# std lib modules
import os
import logging
from pathlib import Path
# 3rd party modules
import click
import rpt.logger as logger
pass_config = click.make_pass_decorator(object, ensure=True)
@click.group()
@pass_config
def main(config):
pass
@main.command()
@pass_config
def hello_world(config):
logger.info(f'Hello World')
@main.command()
@pass_config
@click.option("--cert-arn", required=True)
def generate_acm_yaml(config, cert_arn):
logger.info(f'generate_acm_yaml: {cert_arn=}')
tools_path = '/'.join(list(Path(__file__).parts[:-1]))
cur_dir_path = '.'
template_path = f'{tools_path}/templates/acm.yaml'
output_path = f'{cur_dir_path}/acm.yaml'
logger.info(Path(template_path).is_file())
data = ''
with open(template_path, 'r') as file:
data = file.read()
data = data.format(cert_arn=cert_arn)
with open(f'{output_path}', 'w') as file:
file.write(data)
logger.info(output_path)
@main.command()
@pass_config
@click.option("--domain", required=True)
@click.option("--server-address", required=True)
def generate_nginx_config(config, domain, server_address):
logger.info(f'generate_nginx_config: {domain=}')
nginx_template_path = os.path.join(
os.getcwd(), 'templates', 'nginx.conf')
if Path(nginx_template_path).is_file():
logger.info(f"NGINX template found at: {nginx_template_path}")
else:
raise Exception(
f"ERROR: No NGINX template found at: {nginx_template_path}")
output_path = f'/etc/nginx/nginx.conf'
if Path(output_path).is_file():
logger.info(f"NGINX default configuration found at: {output_path}")
else:
raise Exception(
f"ERROR: No NGINX default configuration found at: {output_path}. Verify NGINX installation.")
data = ''
with open(nginx_template_path, 'r') as file:
data = file.read()
data = data.format(PUBLIC_DOMAIN=domain,
NUCLEUS_SERVER_DOMAIN=server_address)
with open(output_path, 'w') as file:
file.write(data)
logger.info(output_path)
| 2,373 | Python | 24.526881 | 105 | 0.659503 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/setup.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="Reverse Proxy Tools",
version="1.0",
py_modules=["rpt"],
install_requires=["boto3", "python-dotenv", "Click"],
entry_points="""
[console_scripts]
rpt=rpt_cli:main
""",
)
| 532 | Python | 25.649999 | 73 | 0.657895 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/README.md | # Tools for configuring Nginx Reverse Proxy
The contents of this directory are zipped and then deployed to the reverse proxy server | 132 | Markdown | 43.333319 | 87 | 0.825758 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/templates/acm.yaml | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
---
# ACM for Nitro Enclaves config.
#
# This is an example of setting up ACM, with Nitro Enclaves and nginx.
# You can take this file and then:
# - copy it to /etc/nitro_enclaves/acm.yaml;
# - fill in your ACM certificate ARN in the `certificate_arn` field below;
# - make sure /etc/nginx/nginx.conf is set up to:
# - use the pkcs11 SSL engine, and;
# - include the stanza file configured below (under `NginxStanza`)
# somewhere in the nginx.conf `server` section;
# - start the nitro-enclaves-acm service.
#
# Enclave general configuration
enclave:
# Number of vCPUs to be assigned to the enclave
cpu_count: 2
# Memory (in MiB) to be assigned to the enclave
memory_mib: 256
tokens:
# A label for this PKCS#11 token
- label: nginx-acm-token
# Configure a managed token, sourced from an ACM certificate.
source:
Acm:
# The certificate ARN
# Note: this certificate must have been associated with the
# IAM role assigned to the instance on which ACM for
# Nitro Enclaves is run.
certificate_arn: "{cert_arn}"
target:
NginxStanza:
# Path to the nginx stanza to be written by the ACM service whenever
# the certificate configuration changes (e.g. after a certificate renewal).
# This file must be included from the main nginx config `server` section,
# as it will contain the TLS nginx configuration directives.
path: /etc/pki/nginx/nginx-acm.conf
# Stanza file owner (i.e. the user nginx is configured to run as).
user: nginx
| 1,689 | YAML | 39.238094 | 83 | 0.68206 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/customResources/reverseProxyConfig/index.py | import os
import logging
import json
from crhelper import CfnResource
import aws_utils.ssm as ssm
import aws_utils.ec2 as ec2
import config.reverseProxy as config
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
helper = CfnResource(
json_logging=False, log_level="DEBUG", boto_level="CRITICAL"
)
@helper.create
def create(event, context):
logger.info("Create Event: %s", json.dumps(event, indent=2))
response = update_config(
event["ResourceProperties"]["STACK_NAME"],
event["ResourceProperties"]["ARTIFACTS_BUCKET_NAME"],
event["ResourceProperties"]["FULL_DOMAIN"],
event["ResourceProperties"]["RP_AUTOSCALING_GROUP_NAME"],
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
@helper.update
def update(event, context):
logger.info("Update Event: %s", json.dumps(event, indent=2))
response = update_config(
event["ResourceProperties"]["STACK_NAME"],
event["ResourceProperties"]["ARTIFACTS_BUCKET_NAME"],
event["ResourceProperties"]["FULL_DOMAIN"],
event["ResourceProperties"]["RP_AUTOSCALING_GROUP_NAME"],
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
def update_config(
stack_name,
artifacts_bucket_name,
full_domain,
rp_autoscaling_group_name
):
# get nucleus main instance id
nucleus_instances = []
try:
nucleus_instances = ec2.get_instances_by_tag(
"Name", f"{stack_name}/NucleusServer")
except Exception as e:
raise Exception(
f"Failed to get nucleus instances by name. {e}")
logger.info(f"Nucleus Instances: {nucleus_instances}")
# get nucleus main hostname
nucleus_hostname = ec2.get_instance_private_dns_name(nucleus_instances[0])
logger.info(f"Nucleus Hostname: {nucleus_hostname}")
# generate config for reverse proxy servers
commands = []
try:
commands = config.get_config(
artifacts_bucket_name, nucleus_hostname, full_domain)
logger.debug(commands)
except Exception as e:
raise Exception(f"Failed to get Reverse Proxy config. {e}")
# get reverse proxy instance ids
rp_instances = ec2.get_autoscaling_instance(rp_autoscaling_group_name)
if rp_instances is None:
return None
logger.info(rp_instances)
# run config commands
response = []
for i in rp_instances:
r = ssm.run_commands(
i, commands, document="AWS-RunShellScript"
)
response.append(r)
return response
@helper.delete
def delete(event, context):
logger.info("Delete Event: %s", json.dumps(event, indent=2))
def handler(event, context):
helper(event, context)
| 2,776 | Python | 26.495049 | 78 | 0.667147 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/customResources/nucleusServerConfig/index.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
import json
from crhelper import CfnResource
import aws_utils.ssm as ssm
import aws_utils.sm as sm
import config.nucleus as config
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
helper = CfnResource(json_logging=False, log_level="DEBUG",
boto_level="CRITICAL")
@helper.create
def create(event, context):
logger.info("Create Event: %s", json.dumps(event, indent=2))
instanceId = event["ResourceProperties"]["instanceId"]
reverseProxyDomain = event["ResourceProperties"]["reverseProxyDomain"]
artifactsBucket = event["ResourceProperties"]["artifactsBucket"]
nucleusBuild = event["ResourceProperties"]["nucleusBuild"]
ovMainLoginSecretArn = event["ResourceProperties"]["ovMainLoginSecretArn"]
ovServiceLoginSecretArn = event["ResourceProperties"]["ovServiceLoginSecretArn"]
response = update_nucleus_config(
instanceId,
artifactsBucket,
reverseProxyDomain,
nucleusBuild,
ovMainLoginSecretArn,
ovServiceLoginSecretArn,
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
@helper.update
def update(event, context):
logger.info("Update Event: %s", json.dumps(event, indent=2))
instanceId = event["ResourceProperties"]["instanceId"]
reverseProxyDomain = event["ResourceProperties"]["reverseProxyDomain"]
artifactsBucket = event["ResourceProperties"]["artifactsBucket"]
nucleusBuild = event["ResourceProperties"]["nucleusBuild"]
ovMainLoginSecretArn = event["ResourceProperties"]["ovMainLoginSecretArn"]
ovServiceLoginSecretArn = event["ResourceProperties"]["ovServiceLoginSecretArn"]
response = update_nucleus_config(
instanceId,
artifactsBucket,
reverseProxyDomain,
nucleusBuild,
ovMainLoginSecretArn,
ovServiceLoginSecretArn,
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
def update_nucleus_config(
instanceId,
artifactsBucket,
reverseProxyDomain,
nucleusBuild,
ovMainLoginSecretArn,
ovServiceLoginSecretArn,
):
ovMainLoginSecret = sm.get_secret(ovMainLoginSecretArn)
ovServiceLoginSecret = sm.get_secret(ovServiceLoginSecretArn)
ovMainLoginPassword = ovMainLoginSecret["password"]
ovServiceLoginPassword = ovServiceLoginSecret["password"]
# generate config for reverse proxy servers
commands = []
try:
commands = config.get_config(
artifactsBucket, reverseProxyDomain, nucleusBuild, ovMainLoginPassword, ovServiceLoginPassword)
logger.debug(commands)
except Exception as e:
raise Exception("Failed to get Reverse Proxy config. {}".format(e))
for p in commands:
print(p)
response = ssm.run_commands(
instanceId, commands, document="AWS-RunShellScript")
return response
@helper.delete
def delete(event, context):
logger.info("Delete Event: %s", json.dumps(event, indent=2))
def handler(event, context):
helper(event, context)
| 3,303 | Python | 30.169811 | 107 | 0.718438 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/asgLifeCycleHooks/reverseProxy/index.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import boto3
import os
import json
import logging
import traceback
from botocore.exceptions import ClientError
import aws_utils.ssm as ssm
import aws_utils.r53 as r53
import aws_utils.ec2 as ec2
import config.reverseProxy as config
logger = logging.getLogger()
logger.setLevel(logging.INFO)
autoscaling = boto3.client("autoscaling")
ARTIFACTS_BUCKET = os.environ["ARTIFACTS_BUCKET"]
NUCLEUS_ROOT_DOMAIN = os.environ["NUCLEUS_ROOT_DOMAIN"]
NUCLEUS_DOMAIN_PREFIX = os.environ["NUCLEUS_DOMAIN_PREFIX"]
NUCLEUS_SERVER_ADDRESS = os.environ["NUCLEUS_SERVER_ADDRESS"]
def send_lifecycle_action(event, result):
try:
response = autoscaling.complete_lifecycle_action(
LifecycleHookName=event["detail"]["LifecycleHookName"],
AutoScalingGroupName=event["detail"]["AutoScalingGroupName"],
LifecycleActionToken=event["detail"]["LifecycleActionToken"],
LifecycleActionResult=result,
InstanceId=event["detail"]["EC2InstanceId"],
)
logger.info(response)
except ClientError as e:
message = "Error completing lifecycle action: {}".format(e)
logger.error(message)
raise Exception(message)
return
def update_nginix_config(
instanceId, artifactsBucket, nucleusServerAddress, domain
):
# generate config for reverse proxy servers
commands = []
try:
commands = config.get_config(
artifactsBucket, nucleusServerAddress, domain)
logger.debug(commands)
except Exception as e:
raise Exception("Failed to get Reverse Proxy config. {}".format(e))
response = ssm.run_commands(
instanceId, commands, document="AWS-RunShellScript"
)
return response
def handler(event, context):
logger.info("Event: %s", json.dumps(event, indent=2))
instanceId = event["detail"]["EC2InstanceId"]
transition = event["detail"]["LifecycleTransition"]
if transition == "autoscaling:EC2_INSTANCE_LAUNCHING":
try:
update_nginix_config(
instanceId,
ARTIFACTS_BUCKET,
NUCLEUS_SERVER_ADDRESS,
f"{NUCLEUS_DOMAIN_PREFIX}.{NUCLEUS_ROOT_DOMAIN}",
)
send_lifecycle_action(event, "CONTINUE")
except Exception as e:
message = "Error running command: {}".format(e)
logger.warning(traceback.format_exc())
logger.error(message)
send_lifecycle_action(event, "ABANDON")
elif transition == "autoscaling:EC2_INSTANCE_TERMINATING":
try:
send_lifecycle_action(event, "CONTINUE")
except Exception as e:
message = "Error running command: {}".format(e)
logger.warning(traceback.format_exc())
logger.error(message)
send_lifecycle_action(event, "ABANDON")
logger.info("Execution Complete")
return
| 3,116 | Python | 28.40566 | 75 | 0.662067 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/ec2.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
import boto3
from botocore.exceptions import ClientError
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
client = boto3.client("ec2")
ec2_resource = boto3.resource("ec2")
autoscaling = boto3.client("autoscaling")
def get_instance_public_dns_name(instanceId):
instance = get_instance_description(instanceId)
if instance is None:
return None
return instance["PublicDnsName"]
def get_instance_private_dns_name(instanceId):
instance = get_instance_description(instanceId)
if instance is None:
return None
return instance["PrivateDnsName"]
def get_instance_description(instanceId):
response = client.describe_instances(
InstanceIds=[instanceId],
)
instances = response["Reservations"][0]["Instances"]
if not instances:
return None
return instances[0]
def get_instance_status(instanceId):
    # Note: the original placeholder filters from the boto3 docs were removed;
    # we only need the status of the single requested instance.
    response = client.describe_instance_status(
        InstanceIds=[instanceId],
        IncludeAllInstances=True,
    )
    statuses = response["InstanceStatuses"]
    status = {"instanceStatus": None, "systemStatus": None}
    if statuses:
        status = {
            "instanceStatus": statuses[0]["InstanceStatus"]["Status"],
            "systemStatus": statuses[0]["SystemStatus"]["Status"],
        }
    return status
def get_autoscaling_instance(groupName):
response = autoscaling.describe_auto_scaling_groups(
AutoScalingGroupNames=[groupName]
)
logger.debug(response)
instances = response['AutoScalingGroups'][0]["Instances"]
if not instances:
return None
instanceIds = []
for i in instances:
instanceIds.append(i["InstanceId"])
return instanceIds
def update_tag_value(resourceIds: list, tagKey: str, tagValue: str):
client.create_tags(
Resources=resourceIds,
Tags=[{
'Key': tagKey,
'Value': tagValue
}],
)
def delete_tag(resourceIds: list, tagKey: str, tagValue: str):
response = client.delete_tags(
Resources=resourceIds,
Tags=[{
'Key': tagKey,
'Value': tagValue
}],
)
return response
def get_instance_state(id):
instance = ec2_resource.Instance(id)
return instance.state['Name']
def get_instances_by_tag(tagKey, tagValue):
instances = ec2_resource.instances.filter(
Filters=[{'Name': 'tag:{}'.format(tagKey), 'Values': [tagValue]}])
if not instances:
return None
instanceIds = []
for i in instances:
instanceIds.append(i.id)
return instanceIds
def get_instances_by_name(name):
instances = get_instances_by_tag("Name", name)
if not instances:
logger.error(f"ERROR: Failed to get instances by tag: Name, {name}")
return None
return instances
def get_active_instance(instances):
for i in instances:
instance_state = get_instance_state(i)
logger.info(f"Instance: {i}. State: {instance_state}")
if instance_state == "running" or instance_state == "pending":
return i
logger.warn(f"Instances are not active")
return None
def get_volumes_by_instance_id(id):
instance = ec2_resource.Instance(id)
volumes = instance.volumes.all()
volumeIds = []
for i in volumes:
volumeIds.append(i.id)
return volumeIds
def terminate_instances(instance_ids):
response = client.terminate_instances(InstanceIds=instance_ids)
logger.info(response)
return response
| 4,068 | Python | 21.605555 | 76 | 0.630285 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/ssm.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import time
import logging
import boto3
from botocore.exceptions import ClientError
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
client = boto3.client("ssm")
def get_param_value(name) -> str:
response = client.get_parameter(Name=name)
logger.info(response)
return response['Parameter']['Value']
def update_param_value(name, value) -> bool:
response = client.put_parameter(Name=name, Value=value, Overwrite=True)
logger.info(response)
try:
return (response['Version'] > 0)
except ClientError as e:
message = "Error calling SendCommand: {}".format(e)
logger.error(message)
return False
def run_commands(
instance_id, commands, document="AWS-RunPowerShellScript", comment="aws_utils.ssm.run_commands"
):
"""alt document options:
AWS-RunShellScript
"""
# Run Commands
logger.info("Calling SendCommand: {} for instance: {}".format(
commands, instance_id))
attempt = 0
response = None
while attempt < 20:
attempt = attempt + 1
try:
time.sleep(10 * attempt)
logger.info("SendCommand, attempt #: {}".format(attempt))
response = client.send_command(
InstanceIds=[instance_id],
DocumentName=document,
Parameters={"commands": commands},
Comment=comment,
CloudWatchOutputConfig={
"CloudWatchLogGroupName": instance_id,
"CloudWatchOutputEnabled": True,
},
)
logger.info(response)
if "Command" in response:
break
if attempt == 10:
message = "Command did not execute successfully in time allowed."
raise Exception(message)
except ClientError as e:
message = "Error calling SendCommand: {}".format(e)
logger.error(message)
continue
if not response:
message = "Command did not execute successfully in time allowed."
raise Exception(message)
# Check Command Status
command_id = response["Command"]["CommandId"]
logger.info(
"Calling GetCommandInvocation for command: {} for instance: {}".format(
command_id, instance_id
)
)
attempt = 0
result = None
while attempt < 10:
attempt = attempt + 1
try:
time.sleep(10 * attempt)
logger.info("GetCommandInvocation, attempt #: {}".format(attempt))
result = client.get_command_invocation(
CommandId=command_id,
InstanceId=instance_id,
)
if result["Status"] == "InProgress":
logger.info("Command is running.")
continue
elif result["Status"] == "Success":
logger.info("Command Output: {}".format(
result["StandardOutputContent"]))
if result["StandardErrorContent"]:
message = "Command returned STDERR: {}".format(
result["StandardErrorContent"])
logger.warning(message)
break
elif result["Status"] == "Failed":
message = "Error Running Command: {}".format(
result["StandardErrorContent"])
logger.error(message)
raise Exception(message)
            else:
                message = "Command has an unhandled status, will continue: {}".format(
                    result["Status"])
                logger.warning(message)
                continue
except client.exceptions.InvocationDoesNotExist as e:
message = "Error calling GetCommandInvocation: {}".format(e)
logger.error(message)
raise Exception(message)
if not result or result["Status"] != "Success":
message = "Command did not execute successfully in time allowed."
raise Exception(message)
return result
| 4,304 | Python | 30.195652 | 99 | 0.574814 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/r53.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import boto3
client = boto3.client("route53")
def update_hosted_zone_cname_record(hostedZoneID, rootDomain, domainPrefix, serverAddress):
fqdn = f"{domainPrefix}.{rootDomain}"
response = client.change_resource_record_sets(
HostedZoneId=hostedZoneID,
ChangeBatch={
"Comment": "Updating {fqdn}->{serverAddress} CNAME record",
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": fqdn,
"Type": "CNAME",
"TTL": 300,
"ResourceRecords": [{"Value": serverAddress}],
},
}
],
},
)
return response
def delete_hosted_zone_cname_record(hostedZoneID, rootDomain, domainPrefix, serverAddress):
response = client.change_resource_record_sets(
HostedZoneId=hostedZoneID,
ChangeBatch={
"Comment": "string",
"Changes": [
{
"Action": "DELETE",
"ResourceRecordSet": {
"Name": f"{domainPrefix}.{rootDomain}",
"Type": "CNAME",
"ResourceRecords": [{"Value": serverAddress}],
},
}
],
},
)
# botocore.errorfactory.InvalidInput: An error occurred (InvalidInput) when calling the ChangeResourceRecordSets operation: Invalid request:
# Expected exactly one of [AliasTarget, all of [TTL, and ResourceRecords], or TrafficPolicyInstanceId], but found none in Change with
# [Action=DELETE, Name=nucleus-dev.awsps.myinstance.com, Type=CNAME, SetIdentifier=null]
return response
| 1,989 | Python | 33.310344 | 144 | 0.553042 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/sm.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import json
import boto3
SM = boto3.client("secretsmanager")
def get_secret(secret_name):
response = SM.get_secret_value(SecretId=secret_name)
secret = json.loads(response["SecretString"])
return secret
| 429 | Python | 25.874998 | 73 | 0.745921 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/config/nucleus.py |
def start_nucleus_config() -> list[str]:
return '''
cd /opt/ove/base_stack || exit 1
echo "STARTING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml start
'''.splitlines()
def stop_nucleus_config() -> list[str]:
return '''
cd /opt/ove/base_stack || exit 1
echo "STOPPING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml stop
'''.splitlines()
def restart_nucleus_config() -> list[str]:
return '''
cd /opt/ove/base_stack || exit 1
echo "RESTARTING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml restart
'''.splitlines()
def get_config(artifacts_bucket_name: str, full_domain: str, nucleus_build: str, ov_main_password: str, ov_service_password: str) -> list[str]:
return f'''
echo "------------------------ NUCLEUS SERVER CONFIG ------------------------"
echo "UPDATING AND INSTALLING DEPS ----------------------------------"
sudo apt-get update -y -q && sudo apt-get upgrade -y
sudo apt-get install dialog apt-utils -y
echo "INSTALLING AWS CLI ----------------------------------"
sudo curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
sudo apt-get install unzip
sudo unzip awscliv2.zip
sudo ./aws/install
sudo rm awscliv2.zip
sudo rm -fr ./aws/install
echo "INSTALLING PYTHON ----------------------------------"
sudo apt-get -y install python3.9
sudo curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
sudo python3.9 get-pip.py
sudo pip3 install --upgrade pip
sudo pip3 --version
echo "INSTALLING DOCKER ----------------------------------"
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get -y install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -y update
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
sudo systemctl enable --now docker
echo "INSTALLING DOCKER COMPOSE ----------------------------------"
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
echo "INSTALLING NUCLEUS TOOLS ----------------------------------"
sudo mkdir -p /opt/ove
cd /opt/ove || exit 1
aws s3 cp --recursive s3://{artifacts_bucket_name}/tools/nucleusServer/ ./nucleusServer
cd nucleusServer || exit 1
sudo pip3 install -r requirements.txt
echo "UNPACKAGING NUCLEUS STACK ----------------------------------"
sudo tar xzvf stack/{nucleus_build}.tar.gz -C /opt/ove --strip-components=1
cd /opt/ove/base_stack || exit 1
omniverse_data_path=/var/lib/omni/nucleus-data
nucleusHost=$(curl -s http://169.254.169.254/latest/meta-data/hostname)
sudo nst generate-nucleus-stack-env --server-ip $nucleusHost --reverse-proxy-domain {full_domain} --instance-name nucleus_server --master-password {ov_main_password} --service-password {ov_service_password} --data-root $omniverse_data_path
chmod +x ./generate-sample-insecure-secrets.sh
./generate-sample-insecure-secrets.sh
echo "PULLING NUCLEUS IMAGES ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml pull
echo "STARTING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml up -d
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml ps -a
'''.splitlines()
| 4,176 | Python | 48.72619 | 247 | 0.582136 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/config/reverseProxy.py | def get_config(artifacts_bucket_name: str, nucleus_address: str, full_domain: str) -> list[str]:
return f'''
echo "------------------------ REVERSE PROXY CONFIG ------------------------"
echo "UPDATING PACKAGES ----------------------------------"
sudo yum update -y
echo "INSTALLING DEPENDENCIES ----------------------------------"
sudo yum install -y aws-cfn-bootstrap gcc openssl-devel bzip2-devel libffi-devel zlib-devel
echo "INSTALLING NGINX ----------------------------------"
sudo yum install -y amazon-linux-extras
sudo amazon-linux-extras enable nginx1
sudo yum install -y nginx
sudo nginx -v
echo "INSTALLING PYTHON ----------------------------------"
sudo wget https://www.python.org/ftp/python/3.9.9/Python-3.9.9.tgz -P /opt/python3.9
cd /opt/python3.9 || exit 1
sudo tar xzf Python-3.9.9.tgz
cd Python-3.9.9 || exit 1
sudo ./configure --prefix=/usr --enable-optimizations
sudo make install
echo "------------------------ REVERSE PROXY CONFIG ------------------------"
echo "INSTALLING REVERSE PROXY TOOLS ----------------------------------"
cd /opt || exit 1
sudo aws s3 cp --recursive s3://{artifacts_bucket_name}/tools/reverseProxy/ ./reverseProxy
cd reverseProxy || exit 1
pip3 --version
sudo pip3 install -r requirements.txt
sudo rpt generate-nginx-config --domain {full_domain} --server-address {nucleus_address}
echo "STARTING NGINX ----------------------------------"
sudo service nginx restart
'''.splitlines()
| 1,670 | Python | 44.162161 | 99 | 0.511976 |
arhix52/Strelka/conanfile.py | import os
from conan import ConanFile
from conan.tools.cmake import cmake_layout
from conan.tools.files import copy
class StrelkaRecipe(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "CMakeToolchain", "CMakeDeps"
def requirements(self):
self.requires("glm/cci.20230113")
self.requires("spdlog/[>=1.4.1]")
self.requires("imgui/1.89.3")
self.requires("glfw/3.3.8")
self.requires("stb/cci.20230920")
self.requires("glad/0.1.36")
self.requires("doctest/2.4.11")
self.requires("cxxopts/3.1.1")
self.requires("tinygltf/2.8.19")
self.requires("nlohmann_json/3.11.3")
def generate(self):
copy(self, "*glfw*", os.path.join(self.dependencies["imgui"].package_folder,
"res", "bindings"), os.path.join(self.source_folder, "external", "imgui"))
copy(self, "*opengl3*", os.path.join(self.dependencies["imgui"].package_folder,
"res", "bindings"), os.path.join(self.source_folder, "external", "imgui"))
copy(self, "*metal*", os.path.join(self.dependencies["imgui"].package_folder,
"res", "bindings"), os.path.join(self.source_folder, "external", "imgui"))
def layout(self):
cmake_layout(self)
| 1,294 | Python | 37.088234 | 87 | 0.619784 |
arhix52/Strelka/BuildOpenUSD.md | USD building:
VS2019 + python 3.10
To build debug on Windows:
python USD\build_scripts\build_usd.py "C:\work\USD_build_debug" --python --materialx --build-variant debug
For USD 23.03 you could use VS2022
Linux:
* python3 ./OpenUSD/build_scripts/build_usd.py /home/<user>/work/OpenUSD_build/ --python --materialx
| 315 | Markdown | 30.599997 | 106 | 0.746032 |
arhix52/Strelka/README.md | # Strelka
Path tracing renderer based on NVIDIA OptiX + NVIDIA MDL and Apple Metal
## OpenUSD Hydra render delegate
![Kitchen Set from OpenUSD](images/Kitchen_2048i_4d_2048spp_0.png)
## Basis curves support
![Hairs](images/hairmat_2_light_10000i_6d_10000spp_0.png)
![Einar](images/einar_1024i_3d_1024spp_0.png)
## Project Dependencies
OpenUSD https://github.com/PixarAnimationStudios/OpenUSD
* Set env var: `USD_DIR=c:\work\USD_build`
OptiX
* Set env var: `OPTIX_DIR=C:\work\OptiX SDK 8.0.0`
Download the MDL SDK (for example: mdl-sdk-367100.2992): https://developer.nvidia.com/nvidia-mdl-sdk-get-started
* unzip content to /external/mdl-sdk/
LLVM 12.0.1 (https://github.com/llvm/llvm-project/releases/tag/llvmorg-12.0.1) for the MDL PTX code generator
* for win: https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.1/LLVM-12.0.1-win64.exe
* for linux: https://github.com/llvm/llvm-project/releases/download/llvmorg-12.0.1/clang+llvm-12.0.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz
* install it to `c:\work` for example
* add to PATH: `c:\work\LLVM\bin`
* extract 2 header files from external/clang12_patched to `C:\work\LLVM\lib\clang\12.0.1\include`
Strelka uses conan https://conan.io/
* install conan: `pip install conan`
* install the ninja build system [https://ninja-build.org/]: `sudo apt install ninja-build`
Detect the conan profile: `conan profile detect --force`
On Windows:
1. `conan install . --build=missing --settings=build_type=Debug`
2. `cd build`
3. `cmake .. -G "Visual Studio 17 2022" -DCMAKE_TOOLCHAIN_FILE=generators\conan_toolchain.cmake`
4. `cmake --build . --config Debug`
On Mac/Linux:
1. `conan install . -c tools.cmake.cmaketoolchain:generator=Ninja -c tools.system.package_manager:mode=install -c tools.system.package_manager:sudo=True --build=missing --settings=build_type=Debug`
2. `cd build/Debug`
3. `source ./generators/conanbuild.sh`
4. `cmake ../.. -DCMAKE_TOOLCHAIN_FILE=generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Debug`
5. `cmake --build .`
#### Installation
#### Launch
## Synopsis
```
Strelka -s <USD Scene path> [OPTION...] positional parameters

  -s, --scene arg      scene path (default: "")
  -i, --iteration arg  Iteration to capture (default: -1)
  -h, --help           Print usage
```
To set the log level, use
`export SPDLOG_LEVEL=debug`
The available log levels are: trace, debug, info, warn, and err.
## Example
./Strelka -s misc/coffeemaker.usdc -i 100
## USD
USD env:
export USD_DIR=/Users/<user>/work/usd_build/
export PATH=/Users/<user>/work/usd_build/bin:$PATH
export PYTHONPATH=/Users/<user>/work/usd_build/lib/python:$PYTHONPATH
Install plugin:
cmake --install . --component HdStrelka
## License
* USD plugin design and material translation code is based on Pablo Delgado Krämer's gatling project:
https://github.com/pablode/gatling | 2,849 | Markdown | 33.337349 | 197 | 0.713935 |
arhix52/Strelka/src/HdStrelka/RenderParam.h | #pragma once
#include "pxr/pxr.h"
#include "pxr/imaging/hd/renderDelegate.h"
#include "pxr/imaging/hd/renderThread.h"
#include <scene/scene.h>
PXR_NAMESPACE_OPEN_SCOPE
class HdStrelkaRenderParam final : public HdRenderParam
{
public:
HdStrelkaRenderParam(oka::Scene* scene, HdRenderThread* renderThread, std::atomic<int>* sceneVersion)
: mScene(scene), mRenderThread(renderThread), mSceneVersion(sceneVersion)
{
}
virtual ~HdStrelkaRenderParam() = default;
/// Accessor for the top-level embree scene.
oka::Scene* AcquireSceneForEdit()
{
mRenderThread->StopRender();
(*mSceneVersion)++;
return mScene;
}
private:
oka::Scene* mScene;
/// A handle to the global render thread.
HdRenderThread* mRenderThread;
/// A version counter for edits to mScene.
std::atomic<int>* mSceneVersion;
};
PXR_NAMESPACE_CLOSE_SCOPE
| 901 | C | 24.055555 | 105 | 0.694784 |
arhix52/Strelka/src/HdStrelka/BasisCurves.h | #pragma once
#include <pxr/pxr.h>
#include <pxr/imaging/hd/basisCurves.h>
#include <scene/scene.h>
#include <pxr/base/gf/vec2f.h>
PXR_NAMESPACE_OPEN_SCOPE
class HdStrelkaBasisCurves final : public HdBasisCurves
{
public:
    HF_MALLOC_TAG_NEW("new HdStrelkaBasisCurves");
HdStrelkaBasisCurves(const SdfPath& id, oka::Scene* scene);
~HdStrelkaBasisCurves() override;
void Sync(HdSceneDelegate* sceneDelegate,
HdRenderParam* renderParam,
HdDirtyBits* dirtyBits,
const TfToken& reprToken) override;
HdDirtyBits GetInitialDirtyBitsMask() const override;
void _ConvertCurve();
const std::vector<glm::float3>& GetPoints() const;
const std::vector<float>& GetWidths() const;
const std::vector<uint32_t>& GetVertexCounts() const;
const GfMatrix4d& GetPrototypeTransform() const;
const char* getName() const;
protected:
void _InitRepr(const TfToken& reprName, HdDirtyBits* dirtyBits) override;
HdDirtyBits _PropagateDirtyBits(HdDirtyBits bits) const override;
private:
bool _FindPrimvar(HdSceneDelegate* sceneDelegate, const TfToken& primvarName, HdInterpolation& interpolation) const;
void _PullPrimvars(HdSceneDelegate* sceneDelegate,
VtVec3fArray& points,
VtVec3fArray& normals,
VtFloatArray& widths,
bool& indexedNormals,
bool& indexedUVs,
GfVec3f& color,
bool& hasColor) const;
void _UpdateGeometry(HdSceneDelegate* sceneDelegate);
oka::Scene* mScene;
std::string mName;
GfVec3f mColor;
VtIntArray mVertexCounts;
VtVec3fArray mPoints;
VtVec3fArray mNormals;
VtFloatArray mWidths;
GfMatrix4d m_prototypeTransform;
HdBasisCurvesTopology mTopology;
std::vector<glm::float3> mCurvePoints;
std::vector<float> mCurveWidths;
std::vector<uint32_t> mCurveVertexCounts;
// std::vector<GfVec2f> m_uvs;
};
PXR_NAMESPACE_CLOSE_SCOPE
| 2,048 | C | 28.271428 | 120 | 0.67334 |
arhix52/Strelka/src/HdStrelka/Tokens.cpp | #include "Tokens.h"
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdStrelkaSettingsTokens, HD_STRELKA_SETTINGS_TOKENS);
TF_DEFINE_PUBLIC_TOKENS(HdStrelkaNodeIdentifiers, HD_STRELKA_NODE_IDENTIFIER_TOKENS);
TF_DEFINE_PUBLIC_TOKENS(HdStrelkaSourceTypes, HD_STRELKA_SOURCE_TYPE_TOKENS);
TF_DEFINE_PUBLIC_TOKENS(HdStrelkaDiscoveryTypes, HD_STRELKA_DISCOVERY_TYPE_TOKENS);
TF_DEFINE_PUBLIC_TOKENS(HdStrelkaRenderContexts, HD_STRELKA_RENDER_CONTEXT_TOKENS);
TF_DEFINE_PUBLIC_TOKENS(HdStrelkaNodeContexts, HD_STRELKA_NODE_CONTEXT_TOKENS);
PXR_NAMESPACE_CLOSE_SCOPE
| 564 | C++ | 42.461535 | 85 | 0.833333 |
arhix52/Strelka/src/HdStrelka/MdlDiscoveryPlugin.h | #pragma once
#include <pxr/usd/ndr/discoveryPlugin.h>
PXR_NAMESPACE_OPEN_SCOPE
class HdStrelkaMdlDiscoveryPlugin final : public NdrDiscoveryPlugin
{
public:
NdrNodeDiscoveryResultVec DiscoverNodes(const Context& ctx) override;
const NdrStringVec& GetSearchURIs() const override;
};
PXR_NAMESPACE_CLOSE_SCOPE
| 317 | C | 18.874999 | 71 | 0.807571 |
arhix52/Strelka/src/HdStrelka/Material.h | #pragma once
#include "materialmanager.h"
#include "MaterialNetworkTranslator.h"
#include <pxr/imaging/hd/material.h>
#include <pxr/imaging/hd/sceneDelegate.h>
PXR_NAMESPACE_OPEN_SCOPE
class HdStrelkaMaterial final : public HdMaterial
{
public:
HF_MALLOC_TAG_NEW("new HdStrelkaMaterial");
HdStrelkaMaterial(const SdfPath& id, const MaterialNetworkTranslator& translator);
~HdStrelkaMaterial() override;
HdDirtyBits GetInitialDirtyBitsMask() const override;
void Sync(HdSceneDelegate* sceneDelegate, HdRenderParam* renderParam, HdDirtyBits* dirtyBits) override;
const std::string& GetStrelkaMaterial() const;
bool isMdl() const
{
return mIsMdl;
}
std::string getFileUri()
{
return mMdlFileUri;
}
std::string getSubIdentifier()
{
return mMdlSubIdentifier;
}
const std::vector<oka::MaterialManager::Param>& getParams() const
{
return mMaterialParams;
}
private:
const MaterialNetworkTranslator& m_translator;
bool mIsMdl = false;
std::string mMaterialXCode;
// MDL related
std::string mMdlFileUri;
std::string mMdlSubIdentifier;
std::vector<oka::MaterialManager::Param> mMaterialParams;
};
PXR_NAMESPACE_CLOSE_SCOPE
| 1,258 | C | 21.890909 | 107 | 0.709062 |
arhix52/Strelka/src/HdStrelka/Light.cpp | #include "Light.h"
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtx/compatibility.hpp>
#include <pxr/imaging/hd/instancer.h>
#include <pxr/imaging/hd/meshUtil.h>
#include <pxr/imaging/hd/smoothNormals.h>
#include <pxr/imaging/hd/vertexAdjacency.h>
PXR_NAMESPACE_OPEN_SCOPE
// Lookup table from:
// Colour Rendering of Spectra
// by John Walker
// https://www.fourmilab.ch/documents/specrend/specrend.c
//
// Covers range from 1000k to 10000k in 500k steps
// assuming Rec709 / sRGB colorspace chromaticity.
//
// NOTE: 6500K doesn't give a pure white because the D65
// illuminant used by Rec. 709 doesn't lie on the
// Planckian Locus. We would need to compute the
// Correlated Colour Temperature (CCT) using Ohno's
// method to get pure white. Maybe one day.
//
// Note that the beginning and ending knots are repeated to simplify
// boundary behavior. The last 4 knots represent the segment starting
// at 1.0.
//
static GfVec3f const _blackbodyRGB[] = {
GfVec3f(1.000000f, 0.027490f, 0.000000f), // 1000 K (Approximation)
GfVec3f(1.000000f, 0.027490f, 0.000000f), // 1000 K (Approximation)
GfVec3f(1.000000f, 0.149664f, 0.000000f), // 1500 K (Approximation)
GfVec3f(1.000000f, 0.256644f, 0.008095f), // 2000 K
GfVec3f(1.000000f, 0.372033f, 0.067450f), // 2500 K
GfVec3f(1.000000f, 0.476725f, 0.153601f), // 3000 K
GfVec3f(1.000000f, 0.570376f, 0.259196f), // 3500 K
GfVec3f(1.000000f, 0.653480f, 0.377155f), // 4000 K
GfVec3f(1.000000f, 0.726878f, 0.501606f), // 4500 K
GfVec3f(1.000000f, 0.791543f, 0.628050f), // 5000 K
GfVec3f(1.000000f, 0.848462f, 0.753228f), // 5500 K
GfVec3f(1.000000f, 0.898581f, 0.874905f), // 6000 K
GfVec3f(1.000000f, 0.942771f, 0.991642f), // 6500 K
GfVec3f(0.906947f, 0.890456f, 1.000000f), // 7000 K
GfVec3f(0.828247f, 0.841838f, 1.000000f), // 7500 K
GfVec3f(0.765791f, 0.801896f, 1.000000f), // 8000 K
GfVec3f(0.715255f, 0.768579f, 1.000000f), // 8500 K
GfVec3f(0.673683f, 0.740423f, 1.000000f), // 9000 K
GfVec3f(0.638992f, 0.716359f, 1.000000f), // 9500 K
GfVec3f(0.609681f, 0.695588f, 1.000000f), // 10000 K
GfVec3f(0.609681f, 0.695588f, 1.000000f), // 10000 K
GfVec3f(0.609681f, 0.695588f, 1.000000f) // 10000 K
};
// Catmull-Rom basis
static const float _basis[4][4] = {
{ -0.5f, 1.5f, -1.5f, 0.5f }, { 1.f, -2.5f, 2.0f, -0.5f }, { -0.5f, 0.0f, 0.5f, 0.0f }, { 0.f, 1.0f, 0.0f, 0.0f }
};
static inline float _Rec709RgbToLuma(const GfVec3f& rgb)
{
return GfDot(rgb, GfVec3f(0.2126f, 0.7152f, 0.0722f));
}
static GfVec3f _BlackbodyTemperatureAsRgb(float temp)
{
// Catmull-Rom interpolation of _blackbodyRGB
constexpr int numKnots = sizeof(_blackbodyRGB) / sizeof(_blackbodyRGB[0]);
// Parametric distance along spline
const float u_spline = GfClamp((temp - 1000.0f) / 9000.0f, 0.0f, 1.0f);
// Last 4 knots represent a trailing segment starting at u_spline==1.0,
// to simplify boundary behavior
constexpr int numSegs = (numKnots - 4);
const float x = u_spline * numSegs;
const int seg = int(floor(x));
const float u_seg = x - seg; // Parameter within segment
// Knot values for this segment
GfVec3f k0 = _blackbodyRGB[seg + 0];
GfVec3f k1 = _blackbodyRGB[seg + 1];
GfVec3f k2 = _blackbodyRGB[seg + 2];
GfVec3f k3 = _blackbodyRGB[seg + 3];
// Compute cubic coefficients. Could fold constants (zero, one) here
// if speed is a concern.
GfVec3f a = _basis[0][0] * k0 + _basis[0][1] * k1 + _basis[0][2] * k2 + _basis[0][3] * k3;
GfVec3f b = _basis[1][0] * k0 + _basis[1][1] * k1 + _basis[1][2] * k2 + _basis[1][3] * k3;
GfVec3f c = _basis[2][0] * k0 + _basis[2][1] * k1 + _basis[2][2] * k2 + _basis[2][3] * k3;
GfVec3f d = _basis[3][0] * k0 + _basis[3][1] * k1 + _basis[3][2] * k2 + _basis[3][3] * k3;
// Eval cubic polynomial.
GfVec3f rgb = ((a * u_seg + b) * u_seg + c) * u_seg + d;
// Normalize to the same luminance as (1,1,1)
rgb /= _Rec709RgbToLuma(rgb);
// Clamp at zero, since the spline can produce small negative values,
// e.g. in the blue component at 1300k.
rgb[0] = GfMax(rgb[0], 0.f);
rgb[1] = GfMax(rgb[1], 0.f);
rgb[2] = GfMax(rgb[2], 0.f);
return rgb;
}
HdStrelkaLight::HdStrelkaLight(const SdfPath& id, TfToken const& lightType) : HdLight(id), mLightType(lightType)
{
}
HdStrelkaLight::~HdStrelkaLight()
{
}
void HdStrelkaLight::Sync(HdSceneDelegate* sceneDelegate, HdRenderParam* renderParam, HdDirtyBits* dirtyBits)
{
TF_UNUSED(renderParam);
bool pullLight = (*dirtyBits & DirtyBits::DirtyParams);
*dirtyBits = DirtyBits::Clean;
if (!pullLight)
{
return;
}
const SdfPath& id = GetId();
// const VtValue& resource = sceneDelegate->GetMaterialResource(id);
// Get the color of the light
GfVec3f hdc = sceneDelegate->GetLightParamValue(id, HdLightTokens->color).Get<GfVec3f>();
// Color temperature
VtValue enableColorTemperatureVal = sceneDelegate->GetLightParamValue(id, HdLightTokens->enableColorTemperature);
if (enableColorTemperatureVal.GetWithDefault<bool>(false))
{
VtValue colorTemperatureVal = sceneDelegate->GetLightParamValue(id, HdLightTokens->colorTemperature);
if (colorTemperatureVal.IsHolding<float>())
{
float colorTemperature = colorTemperatureVal.Get<float>();
hdc = GfCompMult(hdc, _BlackbodyTemperatureAsRgb(colorTemperature));
}
}
// Intensity
float intensity = sceneDelegate->GetLightParamValue(id, HdLightTokens->intensity).Get<float>();
// Exposure
float exposure = sceneDelegate->GetLightParamValue(id, HdLightTokens->exposure).Get<float>();
intensity *= powf(2.0f, GfClamp(exposure, -50.0f, 50.0f));
// Transform
{
GfMatrix4d transform = sceneDelegate->GetTransform(id);
glm::float4x4 xform;
for (int i = 0; i < 4; ++i)
{
for (int j = 0; j < 4; ++j)
{
xform[i][j] = (float)transform[i][j];
}
}
mLightDesc.xform = xform;
mLightDesc.useXform = true;
}
mLightDesc.color = glm::float3(hdc[0], hdc[1], hdc[2]);
mLightDesc.intensity = intensity;
if (mLightType == HdPrimTypeTokens->rectLight)
{
mLightDesc.type = 0;
float width = 0.0f;
float height = 0.0f;
VtValue widthVal = sceneDelegate->GetLightParamValue(id, HdLightTokens->width);
if (widthVal.IsHolding<float>())
{
width = widthVal.Get<float>();
}
VtValue heightVal = sceneDelegate->GetLightParamValue(id, HdLightTokens->height);
if (heightVal.IsHolding<float>())
{
height = heightVal.Get<float>();
}
mLightDesc.height = height;
mLightDesc.width = width;
}
else if (mLightType == HdPrimTypeTokens->diskLight || mLightType == HdPrimTypeTokens->sphereLight)
{
mLightDesc.type = mLightType == HdPrimTypeTokens->diskLight ? 1 : 2;
float radius = 0.0;
VtValue radiusVal = sceneDelegate->GetLightParamValue(id, HdLightTokens->radius);
if (radiusVal.IsHolding<float>())
{
radius = radiusVal.Get<float>();
}
mLightDesc.radius = radius * mLightDesc.xform[0][0]; // uniform scale
}
else if (mLightType == HdPrimTypeTokens->distantLight)
{
float angle = 0.0f;
mLightDesc.type = 3; // TODO: move to enum
VtValue angleVal = sceneDelegate->GetLightParamValue(id, HdLightTokens->angle);
if (angleVal.IsHolding<float>())
{
angle = angleVal.Get<float>();
}
mLightDesc.halfAngle = angle * 0.5f * (M_PI / 180.0f);
mLightDesc.intensity /= M_PI * powf(sin(mLightDesc.halfAngle), 2.0f);
}
}
HdDirtyBits HdStrelkaLight::GetInitialDirtyBitsMask() const
{
return (DirtyParams | DirtyTransform);
}
oka::Scene::UniformLightDesc HdStrelkaLight::getLightDesc()
{
return mLightDesc;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 8,162 | C++ | 35.936651 | 117 | 0.636486 |
arhix52/Strelka/src/HdStrelka/MdlParserPlugin.cpp | // Copyright (C) 2021 Pablo Delgado Krämer
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#include "MdlParserPlugin.h"
#include <pxr/base/tf/staticTokens.h>
#include <pxr/usd/sdr/shaderNode.h>
#include <pxr/usd/ar/resolver.h>
#include "pxr/usd/ar/resolvedPath.h"
#include "pxr/usd/ar/asset.h"
#include <pxr/usd/ar/ar.h>
//#include "Tokens.h"
PXR_NAMESPACE_OPEN_SCOPE
NDR_REGISTER_PARSER_PLUGIN(HdStrelkaMdlParserPlugin);
// clang-format off
TF_DEFINE_PRIVATE_TOKENS(_tokens,
(mdl)
(subIdentifier));
// clang-format on
NdrNodeUniquePtr HdStrelkaMdlParserPlugin::Parse(const NdrNodeDiscoveryResult& discoveryResult)
{
NdrTokenMap metadata = discoveryResult.metadata;
metadata[_tokens->subIdentifier] = discoveryResult.subIdentifier;
return std::make_unique<SdrShaderNode>(discoveryResult.identifier, discoveryResult.version, discoveryResult.name,
discoveryResult.family, _tokens->mdl, discoveryResult.sourceType,
discoveryResult.uri, discoveryResult.resolvedUri, NdrPropertyUniquePtrVec{},
metadata);
}
const NdrTokenVec& HdStrelkaMdlParserPlugin::GetDiscoveryTypes() const
{
static NdrTokenVec s_discoveryTypes{ _tokens->mdl };
return s_discoveryTypes;
}
const TfToken& HdStrelkaMdlParserPlugin::GetSourceType() const
{
return _tokens->mdl;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 2,153 | C++ | 34.311475 | 119 | 0.681839 |
arhix52/Strelka/src/HdStrelka/Instancer.cpp | // Copyright (C) 2021 Pablo Delgado Krämer
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#include "Instancer.h"
#include <pxr/base/gf/quatd.h>
#include <pxr/imaging/hd/sceneDelegate.h>
PXR_NAMESPACE_OPEN_SCOPE
HdStrelkaInstancer::HdStrelkaInstancer(HdSceneDelegate* delegate,
const SdfPath& id)
: HdInstancer(delegate, id)
{
}
HdStrelkaInstancer::~HdStrelkaInstancer()
{
}
void HdStrelkaInstancer::Sync(HdSceneDelegate* sceneDelegate,
HdRenderParam* renderParam,
HdDirtyBits* dirtyBits)
{
TF_UNUSED(renderParam);
_UpdateInstancer(sceneDelegate, dirtyBits);
const SdfPath& id = GetId();
if (!HdChangeTracker::IsAnyPrimvarDirty(*dirtyBits, id))
{
return;
}
const HdPrimvarDescriptorVector& primvars = sceneDelegate->GetPrimvarDescriptors(id, HdInterpolation::HdInterpolationInstance);
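    // Cache only the instance-rate primvars we consume (translate, rotate, scale,
    // instanceTransform); they are combined later in ComputeInstanceTransforms().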
for (const HdPrimvarDescriptor& primvar : primvars)
{
TfToken primName = primvar.name;
if (primName != HdInstancerTokens->translate &&
primName != HdInstancerTokens->rotate &&
primName != HdInstancerTokens->scale &&
primName != HdInstancerTokens->instanceTransform)
{
continue;
}
if (!HdChangeTracker::IsPrimvarDirty(*dirtyBits, id, primName))
{
continue;
}
VtValue value = sceneDelegate->Get(id, primName);
m_primvarMap[primName] = value;
}
}
VtMatrix4dArray HdStrelkaInstancer::ComputeInstanceTransforms(const SdfPath& prototypeId)
{
HdSceneDelegate* sceneDelegate = GetDelegate();
const SdfPath& id = GetId();
// Calculate instance transforms for this instancer.
VtValue boxedTranslates = m_primvarMap[HdInstancerTokens->translate];
VtValue boxedRotates = m_primvarMap[HdInstancerTokens->rotate];
VtValue boxedScales = m_primvarMap[HdInstancerTokens->scale];
VtValue boxedInstanceTransforms = m_primvarMap[HdInstancerTokens->instanceTransform];
VtVec3fArray translates;
if (boxedTranslates.IsHolding<VtVec3fArray>())
{
translates = boxedTranslates.UncheckedGet<VtVec3fArray>();
}
else if (!boxedTranslates.IsEmpty())
{
TF_CODING_WARNING("Instancer translate values are not of type Vec3f!");
}
VtVec4fArray rotates;
if (boxedRotates.IsHolding<VtVec4fArray>())
{
rotates = boxedRotates.Get<VtVec4fArray>();
}
else if (!boxedRotates.IsEmpty())
{
TF_CODING_WARNING("Instancer rotate values are not of type Vec3f!");
}
VtVec3fArray scales;
if (boxedScales.IsHolding<VtVec3fArray>())
{
scales = boxedScales.Get<VtVec3fArray>();
}
else if (!boxedScales.IsEmpty())
{
TF_CODING_WARNING("Instancer scale values are not of type Vec3f!");
}
VtMatrix4dArray instanceTransforms;
if (boxedInstanceTransforms.IsHolding<VtMatrix4dArray>())
{
instanceTransforms = boxedInstanceTransforms.Get<VtMatrix4dArray>();
}
GfMatrix4d instancerTransform = sceneDelegate->GetInstancerTransform(id);
const VtIntArray& instanceIndices = sceneDelegate->GetInstanceIndices(id, prototypeId);
VtMatrix4dArray transforms;
transforms.resize(instanceIndices.size());
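    // Build one matrix per instance by combining the per-instance translate/rotate/scale
    // and instanceTransform primvars with the instancer's own transform.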
for (size_t i = 0; i < instanceIndices.size(); i++)
{
int instanceIndex = instanceIndices[i];
GfMatrix4d mat = instancerTransform;
GfMatrix4d temp;
if (i < translates.size())
{
auto trans = GfVec3d(translates[instanceIndex]);
temp.SetTranslate(trans);
mat = temp * mat;
}
if (i < rotates.size())
{
GfVec4f rot = rotates[instanceIndex];
temp.SetRotate(GfQuatd(rot[0], rot[1], rot[2], rot[3]));
mat = temp * mat;
}
if (i < scales.size())
{
auto scale = GfVec3d(scales[instanceIndex]);
temp.SetScale(scale);
mat = temp * mat;
}
if (i < instanceTransforms.size())
{
temp = instanceTransforms[instanceIndex];
mat = temp * mat;
}
transforms[i] = mat;
}
    // If this instancer is itself instanced, fold the parent instancer's transforms into the result.
const SdfPath& parentId = GetParentId();
if (parentId.IsEmpty())
{
return transforms;
}
const HdRenderIndex& renderIndex = sceneDelegate->GetRenderIndex();
HdInstancer* boxedParentInstancer = renderIndex.GetInstancer(parentId);
HdStrelkaInstancer* parentInstancer = dynamic_cast<HdStrelkaInstancer*>(boxedParentInstancer);
VtMatrix4dArray parentTransforms = parentInstancer->ComputeInstanceTransforms(id);
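    // Nested instancing: the final set is the Cartesian product of this instancer's
    // transforms with all of the parent instancer's transforms.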
VtMatrix4dArray transformProducts;
transformProducts.resize(parentTransforms.size() * transforms.size());
for (size_t i = 0; i < parentTransforms.size(); i++)
{
for (size_t j = 0; j < transforms.size(); j++)
{
size_t index = i * transforms.size() + j;
transformProducts[index] = transforms[j] * parentTransforms[i];
}
}
return transformProducts;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 5,927 | C++ | 29.556701 | 131 | 0.639615 |
arhix52/Strelka/src/HdStrelka/RenderDelegate.h | #pragma once
#include <pxr/imaging/hd/renderDelegate.h>
#include "MaterialNetworkTranslator.h"
#include <render/common.h>
#include <scene/scene.h>
#include <render/render.h>
PXR_NAMESPACE_OPEN_SCOPE
class HdStrelkaRenderDelegate final : public HdRenderDelegate
{
public:
HdStrelkaRenderDelegate(const HdRenderSettingsMap& settingsMap, const MaterialNetworkTranslator& translator);
~HdStrelkaRenderDelegate() override;
void SetDrivers(HdDriverVector const& drivers) override;
HdRenderSettingDescriptorList GetRenderSettingDescriptors() const override;
HdRenderPassSharedPtr CreateRenderPass(HdRenderIndex* index, const HdRprimCollection& collection) override;
HdResourceRegistrySharedPtr GetResourceRegistry() const override;
void CommitResources(HdChangeTracker* tracker) override;
HdInstancer* CreateInstancer(HdSceneDelegate* delegate, const SdfPath& id) override;
void DestroyInstancer(HdInstancer* instancer) override;
HdAovDescriptor GetDefaultAovDescriptor(const TfToken& name) const override;
/* Rprim */
const TfTokenVector& GetSupportedRprimTypes() const override;
HdRprim* CreateRprim(const TfToken& typeId, const SdfPath& rprimId) override;
void DestroyRprim(HdRprim* rPrim) override;
/* Sprim */
const TfTokenVector& GetSupportedSprimTypes() const override;
HdSprim* CreateSprim(const TfToken& typeId, const SdfPath& sprimId) override;
HdSprim* CreateFallbackSprim(const TfToken& typeId) override;
void DestroySprim(HdSprim* sprim) override;
/* Bprim */
const TfTokenVector& GetSupportedBprimTypes() const override;
HdBprim* CreateBprim(const TfToken& typeId, const SdfPath& bprimId) override;
HdBprim* CreateFallbackBprim(const TfToken& typeId) override;
void DestroyBprim(HdBprim* bprim) override;
TfToken GetMaterialBindingPurpose() const override;
// In a USD file, there can be multiple networks associated with a material:
// token outputs:mdl:surface.connect = </Root/Glass.outputs:out>
// token outputs:surface.connect = </Root/GlassPreviewSurface.outputs:surface>
// This function returns the order of preference used when selecting one for rendering.
TfTokenVector GetMaterialRenderContexts() const override;
TfTokenVector GetShaderSourceTypes() const override;
oka::SharedContext& getSharedContext();
private:
const MaterialNetworkTranslator& m_translator;
HdRenderSettingDescriptorList m_settingDescriptors;
HdResourceRegistrySharedPtr m_resourceRegistry;
const TfTokenVector SUPPORTED_BPRIM_TYPES = { HdPrimTypeTokens->renderBuffer };
const TfTokenVector SUPPORTED_RPRIM_TYPES = { HdPrimTypeTokens->mesh, HdPrimTypeTokens->basisCurves };
const TfTokenVector SUPPORTED_SPRIM_TYPES = {
HdPrimTypeTokens->camera, HdPrimTypeTokens->material, HdPrimTypeTokens->light,
HdPrimTypeTokens->rectLight, HdPrimTypeTokens->diskLight, HdPrimTypeTokens->sphereLight,
HdPrimTypeTokens->distantLight,
};
oka::SharedContext* mSharedCtx;
oka::Scene mScene;
oka::Render* mRenderer;
};
PXR_NAMESPACE_CLOSE_SCOPE
| 3,149 | C | 33.23913 | 113 | 0.768498 |
arhix52/Strelka/src/HdStrelka/RenderDelegate.cpp | #include "RenderDelegate.h"
#include "Camera.h"
#include "Instancer.h"
#include "Light.h"
#include "Material.h"
#include "Mesh.h"
#include "BasisCurves.h"
#include "RenderBuffer.h"
#include "RenderPass.h"
#include "Tokens.h"
#include <pxr/base/gf/vec4f.h>
#include <pxr/imaging/hd/resourceRegistry.h>
#include <log.h>
#include <memory>
PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PRIVATE_TOKENS(_Tokens, (HdStrelkaDriver));
HdStrelkaRenderDelegate::HdStrelkaRenderDelegate(const HdRenderSettingsMap& settingsMap,
const MaterialNetworkTranslator& translator)
: m_translator(translator)
{
m_resourceRegistry = std::make_shared<HdResourceRegistry>();
m_settingDescriptors.push_back(
HdRenderSettingDescriptor{ "Samples per pixel", HdStrelkaSettingsTokens->spp, VtValue{ 8 } });
m_settingDescriptors.push_back(
HdRenderSettingDescriptor{ "Max bounces", HdStrelkaSettingsTokens->max_bounces, VtValue{ 4 } });
_PopulateDefaultSettings(m_settingDescriptors);
for (const auto& setting : settingsMap)
{
const TfToken& key = setting.first;
const VtValue& value = setting.second;
_settingsMap[key] = value;
}
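    // Pick the rendering backend: OptiX by default, Metal on Apple platforms.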
oka::RenderType type = oka::RenderType::eOptiX;
#ifdef __APPLE__
type = oka::RenderType::eMetal;
#endif
mRenderer = oka::RenderFactory::createRender(type);
mRenderer->setScene(&mScene);
}
HdStrelkaRenderDelegate::~HdStrelkaRenderDelegate()
{
}
void HdStrelkaRenderDelegate::SetDrivers(HdDriverVector const& drivers)
{
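    // Locate the Strelka driver supplied by the host application; its shared context is
    // handed to the renderer, and the initialized renderer is published back through it.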
for (HdDriver* hdDriver : drivers)
{
if (hdDriver->name == _Tokens->HdStrelkaDriver && hdDriver->driver.IsHolding<oka::SharedContext*>())
{
assert(mRenderer);
mSharedCtx = hdDriver->driver.UncheckedGet<oka::SharedContext*>();
mRenderer->setSharedContext(mSharedCtx);
mRenderer->init();
mSharedCtx->mRender = mRenderer;
break;
}
}
}
HdRenderSettingDescriptorList HdStrelkaRenderDelegate::GetRenderSettingDescriptors() const
{
return m_settingDescriptors;
}
HdRenderPassSharedPtr HdStrelkaRenderDelegate::CreateRenderPass(HdRenderIndex* index, const HdRprimCollection& collection)
{
return HdRenderPassSharedPtr(new HdStrelkaRenderPass(index, collection, _settingsMap, mRenderer, &mScene));
}
HdResourceRegistrySharedPtr HdStrelkaRenderDelegate::GetResourceRegistry() const
{
return m_resourceRegistry;
}
void HdStrelkaRenderDelegate::CommitResources(HdChangeTracker* tracker)
{
TF_UNUSED(tracker);
// We delay BVH building and GPU uploads to the next render call.
}
HdInstancer* HdStrelkaRenderDelegate::CreateInstancer(HdSceneDelegate* delegate, const SdfPath& id)
{
return new HdStrelkaInstancer(delegate, id);
}
void HdStrelkaRenderDelegate::DestroyInstancer(HdInstancer* instancer)
{
delete instancer;
}
HdAovDescriptor HdStrelkaRenderDelegate::GetDefaultAovDescriptor(const TfToken& name) const
{
TF_UNUSED(name);
HdAovDescriptor aovDescriptor;
aovDescriptor.format = HdFormatFloat32Vec4;
aovDescriptor.multiSampled = false;
aovDescriptor.clearValue = GfVec4f(0.0f, 0.0f, 0.0f, 0.0f);
return aovDescriptor;
}
const TfTokenVector& HdStrelkaRenderDelegate::GetSupportedRprimTypes() const
{
return SUPPORTED_RPRIM_TYPES;
}
HdRprim* HdStrelkaRenderDelegate::CreateRprim(const TfToken& typeId, const SdfPath& rprimId)
{
if (typeId == HdPrimTypeTokens->mesh)
{
return new HdStrelkaMesh(rprimId, &mScene);
}
else if (typeId == HdPrimTypeTokens->basisCurves)
{
return new HdStrelkaBasisCurves(rprimId, &mScene);
}
STRELKA_ERROR("Unknown Rprim Type {}", typeId.GetText());
return nullptr;
}
void HdStrelkaRenderDelegate::DestroyRprim(HdRprim* rprim)
{
delete rprim;
}
const TfTokenVector& HdStrelkaRenderDelegate::GetSupportedSprimTypes() const
{
return SUPPORTED_SPRIM_TYPES;
}
HdSprim* HdStrelkaRenderDelegate::CreateSprim(const TfToken& typeId, const SdfPath& sprimId)
{
STRELKA_DEBUG("CreateSprim Type: {}", typeId.GetText());
if (sprimId.IsEmpty())
{
STRELKA_DEBUG("skipping creation of empty sprim path");
return nullptr;
}
HdSprim* res = nullptr;
if (typeId == HdPrimTypeTokens->camera)
{
res = new HdStrelkaCamera(sprimId, mScene);
}
else if (typeId == HdPrimTypeTokens->material)
{
res = new HdStrelkaMaterial(sprimId, m_translator);
}
else if (typeId == HdPrimTypeTokens->rectLight || typeId == HdPrimTypeTokens->diskLight ||
typeId == HdPrimTypeTokens->sphereLight || typeId == HdPrimTypeTokens->distantLight)
{
res = new HdStrelkaLight(sprimId, typeId);
}
else
{
STRELKA_ERROR("Unknown Sprim Type {}", typeId.GetText());
}
return res;
}
HdSprim* HdStrelkaRenderDelegate::CreateFallbackSprim(const TfToken& typeId)
{
const SdfPath& sprimId = SdfPath::EmptyPath();
return CreateSprim(typeId, sprimId);
}
void HdStrelkaRenderDelegate::DestroySprim(HdSprim* sprim)
{
delete sprim;
}
const TfTokenVector& HdStrelkaRenderDelegate::GetSupportedBprimTypes() const
{
return SUPPORTED_BPRIM_TYPES;
}
HdBprim* HdStrelkaRenderDelegate::CreateBprim(const TfToken& typeId, const SdfPath& bprimId)
{
if (typeId == HdPrimTypeTokens->renderBuffer)
{
return new HdStrelkaRenderBuffer(bprimId, mSharedCtx);
}
return nullptr;
}
HdBprim* HdStrelkaRenderDelegate::CreateFallbackBprim(const TfToken& typeId)
{
const SdfPath& bprimId = SdfPath::EmptyPath();
return CreateBprim(typeId, bprimId);
}
void HdStrelkaRenderDelegate::DestroyBprim(HdBprim* bprim)
{
delete bprim;
}
TfToken HdStrelkaRenderDelegate::GetMaterialBindingPurpose() const
{
//return HdTokens->full;
return HdTokens->preview;
}
TfTokenVector HdStrelkaRenderDelegate::GetMaterialRenderContexts() const
{
return TfTokenVector{ HdStrelkaRenderContexts->mtlx, HdStrelkaRenderContexts->mdl };
}
TfTokenVector HdStrelkaRenderDelegate::GetShaderSourceTypes() const
{
return TfTokenVector{ HdStrelkaSourceTypes->mtlx, HdStrelkaSourceTypes->mdl };
}
oka::SharedContext& HdStrelkaRenderDelegate::getSharedContext()
{
return mRenderer->getSharedContext();
}
PXR_NAMESPACE_CLOSE_SCOPE
| 6,338 | C++ | 25.634454 | 122 | 0.718523 |
arhix52/Strelka/src/HdStrelka/BasisCurves.cpp | #include "BasisCurves.h"
#include <log.h>
PXR_NAMESPACE_OPEN_SCOPE
void HdStrelkaBasisCurves::Sync(HdSceneDelegate* sceneDelegate,
HdRenderParam* renderParam,
HdDirtyBits* dirtyBits,
const TfToken& reprToken)
{
TF_UNUSED(renderParam);
TF_UNUSED(reprToken);
HdRenderIndex& renderIndex = sceneDelegate->GetRenderIndex();
const SdfPath& id = GetId();
mName = id.GetText();
STRELKA_INFO("Curve Name: {}", mName.c_str());
if (*dirtyBits & HdChangeTracker::DirtyMaterialId)
{
const SdfPath& materialId = sceneDelegate->GetMaterialId(id);
SetMaterialId(materialId);
}
if (*dirtyBits & HdChangeTracker::DirtyTopology)
{
mTopology = sceneDelegate->GetBasisCurvesTopology(id);
}
if (*dirtyBits & HdChangeTracker::DirtyTransform)
{
m_prototypeTransform = sceneDelegate->GetTransform(id);
}
    bool updateGeometry = (*dirtyBits & HdChangeTracker::DirtyPoints) || (*dirtyBits & HdChangeTracker::DirtyNormals) ||
                          (*dirtyBits & HdChangeTracker::DirtyTopology);
*dirtyBits = HdChangeTracker::Clean;
if (!updateGeometry)
{
return;
}
// m_faces.clear();
mPoints.clear();
mNormals.clear();
_UpdateGeometry(sceneDelegate);
}
bool HdStrelkaBasisCurves::_FindPrimvar(HdSceneDelegate* sceneDelegate,
const TfToken& primvarName,
HdInterpolation& interpolation) const
{
HdInterpolation interpolations[] = {
HdInterpolation::HdInterpolationVertex, HdInterpolation::HdInterpolationFaceVarying,
HdInterpolation::HdInterpolationConstant, HdInterpolation::HdInterpolationUniform,
HdInterpolation::HdInterpolationVarying, HdInterpolation::HdInterpolationInstance
};
for (HdInterpolation i : interpolations)
{
const auto& primvarDescs = GetPrimvarDescriptors(sceneDelegate, i);
for (const HdPrimvarDescriptor& primvar : primvarDescs)
{
if (primvar.name == primvarName)
{
interpolation = i;
return true;
}
}
}
return false;
}
void HdStrelkaBasisCurves::_PullPrimvars(HdSceneDelegate* sceneDelegate,
VtVec3fArray& points,
VtVec3fArray& normals,
VtFloatArray& widths,
bool& indexedNormals,
bool& indexedUVs,
GfVec3f& color,
bool& hasColor) const
{
const SdfPath& id = GetId();
// Handle points.
HdInterpolation pointInterpolation;
bool foundPoints = _FindPrimvar(sceneDelegate, HdTokens->points, pointInterpolation);
if (!foundPoints)
{
STRELKA_ERROR("Points primvar not found!");
return;
}
else if (pointInterpolation != HdInterpolation::HdInterpolationVertex)
{
STRELKA_ERROR("Points primvar is not vertex-interpolated!");
return;
}
VtValue boxedPoints = sceneDelegate->Get(id, HdTokens->points);
points = boxedPoints.Get<VtVec3fArray>();
// Handle color.
HdInterpolation colorInterpolation;
bool foundColor = _FindPrimvar(sceneDelegate, HdTokens->displayColor, colorInterpolation);
if (foundColor && colorInterpolation == HdInterpolation::HdInterpolationConstant)
{
VtValue boxedColors = sceneDelegate->Get(id, HdTokens->displayColor);
const VtVec3fArray& colors = boxedColors.Get<VtVec3fArray>();
color = colors[0];
hasColor = true;
}
HdBasisCurvesTopology topology = GetBasisCurvesTopology(sceneDelegate);
VtIntArray curveVertexCounts = topology.GetCurveVertexCounts();
// Handle normals.
HdInterpolation normalInterpolation;
bool foundNormals = _FindPrimvar(sceneDelegate, HdTokens->normals, normalInterpolation);
if (foundNormals && normalInterpolation == HdInterpolation::HdInterpolationVarying)
{
VtValue boxedNormals = sceneDelegate->Get(id, HdTokens->normals);
normals = boxedNormals.Get<VtVec3fArray>();
indexedNormals = true;
}
// Handle width.
HdInterpolation widthInterpolation;
bool foundWidth = _FindPrimvar(sceneDelegate, HdTokens->widths, widthInterpolation);
if (foundWidth)
{
VtValue boxedWidths = sceneDelegate->Get(id, HdTokens->widths);
widths = boxedWidths.Get<VtFloatArray>();
}
}
void HdStrelkaBasisCurves::_UpdateGeometry(HdSceneDelegate* sceneDelegate)
{
const HdBasisCurvesTopology& topology = mTopology;
const SdfPath& id = GetId();
// Get USD Curve Metadata
mVertexCounts = topology.GetCurveVertexCounts();
TfToken curveType = topology.GetCurveType();
TfToken curveBasis = topology.GetCurveBasis();
TfToken curveWrap = topology.GetCurveWrap();
size_t num_curves = mVertexCounts.size();
size_t num_keys = 0;
    bool indexedNormals = false;
    bool indexedUVs = false;
    bool hasColor = false;
_PullPrimvars(sceneDelegate, mPoints, mNormals, mWidths, indexedNormals, indexedUVs, mColor, hasColor);
_ConvertCurve();
}
HdStrelkaBasisCurves::HdStrelkaBasisCurves(const SdfPath& id, oka::Scene* scene) : HdBasisCurves(id), mScene(scene)
{
}
HdStrelkaBasisCurves::~HdStrelkaBasisCurves()
{
}
HdDirtyBits HdStrelkaBasisCurves::GetInitialDirtyBitsMask() const
{
return HdChangeTracker::DirtyPoints | HdChangeTracker::DirtyNormals | HdChangeTracker::DirtyTopology |
HdChangeTracker::DirtyInstancer | HdChangeTracker::DirtyInstanceIndex | HdChangeTracker::DirtyTransform |
HdChangeTracker::DirtyMaterialId | HdChangeTracker::DirtyPrimvar;
}
HdDirtyBits HdStrelkaBasisCurves::_PropagateDirtyBits(HdDirtyBits bits) const
{
return bits;
}
void HdStrelkaBasisCurves::_InitRepr(const TfToken& reprName, HdDirtyBits* dirtyBits)
{
TF_UNUSED(reprName);
TF_UNUSED(dirtyBits);
}
void HdStrelkaBasisCurves::_ConvertCurve()
{
// calculate phantom points
// https://raytracing-docs.nvidia.com/optix7/guide/index.html#curves#differences-between-curves-spheres-and-triangles
glm::float3 p1 = glm::float3(mPoints[0][0], mPoints[0][1], mPoints[0][2]);
glm::float3 p2 = glm::float3(mPoints[1][0], mPoints[1][1], mPoints[1][2]);
glm::float3 p0 = p1 + (p1 - p2);
mCurvePoints.push_back(p0);
for (const GfVec3f& p : mPoints)
{
mCurvePoints.push_back(glm::float3(p[0], p[1], p[2]));
}
int n = mPoints.size() - 1;
glm::float3 pn = glm::float3(mPoints[n][0], mPoints[n][1], mPoints[n][2]);
glm::float3 pn1 = glm::float3(mPoints[n - 1][0], mPoints[n - 1][1], mPoints[n - 1][2]);
glm::float3 pnn = pn + (pn - pn1);
mCurvePoints.push_back(pnn);
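    // USD stores curve widths as diameters; they are halved here to get radii, and the
    // first/last entries are duplicated to line up with the phantom points added above.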
    assert((mWidths.size() == mPoints.size()) || (mWidths.size() == 1));
    mCurveWidths.push_back(mWidths[0] * 0.5f);
if (mWidths.size() == 1)
{
for (int i = 0; i < mPoints.size(); ++i)
{
mCurveWidths.push_back(mWidths[0] * 0.5);
}
}
else
{
for (const float w : mWidths)
{
mCurveWidths.push_back(w * 0.5f);
}
}
mCurveWidths.push_back(mCurveWidths.back());
for (const int i : mVertexCounts)
{
mCurveVertexCounts.push_back(i);
}
}
const std::vector<glm::float3>& HdStrelkaBasisCurves::GetPoints() const
{
return mCurvePoints;
}
const std::vector<float>& HdStrelkaBasisCurves::GetWidths() const
{
return mCurveWidths;
}
const std::vector<uint32_t>& HdStrelkaBasisCurves::GetVertexCounts() const
{
return mCurveVertexCounts;
}
const GfMatrix4d& HdStrelkaBasisCurves::GetPrototypeTransform() const
{
return m_prototypeTransform;
}
const char* HdStrelkaBasisCurves::getName() const
{
return mName.c_str();
}
PXR_NAMESPACE_CLOSE_SCOPE
| 8,047 | C++ | 30.685039 | 121 | 0.644215 |
arhix52/Strelka/src/HdStrelka/RenderBuffer.cpp | #include "RenderBuffer.h"
#include "render.h"
#include <pxr/base/gf/vec3i.h>
PXR_NAMESPACE_OPEN_SCOPE
HdStrelkaRenderBuffer::HdStrelkaRenderBuffer(const SdfPath& id, oka::SharedContext* ctx) : HdRenderBuffer(id), mCtx(ctx)
{
m_isMapped = false;
m_isConverged = false;
m_bufferMem = nullptr;
}
HdStrelkaRenderBuffer::~HdStrelkaRenderBuffer()
{
_Deallocate();
}
bool HdStrelkaRenderBuffer::Allocate(const GfVec3i& dimensions, HdFormat format, bool multiSampled)
{
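    // Only 2D render buffers are supported; any request with a depth other than 1 is rejected.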
if (dimensions[2] != 1)
{
return false;
}
m_width = dimensions[0];
m_height = dimensions[1];
m_format = format;
m_isMultiSampled = multiSampled;
size_t size = m_width * m_height * HdDataSizeOfFormat(m_format);
m_bufferMem = realloc(m_bufferMem, size);
if (!m_bufferMem)
{
return false;
}
if (mResult)
{
mResult->resize(m_width, m_height);
}
else
{
oka::BufferDesc desc{};
desc.format = oka::BufferFormat::FLOAT4;
desc.width = m_width;
desc.height = m_height;
mResult = mCtx->mRender->createBuffer(desc);
}
if (!mResult)
{
return false;
}
return true;
}
unsigned int HdStrelkaRenderBuffer::GetWidth() const
{
return m_width;
}
unsigned int HdStrelkaRenderBuffer::GetHeight() const
{
return m_height;
}
unsigned int HdStrelkaRenderBuffer::GetDepth() const
{
return 1u;
}
HdFormat HdStrelkaRenderBuffer::GetFormat() const
{
return m_format;
}
bool HdStrelkaRenderBuffer::IsMultiSampled() const
{
return m_isMultiSampled;
}
VtValue HdStrelkaRenderBuffer::GetResource(bool multiSampled) const
{
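    // The renderer-side buffer is exposed as an opaque pointer; the consumer is expected
    // to cast it back to the renderer's buffer type.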
    TF_UNUSED(multiSampled);
    return VtValue((uint8_t*)mResult);
}
bool HdStrelkaRenderBuffer::IsConverged() const
{
return m_isConverged;
}
void HdStrelkaRenderBuffer::SetConverged(bool converged)
{
m_isConverged = converged;
}
void* HdStrelkaRenderBuffer::Map()
{
m_isMapped = true;
return m_bufferMem;
}
bool HdStrelkaRenderBuffer::IsMapped() const
{
return m_isMapped;
}
void HdStrelkaRenderBuffer::Unmap()
{
m_isMapped = false;
}
void HdStrelkaRenderBuffer::Resolve()
{
}
void HdStrelkaRenderBuffer::_Deallocate()
{
free(m_bufferMem);
delete mResult;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 2,264 | C++ | 16.558139 | 120 | 0.671378 |
arhix52/Strelka/src/HdStrelka/Tokens.h | #pragma once
#include <pxr/base/tf/staticTokens.h>
PXR_NAMESPACE_OPEN_SCOPE
#define HD_STRELKA_SETTINGS_TOKENS \
((spp, "spp"))((max_bounces, "max-bounces"))
// mtlx node identifier is given by usdMtlx.
#define HD_STRELKA_NODE_IDENTIFIER_TOKENS \
(mtlx)(mdl)
#define HD_STRELKA_SOURCE_TYPE_TOKENS \
(mtlx)(mdl)
#define HD_STRELKA_DISCOVERY_TYPE_TOKENS \
(mtlx)(mdl)
#define HD_STRELKA_RENDER_CONTEXT_TOKENS \
(mtlx)(mdl)
#define HD_STRELKA_NODE_CONTEXT_TOKENS \
(mtlx)(mdl)
#define HD_STRELKA_NODE_METADATA_TOKENS \
(subIdentifier)
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaSettingsTokens, HD_STRELKA_SETTINGS_TOKENS);
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaNodeIdentifiers, HD_STRELKA_NODE_IDENTIFIER_TOKENS);
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaSourceTypes, HD_STRELKA_SOURCE_TYPE_TOKENS);
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaDiscoveryTypes, HD_STRELKA_DISCOVERY_TYPE_TOKENS);
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaRenderContexts, HD_STRELKA_RENDER_CONTEXT_TOKENS);
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaNodeContexts, HD_STRELKA_NODE_CONTEXT_TOKENS);
TF_DECLARE_PUBLIC_TOKENS(HdStrelkaNodeMetadata, HD_STRELKA_NODE_METADATA_TOKENS);
PXR_NAMESPACE_CLOSE_SCOPE
| 1,175 | C | 29.947368 | 86 | 0.771064 |
arhix52/Strelka/src/HdStrelka/Camera.h | #pragma once
#include <pxr/imaging/hd/camera.h>
#include <scene/scene.h>
PXR_NAMESPACE_OPEN_SCOPE
class HdStrelkaCamera final : public HdCamera
{
public:
HdStrelkaCamera(const SdfPath& id, oka::Scene& scene);
~HdStrelkaCamera() override;
public:
float GetVFov() const;
uint32_t GetCameraIndex() const;
public:
void Sync(HdSceneDelegate* sceneDelegate,
HdRenderParam* renderParam,
HdDirtyBits* dirtyBits) override;
HdDirtyBits GetInitialDirtyBitsMask() const override;
private:
oka::Camera _ConstructStrelkaCamera();
float m_vfov;
oka::Scene& mScene;
uint32_t mCameraIndex = -1;
};
PXR_NAMESPACE_CLOSE_SCOPE
| 687 | C | 17.594594 | 58 | 0.697234 |
arhix52/Strelka/src/HdStrelka/MdlDiscoveryPlugin.cpp | #include "MdlDiscoveryPlugin.h"
#include <pxr/base/tf/staticTokens.h>
//#include "Tokens.h"
PXR_NAMESPACE_OPEN_SCOPE
// clang-format off
TF_DEFINE_PRIVATE_TOKENS(_tokens,
(mdl)
);
// clang-format on
NDR_REGISTER_DISCOVERY_PLUGIN(HdStrelkaMdlDiscoveryPlugin);
NdrNodeDiscoveryResultVec HdStrelkaMdlDiscoveryPlugin::DiscoverNodes(const Context& ctx)
{
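    // Advertise a single generic "mdl" node; no filesystem search is performed
    // (GetSearchURIs below returns an empty list).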
NdrNodeDiscoveryResultVec result;
NdrNodeDiscoveryResult mdlNode(
/* identifier */ _tokens->mdl,
/* version */ NdrVersion(1),
/* name */ _tokens->mdl,
/* family */ TfToken(),
/* discoveryType */ _tokens->mdl,
/* sourceType */ _tokens->mdl,
/* uri */ std::string(),
/* resolvedUri */ std::string());
result.push_back(mdlNode);
return result;
}
const NdrStringVec& HdStrelkaMdlDiscoveryPlugin::GetSearchURIs() const
{
static const NdrStringVec s_searchURIs;
return s_searchURIs;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 996 | C++ | 23.317073 | 88 | 0.646586 |
arhix52/Strelka/src/HdStrelka/Material.cpp | #include "Material.h"
#include <pxr/base/gf/vec2f.h>
#include <pxr/usd/sdr/registry.h>
#include <pxr/usdImaging/usdImaging/tokens.h>
#include <log.h>
PXR_NAMESPACE_OPEN_SCOPE
HdStrelkaMaterial::HdStrelkaMaterial(const SdfPath& id, const MaterialNetworkTranslator& translator)
: HdMaterial(id), m_translator(translator)
{
}
HdStrelkaMaterial::~HdStrelkaMaterial() = default;
HdDirtyBits HdStrelkaMaterial::GetInitialDirtyBitsMask() const
{
// return DirtyBits::DirtyParams;
return DirtyBits::AllDirty;
}
void HdStrelkaMaterial::Sync(HdSceneDelegate* sceneDelegate, HdRenderParam* renderParam, HdDirtyBits* dirtyBits)
{
TF_UNUSED(renderParam);
const bool pullMaterial = (*dirtyBits & DirtyBits::DirtyParams) != 0u;
*dirtyBits = DirtyBits::Clean;
if (!pullMaterial)
{
return;
}
const SdfPath& id = GetId();
const std::string& name = id.GetString();
STRELKA_INFO("Hydra Material: {}", name.c_str());
const VtValue& resource = sceneDelegate->GetMaterialResource(id);
if (!resource.IsHolding<HdMaterialNetworkMap>())
{
return;
}
auto networkMap = resource.GetWithDefault<HdMaterialNetworkMap>();
HdMaterialNetwork& surfaceNetwork = networkMap.map[HdMaterialTerminalTokens->surface];
bool isUsdPreviewSurface = false;
HdMaterialNode* previewSurfaceNode = nullptr;
// store material parameters
uint32_t nodeIdx = 0;
for (auto& node : surfaceNetwork.nodes)
{
STRELKA_DEBUG("Node #{}: {}", nodeIdx, node.path.GetText());
if (node.identifier == UsdImagingTokens->UsdPreviewSurface)
{
previewSurfaceNode = &node;
isUsdPreviewSurface = true;
}
for (const auto& params : node.parameters)
{
            const std::string& paramName = params.first.GetString();
            const TfType type = params.second.GetType();
            STRELKA_DEBUG("Node name: {}\tParam name: {}\t{}", node.path.GetName(), paramName.c_str(),
                          params.second.GetTypeName().c_str());
if (type.IsA<GfVec3f>())
{
oka::MaterialManager::Param param;
param.name = params.first;
param.type = oka::MaterialManager::Param::Type::eFloat3;
GfVec3f val = params.second.Get<GfVec3f>();
param.value.resize(sizeof(val));
memcpy(param.value.data(), &val, sizeof(val));
mMaterialParams.push_back(param);
}
else if (type.IsA<GfVec4f>())
{
oka::MaterialManager::Param param;
param.name = params.first;
param.type = oka::MaterialManager::Param::Type::eFloat4;
GfVec4f val = params.second.Get<GfVec4f>();
param.value.resize(sizeof(val));
memcpy(param.value.data(), &val, sizeof(val));
mMaterialParams.push_back(param);
}
else if (type.IsA<float>())
{
oka::MaterialManager::Param param;
param.name = params.first;
param.type = oka::MaterialManager::Param::Type::eFloat;
float val = params.second.Get<float>();
param.value.resize(sizeof(val));
memcpy(param.value.data(), &val, sizeof(val));
mMaterialParams.push_back(param);
}
else if (type.IsA<int>())
{
oka::MaterialManager::Param param;
param.name = params.first;
param.type = oka::MaterialManager::Param::Type::eInt;
int val = params.second.Get<int>();
param.value.resize(sizeof(val));
memcpy(param.value.data(), &val, sizeof(val));
mMaterialParams.push_back(param);
}
else if (type.IsA<bool>())
{
oka::MaterialManager::Param param;
param.name = params.first;
param.type = oka::MaterialManager::Param::Type::eBool;
bool val = params.second.Get<bool>();
param.value.resize(sizeof(val));
memcpy(param.value.data(), &val, sizeof(val));
mMaterialParams.push_back(param);
}
else if (type.IsA<SdfAssetPath>())
{
oka::MaterialManager::Param param;
param.name = node.path.GetName() + "_" + std::string(params.first);
param.type = oka::MaterialManager::Param::Type::eTexture;
const SdfAssetPath val = params.second.Get<SdfAssetPath>();
// STRELKA_DEBUG("path: {}", val.GetAssetPath().c_str());
STRELKA_DEBUG("path: {}", val.GetResolvedPath().c_str());
// std::string texPath = val.GetAssetPath();
std::string texPath = val.GetResolvedPath();
if (!texPath.empty())
{
param.value.resize(texPath.size());
memcpy(param.value.data(), texPath.data(), texPath.size());
mMaterialParams.push_back(param);
}
}
else if (type.IsA<GfVec2f>())
{
oka::MaterialManager::Param param;
param.name = params.first;
param.type = oka::MaterialManager::Param::Type::eFloat2;
GfVec2f val = params.second.Get<GfVec2f>();
param.value.resize(sizeof(val));
memcpy(param.value.data(), &val, sizeof(val));
mMaterialParams.push_back(param);
}
else if (type.IsA<TfToken>())
{
const TfToken val = params.second.Get<TfToken>();
STRELKA_DEBUG("TfToken: {}", val.GetText());
}
else if (type.IsA<std::string>())
{
const std::string val = params.second.Get<std::string>();
STRELKA_DEBUG("String: {}", val.c_str());
}
else
{
STRELKA_ERROR("Unknown parameter type!\n");
}
}
nodeIdx++;
}
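    // Choose the translation path: UsdPreviewSurface networks are converted to MaterialX
    // source code, anything else is treated as a reference to an MDL module.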
bool isVolume = false;
const HdMaterialNetwork2 network = HdConvertToHdMaterialNetwork2(networkMap, &isVolume);
if (isVolume)
{
STRELKA_ERROR("Volume %s unsupported", id.GetText());
return;
}
if (isUsdPreviewSurface)
{
mMaterialXCode = m_translator.ParseNetwork(id, network);
// STRELKA_DEBUG("MaterialX code:\n {}\n", mMaterialXCode.c_str());
}
else
{
// MDL
const bool res = MaterialNetworkTranslator::ParseMdlNetwork(network, mMdlFileUri, mMdlSubIdentifier);
if (!res)
{
STRELKA_ERROR("Failed to translate material, replace to default!");
mMdlFileUri = "default.mdl";
mMdlSubIdentifier = "default_material";
}
mIsMdl = true;
}
}
const std::string& HdStrelkaMaterial::GetStrelkaMaterial() const
{
return mMaterialXCode;
}
PXR_NAMESPACE_CLOSE_SCOPE
| 7,130 | C++ | 35.015151 | 112 | 0.551192 |