# File: NVlabs/ACID/ACID/src/conv_onet/generation.py
import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange, tqdm
import trimesh
from src.utils import libmcubes, common_util
from src.common import make_3d_grid, normalize_coord, add_key, coord2index
from src.utils.libmise import MISE
import time
import math
counter = 0
class Generator3D(object):
''' Generator class for Occupancy Networks.
It provides functions to generate the final mesh as well as refinement options.
Args:
model (nn.Module): trained Occupancy Network model
points_batch_size (int): batch size for points evaluation
threshold (float): threshold value
refinement_step (int): number of refinement steps
device (device): pytorch device
resolution0 (int): start resolution for MISE
upsampling_steps (int): number of upsampling steps
with_normals (bool): whether normals should be estimated
padding (float): how much padding should be used for MISE
sample (bool): whether z should be sampled
input_type (str): type of input
vol_info (dict): volume information
vol_bound (dict): volume boundary
simplify_nfaces (int): number of faces the mesh should be simplified to
'''
def __init__(self, model, points_batch_size=100000,
threshold=0.5, refinement_step=0, device=None,
resolution0=16, upsampling_steps=3,
with_normals=False, padding=0.1, sample=False,
input_type=None,
vol_info=None,
vol_bound=None,
simplify_nfaces=None):
self.model = model.to(device)
self.points_batch_size = points_batch_size
self.refinement_step = refinement_step
self.threshold = threshold
self.device = device
self.resolution0 = resolution0
self.upsampling_steps = upsampling_steps
self.with_normals = with_normals
self.input_type = input_type
self.padding = padding
self.sample = sample
self.simplify_nfaces = simplify_nfaces
# for pointcloud_crop
self.vol_bound = vol_bound
if vol_info is not None:
self.input_vol, _, _ = vol_info
def generate_mesh(self, data, return_stats=True):
''' Generates the output mesh.
Args:
data (tensor): data tensor
return_stats (bool): whether stats should be returned
'''
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
stats_dict = {}
t0 = time.time()
# obtain features for all crops
with torch.no_grad():
c = self.model.encode_inputs(data)
if type(c) is tuple:
for cs in c:
for k,v in cs.items():
cs[k] = v[0].unsqueeze(0)
else:
for k,v in c.items():
c[k] = v[0].unsqueeze(0)
stats_dict['time (encode inputs)'] = time.time() - t0
mesh = self.generate_from_latent(c, stats_dict=stats_dict)
if return_stats:
return mesh, stats_dict
else:
return mesh
def generate_from_latent(self, c=None, stats_dict={}, **kwargs):
''' Generates mesh from latent.
Works for shapes normalized to a unit cube
Args:
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
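# Convert the probability threshold to logit space (eval_points returns
# raw logits): logit(t) = log(t) - log(1 - t).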
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = points / mesh_extractor.resolution
# Normalize to bounding box
pointsf = box_size * (pointsf - 0.5)
pointsf = torch.FloatTensor(pointsf).to(self.device)
# Evaluate model and update
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
# Extract mesh
stats_dict['time (eval points)'] = time.time() - t0
mesh = self.extract_mesh(value_grid, c, stats_dict=stats_dict)
return mesh
def eval_points(self, p, c=None, vol_bound=None, **kwargs):
''' Evaluates the occupancy values for the points.
Args:
p (tensor): points
c (tensor): encoded feature volumes
'''
p_split = torch.split(p, self.points_batch_size)
occ_hats = []
for pi in p_split:
pi = pi.unsqueeze(0).to(self.device)
with torch.no_grad():
occ_hat = self.model.eval_points(pi, c, **kwargs)['occ'].logits
occ_hats.append(occ_hat.squeeze(0).detach().cpu())
occ_hat = torch.cat(occ_hats, dim=0)
return occ_hat
def extract_mesh(self, occ_hat, c=None, stats_dict=dict()):
''' Extracts the mesh from the predicted occupancy grid.
Args:
occ_hat (tensor): value grid of occupancies
c (tensor): encoded feature volumes
stats_dict (dict): stats dictionary
'''
# Some short hands
n_x, n_y, n_z = occ_hat.shape
box_size = 1 + self.padding
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
# Make sure that mesh is watertight
t0 = time.time()
occ_hat_padded = np.pad(
occ_hat, 1, 'constant', constant_values=-1e6)
vertices, triangles = libmcubes.marching_cubes(
occ_hat_padded, threshold)
stats_dict['time (marching cubes)'] = time.time() - t0
# Strange behaviour in libmcubes: vertices are shifted by 0.5
vertices -= 0.5
# Undo padding
vertices -= 1
if self.vol_bound is not None:
# Scale the mesh back to its original metric
bb_min = self.vol_bound['query_vol'][:, 0].min(axis=0)
bb_max = self.vol_bound['query_vol'][:, 1].max(axis=0)
mc_unit = max(bb_max - bb_min) / (self.vol_bound['axis_n_crop'].max() * self.resolution0*2**self.upsampling_steps)
vertices = vertices * mc_unit + bb_min
else:
# Normalize to bounding box
vertices /= np.array([n_x-1, n_y-1, n_z-1])
vertices = box_size * (vertices - 0.5)
# Create mesh
mesh = trimesh.Trimesh(vertices / (1., 1., 3), triangles,
vertex_normals=None,
process=False)
# Directly return if mesh is empty
if vertices.shape[0] == 0:
return mesh
# TODO: normals are lost here
if self.simplify_nfaces is not None:
t0 = time.time()
from src.utils.libsimplify import simplify_mesh
mesh = simplify_mesh(mesh, self.simplify_nfaces, 5.)
stats_dict['time (simplify)'] = time.time() - t0
# Refine mesh
if self.refinement_step > 0:
t0 = time.time()
self.refine_mesh(mesh, occ_hat, c)
stats_dict['time (refine)'] = time.time() - t0
return mesh
def generate_pointcloud(self, data, threshold=0.75, use_gt_occ=False):
self.model.eval()
device = self.device
for k,v in data.items():
data[k] = v.to(device)
stats_dict = {}
t0 = time.time()
# obtain features for all crops
with torch.no_grad():
c = self.model.encode_inputs(data)
pts = data['sampled_pts']
B,_,N,C = pts.shape
pts = pts.reshape([B*2,N,C])
p_split = torch.split(pts, self.points_batch_size, dim=1)  # split along the point dimension, not the channel dimension
occ_hats = []
features = []
flows = []
for pi in p_split:
with torch.no_grad():
outputs = self.model.eval_points(pi, c)
occ_hats.append((outputs['occ'].probs > threshold).detach().cpu())
if 'corr' in outputs:
features.append(outputs['corr'].detach().cpu())
if 'flow' in outputs:
flows.append(outputs['flow'].detach().cpu())
pts = pts.cpu().numpy()
occ_hat = torch.cat(occ_hats, dim=1).numpy()
if use_gt_occ:
occ_hat = data['sampled_occ'].reshape([B*2, N]).cpu().numpy()
pos_pts0 = pts[0][occ_hat[0] == 1.].reshape((-1,3))
pos_idx0 = common_util.subsample_points(pos_pts0, resolution=0.013)
pos_pts0 = pos_pts0[pos_idx0]
pos_pts1 = pts[1][occ_hat[1] == 1.].reshape((-1,3))
pos_idx1 = common_util.subsample_points(pos_pts1, resolution=0.013)
pos_pts1 = pos_pts1[pos_idx1]
pos_pts = np.concatenate([pos_pts0, pos_pts1], axis=0) / (1.,1.,3.)
if len(features) != 0:
feature = torch.cat(features, dim=1).numpy()
f_dim = feature.shape[-1]
pos_f0 = feature[0][occ_hat[0] == 1.].reshape((-1,f_dim))
pos_f1 = feature[1][occ_hat[1] == 1.].reshape((-1,f_dim))
pos_f0 = pos_f0[pos_idx0]
pos_f1 = pos_f1[pos_idx1]
pos_f = np.concatenate([pos_f0, pos_f1], axis=0)
if pos_f.shape[0] < 100:
pcloud_both = pos_pts
else:
tsne_result = common_util.embed_tsne(pos_f)
colors = common_util.get_color_map(tsne_result)
pcloud_both = np.concatenate([pos_pts, colors], axis=1)
else:
pcloud_both = pos_pts
pcloud0 = pcloud_both[:pos_pts0.shape[0]]
pcloud1 = pcloud_both[pos_pts0.shape[0]:]
if len(flows) != 0:
flow = torch.cat(flows, dim=1).numpy() / 10.
pos_f0 = flow[0][occ_hat[0] == 1.].reshape((-1,3))
pos_f1 = flow[1][occ_hat[1] == 1.].reshape((-1,3))
pos_f0 = pos_f0[pos_idx0]
pos_f1 = pos_f1[pos_idx1]
pcloud_unroll_0 = pcloud0.copy()
pcloud_unroll_0[:,:3] += pos_f0 / (1.,1.,3.)
pcloud_unroll_1 = pcloud1.copy()
pcloud_unroll_1[:,:3] += pos_f1 / (1.,1.,3.)
return pcloud0, pcloud1, pcloud_unroll_0, pcloud_unroll_1
return pcloud0, pcloud1
def refine_mesh(self, mesh, occ_hat, c=None):
''' Refines the predicted mesh.
Args:
mesh (trimesh object): predicted mesh
occ_hat (tensor): predicted occupancy grid
c (tensor): latent conditioned code c
'''
self.model.eval()
# Some shorthands
n_x, n_y, n_z = occ_hat.shape
assert(n_x == n_y == n_z)
# threshold = np.log(self.threshold) - np.log(1. - self.threshold)
threshold = self.threshold
# Vertex parameter
v0 = torch.FloatTensor(mesh.vertices).to(self.device)
v = torch.nn.Parameter(v0.clone())
# Faces of mesh
faces = torch.LongTensor(mesh.faces).to(self.device)
# Start optimization
optimizer = optim.RMSprop([v], lr=1e-4)
for it_r in trange(self.refinement_step):
optimizer.zero_grad()
# Loss
face_vertex = v[faces]
eps = np.random.dirichlet((0.5, 0.5, 0.5), size=faces.shape[0])
eps = torch.FloatTensor(eps).to(self.device)
face_point = (face_vertex * eps[:, :, None]).sum(dim=1)
face_v1 = face_vertex[:, 1, :] - face_vertex[:, 0, :]
face_v2 = face_vertex[:, 2, :] - face_vertex[:, 1, :]
face_normal = torch.cross(face_v1, face_v2)
face_normal = face_normal / \
(face_normal.norm(dim=1, keepdim=True) + 1e-10)
face_value = torch.sigmoid(
self.model.eval_points(face_point.unsqueeze(0), c)['occ'].logits
)
normal_target = -autograd.grad(
[face_value.sum()], [face_point], create_graph=True)[0]
normal_target = \
normal_target / \
(normal_target.norm(dim=1, keepdim=True) + 1e-10)
loss_target = (face_value - threshold).pow(2).mean()
loss_normal = \
(face_normal - normal_target).pow(2).sum(dim=1).mean()
loss = loss_target + 0.01 * loss_normal
# Update
loss.backward()
optimizer.step()
mesh.vertices = v.data.cpu().numpy()
return mesh
def generate_occ_grid(self, c=None, stats_dict={}, **kwargs):
''' Generates mesh from latent.
Works for shapes normalized to a unit cube
Args:
c (tensor): latent conditioned code c
stats_dict (dict): stats dictionary
'''
threshold = np.log(self.threshold) - np.log(1. - self.threshold)
t0 = time.time()
# Compute bounding box size
box_size = 1 + self.padding
# Shortcut
if self.upsampling_steps == 0:
nx = self.resolution0
pointsf = box_size * make_3d_grid(
(-0.5,)*3, (0.5,)*3, (nx,)*3
)
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
value_grid = values.reshape(nx, nx, nx)
else:
mesh_extractor = MISE(
self.resolution0, self.upsampling_steps, threshold)
points = mesh_extractor.query()
while points.shape[0] != 0:
# Query points
pointsf = points / mesh_extractor.resolution
# Normalize to bounding box
pointsf = box_size * (pointsf - 0.5)
pointsf = torch.FloatTensor(pointsf).to(self.device)
# Evaluate model and update
values = self.eval_points(pointsf, c, **kwargs).cpu().numpy()
values = values.astype(np.float64)
mesh_extractor.update(points, values)
points = mesh_extractor.query()
value_grid = mesh_extractor.to_dense()
return value_grid
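# A hedged usage sketch (not from the original file): drives Generator3D with
# a stand-in model whose occupancy is a centered sphere, so the MISE +
# marching-cubes path above can be exercised without trained weights. The
# sphere radius, resolutions, and data dict key are arbitrary illustrative
# choices.
if __name__ == "__main__":
    from torch import distributions as dist

    class _SphereModel(torch.nn.Module):
        def encode_inputs(self, data):
            return {'c': torch.zeros(1, 1)}
        def eval_points(self, p, c, **kwargs):
            # positive logits inside a sphere of radius 0.35
            logits = (0.35 - p.norm(dim=-1)) * 20.
            return {'occ': dist.Bernoulli(logits=logits)}

    gen = Generator3D(_SphereModel(), resolution0=16, upsampling_steps=2,
                      device=torch.device('cpu'))
    mesh, stats = gen.generate_mesh({'c': torch.zeros(1, 1)})
    print(mesh, stats)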
# File: NVlabs/ACID/ACID/src/conv_onet/models/decoder.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from src.common import normalize_coordinate, normalize_3d_coordinate, map2local
class GeomDecoder(nn.Module):
''' Decoder.
Instead of conditioning on global features, on plane/volume local features.
Args:
dim (int): input dimension
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
n_blocks (int): number of ResnetBlockFC blocks
leaky (bool): whether to use leaky ReLUs
sample_mode (str): sampling feature strategy, bilinear|nearest
padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
def __init__(self, dim=3, c_dim=128,
corr_dim=0, corr_head=True,
hidden_size=256, n_blocks=5, leaky=False,
sample_mode='bilinear', padding=0.1):
super().__init__()
self.c_dim = c_dim
self.n_blocks = n_blocks
self.corr_dim = corr_dim
self.corr_head = corr_head
self.fc_c_occ = nn.ModuleList([
nn.Linear(c_dim, hidden_size) for i in range(n_blocks)
])
self.fc_p = nn.Linear(dim, hidden_size)
self.blocks_occ = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.fc_occ = nn.Linear(hidden_size, 1)
if self.corr_dim != 0 and corr_head:
self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.sample_mode = sample_mode
self.padding = padding
def sample_plane_feature(self, p, c, plane='xz'):
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
xy = xy[:, :, None].float()
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
return c
def forward(self, p, c_plane, **kwargs):
c = 0
c += self.sample_plane_feature(p, c_plane['xz'], plane='xz')
c += self.sample_plane_feature(p, c_plane['xy'], plane='xy')
c += self.sample_plane_feature(p, c_plane['yz'], plane='yz')
c = c.transpose(1, 2)
p = p.float()
x = self.fc_p(p)
net = x
for i in range(self.n_blocks):
net = net + self.fc_c_occ[i](c)
net = self.blocks_occ[i](net)
results = {}
if self.corr_dim != 0 and not self.corr_head:
results['corr'] = net
net = self.actvn(net)
results['occ'] = self.fc_occ(net).squeeze(-1)
if self.corr_dim != 0 and self.corr_head:
results['corr'] = self.fc_out_corr(net)
return results
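# Shape sketch (illustrative, not from the original code): with query points
# p of shape (B, N, 3) and plane features c_plane = {'xz', 'xy', 'yz'}, each
# of shape (B, c_dim, H, W), GeomDecoder.forward returns {'occ': (B, N)} plus
# {'corr': (B, N, corr_dim)} when corr_dim != 0 and corr_head is True.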
class CombinedDecoder(nn.Module):
''' Decoder.
Instead of conditioning on global features, on plane/volume local features.
Args:
dim (int): input dimension
c_dim (int): dimension of latent conditioned code c
hidden_size (int): hidden size of Decoder network
n_blocks (int): number of ResnetBlockFC blocks
leaky (bool): whether to use leaky ReLUs
sample_mode (str): sampling feature strategy, bilinear|nearest
padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
'''
def __init__(self, dim=3, c_per_dim=128, c_act_dim=128,
corr_dim=0, corr_head=True,
hidden_size=256, n_blocks=5, leaky=False,
sample_mode='bilinear', padding=0.1, fuse=True, detach=False, anneal_gradient=True):
super().__init__()
self.c_per_dim = c_per_dim
self.c_act_dim = c_act_dim
self.n_blocks = n_blocks
self.corr_dim = corr_dim
self.corr_head = corr_head
self.fuse = fuse
self.detach = detach
self.anneal_gradient = anneal_gradient
self.fc_c_per = nn.ModuleList([
nn.Linear(c_per_dim, hidden_size) for i in range(n_blocks)
])
self.fc_c_act = nn.ModuleList([
nn.Linear(c_act_dim, hidden_size) for i in range(n_blocks)
])
if self.fuse:
self.fc_c_merge = nn.ModuleList([
nn.Linear(hidden_size*2, hidden_size) for i in range(n_blocks)
])
self.fc_p_per = nn.Linear(dim, hidden_size)
self.fc_p_act = nn.Linear(dim, hidden_size)
self.blocks_per = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.blocks_act = nn.ModuleList([
ResnetBlockFC(hidden_size) for i in range(n_blocks)
])
self.fc_occ = nn.Linear(hidden_size, 1)
self.fc_flow = nn.Linear(hidden_size, 3)
if self.corr_dim != 0 and corr_head:
self.fc_out_corr = nn.Linear(hidden_size, corr_dim)
if self.fuse:
self.fc_act_corr_merge = nn.Linear(hidden_size+corr_dim, hidden_size)
if not leaky:
self.actvn = F.relu
else:
self.actvn = lambda x: F.leaky_relu(x, 0.2)
self.sample_mode = sample_mode
self.padding = padding
def sample_plane_feature(self, p, c, plane='xz'):
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
xy = xy[:, :, None].float()
vgrid = 2.0 * xy - 1.0 # normalize to (-1, 1)
c = F.grid_sample(c, vgrid, padding_mode='border', align_corners=True, mode=self.sample_mode).squeeze(-1)
return c
def decode_perception(self, p, c_per_plane):
c_per = 0
c_per += self.sample_plane_feature(p, c_per_plane['xz'], plane='xz')
c_per += self.sample_plane_feature(p, c_per_plane['xy'], plane='xy')
c_per += self.sample_plane_feature(p, c_per_plane['yz'], plane='yz')
c_per = c_per.transpose(1, 2)
p = p.float()
net_per = self.fc_p_per(p)
features = []
for i in range(self.n_blocks):
net_per = net_per + self.fc_c_per[i](c_per)
net_per = self.blocks_per[i](net_per)
if self.detach:
features.append(net_per.detach())
else:
features.append(net_per)
net_per = self.actvn(net_per)
results = {}
results['occ'] = self.fc_occ(net_per).squeeze(-1)
if self.corr_dim != 0 and self.corr_head:
corr = self.fc_out_corr(net_per)
features.append(corr)
results['corr'] = corr
# if self.anneal_gradient:
# for i,p in enumerate(features):
# features[i] = p * 0.1 + p.detach() * 0.9
return results, features
def decode_action(self, p, c_act_plane, per_features):
c_act = 0
c_act += self.sample_plane_feature(p, c_act_plane['xz'], plane='xz')
c_act += self.sample_plane_feature(p, c_act_plane['xy'], plane='xy')
c_act += self.sample_plane_feature(p, c_act_plane['yz'], plane='yz')
c_act = c_act.transpose(1, 2)
p = p.float()
net_act = self.fc_p_act(p)
for i in range(self.n_blocks):
net_act = net_act + self.fc_c_act[i](c_act)
if self.fuse:
net_act = self.blocks_act[i](
self.fc_c_merge[i](
torch.cat((net_act, per_features[i]), dim=-1)))
# (net_per.detach()*0.9+net_per * 0.1)), dim=-1)))
else:
net_act = self.blocks_act[i](net_act)
net_act = self.actvn(net_act)
if self.corr_dim != 0 and self.corr_head:
if self.fuse:
net_act = self.fc_act_corr_merge(
torch.cat((net_act, per_features[-1].detach()), dim=-1))
return {'flow': self.fc_flow(net_act)}
def forward(self, p, c_per_plane, c_act_plane):
results, per_features = self.decode_perception(p, c_per_plane)
results['flow'] = self.decode_action(p, c_act_plane, per_features)['flow']
return results
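# A hedged smoke test (not from the original file): run CombinedDecoder on
# random tensors to confirm the expected output shapes. Batch size, point
# count, plane resolution, and corr_dim are arbitrary illustrative choices.
if __name__ == "__main__":
    B, N, reso = 2, 128, 32
    planes_per = {k: torch.randn(B, 128, reso, reso) for k in ('xz', 'xy', 'yz')}
    planes_act = {k: torch.randn(B, 128, reso, reso) for k in ('xz', 'xy', 'yz')}
    p = torch.rand(B, N, 3) - 0.5
    dec = CombinedDecoder(c_per_dim=128, c_act_dim=128, corr_dim=16)
    out = dec(p, planes_per, planes_act)
    # expected: occ (B, N), corr (B, N, 16), flow (B, N, 3)
    print({k: tuple(v.shape) for k, v in out.items()})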
# File: NVlabs/ACID/ACID/src/conv_onet/models/__init__.py
import torch
import numpy as np
import torch.nn as nn
from torch import distributions as dist
from src.conv_onet.models import decoder
from src.utils import plushsim_util
# Decoder dictionary
decoder_dict = {
'geom_decoder': decoder.GeomDecoder,
'combined_decoder': decoder.CombinedDecoder,
}
class ConvImpDyn(nn.Module):
def __init__(self, obj_per_encoder, obj_act_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_per_encoder = obj_per_encoder.to(device)
self.obj_act_encoder = obj_act_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
def forward(self, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
#############
c_per, c_act = self.encode_inputs(inputs)
return self.decode(inputs, c_per, c_act, **kwargs)
def forward_perception(self, inputs, filter=True):
c_per, c_env = self.encode_perception(inputs, merge_env_feature=False)
for k in c_per.keys():
env_f = self.env_upsample(c_env[k])
c_env[k] = env_f
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
# get curr observation state and features
p = inputs['sampled_pts']
if len(p.shape) > 3:
B,_,N,C = p.shape
curr_p = p.reshape([B*2,N,C])
else:
curr_p = p
curr_state, per_features = self.decoder.decode_perception(curr_p, c_per)
occ_pred = dist.Bernoulli(logits=curr_state['occ']).probs >= 0.5
curr_state['occ'] = occ_pred
if filter:
curr_p = curr_p[occ_pred]
if 'corr' in curr_state:
curr_state['corr'] = curr_state['corr'][occ_pred]
for i,p in enumerate(per_features):
per_features[i] = p[occ_pred]
return c_per, c_env, curr_p, curr_state, per_features
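# Rollout overview: advance the point set through an action sequence by
# repeatedly rendering the currently visible points, encoding the
# grasp/target action into plane features, and integrating the decoded
# per-point flow (scaled by 1/10). The (1200, 1200, 400) / (1.1, 1.1, 1.1)
# + (0, 0, 180) transform appears to map normalized coordinates back into
# the simulator workspace used by plushsim_util.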
def rollout(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
import time
# from functools import partial
# render_pts_func = partial(plushsim_util.render_points, return_index=True)
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
act_traj = []
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]  # note: unused in this synchronous variant
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
start_time = time.time()
c_act, act_partial = self.get_action_encoding(curr_pts[i], g, t, c_env)
total_time_act_render += time.time() - start_time
act_traj.append(act_partial)
start_time = time.time()
flow = self.decoder.decode_action(curr_pts[i], c_act, per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
total_time_act_decode += time.time() - start_time
all_traj.append((curr_pts.copy(), act_traj))
print("total time render: ",total_time_act_render)
print("total time decode: ",total_time_act_decode)
return all_traj
def rollout_async(self, pts, per_features, c_env, actions):
actions = actions.squeeze()
num_sequence = actions.shape[0]
num_actions = actions.shape[-2]
all_traj = []
total_time_act_render = 0
total_time_act_decode = 0
total_async_time_act_render = 0
import time
from functools import partial
render_pts_func = partial(plushsim_util.render_points, return_index=True)
curr_pts = [pts for _ in range(num_sequence)]
for j in range(num_actions):
start_time = time.time()
points_world = [p.cpu().numpy().squeeze()
* (1200, 1200, 400)
/ (1.1,1.1,1.1)
+ (0, 0, 180) for p in curr_pts]
from multiprocessing import Pool
with Pool(16) as p:
vis_idxes = p.map(render_pts_func, points_world)
xyzs, acts = [],[]
for i in range(num_sequence):
g,t = actions[i,0,j], actions[i,1,j]
# c_act, act_partial = self.get_action_encoding(
# curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
obj_xyz, obj_act = self.get_action_encoding_new(
curr_pts[i], g, t, c_env, vis_idx=vis_idxes[i])
xyzs.append(obj_xyz)
acts.append(obj_act)
total_time_act_render += time.time() - start_time
n = 20
start_time = time.time()
xyz_chunks = [xyzs[i:i+n] for i in range(0, num_sequence, n)]
act_chunks = [acts[i:i+n] for i in range(0, num_sequence, n)]
c_acts = []
for xyz, act in zip(xyz_chunks, act_chunks):
obj_xyz = torch.as_tensor(np.stack(xyz).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(act).astype(np.float32)).to(self._device)
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
for chunk_i in range(len(xyz)):
c_act = {}
for k in c_act_new.keys():
c_act[k] = torch.cat([c_act_new[k][chunk_i].unsqueeze(0), c_env[k]], dim=1)
c_acts.append(c_act)
total_time_act_decode += time.time() - start_time
# Debug-only visualization left from development: dumps a t-SNE color map
# of the first sequence's action feature planes to act_<plane>.png.
from src.utils import common_util
from PIL import Image
for k,v in c_acts[0].items():
v_np = v.squeeze().permute(1,2,0).cpu().numpy()
feature_plane = v_np.reshape([-1, v_np.shape[-1]])
tsne_result = common_util.embed_tsne(feature_plane)
colors = common_util.get_color_map(tsne_result)
colors = colors.reshape((128,128,-1)).astype(np.float32)
colors = (colors * 255 / np.max(colors)).astype('uint8')
img = Image.fromarray(colors)
img.save(f"act_{k}.png")
# import pdb; pdb.set_trace()  # breakpoint from development; disabled so rollout_async runs end-to-end
for i in range(num_sequence):
flow = self.decoder.decode_action(curr_pts[i], c_acts[i], per_features)['flow']
curr_pts[i] = curr_pts[i] + flow / 10.
all_traj.append(([p.cpu().numpy().squeeze() for p in curr_pts], xyzs))
return all_traj
def get_action_encoding_new(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
# get visible points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
return obj_xyz, obj_act
def get_action_encoding(self, pts, grasp_loc, target_loc, c_env, vis_idx=None):
# pts: B*2, N, 3
import time
start_time = time.time()
B,N,_ = pts.shape
pts = pts.cpu().numpy()
xyzs, acts = [], []
# get visible points by rendering pts
occ_pts = pts[0]
occ_pts_t = occ_pts * (1200, 1200, 400) / (1.1,1.1,1.1) + (0,0,180)
if vis_idx is None:
vis_idx = plushsim_util.render_points(occ_pts_t,
plushsim_util.CAM_EXTR,
plushsim_util.CAM_INTR,
return_index=True)
obj_xyz = occ_pts[vis_idx]
#print("time split 1: ", time.time() - start_time)
start_time = time.time()
# subsample pts
indices = np.random.randint(obj_xyz.shape[0], size=5000)
obj_xyz = obj_xyz[indices]
# make action feature
tiled_grasp_loc = np.tile(grasp_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc.cpu().numpy(), (len(obj_xyz), 1)).astype(np.float32)
obj_act = np.concatenate([tiled_target_loc, obj_xyz - tiled_grasp_loc], axis=-1)
xyzs.append(obj_xyz)
acts.append(obj_act)
obj_xyz = torch.as_tensor(np.stack(xyzs).astype(np.float32)).to(self._device)
obj_act = torch.as_tensor(np.stack(acts).astype(np.float32)).to(self._device)
#print("time split 2: ", time.time() - start_time)
start_time = time.time()
c_act_new = self.obj_act_encoder((obj_xyz, obj_act))
#print("time split 3: ", time.time() - start_time)
start_time = time.time()
for k in c_act_new.keys():
c_act_new[k] = torch.cat([c_act_new[k], c_env[k]], dim=1)
#print("time split 4: ", time.time() - start_time)
start_time = time.time()
return c_act_new, obj_xyz
def encode_perception(self, inputs, merge_env_feature=True):
obj_pcloud = inputs['obj_obs']
if len(obj_pcloud.shape) > 3:
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:6]
c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
if self.env_encoder is not None:
env_pcloud = inputs['env_obs'].cuda()
if len(env_pcloud.shape) > 3:
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
if merge_env_feature:
for k in c_per.keys():
env_f = self.env_upsample(env_features[k])
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
else:
return c_per, env_features
return c_per
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
obj_pcloud = inputs['obj_obs']
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb, obj_act = obj_pcloud[...,:3],obj_pcloud[...,3:6],obj_pcloud[...,6:]
c_per = self.obj_per_encoder((obj_xyz, obj_rgb))
c_act = self.obj_act_encoder((obj_xyz, obj_act))
if self.env_encoder is not None:
env_pcloud = inputs['env_obs'].cuda()
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
for k in c_per.keys():
env_f = self.env_upsample(env_features[k])
c_per[k] = torch.cat([c_per[k], env_f], dim=1)
c_act[k] = torch.cat([c_act[k], env_f], dim=1)
return c_per, c_act
def eval_points(self, pts, c):
outputs = self.decoder(pts, *c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
def decode(self, inputs, c1, c2, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
c (tensor): latent conditioned code c
'''
p = inputs['sampled_pts']
B,_,N,C = p.shape
p = p.reshape([B*2,N,C])
outputs = self.decoder(p, c1, c2)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
if 'corr' in outputs:
_,N,C = outputs['corr'].shape
corr_f = outputs['corr'].reshape([B,2,N,C])
if 'skip_indexing' not in kwargs:
corr_f = torch.transpose(corr_f, 0, 1)
corr_f = torch.flatten(corr_f, 1, 2)
inds = inputs['pair_indices']
corr_f = corr_f[:,inds]
outputs['corr'] = corr_f
return outputs
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
class ConvOccGeom(nn.Module):
''' Occupancy Network class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
device (device): torch device
'''
def __init__(self, obj_encoder, env_encoder, decoder, device=None, env_scale_factor=2.):
super().__init__()
self.decoder = decoder.to(device)
self.obj_encoder = obj_encoder.to(device)
if env_encoder is None:
self.env_encoder = env_encoder
else:
self.env_encoder = env_encoder.to(device)
self.env_upsample = torch.nn.UpsamplingBilinear2d(scale_factor=env_scale_factor)
self._device = device
def forward(self, inputs, sample=True, **kwargs):
''' Performs a forward pass through the network.
Args:
p (tensor): sampled points
inputs (tensor): conditioning input
sample (bool): whether to sample for z
'''
#############
c = self.encode_inputs(inputs)
return self.decode(inputs, c, **kwargs)
def encode_inputs(self, inputs):
''' Encodes the input.
Args:
input (tensor): the input
'''
obj_pcloud = inputs['obj_obs']
B,_,N,C = obj_pcloud.shape
obj_pcloud = obj_pcloud.reshape([B*2,N,C])
obj_xyz, obj_rgb = obj_pcloud[...,:3],obj_pcloud[...,3:]
obj_features = self.obj_encoder((obj_xyz, obj_rgb))
if self.env_encoder is None:
return obj_features
env_pcloud = inputs['env_obs'].cuda()
B,_,N,C = env_pcloud.shape
env_pcloud = env_pcloud.reshape([B*2,N,C])
env_xyz, env_rgb = env_pcloud[...,:3],env_pcloud[...,3:]
env_features = self.env_encoder((env_xyz, env_rgb))
joint_features = {}
for k in obj_features.keys():
env_f = self.env_upsample(env_features[k])
joint_features[k] = torch.cat([obj_features[k], env_f], dim=1)
return joint_features
def eval_points(self, pts, c):
outputs = self.decoder(pts, c)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
return outputs
def decode(self, inputs, c, **kwargs):
''' Returns occupancy probabilities for the sampled points.
Args:
p (tensor): points
c (tensor): latent conditioned code c
'''
p = inputs['sampled_pts']
B,_,N,C = p.shape
p = p.reshape([B*2,N,C])
outputs = self.decoder(p, c, **kwargs)
if 'occ' in outputs:
outputs['occ'] = dist.Bernoulli(logits=outputs['occ'])
if 'corr' in outputs:
_,N,C = outputs['corr'].shape
corr_f = outputs['corr'].reshape([B,2,N,C])
corr_f = torch.transpose(corr_f, 0, 1)
corr_f = torch.flatten(corr_f, 1, 2)
inds = inputs['pair_indices']
corr_f = corr_f[:,inds]
outputs['corr'] = corr_f
return outputs
def to(self, device):
''' Puts the model to the device.
Args:
device (device): pytorch device
'''
model = super().to(device)
model._device = device
return model
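# A hedged construction sketch (not from the original file): shows how the
# decoder registry above is meant to be combined with the model classes.
# Encoder construction is elided; `obj_enc` and `env_enc` are placeholders
# standing in for real GeomEncoder instances, not actual code here.
if __name__ == "__main__":
    dec = decoder_dict['geom_decoder'](dim=3, c_dim=128, corr_dim=16)
    # model = ConvOccGeom(obj_encoder=obj_enc, env_encoder=env_enc,
    #                     decoder=dec, device='cuda')
    print(dec)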
# File: NVlabs/ACID/ACID/src/encoder/__init__.py
from src.encoder import (
pointnet
)
encoder_dict = {
'geom_encoder': pointnet.GeomEncoder,
}
# File: NVlabs/ACID/ACID/src/encoder/pointnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.layers import ResnetBlockFC
from torch_scatter import scatter_mean, scatter_max
from src.common import coordinate2index, normalize_coordinate
from src.encoder.unet import UNet
class GeomEncoder(nn.Module):
''' PointNet-based encoder network with ResNet blocks for each point.
The number of input points is fixed.
Args:
c_dim (int): dimension of latent code c
dim (int): input points dimension
hidden_dim (int): hidden dimension of the network
scatter_type (str): feature aggregation when doing local pooling
unet (bool): whether to use U-Net
unet_kwargs (str): U-Net parameters
unet3d (bool): whether to use 3D U-Net
unet3d_kwargs (str): 3D U-Net parameters
plane_resolution (int): defined resolution for plane feature
grid_resolution (int): defined resolution for grid feature
plane_type (str): feature type, 'xz' - 1-plane, ['xz', 'xy', 'yz'] - 3-plane, ['grid'] - 3D grid volume
padding (float): conventional padding parameter of ONet for unit cube, so [-0.5, 0.5] -> [-0.55, 0.55]
n_blocks (int): number of ResnetBlockFC blocks
'''
def __init__(self, c_dim=128, dim=3, f_dim=9, hidden_dim=128, scatter_type='max',
unet_kwargs=None, plane_resolution=None, padding=0.1, n_blocks=5):
super().__init__()
self.c_dim = c_dim
self.fc_pos = nn.Linear(dim+f_dim, 2*hidden_dim)
self.blocks = nn.ModuleList([
ResnetBlockFC(2*hidden_dim, hidden_dim) for i in range(n_blocks)
])
self.fc_c = nn.Linear(hidden_dim, c_dim)
self.actvn = nn.ReLU()
self.hidden_dim = hidden_dim
self.unet = UNet(c_dim, in_channels=c_dim, **unet_kwargs)
self.reso_plane = plane_resolution
self.padding = padding
if scatter_type == 'max':
self.scatter = scatter_max
elif scatter_type == 'mean':
self.scatter = scatter_mean
else:
raise ValueError('incorrect scatter type')
def generate_plane_features(self, p, c, plane='xz'):
# acquire indices of features in plane
xy = normalize_coordinate(p.clone(), plane=plane, padding=self.padding) # normalize to the range of (0, 1)
index = coordinate2index(xy, self.reso_plane)
# scatter plane features from points
fea_plane = c.new_zeros(p.size(0), self.c_dim, self.reso_plane**2)
c = c.permute(0, 2, 1) # B x 512 x T
fea_plane = scatter_mean(c, index, out=fea_plane) # B x 512 x reso^2
fea_plane = fea_plane.reshape(p.size(0), self.c_dim, self.reso_plane, self.reso_plane) # sparse matrix (B x 512 x reso x reso)
# process the plane features with UNet
fea_plane = self.unet(fea_plane)
return fea_plane
def pool_local(self, xy, index, c):
bs, fea_dim = c.size(0), c.size(2)
keys = xy.keys()
c_out = 0
for key in keys:
# scatter plane features from points
fea = self.scatter(c.permute(0, 2, 1), index[key], dim_size=self.reso_plane**2)
if self.scatter == scatter_max:
fea = fea[0]
# gather feature back to points
fea = fea.gather(dim=2, index=index[key].expand(-1, fea_dim, -1))
c_out += fea
return c_out.permute(0, 2, 1)
def forward(self, p):
if type(p) is tuple:
p, pf = p
else:
pf = None
# acquire the index for each point
coord = {}
index = {}
coord['xz'] = normalize_coordinate(p.clone(), plane='xz', padding=self.padding)
index['xz'] = coordinate2index(coord['xz'], self.reso_plane)
coord['xy'] = normalize_coordinate(p.clone(), plane='xy', padding=self.padding)
index['xy'] = coordinate2index(coord['xy'], self.reso_plane)
coord['yz'] = normalize_coordinate(p.clone(), plane='yz', padding=self.padding)
index['yz'] = coordinate2index(coord['yz'], self.reso_plane)
net = self.fc_pos(torch.cat([p, pf],dim=-1))
net = self.blocks[0](net)
for block in self.blocks[1:]:
pooled = self.pool_local(coord, index, net)
net = torch.cat([net, pooled], dim=2)
net = block(net)
c = self.fc_c(net)
fea = {}
fea['xz'] = self.generate_plane_features(p, c, plane='xz')
fea['xy'] = self.generate_plane_features(p, c, plane='xy')
fea['yz'] = self.generate_plane_features(p, c, plane='yz')
return fea
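# A hedged smoke test (not from the original file): encode a random point
# cloud with 9-dimensional per-point features and print the plane feature
# shapes. The U-Net depth/filters below are arbitrary illustrative choices.
if __name__ == "__main__":
    enc = GeomEncoder(c_dim=32, f_dim=9, hidden_dim=32, plane_resolution=64,
                      unet_kwargs={'depth': 4, 'merge_mode': 'concat',
                                   'start_filts': 32})
    p = torch.rand(2, 512, 3) - 0.5
    pf = torch.rand(2, 512, 9)
    fea = enc((p, pf))
    # expected: three planes 'xz', 'xy', 'yz', each of shape (2, 32, 64, 64)
    print({k: tuple(v.shape) for k, v in fea.items()})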
# File: NVlabs/ACID/ACID/src/encoder/unet.py
'''
Code is from:
https://github.com/jaxony/unet-pytorch/blob/master/model.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from torch.nn import init
import numpy as np
def conv3x3(in_channels, out_channels, stride=1,
padding=1, bias=True, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=padding,
bias=bias,
groups=groups)
def upconv2x2(in_channels, out_channels, mode='transpose'):
if mode == 'transpose':
return nn.ConvTranspose2d(
in_channels,
out_channels,
kernel_size=2,
stride=2)
else:
# out_channels is always going to be the same
# as in_channels
return nn.Sequential(
nn.Upsample(mode='bilinear', scale_factor=2),
conv1x1(in_channels, out_channels))
def conv1x1(in_channels, out_channels, groups=1):
return nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
groups=groups,
stride=1)
class DownConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels, pooling=True):
super(DownConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.pooling = pooling
self.conv1 = conv3x3(self.in_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
if self.pooling:
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
before_pool = x
if self.pooling:
x = self.pool(x)
return x, before_pool
class UpConv(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 UpConvolution.
A ReLU activation follows each convolution.
"""
def __init__(self, in_channels, out_channels,
merge_mode='concat', up_mode='transpose'):
super(UpConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.merge_mode = merge_mode
self.up_mode = up_mode
self.upconv = upconv2x2(self.in_channels, self.out_channels,
mode=self.up_mode)
if self.merge_mode == 'concat':
self.conv1 = conv3x3(
2*self.out_channels, self.out_channels)
else:
# num of input channels to conv2 is same
self.conv1 = conv3x3(self.out_channels, self.out_channels)
self.conv2 = conv3x3(self.out_channels, self.out_channels)
def forward(self, from_down, from_up):
""" Forward pass
Arguments:
from_down: tensor from the encoder pathway
from_up: upconv'd tensor from the decoder pathway
"""
from_up = self.upconv(from_up)
if self.merge_mode == 'concat':
x = torch.cat((from_up, from_down), 1)
else:
x = from_up + from_down
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
return x
class UNet(nn.Module):
""" `UNet` class is based on https://arxiv.org/abs/1505.04597
The U-Net is a convolutional encoder-decoder neural network.
Contextual spatial information (from the decoding,
expansive pathway) about an input tensor is merged with
information representing the localization of details
(from the encoding, compressive pathway).
Modifications to the original paper:
(1) padding is used in 3x3 convolutions to prevent loss
of border pixels
(2) merging outputs does not require cropping due to (1)
(3) residual connections can be used by specifying
UNet(merge_mode='add')
(4) if non-parametric upsampling is used in the decoder
pathway (specified by upmode='upsample'), then an
additional 1x1 2d convolution occurs after upsampling
to reduce channel dimensionality by a factor of 2.
This channel halving happens with the convolution in
the transpose convolution (specified by upmode='transpose')
"""
def __init__(self, num_classes, in_channels=3, depth=5,
start_filts=64, up_mode='transpose',
merge_mode='concat', **kwargs):
"""
Arguments:
in_channels: int, number of channels in the input tensor.
Default is 3 for RGB images.
depth: int, number of MaxPools in the U-Net.
start_filts: int, number of convolutional filters for the
first conv.
up_mode: string, type of upconvolution. Choices: 'transpose'
for transpose convolution or 'upsample' for bilinear
upsampling (upconv2x2 uses bilinear, not nearest neighbour).
"""
super(UNet, self).__init__()
if up_mode in ('transpose', 'upsample'):
self.up_mode = up_mode
else:
raise ValueError("\"{}\" is not a valid mode for "
"upsampling. Only \"transpose\" and "
"\"upsample\" are allowed.".format(up_mode))
if merge_mode in ('concat', 'add'):
self.merge_mode = merge_mode
else:
raise ValueError("\"{}\" is not a valid mode for"
"merging up and down paths. "
"Only \"concat\" and "
"\"add\" are allowed.".format(up_mode))
# NOTE: up_mode 'upsample' is incompatible with merge_mode 'add'
if self.up_mode == 'upsample' and self.merge_mode == 'add':
raise ValueError("up_mode \"upsample\" is incompatible "
"with merge_mode \"add\" at the moment "
"because it doesn't make sense to use "
"nearest neighbour to reduce "
"depth channels (by half).")
self.num_classes = num_classes
self.in_channels = in_channels
self.start_filts = start_filts
self.depth = depth
self.down_convs = []
self.up_convs = []
# create the encoder pathway and add to a list
for i in range(depth):
ins = self.in_channels if i == 0 else outs
outs = self.start_filts*(2**i)
pooling = True if i < depth-1 else False
down_conv = DownConv(ins, outs, pooling=pooling)
self.down_convs.append(down_conv)
# create the decoder pathway and add to a list
# - careful! decoding only requires depth-1 blocks
for i in range(depth-1):
ins = outs
outs = ins // 2
up_conv = UpConv(ins, outs, up_mode=up_mode,
merge_mode=merge_mode)
self.up_convs.append(up_conv)
# add the list of modules to current module
self.down_convs = nn.ModuleList(self.down_convs)
self.up_convs = nn.ModuleList(self.up_convs)
self.conv_final = conv1x1(outs, self.num_classes)
self.reset_params()
@staticmethod
def weight_init(m):
if isinstance(m, nn.Conv2d):
init.xavier_normal_(m.weight)
init.constant_(m.bias, 0)
def reset_params(self):
for i, m in enumerate(self.modules()):
self.weight_init(m)
def forward(self, x):
encoder_outs = []
# encoder pathway, save outputs for merging
for i, module in enumerate(self.down_convs):
x, before_pool = module(x)
encoder_outs.append(before_pool)
for i, module in enumerate(self.up_convs):
before_pool = encoder_outs[-(i+2)]
x = module(before_pool, x)
# No softmax is used, so you need to use
# nn.CrossEntropyLoss in your training script;
# that loss already includes the softmax.
x = self.conv_final(x)
return x
if __name__ == "__main__":
"""
testing
"""
model = UNet(1, depth=5, merge_mode='concat', in_channels=1, start_filts=32)
print(model)
print(sum(p.numel() for p in model.parameters()))
reso = 176
x = np.zeros((1, 1, reso, reso))
x[:,:,int(reso/2-1), int(reso/2-1)] = np.nan
x = torch.FloatTensor(x)
out = model(x)
print('%f'%(torch.sum(torch.isnan(out)).detach().cpu().numpy()/(reso*reso)))
# loss = torch.sum(out)
# loss.backward()
# File: NVlabs/ACID/ACID/src/utils/common_util.py
import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def get_color_map(x):
colours = plt.cm.Spectral(x)
return colours[:, :3]
def embed_tsne(data):
"""
N x D np.array data
"""
tsne = TSNE(n_components=1, verbose=0, perplexity=40, n_iter=300, random_state=0)
tsne_results = tsne.fit_transform(data)
tsne_results = np.squeeze(tsne_results)
tsne_min = np.min(tsne_results)
tsne_max = np.max(tsne_results)
return (tsne_results - tsne_min) / (tsne_max - tsne_min)
########################################################################
# Viewpoint transform
########################################################################
view_to_order = {
'cam0': ('X', 'Y', 'Z'),
'cam1': ('-Z', 'Y', 'X'),
'cam2': ('Z', 'Y', '-X'),
'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
multiplier = -1 if '-' in val else 1
if "X" in val:
return x * multiplier
elif "Y" in val:
return y * multiplier
elif "Z" in val:
return z * multiplier
def world_coord_view_augmentation(view, pts):
order = view_to_order[view]
pts = pts.reshape([-1,3])
x,y,z = np.moveaxis(pts, 1, 0)
return np.array([get_axis_pt(o,x,y,z) for o in order]).T
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def transform_points_world_to_cam(world_points, cam_extr):
return np.transpose(
np.dot(
np.linalg.inv(
cam_extr[0:3, 0:3]),
np.transpose(world_points)
- np.tile(cam_extr[0:3, 3:], (1, world_points.shape[0]))))
def render_points_slowest(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
cam_pts_x = np.rint(cam_pts_x).astype(int)
cam_pts_y = np.rint(cam_pts_y).astype(int)
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
grouped_pts = [[*j] for i, j in itertools.groupby(
sorted_pts,
key=lambda x: (x[0] // 3, x[1] // 3))]
min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
min_idx = min_depth[:,-1]
min_depth = min_depth[:,:-1]
return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
points[:,:2] = np.rint(points[:,:2] / 2)
points = points[points[:,1].argsort()]
points = points[points[:,0].argsort(kind='mergesort')]
grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr, return_index=False):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
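# Z-buffer style rendering: bucket projected points into 2x2-pixel cells and
# keep one depth-extreme point per cell. Encoding a cell as row * 1000 + col
# assumes the image is narrower than 2000 px, so keys cannot collide.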
idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
if return_index:
return min_idx
return world_points[min_idx]
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
W, H = depth_image.shape
if keep_dim:
world_pts = world_pts.reshape([W, H, 3])
return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
def get_trunc_ab(mean, std, a, b):
return (a - mean) / std, (b - mean) / std
def get_trunc_ab_range(mean_min, mean_max, std, a, b):
return (a - mean_min) / std, (b - mean_max) / std
def transform_points(pointcloud, from_range, to_range):
if len(pointcloud.shape) == 1:
pointcloud = pointcloud.reshape([1,-1])
if pointcloud.shape[1] == 6:
xyz = pointcloud[:,:3]
rgb = pointcloud[:,3:]
else:
xyz = pointcloud
rgb = None
from_center = np.mean(from_range, axis=0)
from_size = np.ptp(from_range, axis=0)
to_center = np.mean(to_range, axis=0)
to_size = np.ptp(to_range, axis=0)
xyz = (xyz - from_center) / from_size * to_size + to_center
if rgb is None:
return xyz
else:
return np.concatenate([xyz, rgb], axis=-1)
def extent_to_cube(extent):
min_x,min_y,min_z = extent[0]
max_x,max_y,max_z = extent[1]
verts = np.array([
(max_x,max_y,max_z),
(max_x,max_y,min_z),
(max_x,min_y,max_z),
(max_x,min_y,min_z),
(min_x,max_y,max_z),
(min_x,max_y,min_z),
(min_x,min_y,max_z),
(min_x,min_y,min_z),])
faces = np.array([
(1,5,7,3),
(4,3,7,8),
(8,7,5,6),
(6,2,4,8),
(2,1,3,4),
(6,5,1,2),])
return verts, faces
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def set_background_blank(ax):
# Hide grid lines
ax.grid(False)
ax.set_axis_off()
# Hide axes ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# First remove fill
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
# Now set color to white (or whatever is "invisible")
ax.xaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
ax.yaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
ax.zaxis.pane.set_edgecolor((1.0, 1.0, 1.0, 0.0))
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,1][viz_idx],
pts[:,2][viz_idx],
flow[:,0], flow[:,1], flow[:,2],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,1],
pts[:,2], color=col,s=0.5)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
set_axes_equal(ax)
set_background_blank(ax)
fig.tight_layout()
return fig
def write_pointcoud_as_obj(path, xyzrgb, faces=None):
with open(path, 'w') as fp:
if xyzrgb.shape[1] == 6:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
if faces is not None:
for f in faces:
f_str = " ".join([str(i) for i in f])
fp.write(f"f {f_str}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
if points.shape[1] == 6:
xyz = points[:,:3]
else:
xyz = points
if points.shape[0] == 0:
if return_index:
return np.arange(0)
return points
idx = np.unique(xyz // resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
if x.shape[0] == 0:
return 0, 0, 0
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
recall = float(sum(d < th for d in d2)) / float(len(d2))
precision = float(sum(d < th for d in d1)) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
return fscore, precision, recall
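# A hedged usage sketch (not from the original file): evaluate the distance
# metrics above on two random point clouds; the noise scale and threshold
# are arbitrary illustrative choices (th matches the f1_score default).
if __name__ == "__main__":
    x = np.random.rand(500, 3)
    y = x + np.random.normal(scale=0.005, size=x.shape)
    print('chamfer:', chamfer_distance(x, y))
    print('f1/precision/recall:', f1_score(x, y, th=0.01))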
# File: NVlabs/ACID/ACID/src/utils/io.py
import os
from plyfile import PlyElement, PlyData
import numpy as np
def export_pointcloud(vertices, out_file, as_text=True):
assert(vertices.shape[1] == 3)
vertices = vertices.astype(np.float32)
vertices = np.ascontiguousarray(vertices)
vector_dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertices = vertices.view(dtype=vector_dtype).flatten()
plyel = PlyElement.describe(vertices, 'vertex')
plydata = PlyData([plyel], text=as_text)
plydata.write(out_file)
def load_pointcloud(in_file):
plydata = PlyData.read(in_file)
vertices = np.stack([
plydata['vertex']['x'],
plydata['vertex']['y'],
plydata['vertex']['z']
], axis=1)
return vertices
def read_off(file):
"""
Reads vertices and faces from an off file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines]
# Fix for a ModelNet bug where 'OFF' and the number of vertices and faces
# are all on the first line.
if len(lines[0]) > 3:
assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', \
'invalid OFF file %s' % file
parts = lines[0][3:].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 1
# This is the regular case!
else:
assert lines[0] == 'OFF' or lines[0] == 'off', \
'invalid OFF file %s' % file
parts = lines[1].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 2
vertices = []
for i in range(num_vertices):
vertex = lines[start_index + i].split(' ')
vertex = [float(point.strip()) for point in vertex if point != '']
assert len(vertex) == 3
vertices.append(vertex)
faces = []
for i in range(num_faces):
face = lines[start_index + num_vertices + i].split(' ')
face = [index.strip() for index in face if index != '']
# check to be sure
for index in face:
assert index != '', \
'found empty vertex index: %s (%s)' \
% (lines[start_index + num_vertices + i], file)
face = [int(index) for index in face]
assert face[0] == len(face) - 1, \
'face should have %d vertices but has %d (%s)' \
% (face[0], len(face) - 1, file)
assert face[0] == 3, \
'only triangular meshes supported (%s)' % file
for index in face:
assert index >= 0 and index < num_vertices, \
'vertex %d (of %d vertices) does not exist (%s)' \
% (index, num_vertices, file)
assert len(face) > 1
faces.append(face)
return vertices, faces
assert False, 'could not open %s' % file
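# A hedged round-trip sketch (not from the original file): write a random
# point cloud to a temporary PLY file and read it back.
if __name__ == "__main__":
    import tempfile
    pts = np.random.rand(100, 3).astype(np.float32)
    with tempfile.NamedTemporaryFile(suffix='.ply', delete=False) as f:
        path = f.name
    export_pointcloud(pts, path)
    loaded = load_pointcloud(path)
    # the PLY round trip should preserve the float32 coordinates exactly
    print('round-trip ok:', np.allclose(pts, loaded))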
# File: NVlabs/ACID/ACID/src/utils/visualize.py
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import src.common as common
def visualize_data(data, data_type, out_file):
r''' Visualizes the data with regard to its type.
Args:
data (tensor): batch of data
data_type (string): data type (img, voxels or pointcloud)
out_file (string): output file
'''
if data_type == 'voxels':
visualize_voxels(data, out_file=out_file)
elif data_type == 'pointcloud':
visualize_pointcloud(data, out_file=out_file)
elif data_type is None or data_type == 'idx':
pass
else:
raise ValueError('Invalid data_type "%s"' % data_type)
def visualize_voxels(voxels, out_file=None, show=False):
r''' Visualizes voxel data.
Args:
voxels (tensor): voxel data
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
voxels = np.asarray(voxels)
# Create plot
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
voxels = voxels.transpose(2, 0, 1)
ax.voxels(voxels, edgecolor='k')
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
def visualize_pointcloud(points, normals=None,
out_file=None, show=False):
r''' Visualizes point cloud data.
Args:
points (tensor): point data
normals (tensor): normal data (if existing)
out_file (string): output file
show (bool): whether the plot should be shown
'''
# Use numpy
points = np.asarray(points)
# Create plot
fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
ax.scatter(points[:, 2], points[:, 0], points[:, 1])
if normals is not None:
ax.quiver(
points[:, 2], points[:, 0], points[:, 1],
normals[:, 2], normals[:, 0], normals[:, 1],
length=0.1, color='k'
)
ax.set_xlabel('Z')
ax.set_ylabel('X')
ax.set_zlabel('Y')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.set_zlim(-0.5, 0.5)
ax.view_init(elev=30, azim=45)
if out_file is not None:
plt.savefig(out_file)
if show:
plt.show()
plt.close(fig)
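# Smoke-test sketch (illustrative, not part of the original file): renders a
# random cloud with unit normals to a hypothetical output path.
def _example_visualize(out_file='demo_pointcloud.png'):
    points = np.random.rand(256, 3) - 0.5
    normals = np.random.randn(256, 3)
    normals /= np.linalg.norm(normals, axis=1, keepdims=True)
    visualize_pointcloud(points, normals=normals, out_file=out_file)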
NVlabs/ACID/ACID/src/utils/mentalsim_util.py
import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
########################################################################
# Viewpoint transform
########################################################################
view_to_order = {
'cam0': ('X', 'Y', 'Z'),
'cam1': ('-Z', 'Y', 'X'),
'cam2': ('Z', 'Y', '-X'),
'cam3': ('-X', 'Y', '-Z'),
}
def get_axis_pt(val, x, y, z):
multiplier = -1 if '-' in val else 1
if "X" in val:
return x * multiplier
elif "Y" in val:
return y * multiplier
elif "Z" in val:
return z * multiplier
def world_coord_view_augmentation(view, pts):
order = view_to_order[view]
pts = pts.reshape([-1,3])
x,y,z = np.moveaxis(pts, 1, 0)
return np.array([get_axis_pt(o,x,y,z) for o in order]).T
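# Worked example (illustrative, not from the original file): for 'cam1' the
# axis order is ('-Z', 'Y', 'X'), so the point (1, 2, 3) maps to (-3, 2, 1).
def _example_view_augmentation():
    out = world_coord_view_augmentation('cam1', np.array([[1., 2., 3.]]))
    assert np.allclose(out, [[-3., 2., 1.]])
    return out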
########################################################################
# partial observation projection / transform / rendering utilities
########################################################################
def transform_points_cam_to_world(cam_pts, camera_pose):
world_pts = np.transpose(
np.dot(camera_pose[0:3, 0:3], np.transpose(cam_pts)) + np.tile(camera_pose[0:3, 3:], (1, cam_pts.shape[0])))
return world_pts
def transform_points_world_to_cam(world_points, cam_extr):
return np.transpose(
np.dot(
np.linalg.inv(
cam_extr[0:3, 0:3]),
np.transpose(world_points)
- np.tile(cam_extr[0:3, 3:], (1, world_points.shape[0]))))
def render_points_slowest(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
cam_pts_x = np.rint(cam_pts_x).astype(int)
cam_pts_y = np.rint(cam_pts_y).astype(int)
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
sorted_pts = sorted(points, key=lambda x: (x[0], x[1]))
grouped_pts = [[*j] for i, j in itertools.groupby(
sorted_pts,
key=lambda x: (x[0] // 3, x[1] // 3))]
min_depth = np.array([sorted(p, key=lambda x: -x[2])[0] for p in grouped_pts])
min_idx = min_depth[:,-1]
min_depth = min_depth[:,:-1]
return world_points[min_idx.astype(int)]
def render_points_slow(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
points = np.stack([cam_pts_y, cam_pts_x, cam_pts_z, np.arange(len(cam_pts_x))]).T
points[:,:2] = np.rint(points[:,:2] / 2)
points = points[points[:,1].argsort()]
points = points[points[:,0].argsort(kind='mergesort')]
grouped_pts = np.split(points[:,2:], np.unique(points[:, :2], axis=0, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def render_points(world_points, cam_extr, cam_intr):
cam_points = transform_points_world_to_cam(world_points, cam_extr)
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 2) * 1000 + np.rint(cam_pts_x / 2)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
return world_points[min_idx]
def project_depth_world_space(depth_image, camera_intr, camera_pose, keep_dim=False, project_factor=1.):
cam_pts = project_depth_cam_space(depth_image, camera_intr, keep_dim=False,project_factor=project_factor)
world_pts = transform_points_cam_to_world(cam_pts, camera_pose)
W, H = depth_image.shape
if keep_dim:
world_pts = world_pts.reshape([W, H, 3])
return world_pts
def project_depth_cam_space(depth_img, camera_intrinsics, keep_dim=True, project_factor=1.):
# Get depth image size
im_h = depth_img.shape[0]
im_w = depth_img.shape[1]
# Project depth into 3D point cloud in camera coordinates
pix_x, pix_y = np.meshgrid(np.linspace(0, im_w - 1, im_w), np.linspace(0, im_h - 1, im_h))
cam_pts_x = np.multiply(pix_x - im_w / 2., -depth_img / camera_intrinsics[0, 0])
cam_pts_y = np.multiply(pix_y - im_h / 2., depth_img / camera_intrinsics[1, 1])
cam_pts_z = depth_img.copy()
cam_pts_x.shape = (im_h * im_w, 1)
cam_pts_y.shape = (im_h * im_w, 1)
cam_pts_z.shape = (im_h * im_w, 1)
cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) * project_factor
if keep_dim:
cam_pts = cam_pts.reshape([im_h, im_w, 3])
return cam_pts
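# Back-projection sketch (illustrative): a constant synthetic depth map and a
# made-up pinhole intrinsics matrix, just to show the output layout. Depths
# follow this codebase's negative-depth convention.
def _example_backproject():
    cam_intr = np.array([[500., 0., 32.], [0., 500., 32.], [0., 0., 1.]])
    depth = np.full((64, 64), -1.0)
    pts = project_depth_cam_space(depth, cam_intr, keep_dim=False)
    assert pts.shape == (64 * 64, 3)
    return pts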
def get_trunc_ab(mean, std, a, b):
return (a - mean) / std, (b - mean) /std
########################################################################
# partial observation getter for full experiment
########################################################################
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
[0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
SCENE_RANGE = np.array([[-600, -400, 0], [600, 400, 400]])
def get_scene_partial_pointcloud(model_category, model_name, split_id, int_id, frame_id, data_root):
path = f"{data_root}/{split_id}/{model_category}/{model_name}/img/{{}}_{int_id:04d}_{frame_id:06d}.{{}}"
depth_img = path.format('depth', 'png')
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = path.format('rgb', 'jpg')
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = path.format('seg', 'jpg')
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
########################################################################
# Get geometric state (full experiment)
########################################################################
def get_object_full_points(model_category, model_name, split_id, int_id, frame_id, data_root):
path = f"{data_root}/{split_id}/{model_category}/{model_name}/geom/{int_id:04d}_{frame_id:06d}.npz"
geom_data = np.load(path)
loc = geom_data['loc']
w,x,y,z= geom_data['rot']
rot = Rotation.from_quat(np.array([x,y,z,w]))
scale = geom_data['scale']
sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
return sim_pts, vis_pts
########################################################################
# partial observation getter for teddy toy example
########################################################################
def get_teddy_partial_pointcloud(int_group, int_id, frame_id, data_root, cam_id='cam0'):
#depth_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_depth.png")[0]
depth_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_depth.png"
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
#rgb_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_rgb.png")[0]
rgb_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_rgb.png"
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
#seg_img = glob.glob(f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_*{frame_id:03d}_seg.png")[0]
seg_img = f"{data_root}/{int_group}/img/{cam_id}/{int_id:06d}_{frame_id:03d}_seg.png"
seg_img = np.array(Image.open(seg_img))
non_env = np.where(seg_img != 0)
ospdir= os.path.dirname
root_dir = ospdir(ospdir(ospdir(os.path.realpath(__file__))))
camera_json = os.path.join(root_dir, "metadata", "camera.json")
with open(camera_json, 'r') as fp:
cam_info = json.load(fp)
for k in cam_info.keys():
cam_extr, cam_intr = cam_info[k]
cam_info[k] = np.array(cam_extr), np.array(cam_intr)
cam_extr, cam_intr = cam_info[cam_id]
partial_points = project_depth_world_space(depth_vals, cam_intr, cam_extr, keep_dim=True)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
xyzrgb = partial_points_rgb[non_env]
xyz = xyzrgb[:,:3]
xyz = world_coord_view_augmentation(cam_id, xyz)
rgb = xyzrgb[:,3:]
return xyz/ 10. * 1.1, rgb
########################################################################
# Get meta info (teddy toy example)
########################################################################
def get_teddy_loc(int_group, int_id, frame_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
return np.array(dict(zip(int_info['frames'], int_info['teddy_loc']))[frame_id])
def get_teddy_rot(int_group, int_id, frame_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
w,x,y,z = np.array(dict(zip(int_info['frames'], int_info['teddy_rot']))[frame_id])
return np.array([x,y,z,w])
def get_action_info(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
int_info = json.load(fp)
grasp_loc = np.array(int_info['grasp'])
target_loc = np.array(int_info['target'])
return grasp_loc, target_loc
def get_release_frame(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
return json.load(fp)['release_frame']
# name = glob.glob(
# f"{data_root}/{int_group}/geom/{int_id:06d}_release_*_sim.npy")[0].split("/")[-1]
# return int(name.split("_")[-2])
def get_end_frame(int_group, int_id, data_root):
obj_info = f"{data_root}/{int_group}/info/{int_id:06d}.json"
with open(obj_info, 'r') as fp:
return json.load(fp)['end_frame']
# name = glob.glob(
# f"{data_root}/{int_group}/geom/{int_id:06d}_static_*_sim.npy")[0].split("/")[-1]
# return int(name.split("_")[-2])
########################################################################
# Get geometric state (teddy toy example)
########################################################################
def get_teddy_full_points(int_group, int_id, frame_id, data_root):
#sim_data = glob.glob(f"{data_root}/{int_group}/geom/{int_id:06d}_*{frame_id:03d}_sim.npy")[0]
sim_data = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_sim.npy"
points = np.load(sim_data)
teddy_loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
teddy_rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
return ( teddy_rot.apply(points) + teddy_loc ) / 10. * 1.1
#return ( points + teddy_loc ) / 10. * 1.1
def get_teddy_vis_points(int_group, int_id, frame_id, data_root):
#sim_data = glob.glob(f"{data_root}/{int_group}/geom/{int_id:06d}_*{frame_id:03d}_vis.npy")[0]
sim_data = f"{data_root}/{int_group}/geom/{int_id:06d}_{frame_id:03d}_vis.npy"
points = np.load(sim_data)
teddy_loc = get_teddy_loc(int_group, int_id, frame_id, data_root)
teddy_rot = Rotation.from_quat(get_teddy_rot(int_group, int_id, frame_id, data_root))
return ( teddy_rot.apply(points) + teddy_loc ) / 10. * 1.1
#return ( points + teddy_loc ) / 10. * 1.1
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(int_group, int_id, frame_id, data_root, sample_scheme='uniform'):
if sample_scheme not in ['uniform', 'gaussian']:
raise ValueError('Unsupported sampling scheme for occupancy')
num_pts = 100000
if sample_scheme == 'uniform':
pts = np.random.rand(num_pts, 3)
pts = 1.1 * (pts - 0.5)
else:
x,y,z= get_teddy_loc(int_group, int_id, frame_id, data_root) / 10. * 1.1
std = 0.18
a, b = -0.55, 0.55
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T
teddy_sim_points = get_teddy_full_points(int_group, int_id, frame_id, data_root)
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(teddy_sim_points)
dist, ind = x_nn.kneighbors(pts)#[0].squeeze()
dist = dist.squeeze()
ind = ind.squeeze()
occ = dist < 0.01
pt_class = ind[occ != 0]
return pts, occ, pt_class
def sample_occupancies_with_flow(int_group, int_id, release_frame, end_frame, data_root, sample_scheme='uniform'):
pts, occ, ind = sample_occupancies(int_group, int_id, 0, data_root, sample_scheme)
xyz0 = get_teddy_full_points(int_group, int_id, 0, data_root)
f1 = get_teddy_full_points(int_group, int_id, release_frame, data_root) - xyz0
f2 = get_teddy_full_points(int_group, int_id, end_frame, data_root) - xyz0
return pts, occ, ind, f1[ind],f2[ind]
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,2][viz_idx],
pts[:,1][viz_idx],
flow[:,0], flow[:,2], flow[:,1],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,2],
pts[:,1], color=col,s=0.5)
set_axes_equal(ax)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
return fig
def write_pointcoud_as_obj(xyzrgb, path):
if xyzrgb.shape[1] == 6:
with open(path, 'w') as fp:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
with open(path, 'w') as fp:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
idx = np.unique(points// resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
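# Metric sanity check (illustrative sketch, not from the original file): the
# chamfer distance of a cloud with itself is zero and grows with an offset.
def _example_chamfer():
    x = np.random.rand(500, 3)
    assert chamfer_distance(x, x) < 1e-9
    return chamfer_distance(x, x + 0.1)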
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
    recall = float((d2 < th).sum()) / float(len(d2))
    precision = float((d1 < th).sum()) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
    return fscore, precision, recall
NVlabs/ACID/ACID/src/utils/plushsim_util.py
import os
import glob
import json
import scipy
import itertools
import numpy as np
from PIL import Image
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from .common_util import *
########################################################################
# Some file getters
########################################################################
def get_model_dir(data_root, split_id, model_category, model_name):
return f"{data_root}/{split_id}/{model_category}/{model_name}"
def get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/interaction_info_{reset_id:04d}.npz"
def get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/geom/{reset_id:04d}_{frame_id:06d}.npz"
def get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/img/{{}}_{reset_id:04d}_{frame_id:06d}.{{}}"
def get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('rgb', 'jpg')
def get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('depth', 'png')
def get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id):
temp = get_image_file_template(data_root, split_id, model_category, model_name, reset_id, frame_id)
return temp.format('seg', 'jpg')
def get_flow_data_file(flow_root,split_id, model_id, reset_id, int_id):
return f"{flow_root}/{split_id}/{model_id}/{reset_id:03d}_{int_id:03d}.npz"
def get_flow_pair_data_file(pair_root,split_id, model_id, reset_id, int_id):
return f"{pair_root}/{split_id}/{model_id}/pair_{reset_id:03d}_{int_id:03d}.npz"
def get_geom_data_file(geom_root,split_id, model_id, reset_id, frame_id):
return f"{geom_root}/{split_id}/{model_id}/{reset_id:03d}_{frame_id:06d}.npz"
def get_pair_data_file(pair_root,split_id, model_id, reset_id, frame_id):
return f"{pair_root}/{split_id}/{model_id}/pair_{reset_id:03d}_{frame_id:06d}.npz"
# Getters for plan data
def get_plan_geom_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/geom/{scenario_id:04d}_{seq_str}_{frame_id}.npz"
def get_plan_interaction_info_file(data_root, split_id, model_category, model_name, scenario_id, sequence_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/interaction_info_{scenario_id:04d}_{seq_str}.npz"
def get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
if sequence_id == 'gt':
seq_str = sequence_id
else:
seq_str = f"{sequence_id:04d}"
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/img/{{}}_{scenario_id:04d}_{seq_str}_{frame_id}.{{}}"
def get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('rgb', 'jpg')
def get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('depth', 'png')
def get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id):
temp = get_plan_image_file_template(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
return temp.format('seg', 'jpg')
def get_plan_perf_file(data_root, split_id, model_category, model_name, scenario_id):
model_dir = get_model_dir(data_root, split_id, model_category, model_name)
return f"{model_dir}/info/perf_{scenario_id:04d}.npz"
########################################################################
# partial observation getter for full experiment
########################################################################
CAM_EXTR = np.array([[1.0, 0.0, 0.0, 0.0], [0.0, 0.6427898318479135, -0.766043895201295, -565.0],
[0.0, 0.766047091387779, 0.6427871499290135, 550.0], [0.0, 0.0, 0.0, 1.0]])
CAM_INTR = np.array([[687.1868314210544, 0.0, 360.0], [0.0, 687.1868314210544, 360.0], [0.0, 0.0, 1.0]])
SCENE_RANGE = np.array([[-600, -600, -20], [600, 600, 380]])
def get_plan_scene_partial_pointcloud(
model_category, model_name, split_id, scenario_id, sequence_id, frame_id, data_root):
depth_img = get_plan_depth(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = get_plan_rgb(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = get_plan_seg(data_root, split_id, model_category, model_name, scenario_id, sequence_id, frame_id)
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
def get_scene_partial_pointcloud(model_category, model_name, split_id, reset_id, frame_id, data_root):
depth_img = get_depth(data_root, split_id, model_category, model_name, reset_id, frame_id)
depth_img = np.array(Image.open(depth_img).convert(mode='I'))
depth_vals = -np.array(depth_img).astype(float) / 1000.
rgb_img = get_rgb(data_root, split_id, model_category, model_name, reset_id, frame_id)
rgb_img = np.array(Image.open(rgb_img).convert(mode="RGB")).astype(float) / 255
seg_img = get_seg(data_root, split_id, model_category, model_name, reset_id, frame_id)
seg_img = np.array(Image.open(seg_img).convert('L')).squeeze()
non_env = np.where(seg_img != 0)
env = np.where(seg_img == 0)
partial_points = project_depth_world_space(depth_vals, CAM_INTR, CAM_EXTR, keep_dim=True, project_factor=100.)
partial_points_rgb = np.concatenate([partial_points, rgb_img], axis=-1)
obj_pts = partial_points_rgb[non_env]
env_pts = partial_points_rgb[env]
return obj_pts, env_pts
def render_points(world_points, cam_extr=None, cam_intr=None, return_index=False, filter_in_cam=True):
if cam_extr is None:
cam_extr = CAM_EXTR
if cam_intr is None:
cam_intr = CAM_INTR
cam_points = transform_points_world_to_cam(world_points, cam_extr) / 100.
cam_pts_x = cam_points[:,0]
cam_pts_y = cam_points[:,1]
cam_pts_z = cam_points[:,2]
cam_pts_x = -cam_pts_x / cam_pts_z * cam_intr[0,0] + cam_intr[1,2]
cam_pts_y = cam_pts_y / cam_pts_z * cam_intr[1,1] + cam_intr[0,2]
idx = np.rint(cam_pts_y / 6) * 1000 + np.rint(cam_pts_x / 6)
val = np.stack([cam_pts_z, np.arange(len(cam_pts_x))]).T
order = idx.argsort()
idx = idx[order]
val = val[order]
grouped_pts = np.split(val, np.unique(idx, return_index=True)[1][1:])
min_depth = np.array([p[p[:,0].argsort()][-1] for p in grouped_pts])
min_idx = min_depth[:,-1].astype(int)
if filter_in_cam:
in_cam = np.where(np.logical_and(cam_pts_x > 0, cam_pts_y > 0))[0]
min_idx = np.intersect1d(in_cam, min_idx, assume_unique=True)
if return_index:
return min_idx
return world_points[min_idx]
########################################################################
# Get geometric state (full experiment)
########################################################################
def extract_full_points(path):
geom_data = np.load(path)
loc = geom_data['loc']
w,x,y,z= geom_data['rot']
rot = Rotation.from_quat(np.array([x,y,z,w]))
scale = geom_data['scale']
sim_pts = (rot.apply(geom_data['sim'] * scale)) + loc
vis_pts = (rot.apply(geom_data['vis'] * scale)) + loc
return sim_pts, vis_pts, loc, rot, scale
def get_object_full_points(model_category, model_name, split_id, reset_id, frame_id, data_root):
path = get_geom_file(data_root, split_id, model_category, model_name, reset_id, frame_id)
return extract_full_points(path)
def get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root):
obj_info = get_interaction_info_file(data_root, split_id, model_category, model_name, reset_id)
int_info = np.load(obj_info)
grasp_loc = np.array(int_info['grasp_points'][interaction_id])
target_loc = np.array(int_info['target_points'][interaction_id])
start_frame = int_info['start_frames'][interaction_id]
release_frame = int_info['release_frames'][interaction_id]
static_frame = int_info['static_frames'][interaction_id]
return grasp_loc, target_loc, start_frame, release_frame, static_frame
########################################################################
# Get point-based supervision data for implicit functions (teddy toy example)
########################################################################
def sample_occupancies(full_pts, center,
sample_scheme='gaussian',
num_pts = 100000, bound=0.55,
std=0.1):
if sample_scheme not in ['uniform', 'gaussian', 'object']:
raise ValueError('Unsupported sampling scheme for occupancy')
if sample_scheme == 'uniform':
pts = np.random.rand(num_pts, 3)
pts = 1.1 * (pts - 0.5)
elif sample_scheme == 'object':
displace = full_pts[np.random.randint(full_pts.shape[0], size=num_pts)]
x_min,y_min,z_min = full_pts.min(axis=0)
x_max,y_max,z_max = full_pts.max(axis=0)
a, b = -bound, bound
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(x_min, x_max, std, a, b), loc=0, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(y_min, y_max, std, a, b), loc=0, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab_range(z_min, z_max, std, a, b), loc=0, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T + displace
else:
x,y,z= center
a, b = -bound, bound
xs = scipy.stats.truncnorm.rvs(*get_trunc_ab(x, std, a, b), loc=x, scale=std, size=num_pts)
ys = scipy.stats.truncnorm.rvs(*get_trunc_ab(y, std, a, b), loc=y, scale=std, size=num_pts)
zs = scipy.stats.truncnorm.rvs(*get_trunc_ab(z, std, a, b), loc=z, scale=std, size=num_pts)
pts = np.array([xs,ys,zs]).T
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric='l2').fit(full_pts)
dist, ind = x_nn.kneighbors(pts)#[0].squeeze()
dist = dist.squeeze()
ind = ind.squeeze()
#points_in = points_uniform[np.where(points_distance< 0.1)]
occ = dist < 0.01
#pt_class = ind[np.where(dist < 0.01)]
pt_class = ind[occ != 0]
return pts, occ, pt_class
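# Sampling sketch (illustrative, synthetic data): labels Gaussian-drawn query
# points against a random blob standing in for the full simulation points.
def _example_sample_occupancies():
    full_pts = 0.1 * np.random.randn(2000, 3)
    pts, occ, pt_class = sample_occupancies(
        full_pts, center=full_pts.mean(axis=0),
        sample_scheme='gaussian', num_pts=5000)
    assert pts.shape == (5000, 3) and occ.shape == (5000,)
    return occ.mean()  # fraction of samples labeled occupied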
########################################################################
# Visualization
########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
def side_by_side_point_clouds(point_clouds, angle=(90,0)):
fig = plt.figure()
W = int(len(point_clouds) ** 0.5)
H = math.ceil(len(point_clouds) / W)
for i, pcloud in enumerate(point_clouds):
action = None
flow = None
pts = pcloud['pts']
title = pcloud['title']
col = pcloud.get('col', None)
flow = pcloud.get('flow', None)
action = pcloud.get('action', None)
ax = fig.add_subplot(W, H, i+1,projection='3d')
ax.set_title(title)
if flow is not None:
flow_norm = np.linalg.norm(flow, axis=1)
viz_idx = flow_norm > 0.0
flow = flow[viz_idx]
ax.quiver(
pts[:,0][viz_idx],
pts[:,2][viz_idx],
pts[:,1][viz_idx],
flow[:,0], flow[:,2], flow[:,1],
color = 'red', linewidth=3, alpha=0.2
)
if col is None:
col = 'blue'
ax.scatter(pts[:,0],
pts[:,2],
pts[:,1], color=col,s=0.5)
set_axes_equal(ax)
ax.view_init(*angle)
if action is not None:
ax.scatter(action[0], action[1], 0.,
edgecolors='tomato', color='turquoise', marker='*',s=80)
return fig
def write_pointcoud_as_obj(xyzrgb, path):
if xyzrgb.shape[1] == 6:
with open(path, 'w') as fp:
for x,y,z,r,g,b in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f} {r:.3f} {g:.3f} {b:.3f}\n")
else:
with open(path, 'w') as fp:
for x,y,z in xyzrgb:
fp.write(f"v {x:.3f} {y:.3f} {z:.3f}\n")
#################################
# Distance Metric
#################################
def subsample_points(points, resolution=0.0125, return_index=True):
idx = np.unique(points// resolution * resolution, axis=0, return_index=True)[1]
if return_index:
return idx
return points[idx]
def miou(x, y, th=0.01):
x = subsample_points(x, resolution=th, return_index=False) // th
y = subsample_points(y, resolution=th, return_index=False) // th
xset = set([tuple(i) for i in x])
yset = set([tuple(i) for i in y])
return len(xset & yset) / len(xset | yset)
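# mIoU sanity check (illustrative sketch): identical clouds score 1.0 and
# far-apart clouds score 0.0; th doubles as the voxel size of the overlap grid.
def _example_miou():
    x = np.random.rand(1000, 3)
    assert miou(x, x) == 1.0
    assert miou(x, x + 10.0) == 0.0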
from sklearn.neighbors import NearestNeighbors
def chamfer_distance(x, y, metric='l2', direction='bi'):
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
min_y_to_x = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
min_x_to_y = y_nn.kneighbors(x)[0]
return np.mean(min_y_to_x) + np.mean(min_x_to_y)
def f1_score(x, y, metric='l2', th=0.01):
# x is pred
# y is gt
x_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(x)
d2 = x_nn.kneighbors(y)[0]
y_nn = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm='kd_tree', metric=metric).fit(y)
d1 = y_nn.kneighbors(x)[0]
    recall = float((d2 < th).sum()) / float(len(d2))
    precision = float((d1 < th).sum()) / float(len(d1))
if recall+precision > 0:
fscore = 2 * recall * precision / (recall + precision)
else:
fscore = 0
return fscore, precision, recall
from scipy.spatial import cKDTree
def find_nn_cpu(feat0, feat1, return_distance=False):
feat1tree = cKDTree(feat1)
    dists, nn_inds = feat1tree.query(feat0, k=1, workers=-1)
if return_distance:
return nn_inds, dists
else:
return nn_inds
def find_emd_cpu(feat0, feat1, return_distance=False):
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
d = cdist(feat0, feat1)
feat0_inds, feat1_inds = linear_sum_assignment(d)
return feat0_inds, feat1_inds
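# Matching sanity check (illustrative sketch): the Hungarian assignment in
# find_emd_cpu recovers a known permutation between two copies of a cloud.
def _example_emd_matching():
    feat0 = np.random.rand(50, 3)
    perm = np.random.permutation(50)
    feat0_inds, feat1_inds = find_emd_cpu(feat0, feat0[perm])
    assert np.all(perm[feat1_inds] == feat0_inds)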
def find_nn_cpu_symmetry_consistent(feat0, feat1, pts0, pts1, n_neighbor=10, local_radis=0.05, return_distance=False):
feat1tree = cKDTree(feat1)
    dists, nn_inds = feat1tree.query(feat0, k=n_neighbor, workers=-1)
if return_distance:
return nn_inds, dists
else:
return nn_inds
#################################
# ranking utilities
def overlap(list1, list2, depth):
"""Overlap which accounts for possible ties.
This isn't mentioned in the paper but should be used in the ``rbo*()``
functions below, otherwise overlap at a given depth might be > depth which
inflates the result.
There are no guidelines in the paper as to what's a good way to calculate
this, but a good guess is agreement scaled by the minimum between the
requested depth and the lengths of the considered lists (overlap shouldn't
be larger than the number of ranks in the shorter list, otherwise results
are conspicuously wrong when the lists are of unequal lengths -- rbo_ext is
    not between rbo_min and rbo_min + rbo_res).
>>> overlap("abcd", "abcd", 3)
3.0
>>> overlap("abcd", "abcd", 5)
4.0
>>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 2)
2.0
>>> overlap(["a", {"b", "c"}, "d"], ["a", {"b", "c"}, "d"], 3)
3.0
"""
return agreement(list1, list2, depth) * min(depth, len(list1), len(list2))
def rbo_ext(list1, list2, p=0.9):
"""RBO point estimate based on extrapolating observed overlap.
See equation (32) in paper.
NOTE: The doctests weren't verified against manual computations but seem
plausible.
>>> _round(rbo_ext("abcdefg", "abcdefg", .9))
1.0
>>> _round(rbo_ext("abcdefg", "bacdefg", .9))
0.9
"""
S, L = sorted((list1, list2), key=len)
s, l = len(S), len(L)
x_l = overlap(list1, list2, l)
x_s = overlap(list1, list2, s)
# the paper says overlap(..., d) / d, but it should be replaced by
# agreement(..., d) defined as per equation (28) so that ties are handled
# properly (otherwise values > 1 will be returned)
# sum1 = sum(p**d * overlap(list1, list2, d)[0] / d for d in range(1, l + 1))
sum1 = sum(p ** d * agreement(list1, list2, d) for d in range(1, l + 1))
sum2 = sum(p ** d * x_s * (d - s) / s / d for d in range(s + 1, l + 1))
term1 = (1 - p) / p * (sum1 + sum2)
term2 = p ** l * ((x_l - x_s) / l + x_s / s)
return term1 + term2
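# Ranking sanity check (illustrative sketch), mirroring the doctests above:
# identical rankings give extrapolated RBO 1.0; a swap at the top lowers it.
def _example_rbo():
    full = list('abcdefg')
    assert abs(rbo_ext(full, full, p=0.9) - 1.0) < 1e-9
    assert rbo_ext(full, list('bacdefg'), p=0.9) < 1.0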
def set_at_depth(lst, depth):
ans = set()
for v in lst[:depth]:
if isinstance(v, set):
ans.update(v)
else:
ans.add(v)
return ans
def raw_overlap(list1, list2, depth):
"""Overlap as defined in the article.
"""
set1, set2 = set_at_depth(list1, depth), set_at_depth(list2, depth)
return len(set1.intersection(set2)), len(set1), len(set2)
def agreement(list1, list2, depth):
"""Proportion of shared values between two sorted lists at given depth.
>>> _round(agreement("abcde", "abdcf", 1))
1.0
>>> _round(agreement("abcde", "abdcf", 3))
0.667
>>> _round(agreement("abcde", "abdcf", 4))
1.0
>>> _round(agreement("abcde", "abdcf", 5))
0.8
>>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 1))
0.667
>>> _round(agreement([{1, 2}, 3], [1, {2, 3}], 2))
1.0
"""
len_intersection, len_set1, len_set2 = raw_overlap(list1, list2, depth)
return 2 * len_intersection / (len_set1 + len_set2)
NVlabs/ACID/ACID/src/utils/libmise/__init__.py
from .mise import MISE
__all__ = [
    'MISE'
]
NVlabs/ACID/ACID/src/utils/libmise/test.py
import numpy as np
from mise import MISE
import time
t0 = time.time()
extractor = MISE(1, 2, 0.)
p = extractor.query()
i = 0
while p.shape[0] != 0:
print(i)
print(p)
v = 2 * (p.sum(axis=-1) > 2).astype(np.float64) - 1
extractor.update(p, v)
p = extractor.query()
i += 1
if (i >= 8):
break
print(extractor.to_dense())
# p, v = extractor.get_points()
# print(p)
# print(v)
print('Total time: %f' % (time.time() - t0))
NVlabs/ACID/ACID/src/utils/libsimplify/__init__.py
from .simplify_mesh import (
mesh_simplify
)
import trimesh
def simplify_mesh(mesh, f_target=10000, agressiveness=7.):
vertices = mesh.vertices
faces = mesh.faces
vertices, faces = mesh_simplify(vertices, faces, f_target, agressiveness)
mesh_simplified = trimesh.Trimesh(vertices, faces, process=False)
return mesh_simplified
NVlabs/ACID/ACID/src/utils/libsimplify/test.py
from simplify_mesh import mesh_simplify
import numpy as np
v = np.random.rand(100, 3)
f = np.random.choice(range(100), (50, 3))
mesh_simplify(v, f, 50)
NVlabs/ACID/ACID/src/utils/libmcubes/__init__.py
from src.utils.libmcubes.mcubes import (
marching_cubes, marching_cubes_func
)
from src.utils.libmcubes.exporter import (
export_mesh, export_obj, export_off
)
__all__ = [
    'marching_cubes', 'marching_cubes_func',
    'export_mesh', 'export_obj', 'export_off'
]
NVlabs/ACID/ACID/src/utils/libmcubes/exporter.py
import numpy as np
def export_obj(vertices, triangles, filename):
"""
Exports a mesh in the (.obj) format.
"""
with open(filename, 'w') as fh:
for v in vertices:
fh.write("v {} {} {}\n".format(*v))
for f in triangles:
fh.write("f {} {} {}\n".format(*(f + 1)))
def export_off(vertices, triangles, filename):
"""
Exports a mesh in the (.off) format.
"""
with open(filename, 'w') as fh:
fh.write('OFF\n')
fh.write('{} {} 0\n'.format(len(vertices), len(triangles)))
for v in vertices:
fh.write("{} {} {}\n".format(*v))
for f in triangles:
fh.write("3 {} {} {}\n".format(*f))
def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"):
"""
Exports a mesh in the COLLADA (.dae) format.
Needs PyCollada (https://github.com/pycollada/pycollada).
"""
import collada
mesh = collada.Collada()
vert_src = collada.source.FloatSource("verts-array", vertices, ('X','Y','Z'))
geom = collada.geometry.Geometry(mesh, "geometry0", mesh_name, [vert_src])
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', "#verts-array")
triset = geom.createTriangleSet(np.copy(triangles), input_list, "")
geom.primitives.append(triset)
mesh.geometries.append(geom)
geomnode = collada.scene.GeometryNode(geom, [])
node = collada.scene.Node(mesh_name, children=[geomnode])
myscene = collada.scene.Scene("mcubes_scene", [node])
mesh.scenes.append(myscene)
mesh.scene = myscene
mesh.write(filename)
NVlabs/ACID/ACID/src/data/__init__.py
from src.data.core import (
PlushEnvGeom, collate_remove_none, worker_init_fn, get_plush_loader
)
from src.data.transforms import (
PointcloudNoise, SubsamplePointcloud,
SubsamplePoints,
)
__all__ = [
    # Core
    'PlushEnvGeom',
    'get_plush_loader',
    'collate_remove_none',
    'worker_init_fn',
    'PointcloudNoise',
    'SubsamplePointcloud',
    'SubsamplePoints',
]
NVlabs/ACID/ACID/src/data/core.py
import os
import yaml
import pickle
import torch
import logging
import numpy as np
from torch.utils import data
from torch.utils.data.dataloader import default_collate
from src.utils import plushsim_util, common_util
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
logger = logging.getLogger(__name__)
def collate_remove_none(batch):
''' Collater that puts each data field into a tensor with outer dimension
batch size.
Args:
batch: batch
'''
batch = list(filter(lambda x: x is not None, batch))
return data.dataloader.default_collate(batch)
def worker_init_fn(worker_id):
''' Worker init function to ensure true randomness.
'''
    # scope the thread settings inside the helper so `nt` is defined, then
    # apply them once per worker
    def set_num_threads(nt):
        try:
            import mkl; mkl.set_num_threads(nt)
        except ImportError:
            pass
        torch.set_num_threads(nt)
        os.environ['IPC_ENABLE'] = '1'
        for o in ['OPENBLAS_NUM_THREADS', 'NUMEXPR_NUM_THREADS', 'OMP_NUM_THREADS', 'MKL_NUM_THREADS']:
            os.environ[o] = str(nt)
    set_num_threads(1)
random_data = os.urandom(4)
base_seed = int.from_bytes(random_data, byteorder="big")
np.random.seed(base_seed + worker_id)
def collate_pair_fn(batch):
num_points = batch[0]['sampled_pts'].shape[1]
collated = {}
for key in batch[0]:
if key == 'geo_dists':
collated[key] = torch.as_tensor(np.concatenate([d[key] for d in batch]))
elif key == 'num_pairs':
indices = []
for i,d in enumerate(batch):
indices.append(np.arange(d['num_pairs']) + i * num_points)
collated["pair_indices"] = torch.as_tensor(np.concatenate(indices))
else:
collated[key] = default_collate([d[key] for d in batch])
return collated
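# Collation sketch (illustrative, with made-up shapes): two fake samples with
# different pair counts; collate_pair_fn concatenates 'geo_dists' and emits
# 'pair_indices' offset by each sample's position in the flattened point batch.
def _example_collate_pair_fn():
    def fake(num_pairs):
        return {
            'sampled_pts': np.zeros((2, 8, 3), dtype=np.float32),
            'geo_dists': np.zeros(num_pairs, dtype=np.float32),
            'num_pairs': num_pairs,
        }
    batch = collate_pair_fn([fake(3), fake(5)])
    assert batch['pair_indices'].shape[0] == 3 + 5
    return batch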
class PlushEnvBoth(data.Dataset):
def __init__(self, flow_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.flow_root = flow_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(flow_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(flow_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, int_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_flow_data_file(
self.flow_root,split_id, model_id, reset_id, int_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_flow_pair_data_file(
self.pair_root,split_id, model_id, reset_id, int_id))
pair_reset_id, pair_int_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_flow_data_file(
self.flow_root,split_id, model_id, pair_reset_id, pair_int_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_flow, sampled_inds = self._prepare_points(
points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
# basically sampled_positive ids is used to index the pairs in pair info npz
# reorganize sampled_pts
            # evaluate the positive mask once; reassigning sampled_occ first
            # would desynchronize the flow reordering from the point order
            pos = sampled_occ == 1.
            sampled_pts = np.concatenate([sampled_pts[pos], sampled_pts[~pos]])
            sampled_flow = np.concatenate([sampled_flow[pos], sampled_flow[~pos]])
            sampled_occ = np.concatenate([sampled_occ[pos], sampled_occ[~pos]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, sampled_flow2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, \
sampled_occ, sampled_occ2, \
sampled_flow, sampled_flow2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"sampled_flow":np.stack([sampled_flow,sampled_flow2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
"start_frame":int(points_dict['start_frame']),
"end_frame":int(points_dict['end_frame']),
}
return data
def _get_pts_related_info(self, points_dict):
pts = points_dict['pts'].astype(np.float32)
occs = np.unpackbits(points_dict['occ'])
inds = points_dict['ind']
flow = np.zeros((len(pts), 3), dtype=np.float32)
flow[occs != 0] = points_dict['flow'].astype(np.float32) * 10.
return pts, occs, inds, flow
def _prepare_matched_unique(self, points_dict, points_dict2):
pts1,occs1,inds1,flow1 = self._get_pts_related_info(points_dict)
pts2,occs2,inds2,flow2 = self._get_pts_related_info(points_dict2)
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2,
assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_flow_1 = flow1[occs1==1][id1[int_id1]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
sub_inds = common_util.subsample_points(unique_pts_1, resolution=0.03, return_index=True)
unique_pts_1 = unique_pts_1[sub_inds]
unique_flow_1 = unique_flow_1[sub_inds]
unique_occ_1 = unique_occ_1[sub_inds]
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
flow_others1 = flow1[sample_others1]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_flow1 = np.concatenate([unique_flow_1, flow_others1])
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_flow_2 = flow2[occs2==1][id2[int_id2]]
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
unique_pts_2 = unique_pts_2[sub_inds]
unique_flow_2 = unique_flow_2[sub_inds]
unique_occ_2 = unique_occ_2[sub_inds]
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
flow_others2 = flow2[sample_others2]
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
sampled_flow2 = np.concatenate([unique_flow_2, flow_others2])
geo_dists = geo_dists[sub_inds]
return sampled_pts1, sampled_pts2,\
sampled_occ1, sampled_occ2, \
sampled_flow1, sampled_flow2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud_obs'].astype(np.float32)
grasp_loc = common_util.transform_points(info_dict['grasp_loc'], scene_range, to_range)
target_loc = common_util.transform_points(info_dict['target_loc'], scene_range, to_range)
tiled_grasp_loc = np.tile(grasp_loc, (len(obj_pcloud), 1)).astype(np.float32)
tiled_target_loc = np.tile(target_loc, (len(obj_pcloud), 1)).astype(np.float32)
obj_pcloud= np.concatenate([obj_pcloud, tiled_target_loc, obj_pcloud[:,:3] - tiled_grasp_loc], axis=-1)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts,occs,inds,flow = self._get_pts_related_info(points_dict)
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_flow = flow
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
sampled_flow = flow[sampled_inds]
else:
pts_chosen = pts[occs!= 0][chosen]
occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
flow_chosen = flow[occs!= 0][chosen]
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
flow_others = flow[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
sampled_flow= np.concatenate([flow_chosen, flow_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_flow, sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
# draw samples,
# for half of the points, we draw from their three closests,
# for the other half, we draw from the further points
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
def get_model_dict(self, idx):
return self.models[idx]
class PlushEnvGeom(data.Dataset):
def __init__(self, geom_root, pair_root, num_points,
split="train", transform={}, pos_ratio=2):
# Attributes
self.geom_root = geom_root
self.num_points = num_points
self.split = split
if split != "train":
self.num_points = -1
self.pair_root = pair_root
self.transform = transform
self.pos_ratio = pos_ratio
if split == 'train':
with open(os.path.join(geom_root, 'train.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
else:
with open(os.path.join(geom_root, 'test.pkl'), 'rb') as fp:
self.models = pickle.load(fp)
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.models)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
split_id, model_id, reset_id, frame_id = self.models[idx]
# load frame and get partial observation
points_dict = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, reset_id, frame_id))
obj_pcloud, env_pcloud = self._prepare_partial_obs(points_dict)
# load pair frame info
pair_info = np.load(
plushsim_util.get_pair_data_file(
self.pair_root,split_id, model_id, reset_id, frame_id))
pair_reset_id, pair_frame_id = self._get_pair_id(pair_info)
# load pair frame and get partial observation
points_dict2 = np.load(
plushsim_util.get_geom_data_file(
self.geom_root,split_id, model_id, pair_reset_id, pair_frame_id))
obj_pcloud2, env_pcloud2 = self._prepare_partial_obs(points_dict2)
if self.split == 'train':
# if training, load random points
# implicit network sampled points
pts, occs, sampled_pts, sampled_occ, sampled_inds = self._prepare_points(points_dict)
# get which occupied points are sampled (index is in the occupied subset)
occed = occs != 0
num_occed = occed.sum()
total_to_occs = np.zeros(pts.shape[0], dtype=np.uint32)
total_to_occs[occed] = np.arange(num_occed)
sampled_occs_ids = total_to_occs[sampled_inds[sampled_occ == 1.]]
# basically sampled_positive ids is used to index the pairs in pair info npz
# reorganize sampled_pts
sampled_pts = np.concatenate([sampled_pts[sampled_occ == 1.], sampled_pts[sampled_occ == 0.]])
sampled_occ = np.concatenate([sampled_occ[sampled_occ == 1.], sampled_occ[sampled_occ == 0.]])
geo_dists, tgtids = self._prepare_pair_data(pair_info, sampled_occs_ids)
_,_, sampled_pts2, sampled_occ2, _ = self._prepare_points(points_dict2, chosen=tgtids)
else:
# if not training, load matched points
sampled_pts, sampled_pts2, sampled_occ, sampled_occ2, geo_dists = self._prepare_matched_unique(points_dict, points_dict2)
data = {
"obj_obs":np.stack([obj_pcloud,obj_pcloud2]),
"env_obs":np.stack([env_pcloud,env_pcloud2]),
"sampled_pts":np.stack([sampled_pts,sampled_pts2]),
"sampled_occ":np.stack([sampled_occ,sampled_occ2]),
"geo_dists":geo_dists.astype(np.float32),
"num_pairs":len(geo_dists),
"idx":idx,
}
return data
def _prepare_matched_unique(self, points_dict, points_dict2):
pts1 = points_dict['pts'].astype(np.float32)
occs1 = np.unpackbits(points_dict['occ'])
inds1 = points_dict['ind']
pts2 = points_dict2['pts'].astype(np.float32)
occs2 = np.unpackbits(points_dict2['occ'])
inds2 = points_dict2['ind']
cls1, id1 = np.unique(inds1, return_index=True)
cls2, id2 = np.unique(inds2, return_index=True)
int_cls, int_id1, int_id2 = np.intersect1d(cls1, cls2, assume_unique=True, return_indices=True)
geo_dists = np.zeros_like(int_cls)
unique_pts_1 = pts1[occs1==1][id1[int_id1]]
unique_pts_2 = pts2[occs2==1][id2[int_id2]]
unique_occ_1 = np.ones(geo_dists.shape[0], dtype=occs1.dtype)
unique_occ_2 = np.ones(geo_dists.shape[0], dtype=occs2.dtype)
sample_others1 = np.random.randint(pts1.shape[0], size=pts1.shape[0] - unique_pts_1.shape[0])
sample_others2 = np.random.randint(pts2.shape[0], size=pts2.shape[0] - unique_pts_2.shape[0])
pts_others1 = pts1[sample_others1]
occ_others1 = occs1[sample_others1]
pts_others2 = pts2[sample_others2]
occ_others2 = occs2[sample_others2]
sampled_pts1 = np.concatenate([unique_pts_1, pts_others1])
sampled_occ1 = np.concatenate([unique_occ_1, occ_others1])
sampled_pts2 = np.concatenate([unique_pts_2, pts_others2])
sampled_occ2 = np.concatenate([unique_occ_2, occ_others2])
return sampled_pts1, sampled_pts2, sampled_occ1, sampled_occ2, geo_dists
def _prepare_partial_obs(self, info_dict):
# obj partial observation
obj_pcloud = info_dict['obj_pcloud'].astype(np.float32)
obj_pcloud += 1e-4 * np.random.randn(*obj_pcloud.shape)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict['env_pcloud'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
# chosen is the set of positive points that's preselected
def _prepare_points(self, points_dict, chosen=None):
pts = points_dict['pts'].astype(np.float32)
occs = points_dict['occ']
occs = np.unpackbits(occs)#[:points.shape[0]]
if chosen is None:
if self.num_points == -1:
sampled_pts = pts
sampled_occ = occs
sampled_inds = np.arange(len(pts))
else:
sampled_inds = np.random.randint(pts.shape[0], size=self.num_points)
sampled_pts = pts[sampled_inds]
sampled_occ = occs[sampled_inds]
else:
pts_chosen = pts[occs!= 0][chosen]
occ_chosen = np.ones(chosen.shape[0], dtype=occs.dtype)
if self.num_points == -1:
sample_others = np.random.randint(pts.shape[0], size=pts.shape[0] - chosen.shape[0])
else:
sample_others = np.random.randint(pts.shape[0], size=self.num_points - chosen.shape[0])
pts_others = pts[sample_others]
occ_others = occs[sample_others]
sampled_inds = np.concatenate([chosen, sample_others])
sampled_pts = np.concatenate([pts_chosen, pts_others])
sampled_occ = np.concatenate([occ_chosen, occ_others])
return pts, occs, sampled_pts, sampled_occ.astype(np.float32), sampled_inds
def _get_pair_id(self, pair_info):
pair_filename = os.path.splitext(str(pair_info["target_file"]))[0]
pair_reset_id, pair_frame_id = (int(f) for f in pair_filename.split('_'))
return pair_reset_id, pair_frame_id
def _prepare_pair_data(self, pair_info, sampled_occs_ids):
# load pair info
dists_sampled = pair_info['dists'][sampled_occs_ids]
tgtid_sampled = pair_info['inds'][sampled_occs_ids]
        # draw samples:
        # for the first H // pos_ratio points, draw from their three closest targets;
        # for the rest, draw from the farther candidates
H,W = dists_sampled.shape
draw_pair_ids = np.random.randint(3, size=H)
draw_pair_ids[H // self.pos_ratio:] = np.random.randint(3, high=W, size=H - H // self.pos_ratio)
tgtids = tgtid_sampled[np.arange(H), draw_pair_ids]
geo_dists = dists_sampled[np.arange(H), draw_pair_ids]
# contrastive_mask = geo_dists > self.contrastive_threshold
return geo_dists, tgtids
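    # Illustration of the draw above (defaults assumed): with pos_ratio=2 and
    # 80 stored candidates per point (the pair files' `keep`), the first half
    # of the sampled points receive one of their 3 geodesically closest
    # matches, while the second half receive a match drawn uniformly from
    # ranks [3, 80).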
def get_model_dict(self, idx):
return self.models[idx]
def build_transform_geom(cfg):
from . import transforms as tsf
from torchvision import transforms
transform = {}
transform['obj_pcloud'] = transforms.Compose([
tsf.SubsamplePointcloud(cfg['data']['pointcloud_n_obj']),
tsf.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
transform['env_pcloud'] = transforms.Compose([
tsf.SubsamplePointcloud(cfg['data']['pointcloud_n_env']),
tsf.PointcloudNoise(cfg['data']['pointcloud_noise'])
])
return transform
def get_geom_dataset(cfg, split='train', transform='build'):
geom_root = cfg['data']['geom_path']
pair_root = cfg['data']['pair_path']
num_points = cfg['data']['points_subsample']
pos_ratio = cfg['data'].get('pos_ratio', 2)
if transform == 'build':
transform = build_transform_geom(cfg)
return PlushEnvGeom(geom_root, pair_root, num_points, split=split, transform=transform, pos_ratio=pos_ratio)
def get_combined_dataset(cfg, split='train', transform='build'):
flow_root = cfg['data']['flow_path']
pair_root = cfg['data']['pair_path']
num_points = cfg['data']['points_subsample']
pos_ratio = cfg['data'].get('pos_ratio', 2)
if transform == 'build':
transform = build_transform_geom(cfg)
return PlushEnvBoth(flow_root, pair_root, num_points, split=split, transform=transform, pos_ratio=pos_ratio)
def get_plush_loader(cfg, mode, split='train', transform='build', test_shuffle=False, num_workers=None):
if mode == 'geom':
dataset = get_geom_dataset(cfg, split, transform)
elif mode == 'combined':
dataset = get_combined_dataset(cfg, split, transform)
if split == 'train':
loader = torch.utils.data.DataLoader(
dataset, batch_size=cfg['training']['batch_size'],
num_workers=cfg['training']['n_workers'],
shuffle=True,
collate_fn=collate_pair_fn,
worker_init_fn=worker_init_fn)
else:
loader = torch.utils.data.DataLoader(
dataset, batch_size=1,
num_workers=cfg['training']['n_workers_val'] if num_workers is None else num_workers,
shuffle=test_shuffle,
collate_fn=collate_pair_fn)
return loader
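# Usage sketch (assumes a parsed config dict `cfg` with the keys referenced
# above, e.g. cfg['data']['geom_path'] and cfg['training']['batch_size']):
#
#   loader = get_plush_loader(cfg, mode='geom', split='train')
#   batch = next(iter(loader))  # dict batched by collate_pair_fn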
def get_plan_loader(cfg, transform='build', category="teddy", num_workers=None):
transform = build_transform_geom(cfg)
dataset = PlushEnvPlan(cfg['data']['plan_path'], category=category, transform=transform)
loader = torch.utils.data.DataLoader(
dataset, batch_size=1,
num_workers=cfg['training']['n_workers_val'] if num_workers is None else num_workers,
shuffle=False,)
return loader
class PlushEnvPlan(data.Dataset):
    def __init__(self, plan_root, category="teddy", transform={}):
# Attributes
self.plan_root = plan_root
self.transform = transform
self.category = category
import glob
self.scenarios = glob.glob(f'{plan_root}/**/*.npz', recursive=True)
self.scenarios = [x for x in self.scenarios if category in x][:-1]
self.scenarios.sort()
def __len__(self):
''' Returns the length of the dataset.
'''
return len(self.scenarios)
def __getitem__(self, idx):
''' Returns an item of the dataset.
Args:
idx (int): ID of data point
'''
data = {}
# load frame and get partial observation
infos = np.load(self.scenarios[idx])
obj_pcloud_start, env_pcloud_start = self._prepare_partial_obs(infos, "start")
obj_pcloud_end, env_pcloud_end = self._prepare_partial_obs(infos, "end")
action = infos['actions'].astype(np.float32)
pts_start, occ_start, ind_start = self._get_pts_related_info(infos, 'start')
pts_end, occ_end, ind_end = self._get_pts_related_info(infos, 'end')
data = {
"obj_obs_start":obj_pcloud_start,
"env_obs_start":env_pcloud_start,
"obj_obs_end":obj_pcloud_end,
"env_obs_end":env_pcloud_end,
'gt_pts_start': infos['sim_pts_start'].astype(np.float32),
'gt_pts_end': infos['sim_pts_end'].astype(np.float32),
'sampled_pts_start': pts_start,
'sampled_occ_start': occ_start,
'sampled_ind_start': ind_start,
'sampled_pts_end': pts_end,
'sampled_occ_end': occ_end,
'sampled_ind_end': ind_end,
"actions": action,
"sequence_ids":infos['sequence_ids'],
"fname":self.scenarios[idx],
"idx":idx,
}
return data
def _prepare_partial_obs(self, info_dict, key):
# obj partial observation
obj_pcloud = info_dict[f'obj_pcloud_{key}'].astype(np.float32)
if 'obj_pcloud' in self.transform:
obj_pcloud = self.transform['obj_pcloud'](obj_pcloud)
# scene partial observation
env_pcloud = info_dict[f'env_pcloud_{key}'].astype(np.float32)
env_pcloud += 1e-4 * np.random.randn(*env_pcloud.shape)
if 'env_pcloud' in self.transform:
env_pcloud = self.transform['env_pcloud'](env_pcloud)
return obj_pcloud, env_pcloud
def _get_pts_related_info(self, points_dict, key):
pts = points_dict[f'pts_{key}'].astype(np.float32)
occs = np.unpackbits(points_dict[f'occ_{key}']).astype(np.float32)
inds = points_dict[f'ind_{key}'].astype(np.int32)
return pts, occs, inds | 26,177 | Python | 42.557404 | 133 | 0.593154 |
NVlabs/ACID/ACID/src/data/transforms.py | import numpy as np
# Transforms
class PointcloudNoise(object):
''' Point cloud noise transformation class.
It adds noise to point cloud data.
Args:
stddev (int): standard deviation
'''
def __init__(self, stddev):
self.stddev = stddev
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
data_out = data.copy()
points = data[None]
noise = self.stddev * np.random.randn(*points.shape)
noise = noise.astype(np.float32)
data_out[None] = points + noise
return data_out
class SubsamplePointcloud(object):
''' Point cloud subsampling transformation class.
It subsamples the point cloud data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dict): data dictionary
'''
indices = np.random.randint(data.shape[0], size=self.N)
return data[indices]
class SubsamplePoints(object):
''' Points subsampling transformation class.
It subsamples the points data.
Args:
N (int): number of points to be subsampled
'''
def __init__(self, N):
self.N = N
def __call__(self, data):
''' Calls the transformation.
Args:
data (dictionary): data dictionary
'''
points = data[None]
occ = data['occ']
ind = data['ind']
flow1 = data['flow1']
flow2 = data['flow2']
data_out = data.copy()
if isinstance(self.N, int):
idx = np.random.randint(points.shape[0], size=self.N)
data_out.update({
None: points[idx, :],
'occ': occ[idx],
'ind': ind[idx],
'flow1': flow1[idx],
'flow2': flow2[idx],
})
else:
Nt_out, Nt_in = self.N
occ_binary = (occ >= 0.5)
points0 = points[~occ_binary]
points1 = points[occ_binary]
ind0 = ind[~occ_binary]
ind1 = ind[occ_binary]
flow10 = flow1[~occ_binary]
flow11 = flow1[occ_binary]
flow20 = flow2[~occ_binary]
flow21 = flow2[occ_binary]
idx0 = np.random.randint(points0.shape[0], size=Nt_out)
idx1 = np.random.randint(points1.shape[0], size=Nt_in)
points0 = points0[idx0, :]
points1 = points1[idx1, :]
points = np.concatenate([points0, points1], axis=0)
ind0 = ind0[idx0]
ind1 = ind1[idx1]
ind = np.concatenate([ind0, ind1], axis=0)
flow10 = flow10[idx0]
flow11 = flow11[idx1]
flow1 = np.concatenate([flow10, flow11], axis=0)
flow20 = flow20[idx0]
flow21 = flow21[idx1]
flow2 = np.concatenate([flow20, flow21], axis=0)
occ0 = np.zeros(Nt_out, dtype=np.float32)
occ1 = np.ones(Nt_in, dtype=np.float32)
occ = np.concatenate([occ0, occ1], axis=0)
volume = occ_binary.sum() / len(occ_binary)
volume = volume.astype(np.float32)
data_out.update({
None: points,
'occ': occ,
'volume': volume,
'ind': ind,
'flow1': flow1,
'flow2': flow2,
})
return data_out
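# --- Usage sketch (illustrative, not part of the training pipeline) ---
# SubsamplePointcloud acts on a raw (N, 3) array, while PointcloudNoise
# expects a data dict keyed by None (the occupancy-networks convention).
if __name__ == '__main__':
    pcl = np.random.rand(1024, 3).astype(np.float32)
    sub = SubsamplePointcloud(128)
    noisy = PointcloudNoise(0.005)({None: sub(pcl)})
    print(noisy[None].shape)  # -> (128, 3)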
| 3,578 | Python | 25.708955 | 67 | 0.507546 |
NVlabs/ACID/ACID/preprocess/gen_data_flow_plush.py | import numpy as np
import os
import time, datetime
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import json
from src.utils import plushsim_util
from src.utils import common_util
import glob
import tqdm
from multiprocessing import Pool
import argparse
parser = argparse.ArgumentParser("Training Flow Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--save_root", type=str, default=flow_default)
args = parser.parse_args()
data_root = args.data_root
save_root = args.save_root
scene_range = plushsim_util.SCENE_RANGE.copy()
to_range = np.array([[-1.1,-1.1,-1.1],[1.1,1.1,1.1]]) * 0.5
class_to_std = {
'teddy':0.12,
'elephant':0.15,
'octopus':0.12,
'rabbit':0.08,
'dog':0.08,
'snake':0.04,
}
def export_train_data(data_id):
# try:
# load action info
split_id, model_category, model_name, reset_id, interaction_id = data_id
grasp_loc, target_loc, f1, _, f2 = plushsim_util.get_action_info(model_category, model_name, split_id, reset_id, interaction_id, data_root)
# get observations
obj_pts1, env_pts1 = plushsim_util.get_scene_partial_pointcloud(
model_category, model_name, split_id, reset_id, f1, data_root)
obj_pts1=common_util.subsample_points(
common_util.transform_points(obj_pts1, scene_range, to_range), resolution=0.005, return_index=False)
env_pts1=common_util.subsample_points(
common_util.transform_points(env_pts1, scene_range, to_range), resolution=0.020, return_index=False)
# calculate flow
sim_pts1, _, loc,_,_= plushsim_util.get_object_full_points(
model_category, model_name, split_id, reset_id, f1, data_root)
sim_pts2, _,_,_,_= plushsim_util.get_object_full_points(
model_category, model_name, split_id, reset_id, f2, data_root)
sim_pts1=common_util.transform_points(sim_pts1, scene_range, to_range)
sim_pts2=common_util.transform_points(sim_pts2, scene_range, to_range)
sim_pts_flow = sim_pts2 - sim_pts1
# sample occupancy
center =common_util.transform_points(loc, scene_range, to_range)[0]
pts, occ, pt_class = plushsim_util.sample_occupancies(sim_pts1, center,
std=class_to_std[model_category],sample_scheme='object')
# get implicit flows
flow = sim_pts_flow[pt_class]
# save
kwargs = {'sim_pts':sim_pts1.astype(np.float16),
'obj_pcloud_obs':obj_pts1.astype(np.float16),
'env_pcloud':env_pts1.astype(np.float16),
'pts':pts.astype(np.float16),
'occ':np.packbits(occ),
'ind':pt_class.astype(np.uint16),
'flow':flow.astype(np.float16),
'start_frame':f1,
'end_frame':f2,
'grasp_loc':grasp_loc,
'target_loc': target_loc}
model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
save_path = os.path.join(model_dir, f"{reset_id:03d}_{interaction_id:03d}.npz")
np.savez_compressed(save_path, **kwargs)
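# Sketch of reading one exported sample back (path illustrative); occupancies
# are bit-packed, so unpack and truncate to the number of points:
#   d = np.load('train_data/flow/split_A/teddy_01/000_000.npz')
#   occ = np.unpackbits(d['occ'])[:d['pts'].shape[0]]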
def get_all_data_points_flow(data_root):
good_interactions = glob.glob(f"{data_root}/*/*/*/info/good_interactions.json")
good_ints = []
for g in tqdm.tqdm(good_interactions):
split_id, model_category, model_name = g.split('/')[-5:-2]
model_dir = os.path.join(save_root, f"{split_id}", f"{model_name}")
os.makedirs(model_dir, exist_ok=True)
model_dir = plushsim_util.get_model_dir(data_root, split_id, model_category, model_name)
with open(g, 'r') as fp:
good_ones = json.load(fp)
for k,v in good_ones.items():
reset_id = int(k)
for int_id in v:
good_ints.append((split_id, model_category, model_name, reset_id, int_id))
return good_ints
good_ints = get_all_data_points_flow(data_root)#[:100]
start_time = time.time()
with Pool(40) as p:
for _ in tqdm.tqdm(p.imap_unordered(export_train_data, good_ints), total=len(good_ints)):
pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}') | 4,353 | Python | 39.691588 | 143 | 0.64668 |
NVlabs/ACID/ACID/preprocess/gen_data_contrastive_pairs_flow.py | import os
import sys
import glob
import tqdm
import random
import argparse
import numpy as np
import os.path as osp
import time
from multiprocessing import Pool
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
parser = argparse.ArgumentParser("Training Contrastive Pair Data Generation")
data_plush_default = osp.join(ACID_dir, "data_plush")
meta_default = osp.join(ACID_dir, "data_plush", "metadata")
flow_default = osp.join(ACID_dir, "train_data", "flow")
pair_default = osp.join(ACID_dir, "train_data", "pair")
parser.add_argument("--data_root", type=str, default=data_plush_default)
parser.add_argument("--meta_root", type=str, default=meta_default)
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--save_root", type=str, default=pair_default)
args = parser.parse_args()
data_root = args.data_root
flow_root = args.flow_root
save_root = args.save_root
meta_root = args.meta_root
os.makedirs(save_root, exist_ok=True)
def using_complex(a):
weight = 1j*np.linspace(0, a.shape[1], a.shape[0], endpoint=False)
b = a + weight[:, np.newaxis]
u, ind = np.unique(b, return_index=True)
b = np.zeros_like(a) + 256
np.put(b, ind, a.flat[ind])
return b
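# Sketch of what using_complex does (values illustrative): each row of `a` is
# offset by a distinct imaginary part so np.unique deduplicates per row, and
# repeated entries within a row are replaced by the sentinel 256:
#   using_complex(np.array([[3, 1, 1], [2, 2, 0]]))
#   -> array([[  3,   1, 256],
#             [  2, 256,   0]])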
def process(pair, num_samples=320, keep=80):
split_id, model_name, f,p = pair
src_file = np.load(f"{flow_root}/{split_id}/{model_name}/{f}")
tgt_file = np.load(f"{flow_root}/{split_id}/{model_name}/{p}")
src_inds = src_file['ind']
tgt_inds = tgt_file['ind']
src_inds = np.tile(src_inds, (num_samples,1)).T
    tgt_samples = np.random.randint(0, high=len(tgt_inds), size=(len(src_inds), num_samples))
tgt_samples_inds = tgt_inds[tgt_samples]
dists = dist_matrix[src_inds.reshape(-1), tgt_samples_inds.reshape(-1)].reshape(*src_inds.shape)
dists_unique = using_complex(dists)
idx = np.argsort(dists_unique, axis=-1)
dists_sorted = np.take_along_axis(dists, idx, axis=-1).astype(np.uint8)[:,:keep]
tgt_samples_sorted = np.take_along_axis(tgt_samples, idx, axis=-1)[:,:keep]
if tgt_samples_sorted.max() <= np.iinfo(np.uint16).max:
tgt_samples_sorted = tgt_samples_sorted.astype(np.uint16)
else:
tgt_samples_sorted = tgt_samples_sorted.astype(np.uint32)
results = {"target_file":p, "dists":dists_sorted, "inds":tgt_samples_sorted}
np.savez_compressed(os.path.join(save_dir, f"pair_{f}"), **results)
def export_pair_data(data_id):
split_id, model_name = data_id
all_files = all_geoms[data_id]
print(split_id, model_name)
global dist_matrix
dist_matrix = np.load(f'{meta_root}/{split_id}/{model_name}_dist.npz')['arr_0']
global save_dir
save_dir = os.path.join(save_root, split_id, model_name)
os.makedirs(save_dir, exist_ok=True)
pairs = [ (split_id, model_name, f,random.choice(all_files)) for f in all_files ]
start_time = time.time()
with Pool(10) as p:
for _ in tqdm.tqdm(p.imap_unordered(process, pairs), total=len(all_files)):
pass
end_time = time.time()
from datetime import timedelta
time_str = str(timedelta(seconds=end_time - start_time))
print(f'Total processing takes: {time_str}')
if __name__ == '__main__':
from collections import defaultdict
global all_geoms
all_geoms = defaultdict(lambda: [])
for g in glob.glob(f"{flow_root}/*/*/*"):
split_id, model_name, file_name = g.split('/')[-3:]
all_geoms[(split_id, model_name)].append(file_name)
for k in all_geoms.keys():
export_pair_data(k)
| 3,584 | Python | 35.212121 | 100 | 0.66183 |
NVlabs/ACID/ACID/preprocess/gen_data_flow_splits.py | import os
import sys
import os.path as osp
ACID_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
sys.path.insert(0,ACID_dir)
import glob
import argparse
flow_default = osp.join(ACID_dir, "train_data", "flow")
parser = argparse.ArgumentParser("Making training / testing splits...")
parser.add_argument("--flow_root", type=str, default=flow_default)
parser.add_argument("--no_split", action="store_true", default=False)
args = parser.parse_args()
flow_root = args.flow_root
all_npz = glob.glob(f"{flow_root}/*/*/*.npz")
print(f"In total {len(all_npz)} data points...")
def filename_to_id(fname):
split_id, model_name, f = fname.split("/")[-3:]
reset_id, frame_id = (int(x) for x in os.path.splitext(f)[0].split('_'))
return split_id, model_name, reset_id, frame_id
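# e.g. filename_to_id('data/split_A/teddy_01/003_012.npz') -> ('split_A', 'teddy_01', 3, 12)
# (path illustrative)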
from collections import defaultdict
total_files = defaultdict(lambda : defaultdict(lambda : []))
for fname in all_npz:
split_id, model_name, reset_id, frame_id = filename_to_id(fname)
total_files[(split_id, model_name)][reset_id].append(frame_id)
total_files = dict(total_files)
for k,v in total_files.items():
total_files[k] = dict(v)
import pickle
if args.no_split:
train = total_files
test = total_files
else:
train = {}
test = {}
for k,v in total_files.items():
split_id, model_name = k
if "teddy" in model_name:
test[k] = v
else:
train[k] = v
train_total = []
for k,v in train.items():
for x, u in v.items():
for y in u:
train_total.append((*k, x, y))
print(f"training data points: {len(train_total)}")
test_total = []
for k,v in test.items():
for x, u in v.items():
for y in u:
test_total.append((*k, x, y))
print(f"testing data points: {len(test_total)}")
with open(f"{flow_root}/train.pkl", "wb") as fp:
pickle.dump(train_total, fp)
with open(f"{flow_root}/test.pkl", "wb") as fp:
pickle.dump(test_total, fp) | 1,972 | Python | 28.447761 | 76 | 0.625761 |
erasromani/isaac-sim-python/simulate_grasp.py | import os
import argparse
from grasp.grasp_sim import GraspSimulator
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.synthetic_utils import OmniKitHelper
def main(args):
kit = OmniKitHelper(
{"renderer": "RayTracedLighting", "experience": f"{os.environ['EXP_PATH']}/isaac-sim-python.json", "width": args.width, "height": args.height}
)
_mp = _motion_planning.acquire_motion_planning_interface()
_dc = _dynamic_control.acquire_dynamic_control_interface()
    record = args.video is not None
sim = GraspSimulator(kit, _dc, _mp, record=record)
# add object path
    from_server = args.location != 'local'
for path in args.path:
sim.add_object_path(path, from_server=from_server)
# start simulation
sim.play()
for _ in range(args.num):
sim.add_object(position=(40, 0, 10))
sim.wait_for_drop()
sim.wait_for_loading()
evaluation = sim.execute_grasp(args.position, args.angle)
output_string = f"Grasp evaluation: {evaluation}"
        print('\n' + '#' * len(output_string))
        print(output_string)
        print('#' * len(output_string) + '\n')
# Stop physics simulation
sim.stop()
if record: sim.save_video(args.video)
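# Example invocation (paths and values illustrative):
#   python simulate_grasp.py -P /path/to/objects.usd -p 40 0 10 -a 0 -n 3 -v grasp.mp4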
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Simulate Panda arm planar grasp execution in NVIDIA Omniverse Isaac Sim')
required = parser.add_argument_group('required arguments')
required.add_argument('-P', '--path', type=str, nargs='+', metavar='', required=True, help='path to usd file or content folder')
required.add_argument('-p', '--position', type=float, nargs=3, metavar='', required=True, help='grasp position, X Y Z')
required.add_argument('-a', '--angle', type=float, metavar='', required=True, help='grasp angle in degrees')
parser.add_argument('-l', '--location', type=str, metavar='', required=False, help='location of usd path, choices={local, nucleus_server}', choices=['local', 'nucleus_server'], default='local')
parser.add_argument('-n', '--num', type=int, metavar='', required=False, help='number of objects to spawn in the scene', default=1)
parser.add_argument('-v', '--video', type=str, metavar='', required=False, help='output filename of grasp simulation video')
parser.add_argument('-W', '--width', type=int, metavar='', required=False, help='width of the viewport and generated images', default=1024)
parser.add_argument('-H', '--height', type=int, metavar='', required=False, help='height of the viewport and generated images', default=800)
args = parser.parse_args()
print(args.path)
main(args) | 2,835 | Python | 39.514285 | 197 | 0.662434 |
erasromani/isaac-sim-python/grasp/grasp_sim.py | import os
import numpy as np
import tempfile
import omni.kit
from omni.isaac.synthetic_utils import SyntheticDataHelper
from grasp.utils.isaac_utils import RigidBody
from grasp.grasping_scenarios.grasp_object import GraspObject
from grasp.utils.visualize import screenshot, img2vid
default_camera_pose = {
'position': (142, -127, 56), # position given by (x, y, z)
    'target': (-180, 234, -27)   # target given by (x, y, z)
}
class GraspSimulator(GraspObject):
""" Defines a grasping simulation scenario
    Scenarios define planar grasp execution in a scene containing a Panda arm and various rigid objects.
"""
def __init__(self, kit, dc, mp, dt=1/60.0, record=False, record_interval=10):
"""
Initializes grasp simulator
Args:
kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
dt (float): simulation time step in seconds
record (bool): flag for capturing screenshots throughout simulation for video recording
record_interval (int): frame intervals for capturing screenshots
"""
super().__init__(kit, dc, mp)
self.frame = 0
self.dt = dt
self.record = record
self.record_interval = record_interval
self.tmp_dir = tempfile.mkdtemp()
self.sd_helper = SyntheticDataHelper()
# create initial scene
self.create_franka()
# set camera pose
self.set_camera_pose(default_camera_pose['position'], default_camera_pose['target'])
def execute_grasp(self, position, angle):
"""
Executes a planar grasp with a panda arm.
Args:
position (list or numpy.darray): grasp position array of length 3 given by [x, y, z]
            angle (float): grasp angle in degrees
        Returns:
            evaluation (enum.EnumMeta): GRASP_eval class containing two states {GRASP_eval.FAILURE, GRASP_eval.SUCCESS}
"""
self.set_target_angle(angle)
self.set_target_position(position)
self.perform_tasks()
# start simulation
        previously_playing = self._kit.editor.is_playing()
if self.pick_and_place is not None:
while True:
self.step(0)
self.update()
if self.pick_and_place.evaluation is not None:
break
evaluation = self.pick_and_place.evaluation
self.stop_tasks()
self.step(0)
self.update()
# Stop physics simulation
if not previously_playing: self.stop()
return evaluation
def wait_for_drop(self, max_steps=2000):
"""
Waits for all objects to drop.
Args:
max_steps (int): maximum number of timesteps before aborting wait
"""
# start simulation
        previously_playing = self._kit.editor.is_playing()
if not previously_playing: self.play()
step = 0
while step < max_steps or self._kit.is_loading():
self.step(step)
self.update()
objects_speed = np.array([o.get_speed() for o in self.objects])
if np.all(objects_speed == 0): break
            step += 1
# Stop physics simulation
if not previously_playing: self.stop()
def wait_for_loading(self):
"""
Waits for all scene visuals to load.
"""
while self.is_loading():
self.update()
def play(self):
"""
Starts simulation.
"""
self._kit.play()
if not hasattr(self, 'world') or not hasattr(self, 'franka_solid') or not hasattr(self, 'bin_solid') or not hasattr(self, 'pick_and_place'):
self.register_scene()
def stop(self):
"""
Stops simulation.
"""
self._kit.stop()
def update(self):
"""
Simulate one time step.
"""
if self.record and self.sd_helper is not None and self.frame % self.record_interval == 0:
screenshot(self.sd_helper, suffix=self.frame, directory=self.tmp_dir)
self._kit.update(self.dt)
self.frame += 1
def is_loading(self):
"""
Determine if all scene visuals are loaded.
Returns:
(bool): flag for whether or not all scene visuals are loaded
"""
return self._kit.is_loading()
def set_camera_pose(self, position, target):
"""
Set camera pose.
Args:
position (list or numpy.darray): camera position array of length 3 given by [x, y, z]
target (list or numpy.darray): target position array of length 3 given by [x, y, z]
"""
self._editor.set_camera_position("/OmniverseKit_Persp", *position, True)
self._editor.set_camera_target("/OmniverseKit_Persp", *target, True)
def save_video(self, path):
"""
Save video recording of screenshots taken throughout the simulation.
Args:
path (str): output video filename
"""
framerate = int(round(1.0 / (self.record_interval * self.dt)))
img2vid(os.path.join(self.tmp_dir, '*.png'), path, framerate=framerate)
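# Usage sketch (mirrors simulate_grasp.py; kit/dc/mp handles assumed):
#
#   sim = GraspSimulator(kit, dc, mp, record=True)
#   sim.add_object_path('Isaac/Props/Blocks/red_block.usd', from_server=True)
#   sim.play()
#   sim.add_object(position=(40, 0, 10))
#   sim.wait_for_drop()
#   sim.wait_for_loading()
#   evaluation = sim.execute_grasp((40, 0, 5), 30.0)
#   sim.stop()
#   sim.save_video('grasp.mp4')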
| 5,666 | Python | 32.532544 | 148 | 0.59107 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/scenario.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import gc
import carb
import omni.usd
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from grasp.utils.isaac_utils import set_up_z_axis
class Scenario:
"""
Defines a block stacking scenario.
Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
"""
def __init__(self, editor, dc, mp):
"""
Initialize scenario.
Args:
editor (omni.kit.editor._editor.IEditor): editor object from isaac-sim simulation
dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
"""
self._editor = editor # Reference to the Kit editor
self._stage = omni.usd.get_context().get_stage() # Reference to the current USD stage
self._dc = dc # Reference to the dynamic control plugin
self._mp = mp # Reference to the motion planning plugin
self._domains = [] # Contains instances of environment
        self._obstacles = []  # Contains references to any obstacles in the scenario
self._executor = None # Contains the thread pool used to run tasks
self._created = False # Is the robot created or not
self._running = False # Is the task running or not
def __del__(self):
"""
Cleanup scenario objects when deleted, force garbage collection.
"""
self.robot_created = False
self._domains = []
self._obstacles = []
self._executor = None
gc.collect()
def reset_blocks(self, *args):
"""
        Function called when block poses are reset.
"""
pass
def stop_tasks(self, *args):
"""
Stop tasks in the scenario if any.
"""
self._running = False
pass
def step(self, step):
"""
Step the scenario, can be used to update things in the scenario per frame.
"""
pass
def create_franka(self, *args):
"""
Create franka USD objects.
"""
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
return
self.asset_path = nucleus_server + "/Isaac"
# USD paths loaded by scenarios
self.franka_table_usd = self.asset_path + "/Samples/Leonardo/Stage/franka_block_stacking.usd"
self.franka_ghost_usd = self.asset_path + "/Samples/Leonardo/Robots/franka_ghost.usd"
self.background_usd = self.asset_path + "/Environments/Grid/gridroom_curved.usd"
self.rubiks_cube_usd = self.asset_path + "/Props/Rubiks_Cube/rubiks_cube.usd"
self.red_cube_usd = self.asset_path + "/Props/Blocks/red_block.usd"
self.yellow_cube_usd = self.asset_path + "/Props/Blocks/yellow_block.usd"
self.green_cube_usd = self.asset_path + "/Props/Blocks/green_block.usd"
self.blue_cube_usd = self.asset_path + "/Props/Blocks/blue_block.usd"
self._created = True
self._stage = omni.usd.get_context().get_stage()
set_up_z_axis(self._stage)
self.stop_tasks()
pass
def register_assets(self, *args):
"""
Connect franka controller to usd assets
"""
pass
def task(self, domain):
"""
Task to be performed for a given robot.
"""
pass
def perform_tasks(self, *args):
"""
Perform all tasks in scenario if multiple robots are present.
"""
self._running = True
pass
def is_created(self):
"""
Return if the franka was already created.
"""
return self._created
| 3,963 | Python | 32.880342 | 132 | 0.609134 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/grasp_object.py | # Credits: Starter code taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import os
import random
import numpy as np
import glob
import omni
import carb
from enum import Enum
from collections import deque
from pxr import Gf, UsdGeom
from copy import copy
from omni.physx.scripts.physicsUtils import add_ground_plane
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.utils._isaac_utils import math as math_utils
from omni.isaac.samples.scripts.utils.world import World
from omni.isaac.utils.scripts.nucleus_utils import find_nucleus_server
from omni.physx import _physx
from grasp.utils.isaac_utils import create_prim_from_usd, RigidBody, set_translate, set_rotate, setup_physics
from grasp.grasping_scenarios.franka import Franka, default_config
from grasp.grasping_scenarios.scenario import Scenario
statedic = {0: "orig", 1: "axis_x", 2: "axis_y", 3: "axis_z"}
class SM_events(Enum):
"""
State machine events.
"""
START = 0
WAYPOINT_REACHED = 1
GOAL_REACHED = 2
ATTACHED = 3
DETACHED = 4
TIMEOUT = 5
STOP = 6
    NONE = 7  # no event occurred, just clock ticks
class SM_states(Enum):
"""
State machine states.
"""
STANDBY = 0 # Default state, does nothing unless enters with event START
PICKING = 1
ATTACH = 2
HOLDING = 3
GRASPING = 4
LIFTING = 5
class GRASP_eval(Enum):
"""
Grasp execution evaluation.
"""
FAILURE = 0
SUCCESS = 1
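# Nominal state flow implemented by the handlers below (derived from the
# transition table filled in PickAndPlaceStateMachine.__init__):
#   STANDBY --START--> PICKING --GOAL_REACHED--> GRASPING --ATTACHED-->
#   LIFTING --GOAL_REACHED--> STANDBY (GRASP_eval.SUCCESS);
# a DETACHED or TIMEOUT event in any state returns to STANDBY with
# GRASP_eval.FAILURE.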
class PickAndPlaceStateMachine(object):
"""
Self-contained state machine class for Robot Behavior. Each machine state may react to different events,
and the handlers are defined as in-class functions.
"""
def __init__(self, stage, robot, ee_prim, default_position):
"""
Initialize state machine.
Args:
stage (pxr.Usd.Stage): usd stage
robot (grasp.grasping_scenarios.franka.Franka): robot controller object
ee_prim (pxr.Usd.Prim): Panda arm end effector prim
default_position (omni.isaac.dynamic_control._dynamic_control.Transform): default position of Panda arm
"""
self.robot = robot
self.dc = robot.dc
self.end_effector = ee_prim
self.end_effector_handle = None
self._stage = stage
self.start_time = 0.0
self.start = False
self._time = 0.0
self.default_timeout = 10
self.default_position = copy(default_position)
self.target_position = default_position
self.target_point = default_position.p
self.target_angle = 0 # grasp angle in degrees
self.reset = False
self.evaluation = None
self.waypoints = deque()
self.thresh = {}
# Threshold to clear waypoints/goal
# (any waypoint that is not final will be cleared with the least precision)
self.precision_thresh = [
[0.0005, 0.0025, 0.0025, 0.0025],
[0.0005, 0.005, 0.005, 0.005],
[0.05, 0.2, 0.2, 0.2],
[0.08, 0.4, 0.4, 0.4],
[0.18, 0.6, 0.6, 0.6],
]
self.add_object = None
# Event management variables
# Used to verify if the goal was reached due to robot moving or it had never left previous target
self._is_moving = False
self._attached = False # Used to flag the Attached/Detached events on a change of state from the end effector
self._detached = False
self.is_closed = False
self.pick_count = 0
# Define the state machine handling functions
self.sm = {}
# Make empty state machine for all events and states
for s in SM_states:
self.sm[s] = {}
for e in SM_events:
self.sm[s][e] = self._empty
self.thresh[s] = 0
# Fill in the functions to handle each event for each status
self.sm[SM_states.STANDBY][SM_events.START] = self._standby_start
self.sm[SM_states.STANDBY][SM_events.GOAL_REACHED] = self._standby_goal_reached
self.thresh[SM_states.STANDBY] = 3
self.sm[SM_states.PICKING][SM_events.GOAL_REACHED] = self._picking_goal_reached
self.thresh[SM_states.PICKING] = 1
self.sm[SM_states.GRASPING][SM_events.ATTACHED] = self._grasping_attached
self.sm[SM_states.LIFTING][SM_events.GOAL_REACHED] = self._lifting_goal_reached
for s in SM_states:
self.sm[s][SM_events.DETACHED] = self._all_detached
self.sm[s][SM_events.TIMEOUT] = self._all_timeout
self.current_state = SM_states.STANDBY
self.previous_state = -1
self._physxIFace = _physx.acquire_physx_interface()
# Auxiliary functions
def _empty(self, *args):
"""
Empty function to use on states that do not react to some specific event.
"""
pass
def change_state(self, new_state, print_state=True):
"""
        Function called every time an event handler changes the current state.
"""
self.current_state = new_state
self.start_time = self._time
if print_state: carb.log_warn(str(new_state))
def goalReached(self):
"""
Checks if the robot has reached a certain waypoint in the trajectory.
"""
if self._is_moving:
state = self.robot.end_effector.status.current_frame
target = self.robot.end_effector.status.current_target
error = 0
for i in [0, 2, 3]:
k = statedic[i]
state_v = state[k]
target_v = target[k]
error = np.linalg.norm(state_v - target_v)
# General Threshold is the least strict
thresh = self.precision_thresh[-1][i]
if len(self.waypoints) == 0:
thresh = self.precision_thresh[self.thresh[self.current_state]][i]
if error > thresh:
return False
self._is_moving = False
return True
return False
def get_current_state_tr(self):
"""
Gets current End Effector Transform, converted from Motion position and Rotation matrix.
"""
# Gets end effector frame
state = self.robot.end_effector.status.current_frame
orig = state["orig"] * 100.0
mat = Gf.Matrix3f(
*state["axis_x"].astype(float), *state["axis_y"].astype(float), *state["axis_z"].astype(float)
)
q = mat.ExtractRotation().GetQuaternion()
(q_x, q_y, q_z) = q.GetImaginary()
q = [q_x, q_y, q_z, q.GetReal()]
tr = _dynamic_control.Transform()
tr.p = list(orig)
tr.r = q
return tr
def lerp_to_pose(self, pose, n_waypoints=1):
"""
        Adds spherically interpolated (slerp) waypoints from the last pose in
        the waypoint list to the provided pose. If the waypoint list is empty,
        the current pose is used.
"""
if len(self.waypoints) == 0:
start = self.get_current_state_tr()
start.p = math_utils.mul(start.p, 0.01)
else:
start = self.waypoints[-1]
if n_waypoints > 1:
for i in range(n_waypoints):
self.waypoints.append(math_utils.slerp(start, pose, (i + 1.0) / n_waypoints))
else:
self.waypoints.append(pose)
def move_to_zero(self):
self._is_moving = False
self.robot.end_effector.go_local(
orig=[], axis_x=[], axis_y=[], axis_z=[], use_default_config=True, wait_for_target=False, wait_time=5.0
)
def move_to_target(self):
"""
Move arm towards target with RMP controller.
"""
xform_attr = self.target_position
self._is_moving = True
orig = np.array([xform_attr.p.x, xform_attr.p.y, xform_attr.p.z])
axis_y = np.array(math_utils.get_basis_vector_y(xform_attr.r))
axis_z = np.array(math_utils.get_basis_vector_z(xform_attr.r))
self.robot.end_effector.go_local(
orig=orig,
axis_x=[],
axis_y=axis_y,
axis_z=axis_z,
use_default_config=True,
wait_for_target=False,
wait_time=5.0,
)
def get_target_orientation(self):
"""
        Gets the target gripper orientation given the target angle for a planar grasp.
"""
angle = self.target_angle * np.pi / 180
mat = Gf.Matrix3f(
-np.cos(angle), -np.sin(angle), 0, -np.sin(angle), np.cos(angle), 0, 0, 0, -1
)
q = mat.ExtractRotation().GetQuaternion()
(q_x, q_y, q_z) = q.GetImaginary()
q = [q_x, q_y, q_z, q.GetReal()]
return q
def get_target_to_point(self, offset_position=[]):
"""
Get target Panda arm pose from target position and angle.
"""
offset = _dynamic_control.Transform()
if offset_position:
offset.p.x = offset_position[0]
offset.p.y = offset_position[1]
offset.p.z = offset_position[2]
target_pose = _dynamic_control.Transform()
target_pose.p = self.target_point
target_pose.r = self.get_target_orientation()
target_pose = math_utils.mul(target_pose, offset)
target_pose.p = math_utils.mul(target_pose.p, 0.01)
return target_pose
def set_target_to_point(self, offset_position=[], n_waypoints=1, clear_waypoints=True):
"""
        Clears the waypoint list and sets a new waypoint list towards a given point in space.
"""
target_position = self.get_target_to_point(offset_position=offset_position)
# linear interpolate to target pose
if clear_waypoints:
self.waypoints.clear()
self.lerp_to_pose(target_position, n_waypoints=n_waypoints)
# Get first waypoint target
self.target_position = self.waypoints.popleft()
def step(self, timestamp, start=False, reset=False):
"""
Steps the State machine, handling which event to call.
"""
if self.current_state != self.previous_state:
self.previous_state = self.current_state
if not self.start:
self.start = start
if self.current_state in [SM_states.GRASPING, SM_states.LIFTING]:
# object grasped
if not self.robot.end_effector.gripper.is_closed(1e-1) and not self.robot.end_effector.gripper.is_moving(1e-2):
self._attached = True
# self.is_closed = False
# object not grasped
elif self.robot.end_effector.gripper.is_closed(1e-1):
self._detached = True
self.is_closed = True
# Process events
if reset:
# reset to default pose, clear waypoints, and re-initialize event handlers
self.current_state = SM_states.STANDBY
self.previous_state = -1
self.robot.end_effector.gripper.open()
self.evaluation = None
self.start = False
self._time = 0
self.start_time = self._time
self.pick_count = 0
self.waypoints.clear()
self._detached = False
self._attached = False
self.target_position = self.default_position
self.move_to_target()
elif self._detached:
self._detached = False
self.sm[self.current_state][SM_events.DETACHED]()
elif self.goalReached():
if len(self.waypoints) == 0:
self.sm[self.current_state][SM_events.GOAL_REACHED]()
else:
self.target_position = self.waypoints.popleft()
self.move_to_target()
# self.start_time = self._time
elif self.current_state == SM_states.STANDBY and self.start:
self.sm[self.current_state][SM_events.START]()
elif self._attached:
self._attached = False
self.sm[self.current_state][SM_events.ATTACHED]()
elif self._time - self.start_time > self.default_timeout:
self.sm[self.current_state][SM_events.TIMEOUT]()
else:
self.sm[self.current_state][SM_events.NONE]()
self._time += 1.0 / 60.0
# Event handling functions. Each state has its own event handler function depending on which event happened
def _standby_start(self, *args):
"""
Handles the start event when in standby mode.
Proceeds to move towards target grasp pose.
"""
# Tell motion planner controller to ignore current object as an obstacle
self.pick_count = 0
self.evaluation = None
self.lerp_to_pose(self.default_position, 1)
self.lerp_to_pose(self.default_position, 60)
self.robot.end_effector.gripper.open()
# set target above the current bin with offset of 10 cm
self.set_target_to_point(offset_position=[0.0, 0.0, -10.0], n_waypoints=90, clear_waypoints=False)
# pause before lowering to target object
self.lerp_to_pose(self.waypoints[-1], 180)
self.set_target_to_point(n_waypoints=90, clear_waypoints=False)
# start arm movement
self.move_to_target()
# Move to next state
self.change_state(SM_states.PICKING)
# NOTE: As is, this method is never executed
def _standby_goal_reached(self, *args):
"""
Reset grasp execution.
"""
self.move_to_zero()
self.start = True
def _picking_goal_reached(self, *args):
"""
        Grasp pose reached, close gripper.
"""
self.robot.end_effector.gripper.close()
self.is_closed = True
# Move to next state
self.move_to_target()
self.robot.end_effector.gripper.width_history.clear()
self.change_state(SM_states.GRASPING)
def _grasping_attached(self, *args):
"""
Object grasped, lift arm.
"""
self.waypoints.clear()
offset = _dynamic_control.Transform()
offset.p.z = -10
target_pose = math_utils.mul(self.get_current_state_tr(), offset)
target_pose.p = math_utils.mul(target_pose.p, 0.01)
self.lerp_to_pose(target_pose, n_waypoints=60)
self.lerp_to_pose(target_pose, n_waypoints=120)
# Move to next state
self.move_to_target()
self.robot.end_effector.gripper.width_history.clear()
self.change_state(SM_states.LIFTING)
def _lifting_goal_reached(self, *args):
"""
Finished executing grasp successfully, resets for next grasp execution.
"""
self.is_closed = False
self.robot.end_effector.gripper.open()
self._all_detached()
self.pick_count += 1
self.evaluation = GRASP_eval.SUCCESS
carb.log_warn(str(GRASP_eval.SUCCESS))
def _all_timeout(self, *args):
"""
Timeout reached and reset.
"""
self.change_state(SM_states.STANDBY, print_state=False)
self.robot.end_effector.gripper.open()
self.start = False
self.waypoints.clear()
self.target_position = self.default_position
self.lerp_to_pose(self.default_position, 1)
self.lerp_to_pose(self.default_position, 10)
self.lerp_to_pose(self.default_position, 60)
self.move_to_target()
self.evaluation = GRASP_eval.FAILURE
carb.log_warn(str(GRASP_eval.FAILURE))
def _all_detached(self, *args):
"""
Object detached and reset.
"""
self.change_state(SM_states.STANDBY, print_state=False)
self.start = False
self.waypoints.clear()
self.lerp_to_pose(self.target_position, 60)
self.lerp_to_pose(self.default_position, 10)
self.lerp_to_pose(self.default_position, 60)
self.move_to_target()
self.evaluation = GRASP_eval.FAILURE
carb.log_warn(str(GRASP_eval.FAILURE))
class GraspObject(Scenario):
""" Defines an obstacle avoidance scenario
Scenarios define the life cycle within kit and handle init, startup, shutdown etc.
"""
def __init__(self, kit, dc, mp):
"""
Initialize scenario.
Args:
kit (omni.isaac.synthetic_utils.scripts.omnikit.OmniKitHelper): helper class for launching OmniKit from a python environment
dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
"""
super().__init__(kit.editor, dc, mp)
self._kit = kit
self._paused = True
self._start = False
self._reset = False
self._time = 0
self._start_time = 0
self.current_state = SM_states.STANDBY
self.timeout_max = 8.0
self.pick_and_place = None
self._pending_stop = False
self._gripper_open = False
self.current_obj = 0
self.max_objs = 100
self.num_objs = 3
self.add_objects_timeout = -1
self.franka_solid = None
result, nucleus_server = find_nucleus_server()
if result is False:
carb.log_error("Could not find nucleus server with /Isaac folder")
else:
self.nucleus_server = nucleus_server
def __del__(self):
"""
Cleanup scenario objects when deleted, force garbage collection.
"""
if self.franka_solid:
self.franka_solid.end_effector.gripper = None
super().__del__()
def add_object_path(self, object_path, from_server=False):
"""
Add object usd path.
"""
if from_server and hasattr(self, 'nucleus_server'):
object_path = os.path.join(self.nucleus_server, object_path)
        if not from_server and os.path.isdir(object_path): object_usd = glob.glob(os.path.join(object_path, '**/*.usd'), recursive=True)
        else: object_usd = [object_path]
if hasattr(self, 'objects_usd'):
self.objects_usd.extend(object_usd)
else:
self.objects_usd = object_usd
def create_franka(self, *args):
"""
Create franka USD objects and bin USD objects.
"""
super().create_franka()
if self.asset_path is None:
return
# Load robot environment and set its transform
self.env_path = "/scene"
robot_usd = self.asset_path + "/Robots/Franka/franka.usd"
robot_path = "/scene/robot"
create_prim_from_usd(self._stage, robot_path, robot_usd, Gf.Vec3d(0, 0, 0))
bin_usd = self.asset_path + "/Props/KLT_Bin/large_KLT.usd"
bin_path = "/scene/bin"
create_prim_from_usd(self._stage, bin_path, bin_usd, Gf.Vec3d(40, 0, 4))
# Set robot end effector Target
target_path = "/scene/target"
if self._stage.GetPrimAtPath(target_path):
return
GoalPrim = self._stage.DefinePrim(target_path, "Xform")
self.default_position = _dynamic_control.Transform()
self.default_position.p = [0.4, 0.0, 0.3]
self.default_position.r = [0.0, 1.0, 0.0, 0.0] #TODO: Check values for stability
p = self.default_position.p
r = self.default_position.r
set_translate(GoalPrim, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
set_rotate(GoalPrim, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
# Setup physics simulation
add_ground_plane(self._stage, "/groundPlane", "Z", 1000.0, Gf.Vec3f(0.0), Gf.Vec3f(1.0))
setup_physics(self._stage)
def rand_position(self, bound, margin=0, z_range=None):
"""
Obtain random position contained within a specified bound.
"""
x_range = (bound[0][0] * (1 - margin), bound[1][0] * (1 - margin))
y_range = (bound[0][1] * (1 - margin), bound[1][1] * (1 - margin))
if z_range is None:
z_range = (bound[0][2] * (1 - margin), bound[1][2] * (1 - margin))
x = np.random.uniform(*x_range)
y = np.random.uniform(*y_range)
z = np.random.uniform(*z_range)
return Gf.Vec3d(x, y, z)
# combine add_object and add_and_register_object
def add_object(self, *args, register=True, position=None):
"""
Add object to scene.
"""
prim = self.create_new_objects(position=position)
if not register:
return prim
self._kit.update()
if not hasattr(self, 'objects'):
self.objects = []
self.objects.append(RigidBody(prim, self._dc))
def create_new_objects(self, *args, position=None):
"""
Randomly select and create prim of object in scene.
"""
if not hasattr(self, 'objects_usd'):
return
prim_usd_path = self.objects_usd[random.randint(0, len(self.objects_usd) - 1)]
prim_env_path = "/scene/objects/object_{}".format(self.current_obj)
if position is None:
position = self.rand_position(self.bin_solid.get_bound(), margin=0.2, z_range=(10, 10))
prim = create_prim_from_usd(self._stage, prim_env_path, prim_usd_path, position)
if hasattr(self, 'current_obj'): self.current_obj += 1
else: self.current_obj = 0
return prim
def register_objects(self, *args):
"""
Register all objects.
"""
self.objects = []
objects_path = '/scene/objects'
objects_prim = self._stage.GetPrimAtPath(objects_path)
if objects_prim.IsValid():
for object_prim in objects_prim.GetChildren():
self.objects.append(RigidBody(object_prim, self._dc))
# TODO: Delete method
def add_and_register_object(self, *args):
prim = self.create_new_objects()
self._kit.update()
if not hasattr(self, 'objects'):
self.objects = []
self.objects.append(RigidBody(prim, self._dc))
def register_scene(self, *args):
"""
Register world, panda arm, bin, and objects.
"""
self.world = World(self._dc, self._mp)
self.register_assets(args)
self.register_objects(args)
def register_assets(self, *args):
"""
Connect franka controller to usd assets.
"""
# register robot with RMP
robot_path = "/scene/robot"
self.franka_solid = Franka(
self._stage, self._stage.GetPrimAtPath(robot_path), self._dc, self._mp, self.world, default_config
)
# register bin
bin_path = "/scene/bin"
bin_prim = self._stage.GetPrimAtPath(bin_path)
self.bin_solid = RigidBody(bin_prim, self._dc)
# register stage machine
self.pick_and_place = PickAndPlaceStateMachine(
self._stage,
self.franka_solid,
self._stage.GetPrimAtPath("/scene/robot/panda_hand"),
self.default_position,
)
def perform_tasks(self, *args):
"""
Perform all tasks in scenario if multiple robots are present.
"""
self._start = True
self._paused = False
return False
def step(self, step):
"""
Step the scenario, can be used to update things in the scenario per frame.
"""
if self._editor.is_playing():
if self._pending_stop:
self.stop_tasks()
return
# Updates current references and locations for the robot.
self.world.update()
self.franka_solid.update()
target = self._stage.GetPrimAtPath("/scene/target")
xform_attr = target.GetAttribute("xformOp:transform")
if self._reset:
self._paused = False
if not self._paused:
self._time += 1.0 / 60.0
self.pick_and_place.step(self._time, self._start, self._reset)
if self._reset:
self._paused = True
self._time = 0
self._start_time = 0
p = self.default_position.p
r = self.default_position.r
set_translate(target, Gf.Vec3d(p.x * 100, p.y * 100, p.z * 100))
set_rotate(target, Gf.Matrix3d(Gf.Quatd(r.w, r.x, r.y, r.z)))
else:
state = self.franka_solid.end_effector.status.current_target
state_1 = self.pick_and_place.target_position
tr = state["orig"] * 100.0
set_translate(target, Gf.Vec3d(tr[0], tr[1], tr[2]))
set_rotate(target, Gf.Matrix3d(Gf.Quatd(state_1.r.w, state_1.r.x, state_1.r.y, state_1.r.z)))
self._start = False
self._reset = False
if self.add_objects_timeout > 0:
self.add_objects_timeout -= 1
if self.add_objects_timeout == 0:
self.create_new_objects()
else:
translate_attr = xform_attr.Get().GetRow3(3)
rotate_x = xform_attr.Get().GetRow3(0)
rotate_y = xform_attr.Get().GetRow3(1)
rotate_z = xform_attr.Get().GetRow3(2)
orig = np.array(translate_attr) / 100.0
axis_x = np.array(rotate_x)
axis_y = np.array(rotate_y)
axis_z = np.array(rotate_z)
self.franka_solid.end_effector.go_local(
orig=orig,
axis_x=axis_x, # TODO: consider setting this to [] for stability reasons
axis_y=axis_y,
axis_z=axis_z,
use_default_config=True,
wait_for_target=False,
wait_time=5.0,
)
def stop_tasks(self, *args):
"""
Stop tasks in the scenario if any.
"""
if self.pick_and_place is not None:
if self._editor.is_playing():
self._reset = True
self._pending_stop = False
else:
self._pending_stop = True
def pause_tasks(self, *args):
"""
Pause tasks in the scenario.
"""
self._paused = not self._paused
return self._paused
# TODO: use gripper.width == 0 as a proxy for _gripper_open == False
def actuate_gripper(self):
"""
Actuate Panda gripper.
"""
if self._gripper_open:
self.franka_solid.end_effector.gripper.close()
self._gripper_open = False
else:
self.franka_solid.end_effector.gripper.open()
self._gripper_open = True
def set_target_angle(self, angle):
"""
Set grasp angle in degrees.
"""
if self.pick_and_place is not None:
self.pick_and_place.target_angle = angle
def set_target_position(self, position):
"""
Set grasp position.
"""
if self.pick_and_place is not None:
self.pick_and_place.target_point = position
| 27,230 | Python | 35.502681 | 137 | 0.573265 |
erasromani/isaac-sim-python/grasp/grasping_scenarios/franka.py | # Credits: The majority of this code is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea with minor modifications.
import time
import os
import numpy as np
import carb.tokens
import omni.kit.settings
from pxr import Usd, UsdGeom, Gf
from collections import deque
from omni.isaac.dynamic_control import _dynamic_control
from omni.isaac.motion_planning import _motion_planning
from omni.isaac.samples.scripts.utils import math_utils
# default joint configuration
default_config = (0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75)
# Alternative default config for motion planning
alternate_config = [
(1.5356, -1.3813, -1.5151, -2.0015, -1.3937, 1.5887, 1.4597),
(-1.5356, -1.3813, 1.5151, -2.0015, 1.3937, 1.5887, 0.4314),
]
class Gripper:
"""
Gripper for franka.
"""
def __init__(self, dc, ar):
"""
Initialize gripper.
Args:
dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
ar (int): articulation identifier
"""
self.dc = dc
self.ar = ar
self.finger_j1 = self.dc.find_articulation_dof(self.ar, "panda_finger_joint1")
self.finger_j2 = self.dc.find_articulation_dof(self.ar, "panda_finger_joint2")
self.width = 0
self.width_history = deque(maxlen=50)
def open(self, wait=False):
"""
Open gripper.
"""
if self.width < 0.045:
self.move(0.045, wait=True)
self.move(0.09, wait=wait)
def close(self, wait=False, force=0):
"""
Close gripper.
"""
self.move(0, wait=wait)
def move(self, width=0.03, speed=0.2, wait=False):
"""
Modify width.
"""
self.width = width
# if wait:
# time.sleep(0.5)
def update(self):
"""
Actuate gripper.
"""
self.dc.set_dof_position_target(self.finger_j1, self.width * 0.5 * 100)
self.dc.set_dof_position_target(self.finger_j2, self.width * 0.5 * 100)
self.width_history.append(self.get_width())
def get_width(self):
"""
Get current width.
"""
return sum(self.get_position())
def get_position(self):
"""
Get left and right finger local position.
"""
return self.dc.get_dof_position(self.finger_j1), self.dc.get_dof_position(self.finger_j2)
def get_velocity(self, from_articulation=True):
"""
Get left and right finger local velocity.
"""
if from_articulation:
return (self.dc.get_dof_velocity(self.finger_j1), self.dc.get_dof_velocity(self.finger_j2))
else:
leftfinger_handle = self.dc.get_rigid_body(self.dc.get_articulation_path(self.ar) + '/panda_leftfinger')
rightfinger_handle = self.dc.get_rigid_body(self.dc.get_articulation_path(self.ar) + '/panda_rightfinger')
leftfinger_velocity = np.linalg.norm(np.array(self.dc.get_rigid_body_local_linear_velocity(leftfinger_handle)))
rightfinger_velocity = np.linalg.norm(np.array(self.dc.get_rigid_body_local_linear_velocity(rightfinger_handle)))
return (leftfinger_velocity, rightfinger_velocity)
def is_moving(self, tol=1e-2):
"""
Determine if gripper fingers are moving
"""
if len(self.width_history) < self.width_history.maxlen or np.array(self.width_history).std() > tol:
return True
else:
return False
def get_state(self):
"""
Get gripper state.
"""
dof_states = self.dc.get_articulation_dof_states(self.ar, _dynamic_control.STATE_ALL)
return dof_states[-2], dof_states[-1]
def is_closed(self, tol=1e-2):
"""
Determine if gripper is closed.
"""
if self.get_width() < tol:
return True
else:
return False
class Status:
"""
Class that contains status for end effector
"""
def __init__(self, mp, rmp_handle):
"""
Initialize status object.
Args:
mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
rmp_handle (int): RMP handle identifier
"""
self.mp = mp
self.rmp_handle = rmp_handle
self.orig = np.array([0, 0, 0])
self.axis_x = np.array([1, 0, 0])
self.axis_y = np.array([0, 1, 0])
self.axis_z = np.array([0, 0, 1])
self.current_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
self.target_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
self.frame = self.current_frame
def update(self):
"""
Update end effector state.
"""
state = self.mp.getRMPState(self.rmp_handle)
target = self.mp.getRMPTarget(self.rmp_handle)
self.orig = np.array([state[0].x, state[0].y, state[0].z])
self.axis_x = np.array([state[1].x, state[1].y, state[1].z])
self.axis_y = np.array([state[2].x, state[2].y, state[2].z])
self.axis_z = np.array([state[3].x, state[3].y, state[3].z])
self.current_frame = {"orig": self.orig, "axis_x": self.axis_x, "axis_y": self.axis_y, "axis_z": self.axis_z}
self.frame = self.current_frame
self.current_target = {
"orig": np.array([target[0].x, target[0].y, target[0].z]),
"axis_x": np.array([target[1].x, target[1].y, target[1].z]),
"axis_y": np.array([target[2].x, target[2].y, target[2].z]),
"axis_z": np.array([target[3].x, target[3].y, target[3].z]),
}
class EndEffector:
"""
End effector object that controls movement.
"""
def __init__(self, dc, mp, ar, rmp_handle):
"""
Initialize end effector.
Args:
dc (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
mp (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
ar (int): articulation identifier
rmp_handle (int): RMP handle identifier
"""
self.dc = dc
self.ar = ar
self.mp = mp
self.rmp_handle = rmp_handle
self.gripper = Gripper(dc, ar)
self.status = Status(mp, rmp_handle)
self.UpRot = Gf.Rotation(Gf.Vec3d(0, 0, 1), 90)
def freeze(self):
self.go_local(
orig=self.status.orig, axis_x=self.status.axis_x, axis_z=self.status.axis_z, wait_for_target=False
)
def go_local(
self,
target=None,
orig=[],
axis_x=[],
axis_y=[],
axis_z=[],
required_orig_err=0.01,
required_axis_x_err=0.01,
required_axis_y_err=0.01,
required_axis_z_err=0.01,
orig_thresh=None,
axis_x_thresh=None,
axis_y_thresh=None,
axis_z_thresh=None,
approach_direction=[],
approach_standoff=0.1,
approach_standoff_std_dev=0.001,
use_level_surface_orientation=False,
use_target_weight_override=True,
use_default_config=False,
wait_for_target=True,
wait_time=None,
):
self.target_weight_override_value = 10000.0
self.target_weight_override_std_dev = 0.03
if orig_thresh:
required_orig_err = orig_thresh
if axis_x_thresh:
required_axis_x_err = axis_x_thresh
if axis_y_thresh:
required_axis_y_err = axis_y_thresh
if axis_z_thresh:
required_axis_z_err = axis_z_thresh
if target:
orig = target["orig"]
if "axis_x" in target and target["axis_x"] is not None:
axis_x = target["axis_x"]
if "axis_y" in target and target["axis_y"] is not None:
axis_y = target["axis_y"]
if "axis_z" in target and target["axis_z"] is not None:
axis_z = target["axis_z"]
orig = np.array(orig)
axis_x = np.array(axis_x)
axis_y = np.array(axis_y)
axis_z = np.array(axis_z)
approach = _motion_planning.Approach((0, 0, 1), 0, 0)
if len(approach_direction) != 0:
approach = _motion_planning.Approach(approach_direction, approach_standoff, approach_standoff_std_dev)
pose_command = _motion_planning.PartialPoseCommand()
if len(orig) > 0:
pose_command.set(_motion_planning.Command(orig, approach), int(_motion_planning.FrameElement.ORIG))
if len(axis_x) > 0:
pose_command.set(_motion_planning.Command(axis_x), int(_motion_planning.FrameElement.AXIS_X))
if len(axis_y) > 0:
pose_command.set(_motion_planning.Command(axis_y), int(_motion_planning.FrameElement.AXIS_Y))
if len(axis_z) > 0:
pose_command.set(_motion_planning.Command(axis_z), int(_motion_planning.FrameElement.AXIS_Z))
self.mp.goLocal(self.rmp_handle, pose_command)
if wait_for_target and wait_time:
error = 1
future_time = time.time() + wait_time
while error > required_orig_err and time.time() < future_time:
# time.sleep(0.1)
error = self.mp.getError(self.rmp_handle)
def look_at(self, gripper_pos, target):
# Y up works for look at but sometimes flips, go_local might be a safer bet with a locked y_axis
orientation = math_utils.lookAt(gripper_pos, target, (0, 1, 0))
mat = Gf.Matrix3d(orientation).GetTranspose()
self.go_local(
orig=[gripper_pos[0], gripper_pos[1], gripper_pos[2]],
axis_x=[mat.GetColumn(0)[0], mat.GetColumn(0)[1], mat.GetColumn(0)[2]],
axis_z=[mat.GetColumn(2)[0], mat.GetColumn(2)[1], mat.GetColumn(2)[2]],
)
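# Hedged usage sketch (not part of the original source): `ee` stands for an
# EndEffector owned by a Franka instance (see below); pose values and wait_time
# are illustrative and require a live Isaac Sim session with the RMP extension.
#
#   ee.go_local(
#       orig=[40.0, 0.0, 30.0],      # target position in stage units
#       axis_z=[0.0, 0.0, -1.0],     # gripper z-axis pointing straight down
#       wait_for_target=True,
#       wait_time=5.0,
#   )
#   ee.look_at(gripper_pos=[40.0, 0.0, 30.0], target=[40.0, 0.0, 0.0])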
class Franka:
"""
    Franka object that contains implementation details for robot control.
"""
def __init__(self, stage, prim, dc, mp, world=None, group_path="", default_config=None, is_ghost=False):
"""
Initialize Franka controller.
Args:
stage (pxr.Usd.Stage): usd stage
prim (pxr.Usd.Prim): robot prim
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
            mp (omni.isaac.motion_planning._motion_planning.MotionPlanning): motion planning interface from RMP extension
world (omni.isaac.samples.scripts.utils.world.World): simulation world handler
default_config (tuple or list): default configuration for robot revolute joint drivers
is_ghost (bool): flag for turning off collision and modifying visuals for robot arm
"""
self.dc = dc
self.mp = mp
self.prim = prim
self.stage = stage
# get handle to the articulation for this franka
self.ar = self.dc.get_articulation(prim.GetPath().pathString)
self.is_ghost = is_ghost
self.base = self.dc.get_articulation_root_body(self.ar)
body_count = self.dc.get_articulation_body_count(self.ar)
for bodyIdx in range(body_count):
body = self.dc.get_articulation_body(self.ar, bodyIdx)
self.dc.set_rigid_body_disable_gravity(body, True)
exec_folder = os.path.abspath(
carb.tokens.get_tokens_interface().resolve(
f"{os.environ['ISAAC_PATH']}/exts/omni.isaac.motion_planning/resources/lula/lula_franka"
)
)
self.rmp_handle = self.mp.registerRmp(
exec_folder + "/urdf/lula_franka_gen.urdf",
exec_folder + "/config/robot_descriptor.yaml",
exec_folder + "/config/franka_rmpflow_common.yaml",
prim.GetPath().pathString,
"right_gripper",
True,
)
print("franka rmp handle", self.rmp_handle)
if world is not None:
self.world = world
self.world.rmp_handle = self.rmp_handle
self.world.register_parent(self.base, self.prim, "panda_link0")
settings = omni.kit.settings.get_settings_interface()
self.mp.setFrequency(self.rmp_handle, settings.get("/physics/timeStepsPerSecond"), True)
self.end_effector = EndEffector(self.dc, self.mp, self.ar, self.rmp_handle)
if default_config:
self.mp.setDefaultConfig(self.rmp_handle, default_config)
self.target_visibility = True
if self.is_ghost:
self.target_visibility = False
self.imageable = UsdGeom.Imageable(self.prim)
def __del__(self):
"""
Unregister RMP.
"""
self.mp.unregisterRmp(self.rmp_handle)
print(" Delete Franka")
def set_pose(self, pos, rot):
"""
Set robot pose.
"""
        self.mp.setTargetLocal(self.rmp_handle, pos, rot)
def set_speed(self, speed_level):
"""
        Set robot speed. Not implemented in this sample; the method is a stub.
"""
pass
def update(self):
"""
Update robot state.
"""
self.end_effector.gripper.update()
self.end_effector.status.update()
if self.imageable:
if self.target_visibility is not self.imageable.ComputeVisibility(Usd.TimeCode.Default()):
if self.target_visibility:
self.imageable.MakeVisible()
else:
self.imageable.MakeInvisible()
def send_config(self, config):
"""
Set robot default configuration.
"""
if self.is_ghost is False:
self.mp.setDefaultConfig(self.rmp_handle, config)
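# Hedged usage sketch (not part of the original source): constructing a Franka
# needs live simulation handles, so all names below are illustrative.
#
#   stage = omni.usd.get_context().get_stage()
#   prim = stage.GetPrimAtPath("/World/Franka")      # assumed prim path
#   franka = Franka(stage, prim, dc, mp)             # dc/mp from the running extensions
#   franka.update()                                  # refresh gripper and status
#   franka.end_effector.go_local(orig=[40.0, 0.0, 30.0], wait_for_target=False)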
| 13,794 | Python | 34.371795 | 132 | 0.582935 |
erasromani/isaac-sim-python/grasp/utils/isaac_utils.py | # Credits: All code except class RigidBody and Camera is taken from build code associated with nvidia/isaac-sim:2020.2.2_ea.
import numpy as np
import omni.kit
from pxr import Usd, UsdGeom, Gf, PhysicsSchema, PhysxSchema
def create_prim_from_usd(stage, prim_env_path, prim_usd_path, location):
"""
Create prim from usd.
"""
envPrim = stage.DefinePrim(prim_env_path, "Xform") # create an empty Xform at the given path
envPrim.GetReferences().AddReference(prim_usd_path) # attach the USD to the given path
set_translate(envPrim, location) # set pose
return stage.GetPrimAtPath(envPrim.GetPath().pathString)
def set_up_z_axis(stage):
"""
Utility function to specify the stage with the z axis as "up".
"""
rootLayer = stage.GetRootLayer()
rootLayer.SetPermissionToEdit(True)
with Usd.EditContext(stage, rootLayer):
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
def set_translate(prim, new_loc):
"""
Specify position of a given prim, reuse any existing transform ops when possible.
"""
properties = prim.GetPropertyNames()
if "xformOp:translate" in properties:
translate_attr = prim.GetAttribute("xformOp:translate")
translate_attr.Set(new_loc)
elif "xformOp:translation" in properties:
        translation_attr = prim.GetAttribute("xformOp:translation")
translation_attr.Set(new_loc)
elif "xformOp:transform" in properties:
transform_attr = prim.GetAttribute("xformOp:transform")
matrix = prim.GetAttribute("xformOp:transform").Get()
matrix.SetTranslateOnly(new_loc)
transform_attr.Set(matrix)
else:
xform = UsdGeom.Xformable(prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(Gf.Matrix4d().SetTranslate(new_loc))
def set_rotate(prim, rot_mat):
"""
Specify orientation of a given prim, reuse any existing transform ops when possible.
"""
properties = prim.GetPropertyNames()
if "xformOp:rotate" in properties:
rotate_attr = prim.GetAttribute("xformOp:rotate")
rotate_attr.Set(rot_mat)
elif "xformOp:transform" in properties:
transform_attr = prim.GetAttribute("xformOp:transform")
matrix = prim.GetAttribute("xformOp:transform").Get()
matrix.SetRotateOnly(rot_mat.ExtractRotation())
transform_attr.Set(matrix)
else:
xform = UsdGeom.Xformable(prim)
xform_op = xform.AddXformOp(UsdGeom.XformOp.TypeTransform, UsdGeom.XformOp.PrecisionDouble, "")
xform_op.Set(Gf.Matrix4d().SetRotate(rot_mat))
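# Hedged usage sketch (not part of the original source): positioning and
# orienting a prim with the helpers above. Requires an open USD stage; the
# prim path is illustrative.
#
#   prim = stage.GetPrimAtPath("/World/bin")
#   set_translate(prim, Gf.Vec3d(10.0, 0.0, 5.0))
#   set_rotate(prim, Gf.Matrix3d(Gf.Rotation(Gf.Vec3d(0, 0, 1), 90.0)))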
def create_background(stage, background_stage):
"""
Create background stage.
"""
background_path = "/background"
if not stage.GetPrimAtPath(background_path):
backPrim = stage.DefinePrim(background_path, "Xform")
backPrim.GetReferences().AddReference(background_stage)
# Move the stage down -104cm so that the floor is below the table wheels, move in y axis to get light closer
set_translate(backPrim, Gf.Vec3d(0, -400, -104))
def setup_physics(stage):
"""
Set default physics parameters.
"""
# Specify gravity
metersPerUnit = UsdGeom.GetStageMetersPerUnit(stage)
gravityScale = 9.81 / metersPerUnit
gravity = Gf.Vec3f(0.0, 0.0, -gravityScale)
scene = PhysicsSchema.PhysicsScene.Define(stage, "/physics/scene")
scene.CreateGravityAttr().Set(gravity)
PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/physics/scene"))
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Get(stage, "/physics/scene")
physxSceneAPI.CreatePhysxSceneEnableCCDAttr(True)
physxSceneAPI.CreatePhysxSceneEnableStabilizationAttr(True)
physxSceneAPI.CreatePhysxSceneEnableGPUDynamicsAttr(False)
physxSceneAPI.CreatePhysxSceneBroadphaseTypeAttr("MBP")
physxSceneAPI.CreatePhysxSceneSolverTypeAttr("TGS")
class Camera:
"""
    Camera object that contains state information for a camera in the scene.
"""
    def __init__(self, kit, camera_path, translation, rotation):
        """
        Initializes the Camera object.
        Args:
            kit: OmniKit/editor helper used to create the camera prim (assumed
                parameter; the original snippet referenced self._kit without setting it)
            camera_path (str): path of camera in stage hierarchy
            translation (list or tuple): camera position
            rotation (list or tuple): camera orientation described by euler angles in degrees
        """
        self._kit = kit
        self.prim = self._kit.create_prim(
            camera_path,
            "Camera",
            translation=translation,
            rotation=rotation,
        )
        self.name = self.prim.GetPrimPath().name
        self.vpi = omni.kit.viewport.get_viewport_interface()
def set_translate(self, position):
"""
Set camera position.
Args:
position (tuple): camera position specified by (X, Y, Z)
"""
if not isinstance(position, tuple): position = tuple(position)
translate_attr = self.prim.GetAttribute("xformOp:translate")
translate_attr.Set(position)
def set_rotate(self, rotation):
"""
        Set camera orientation.
Args:
rotation (tuple): camera orientation specified by three euler angles in degrees
"""
if not isinstance(rotation, tuple): rotation = tuple(rotation)
rotate_attr = self.prim.GetAttribute("xformOp:rotateZYX")
rotate_attr.Set(rotation)
def activate(self):
"""
Activate camera to viewport.
"""
self.vpi.get_viewport_window().set_active_camera(str(self.prim.GetPath()))
def __repr__(self):
return self.name
class RigidBody:
"""
    RigidBody object that contains state information of the rigid body.
"""
def __init__(self, prim, dc):
"""
        Initializes the RigidBody object.
        Args:
            prim (pxr.Usd.Prim): rigid body prim
            dc (omni.isaac.dynamic_control._dynamic_control.DynamicControl): dynamic control interface
"""
self.prim = prim
self._dc = dc
self.name = prim.GetPrimPath().name
self.handle = self.get_rigid_body_handle()
    def get_rigid_body_handle(self):
        """
        Get the rigid body handle of the first child prim that is a rigid body, or None if there is none.
        """
        object_children = self.prim.GetChildren()
        for child in object_children:
            child_path = child.GetPath().pathString
            body_handle = self._dc.get_rigid_body(child_path)
            if body_handle != 0:
                return body_handle
        return None
def get_linear_velocity(self):
"""
Get linear velocity of rigid body.
"""
return np.array(self._dc.get_rigid_body_linear_velocity(self.handle))
def get_angular_velocity(self):
"""
Get angular velocity of rigid body.
"""
return np.array(self._dc.get_rigid_body_angular_velocity(self.handle))
def get_speed(self):
"""
Get speed of rigid body given by the l2 norm of the velocity.
"""
velocity = self.get_linear_velocity()
speed = np.linalg.norm(velocity)
return speed
def get_pose(self):
"""
Get pose of the rigid body containing the position and orientation information.
"""
return self._dc.get_rigid_body_pose(self.handle)
def get_position(self):
"""
Get the position of the rigid body object.
"""
pose = self.get_pose()
position = np.array(pose.p)
return position
def get_orientation(self):
"""
Get orientation of the rigid body object.
"""
pose = self.get_pose()
orientation = np.array(pose.r)
return orientation
def get_bound(self):
"""
Get bounds of the rigid body object in global coordinates.
"""
bound = UsdGeom.Mesh(self.prim).ComputeWorldBound(0.0, "default").GetBox()
return [np.array(bound.GetMin()), np.array(bound.GetMax())]
def __repr__(self):
return self.name
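# Hedged usage sketch (not part of the original source): `dc` is the dynamic
# control interface from a running simulation; the prim path is illustrative.
#
#   body = RigidBody(stage.GetPrimAtPath("/World/bin"), dc)
#   if body.get_speed() < 0.01:                # object has roughly settled
#       lo, hi = body.get_bound()
#       print(body, body.get_position(), lo, hi)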
| 9,822 | Python | 32.640411 | 124 | 0.633069 |
erasromani/isaac-sim-python/grasp/utils/visualize.py | import os
import ffmpeg
import matplotlib.pyplot as plt
def screenshot(sd_helper, suffix="", prefix="image", directory="images/"):
"""
Take a screenshot of the current time step of a running NVIDIA Omniverse Isaac-Sim simulation.
Args:
sd_helper (omni.isaac.synthetic_utils.SyntheticDataHelper): helper class for visualizing OmniKit simulation
suffix (str or int): suffix for output filename of image screenshot of current time step of simulation
prefix (str): prefix for output filename of image screenshot of current time step of simulation
directory (str): output directory of image screenshot of current time step of simulation
"""
gt = sd_helper.get_groundtruth(
[
"rgb",
]
)
image = gt["rgb"][..., :3]
plt.imshow(image)
if suffix == "":
suffix = 0
if isinstance(suffix, int):
filename = os.path.join(directory, f'{prefix}_{suffix:05}.png')
else:
filename = os.path.join(directory, f'{prefix}_{suffix}.png')
plt.axis('off')
plt.savefig(filename)
def img2vid(input_pattern, output_fn, pattern_type='glob', framerate=25):
"""
Create video from a collection of images.
Args:
input_pattern (str): input pattern for a path of collection of images
output_fn (str): video output filename
pattern_type (str): pattern type for input pattern
framerate (int): video framerate
"""
(
ffmpeg
.input(input_pattern, pattern_type=pattern_type, framerate=framerate)
.output(output_fn)
.run(overwrite_output=True, quiet=True)
)
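# Hedged usage sketch (not part of the original source): capture a frame per
# simulation step, then assemble the frames into a video. `sd_helper` must be
# a SyntheticDataHelper bound to a running OmniKit instance, and `kit.update()`
# is an illustrative stand-in for advancing the simulation.
#
#   for step in range(100):
#       kit.update()
#       screenshot(sd_helper, suffix=step, directory="images/")
#   img2vid("images/image_*.png", "run.mp4", framerate=25)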
| 1,647 | Python | 30.692307 | 115 | 0.649059 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/setup.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
    name="Nucleus Server Tools",
    version="1.0",
    long_description=long_description,
    long_description_content_type="text/markdown",
py_modules=[
'nst'
],
install_requires=[
"boto3",
"python-dotenv",
"Click"
],
entry_points='''
[console_scripts]
nst=nst_cli:main
'''
)
| 576 | Python | 21.192307 | 73 | 0.609375 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst_cli.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
"""
helper tools for omniverse nucleus deployment configuration
"""
# std lib modules
import os
import logging
from pathlib import Path
# 3rd party modules
import click
import nst.logger as logger
pass_config = click.make_pass_decorator(object, ensure=True)
@click.group()
@pass_config
def main(config):
pass
@main.command()
@pass_config
@click.option("--my_opt_arg")
def hello_world(config, my_opt_arg):
logger.info(f"Hello World: {my_opt_arg=}")
@main.command()
@pass_config
@click.option("--server-ip", required=True)
@click.option("--reverse-proxy-domain", required=True)
@click.option("--instance-name", required=True)
@click.option("--master-password", required=True)
@click.option("--service-password", required=True)
@click.option("--data-root", required=True)
def generate_nucleus_stack_env(
config,
server_ip,
reverse_proxy_domain,
instance_name,
master_password,
service_password,
data_root,
):
logger.info(
f"generate_nucleus_stack_env:{server_ip=},{reverse_proxy_domain=},{instance_name=},{master_password=},{service_password=},{data_root=}"
)
tools_path = "/".join(list(Path(__file__).parts[:-1]))
cur_dir_path = "."
template_name = "nucleus-stack.env"
template_path = f"{tools_path}/templates/{template_name}"
output_path = f"{cur_dir_path}/{template_name}"
if not Path(template_path).is_file():
        raise Exception(f"File not found: {template_path}")
data = ""
with open(template_path, "r") as file:
data = file.read()
data = data.format(
SERVER_IP_OR_HOST=server_ip,
REVERSE_PROXY_DOMAIN=reverse_proxy_domain,
INSTANCE_NAME=instance_name,
MASTER_PASSWORD=master_password,
SERVICE_PASSWORD=service_password,
DATA_ROOT=data_root,
ACCEPT_EULA="1",
SECURITY_REVIEWED="1",
)
with open(f"{output_path}", "w") as file:
file.write(data)
logger.info(output_path)
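# Example invocation (illustrative values; click renders the underscores in the
# function name as dashes):
#
#   nst generate-nucleus-stack-env \
#       --server-ip 10.0.0.5 \
#       --reverse-proxy-domain nucleus.example.com \
#       --instance-name nucleus_server \
#       --master-password <master-password> \
#       --service-password <service-password> \
#       --data-root /var/lib/omni/nucleus-data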
| 2,391 | Python | 25.876404 | 143 | 0.677123 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst/__init__.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
| 210 | Python | 41.199992 | 73 | 0.766667 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/nucleusServer/nst/logger.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
LOG_LEVEL = os.getenv('LOG_LEVEL', 'DEBUG')
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
def info(*args):
print(*args)
def debug(*args):
print(*args)
def warning(*args):
print(*args)
def error(*args):
print(*args) | 480 | Python | 20.863635 | 73 | 0.708333 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/rpt_cli.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
"""
helper tools for reverse proxy nginx configuration
"""
# std lib modules
import os
import logging
from pathlib import Path
# 3rd party modules
import click
import rpt.logger as logger
pass_config = click.make_pass_decorator(object, ensure=True)
@click.group()
@pass_config
def main(config):
pass
@main.command()
@pass_config
def hello_world(config):
logger.info(f'Hello World')
@main.command()
@pass_config
@click.option("--cert-arn", required=True)
def generate_acm_yaml(config, cert_arn):
logger.info(f'generate_acm_yaml: {cert_arn=}')
tools_path = '/'.join(list(Path(__file__).parts[:-1]))
cur_dir_path = '.'
template_path = f'{tools_path}/templates/acm.yaml'
output_path = f'{cur_dir_path}/acm.yaml'
    logger.info(f"Template exists: {Path(template_path).is_file()}")
data = ''
with open(template_path, 'r') as file:
data = file.read()
data = data.format(cert_arn=cert_arn)
with open(f'{output_path}', 'w') as file:
file.write(data)
logger.info(output_path)
@main.command()
@pass_config
@click.option("--domain", required=True)
@click.option("--server-address", required=True)
def generate_nginx_config(config, domain, server_address):
logger.info(f'generate_nginx_config: {domain=}')
nginx_template_path = os.path.join(
os.getcwd(), 'templates', 'nginx.conf')
if Path(nginx_template_path).is_file():
logger.info(f"NGINX template found at: {nginx_template_path}")
else:
raise Exception(
f"ERROR: No NGINX template found at: {nginx_template_path}")
output_path = f'/etc/nginx/nginx.conf'
if Path(output_path).is_file():
logger.info(f"NGINX default configuration found at: {output_path}")
else:
raise Exception(
f"ERROR: No NGINX default configuration found at: {output_path}. Verify NGINX installation.")
data = ''
with open(nginx_template_path, 'r') as file:
data = file.read()
data = data.format(PUBLIC_DOMAIN=domain,
NUCLEUS_SERVER_DOMAIN=server_address)
with open(output_path, 'w') as file:
file.write(data)
logger.info(output_path)
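# Example invocations (illustrative values, not from the original source):
#
#   rpt generate-acm-yaml --cert-arn arn:aws:acm:us-east-1:123456789012:certificate/example
#   rpt generate-nginx-config --domain nucleus.example.com --server-address ip-10-0-0-5.ec2.internal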
| 2,373 | Python | 24.526881 | 105 | 0.659503 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/tools/reverseProxy/setup.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
    name="Reverse Proxy Tools",
    version="1.0",
    long_description=long_description,
    long_description_content_type="text/markdown",
    py_modules=["rpt"],
install_requires=["boto3", "python-dotenv", "Click"],
entry_points="""
[console_scripts]
rpt=rpt_cli:main
""",
)
| 532 | Python | 25.649999 | 73 | 0.657895 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/customResources/reverseProxyConfig/index.py | import os
import logging
import json
from crhelper import CfnResource
import aws_utils.ssm as ssm
import aws_utils.ec2 as ec2
import config.reverseProxy as config
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
helper = CfnResource(
json_logging=False, log_level="DEBUG", boto_level="CRITICAL"
)
@helper.create
def create(event, context):
logger.info("Create Event: %s", json.dumps(event, indent=2))
response = update_config(
event["ResourceProperties"]["STACK_NAME"],
event["ResourceProperties"]["ARTIFACTS_BUCKET_NAME"],
event["ResourceProperties"]["FULL_DOMAIN"],
event["ResourceProperties"]["RP_AUTOSCALING_GROUP_NAME"],
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
@helper.update
def update(event, context):
logger.info("Update Event: %s", json.dumps(event, indent=2))
response = update_config(
event["ResourceProperties"]["STACK_NAME"],
event["ResourceProperties"]["ARTIFACTS_BUCKET_NAME"],
event["ResourceProperties"]["FULL_DOMAIN"],
event["ResourceProperties"]["RP_AUTOSCALING_GROUP_NAME"],
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
def update_config(
stack_name,
artifacts_bucket_name,
full_domain,
rp_autoscaling_group_name
):
# get nucleus main instance id
nucleus_instances = []
try:
nucleus_instances = ec2.get_instances_by_tag(
"Name", f"{stack_name}/NucleusServer")
except Exception as e:
raise Exception(
f"Failed to get nucleus instances by name. {e}")
logger.info(f"Nucleus Instances: {nucleus_instances}")
# get nucleus main hostname
nucleus_hostname = ec2.get_instance_private_dns_name(nucleus_instances[0])
logger.info(f"Nucleus Hostname: {nucleus_hostname}")
# generate config for reverse proxy servers
commands = []
try:
commands = config.get_config(
artifacts_bucket_name, nucleus_hostname, full_domain)
logger.debug(commands)
except Exception as e:
raise Exception(f"Failed to get Reverse Proxy config. {e}")
# get reverse proxy instance ids
rp_instances = ec2.get_autoscaling_instance(rp_autoscaling_group_name)
if rp_instances is None:
return None
logger.info(rp_instances)
# run config commands
response = []
for i in rp_instances:
r = ssm.run_commands(
i, commands, document="AWS-RunShellScript"
)
response.append(r)
return response
@helper.delete
def delete(event, context):
logger.info("Delete Event: %s", json.dumps(event, indent=2))
def handler(event, context):
helper(event, context)
| 2,776 | Python | 26.495049 | 78 | 0.667147 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/customResources/nucleusServerConfig/index.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
import json
from crhelper import CfnResource
import aws_utils.ssm as ssm
import aws_utils.sm as sm
import config.nucleus as config
LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
helper = CfnResource(json_logging=False, log_level="DEBUG",
boto_level="CRITICAL")
@helper.create
def create(event, context):
logger.info("Create Event: %s", json.dumps(event, indent=2))
instanceId = event["ResourceProperties"]["instanceId"]
reverseProxyDomain = event["ResourceProperties"]["reverseProxyDomain"]
artifactsBucket = event["ResourceProperties"]["artifactsBucket"]
nucleusBuild = event["ResourceProperties"]["nucleusBuild"]
ovMainLoginSecretArn = event["ResourceProperties"]["ovMainLoginSecretArn"]
ovServiceLoginSecretArn = event["ResourceProperties"]["ovServiceLoginSecretArn"]
response = update_nucleus_config(
instanceId,
artifactsBucket,
reverseProxyDomain,
nucleusBuild,
ovMainLoginSecretArn,
ovServiceLoginSecretArn,
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
@helper.update
def update(event, context):
logger.info("Update Event: %s", json.dumps(event, indent=2))
instanceId = event["ResourceProperties"]["instanceId"]
reverseProxyDomain = event["ResourceProperties"]["reverseProxyDomain"]
artifactsBucket = event["ResourceProperties"]["artifactsBucket"]
nucleusBuild = event["ResourceProperties"]["nucleusBuild"]
ovMainLoginSecretArn = event["ResourceProperties"]["ovMainLoginSecretArn"]
ovServiceLoginSecretArn = event["ResourceProperties"]["ovServiceLoginSecretArn"]
response = update_nucleus_config(
instanceId,
artifactsBucket,
reverseProxyDomain,
nucleusBuild,
ovMainLoginSecretArn,
ovServiceLoginSecretArn,
)
logger.info("Run Command Results: %s", json.dumps(response, indent=2))
def update_nucleus_config(
instanceId,
artifactsBucket,
reverseProxyDomain,
nucleusBuild,
ovMainLoginSecretArn,
ovServiceLoginSecretArn,
):
ovMainLoginSecret = sm.get_secret(ovMainLoginSecretArn)
ovServiceLoginSecret = sm.get_secret(ovServiceLoginSecretArn)
ovMainLoginPassword = ovMainLoginSecret["password"]
ovServiceLoginPassword = ovServiceLoginSecret["password"]
    # generate config commands for the nucleus server
commands = []
try:
commands = config.get_config(
artifactsBucket, reverseProxyDomain, nucleusBuild, ovMainLoginPassword, ovServiceLoginPassword)
logger.debug(commands)
except Exception as e:
        raise Exception("Failed to get Nucleus config. {}".format(e))
for p in commands:
print(p)
response = ssm.run_commands(
instanceId, commands, document="AWS-RunShellScript")
return response
@helper.delete
def delete(event, context):
logger.info("Delete Event: %s", json.dumps(event, indent=2))
def handler(event, context):
helper(event, context)
| 3,303 | Python | 30.169811 | 107 | 0.718438 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/asgLifeCycleHooks/reverseProxy/index.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import boto3
import os
import json
import logging
import traceback
from botocore.exceptions import ClientError
import aws_utils.ssm as ssm
import aws_utils.r53 as r53
import aws_utils.ec2 as ec2
import config.reverseProxy as config
logger = logging.getLogger()
logger.setLevel(logging.INFO)
autoscaling = boto3.client("autoscaling")
ARTIFACTS_BUCKET = os.environ["ARTIFACTS_BUCKET"]
NUCLEUS_ROOT_DOMAIN = os.environ["NUCLEUS_ROOT_DOMAIN"]
NUCLEUS_DOMAIN_PREFIX = os.environ["NUCLEUS_DOMAIN_PREFIX"]
NUCLEUS_SERVER_ADDRESS = os.environ["NUCLEUS_SERVER_ADDRESS"]
def send_lifecycle_action(event, result):
try:
response = autoscaling.complete_lifecycle_action(
LifecycleHookName=event["detail"]["LifecycleHookName"],
AutoScalingGroupName=event["detail"]["AutoScalingGroupName"],
LifecycleActionToken=event["detail"]["LifecycleActionToken"],
LifecycleActionResult=result,
InstanceId=event["detail"]["EC2InstanceId"],
)
logger.info(response)
except ClientError as e:
message = "Error completing lifecycle action: {}".format(e)
logger.error(message)
raise Exception(message)
return
def update_nginx_config(
    instanceId, artifactsBucket, nucleusServerAddress, domain
):
# generate config for reverse proxy servers
commands = []
try:
commands = config.get_config(
artifactsBucket, nucleusServerAddress, domain)
logger.debug(commands)
except Exception as e:
raise Exception("Failed to get Reverse Proxy config. {}".format(e))
response = ssm.run_commands(
instanceId, commands, document="AWS-RunShellScript"
)
return response
def handler(event, context):
logger.info("Event: %s", json.dumps(event, indent=2))
instanceId = event["detail"]["EC2InstanceId"]
transition = event["detail"]["LifecycleTransition"]
if transition == "autoscaling:EC2_INSTANCE_LAUNCHING":
try:
            update_nginx_config(
instanceId,
ARTIFACTS_BUCKET,
NUCLEUS_SERVER_ADDRESS,
f"{NUCLEUS_DOMAIN_PREFIX}.{NUCLEUS_ROOT_DOMAIN}",
)
send_lifecycle_action(event, "CONTINUE")
except Exception as e:
message = "Error running command: {}".format(e)
logger.warning(traceback.format_exc())
logger.error(message)
send_lifecycle_action(event, "ABANDON")
elif transition == "autoscaling:EC2_INSTANCE_TERMINATING":
try:
send_lifecycle_action(event, "CONTINUE")
except Exception as e:
message = "Error running command: {}".format(e)
logger.warning(traceback.format_exc())
logger.error(message)
send_lifecycle_action(event, "ABANDON")
logger.info("Execution Complete")
return
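# For reference, the fields consumed above follow the shape of an EventBridge
# event for an EC2 Auto Scaling lifecycle hook (values illustrative):
#
#   {
#     "detail": {
#       "LifecycleHookName": "rp-launch-hook",
#       "AutoScalingGroupName": "rp-asg",
#       "LifecycleActionToken": "12345678-1234-1234-1234-123456789012",
#       "EC2InstanceId": "i-0123456789abcdef0",
#       "LifecycleTransition": "autoscaling:EC2_INSTANCE_LAUNCHING"
#     }
#   }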
| 3,116 | Python | 28.40566 | 75 | 0.662067 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/ec2.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import logging
import boto3
from botocore.exceptions import ClientError
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
client = boto3.client("ec2")
ec2_resource = boto3.resource("ec2")
autoscaling = boto3.client("autoscaling")
def get_instance_public_dns_name(instanceId):
instance = get_instance_description(instanceId)
if instance is None:
return None
return instance["PublicDnsName"]
def get_instance_private_dns_name(instanceId):
instance = get_instance_description(instanceId)
if instance is None:
return None
return instance["PrivateDnsName"]
def get_instance_description(instanceId):
response = client.describe_instances(
InstanceIds=[instanceId],
)
instances = response["Reservations"][0]["Instances"]
if not instances:
return None
return instances[0]
def get_instance_status(instanceId):
    response = client.describe_instance_status(
        InstanceIds=[instanceId],
        IncludeAllInstances=True,
    )
    statuses = response["InstanceStatuses"]
    status = {"instanceStatus": None, "systemStatus": None}
    if statuses:
        status = {
            "instanceStatus": statuses[0]["InstanceStatus"]["Status"],
            "systemStatus": statuses[0]["SystemStatus"]["Status"],
        }
    return status
def get_autoscaling_instance(groupName):
response = autoscaling.describe_auto_scaling_groups(
AutoScalingGroupNames=[groupName]
)
logger.debug(response)
instances = response['AutoScalingGroups'][0]["Instances"]
if not instances:
return None
instanceIds = []
for i in instances:
instanceIds.append(i["InstanceId"])
return instanceIds
def update_tag_value(resourceIds: list, tagKey: str, tagValue: str):
client.create_tags(
Resources=resourceIds,
Tags=[{
'Key': tagKey,
'Value': tagValue
}],
)
def delete_tag(resourceIds: list, tagKey: str, tagValue: str):
response = client.delete_tags(
Resources=resourceIds,
Tags=[{
'Key': tagKey,
'Value': tagValue
}],
)
return response
def get_instance_state(id):
instance = ec2_resource.Instance(id)
return instance.state['Name']
def get_instances_by_tag(tagKey, tagValue):
instances = ec2_resource.instances.filter(
Filters=[{'Name': 'tag:{}'.format(tagKey), 'Values': [tagValue]}])
if not instances:
return None
instanceIds = []
for i in instances:
instanceIds.append(i.id)
return instanceIds
def get_instances_by_name(name):
instances = get_instances_by_tag("Name", name)
if not instances:
logger.error(f"ERROR: Failed to get instances by tag: Name, {name}")
return None
return instances
def get_active_instance(instances):
for i in instances:
instance_state = get_instance_state(i)
logger.info(f"Instance: {i}. State: {instance_state}")
if instance_state == "running" or instance_state == "pending":
return i
    logger.warning("Instances are not active")
return None
def get_volumes_by_instance_id(id):
instance = ec2_resource.Instance(id)
volumes = instance.volumes.all()
volumeIds = []
for i in volumes:
volumeIds.append(i.id)
return volumeIds
def terminate_instances(instance_ids):
response = client.terminate_instances(InstanceIds=instance_ids)
logger.info(response)
return response
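# Hedged usage sketch (not part of the original source; requires AWS
# credentials and live resources):
#
#   instances = get_instances_by_name("my-stack/NucleusServer")
#   active = get_active_instance(instances)
#   if active:
#       print(get_instance_private_dns_name(active), get_instance_state(active))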
| 4,068 | Python | 21.605555 | 76 | 0.630285 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/ssm.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import os
import time
import logging
import boto3
from botocore.exceptions import ClientError
LOG_LEVEL = os.getenv("LOG_LEVEL", "DEBUG")
logger = logging.getLogger()
logger.setLevel(LOG_LEVEL)
client = boto3.client("ssm")
def get_param_value(name) -> str:
response = client.get_parameter(Name=name)
logger.info(response)
return response['Parameter']['Value']
def update_param_value(name, value) -> bool:
response = client.put_parameter(Name=name, Value=value, Overwrite=True)
logger.info(response)
try:
return (response['Version'] > 0)
except ClientError as e:
message = "Error calling SendCommand: {}".format(e)
logger.error(message)
return False
def run_commands(
instance_id, commands, document="AWS-RunPowerShellScript", comment="aws_utils.ssm.run_commands"
):
"""alt document options:
AWS-RunShellScript
"""
# Run Commands
logger.info("Calling SendCommand: {} for instance: {}".format(
commands, instance_id))
attempt = 0
response = None
while attempt < 20:
attempt = attempt + 1
try:
time.sleep(10 * attempt)
logger.info("SendCommand, attempt #: {}".format(attempt))
response = client.send_command(
InstanceIds=[instance_id],
DocumentName=document,
Parameters={"commands": commands},
Comment=comment,
CloudWatchOutputConfig={
"CloudWatchLogGroupName": instance_id,
"CloudWatchOutputEnabled": True,
},
)
logger.info(response)
if "Command" in response:
break
if attempt == 10:
message = "Command did not execute successfully in time allowed."
raise Exception(message)
except ClientError as e:
message = "Error calling SendCommand: {}".format(e)
logger.error(message)
continue
if not response:
message = "Command did not execute successfully in time allowed."
raise Exception(message)
# Check Command Status
command_id = response["Command"]["CommandId"]
logger.info(
"Calling GetCommandInvocation for command: {} for instance: {}".format(
command_id, instance_id
)
)
attempt = 0
result = None
while attempt < 10:
attempt = attempt + 1
try:
time.sleep(10 * attempt)
logger.info("GetCommandInvocation, attempt #: {}".format(attempt))
result = client.get_command_invocation(
CommandId=command_id,
InstanceId=instance_id,
)
if result["Status"] == "InProgress":
logger.info("Command is running.")
continue
elif result["Status"] == "Success":
logger.info("Command Output: {}".format(
result["StandardOutputContent"]))
if result["StandardErrorContent"]:
message = "Command returned STDERR: {}".format(
result["StandardErrorContent"])
logger.warning(message)
break
elif result["Status"] == "Failed":
message = "Error Running Command: {}".format(
result["StandardErrorContent"])
logger.error(message)
raise Exception(message)
else:
message = "Command has an unhandled status, will continue: {}".format(
e)
logger.warning(message)
continue
except client.exceptions.InvocationDoesNotExist as e:
message = "Error calling GetCommandInvocation: {}".format(e)
logger.error(message)
raise Exception(message)
if not result or result["Status"] != "Success":
message = "Command did not execute successfully in time allowed."
raise Exception(message)
return result
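# Hedged usage sketch (not part of the original source; the target instance
# must be reachable through SSM):
#
#   result = run_commands(
#       "i-0123456789abcdef0",
#       ["echo hello from ssm"],
#       document="AWS-RunShellScript",
#   )
#   print(result["StandardOutputContent"])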
| 4,304 | Python | 30.195652 | 99 | 0.574814 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/r53.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import boto3
client = boto3.client("route53")
def update_hosted_zone_cname_record(hostedZoneID, rootDomain, domainPrefix, serverAddress):
fqdn = f"{domainPrefix}.{rootDomain}"
response = client.change_resource_record_sets(
HostedZoneId=hostedZoneID,
ChangeBatch={
"Comment": "Updating {fqdn}->{serverAddress} CNAME record",
"Changes": [
{
"Action": "UPSERT",
"ResourceRecordSet": {
"Name": fqdn,
"Type": "CNAME",
"TTL": 300,
"ResourceRecords": [{"Value": serverAddress}],
},
}
],
},
)
return response
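# Hedged usage sketch (not part of the original source; zone ID and domains
# are illustrative):
#
#   update_hosted_zone_cname_record(
#       "Z0123456789ABCDEFGHIJ",
#       "example.com",
#       "nucleus",
#       "my-nlb-1234567890.us-east-1.elb.amazonaws.com",
#   )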
def delete_hosted_zone_cname_record(hostedZoneID, rootDomain, domainPrefix, serverAddress):
    response = client.change_resource_record_sets(
        HostedZoneId=hostedZoneID,
        ChangeBatch={
            "Comment": f"Deleting {domainPrefix}.{rootDomain} CNAME record",
            "Changes": [
                {
                    "Action": "DELETE",
                    "ResourceRecordSet": {
                        "Name": f"{domainPrefix}.{rootDomain}",
                        "Type": "CNAME",
                        # Route 53 requires a DELETE to match the existing record exactly:
                        # it expects the TTL together with ResourceRecords, otherwise it
                        # raises InvalidInput ("Expected exactly one of [AliasTarget, all
                        # of [TTL, and ResourceRecords], or TrafficPolicyInstanceId]").
                        "TTL": 300,
                        "ResourceRecords": [{"Value": serverAddress}],
                    },
                }
            ],
        },
    )
    return response
| 1,989 | Python | 33.310344 | 144 | 0.553042 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/aws_utils/sm.py | # Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: LicenseRef-.amazon.com.-AmznSL-1.0
# Licensed under the Amazon Software License http://aws.amazon.com/asl/
import json
import boto3
SM = boto3.client("secretsmanager")
def get_secret(secret_name):
response = SM.get_secret_value(SecretId=secret_name)
secret = json.loads(response["SecretString"])
return secret
| 429 | Python | 25.874998 | 73 | 0.745921 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/config/nucleus.py |
def start_nucleus_config() -> list[str]:
return '''
cd /opt/ove/base_stack || exit 1
echo "STARTING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml start
'''.splitlines()
def stop_nucleus_config() -> list[str]:
return '''
cd /opt/ove/base_stack || exit 1
echo "STOPPING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml stop
'''.splitlines()
def restart_nucleus_config() -> list[str]:
return '''
cd /opt/ove/base_stack || exit 1
echo "RESTARTING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml restart
'''.splitlines()
def get_config(artifacts_bucket_name: str, full_domain: str, nucleus_build: str, ov_main_password: str, ov_service_password: str) -> list[str]:
return f'''
echo "------------------------ NUCLEUS SERVER CONFIG ------------------------"
echo "UPDATING AND INSTALLING DEPS ----------------------------------"
sudo apt-get update -y -q && sudo apt-get upgrade -y
sudo apt-get install dialog apt-utils -y
echo "INSTALLING AWS CLI ----------------------------------"
sudo curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
sudo apt-get install unzip
sudo unzip awscliv2.zip
sudo ./aws/install
sudo rm awscliv2.zip
sudo rm -fr ./aws/install
echo "INSTALLING PYTHON ----------------------------------"
sudo apt-get -y install python3.9
sudo curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
sudo python3.9 get-pip.py
sudo pip3 install --upgrade pip
sudo pip3 --version
echo "INSTALLING DOCKER ----------------------------------"
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get -y install apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -y update
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
sudo systemctl enable --now docker
echo "INSTALLING DOCKER COMPOSE ----------------------------------"
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
echo "INSTALLING NUCLEUS TOOLS ----------------------------------"
sudo mkdir -p /opt/ove
cd /opt/ove || exit 1
aws s3 cp --recursive s3://{artifacts_bucket_name}/tools/nucleusServer/ ./nucleusServer
cd nucleusServer || exit 1
sudo pip3 install -r requirements.txt
echo "UNPACKAGING NUCLEUS STACK ----------------------------------"
sudo tar xzvf stack/{nucleus_build}.tar.gz -C /opt/ove --strip-components=1
cd /opt/ove/base_stack || exit 1
omniverse_data_path=/var/lib/omni/nucleus-data
nucleusHost=$(curl -s http://169.254.169.254/latest/meta-data/hostname)
sudo nst generate-nucleus-stack-env --server-ip $nucleusHost --reverse-proxy-domain {full_domain} --instance-name nucleus_server --master-password {ov_main_password} --service-password {ov_service_password} --data-root $omniverse_data_path
chmod +x ./generate-sample-insecure-secrets.sh
./generate-sample-insecure-secrets.sh
echo "PULLING NUCLEUS IMAGES ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml pull
echo "STARTING NUCLEUS STACK ----------------------------------"
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml up -d
docker-compose --env-file nucleus-stack.env -f nucleus-stack-ssl.yml ps -a
'''.splitlines()
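# Hedged usage sketch (not part of the original source): these command lists
# are meant to be executed on the instance over SSM, as the companion lambda
# does, e.g.
#
#   import aws_utils.ssm as ssm
#   commands = get_config(bucket, domain, build, main_password, service_password)
#   ssm.run_commands(instance_id, commands, document="AWS-RunShellScript")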
| 4,176 | Python | 48.72619 | 247 | 0.582136 |
aws-samples/nvidia-omniverse-nucleus-on-amazon-ec2/src/lambda/common/config/reverseProxy.py | def get_config(artifacts_bucket_name: str, nucleus_address: str, full_domain: str) -> list[str]:
return f'''
echo "------------------------ REVERSE PROXY CONFIG ------------------------"
echo "UPDATING PACKAGES ----------------------------------"
sudo yum update -y
echo "INSTALLING DEPENDENCIES ----------------------------------"
sudo yum install -y aws-cfn-bootstrap gcc openssl-devel bzip2-devel libffi-devel zlib-devel
echo "INSTALLING NGINX ----------------------------------"
sudo yum install -y amazon-linux-extras
sudo amazon-linux-extras enable nginx1
sudo yum install -y nginx
sudo nginx -v
echo "INSTALLING PYTHON ----------------------------------"
sudo wget https://www.python.org/ftp/python/3.9.9/Python-3.9.9.tgz -P /opt/python3.9
cd /opt/python3.9 || exit 1
sudo tar xzf Python-3.9.9.tgz
cd Python-3.9.9 || exit 1
sudo ./configure --prefix=/usr --enable-optimizations
sudo make install
echo "------------------------ REVERSE PROXY CONFIG ------------------------"
echo "INSTALLING REVERSE PROXY TOOLS ----------------------------------"
cd /opt || exit 1
sudo aws s3 cp --recursive s3://{artifacts_bucket_name}/tools/reverseProxy/ ./reverseProxy
cd reverseProxy || exit 1
pip3 --version
sudo pip3 install -r requirements.txt
sudo rpt generate-nginx-config --domain {full_domain} --server-address {nucleus_address}
echo "STARTING NGINX ----------------------------------"
sudo service nginx restart
'''.splitlines()
| 1,670 | Python | 44.162161 | 99 | 0.511976 |
arhix52/Strelka/conanfile.py | import os
from conan import ConanFile
from conan.tools.cmake import cmake_layout
from conan.tools.files import copy
class StrelkaRecipe(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "CMakeToolchain", "CMakeDeps"
def requirements(self):
self.requires("glm/cci.20230113")
self.requires("spdlog/[>=1.4.1]")
self.requires("imgui/1.89.3")
self.requires("glfw/3.3.8")
self.requires("stb/cci.20230920")
self.requires("glad/0.1.36")
self.requires("doctest/2.4.11")
self.requires("cxxopts/3.1.1")
self.requires("tinygltf/2.8.19")
self.requires("nlohmann_json/3.11.3")
def generate(self):
copy(self, "*glfw*", os.path.join(self.dependencies["imgui"].package_folder,
"res", "bindings"), os.path.join(self.source_folder, "external", "imgui"))
copy(self, "*opengl3*", os.path.join(self.dependencies["imgui"].package_folder,
"res", "bindings"), os.path.join(self.source_folder, "external", "imgui"))
copy(self, "*metal*", os.path.join(self.dependencies["imgui"].package_folder,
"res", "bindings"), os.path.join(self.source_folder, "external", "imgui"))
def layout(self):
cmake_layout(self)
| 1,294 | Python | 37.088234 | 87 | 0.619784 |
cadop/HumanGenerator/exts/siborg.create.human/API_EXAMPLE.py | # Human Generator API Example
# Author: Joshua Grebler | SiBORG Lab | 2023
# Description: This is an example of how to use the Human Generator API to create human models in NVIDIA Omniverse.
# The siborg.create.human extension must be installed and enabled for this to work.
# The script generates 10 humans, placing them throughout the stage. Random modifiers and clothing are applied to each.
import siborg.create.human as hg
from siborg.create.human.shared import data_path
import omni.usd
import random
# Get the stage
context = omni.usd.get_context()
stage = context.get_stage()
# Make a single Human to start with
human = hg.Human()
human.add_to_scene()
# Apply a modifier by name (you can find the names of all the available modifiers
# by using the `get_modifier_names()` method)
height = human.get_modifier_by_name("macrodetails-height/Height")
human.set_modifier_value(height, 1)
# Update the human in the scene
human.update_in_scene(human.prim_path)
# Gather some default clothing items (additional clothing can be downloaded from the extension UI)
clothes = ["nvidia_Casual/nvidia_casual.mhclo", "omni_casual/omni_casual.mhclo", "siborg_casual/siborg_casual.mhclo"]
# Convert the clothing names to their full paths.
clothes = [data_path(f"clothes/{c}") for c in clothes]
# Create 10 humans, placing them randomly throughout the scene, and applying random modifier values
for _ in range(10):
h = hg.Human()
h.add_to_scene()
# Apply a random translation and Y rotation to the human prim
translateOp = h.prim.AddTranslateOp()
translateOp.Set((random.uniform(-50, 50), 0, random.uniform(-50, 50)))
rotateOp = h.prim.AddRotateXYZOp()
rotateOp.Set((0, random.uniform(0, 360), 0))
# Apply a random value to the last 9 modifiers in the list.
# These modifiers are macros that affect the overall shape of the human more than any individual modifier.
# Get the last 9 modifiers
modifiers = h.get_modifiers()[-9:]
# Apply a random value to each modifier. Use the modifier's min/max values to ensure the value is within range.
for m in modifiers:
h.set_modifier_value(m, random.uniform(m.getMin(), m.getMax()))
# Update the human in the scene
h.update_in_scene(h.prim_path)
# Add a random clothing item to the human
h.add_item(random.choice(clothes))
| 2,343 | Python | 36.806451 | 119 | 0.733248 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/human.py | from typing import Tuple, List, Dict, Union
from .mhcaller import MHCaller
import numpy as np
import omni.kit
import omni.usd
from .shared import sanitize, data_path
from .skeleton import Skeleton
from module3d import Object3D
from pxr import Usd, UsdGeom, UsdPhysics, UsdShade, Sdf, Gf, Tf, UsdSkel, Vt
import carb
from .materials import get_mesh_texture, create_material, bind_material
class Human:
"""Class representing a human in the scene. This class is used to add a human to the scene,
and to update the human in the scene. The class also contains functions to add and remove
proxies (clothing, etc.) and apply modifiers, as well as a skeleton.
Attributes
----------
name : str
Name of the human
prim : UsdSkel.Root
Reference to the usd prim for the skelroot representing the human in the stage. Can be changed using set_prim()
prim_path : str
Path to the human prim
scale : float
Scale factor for the human. Defaults to 10 (Omniverse provided humans are 10 times larger than makehuman)
skeleton : Makehuman.Skeleton
Skeleton object for the human
usd_skel : UsdSkel.Skeleton
Skeleton object for the human in the USD stage. Imported from the skeleton object.
objects : List[Object3D]
List of objects attached to the human. Fetched from the makehuman app
mh_meshes : List[Object3D]
List of meshes attached to the human. Fetched from the makehuman app
"""
def __init__(self, name='human', **kwargs):
"""Constructs an instance of Human.
Parameters
----------
name : str
Name of the human. Defaults to 'human'
"""
self.name = name
# Reference to the usd prim for the skelroot representing the human in the stage
self.prim = None
# Provide a scale factor (Omniverse provided humans are 10 times larger than makehuman)
self.scale = 10
# Create a skeleton object for the human
self.skeleton = Skeleton(self.scale)
# usd_skel is none until the human is added to the stage
self.usd_skel = None
# Set the human in makehuman to default values
MHCaller.reset_human()
def reset(self):
"""Resets the human in makehuman and adds a new skeleton to the human"""
# Reset the human in makehuman
MHCaller.reset_human()
# Re-add the skeleton to the human
self.skeleton = Skeleton(self.scale)
def delete_proxies(self):
"""Deletes the prims corresponding to proxies attached to the human"""
# Delete any child prims corresponding to proxies
if self.prim:
# Get the children of the human prim and delete them all at once
proxy_prims = [child.GetPath() for child in self.prim.GetChildren() if child.GetCustomDataByKey("Proxy_path:")]
omni.kit.commands.execute("DeletePrims", paths=proxy_prims)
@property
def prim_path(self):
"""Path to the human prim"""
if self.prim:
return self.prim.GetPath().pathString
else:
return None
@property
def objects(self):
"""List of objects attached to the human. Fetched from the makehuman app"""
return MHCaller.objects
@property
def mh_meshes(self):
"""List of meshes attached to the human. Fetched from the makehuman app"""
return MHCaller.meshes
def add_to_scene(self):
"""Adds the human to the scene. Creates a prim for the human with custom attributes
to hold modifiers and proxies. Also creates a prim for each proxy and attaches it to
the human prim.
Returns
-------
str
Path to the human prim"""
# Get the current stage
stage = omni.usd.get_context().get_stage()
root_path = "/"
# Get default prim.
default_prim = stage.GetDefaultPrim()
if default_prim.IsValid():
# Set the rootpath under the stage's default prim, if the default prim is valid
root_path = default_prim.GetPath().pathString
# Create a path for the next available prim
prim_path = omni.usd.get_stage_next_free_path(stage, root_path + "/" + self.name, False)
# Create a prim for the human
# Prim should be a SkelRoot so we can rig the human with a skeleton later
self.prim = UsdSkel.Root.Define(stage, prim_path)
# Write the properties of the human to the prim
self.write_properties(prim_path, stage)
# Get the objects of the human from mhcaller
objects = MHCaller.objects
# Get the human object from the list of objects
human = objects[0]
# Determine the offset for the human from the ground
offset = -1 * human.getJointPosition("ground")
# Import makehuman objects into the scene
mesh_paths = self.import_meshes(prim_path, stage, offset = offset)
# Add the skeleton to the scene
self.usd_skel= self.skeleton.add_to_stage(stage, prim_path, offset = offset)
# Create bindings between meshes and the skeleton. Returns a list of
# bindings the length of the number of meshes
bindings = self.setup_bindings(mesh_paths, stage, self.usd_skel)
# Setup weights for corresponding mh_meshes (which hold the data) and
# bindings (which link USD_meshes to the skeleton)
self.setup_weights(self.mh_meshes, bindings, self.skeleton.joint_names, self.skeleton.joint_paths)
self.setup_materials(self.mh_meshes, mesh_paths, root_path, stage)
# Explicitly setup material for human skin
texture_path = data_path("skins/textures/skin.png")
skin = create_material(texture_path, "Skin", root_path, stage)
# Bind the skin material to the first prim in the list (the human)
bind_material(mesh_paths[0], skin, stage)
Human._set_scale(self.prim.GetPrim(), self.scale)
return self.prim
def update_in_scene(self, prim_path: str):
"""Updates the human in the scene. Writes the properties of the human to the
human prim and imports the human and proxy meshes. This is called when the
human is updated
Parameters
----------
prim_path : str
Path to the human prim (prim type is SkelRoot)
"""
usd_context = omni.usd.get_context()
stage = usd_context.get_stage()
        prim = stage.GetPrimAtPath(prim_path)
if prim and stage:
print(prim.GetPath().pathString)
prim_kind = prim.GetTypeName()
# Check if the prim is a SkelRoot and a human
if prim_kind == "SkelRoot" and prim.GetCustomDataByKey("human"):
# Get default prim.
default_prim = stage.GetDefaultPrim()
if default_prim.IsValid():
# Set the rootpath under the stage's default prim, if the default prim is valid
root_path = default_prim.GetPath().pathString
else:
root_path = "/"
# Write the properties of the human to the prim
self.write_properties(prim_path, stage)
# Get the objects of the human from mhcaller
objects = MHCaller.objects
# Get the human object from the list of objects
human = objects[0]
# Determine the offset for the human from the ground
offset = -1 * human.getJointPosition("ground")
# Import makehuman objects into the scene
mesh_paths = self.import_meshes(prim_path, stage, offset = offset)
# Update the skeleton values and insert it into the stage
self.usd_skel = self.skeleton.update_in_scene(stage, prim_path, offset = offset)
# Create bindings between meshes and the skeleton. Returns a list of
# bindings the length of the number of meshes
bindings = self.setup_bindings(mesh_paths, stage, self.usd_skel)
# Setup weights for corresponding mh_meshes (which hold the data) and
# bindings (which link USD_meshes to the skeleton)
self.setup_weights(self.mh_meshes, bindings, self.skeleton.joint_names, self.skeleton.joint_paths)
self.setup_materials(self.mh_meshes, mesh_paths, root_path, stage)
# Explicitly setup material for human skin
texture_path = data_path("skins/textures/skin.png")
skin = create_material(texture_path, "Skin", root_path, stage)
# Bind the skin material to the first prim in the list (the human)
bind_material(mesh_paths[0], skin, stage)
else:
carb.log_warn("The selected prim must be a human!")
else:
carb.log_warn("Can't update human. No prim selected!")
def import_meshes(self, prim_path: str, stage: Usd.Stage, offset: List[float] = [0, 0, 0]):
"""Imports the meshes of the human into the scene. This is called when the human is
added to the scene, and when the human is updated. This function creates mesh prims
for both the human and its proxies, and attaches them to the human prim. If a mesh already
exists in the scene, its values are updated instead of creating a new mesh.
Parameters
----------
prim_path : str
Path to the human prim
stage : Usd.Stage
Stage to write to
offset : List[float], optional
Offset to move the mesh relative to the prim origin, by default [0, 0, 0]
Returns
-------
paths : array of: Sdf.Path
Usd Sdf paths to geometry prims in the scene
"""
# Get the objects of the human from mhcaller
objects = MHCaller.objects
# Get the meshes of the human and its proxies
meshes = [o.mesh for o in objects]
usd_mesh_paths = []
for mesh in meshes:
# Number of vertices per face
nPerFace = mesh.vertsPerFaceForExport
# Lists to hold pruned lists of vertex and UV indices
newvertindices = []
newuvindices = []
# Array of coordinates organized [[x1,y1,z1],[x2,y2,z2]...]
# Adding the given offset moves the mesh relative to the prim origin
coords = mesh.getCoords() + offset
for fn, fv in enumerate(mesh.fvert):
if not mesh.face_mask[fn]:
continue
# only include <nPerFace> verts for each face, and order them
# consecutively
newvertindices += [(fv[n]) for n in range(nPerFace)]
fuv = mesh.fuvs[fn]
# build an array of (u,v)s for each face
newuvindices += [(fuv[n]) for n in range(nPerFace)]
# Type conversion
newvertindices = np.array(newvertindices)
# Create mesh prim at appropriate path. Does not yet hold any data
name = sanitize(mesh.name)
usd_mesh_path = prim_path + "/" + name
usd_mesh_paths.append(usd_mesh_path)
# Check to see if the mesh prim already exists
prim = stage.GetPrimAtPath(usd_mesh_path)
if prim.IsValid():
# omni.kit.commands.execute("DeletePrims", paths=[usd_mesh_path])
point_attr = prim.GetAttribute('points')
point_attr.Set(coords)
face_count = prim.GetAttribute('faceVertexCounts')
nface = [nPerFace] * int(len(newvertindices) / nPerFace)
face_count.Set(nface)
face_idx = prim.GetAttribute('faceVertexIndices')
face_idx.Set(newvertindices)
normals_attr = prim.GetAttribute('normals')
normals_attr.Set(mesh.getNormals())
meshGeom = UsdGeom.Mesh(prim)
# If it doesn't exist, make it. This will run the first time a human is created and
# whenever a new proxy is added
else:
# First determine if the mesh is a proxy
p = mesh.object.proxy
if p:
                    # Determine whether the mesh is a clothes proxy or a proxymesh.
                    # Only one proxy of any other type (hair, eyes, etc.) can exist
                    # at a time, so any existing proxy prim of the same type must be
                    # removed before the new one is written
                    type = p.type if p.type else "proxymeshes"
                    if type not in ("clothes", "proxymeshes"):
for child in self.prim.GetChildren():
child_type = child.GetCustomDataByKey("Proxy_type:")
if child_type == type:
# If the child prim has the same type as the proxy, delete it
omni.kit.commands.execute("DeletePrims", paths=[child.GetPath()])
break
meshGeom = UsdGeom.Mesh.Define(stage, usd_mesh_path)
prim = meshGeom.GetPrim()
# Set vertices. This is a list of tuples for ALL vertices in an unassociated
# cloud. Faces are built based on indices of this list.
# Example: 3 explicitly defined vertices:
                # meshGeom.CreatePointsAttr([(-10, 0, -10), (-10, 0, 10), (10, 0, 10)])
meshGeom.CreatePointsAttr(coords)
# Set face vertex count. This is an array where each element is the number
# of consecutive vertex indices to include in each face definition, as
# indices are given as a single flat list. The length of this list is the
# same as the number of faces
# Example: 4 faces with 4 vertices each
# meshGeom.CreateFaceVertexCountsAttr([4, 4, 4, 4])
                nface = [nPerFace] * (len(newvertindices) // nPerFace)
meshGeom.CreateFaceVertexCountsAttr(nface)
# Set face vertex indices.
# Example: one face with 4 vertices defined by 4 indices.
# meshGeom.CreateFaceVertexIndicesAttr([0, 1, 2, 3])
meshGeom.CreateFaceVertexIndicesAttr(newvertindices)
# Set vertex normals. Normals are represented as a list of tuples each of
# which is a vector indicating the direction a point is facing. This is later
                # used to calculate face normals
# Example: Normals for 3 vertices
# meshGeom.CreateNormalsAttr([(0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1,
# 0)])
meshGeom.CreateNormalsAttr(mesh.getNormals())
meshGeom.SetNormalsInterpolation("vertex")
# If the mesh is a proxy, write the proxy path to the mesh prim
if mesh.object.proxy:
p = mesh.object.proxy
type = p.type if p.type else "proxymeshes"
prim.SetCustomDataByKey("Proxy_path:", p.file)
prim.SetCustomDataByKey("Proxy_type:", type)
prim.SetCustomDataByKey("Proxy_name:", p.name)
# Set vertex uvs. UVs are represented as a list of tuples, each of which is a 2D
# coordinate. UV's are used to map textures to the surface of 3D geometry
# Example: texture coordinates for 3 vertices
# texCoords.Set([(0, 1), (0, 0), (1, 0)])
texCoords = meshGeom.CreatePrimvar(
"st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.faceVarying
)
texCoords.Set(mesh.getUVs(newuvindices))
            # Subdivision is set to none. The mesh is as imported and not further refined
meshGeom.CreateSubdivisionSchemeAttr().Set("none")
        # Convert path strings to USD Sdf paths. TODO change to map() for performance
paths = [Sdf.Path(mesh_path) for mesh_path in usd_mesh_paths]
return paths
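    # A minimal usage sketch for import_meshes (hypothetical; assumes an open
    # stage and a human prim already created at /World/human):
    #
    #   stage = omni.usd.get_context().get_stage()
    #   mesh_paths = human.import_meshes("/World/human", stage)
    #   # mesh_paths[0] is the body mesh; any remaining paths are proxy meshes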
def get_written_modifiers(self) -> Union[Dict[str, float], None]:
"""List of modifier names and values written to the human prim.
MAY BE STALE IF THE HUMAN HAS BEEN UPDATED IN MAKEHUMAN AND THE CHANGES HAVE NOT BEEN WRITTEN TO THE PRIM.
Returns
-------
Dict[str, float]
Dictionary of modifier names and values. Keys are modifier names, values are modifier values"""
return self.prim.GetCustomDataByKey("Modifiers") if self.prim else None
def get_changed_modifiers(self):
"""List of modifiers which have been changed in makehuman. Fetched from the human in makehuman.
MAY NOT MATCH `get_written_modifiers()` IF CHANGES HAVE NOT BEEN WRITTEN TO THE PRIM."""
return MHCaller.modifiers
def get_modifiers(self):
"""Retrieve the list of all modifiers available to the human, whether or not their values have changed."""
return MHCaller.default_modifiers
def set_modifier_value(self, modifier, value: float):
"""Sets the value of a modifier in makehuman. Validates the value before setting it.
Returns true if the value was set, false otherwise.
Parameters
----------
modifier : makehuman.humanmodifier.Modifier
Modifier to change
value : float
Value to set the modifier to
"""
# Get the range of the modifier
val_min = modifier.getMin()
val_max = modifier.getMax()
# Check if the value is within the range of the modifier
        if val_min <= value <= val_max:
# Set the value of the modifier
modifier.setValue(value)
return True
else:
carb.log_warn(f"Value must be between {str(val_min)} and {str(val_max)}")
return False
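    # Hypothetical usage sketch: fetch a modifier by name and set it. The
    # modifier name is illustrative; out-of-range values are rejected above.
    #
    #   m = human.get_modifier_by_name("macrodetails/Age")
    #   if not human.set_modifier_value(m, 0.8):
    #       carb.log_warn("Value rejected")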
def get_modifier_by_name(self, name: str):
"""Gets a modifier from the list of modifiers attached to the human by name
Parameters
----------
name : str
Name of the modifier to get
Returns
-------
        makehuman.humanmodifier.Modifier
Modifier with the given name
"""
return MHCaller.human.getModifier(name)
    def get_modifier_names(self):
        """Returns the names of all modifiers available to the human"""
        return MHCaller.human.getModifierNames()
def write_properties(self, prim_path: str, stage: Usd.Stage):
"""Writes the properties of the human to the human prim. This includes modifiers and
proxies. This is called when the human is added to the scene, and when the human is
updated
Parameters
----------
prim_path : str
Path to the human prim
stage : Usd.Stage
Stage to write to
"""
prim = stage.GetPrimAtPath(prim_path)
# Add custom data to the prim by key, designating the prim is a human
prim.SetCustomDataByKey("human", True)
# Get the modifiers of the human in mhcaller
modifiers = MHCaller.modifiers
for m in modifiers:
# Add the modifier to the prim as custom data by key. For modifiers,
            # the format is "group/modifier:value"
prim.SetCustomDataByKey("Modifiers:" + m.fullName, m.getValue())
# NOTE We are not currently using proxies in the USD export. Proxy data is stored
# in their respective mesh prims, so that deleting proxy prims will also remove the
# proxies. The following code is left here for reference.
# Get the proxies of the human in mhcaller
# proxies = MHCaller.proxies
# for p in proxies:
# # Add the proxy to the prim as custom data by key under "Proxies".
# # Proxy type should be "proxymeshes" if type cannot be determined from the
# # proxy.type property.
# type = p.type if p.type else "proxymeshes"
# # Only "proxymeshes" and "clothes" should be subdictionaries of "Proxies"
# if type == "clothes" or type == "proxymeshes":
# prim.SetCustomDataByKey("Proxies:" + type + ":" + p.name, p.file)
# # Other proxy types should be added as a key to the prim with their
# # type as the key and the path as the value
# else:
# prim.SetCustomDataByKey("Proxies:" + type, p.file)
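    # Sketch of reading the custom data written above back off a prim (the
    # prim path is illustrative):
    #
    #   prim = stage.GetPrimAtPath("/World/human")
    #   if prim.GetCustomDataByKey("human"):
    #       modifiers = prim.GetCustomDataByKey("Modifiers")  # {name: value}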
def set_prim(self, usd_prim : Usd.Prim):
"""Updates the human based on the given prim's attributes
Parameters
----------
usd_prim : Usd.Prim
Prim from which to update the human model."""
self.prim = usd_prim
# Get the data from the prim
humandata = self.prim.GetCustomData()
# Get the list of modifiers from the prim
modifiers = humandata.get("Modifiers")
for m, v in modifiers.items():
MHCaller.human.getModifier(m).setValue(v, skipDependencies=False)
# Gather proxies from the prim children
proxies = []
for child in self.prim.GetChildren():
if child.GetTypeName() == "Mesh" and child.GetCustomDataByKey("Proxy_path:"):
proxies.append(child)
# Clear the makehuman proxies
MHCaller.clear_proxies()
        # Make sure the proxy list is not empty
if proxies:
for p in proxies:
type = p.GetCustomDataByKey("Proxy_type:")
path = p.GetCustomDataByKey("Proxy_path:")
# name = p.GetCustomDataByKey("Proxy_name:")
MHCaller.add_proxy(path, type)
# Update the human in MHCaller
MHCaller.human.applyAllTargets()
def setup_weights(self, mh_meshes: List['Object3D'], bindings: List[UsdSkel.BindingAPI], joint_names: List[str], joint_paths: List[str]):
"""Apply weights to USD meshes using data from makehuman. USD meshes,
bindings and skeleton must already be in the active scene
Parameters
----------
mh_meshes : list of `Object3D`
Makehuman meshes which store weight data
bindings : list of `UsdSkel.BindingAPI`
USD bindings between meshes and skeleton
joint_names : list of str
Unique, plaintext names of all joints in the skeleton in USD
(breadth-first) order.
joint_paths : list of str
List of the full usd path to each joint corresponding to the skeleton to bind to
"""
        # Generate bone weights for all meshes up front so they can be reused for all meshes
rawWeights = MHCaller.human.getVertexWeights(
MHCaller.human.getSkeleton()
) # Basemesh weights
        for mesh in mh_meshes:
if mesh.object.proxy:
# Transfer weights to proxy
parentWeights = mesh.object.proxy.getVertexWeights(
rawWeights, MHCaller.human.getSkeleton()
)
else:
parentWeights = rawWeights
# Transfer weights to face/vert masked and/or subdivided mesh
weights = mesh.getVertexWeights(parentWeights)
            # Attach the vertex weights to the mesh so they can be passed around
            # the exporter more easily; the cloned mesh is discarded afterwards anyway
mesh.vertexWeights = weights
# Iterate through corresponding meshes and bindings
for mh_mesh, binding in zip(mh_meshes, bindings):
# Calculate vertex weights
indices, weights = self.calculate_influences(mh_mesh, joint_names)
# Type conversion to native ints and floats from numpy
indices = list(map(int, indices))
weights = list(map(float, weights))
# Type conversion to USD
indices = Vt.IntArray(indices)
weights = Vt.FloatArray(weights)
# The number of weights to apply to each vertex, taken directly from
# MakeHuman data
elementSize = int(mh_mesh.vertexWeights._nWeights)
# weight_data = list(mh_mesh.vertexWeights.data) TODO remove
# We might not need to normalize. Makehuman weights are automatically
# normalized when loaded, see:
# http://www.makehumancommunity.org/wiki/Technical_notes_on_MakeHuman
UsdSkel.NormalizeWeights(weights, elementSize)
UsdSkel.SortInfluences(indices, weights, elementSize)
# Assign indices to binding
indices_attribute = binding.CreateJointIndicesPrimvar(
constant=False, elementSize=elementSize
)
joint_attr = binding.GetPrim().GetAttribute('skel:joints')
joint_attr.Set(joint_paths)
indices_attribute.Set(indices)
# Assign weights to binding
weights_attribute = binding.CreateJointWeightsPrimvar(
constant=False, elementSize=elementSize
)
weights_attribute.Set(weights)
def calculate_influences(self, mh_mesh: Object3D, joint_names: List[str]):
"""Build arrays of joint indices and corresponding weights for each vertex.
Joints are in USD (breadth-first) order.
Parameters
----------
mh_mesh : Object3D
Makehuman-format mesh. Contains weight and vertex data.
joint_names : list of str
Unique, plaintext names of all joints in the skeleton in USD
(breadth-first) order.
Returns
-------
indices : list of int
Flat list of joint indices for each vertex
weights : list of float
Flat list of weights corresponding to joint indices
"""
# The maximum number of weights a vertex might have
max_influences = mh_mesh.vertexWeights._nWeights
        # Named joints corresponding to vertices and weights, i.e.
        # {"joint": ([indices], [weights])}
influence_joints = mh_mesh.vertexWeights.data
num_verts = mh_mesh.getVertexCount(excludeMaskedVerts=False)
# all skeleton joints in USD order
binding_joints = joint_names
# Corresponding arrays of joint indices and weights of length num_verts.
# Allots the maximum number of weights for every vertex, and pads any
# remaining weights with 0's, per USD spec, see:
# https://graphics.pixar.com/usd/dev/api/_usd_skel__schemas.html#UsdSkel_BindingAPI
# "If a point has fewer influences than are needed for other points, the
# unused array elements of that point should be filled with 0, both for
# joint indices and for weights."
indices = np.zeros((num_verts, max_influences))
weights = np.zeros((num_verts, max_influences))
# Keep track of the number of joint influences on each vertex
influence_counts = np.zeros(num_verts, dtype=int)
for joint, joint_data in influence_joints.items():
# get the index of the joint in our USD-ordered list of all joints
joint_index = binding_joints.index(joint)
for vert_index, weight in zip(*joint_data):
# Use influence_count to keep from overwriting existing influences
influence_count = influence_counts[vert_index]
# Add the joint index to our vertex array
indices[vert_index][influence_count] = joint_index
# Add the weight to the same vertex
weights[vert_index][influence_count] = weight
# Add to the influence count for this vertex
influence_counts[vert_index] += 1
# Check for any unweighted verts (this is a test routine)
# for i, d in enumerate(indices): if np.all((d == 0)): print(i)
# Flatten arrays to one dimensional lists
indices = indices.flatten()
weights = weights.flatten()
return indices, weights
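    # Worked example of the padding scheme above (values illustrative): with
    # max_influences = 2 and two vertices, where vertex 0 is weighted to
    # joints 3 (0.75) and 5 (0.25) and vertex 1 only to joint 3 (1.0):
    #
    #   indices -> [3, 5, 3, 0]
    #   weights -> [0.75, 0.25, 1.0, 0.0]
    #
    # The unused slot on vertex 1 is zero-filled, per the USD spec quoted above.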
def setup_bindings(self, paths: List[Sdf.Path], stage: Usd.Stage, skeleton: UsdSkel.Skeleton):
"""Setup bindings between meshes in the USD scene and the skeleton
Parameters
----------
paths : List of Sdf.Path
USD Sdf paths to each mesh prim
stage : Usd.Stage
The USD stage where the prims can be found
skeleton : UsdSkel.Skeleton
The USD skeleton to apply bindings to
Returns
-------
array of: UsdSkel.BindingAPI
Array of bindings between each mesh and the skeleton, in "path" order
"""
bindings = []
        for path in paths:
            # Get the prim in the stage
            prim = stage.GetPrimAtPath(path)
attrs = prim.GetAttribute('primvars:skel:jointWeights')
# Check if joint weights have already been applied
if attrs.IsValid():
prim_path = prim.GetPath()
sdf_path = Sdf.Path(prim_path)
binding = UsdSkel.BindingAPI.Get(stage, sdf_path)
# relationships = prim.GetRelationships()
# 'material:binding' , 'proxyPrim', 'skel:animationSource','skel:blendShapeTargets','skel:skeleton'
# get_binding.GetSkeletonRel()
else:
# Create a binding applied to the prim
binding = UsdSkel.BindingAPI.Apply(prim)
# Create a relationship between the binding and the skeleton
binding.CreateSkeletonRel().SetTargets([skeleton.GetPath()])
# Add the binding to the list to return
bindings.append(binding)
return bindings
def setup_materials(self, mh_meshes: List['Object3D'], meshes: List[Sdf.Path], root: str, stage: Usd.Stage):
"""Fetches materials from Makehuman meshes and applies them to their corresponding
Usd mesh prims in the stage.
Parameters
----------
mh_meshes : List['Object3D']
List of makehuman meshes
meshes : List[Sdf.Path]
Paths to Usd meshes in the stage
root : str
The root path under which to create new prims
stage : Usd.Stage
Usd stage in which to create materials, and which contains the meshes
to which to apply materials
"""
        for mh_mesh, mesh in zip(mh_meshes, meshes):
# Get a texture path and name from the makehuman mesh
texture, name = get_mesh_texture(mh_mesh)
if texture:
# If we can get a texture from the makehuman mesh, create a material
# from it and bind it to the corresponding USD mesh in the stage
material = create_material(texture, name, root, stage)
bind_material(mesh, material, stage)
def add_item(self, path: str):
"""Add a new asset to the human. Propagates changes to the Makehuman app
        and then updates the stage with the new asset. If the asset is a proxy,
targets will not be applied. If the asset is a skeleton, targets must
be applied.
Parameters
----------
path : str
Path to an asset on disk
"""
# Check if human has a prim
if self.prim:
# Add an item through the MakeHuman instance and update the widget view
MHCaller.add_item(path)
self.update_in_scene(self.prim.GetPath().pathString)
else:
carb.log_warn("Can't add asset. No human prim selected!")
@staticmethod
def _set_scale(prim : Usd.Prim, scale : float):
"""Set scale of a prim.
Parameters
----------
prim : Usd.Prim
The prim to scale.
scale : float
The scale to apply."""
        if prim is None:
            return
        # Uniform scale.
        sV = Gf.Vec3f(scale, scale, scale)
        existing_scale = prim.GetAttribute("xformOp:scale").Get()
        if existing_scale is not None:
            prim.GetAttribute("xformOp:scale").Set(sV)
        else:
            # xformOpOrder is also updated.
            xformAPI = UsdGeom.XformCommonAPI(prim)
            xformAPI.SetScale(sV)
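# Hypothetical usage sketch for _set_scale (prim path illustrative):
#
#   stage = omni.usd.get_context().get_stage()
#   prim = stage.GetPrimAtPath("/World/human")
#   Human._set_scale(prim, 0.1)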
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/styles.py
# Stylesheet for parameter panels
panel_style = {
"Rectangle::group_rect": {
"background_color": 0xFF313333,
"border_radius": 5,
"margin": 5,
},
"VStack::contents": {
"margin": 10,
},
}
# Stylesheet for sliderentry widgets
sliderentry_style = {
"Label::label_param": {
"margin_width": 10,
},
}
# stylesheet for collapseable frame widgets, used for each modifier category
frame_style = {
"CollapsableFrame": {
"background_color": 0xFF1F2123,
},
}
# stylesheet for main UI window
window_style = {
"Rectangle::splitter": {"background_color": 0xFF454545},
"Rectangle::splitter:hovered": {"background_color": 0xFFFFCA83},
}
# Stylesheet for buttons
button_style = {
"Button:disabled": {
"background_color": 0xFF424242,
},
"Button:disabled.Label": {
"color": 0xFF848484,
},
}
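# A minimal sketch of applying one of these style dicts to a widget (assumes
# omni.ui is imported as ui; the widget construction is illustrative):
#
#   with ui.ZStack(style=panel_style):
#       ui.Rectangle(name="group_rect")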
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/mhcaller.py
from typing import TypeVar, Union
import warnings
import io
import makehuman
from pathlib import Path
# Makehuman loads most modules by manipulating the system path, so we have to
# run this before we can run the rest of our makehuman imports
makehuman.set_sys_path()
import human
import animation
import bvh
import files3d
import mh
from core import G
from mhmain import MHApplication
from shared import wavefront
import humanmodifier, skeleton
import proxy, gui3d, events3d, targets
from getpath import findFile
import numpy as np
import carb
from .shared import data_path
class classproperty:
    """Class property decorator. Allows us to define a property that is
    accessed on the class itself rather than on an instance."""
def __init__(cls, fget):
cls.fget = fget
def __get__(cls, obj, owner):
return cls.fget(owner)
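# A minimal sketch of the decorator above (class and value are illustrative):
#
#   class Foo:
#       @classproperty
#       def bar(cls):
#           return 42
#
#   Foo.bar  # -> 42, accessed on the class itself without an instance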
class MHCaller:
"""A singleton wrapper around the Makehuman app. Lets us use Makehuman functions without
launching the whole application. Also holds all data about the state of our Human
and available modifiers/assets, and allows us to create new humans without creating a new
instance of MHApp.
Attributes
----------
G : Globals
Makehuman global object. Stores globals needed by Makehuman internally
human : Human
Makehuman Human object. Encapsulates all human data (parameters, available)
modifiers, skeletons, meshes, assets, etc) and functions.
"""
G = G
human = None
def __init__(cls):
"""Constructs an instance of MHCaller. This involves setting up the
needed components to use makehuman modules independent of the GUI.
This includes app globals (G) and the human object."""
cls._config_mhapp()
cls.init_human()
def __new__(cls):
"""Singleton pattern. Only one instance of MHCaller can exist at a time."""
if not hasattr(cls, 'instance'):
cls.instance = super(MHCaller, cls).__new__(cls)
return cls.instance
@classmethod
def _config_mhapp(cls):
"""Declare and initialize the makehuman app, and move along if we
encounter any errors (omniverse sometimes fails to purge the app
singleton on extension reload, which can throw an error. This just means
the app already exists)
"""
try:
cls.G.app = MHApplication()
        except Exception:
return
cls.human_mapper = {}
@classmethod
def reset_human(cls):
"""Resets the human object to its initial state. This involves setting the
human's name to its default, resetting all modifications, and resetting all
proxies. Does not reset the skeleton. Also flags the human as having been
reset so that the new name can be created when adding to the Usd stage.
"""
cls.human.resetMeshValues()
# Subdivide the human mesh. This also means that any proxies added to the human are subdivided
cls.human.setSubdivided(True)
# Restore eyes
# cls.add_proxy(data_path("eyes/high-poly/high-poly.mhpxy"), "eyes")
# Reset skeleton to the game skeleton
cls.human.setSkeleton(cls.game_skel)
# Reset the human to tpose
cls.set_tpose()
        # HACK Set the age to itself to force an update of targets, otherwise humans
# are created with the MH base mesh, see:
# http://static.makehumancommunity.org/makehuman/docs/professional_mesh_topology.html
cls.human.setAge(cls.human.getAge())
@classmethod
def init_human(cls):
"""Initialize the human and set some required files from disk. This
includes the skeleton and any proxies (hair, clothes, accessories etc.)
        The weights from the base skeleton must be transferred to the chosen
skeleton or else there will be unweighted verts on the meshes.
"""
cls.human = human.Human(files3d.loadMesh(mh.getSysDataPath("3dobjs/base.obj"), maxFaces=5))
# set the makehuman instance human so that features (eg skeletons) can
# access it globally
cls.G.app.selectedHuman = cls.human
humanmodifier.loadModifiers(mh.getSysDataPath("modifiers/modeling_modifiers.json"), cls.human)
# Add eyes
# cls.add_proxy(data_path("eyes/high-poly/high-poly.mhpxy"), "eyes")
cls.base_skel = skeleton.load(
mh.getSysDataPath("rigs/default.mhskel"),
cls.human.meshData,
)
# Load the game developer skeleton
# The root of this skeleton is at the origin which is better for animation
# retargeting
cls.game_skel = skeleton.load(data_path("rigs/game_engine.mhskel"), cls.human.meshData)
# Build joint weights on our chosen skeleton, derived from the base
# skeleton
cls.game_skel.autoBuildWeightReferences(cls.base_skel)
# Set the base skeleton
cls.human.setBaseSkeleton(cls.base_skel)
# Set the game skeleton
cls.human.setSkeleton(cls.game_skel)
@classproperty
def objects(cls):
"""List of objects attached to the human.
Returns
-------
list of: guiCommon.Object
All 3D objects included in the human. This includes the human
            itself, as well as any proxies
"""
# Make sure proxies are up-to-date
cls.update()
return cls.human.getObjects()
@classproperty
def meshes(cls):
"""All of the meshes of all of the objects attached to a human. This
        includes the mesh of the human itself as well as the meshes of all proxies
(clothing, hair, musculature, eyes, etc.)"""
return [o.mesh for o in cls.objects]
@classproperty
def modifiers(cls):
"""List of modifers attached to the human. These are all macros as well as any
individual modifiers which have changed.
Returns
-------
list of: humanmodifier.Modifier
The macros and changed modifiers included in the human
"""
return [m for m in cls.human.modifiers if m.getValue() or m.isMacro()]
@classproperty
def default_modifiers(cls):
"""List of all the loaded modifiers, whether or not their default values have been changed.
        Returns
        -------
list of: humanmodifier.Modifier
The macros and changed modifiers included in the human
"""
return cls.human.modifiers
@classproperty
def proxies(cls):
"""List of proxies attached to the human.
Returns
-------
list of: proxy.Proxy
All proxies included in the human
"""
return cls.human.getProxies()
@classmethod
def update(cls):
"""Propagate changes to meshes and proxies"""
# For every mesh object except for the human (first object), update the
# mesh and corresponding proxy
# See https://github.com/makehumancommunity/makehuman/search?q=adaptproxytohuman
for obj in cls.human.getObjects()[1:]:
mesh = obj.getSeedMesh()
pxy = obj.getProxy()
# Update the proxy and fit to posed human
# args are (mesh, fit_to_posed = false) by default
pxy.update(mesh, True)
# Update the mesh
mesh.update()
@classmethod
def add_proxy(cls, proxypath : str, proxy_type : str = None):
"""Load a proxy (hair, nails, clothes, etc.) and apply it to the human
Parameters
----------
proxypath : str
Path to the proxy file on disk
proxy_type: str, optional
Proxy type, None by default
Can be automatically determined using path names, but otherwise
must be defined (this is a limitation of how makehuman handles
proxies)
"""
# Derived from work by @tomtom92 at the MH-Community forums
# See: http://www.makehumancommunity.org/forum/viewtopic.php?f=9&t=17182&sid=7c2e6843275d8c6c6e70288bc0a27ae9
# Get proxy type if none is given
if proxy_type is None:
proxy_type = cls.guess_proxy_type(proxypath)
# Load the proxy
pxy = proxy.loadProxy(cls.human, proxypath, type=proxy_type)
# Get the mesh and Object3D object from the proxy applied to the human
mesh, obj = pxy.loadMeshAndObject(cls.human)
# TODO is this next line needed?
mesh.setPickable(True)
# TODO Can this next line be deleted? The app isn't running
gui3d.app.addObject(obj)
# Fit the proxy mesh to the human
mesh2 = obj.getSeedMesh()
fit_to_posed = True
pxy.update(mesh2, fit_to_posed)
mesh2.update()
# Set the object to be subdivided if the human is subdivided
obj.setSubdivided(cls.human.isSubdivided())
# Set/add proxy based on type
if proxy_type == "eyes":
cls.human.setEyesProxy(pxy)
elif proxy_type == "clothes":
cls.human.addClothesProxy(pxy)
elif proxy_type == "eyebrows":
cls.human.setEyebrowsProxy(pxy)
elif proxy_type == "eyelashes":
cls.human.setEyelashesProxy(pxy)
elif proxy_type == "hair":
cls.human.setHairProxy(pxy)
else:
# Body proxies (musculature, etc)
cls.human.setProxy(pxy)
vertsMask = np.ones(cls.human.meshData.getVertexCount(), dtype=bool)
proxyVertMask = proxy.transferVertexMaskToProxy(vertsMask, pxy)
# Apply accumulated mask from previous layers on this proxy
obj.changeVertexMask(proxyVertMask)
# Delete masked vertices
# TODO add toggle for this feature in UI
# verts = np.argwhere(pxy.deleteVerts)[..., 0]
# vertsMask[verts] = False
# cls.human.changeVertexMask(vertsMask)
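    # Hypothetical usage sketch for add_proxy (the asset path is illustrative):
    #
    #   MHCaller.add_proxy(data_path("hair/afro01/afro01.mhpxy"), "hair")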
Proxy = TypeVar("Proxy")
@classmethod
def remove_proxy(cls, proxy: Proxy):
"""Removes a proxy from the human. Executes a particular method for removal
based on proxy type.
Parameters
----------
proxy : proxy.Proxy
The Makehuman proxy to remove from the human
"""
proxy_type = proxy.type.lower()
# Use MakeHuman internal methods to remove proxy based on type
if proxy_type == "eyes":
cls.human.setEyesProxy(None)
elif proxy_type == "clothes":
cls.human.removeClothesProxy(proxy.uuid)
elif proxy_type == "eyebrows":
cls.human.setEyebrowsProxy(None)
elif proxy_type == "eyelashes":
cls.human.setEyelashesProxy(None)
elif proxy_type == "hair":
cls.human.setHairProxy(None)
else:
# Body proxies (musculature, etc)
cls.human.setProxy(None)
@classmethod
def clear_proxies(cls):
"""Removes all proxies from the human"""
for pxy in cls.proxies:
cls.remove_proxy(pxy)
Skeleton = TypeVar("Skeleton")
@classmethod
def remove_item(cls, item : Union[Skeleton, Proxy]):
"""Removes a Makehuman asset from the human. Assets include Skeletons
as well as proxies. Determines removal method based on asset object type.
Parameters
----------
item : Union[Skeleton,Proxy]
Makehuman skeleton or proxy to remove from the human
"""
if isinstance(item, proxy.Proxy):
cls.remove_proxy(item)
else:
return
@classmethod
def add_item(cls, path : str):
"""Add a Makehuman asset (skeleton or proxy) to the human.
Parameters
----------
path : str
Path to the asset on disk
"""
if "mhpxy" in path or "mhclo" in path:
cls.add_proxy(path)
elif "mhskel" in path:
cls.set_skel(path)
@classmethod
def set_skel(cls, path : str):
"""Change the skeleton applied to the human. Loads a skeleton from disk.
The skeleton position can be used to drive the human position in the scene.
Parameters
----------
path : str
The path to the skeleton to load from disk
"""
# Load skeleton from path
skel = skeleton.load(path, cls.human.meshData)
# Build skeleton weights based on base skeleton
skel.autoBuildWeightReferences(cls.base_skel)
# Set the skeleton and update the human
cls.human.setSkeleton(skel)
cls.human.applyAllTargets()
# Return the skeleton object
return skel
@classmethod
def guess_proxy_type(cls, path : str):
"""Guesses a proxy's type based on the path from which it is loaded.
Parameters
----------
path : str
The path to the proxy on disk
Returns
-------
Union[str,None]
The proxy type, or none if the type could not be determined
"""
proxy_types = ("eyes", "clothes", "eyebrows", "eyelashes", "hair")
for type in proxy_types:
if type in path:
return type
return None
@classmethod
def set_tpose(cls):
"""Sets the human to the T-Pose"""
# Load the T-Pose BVH file
        filepath = data_path('poses/tpose.bvh')
bvh_file = bvh.load(filepath, convertFromZUp="auto")
# Create an animation track from the BVH file
anim = bvh_file.createAnimationTrack(cls.human.getBaseSkeleton())
# Add the animation to the human
cls.human.addAnimation(anim)
# Set the active animation to the T-Pose
cls.human.setActiveAnimation(anim.name)
# Refresh the human pose
cls.human.refreshPose()
return
# Create an instance of MHCaller when imported
MHCaller()
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/extension.py
import omni.ext
import omni.ui as ui
import carb
import carb.events
import omni
from functools import partial
import asyncio
import omni.usd
from pxr import Usd
from typing import Union
from .window import MHWindow, WINDOW_TITLE, MENU_PATH
class MakeHumanExtension(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query additional information, like where
# this extension is located on filesystem.
def on_startup(self, ext_id):
# subscribe to stage events
# see https://github.com/mtw75/kit_customdata_view
_usd_context = omni.usd.get_context()
self._selection = _usd_context.get_selection()
self._human_selection_event = carb.events.type_from_string("siborg.create.human.human_selected")
# subscribe to stage events
self._events = _usd_context.get_stage_event_stream()
self._stage_event_sub = self._events.create_subscription_to_push(
self._on_stage_event,
            name='human selection changed',
)
# get message bus event stream so we can push events to the message bus
self._bus = omni.kit.app.get_app().get_message_bus_event_stream()
ui.Workspace.set_show_window_fn(WINDOW_TITLE, partial(self.show_window, None))
# create a menu item to open the window
editor_menu = omni.kit.ui.get_editor_menu()
if editor_menu:
self._menu = editor_menu.add_item(
MENU_PATH, self.show_window, toggle=True, value=True
)
# show the window
ui.Workspace.show_window(WINDOW_TITLE)
print("[siborg.create.human] HumanGeneratorExtension startup")
def on_shutdown(self):
self._menu = None
if self._window:
self._window.destroy()
self._window = None
# Deregister the function that shows the window from omni.ui
ui.Workspace.set_show_window_fn(WINDOW_TITLE, None)
async def _destroy_window_async(self):
# wait one frame, this is due to the one frame defer
# in Window::_moveToMainOSWindow()
await omni.kit.app.get_app().next_update_async()
if self._window:
self._window.destroy()
self._window = None
def visibility_changed(self, visible):
# Called when window closed by user
editor_menu = omni.kit.ui.get_editor_menu()
# Update the menu item to reflect the window state
if editor_menu:
editor_menu.set_value(MENU_PATH, visible)
if not visible:
# Destroy the window, since we are creating new window
# in show_window
asyncio.ensure_future(self._destroy_window_async())
def show_window(self, menu, value):
"""Handles showing and hiding the window"""
if value:
self._window = MHWindow(WINDOW_TITLE)
            # Dock window wherever the "Content" tab is found (bottom panel by default)
self._window.deferred_dock_in("Content", ui.DockPolicy.CURRENT_WINDOW_IS_ACTIVE)
self._window.set_visibility_changed_fn(self.visibility_changed)
elif self._window:
self._window.visible = False
def _on_stage_event(self, event):
"""Handles stage events. This is where we get notified when the user selects/deselects a prim in the viewport."""
if event.type == int(omni.usd.StageEventType.SELECTION_CHANGED):
# Get the current selection
selection = self._selection.get_selected_prim_paths()
# Check if the selection is empty
if not selection:
# Push an event to the message bus with "None" as a payload
# This event will be picked up by the window and used to update the UI
carb.log_warn("Human deselected")
self._bus.push(self._human_selection_event, payload={"prim_path": None})
else:
# Get the stage
_usd_context = omni.usd.get_context()
stage = _usd_context.get_stage()
if selection and stage:
if len(selection) > 0:
path = selection[-1]
print(path)
prim = stage.GetPrimAtPath(path)
prim = self._get_typed_parent(prim, "SkelRoot")
# If the selection is a human, push an event to the event stream with the prim as a payload
# This event will be picked up by the window and used to update the UI
if prim and prim.GetCustomDataByKey("human"):
# carb.log_warn("Human selected")
path = prim.GetPath().pathString
self._bus.push(self._human_selection_event, payload={"prim_path": path})
else:
# carb.log_warn("Human deselected")
self._bus.push(self._human_selection_event, payload={"prim_path": None})
def _get_typed_parent(self, prim: Union[Usd.Prim, None], type_name: str, level: int = 5):
"""Returns the first parent of the given prim with the given type name. If no parent is found, returns None.
Parameters:
-----------
prim : Usd.Prim or None
The prim to search from. If None, returns None.
type_name : str
The parent type name to search for
level : int
The maximum number of levels to traverse. Defaults to 5.
Returns:
--------
Usd.Prim
The first parent of the given prim with the given type name. If no match is found, returns None.
"""
if (not prim) or level == 0:
return None
elif prim and prim.GetTypeName() == type_name:
return prim
else:
return self._get_typed_parent(prim.GetParent(), type_name, level - 1)
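    # Hypothetical usage sketch: walk up from a selected mesh prim to its
    # enclosing SkelRoot (returns None if none is found within 5 levels):
    #
    #   skel_root = self._get_typed_parent(mesh_prim, "SkelRoot")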
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/skeleton.py
from pxr import Usd, Gf, UsdSkel
from typing import List
import numpy as np
from .shared import sanitize
from .mhcaller import skeleton as mhskel
from .mhcaller import MHCaller
class Bone:
"""Bone which constitutes skeletons to be imported using the HumanGenerator
extension. Has a parent and children, transforms in space, and named joints
at the head and tail.
Attributes
----------
name : str
Human-readable bone name.
"""
def __init__(self, skel: 'Skeleton', name: str, parent: str, head: str, tail: str) -> None:
"""Create a Bone instance
Parameters
----------
skel : Skeleton
Skeleton to which the bone belongs
name : str
Name of the bone
parent : str
Name of the parent bone. This is the bone "above" and is one level closer to
the root of the skeleton
head : str
Name of the head joint
tail : str
Name of the tail joint
"""
self._mh_bone = mhskel.Bone(skel, name, parent, head, tail)
self.name = name
self.skeleton = skel
self.headJoint = head
self.tailJoint = tail
    def getRelativeMatrix(self, offset: List[float] = [0, 0, 0]) -> np.ndarray:
        """Get this bone's transform matrix relative to its parent bone
        Parameters
        ----------
        offset : List[float], optional
            Geometric translation to apply, by default [0, 0, 0]
        Returns
        -------
        np.ndarray
            4x4 transform matrix relative to the parent bone
        """
        return self._mh_bone.getRelativeMatrix(offset)
    def getRestMatrix(self, offset: List[float] = [0, 0, 0]) -> np.ndarray:
        """Get this bone's rest-pose transform matrix
        Parameters
        ----------
        offset : List[float], optional
            Geometric translation to apply, by default [0, 0, 0]
        Returns
        -------
        np.ndarray
            4x4 rest-pose transform matrix
        """
        return self._mh_bone.getRestMatrix(offset)
    def getBindMatrix(self, offset: List[float] = [0, 0, 0]) -> np.ndarray:
        """Get this bone's bind matrix for skinning. MakeHuman's getBindMatrix()
        returns a tuple; the second element (the transposed bind matrix, suitable
        for row-major USD) is used here
        Parameters
        ----------
        offset : List[float], optional
            Geometric translation to apply, by default [0, 0, 0]
        Returns
        -------
        np.ndarray
            4x4 bind transform matrix
        """
        return self._mh_bone.getBindMatrix(offset)[1]
class Skeleton:
"""Skeleton which can be imported using the HumanGenerator extension. Provides
root bone(s), which have a tree of children that can be traversed to get the data
for the entire rig.
Attributes
----------
name : str
Name of the skeleton rig, by default "Skeleton"
roots : list of Bone
Root bones. Bones which have children that can be traversed to constitute the
entire skeleton.
joint_paths : list of: str
Paths to joints in the stage hierarchy that are used as joint indices
joint_names : list of: str
List of joint names in USD (breadth-first traversal) order. It is
important that joints be ordered this way so that their indices can be
used for skinning / weighting.
"""
def __init__(self, name="Skeleton") -> None:
"""Create a skeleton instance
Parameters
----------
name : str, optional
Name of the skeleton, by default "Skeleton"
"""
# Set the skeleton to the makehuman default
        self._mh_skeleton = MHCaller.human.getSkeleton()
        self._rel_transforms = []
        self._bind_transforms = []
        self.roots = self._mh_skeleton.roots
self.joint_paths = []
self.joint_names = []
self.name = name
def addBone(self, name: str, parent: str, head: str, tail: str) -> Bone:
"""Add a new bone to the Skeleton
Parameters
----------
name : str
Name of the new bone
parent : str
Name of the parent bone under which to put the new bone
head : str
Name of the joint at the head of the new bone
tail : str
Name of the joint at the tail of the new bone
Returns
-------
Bone
The bone which has been added to the skeleton
"""
_bone = Bone(self, name, parent, head, tail)
# HACK Bone() creates a new Bone for _mh_bone by default. How can we
# avoid doing this twice without revealing it to the user?
_bone._mh_bone = self._mh_skeleton.addBone(name, parent, head, tail)
return _bone
def add_to_stage(self, stage: Usd.Stage, skel_root_path: str, offset: List[float] = [0, 0, 0], new_root_bone: bool = False):
"""Adds the skeleton to the USD stage
Parameters
----------
stage : Usd.Stage
Stage in which to create skeleton prims
        skel_root_path : str
Path to the human root prim in the stage
offset : List[float], optional
Geometric translation to apply, by default [0, 0, 0]
new_root_bone : bool, optional
Whether or not to prepend a new root at the origin, by default False
"""
root_bone = self.roots[0]
if new_root_bone:
root_bone = self.prepend_root(root_bone)
self.setup_skeleton(root_bone, offset=offset)
skeleton_path = skel_root_path + "/Skeleton"
usdSkel = UsdSkel.Skeleton.Define(stage, skeleton_path)
# add joints to skeleton by path
attribute = usdSkel.GetJointsAttr()
# exclude root
attribute.Set(self.joint_paths)
# Add bind transforms to skeleton
usdSkel.CreateBindTransformsAttr(self._bind_transforms)
# setup rest transforms in joint-local space
usdSkel.CreateRestTransformsAttr(self._rel_transforms)
return usdSkel
def prepend_root(self, oldRoot: Bone, newroot_name: str = "RootJoint", offset: List[float] = [0, 0, 0]) -> Bone:
"""Adds a new root bone to the head of a skeleton, ahead of the existing root bone.
Parameters
----------
oldRoot : Bone
The original MakeHuman root bone
newroot_name : str, optional
The name for the new root bone, by default "RootJoint"
offset : List[float], optional
Geometric translation to apply, by default [0, 0, 0]
Returns
-------
newRoot : Bone
The new root bone of the Skeleton
"""
# make a "super-root" bone, parent to the root, with identity transforms so
# we can abide by Lina Halper's animation retargeting guidelines:
# https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_animation-retargeting.html
newRoot = self.addBone(
newroot_name, None, "newRoot_head", oldRoot.tailJoint)
oldRoot.parent = newRoot
newRoot.headPos -= offset
newRoot.build()
newRoot.children.append(oldRoot)
return newRoot
def _process_bone(self, bone: Bone, path: str, offset: List[float] = [0, 0, 0]) -> None:
"""Get the name, path, relative transform, and bind transform of a joint
and add its values to the lists of stored values
Parameters
----------
bone : Bone
The bone to process for Usd
path : str
Path to the parent of this bone
offset : List[float], optional
Geometric translation to apply, by default [0, 0, 0]
"""
# sanitize the name for USD paths
name = sanitize(bone.name)
path += name
self.joint_paths.append(path)
# store original name for later joint weighting
self.joint_names.append(bone.name)
# Get matrix for joint transform relative to its parent. Move to offset
# to match mesh transform in scene
relxform = bone.getRelativeMatrix(offsetVect=offset)
# Transpose the matrix as USD stores transforms in row-major format
relxform = relxform.transpose()
# Convert type for USD and store
relative_transform = Gf.Matrix4d(relxform.tolist())
self._rel_transforms.append(relative_transform)
# Get matrix which represents a joints transform in its binding position
# for binding to a mesh. Move to offset to match mesh transform.
# getBindMatrix() returns a tuple of the bind matrix and the bindinv
# matrix. Since omniverse uses row-major format, we can just use the
# already transposed bind matrix.
bxform = bone.getBindMatrix(offsetVect=offset)
# Convert type for USD and store
bind_transform = Gf.Matrix4d(bxform[1].tolist())
# bind_transform = Gf.Matrix4d().SetIdentity() TODO remove
self._bind_transforms.append(bind_transform)
def setup_skeleton(self, bone: Bone, offset: List[float] = [0, 0, 0]) -> None:
"""Traverse the imported skeleton and get the data for each bone for
adding to the stage
Parameters
----------
bone : Bone
The root bone at which to start traversing the imported skeleton.
offset : List[float], optional
Geometric translation to apply, by default [0, 0, 0]
"""
# Setup a breadth-first search of our skeleton as a tree
# Use the new root of the imported skeleton as the root bone of our tree
visited = [] # List to keep track of visited bones.
queue = [] # Initialize a queue
path_queue = [] # Keep track of paths in a parallel queue
visited.append(bone)
queue.append(bone)
name = sanitize(bone.name)
path_queue.append(name + "/")
# joints are relative to the root, so we don't prepend a path for the root
self._process_bone(bone, "", offset=offset)
# Traverse skeleton (breadth-first) and store joint data
while queue:
v = queue.pop(0)
path = path_queue.pop(0)
for neighbor in v.children:
if neighbor not in visited:
visited.append(neighbor)
queue.append(neighbor)
name = sanitize(neighbor.name)
path_queue.append(path + name + "/")
self._process_bone(neighbor, path, offset)
def update_in_scene(self, stage: Usd.Stage, skel_root_path: str, offset: List[float] = [0, 0, 0]):
"""Resets the skeleton values in the stage, updates the skeleton from makehuman.
Parameters
----------
stage : Usd.Stage
The stage in which to update the skeleton
skel_root_path : str
The path to the skeleton root in the stage
offset : List[float], optional
Geometric translation to apply, by default [0, 0, 0]
Returns
-------
UsdSkel.Skeleton
The updated skeleton in USD
"""
# Get the skeleton from makehuman
        self._mh_skeleton = MHCaller.human.getSkeleton()
# Clear out any existing data
self._rel_transforms = []
self._bind_transforms = []
self.joint_paths = []
self.joint_names = []
# Get the root bone(s) of the skeleton
        self.roots = self._mh_skeleton.roots
# Overwrite the skeleton in the stage with the new skeleton
return self.add_to_stage(stage, skel_root_path, offset)
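# A minimal usage sketch (stage and paths are illustrative):
#
#   skel = Skeleton()
#   usd_skel = skel.add_to_stage(stage, "/World/human", offset=[0, 0, 0])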
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/__init__.py
from .extension import *
from .human import Human
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/materials.py
from typing import List
from module3d import Object3D
from pxr import Usd, UsdGeom, UsdShade, Sdf
def get_mesh_texture(mh_mesh: Object3D):
"""Gets mesh diffuse texture from a Makehuman mesh object
Parameters
----------
mh_mesh : Object3D
A Makehuman mesh object. Contains path to bound material/textures
Returns
-------
Tuple (str,str)
Returns the path to a texture on disk, and a name for the texture
Returns (None, None) if no texture exists
"""
# TODO return additional maps (AO, roughness, normals, etc)
material = mh_mesh.material
texture = material.diffuseTexture
name = material.name
if texture:
return texture, name
else:
return (None, None)
def create_material(diffuse_image_path: str, name: str, root_path: str, stage: Usd.Stage):
"""Create OmniPBR Material with specified diffuse texture
Parameters
----------
diffuse_image_path : str
Path to diffuse texture on disk
name : str
Material name
root_path : str
Root path under which to place material scope
stage : Usd.Stage
USD stage into which to add the material
Returns
-------
UsdShade.Material
Material with diffuse texture applied
"""
materialScopePath = root_path + "/Materials"
# Check for a scope in which to keep materials. If it doesn't exist, make
# one
scopePrim = stage.GetPrimAtPath(materialScopePath)
    if not scopePrim.IsValid():
UsdGeom.Scope.Define(stage, materialScopePath)
# Create material (omniPBR).
materialPath = materialScopePath + "/" + name
material = UsdShade.Material.Define(stage, materialPath)
# Store shaders inside their respective material path
shaderPath = materialPath + "/Shader"
# Create shader
shader = UsdShade.Shader.Define(stage, shaderPath)
# Use OmniPBR as a source to define our shader
shader.SetSourceAsset("OmniPBR.mdl", "mdl")
shader.GetPrim().CreateAttribute(
"info:mdl:sourceAsset:subIdentifier",
Sdf.ValueTypeNames.Token,
False,
Sdf.VariabilityUniform,
).Set("OmniPBR")
# Set Diffuse texture.
diffTexIn = shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset)
diffTexIn.Set(diffuse_image_path)
diffTexIn.GetAttr().SetColorSpace("sRGB")
# Set Diffuse value. TODO make default color NVIDIA Green
# diffTintIn = shader.CreateInput("diffuse_tint", Sdf.ValueTypeNames.Color3f)
# diffTintIn.Set((0.9, 0.9, 0.9))
# Connect Material to Shader.
mdlOutput = material.CreateSurfaceOutput("mdl")
mdlOutput.ConnectToSource(shader, "out")
return material
def bind_material(mesh_path: Sdf.Path, material: UsdShade.Material, stage: Usd.Stage):
"""Bind a material to a mesh
Parameters
----------
mesh_path : Sdf.Path
The USD formatted path to a mesh prim
material : UsdShade.Material
USD material object
stage : Usd.Stage
Stage in which to find mesh prim
"""
# Get the mesh prim
meshPrim = stage.GetPrimAtPath(mesh_path)
# Bind the mesh
    UsdShade.MaterialBindingAPI(meshPrim).Bind(material)
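# Sketch of the full texture flow above (variables are illustrative):
#
#   texture, name = get_mesh_texture(mh_mesh)
#   if texture:
#       material = create_material(texture, name, "/World/human", stage)
#       bind_material(mesh_path, material, stage)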
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/ext_ui.py
import omni.ui as ui
from typing import List, TypeVar, Union, Callable
from dataclasses import dataclass, field
from . import styles
from .mhcaller import MHCaller
from pxr import Usd
import os
import inspect
import makehuman
import targets
from siborg.create.human.shared import data_path
class SliderEntry:
"""Custom UI element that encapsulates a labeled slider and field
Attributes
----------
label : str
Label to display for slider/field
model : ui.SimpleFloatModel
Model to publish changes to
fn : object
Function to run when updating the human after changes are made
image: str
Path on disk to an image to display
step : float
Division between values for the slider
min : float
Minimum value
max : float
Maximum value
default : float
Default parameter value
"""
def __init__(
self,
label: str,
model: ui.SimpleFloatModel,
fn: object,
image: str = None,
step: float = 0.01,
min: float = None,
max: float = None,
default: float = 0,
):
"""Constructs an instance of SliderEntry
Parameters
----------
label : str
Label to display for slider/field
model : ui.SimpleFloatModel
Model to publish changes to
fn : object
Function to run when changes are made
image: str, optional
Path on disk to an image to display. By default None
step : float, optional
Division between values for the slider, by default 0.01
min : float, optional
Minimum value, by default None
max : float, optional
Maximum value, by default None
default : float, optional
Default parameter value, by default 0
"""
self.label = label
self.model = model
self.fn = fn
self.step = step
self.min = min
self.max = max
self.default = default
self.image = image
self._build_widget()
def _build_widget(self):
"""Construct the UI elements"""
with ui.HStack(height=0, style=styles.sliderentry_style):
# If an image is available, display it
if self.image:
ui.Image(self.image, height=75, style={"border_radius": 5})
# Stack the label and slider on top of each other
with ui.VStack(spacing = 5):
ui.Label(
self.label,
height=15,
alignment=ui.Alignment.CENTER,
name="label_param",
)
# create a floatdrag (can be used as a slider or an entry field) to
# input parameter values
self.drag = ui.FloatDrag(model=self.model, step=self.step)
# Limit drag values to within min and max if provided
if self.min is not None:
self.drag.min = self.min
if self.max is not None:
self.drag.max = self.max
@dataclass
class Param:
"""Dataclass to store SliderEntry parameter data
Attributes
----------
name: str
The name of the parameter. Used for labeling.
full_name: str
The full name of the parameter. Used for referencing
fn: object
The method to execute when making changes to the parameter
image: str, optional
The path to the image to use for labeling. By default None
min: float, optional
The minimum allowed value of the parameter. By default 0
    max: float, optional
        The maximum allowed value of the parameter. By default 1
    default: float, optional
        The default value of the parameter. By default 0.5
value : ui.SimpleFloatModel
The model to track the current value of the parameter. By default None
"""
name: str
full_name: str
fn: object
image: str = None
min: float = 0
max: float = 1
default: float = 0.5
value: ui.SimpleFloatModel = None
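# Hypothetical sketch of constructing a Param by hand (the callback and
# bounds are illustrative):
#
#   age_param = Param("Age", "macrodetails/Age", MHCaller.human.setAge,
#                     min=0, max=1, default=0.5)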
class SliderEntryPanelModel:
"""Provides a model for referencing SliderEntryPanel data. References models
for each individual SliderEntry widget in the SliderEntryPanel widget.
Attributes
----------
params : list of `Param`
List of parameter objects. Each contains a float model to track the current value
toggle : ui.SimpleBoolModel
Tracks whether or not the human should update immediately when changes are made
instant_update : Callable
A function to call when instant update is toggled
subscriptions : list of `Subscription`
List of event subscriptions triggered by editing a SliderEntry
"""
def __init__(self, params: List[Param], toggle: ui.SimpleBoolModel = None, instant_update: Callable = None):
"""Constructs an instance of SliderEntryPanelModel and instantiates models
to hold parameter data for individual SliderEntries
Parameters
----------
params : list of `Param`
A list of parameter objects, each of which contains the data to create
a SliderEntry widget and a model to track the current value
toggle : ui.SimpleBoolModel, optional
Tracks whether or not the human should update immediately when changes are made, by default None
instant_update : Callable
A function to call when instant update is toggled
"""
self.params = []
"""Param objects corresponding to each SliderEntry widget"""
self.changed_params = []
"""Params o SliderEntry widgets that have been changed"""
self.toggle = toggle
self.instant_update = instant_update
self.subscriptions = []
"""List of event subscriptions triggered by editing a SliderEntry"""
for p in params:
self.add_param(p)
def add_param(self, param: Param):
"""Adds a parameter to the SliderEntryPanelModel. Subscribes to the parameter's model
to check for editing changes
Parameters
----------
param : Param
The Parameter object from which to create the subscription
"""
# Create a model to track the current value of the parameter. Set the value to the default
param.value = ui.SimpleFloatModel(param.default)
# Add the parameter to the list of parameters
self.params.append(param)
# Subscribe to changes in parameter editing
self.subscriptions.append(
param.value.subscribe_end_edit_fn(
lambda m: self._sanitize_and_run(param))
)
def reset(self):
"""Resets the values of each floatmodel to parameter default for UI reset
"""
for param in self.params:
param.value.set_value(param.default)
def _sanitize_and_run(self, param: Param):
"""Make sure that values are within an acceptable range and then add the parameter to the
list of changed parameters
Parameters
----------
param : Param
Parameter object which contains acceptable value bounds and
references the function to run
"""
m = param.value
# Get the value from the slider model
getval = m.get_value_as_float
# Set the value to the min or max if it goes under or over respectively
if getval() < param.min:
m.set_value(param.min)
if getval() > param.max:
m.set_value(param.max)
# Check if the parameter is already in the list of changed parameters. If so, remove it.
# Then, add the parameter to the list of changed parameters
if param in self.changed_params:
self.changed_params.remove(param)
self.changed_params.append(param)
# If instant update is toggled on, add the changes to the stage instantly
if self.toggle.get_value_as_bool():
# Apply the changes
self.apply_changes()
# Run the instant update function
self.instant_update()
def apply_changes(self):
"""Apply the changes made to the parameters. Runs the function associated with each
parameter using the value from the widget
"""
for param in self.changed_params:
param.fn(param.value.get_value_as_float())
# Clear the list of changed parameters
self.changed_params = []
def destroy(self):
"""Destroys the instance of SliderEntryPanelModel. Deletes event
subscriptions. Important for preventing zombie-UI and unintended behavior
when the extension is reloaded.
"""
self.subscriptions = None
class SliderEntryPanel:
"""A UI widget providing a labeled group of slider entries
Attributes
----------
model : SliderEntryPanelModel
Model to hold parameters for each slider
label : str
Display title for the group. Can be none if no title is desired.
"""
def __init__(self, model: SliderEntryPanelModel, label: str = None):
"""
Parameters
----------
model : SliderEntryPanelModel
Model to hold parameters
label : str, Optional
Display title for the group, by default None
"""
self.label = label
self.model = model
self._build_widget()
def _build_widget(self):
"""Construct the UI elements"""
# Layer widgets on top of a rectangle to create a group frame
with ui.ZStack(style=styles.panel_style, height=0):
ui.Rectangle(name="group_rect")
with ui.VStack(name="contents", spacing = 8):
# If the panel has a label, show it
if self.label:
ui.Label(self.label, height=0)
# Create a slider entry for each parameter
for param in self.model.params:
SliderEntry(
param.name,
param.value,
param.fn,
image=param.image,
min=param.min,
max=param.max,
default=param.default,
)
def destroy(self):
"""Destroys the instance of SliderEntryPanel. Executes the destructor of
the SliderEntryPanel's SliderEntryPanelModel instance.
"""
self.model.destroy()
class ParamPanelModel(ui.AbstractItemModel):
def __init__(self, toggle: ui.SimpleBoolModel, **kwargs):
"""Constructs an instance of ParamPanelModel, which stores data for a ParamPanel.
Parameters
----------
toggle : ui.SimpleBoolModel
Model to track whether changes should be instant
"""
super().__init__(**kwargs)
# model to track whether changes should be instant
self.toggle = toggle
# Reference to models for each modifier/parameter. The models store modifier
# data for reference in the UI, and track the values of the sliders
self.models = []
class ParamPanel(ui.Frame):
"""UI Widget for displaying and modifying human parameters
Attributes
----------
model : ParamPanelModel
Stores data for the panel
toggle : ui.SimpleBoolModel
Model to track whether changes should be instant
models : list of SliderEntryPanelModel
Models for each group of parameter sliders
"""
def __init__(self, model: ParamPanelModel, instant_update : Callable = None, **kwargs):
"""Constructs an instance of ParamPanel. Panel contains a scrollable list of collapseable groups. These include
a group of macros (which affect multiple modifiers simultaneously), as well as groups of modifiers for
different body parts. Each modifier can be adjusted using a slider or doubleclicking to enter values directly.
Values are restricted based on the limits of a particular modifier.
Parameters
----------
model: ParamPanelModel
Stores data for the panel. Contains a toggle model to track whether changes should be instant
instant_update : Callable
Function to call when a parameter is changed (if instant update is toggle on)
"""
# Subclassing ui.Frame allows us to use styling on the whole widget
super().__init__(**kwargs)
self.model = model
self.toggle = model.toggle
# If no instant update function is passed, use a dummy function and do nothing
self.instant_update = instant_update if instant_update else lambda *args: None
self.models = model.models
self.set_build_fn(self._build_widget)
def _build_widget(self):
"""Build widget UI
"""
Modifier = TypeVar('Modifier')
def modifier_param(m: Modifier):
"""Generate a parameter data object from a human modifier,
Parameters
----------
m : Modifier
Makehuman Human modifier object. Represents a set of targets to apply to the human when modifying
Returns
-------
Param
Parameter data object holding all the modifier data needed to build UI elements
"""
# print(m.name)
# Guess a suitable title from the modifier name
tlabel = m.name.split("-")
if "|" in tlabel[len(tlabel) - 1]:
tlabel = tlabel[:-1]
if len(tlabel) > 1 and tlabel[0] == m.groupName:
label = tlabel[1:]
else:
label = tlabel
label = " ".join([word.capitalize() for word in label])
# Guess a suitable image path from modifier name
tlabel = m.name.replace("|", "-").split("-")
image = modifier_image(("%s.png" % "-".join(tlabel)).lower())
# Store modifier info in dataclass for building UI elements
return Param(
label,
m.fullName,
m.updateValue,
image=image,
min=m.getMin(),
max=m.getMax(),
default=m.getDefaultValue(),
)
def group_params(group: str):
"""Creates a list of parameters for all the modifiers in the given group
Parameters
----------
group : str
                The name of a modifier group
Returns
-------
List of Param
A list of all the parameters built from modifiers in the group
"""
params = [modifier_param(m)
for m in MHCaller.human.getModifiersByGroup(group)]
return params
def build_macro_frame():
"""Builds UI widget for the group of macro modifiers (which affect multiple individual modifiers
simultaneously). This includes:
+ Gender
+ Age
+ Muscle
+ Weight
+ Height
+ Proportions
Parameters that affect how much the human resembles a particular racial group:
+ African
+ Asian
+ Caucasian
"""
# Shorten human reference for convenience
human = MHCaller.human
# Explicitly create parameters for panel of macros (general modifiers that
# affect a group of targets). Otherwise these look bad. Creates a nice
# panel to have open by default
macro_params = (
Param("Gender", "macrodetails/Gender", human.setGender),
Param("Age", "macrodetails/Age", human.setAge),
Param("Muscle", "macrodetails-universal/Muscle", human.setMuscle),
Param("Weight", "macrodetails-universal/Weight", human.setWeight),
Param("Height", "macrodetails-height/Height", human.setHeight),
Param("Proportions", "macrodetails-proportions/BodyProportions", human.setBodyProportions),
)
# Create a model for storing macro parameter data
macro_model = SliderEntryPanelModel(macro_params, self.toggle, self.instant_update)
# Separate set of race parameters to also be included in the Macros group
# TODO make race parameters automatically normalize in UI
race_params = (
Param("African", "macrodetails/African", human.setAfrican),
Param("Asian", "macrodetails/Asian", human.setAsian),
Param("Caucasian", "macrodetails/Caucasian", human.setCaucasian),
)
# Create a model for storing race parameter data
race_model = SliderEntryPanelModel(race_params, self.toggle, self.instant_update)
self.models.append(macro_model)
self.models.append(race_model)
# Create category widget for macros
with ui.CollapsableFrame("Macros", style=styles.frame_style, height=0, collapsed=True):
with ui.VStack():
# Create panels for macros and race
self.panels = (
SliderEntryPanel(macro_model, label="General"),
SliderEntryPanel(race_model, label="Race"),
)
# The scrollable list of modifiers
with ui.ScrollingFrame():
with ui.VStack():
# Add the macros frame first
build_macro_frame()
# Create a set of all modifier groups that include macros
macrogroups = [
g for g in MHCaller.human.modifierGroups if "macrodetails" in g]
macrogroups = set(macrogroups)
# Remove macro groups from list of modifier groups as we have already
# included them explicitly
allgroups = set(
MHCaller.human.modifierGroups).difference(macrogroups)
for group in allgroups:
                # Create a collapsible frame for each modifier group
with ui.CollapsableFrame(group.capitalize(), style=styles.frame_style, collapsed=True):
# Model to hold panel parameters
model = SliderEntryPanelModel(
group_params(group), self.toggle,self.instant_update)
self.models.append(model)
# Create panel of slider entries for modifier group
SliderEntryPanel(model)
def reset(self):
"""Reset every SliderEntryPanel to set UI values to defaults
"""
for model in self.models:
model.reset()
def load_values(self, human_prim: Usd.Prim):
"""Load values from the human prim into the UI. Specifically, this function
loads the values of the modifiers from the prim and updates any which
have changed.
Parameters
----------
        human_prim : Usd.Prim
The USD prim representing the human
"""
        # Make sure the prim exists
if not human_prim.IsValid():
return
# Reset the UI to defaults
self.reset()
        # Get the data from the prim
        humandata = human_prim.GetCustomData()
        modifiers = humandata.get("Modifiers")
        # Bail out if the prim carries no modifier data
        if not modifiers:
            return
        # Set any changed values in the models
        for panel_model in self.models:
            for param in panel_model.params:
                if param.full_name in modifiers:
                    param.value.set_value(modifiers[param.full_name])
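        # Illustrative layout (assumed from the reads above) of the custom data
        # this method consumes; keys are modifier full names:
        #   human_prim.SetCustomDataByKey("Modifiers",
        #       {"macrodetails/Gender": 0.5, "macrodetails/Age": 0.25})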
def update_models(self):
"""Update all models"""
for model in self.models:
model.apply_changes()
def destroy(self):
"""Destroys the ParamPanel instance as well as the models attached to each group of parameters
"""
super().destroy()
for model in self.models:
model.destroy()
class NoSelectionNotification:
"""
    When no human is selected, show a notification.
"""
def __init__(self):
self._container = ui.ZStack()
with self._container:
ui.Rectangle()
with ui.VStack(spacing=10):
ui.Spacer(height=10)
with ui.HStack(height=0):
ui.Spacer()
ui.ImageWithProvider(
data_path('human_icon.png'),
width=192,
height=192,
fill_policy=ui.IwpFillPolicy.IWP_PRESERVE_ASPECT_FIT
)
ui.Spacer()
self._message_label = ui.Label(
"No human is current selected.",
height=0,
alignment=ui.Alignment.CENTER
)
self._suggestion_label = ui.Label(
"Select a human prim to see its properties here.",
height=0,
alignment=ui.Alignment.CENTER
)
@property
def visible(self) -> bool:
return self._container.visible
@visible.setter
def visible(self, value) -> None:
self._container.visible = value
def set_message(self, message: str) -> None:
messages = message.split("\n")
self._message_label.text = messages[0]
self._suggestion_label.text = messages[1]
def modifier_image(name : str):
"""Guess the path to a modifier's corresponding image on disk based on the name
of the modifier. Useful for building UI for list of modifiers.
Parameters
----------
name : str
Name of the modifier
Returns
-------
str
The path to the image on disk
"""
if name is None:
# If no modifier name is provided, we can't guess the file name
return None
name = name.lower()
# Return the modifier path based on the modifier name
# TODO determine if images can be loaded from the Makehuman module stored in
# site-packages so we don't have to include the data twice
return os.path.join(os.path.dirname(inspect.getfile(makehuman)),targets.getTargets().images.get(name, name))
| 22,469 | Python | 35.359223 | 119 | 0.582936 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/shared.py | from pathlib import Path
import os
# Shared methods that are useful to several modules
def data_path(path):
"""Returns the absolute path of a path given relative to "exts/<omni.ext>/data"
Parameters
----------
path : str
Relative path
Returns
-------
str
Absolute path
"""
    # Build an absolute path by walking up from this file to the extension root, then into the data folder
data = os.path.join(str(Path(__file__).parents[3]), "data", path)
return data
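# Usage sketch: resolve an icon shipped under the extension's data folder
# ("human_icon.png" is an asset referenced elsewhere in this extension):
#   icon_path = data_path("human_icon.png")
#   assert os.path.isabs(icon_path)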
def sanitize(s: str):
"""Sanitize strings for use a prim names. Strips and replaces illegal
characters.
Parameters
----------
s : str
Input string
Returns
-------
s : str
Primpath-safe output string
"""
# List of illegal characters
# TODO create more comprehensive list
# TODO switch from blacklisting illegal characters to whitelisting valid ones
illegal = (".", "-")
for c in illegal:
# Replace illegal characters with underscores
s = s.replace(c, "_")
return s
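# Example: sanitize("human.body-mesh") -> "human_body_mesh"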
| 1,072 | Python | 21.829787 | 99 | 0.613806 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/window.py | from .ext_ui import ParamPanelModel, ParamPanel, NoSelectionNotification
from .browser import MHAssetBrowserModel, AssetBrowserFrame
from .human import Human
from .mhcaller import MHCaller
from .styles import window_style, button_style
import omni.ui as ui
import omni.kit.ui
import omni
import carb
WINDOW_TITLE = "Human Generator"
MENU_PATH = f"Window/{WINDOW_TITLE}"
class MHWindow(ui.Window):
"""
Main UI window. Contains all UI widgets. Extends omni.ui.Window.
Attributes
-----------
panel : HumanPanel
A widget that includes panels for modifiers, listing/removing applied
proxies, and executing human creation and updates
browser: AssetBrowserFrame
A browser for MakeHuman assets, including clothing, hair, and skeleton rigs.
"""
def __init__(self, title):
"""Constructs an instance of MHWindow
Parameters
----------
        title : str
            The title of the window
"""
super().__init__(title)
# Holds the state of the realtime toggle
self.toggle_model = ui.SimpleBoolModel()
# Holds the state of the parameter list
self.param_model = ParamPanelModel(self.toggle_model)
# Keep track of the human
self._human = Human()
# A model to hold browser data
self.browser_model = MHAssetBrowserModel(
self._human,
filter_file_suffixes=["mhpxy", "mhskel", "mhclo"],
timeout=carb.settings.get_settings().get(
"/exts/siborg.create.human.browser.asset/data/timeout"
),
)
# Subscribe to selection events on the message bus
bus = omni.kit.app.get_app().get_message_bus_event_stream()
selection_event = carb.events.type_from_string("siborg.create.human.human_selected")
self._selection_sub = bus.create_subscription_to_push_by_type(selection_event, self._on_selection_changed)
self.frame.set_build_fn(self._build_ui)
def _build_ui(self):
spacer_width = 3
with self.frame:
# Widgets are built starting on the left
with ui.HStack(style=window_style):
# Widget to show if no human is selected
self.no_selection_notification = NoSelectionNotification()
self.property_panel = ui.HStack(visible=False)
with self.property_panel:
with ui.ZStack(width=0):
# Draggable splitter
with ui.Placer(offset_x=self.frame.computed_content_width/1.8, draggable=True, drag_axis=ui.Axis.X):
ui.Rectangle(width=spacer_width, name="splitter")
with ui.HStack():
# Left-most panel is a browser for MakeHuman assets. It includes
# a reference to the list of applied proxies so that an update
# can be triggered when new assets are added
self.browser = AssetBrowserFrame(self.browser_model)
ui.Spacer(width=spacer_width)
with ui.HStack():
with ui.VStack():
self.param_panel = ParamPanel(self.param_model,self.update_human)
with ui.HStack(height=0):
# Toggle whether changes should propagate instantly
ui.ToolButton(text = "Update Instantly", model = self.toggle_model)
with ui.VStack(width = 100, style=button_style):
# Creates a new human in scene and resets modifiers and assets
ui.Button(
"New Human",
clicked_fn=self.new_human,
)
# Updates current human in omniverse scene
self.update_button = ui.Button(
"Update Human",
clicked_fn=self.update_human,
enabled=False,
)
# Resets modifiers and assets on selected human
self.reset_button = ui.Button(
"Reset Human",
clicked_fn=self.reset_human,
enabled=False,
)
def _on_selection_changed(self, event):
"""Callback for human selection events
Parameters
----------
event : carb.events.Event
The event that was pushed to the event stream. Contains payload data with
the selected prim path, or "None" if no human is selected
"""
# Get the stage
stage = omni.usd.get_context().get_stage()
prim_path = event.payload["prim_path"]
# If a valid human prim is selected,
if not prim_path or not stage.GetPrimAtPath(prim_path):
# Hide the property panel
self.property_panel.visible = False
# Show the no selection notification
self.no_selection_notification.visible = True
# Deactivate the update and reset buttons
self.update_button.enabled = False
self.reset_button.enabled = False
else:
# Show the property panel
self.property_panel.visible = True
# Hide the no selection notification
self.no_selection_notification.visible = False
# Activate the update and reset buttons
self.update_button.enabled = True
self.reset_button.enabled = True
# Get the prim from the path in the event payload
prim = stage.GetPrimAtPath(prim_path)
# Update the human in MHCaller
self._human.set_prim(prim)
# Update the list of applied modifiers
self.param_panel.load_values(prim)
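        # Hedged sketch: another module could drive this handler by pushing the
        # same event type onto the app message bus ("/World/human_example" is
        # an illustrative path, not one this extension creates):
        #   bus = omni.kit.app.get_app().get_message_bus_event_stream()
        #   evt = carb.events.type_from_string("siborg.create.human.human_selected")
        #   bus.push(evt, payload={"prim_path": "/World/human_example"})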
def new_human(self):
"""Creates a new human in the scene and selects it"""
# Reset the human class
self._human.reset()
# Create a new human
self._human.prim = self._human.add_to_scene()
# Get selection.
selection = omni.usd.get_context().get_selection()
# Select the new human.
selection.set_selected_prim_paths([self._human.prim_path], True)
def update_human(self):
"""Updates the current human in the scene"""
# Collect changed values from the parameter panel
self.param_panel.update_models()
# Update the human in the scene
self._human.update_in_scene(self._human.prim_path)
def reset_human(self):
"""Resets the current human in the scene"""
# Reset the human
self._human.reset()
# Delete the proxy prims
self._human.delete_proxies()
# Update the human in the scene and reset parameter widgets
self.update_human()
def destroy(self):
"""Called when the window is destroyed. Unsuscribes from human selection events"""
self._selection_sub.unsubscribe()
self._selection_sub = None
super().destroy()
| 7,214 | Python | 36 | 124 | 0.570834 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/browser/delegate.py | import carb
from omni.kit.browser.folder.core.models.folder_browser_item import FileDetailItem
import omni.ui as ui
import omni.kit.app
from omni.kit.browser.core import get_legacy_viewport_interface
from omni.kit.browser.folder.core import FolderDetailDelegate
from .model import MHAssetBrowserModel, AssetDetailItem
from ..mhcaller import MHCaller
import asyncio
from pathlib import Path
from typing import Optional
# TODO remove unused imports
# TODO remove
CURRENT_PATH = Path(__file__).parent
ICON_PATH = CURRENT_PATH.parent.parent.parent.parent.joinpath("icons")
class AssetDetailDelegate(FolderDetailDelegate):
"""Delegate to show Makehuman asset item in detail view and execute drag-and-
    drop and double-click behavior.
Attributes
----------
model : MHAssetBrowserModel
Model that stores AssetBrowser data
"""
def __init__(self, model: MHAssetBrowserModel):
"""Constructs an instance of AssetDetailDelegate, which handles
execution of functions
Parameters
----------
model : MHAssetBrowserModel
Makehuman asset browser model
"""
super().__init__(model=model)
# Reference to the browser asset model
self.model = model
# Reference to the human
self._human = model.human
self._settings = carb.settings.get_settings()
# The context menu that opens on right_click
self._context_menu: Optional[ui.Menu] = None
self._action_item: Optional[AssetDetailItem] = None
self._viewport = None
self._drop_helper = None
def destroy(self):
"""Destructor for AssetDetailDelegate. Removes references and destroys superclass."""
self._viewport = None
self._drop_helper = None
super().destroy()
def get_thumbnail(self, item : AssetDetailItem) -> str:
"""Get the thumbnail for an asset
Parameters
----------
item : AssetDetailItem
The item in the browser for which we are getting a thumbnail
Returns
-------
str
Path to the thumbnail image
"""
return item.thumbnail
def on_drag(self, item: AssetDetailItem) -> str:
"""Displays a translucent UI widget when an asset is dragged
Parameters
----------
item : AssetDetailItem
The item being dragged
Returns
-------
str
The path on disk of the item being dragged (passed to whatever widget
accepts the drop)
"""
thumbnail = self.get_thumbnail(item)
icon_size = 128
with ui.VStack(width=icon_size):
if thumbnail:
ui.Spacer(height=2)
with ui.HStack():
ui.Spacer()
ui.ImageWithProvider(
thumbnail, width=icon_size, height=icon_size
)
ui.Spacer()
ui.Label(
item.name,
word_wrap=False,
elided_text=True,
skip_draw_when_clipped=True,
alignment=ui.Alignment.TOP,
style_type_name_override="GridView.Item",
)
# Return the path of the item being dragged so it can be accessed by
# the widget on which it is dropped
return item.url
def on_double_click(self, item: FileDetailItem):
"""Method to execute when an asset is doubleclicked. Adds the asset to the
human.
Parameters
----------
item : FileDetailItem
            The item that has been double-clicked
"""
self._human.add_item(item.url)
| 3,725 | Python | 30.05 | 93 | 0.595973 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/browser/downloader.py | from typing import Callable
import carb
import aiohttp
import omni.client
import os, zipfile
class Downloader:
"""Downloads and unzips remote files and tracks download status/progress"""
def __init__(self, log_fn : Callable[[float], None]) -> None:
"""Construct an instance of Downloader. Assigns the logging function and sets initial is_downloading status
Parameters
----------
log_fn : Callable[[float], None]
            Function to which to pass progress. Receives a proportion that represents the amount downloaded
"""
self._is_downloading = False
self._log_fn = log_fn
async def download(self, url : str, dest_url : str) -> None:
"""Download a given url to disk and unzip it
Parameters
----------
url : str
Remote URL to fetch
dest_url : str
Local path at which to write and then unzip the downloaded files
Returns
-------
dict of str, Union[omni.client.Result, str]
            Write status and the location of the file on disk
"""
ret_value = {"url": None}
async with aiohttp.ClientSession() as session:
self._is_downloading = True
content = bytearray()
# Download content from the given url
downloaded = 0
async with session.get(url) as response:
size = int(response.headers.get("content-length", 0))
if size > 0:
async for chunk in response.content.iter_chunked(1024 * 512):
content.extend(chunk)
downloaded += len(chunk)
if self._log_fn:
self._log_fn(float(downloaded) / size)
else:
if self._log_fn:
self._log_fn(0)
content = await response.read()
if self._log_fn:
self._log_fn(1)
if response.ok:
# Write to destination
filename = os.path.basename(url.split("?")[0])
dest_url = f"{dest_url}/{filename}"
(result, list_entry) = await omni.client.stat_async(dest_url)
ret_value["status"] = await omni.client.write_file_async(dest_url, content)
ret_value["url"] = dest_url
if ret_value["status"] == omni.client.Result.OK:
# TODO handle file already exists
pass
                    # Close the archive handle after extracting next to the download
                    with zipfile.ZipFile(dest_url, 'r') as z:
                        z.extractall(os.path.dirname(dest_url))
else:
carb.log_error(f"[access denied: {url}")
ret_value["status"] = omni.client.Result.ERROR_ACCESS_DENIED
self._is_downloading = False
return ret_value
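    # Usage sketch for download() (hedged; URL and destination are illustrative):
    #   downloader = Downloader(lambda p: print(f"{p:.0%} downloaded"))
    #   result = await downloader.download("https://example.com/assets.zip",
    #                                      "C:/dest/dir")
    #   if result["status"] == omni.client.Result.OK:
    #       print("saved to", result["url"])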
def not_downloading(self):
return not self._is_downloading | 2,935 | Python | 37.12987 | 115 | 0.529131 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/browser/__init__.py | from omni.kit.browser.folder.core import FolderBrowserWidget
from .delegate import AssetDetailDelegate
from .model import MHAssetBrowserModel
from .options_menu import FolderOptionsMenu
import omni.ui as ui
class AssetBrowserFrame:
"""A widget to browse and select Makehuman assets
Attributes
----------
    model : MHAssetBrowserModel
        A model to hold browser data
"""
def __init__(self, model: MHAssetBrowserModel, **kwargs):
"""Constructs an instance of AssetBrowserFrame. This is a browser that
displays available Makehuman assets (skeletons/rigs, proxies) and allows
a user to apply them to the human.
Parameters
----------
model : MHAssetBrowserModel
A model to hold browser data
"""
self.model = model
self.build_widget()
def build_widget(self):
"""Build UI widget"""
# The delegate to execute browser actions
self._delegate = AssetDetailDelegate(self.model)
# Drop down menu to hold options
self._options_menu = FolderOptionsMenu()
with ui.VStack():
self._widget = FolderBrowserWidget(
self.model, detail_delegate=self._delegate, options_menu=self._options_menu)
ui.Separator(height=2)
# Progress bar to show download progress (initially hidden)
self._progress_bar = ui.ProgressBar(height=20, visible=False)
self._options_menu.bind_progress_bar(self._progress_bar)
| 1,520 | Python | 34.372092 | 92 | 0.653289 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/browser/model.py | import os
from typing import List, Union
import carb.settings
import omni.kit.commands
from siborg.create.human.mhcaller import MHCaller
import omni.usd
from omni.kit.browser.core import DetailItem
from omni.kit.browser.folder.core import (
FolderBrowserModel,
FileDetailItem,
BrowserFile,
)
from siborg.create.human.shared import data_path
from ..human import Human
class AssetDetailItem(FileDetailItem):
"""Represents Makehuman asset detail item
"""
def __init__(self, file: BrowserFile):
"""Constructs an instance of AssetDetailItem
Parameters
----------
file : BrowserFile
BrowserFile object from which to create detail item
"""
dirs = file.url.split("/")
name = dirs[-1]
super().__init__(name, file.url, file, file.thumbnail)
class MHAssetBrowserModel(FolderBrowserModel):
"""Represents Makehuman asset browser model
"""
def __init__(self, human: Human, *args, **kwargs):
"""Constructs an instance of MHAssetBrowserModel
Parameters
----------
human : Human
The human to which to add assets
"""
self.human = human
super().__init__(
*args,
show_category_subfolders=True,
hide_file_without_thumbnails=False,
**kwargs,
)
# Add the data path as the root folder from which to build a collection
super().append_root_folder(data_path(""), name="MakeHuman")
def create_detail_item(
self, file: BrowserFile
) -> Union[FileDetailItem, List[FileDetailItem]]:
"""Create detail item(s) from a file.
A file may include multiple detail items.
        Overrides the parent function to add thumbnails.
Parameters
----------
file : BrowserFile
File object to create detail item(s)
Returns
-------
Union[FileDetailItem, List[FileDetailItem]]
FileDetailItem or list of items created from file
"""
dirs = file.url.split("/")
name = dirs[-1]
# Get the file name without the extension
filename_noext = os.path.splitext(file.url)[0]
thumb = filename_noext + ".thumb"
thumb_png = filename_noext + ".png"
# If there is already a PNG, get it. If not, rename the thumb file to a PNG
# (They are the same format just with different extensions). This lets us
# use Makehuman's asset thumbnails
if os.path.exists(thumb_png):
thumb = thumb_png
elif os.path.exists(thumb):
os.rename(thumb, thumb_png)
thumb = thumb_png
else:
thumb = None
return FileDetailItem(name, file.url, file, thumb)
| 2,789 | Python | 28.0625 | 83 | 0.603801 |
cadop/HumanGenerator/exts/siborg.create.human/siborg/create/human/browser/options_menu.py | from omni.kit.browser.core import OptionMenuDescription, OptionsMenu
from omni.kit.browser.folder.core.models.folder_browser_item import FolderCollectionItem
import carb
import asyncio
from ..shared import data_path
from .downloader import Downloader
import omni.ui as ui
class FolderOptionsMenu(OptionsMenu):
"""
    Represents the options menu used in the MakeHuman asset browser.
"""
def __init__(self):
super().__init__()
# Progress bar widget to show download progress
self._progress_bar : ui.ProgressBar = None
        self.downloader = Downloader(self.progress_fn)
self._download_menu_desc = OptionMenuDescription(
"Download Assets",
clicked_fn=self._on_download_assets,
get_text_fn=self._get_menu_item_text,
enabled_fn=self.downloader.not_downloading
)
self.append_menu_item(self._download_menu_desc)
def destroy(self) -> None:
super().destroy()
def progress_fn(self, proportion: float):
carb.log_info(f"Download is {int(proportion * 100)}% done")
if self._progress_bar:
self._progress_bar.model.set_value(proportion)
def _get_menu_item_text(self) -> str:
# Show download state if download starts
if self.downloader._is_downloading:
return "Download In Progress"
return "Download Assets"
def bind_progress_bar(self, progress_bar):
self._progress_bar = progress_bar
def _on_download_assets(self):
# Show progress bar
if self._progress_bar:
self._progress_bar.visible = True
loop = asyncio.get_event_loop()
asyncio.run_coroutine_threadsafe(self._download(), loop)
    def _is_remove_collection_enabled(self) -> bool:
'''Don't allow removing the default collection'''
if self._browser_widget is not None:
return self._browser_widget.collection_index >= 1
else:
return False
def _on_remove_collection(self) -> None:
if self._browser_widget is None or self._browser_widget.collection_index < 0:
return
else:
browser_model = self._browser_widget.model
collection_items = browser_model.get_collection_items()
if browser_model.remove_collection(collection_items[self._browser_widget.collection_index]):
# Update collection combobox and default none selected
browser_model._item_changed(None)
self._browser_widget.collection_index -= 1
def _hide_progress_bar(self):
if self._progress_bar:
self._progress_bar.visible = False
async def _download(self):
# Makehuman system assets
url = "http://files.makehumancommunity.org/asset_packs/makehuman_system_assets/makehuman_system_assets_cc0.zip"
# Smaller zip for testing
# url = "https://download.tuxfamily.org/makehuman/asset_packs/shirts03/shirts03_ccby.zip"
dest_url = data_path("")
await self.downloader.download(url, dest_url)
self._hide_progress_bar()
self.refresh_collection()
def refresh_collection(self):
collection_item: FolderCollectionItem = self._browser_widget.collection_selection
if collection_item:
folder = collection_item.folder
folder._timeout = 10
asyncio.ensure_future(folder.start_traverse())
| 3,422 | Python | 37.033333 | 119 | 0.643776 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/simulator.py | import numpy as np
from numpy import random
from omni.physx import get_physx_interface
import omni
import carb
from pxr import UsdGeom, Gf, Sdf, UsdShade
import warp as wp
import copy
import usdrt
from siborg.simulate.crowd.crowds import CrowdConfig
from siborg.simulate.crowd.models import socialforces
from siborg.simulate.crowd.models import pam
wp.init()
from siborg.simulate.crowd.models import socialforces_warp as crowd_force
class Simulator(CrowdConfig):
def __init__(self, world=None):
super().__init__()
self.world = world
# a default dt that is surely overwritten later
self._dt = 1/60.0
# set radius
self.radius = 0.7
self.radius_min = 0.5
self.radius_max = 1.0
# A subscription to the physics simulation, used when this class
# is asked to manage the updates
self._simulation_event = None
# Will use a physics scene
self.rigidbody = False
self.use_pam = False
self.on_gpu = False
self.use_instancer = False
self.add_jane = False
self.use_heading = False
# Tracks if user wants to update agent position on each sim step
self.update_agents_sim = False
self.update_viz = False
self.instancer_paths = ["/World/PointInstancer_Bob", "/World/PointInstancer_Jane"]
self.point_instancer_sets = []
self.agent_instance_path_bob = '/World/Scope/CrowdBob'
self.agent_instance_path_jane = '/World/Scope/CrowdJane'
self.instance_forward_vec = (1.0,0.0,0.0) # TODO get from instance object
self.instance_up_vec = (0.0,1.0,0.0) # TODO Fix to be flexible later
self.vel_epsilon = 0.05
self._get_world_up()
def _get_world_up(self):
stage = omni.usd.get_context().get_stage()
up = UsdGeom.GetStageUpAxis(stage)
if up =='X': self.world_up = 0
if up =='Y': self.world_up = 1
if up =='Z': self.world_up = 2
return
def register_simulation(self):
self._callbacks()
# we need to update the agents, otherwise won't see these results
self.update_agents_sim = True
self.update_viz = True
def _callbacks(self):
self._simulation_event = get_physx_interface(
).subscribe_physics_step_events(
self._on_simulation_update)
def _unregister(self):
try:
self._simulation_event.unsubscribe()
        except AttributeError:
            # No active subscription to remove
            self._simulation_event = None
def _on_simulation_update(self, dt):
if self.agent_bodies is None:
return
self._dt = dt
self.run()
def set_xform_goal(self, p):
'''set the goal based on a subscribed xform
Example of usage
watcher = omni.usd.get_watcher()
self._goal_subscriber = watcher.subscribe_to_change_info_path(
Sdf.Path('/World/Goal.xformOp:translate'),
self.Sim.set_xform_goal)
Parameters
----------
p : str(prim path)
a subscribed path
'''
stage = omni.usd.get_context().get_stage()
prim = stage.GetPrimAtPath(str(p).split('.')[0])
goal_point = omni.usd.utils.get_world_transform_matrix(prim).ExtractTranslation()
# Set agent destination
self.goals = np.asarray([goal_point for x in range(self.nagents)])
def integrate(self, x, v, f, dt):
''' take position, velocity, force, and dt to compute updated position and velocity '''
v1 = v + ( (f * 1.0) * dt ) # new velocity
x1 = x + (v1 * dt) # new position
return x1, v1
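        # Worked example (illustrative numbers): with x=0, v=0, f=2 (force
        # already scaled per unit mass) and dt=0.5, this gives
        # v1 = 0 + 2*0.5 = 1.0 and x1 = 0 + 1.0*0.5 = 0.5. Using the new
        # velocity in the position update makes this semi-implicit (symplectic)
        # Euler, which is more stable than explicit Euler for crowd forces.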
def update_goals(self, new_goal):
'''update the goals of agents
Parameters
----------
new_goal : ndarray([x,y,z])
can either be one goal that is applied to all agents, or a list of
the same size as number of agents
'''
if len(new_goal) == 1:
self.goals = np.asarray([new_goal for x in range(self.nagents)])
else:
self.goals = new_goal
def compute_step(self, agent):
# Set the model to PAM if asked
if self.use_pam: model = pam
else: model = socialforces
# Get the neighbors of this agent to use in computing forces
pn = model.get_neighbors(self.agents_pos[agent],
self.agents_pos,
self.agents_percept[agent])[1]
_force = model.compute_force(self.agents_pos[agent],
self.agents_radi[agent],
self.agents_vel[agent],
self.agents_mass[agent],
self.goals[agent],
self.agents_pos[pn],
self.agents_vel[pn],
self.agents_radi[pn],
self._dt)
return _force
def run(self):
'''Runs the simulation for one step
Updates agent positions and velocities if instance flag is true
Returns
-------
ndarray[x,y,z] forces
'''
self.force_list = []
for agent in range(self.nagents):
_force = self.compute_step(agent)
# remove world (up) forces
_force[self.world_up] = 0
# Store all forces to be applied to agents
self.force_list.append(_force)
self.step_processing()
def step_processing(self):
        '''Process the computed step: integrate agent state (or apply
        rigid-body forces) and update the visualization if requested
        '''
# only update agent positions if user requests, otherwise they might want to
# update using forces themselves
if self.update_agents_sim:
# If using rigid body, apply forces to agents
if self.rigidbody:
self.apply_force(self.force_list)
else:
self.internal_integration()
if self.use_instancer:
self.set_instance_agents()
else:
self.set_geompoints()
def internal_integration(self):
# Integrate for new position
for i in range(self.nagents):
self.agents_pos[i], self.agents_vel[i] = self.integrate(self.agents_pos[i],
self.agents_vel[i],
self.force_list[i],
self._dt)
def apply_force(self, force_list):
'''Used for when rigidbody agents are used
Parameters
----------
force_list : List[x,y,z]
list of forces in order of the agents
'''
# Apply forces to simulation
# with Sdf.ChangeBlock():
# for idx, force in enumerate(force_list):
# self._add_force(force, self.agent_bodies[idx], self.agent_bodies[idx].position)
self._add_force3(force_list, self.agent_bodies)
# Update positions and velocities
for i in range(self.nagents):
self.agents_pos[i] = self.agent_bodies[i].position
self.agents_vel[i] = self.agent_bodies[i].velocity
def _add_force(self, force, rigid_body, position):
force = carb.Float3(force)
position = carb.Float3(position)
get_physx_interface().apply_force_at_pos(rigid_body.skinMeshPath, force, position)
def _add_force2(self, force, rigid_body, position):
# force = Gf.Vec3d(force)
_ = force[0]
force = Gf.Vec3d(float(force[0]), float(force[1]),float(force[2]))
rigid_body.forceAttr.Set(force) #position
def _add_force3(self, force_list, rigid_body):
# force = Gf.Vec3d(force)
# stage = usdrt.Usd.Stage.Attach(omni.usd.get_context().get_stage_id())
# # prim = stage.GetPrimAtPath("/World/boxActor")
# attr = prim.CreateAttribute("_worldForce", usdrt.Sdf.ValueTypeNames.Float3, True)
# if attr:
# attr.Set(usdrt.Gf.Vec3f(50000.0, 0.0, 0.0))
# prefixes = set(prefix for path in paths for prefix in path.GetPrefixes())
# with Sdf.ChangeBlock():
# for path in prefixes:
# prim_spec = Sdf.CreatePrimInLayer(layer, path)
# prim_spec.specifier = Sdf.SpecifierDef
# prim_spec.typeName = UsdGeom.Xform.__name__
for idx, body in enumerate(rigid_body):
force = force_list[idx]
force = usdrt.Gf.Vec3d(float(force[0]), float(force[1]),float(force[2]))
# body.forceAttr.Set(force) #position
if body.world_force_attr:
body.world_force_attr.Set(force)
def create_geompoints(self, stage_path=None, color=None):
'''create and manage geompoints representing agents
Parameters
----------
stage_path : str, optional
if not set, will use /World/Points, by default None
color : (r,g,b), optional
if not set, will make color red, by default None
'''
if stage_path: stage_loc = stage_path
else: stage_loc = "/World/Points"
self.stage = omni.usd.get_context().get_stage()
self.agent_point_prim = UsdGeom.Points.Define(self.stage, stage_loc)
self.agent_point_prim.CreatePointsAttr()
width_attr = self.agent_point_prim.CreateWidthsAttr()
width_attr.Set(self.agents_radi)
# width_attr.Set([1 for x in range(self.nagents)])
self.agent_point_prim.CreateDisplayColorAttr()
# For RTX renderers, this only works for UsdGeom.Tokens.constant
color_primvar = self.agent_point_prim.CreateDisplayColorPrimvar(UsdGeom.Tokens.constant)
if color: point_color = color
else: point_color = (1,0,0)
color_primvar.Set([point_color])
def set_geompoints(self):
        # Set the position with an offset based on the radius.
        # Since each agent is a sphere, raise it by half its radius so it
        # renders resting on the ground instead of half-buried in it
render_pos = np.copy(self.agents_pos)
render_pos[:,1] += (self.agents_radi/2)
self.agent_point_prim.GetPointsAttr().Set(render_pos)
def create_instance_agents(self):
if self.add_jane:
bob_size = int(self.nagents/2)
bob_pos = self.agents_pos[:bob_size]
point_instancer = self._single_agent_instance(bob_pos, bob_size, self.agent_instance_path_bob, self.instancer_paths[0])
self.point_instancer_sets.append(point_instancer)
# TODO find way to split colors of instances
jane_size = int(self.nagents/2)
jane_pos = self.agents_pos[bob_size:]
point_instancer = self._single_agent_instance(jane_pos, jane_size , self.agent_instance_path_jane, self.instancer_paths[1])
self.point_instancer_sets.append(point_instancer)
else:
point_instancer = self._single_agent_instance(self.agents_pos, self.nagents, self.agent_instance_path_bob, self.instancer_paths[0])
self.point_instancer_sets.append(point_instancer)
def _single_agent_instance(self, agent_pos, nagents, agent_instance_path, instance_path):
stage = omni.usd.get_context().get_stage()
point_instancer = UsdGeom.PointInstancer.Get(stage, instance_path)
if not point_instancer:
point_instancer = UsdGeom.PointInstancer(stage.DefinePrim(instance_path, "PointInstancer"))
point_instancer.CreatePrototypesRel().SetTargets([agent_instance_path])
self.proto_indices_attr = point_instancer.CreateProtoIndicesAttr()
self.proto_indices_attr.Set([0] * nagents)
## max radius is scale of 1
agent_scales = self.agents_radi/self.radius_max
self.agent_instancer_scales = [(x,x,x) for x in agent_scales] # change to numpy
# Set scale
point_instancer.GetScalesAttr().Set(self.agent_instancer_scales)
point_instancer.GetPositionsAttr().Set(agent_pos)
# Set orientation
rot = Gf.Rotation()
rot.SetRotateInto(self.instance_forward_vec, self.instance_forward_vec)
self.agent_headings = [Gf.Quath(rot.GetQuat()) for x in range(nagents)]
point_instancer.GetOrientationsAttr().Set(self.agent_headings)
return point_instancer
def set_instance_agents(self):
# update the points
# self.point_instancer.CreatePrototypesRel().SetTargets([self.agent_instance_path])
# self.proto_indices_attr = self.point_instancer.CreateProtoIndicesAttr()
# self.proto_indices_attr.Set([0] * self.nagents)
for idx, point_instancer in enumerate(self.point_instancer_sets):
if len(self.point_instancer_sets) == 1:
agents_pos = self.agents_pos
else:
_slice = int(self.nagents/2)
if idx == 0:
# Positions for this instance
agents_pos = self.agents_pos[:_slice]
else:
# Positions for this instance
agents_pos = self.agents_pos[_slice:]
# Set position
point_instancer.GetPositionsAttr().Set(agents_pos)
if not self.use_heading: continue
self.set_heading()
def set_heading(self):
for idx, point_instancer in enumerate(self.point_instancer_sets):
if len(self.point_instancer_sets) == 1:
agents_vel = self.agents_vel
nagents = self.nagents
else:
_slice = int(self.nagents/2)
nagents = _slice
if idx == 0:
# Velocities for this instance
agents_vel = self.agents_vel[:_slice]
else:
# Velocities for this instance
agents_vel = self.agents_vel[_slice:]
# Create array of agent headings based on velocity
normalize_vel = agents_vel
rot = Gf.Rotation()
self.agent_headings = []
cur_orient = point_instancer.GetOrientationsAttr().Get()
for i in range(0, nagents):
if np.sqrt(normalize_vel[i].dot(normalize_vel[i])) < self.vel_epsilon:
tovec = cur_orient[i]
self.agent_headings.append(cur_orient[i])
else:
tovec = Gf.Vec3d(tuple(normalize_vel[i]))
rot.SetRotateInto(self.instance_forward_vec, tovec)
self.agent_headings.append(Gf.Quath(rot.GetQuat()))
# Set orientation
point_instancer.GetOrientationsAttr().Set(self.agent_headings)
return
        #### Change colors (unreachable experimental code, kept below the return above for reference)
stage = omni.usd.get_context().get_stage()
# get path of material
mat_path = '/CrowdBob/Looks/Linen_Blue'
linen_mat = Sdf.Path(f'/World/Scope{mat_path}')
mat_prim = stage.GetPrimAtPath(linen_mat)
# print(mat_prim)
# shader_path = '/Shader.inputs:diffuse_tint'
# tint_shader = f'/World{mat_path}{shader_path}'
shader = omni.usd.get_shader_from_material(mat_prim)
# print(shader)
#inp = shader.GetInput('diffuse_tint').Get()
inp = shader.GetInput('diffuse_tint').Set((0.5,0.5,1.0))
class WarpCrowd(Simulator):
'''A class to manage the warp-based version of crowd simulation
'''
def __init__(self, world=None):
super().__init__(world)
self.device = 'cuda:0'
# generate n number of agents
self.nagents = 9
# set radius
self.radius = 0.7
self.radius_min = 0.5
self.radius_max = 1.0
self.hash_radius = 0.7 # Radius to use for hashgrid
# set mass
self.mass = 20
        # set perception radius
self.perception_radius = 6
# self.dt = 1.0/30.0
self.goal = [0.0,0.0,0.0]
self.generation_origin = [10,10.0,0.0]
self.inv_up = wp.vec3(1.0,1.0,1.0) # z-up
self.inv_up[self.world_up] = 0.0
self.on_gpu = True
def demo_agents(self, s=1.6, m=50, n=50):
o = self.generation_origin
# Initialize agents in a grid for testing
self.agents_pos = np.asarray([
np.array([(s/2) + (x * s) +(o[0]/2) ,
(s/2) + (y * s) +(o[1]/2),
0
], dtype=np.double)
for x in range(m)
for y in range(n)
])
self.nagents = len(self.agents_pos)
self.configure_params()
def configure_params(self):
'''Convert all parameters to warp
'''
self.agents_pos = np.asarray(self.agents_pos)
# self.agents_pos = np.asarray([np.array([0,0,0], dtype=float) for x in range(self.nagents)])
self.agents_vel = np.asarray([np.array([0,0,0], dtype=float) for x in range(self.nagents)])
# # Set a quath for heading
# rot = Gf.Rotation()
# rot.SetRotateInto(self.instance_forward_vec, self.instance_forward_vec) # from, to
# _hquat = Gf.Quath(rot.GetQuat())
# # Get rotation between agent forward direction
self.agents_hdir = np.asarray([np.array([0,0,0,1], dtype=float) for x in range(self.nagents)])
self.force_list = np.asarray([np.array([0,0,0], dtype=float) for x in range(self.nagents)])
self.agents_radi = np.random.uniform(self.radius_min, self.radius_max, self.nagents)
self.agents_mass = [self.mass for x in range(self.nagents)]
self.agents_percept = np.asarray([self.perception_radius for x in range(self.nagents)])
self.agents_goal = np.asarray([np.array(self.goal, dtype=float) for x in range(self.nagents)])
self.agent_force_wp = wp.zeros(shape=self.nagents,device=self.device, dtype=wp.vec3)
self.agents_pos_wp = wp.array(self.agents_pos, device=self.device, dtype=wp.vec3)
self.agents_vel_wp = wp.array(self.agents_vel, device=self.device, dtype=wp.vec3)
self.agents_hdir_wp = wp.array(self.agents_hdir, device=self.device, dtype=wp.vec4)
self.agents_goal_wp = wp.array(self.agents_goal, device=self.device, dtype=wp.vec3)
self.agents_radi_wp = wp.array(self.agents_radi, device=self.device, dtype=float)
self.agents_mass_wp = wp.array(self.agents_mass, device=self.device, dtype=float)
self.agents_percept_wp = wp.array(self.agents_percept, device=self.device, dtype=float)
self.xnew_wp = wp.zeros_like(wp.array(self.agents_pos, device=self.device, dtype=wp.vec3))
self.vnew_wp = wp.zeros_like(wp.array(self.agents_pos, device=self.device, dtype=wp.vec3))
self.hdir_wp = wp.zeros_like(wp.array(self.agents_hdir, device=self.device, dtype=wp.vec4))
def config_hasgrid(self, nagents=None):
'''Create a hash grid based on the number of agents
Currently assumes z up
Parameters
----------
nagents : int, optional
_description_, by default None
'''
if nagents is None: nagents = self.nagents
self.grid = wp.HashGrid(dim_x=200, dim_y=200, dim_z=1, device=self.device)
# self.grid = wp.HashGrid(dim_x=nagents, dim_y=nagents, dim_z=1, device=self.device)
def config_mesh(self, points, faces):
'''Create a warp mesh object from points and faces
Parameters
----------
points : List[[x,y,z]]
A list of floating point xyz vertices of a mesh
faces : List[int]
A list of integers corresponding to vertices. Must be triangle-based
'''
# fake some points and faces if empty list was passed
if len(points) == 0:
points = [(0,0,0), (0,0,0), (0,0,0)]
faces = [[1, 2, 3]]
# print(points)
# print(faces)
# Init mesh for environment collision
self.mesh = wp.Mesh( points=wp.array(points, dtype=wp.vec3, device=self.device),
indices=wp.array(faces, dtype=int ,device=self.device)
)
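        # Usage sketch (mirrors how the extension feeds this method): obstacle
        # geometry gathered from the stage via siborg.simulate.crowd.usd_utils:
        #   points, faces = usd_utils.get_all_stage_mesh(stage, obstacle_prim)
        #   sim.config_mesh(points, faces)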
def update_goals(self, new_goal):
if len(new_goal) == 1:
self.goals = np.asarray([new_goal for x in range(self.nagents)])
else:
self.goals = new_goal
self.agents_goal_wp = wp.array(self.goals, device=self.device, dtype=wp.vec3)
def run(self):
# Rebuild hashgrid given new positions
self.grid.build(points=self.agents_pos_wp, radius=self.hash_radius)
# launch kernel
wp.launch(kernel=crowd_force.get_forces,
dim=self.nagents,
inputs=[self.agents_pos_wp, self.agents_vel_wp, self.agents_goal_wp, self.agents_radi_wp,
self.agents_mass_wp, self._dt, self.agents_percept_wp, self.grid.id, self.mesh.id,
self.inv_up],
outputs=[self.agent_force_wp],
device=self.device
)
self.force_list = self.agent_force_wp.numpy()
self.step_processing()
self.agents_pos_wp = wp.array(self.agents_pos, device=self.device, dtype=wp.vec3)
self.agents_vel_wp = wp.array(self.agents_vel, device=self.device, dtype=wp.vec3)
return self.agent_force_wp
def internal_integration(self):
# Given the forces, integrate for pos and vel
wp.launch(kernel=crowd_force.integrate,
dim=self.nagents,
inputs=[self.agents_pos_wp, self.agents_vel_wp, self.agent_force_wp, self._dt],
outputs=[self.xnew_wp, self.vnew_wp],
device=self.device
)
self.agents_pos_wp = self.xnew_wp
self.agents_vel_wp = self.vnew_wp
self.agents_pos = self.agents_pos_wp.numpy()
self.agents_vel = self.agents_vel_wp.numpy()
def set_heading(self):
up = wp.vec3(0.0,1.0,0.0)
forward = wp.vec3(1.0,0.0,0.0)
wp.launch(kernel=crowd_force.heading,
dim=self.nagents,
inputs=[self.agents_vel_wp, up, forward],
outputs=[self.hdir_wp],
device=self.device
)
self.agents_hdir_wp = self.hdir_wp
self.agents_hdir = self.agents_hdir_wp.numpy()
for idx, point_instancer in enumerate(self.point_instancer_sets):
if len(self.point_instancer_sets) == 1:
agent_headings = self.agents_hdir
else:
_slice = int(self.nagents/2)
if idx == 0:
agent_headings = self.agents_hdir[:_slice]
else:
agent_headings = self.agents_hdir[_slice:]
# Set orientation
point_instancer.GetOrientationsAttr().Set(agent_headings)
| 23,588 | Python | 37.231767 | 143 | 0.558632 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/usd_utils.py | import numpy as np
from pxr import UsdGeom, Gf, Usd
import omni
def get_mesh(usd_stage, objs):
points, faces = [],[]
for obj in objs:
f_offset = len(points)
# f, p = convert_to_mesh(obj)#usd_stage.GetPrimAtPath(obj))
f, p = meshconvert(obj)#usd_stage.GetPrimAtPath(obj))
points.extend(p)
faces.extend(f+f_offset)
return points, faces
def get_all_stage_mesh(stage, start_prim):
found_meshes = []
# Traverse the scene graph and print the paths of prims, including instance proxies
for x in Usd.PrimRange(start_prim, Usd.TraverseInstanceProxies()):
if x.IsA(UsdGeom.Mesh):
found_meshes.append(x)
points, faces = get_mesh(stage, found_meshes)
return points, faces
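# Usage sketch (prim path matches the extension's default obstacle container):
#   stage = omni.usd.get_context().get_stage()
#   parent = stage.GetPrimAtPath('/World/Obstacles')
#   points, faces = get_all_stage_mesh(stage, parent)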
def convert_to_mesh(prim):
    ''' Convert a mesh prim into a flat triangle index list plus world-space vertices '''
# Get mesh name (prim name)
m = UsdGeom.Mesh(prim)
# Get verts and triangles
tris = m.GetFaceVertexIndicesAttr().Get()
tris_cnt = m.GetFaceVertexCountsAttr().Get()
verts = m.GetPointsAttr().Get()
tri_list = np.array(tris)
vert_list = np.array(verts)
xform = UsdGeom.Xformable(prim)
time = Usd.TimeCode.Default() # The time at which we compute the bounding box
world_transform: Gf.Matrix4d = xform.ComputeLocalToWorldTransform(time)
translation: Gf.Vec3d = world_transform.ExtractTranslation()
rotation: Gf.Rotation = world_transform.ExtractRotationMatrix()
# rotation: Gf.Rotation = world_transform.ExtractRotation()
scale: Gf.Vec3d = Gf.Vec3d(*(v.GetLength() for v in world_transform.ExtractRotationMatrix()))
rotation = rotation.GetOrthonormalized()
# New vertices
vert_list = np.dot((vert_list * scale ), rotation) + translation
# vert_scaled = vert_list
# vert_list[:,0] *= scale[0]
# vert_list[:,1] *= scale[1]
# vert_list[:,2] *= scale[2]
# vert_rotated = np.dot(vert_scaled, rotation) # Rotate points
# vert_translated = vert_rotated + translation
# vert_list = vert_translated
# Check if the face counts are 4, if so, reshape and turn to triangles
if tris_cnt[0] == 4:
quad_list = tri_list.reshape(-1,4)
tri_list = quad_to_tri(quad_list)
tri_list = tri_list.flatten()
return tri_list, vert_list
def quad_to_tri(a):
idx = np.flatnonzero(a[:,-1] == 0)
out0 = np.empty((a.shape[0],2,3),dtype=a.dtype)
out0[:,0,1:] = a[:,1:-1]
out0[:,1,1:] = a[:,2:]
out0[...,0] = a[:,0,None]
out0.shape = (-1,3)
mask = np.ones(out0.shape[0],dtype=bool)
mask[idx*2+1] = 0
return out0[mask]
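# Example: one quad [0, 1, 2, 3] fans into triangles [0, 1, 2] and [0, 2, 3];
# the mask then drops the second triangle of any input face whose last index
# is 0, treating it as a triangle padded out to quad form.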
def selected_as_mesh():
# Get the current active selection of the stage
stage = omni.usd.get_context().get_stage()
# Get the selections from the stage
_usd_context = omni.usd.get_context()
_selection = _usd_context.get_selection()
selected_paths = _selection.get_selected_prim_paths()
    # Convert the selected paths to prims before meshing them
    prims = [stage.GetPrimAtPath(x) for x in selected_paths]
    points, faces = get_mesh(stage, prims)
return points, faces
def children_as_mesh(stage, parent_prim):
    # get_mesh expects prims rather than paths, so pass the child prims directly
    children = parent_prim.GetAllChildren()
    points, faces = get_mesh(stage, children)
return points, faces
def meshconvert(prim):
# Create an XformCache object to efficiently compute world transforms
xform_cache = UsdGeom.XformCache()
# Get the mesh schema
mesh = UsdGeom.Mesh(prim)
# Get verts and triangles
tris = mesh.GetFaceVertexIndicesAttr().Get()
if not tris:
return [], []
tris_cnt = mesh.GetFaceVertexCountsAttr().Get()
# Get the vertices in local space
points_attr = mesh.GetPointsAttr()
local_points = points_attr.Get()
# Convert the VtVec3fArray to a NumPy array
points_np = np.array(local_points, dtype=np.float64)
# Add a fourth component (with value 1.0) to make the points homogeneous
num_points = len(local_points)
ones = np.ones((num_points, 1), dtype=np.float64)
points_np = np.hstack((points_np, ones))
# Compute the world transform for this prim
world_transform = xform_cache.GetLocalToWorldTransform(prim)
# Convert the GfMatrix to a NumPy array
matrix_np = np.array(world_transform, dtype=np.float64).reshape((4, 4))
# Transform all vertices to world space using matrix multiplication
world_points = np.dot(points_np, matrix_np)
tri_list = convert_to_triangle_mesh(tris, tris_cnt)
tri_list = tri_list.flatten()
world_points = world_points[:,:3]
return tri_list, world_points
def convert_to_triangle_mesh(FaceVertexIndices, FaceVertexCounts):
"""
Convert a list of vertices and a list of faces into a triangle mesh.
A list of triangle faces, where each face is a list of indices of the vertices that form the face.
"""
# Parse the face vertex indices into individual face lists based on the face vertex counts.
faces = []
start = 0
for count in FaceVertexCounts:
end = start + count
face = FaceVertexIndices[start:end]
faces.append(face)
start = end
# Convert all faces to triangles
triangle_faces = []
for face in faces:
if len(face) < 3:
newface = [] # Invalid face
elif len(face) == 3:
newface = [face] # Already a triangle
else:
# Fan triangulation: pick the first vertex and connect it to all other vertices
v0 = face[0]
newface = [[v0, face[i], face[i + 1]] for i in range(1, len(face) - 1)]
triangle_faces.extend(newface)
return np.array(triangle_faces)
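# Example (illustrative data): a triangle followed by a quad,
#   convert_to_triangle_mesh([0, 1, 2, 2, 3, 4, 5], [3, 4])
# returns [[0, 1, 2], [2, 3, 4], [2, 4, 5]] via fan triangulation of the quad.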
# from pxr import UsdGeom, Sdf, Usd
# import os
# def add_ext_reference(prim: Usd.Prim, ref_asset_path: str, ref_target_path: Sdf.Path) -> None:
# references: Usd.References = prim.GetReferences()
# references.AddReference(
# assetPath=ref_asset_path,
# primPath=ref_target_path # OPTIONAL: Reference a specific target prim. Otherwise, uses the referenced layer's defaultPrim.
# )
# class makescope:
# def __init__(self):
# self.stage = omni.usd.get_context().get_stage()
# scope = UsdGeom.Scope.Define(self.stage, Sdf.Path('/World/Scope'))
# ref_prim = UsdGeom.Xform.Define(self.stage, Sdf.Path('/World/Scope/CrowdJane')).GetPrim()
# dir_path = os.path.join('G:/ProjectRepos/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/data/', 'CrowdBob.usda')
# add_ext_reference(ref_prim, dir_path, Sdf.Path("<Default Prim>"))
# ms = makescope()
| 6,666 | Python | 30.154205 | 132 | 0.645665 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/extension.py | import numpy as np
import omni.ext
import omni.ui as ui
import omni.usd
from omni.physx import get_physx_interface
try:
from omni.usd import get_world_transform_matrix
except:
from omni.usd.utils import get_world_transform_matrix
from . import window
from . import simulator
from .env import Environment
from . import usd_utils
class SFsim(omni.ext.IExt):
# ext_id is current extension id. It can be used with extension manager to query
# additional information, like where this extension is located on filesystem.
def on_startup(self, ext_id):
print("[siborg.simulate.crowd] Social Forces Sim startup")
self.goal_prim_path = '/World/CrowdGoals'
self.obstacle_prim_path = '/World/Obstacles'
self.grid_size = 3
self.rigid_flag = False
self.pam_flag = False
self.gpu_flag = False
self.instancer_flag = False
self.jane_flag = False
self.heading_flag = False
self.init_scene()
self.show()
self.goal_prim_dict = {} # {Prim path, subscriber}
self._on_update_sub = None
def show(self):
self._window = ui.Window("Social Forces Demo Settings", width=500, height=250)
gui_window = window.make_window_elements(self, self._window, self.Sim)
def init_goal_prim(self, prim_path):
omni.kit.commands.execute('CreatePrimWithDefaultXform',
prim_type='Xform',
prim_path=prim_path,
attributes={},
select_new_prim=True)
def modify_goals(self, _new_goals):
if len(_new_goals) == 0: return
if self.Sim.nagents == 0: return
# Assign goals based on number of goals available
        if len(_new_goals) > self.Sim.nagents:
            # Cap at one goal per agent
            _new_goals = _new_goals[:self.Sim.nagents]
# Get strides
self.Sim.goals = np.asarray(self.Sim.goals, dtype=object)
goal_cast = np.array_split(self.Sim.goals, len(_new_goals))
# Reassign the split arrays their new goals
for idx in range(len(goal_cast)):
goal_cast[idx][:] = _new_goals[idx]
# Reshape into xyz vector
goal_cast = np.vstack(goal_cast)
        goal_cast = np.asarray(goal_cast, dtype=np.float64)  # np.float alias was removed from NumPy
# Update the simulations goals
self.Sim.update_goals(goal_cast)
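        # Worked example (illustrative sizes): with 9 agents and 3 goal prims,
        # the goals array is split into 3 groups of 3 agents, each group is
        # assigned one new goal position, and the groups are stacked back into
        # a single (9, 3) array of xyz targets.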
def init_scene(self):
self.World = Environment()
if self.gpu_flag: self.Sim = simulator.WarpCrowd()
else: self.Sim = simulator.Simulator()
# Create the goal hierarchy
self.init_goal_prim(self.goal_prim_path)
self.init_goal_prim(self.obstacle_prim_path)
def _on_update_event(self, dt):
# Check the Goals xform path and see if there are any changes needed to the goal watchers
self.stage = omni.usd.get_context().get_stage()
parent_prim = self.stage.GetPrimAtPath(self.goal_prim_path)
children = parent_prim.GetAllChildren()
# Check if any children are gone from our dict, if so, unsubscribe their watcher
dead_kids = [kid for kid in self.goal_prim_dict.keys() if kid not in children]
for kid in dead_kids:
try: self.goal_prim_dict[kid].unsubscribe()
except: self.goal_prim_dict[kid] = None
self.goal_prim_dict.pop(kid)
# Check if there are any new children not in our dict, if so, add them as a goal and update watcher
babies = [child for child in children if child not in self.goal_prim_dict.keys()]
for baby in babies:
self.goal_prim_dict[baby] = None
# Update the goals
new_goals = []
for x in self.goal_prim_dict.keys():
_prim = x
try:
t = omni.usd.get_world_transform_matrix(_prim).ExtractTranslation()
except:
t = omni.usd.utils.get_world_transform_matrix(_prim).ExtractTranslation()
new_goals.append(t)
if len(new_goals) == 0:
return
self.modify_goals(new_goals)
def assign_meshes(self):
self.stage = omni.usd.get_context().get_stage()
        # Use the meshes that are children of the obstacle prim
parent_prim = self.stage.GetPrimAtPath(self.obstacle_prim_path)
# points, faces = usd_utils.children_as_mesh(self.stage, parent_prim)
points, faces = usd_utils.get_all_stage_mesh(self.stage,parent_prim)
self.Sim.config_mesh(points, faces)
def api_example(self):
self.Sim._unregister()
if self.gpu_flag:
self.Sim = simulator.WarpCrowd(self.World)
self.Sim.config_hasgrid()
self.assign_meshes()
else:
self.Sim = simulator.Simulator(self.World)
self.demo_api_call(self.Sim)
def demo_api_call(self, Sim):
# Use the builtin function for demo agents
Sim.rigidbody = self.rigid_flag
# Set origin for spawning agents
self.stage = omni.usd.get_context().get_stage()
parent_prim = self.stage.GetPrimAtPath('/World/GenerationOrigin')
Sim.generation_origin = [0,0,0]
if parent_prim:
Sim.generation_origin = get_world_transform_matrix(parent_prim).ExtractTranslation()
Sim.generation_origin[2] = Sim.generation_origin[1]
Sim.init_demo_agents(m=self.grid_size,n=self.grid_size,s=1.6)
if self.pam_flag:
Sim.use_pam = True
if self.gpu_flag:
Sim.configure_params()
if not Sim.rigidbody:
if self.jane_flag: # TODO make this work for all sim types
Sim.add_jane = True
else:
Sim.add_jane = False
if self.instancer_flag:
Sim.point_instancer_sets = []
Sim.use_instancer = True
if self.heading_flag:
Sim.use_heading = True
Sim.create_instance_agents() # Create a usdgeom point instance for easy visualization
Sim.set_instance_agents() # update the usdgeom points for visualization
else:
Sim.use_instancer = False
Sim.create_geompoints() # Create a usdgeom point instance for easy visualization
Sim.set_geompoints() # update the usdgeom points for visualization
# tell simulator to update positions after each run
Sim.update_agents_sim = True
# tell simulator to handle the update visualization
Sim.update_viz = True
# Register the simulation to updates, and the Sim will handle it from here
Sim.register_simulation()
if not self._on_update_sub:
self._on_update_sub = get_physx_interface().subscribe_physics_step_events(self._on_update_event)
def on_shutdown(self):
print("[siborg.simulate.crowd] Crowd Sim shutdown")
try: self.Sim._unregister()
except: pass
try: self._goal_subscriber.unsubscribe()
except: self._goal_subscriber = None
try: self._on_update_sub.unsubscribe()
except: self._on_update_sub = None
self.Sim._simulation_event = None
self._window = None
self.Sim = None | 7,277 | Python | 35.39 | 108 | 0.601896 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/crowds.py | import numpy as np
from siborg.simulate.crowd.agent import Agent
class CrowdConfig:
def __init__(self):
self._goal = [0,0,0]
self.goals = None
self.agent_bodies = None
self.nagents = 1
        # set perception radius
self.perception_radius = 1.5
# set radius
self.radius = .5
# set mass
self.mass = 2
# Will use a physics scene
self.rigidbody = False
# Assume z-up world
self.world_up = 2
def create_agents(self, num=None, goals=None, pos=None):
'''Creates a set of agents and goals
Uses the class instance defaults for radius, mass, perception, etc.
Parameters
----------
num : int, optional
number of agents to create (if not defined in init), by default None
goals : ndarray([x,y,z]), optional
either 1 or size equal to number of agents, by default None
pos : ndarray([x,y,z]), optional
            must be same size as number of agents; otherwise all agents are placed
            at the origin, which is bad because they will explode apart, by default None
'''
# generate n number of agents
if num:
self.nagents = num
# Check we can assign goals to agents
if not goals:
goals = [self._goal]
if len(goals) != 1:
if len(goals) != self.nagents:
raise ValueError('If goals is not 1, must be same size as number of agents')
elif len(goals) == 1:
self.goals = np.asarray([goals[0] for x in range(self.nagents)], dtype=np.double)
else:
self.goals = goals
# Set the agent positions
if pos is not None:
self.agents_pos = np.asarray(pos, dtype=np.double)
else:
            self.agents_pos = np.asarray([np.array([0,0,0], dtype=np.double) for x in range(self.nagents)])
# only create an agent instance if user wants physics-based spheres
if self.rigidbody:
self.agent_bodies = [Agent() for x in range(self.nagents)]
# move agents to their positions
for i in range(len(self.agent_bodies)):
x,y,z = self.agents_pos[i]
self.agent_bodies[i].translate(x,y,z)
else:
self.agent_bodies = [None for x in range(self.nagents)]
# set initial velocities to 0
self.agents_vel = np.asarray([np.array([0,0,0], dtype=np.double) for x in range(self.nagents)])
self.set_radius()
self.set_mass()
self.set_perception_radius()
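        # Usage sketch (hedged; positions and goal are illustrative):
        #   cfg = CrowdConfig()
        #   cfg.create_agents(num=4,
        #                     goals=[[10.0, 0.0, 0.0]],
        #                     pos=[[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
        # A single goal is broadcast to every agent; per-agent goals must match
        # the number of agents exactly.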
def set_radius(self,v=None):
'''sets agents radius
Parameters
----------
v : List[float], float, optional
set the radius of the agents, if None, all agents get same radius, by default None
'''
if v:
if type(v) is float:
self.agents_radi = np.asarray([v for x in range(self.nagents)])
elif len(v) != self.nagents:
raise ValueError('Radius array must be same size as number of agents')
else:
self.agents_radi = v
else:
self.agents_radi = np.asarray([self.radius for x in range(self.nagents)])
def set_mass(self,v=None):
'''sets agents mass
Parameters
----------
v : List[float], optional
set the mass of the agents, if None, all agents get same mass, by default None
Raises
------
ValueError
if size of mass array does not match number of agents
'''
if v:
if type(v) is float:
self.agents_mass = np.asarray([v for x in range(self.nagents)])
elif len(v) != self.nagents:
raise ValueError('mass array must be same size as number of agents')
else:
self.agents_mass = v
else:
self.agents_mass = np.asarray([self.mass for x in range(self.nagents)])
def set_perception_radius(self, v=None):
'''sets agents perception radius
Parameters
----------
v : List[float], optional
            set the perception radius of the agents, if None, all agents get same radius, by default None
Raises
------
ValueError
if size of perception array does not match number of agents
'''
if v:
if type(v) is float:
self.agents_percept = np.asarray([v for x in range(self.nagents)])
elif len(v) != self.nagents:
raise ValueError('perception radius array must be same size as number of agents')
else:
self.agents_percept = v
else:
self.agents_percept = np.asarray([self.perception_radius for x in range(self.nagents)])
def init_demo_agents(self, m=5, n=5, s=1, o=[0,0,0]):
'''Create a set of demo agents
Parameters
----------
m : int, optional
number of agents in row, by default 5
n : int, optional
number of agents in col, by default 5
s : int, optional
spacing between agents, by default 1
        o : ndarray([x,y,z]), optional
            origin offset for the grid, by default [0,0,0]; currently overridden
            by the instance's generation_origin below, so the passed value is ignored
        '''
        # NOTE: the o parameter is overridden by the extension-set generation origin
        o = self.generation_origin
# Initialize agents in a grid for testing
self.agents_pos = np.asarray([
np.array([(s/2) + (x * s) +(o[0]/2) ,
(s/2) + (y * s) +(o[1]/2),
0],
dtype=np.double)
for x in range(m)
for y in range(n)
])
# # Initialize agents in a grid for testing
# self.agents_pos = np.asarray([
# np.array([(s/2) + (x * s), (s/2) + (y * s), 0], dtype=np.double)
# for x in range(m)
# for y in range(n)
# ])
self.agents_pos[:, [2, self.world_up]] = self.agents_pos[:, [self.world_up, 2]]
self.nagents = len(self.agents_pos)
####
if self.rigidbody:
self.agent_bodies = [Agent() for x in range(self.nagents)]
for i in range(len(self.agent_bodies)):
x,y,z = self.agents_pos[i]
self.agent_bodies[i].translate(x,y,z)
else:
self.agent_bodies = [None for x in range(self.nagents)]
self.goals = np.asarray([self._goal for x in range(self.nagents)], dtype=np.double)
self.agents_vel = np.asarray([np.array([0,0,0],dtype=np.double) for x in range(self.nagents)])
self.set_radius()
self.set_mass()
self.set_perception_radius()
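# A minimal usage sketch (for illustration only; the Agent import above pulls in
# omni, so this runs only under Kit's Python; rigidbody stays False so no stage is touched):
if __name__ == "__main__":
    cfg = CrowdConfig()
    cfg.create_agents(num=4, goals=[[5, 5, 0]])  # one goal broadcast to all agents
    print(cfg.agents_pos.shape, cfg.goals.shape)  # -> (4, 3) (4, 3)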
| 6,938 | Python | 34.584615 | 105 | 0.511098 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/env.py | import omni
import omni.kit.commands
from pxr import Usd, Gf
from pxr import UsdGeom
from pxr import UsdPhysics, PhysxSchema
class Environment:
def __init__(self):
print('Initializing Environment')
self._stage = omni.usd.get_context().get_stage()
self.set_scene(self._stage)
def set_scene(self, stage):
print(f'Setting up {stage}')
self._stage = stage
self.defaultPrimPath = str(self._stage.GetDefaultPrim().GetPath())
# Physics scene
# UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
UsdGeom.SetStageMetersPerUnit(stage, 1.0)
self.scene = UsdPhysics.Scene.Define(stage, self.defaultPrimPath + "/physicsScene")
stage_axis = UsdGeom.GetStageUpAxis(stage)
        gravity_dir = Gf.Vec3f(0.0, 0.0, 0.0)
        # Compare strings with ==; 'is' tests identity and is unreliable for str
        if stage_axis == 'X': gravity_dir[0] = -1.0
        if stage_axis == 'Y': gravity_dir[1] = -1.0
        if stage_axis == 'Z': gravity_dir[2] = -1.0
self.scene.CreateGravityDirectionAttr().Set(gravity_dir)
self.scene.CreateGravityMagnitudeAttr().Set(9.81)
physxSceneAPI = PhysxSchema.PhysxSceneAPI.Apply(self.scene.GetPrim())
physxSceneAPI.CreateEnableCCDAttr().Set(True)
# Check if there is a physics groundplane in the scene
plane_path = self.defaultPrimPath+"/GroundPlane"
if self._stage.GetPrimAtPath(plane_path).IsValid():
pass
else:
# If not, make one
omni.kit.commands.execute('AddGroundPlaneCommand',
stage=self._stage,
planePath='/GroundPlane',
axis=UsdGeom.GetStageUpAxis(stage),
size=1.0,
position=Gf.Vec3f(0.0, 0.0, 0.0),
color=Gf.Vec3f(0.5, 0.5, 0.5))
| 1,923 | Python | 34.629629 | 91 | 0.566823 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/agent.py |
import omni
from omni.physx.scripts import physicsUtils
from pxr import Gf, UsdPhysics, PhysxSchema, UsdGeom, UsdShade
import usdrt
class Agent:
def __init__(self):
stage = omni.usd.get_context().get_stage()
# Create a sphere representing the agent
self.skin_mesh , self.skinMeshPath = self.sphere(stage)
# Set a rigid body material and collider
self.set_material(stage, self.skinMeshPath)
# Add a translation operator and set it to zero position
# Since we changed to create this object with an xform, don't need to add, just get it.
# self.translateOp = self.skin_mesh.AddTranslateOp()
self.translateOp = UsdGeom.XformOp(self.skin_mesh.GetPrim().GetAttribute("xformOp:translate"))
self.translateOp.Set(Gf.Vec3f(0.0, 0.0, 0.0))
def sphere(self, stage):
# Create sphere representing agent
_, skinMeshPath = omni.kit.commands.execute("CreateMeshPrimWithDefaultXform",
prim_type="Sphere",
prim_path='/World/Agents/Sphere',
prepend_default_prim=True)
skin_mesh = UsdGeom.Mesh.Get(stage, skinMeshPath)
prim = skin_mesh.GetPrim()
# setup physics - rigid body
self.rigidBodyAPI = UsdPhysics.RigidBodyAPI.Apply(prim)
linVelocity = Gf.Vec3f(0.0, 0.0, 0.0)
angularVelocity = Gf.Vec3f(0.0, 0.0, 0.0)
# apply initial velocities
self.rigidBodyAPI.CreateVelocityAttr().Set(linVelocity)
self.rigidBodyAPI.CreateAngularVelocityAttr().Set(angularVelocity)
self.massAPI = UsdPhysics.MassAPI.Apply(prim)
self.massAPI.CreateMassAttr(2)
self.massAPI.CreateCenterOfMassAttr().Set(Gf.Vec3f(0.0, 0.0, 0.0))
# Add a force attribute
# shuttleForcePath = skinMeshPath + "/shuttleForce"
# xform = UsdGeom.Xform.Define(stage, shuttleForcePath)
# self.forceApi = PhysxSchema.PhysxForceAPI.Apply(xform.GetPrim())
#
# self.forceApi = PhysxSchema.PhysxForceAPI.Apply(prim)
# self.forceAttr = self.forceApi.GetForceAttr()
self.usdrt_stage = usdrt.Usd.Stage.Attach(omni.usd.get_context().get_stage_id())
prim = self.usdrt_stage.GetPrimAtPath(skinMeshPath)
self.world_force_attr = prim.CreateAttribute("_worldForce", usdrt.Sdf.ValueTypeNames.Float3, True)
return skin_mesh, skinMeshPath
def translate(self, x=0, y=0, z=0):
self.translateOp.Set(self.translateOp.Get() + Gf.Vec3d( x, y, z))
@property
def position(self):
return self.translateOp.Get()
@property
def velocity(self):
return self.rigidBodyAPI.GetVelocityAttr().Get()
def set_material(self, stage, skinMeshPath):
defaultPrimPath = str(stage.GetDefaultPrim().GetPath())
# Floor Material
path = defaultPrimPath + "/rigidMaterial"
prim_path = stage.GetPrimAtPath(skinMeshPath)
# Set it as a rigid body
rigidBodyAPI = UsdPhysics.RigidBodyAPI.Apply(prim_path)
# Add a collider (defaults to mesh triangulation)
UsdPhysics.CollisionAPI.Apply(prim_path)
# Apply a specific mass parameter
UsdPhysics.MassAPI.Apply(prim_path)
#Get the rigidbody parameter to set values on
physxRbAPI = PhysxSchema.PhysxRigidBodyAPI.Apply(prim_path)
#Enable CCD for this object
physxRbAPI.CreateEnableCCDAttr().Set(True)
# Create a (separate) physics material that gets added to the object
path = defaultPrimPath + "/highdensitymaterial"
UsdShade.Material.Define(stage, path)
material = UsdPhysics.MaterialAPI.Apply(stage.GetPrimAtPath(path))
material.CreateStaticFrictionAttr().Set(0)
material.CreateDynamicFrictionAttr().Set(0)
material.CreateRestitutionAttr().Set(.2)
material.CreateDensityAttr().Set(0.01)
# Add material
physicsUtils.add_physics_material_to_prim(stage, prim_path, path)
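# A minimal usage sketch (an assumption for illustration; must run inside Kit
# scripting with an open stage, since Agent() creates prims on the current stage):
#   agent = Agent()
#   agent.translate(1.0, 0.0, 0.0)  # nudge the sphere along +X
#   print(agent.position, agent.velocity)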
| 4,141 | Python | 38.075471 | 107 | 0.642357 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/window.py | from .models.socialforces import Parameters
import omni.ui as ui
combo_sub = None
def make_window_elements(self, _window, Sim):
with _window.frame:
with ui.VStack():
with ui.HStack():
ui.Label('Max Speed')
max_speed = ui.FloatField(height=20)
max_speed.model.add_value_changed_fn(lambda m : setattr(Parameters, 'max_speed', m.get_value_as_float()))
max_speed.model.set_value(Parameters.max_speed)
with ui.HStack():
ui.Label('Desired Speed')
v_desired = ui.FloatField(height=20)
v_desired.model.add_value_changed_fn(lambda m : setattr(Parameters, 'v_desired', m.get_value_as_float()))
v_desired.model.set_value(Parameters.v_desired)
with ui.HStack():
ui.Label('A')
A = ui.FloatField(height=20)
A.model.add_value_changed_fn(lambda m : setattr(Parameters, 'A', m.get_value_as_float()))
A.model.set_value(Parameters.A)
with ui.HStack():
ui.Label('B')
B = ui.FloatField(height=20)
B.model.add_value_changed_fn(lambda m : setattr(Parameters, 'B', m.get_value_as_float()))
B.model.set_value(Parameters.B)
with ui.HStack():
ui.Label('kn')
kn = ui.FloatField(height=20)
kn.model.add_value_changed_fn(lambda m : setattr(Parameters, 'kn', m.get_value_as_float()))
kn.model.set_value(Parameters.kn)
with ui.HStack():
ui.Label('kt')
kt = ui.FloatField(height=20)
kt.model.add_value_changed_fn(lambda m : setattr(Parameters, 'kt', m.get_value_as_float()))
kt.model.set_value(Parameters.kt)
with ui.HStack():
ui.Label('Agent grid (nxn)')
agent_grid = ui.IntField(height=20)
agent_grid.model.add_value_changed_fn(lambda m : setattr(self, 'grid_size', m.get_value_as_int()))
agent_grid.model.set_value(3)
# with ui.HStack():
# ui.Label('Agent Mass')
# kt = ui.FloatField(height=20)
# kt.model.add_value_changed_fn(lambda m : setattr(Sim, 'mass', m.get_value_as_float()))
# kt.model.set_value(Sim.mass)
# with ui.HStack():
# ui.Label('Agent Radius')
# kt = ui.FloatField(height=20)
# kt.model.add_value_changed_fn(lambda m : Sim.set_radius(m.get_value_as_float()))
# kt.model.set_value(Sim.radius)
# with ui.HStack():
# ui.Label('Agent Perception Radius')
# kt = ui.FloatField(height=20)
# kt.model.add_value_changed_fn(lambda m : setattr(Sim, 'perception_radius', m.get_value_as_float()))
# kt.model.set_value(Sim.perception_radius)
# with ui.HStack(height=20):
# ui.Button("Gen Agents", clicked_fn=Sim.create_agents)
# nagents = ui.IntField(height=5)
# nagents.model.set_value(Sim.nagents)
# nagents.model.add_value_changed_fn(lambda m : setattr(Sim, 'nagents', m.get_value_as_int()))
with ui.HStack(height=20):
ui.Label('GPU', width=20)
WarpModel = ui.CheckBox(width=30)
WarpModel.model.add_value_changed_fn(lambda m : setattr(self, 'gpu_flag', m.get_value_as_bool()))
WarpModel.model.set_value(True)
                ui.Label('Use Instances', width=20)
                InstancerBox = ui.CheckBox(width=30)
                InstancerBox.model.add_value_changed_fn(lambda m : setattr(self, 'instancer_flag', m.get_value_as_bool()))
                InstancerBox.model.set_value(True)
                ui.Label('Add Jane', width=5)
                JaneBox = ui.CheckBox(width=30)
                JaneBox.model.add_value_changed_fn(lambda m : setattr(self, 'jane_flag', m.get_value_as_bool()))
                JaneBox.model.set_value(False)
                ui.Label('Use Direction', width=5)
                HeadingBox = ui.CheckBox(width=30)
                HeadingBox.model.add_value_changed_fn(lambda m : setattr(self, 'heading_flag', m.get_value_as_bool()))
                HeadingBox.model.set_value(True)
                ui.Label('Rigid Body', width=5)
                RigidBodyBox = ui.CheckBox(width=30)
                RigidBodyBox.model.add_value_changed_fn(lambda m : setattr(self, 'rigid_flag', m.get_value_as_bool()))
                RigidBodyBox.model.set_value(False)
                ui.Label('PAM', width=20)
                PamBox = ui.CheckBox(width=30)
                PamBox.model.add_value_changed_fn(lambda m : setattr(self, 'pam_flag', m.get_value_as_bool()))
                PamBox.model.set_value(False)
# options = ["GeomPoints", "RigidBody"]
# combo_model: ui.AbstractItemModel = ui.ComboBox(0, *options).model
# def combo_changed(item_model: ui.AbstractItemModel, item: ui.AbstractItem):
# value_model = item_model.get_item_value_model(item)
# current_index = value_model.as_int
# option = options[current_index]
# print(f"Selected '{option}' at index {current_index}.")
# combo_sub = combo_model.subscribe_item_changed_fn(combo_changed)
# def clicked():
# value_model = combo_model.get_item_value_model()
# current_index = value_model.as_int
# option = options[current_index]
# print(f"Button Clicked! Selected '{option}' at index {current_index}.")
# self.api_example(current_index)
# ui.Button("Set Selected Meshes", width=5, clicked_fn=self.assign_meshes)
ui.Button("Start Demo", width=5, clicked_fn=self.api_example)
with ui.HStack(height=10):
pass | 6,133 | Python | 45.1203 | 121 | 0.536768 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/examples/ex4.py | '''Example of rigid-body agents, with the Simulator handling updates.
'''
from siborg.simulate.crowd.simulator import Simulator
def example_4():
# Example of using API
Sim = Simulator()
Sim.rigidbody = True # use rigid bodies
Sim.init_demo_agents(m=3, n=5, s=1.1)
# Register the simulation to updates, and the Sim will handle it from here
Sim.register_simulation()
    # tell simulator to update positions after each run; otherwise call Sim.integrate() yourself
Sim.update_agents_sim = True
# tell simulator to handle the update visualization
Sim.update_viz = True
example_4() | 558 | Python | 26.949999 | 92 | 0.691756 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/examples/ex3.py | '''Example that subscribes to its own physics-step event and updates the
GeomPoints visualization manually each step.
'''
import time
from omni.physx import get_physx_interface
from siborg.simulate.crowd.simulator import Simulator
Sim = Simulator()
start_time = time.time()
_simulation_event = None
def example_3():
# Example of using API
# Use a builtin helper function to generate a grid of agents
Sim.init_demo_agents(m=3, n=5, s=1.1)
Sim.create_geompoints() # Create a usdgeom point instance for easy visualization
Sim.set_geompoints() # update the usdgeom points for visualization
    # tell simulator to update positions after each run; otherwise call Sim.integrate() yourself
Sim.update_agents_sim = True
# don't have the simulator update the geompoints, we do it ourselves
Sim.update_viz = False
# Register to our own physx update
sim_subscriber()
def sim_subscriber():
    # This would need to get cleaned up
    global _simulation_event
    _simulation_event = get_physx_interface().subscribe_physics_step_events(_on_update)
def _on_update(dt):
# Run one step of simulation
# don't need to use forces since we told simulator to update
forces = Sim.run()
Sim.set_geompoints() # update the usdgeom points for visualization
    # For this demo we will unsubscribe after 100 seconds
    if time.time() - start_time > 100:
print('ending')
_simulation_event.unsubscribe()
example_3()
| 1,338 | Python | 28.755555 | 92 | 0.701046 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/examples/ex2.py | '''Example for Simulator handling update and using GeomPoints.
Uses a helper function for initializing agents
'''
from siborg.simulate.crowd.simulator import Simulator
def example_2():
Sim = Simulator()
# Use a builtin helper function to generate a grid of agents
Sim.init_demo_agents(m=3,n=5,s=1.1)
Sim.create_geompoints() # Create a usdgeom point instance for easy visualization
    # tell simulator to update positions after each run; otherwise call Sim.integrate() yourself
    Sim.update_agents_sim = True
    # have the simulator update the geompoints visualization each step
Sim.update_viz = True
Sim.register_simulation()
example_2() | 669 | Python | 32.499998 | 92 | 0.730942 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/examples/ex1.py | '''Example for Simulator handling update and using GeomPoints
'''
from siborg.simulate.crowd.simulator import Simulator
import numpy as np
from math import sqrt
def example_1():
# Example of using API
Sim = Simulator()
nagents = 10
    # Some trickery to make a grid of agents, using the closest number of agents that fills an even grid
pos = np.asarray([
np.array([(1/2) + (x), (1/2) + (y), 0], dtype=np.double)
for x in range(int(sqrt(nagents)))
for y in range(int(sqrt(nagents)))
])
pos[:, [2, Sim.world_up]] = pos[:, [Sim.world_up, 2]]
nagents = len(pos)
Sim.create_agents(num=nagents, goals=[[10,10,0]], pos=pos) # initialize a set of agents
Sim.create_geompoints() # Create a usdgeom point instance for easy visualization
    # tell simulator to update positions after each run; otherwise call Sim.integrate() yourself
    Sim.update_agents_sim = True
    # have the simulator update the geompoints visualization each step
Sim.update_viz = True
Sim.register_simulation()
example_1() | 1,119 | Python | 35.129031 | 96 | 0.626452 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/models/pam.py | ''' Python implementation of the Predictive Avoidance Model (PAM)
from
A Predictive Collision Avoidance Model for Pedestrian Simulation,
I. Karamouzas, P. Heil, P. van Beek, M. H. Overmars
Motion in Games (MIG 2009), Lecture Notes in Computer Science (LNCS), Vol. 5884, 2009
'''
from dataclasses import dataclass
import numpy as np
from scipy.spatial import distance
@dataclass
class Parameters:
# The agents field of view
field_of_view = 200.0
# The agents radius ? Used here in this implementation or in sim?
agent_radius = 0.5
# Minimum agent distance
min_agent_dist = 0.1
    # the mid distance parameter in the piecewise personal-space function used for the predictive force
dmid = 4.0
# KSI
ksi = 0.5
# Nearest Neighbour distance ? Used here in this implementation or in sim?
neighbor_dist = 10.0
# Maximum neighbours to consider ? Used here in this implementation or in sim?
max_neighbors = 3
# Maximum acceleration ? Used here in this implementation or in sim/physics?
max_accel = 20.0
# Maximum speed
max_speed = 7
# Preferred Speed
preferred_vel = 2.5
# Goal acquired radius
goal_radius = 1.0
# Time Horizon
time_horizon = 4.0
# Agent Distance
agent_dist = 0.1
# Wall Distance
wall_dist = 0.1
    # Wall Steepness
wall_steepness = 2.0
# Agent Strength
agent_strength = 1.0
# wFactor, factor to progressively scale down forces in when in a non-collision state
w_factor = 0.8
# Noise flag (should noise be added to the movement action)
noise = False
force_clamp = 40.0
# *private* Ideal wall distance
_ideal_wall_dist = agent_radius + wall_dist
# *private* Squared ideal wall distance
_SAFE = _ideal_wall_dist * _ideal_wall_dist
# *private* Agent Personal space
_agent_personal_space = agent_radius + min_agent_dist
    # *private* the min distance parameter in the piecewise personal space function
    _dmin = agent_radius + _agent_personal_space
    # *private* the max distance parameter in the piecewise personal space function
_dmax = time_horizon * max_speed
# *private* FOV cosine
_cosFOV = np.cos((0.5 * np.pi * field_of_view) / 180.0)
def ray_intersects_disc(pi, pj, v, r):
# calc ray disc est. time to collision
t = 0.0
w = pj - pi
a = np.dot(v, v)
b = np.dot(w, v)
c = np.dot(w, w) - (r * r)
discr = (b * b) - (a * c)
if discr > 0.0:
t = (b - np.sqrt(discr)) / a
if t < 0.0:
t = 999999.0
else:
t = 999999.0
return t
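# For reference, ray_intersects_disc() solves |w - t*v| = r for the smallest
# positive t, i.e. the quadratic a*t^2 - 2*b*t + c = 0 with a = v.v, b = w.v,
# c = w.w - r^2, whose smaller root is t = (b - sqrt(b^2 - a*c)) / a;
# 999999.0 acts as the "no collision" sentinel.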
def mag(v):
# calc magnitude of vector
v_mag = np.sqrt(v.dot(v))
return v_mag
def norm(v):
# normalize a vector
v_norm = np.array([0, 0, 0], dtype='float64')
magnitude = mag(v)
if magnitude > 0.0:
v_norm = v / magnitude
return v_norm
def get_neighbors(cur, agents, pn_r):
dist = distance.cdist([cur], agents)
pn = dist < pn_r
# Index to remove is when its zero
pn_self = dist == 0
pn_self = np.nonzero(pn_self)
pn[pn_self] = False
pn = np.nonzero(pn)
return pn
def wall_force(obstacles, rr_i, closest_point, SAFE, add_force):
for i in range(len(obstacles)):
# Step 1: get closest point on obstacle to agent
# [[ Need python code for this in simulation ]]
n_w = rr_i - closest_point
d_w = mag(n_w) * mag(n_w)
if (d_w < SAFE):
d_w = np.sqrt(d_w)
if (d_w > 0):
n_w /= d_w
            if ((d_w - Parameters.agent_radius) < 0.001):
                dist_min_radius = 0.001
            else:
                dist_min_radius = d_w - Parameters.agent_radius
            obstacle_force = (Parameters._ideal_wall_dist - d_w) / np.power(dist_min_radius, Parameters.wall_steepness) * n_w
add_force(obstacle_force)
def calc_goal_force(goal, rr_i, vv_i):
# Preferred velocity is preferred speed in direction of goal
preferred_vel = Parameters.preferred_vel * norm(goal - rr_i)
# Goal force, is always added
goal_force = (preferred_vel - vv_i) / Parameters.ksi
return goal_force
def collision_param(rr_i, vv_i, desired_vel, pn_rr, pn_vv, pn_r):
# Keep track of if we ever enter a collision state
agent_collision = False
t_pairs = []
# Handle agents tc values for predictive forces among neighbours
for j, rr_j in enumerate(pn_rr):
# Get position and velocity of neighbor agent
vv_j = pn_vv[j]
# Get radii of neighbor agent
rj = pn_r[j]
combined_radius = Parameters._agent_personal_space + rj
w = rr_j - rr_i
if (mag(w) < combined_radius):
agent_collision = True
t_pairs.append((0.0, j))
else:
rel_dir = norm(w)
if np.dot(rel_dir, norm(vv_i)) < Parameters._cosFOV:
continue
tc = ray_intersects_disc(rr_i, rr_j, desired_vel - vv_j, combined_radius)
if tc < Parameters.time_horizon:
if len(t_pairs) < Parameters.max_neighbors:
t_pairs.append((tc, j))
elif tc < t_pairs[0][0]:
t_pairs.pop()
t_pairs.append((tc, j))
return t_pairs, agent_collision
def predictive_force(rr_i, desired_vel, desired_speed, pn_rr, pn_vv, pn_r, vv_i):
    # Handle predictive forces
# Setup collision parameters
t_pairs, agent_collision = collision_param(rr_i, vv_i, desired_vel, pn_rr, pn_vv, pn_r)
# This will be all the other forces, added in a particular way
steering_force = np.array([0, 0, 0], dtype='float64')
# will store a list of tuples, each tuple is (tc, agent)
force_count = 0
for t_pair in t_pairs:
# Nice variables from the t_pair tuples
t = t_pair[0]
agent_idx = t_pair[1]
force_dir = rr_i + (desired_vel * t) - pn_rr[agent_idx] - (pn_vv[agent_idx] * t)
force_dist = mag(force_dir)
if force_dist > 0:
force_dir /= force_dist
collision_dist = np.maximum(force_dist - Parameters.agent_radius - pn_r[agent_idx], 0.0)
#D = input to evasive force magnitude piecewise function
D = np.maximum( (desired_speed * t) + collision_dist, 0.001)
force_mag = 0.0
if D < Parameters._dmin:
force_mag = Parameters.agent_strength * Parameters._dmin / D
elif D < Parameters.dmid:
force_mag = Parameters.agent_strength
elif D < Parameters._dmax:
force_mag = Parameters.agent_strength * (Parameters._dmax - D) / (Parameters._dmax - Parameters.dmid)
else:
continue
force_mag *= np.power( (1.0 if agent_collision else Parameters.w_factor), force_count)
force_count += 1
            steering_force += force_mag * force_dir  # accumulate over neighbors; w_factor decays later ones
return steering_force
def add_noise(steering_force):
angle = np.random.uniform(0.0, 1.0) * 2.0 * np.pi
dist = np.random.uniform(0.0, 1.0) * 0.001
steering_force += dist * np.array([np.cos(angle),np.sin(angle),0], dtype='float64')
return steering_force
def compute_force(rr_i, ri, vv_i, mass, goal, pn_rr, pn_vv, pn_r, dt):
# Get the goal force
goal_force = calc_goal_force(goal, rr_i, vv_i)
# Desired values if all was going well in an empty world
desired_vel = vv_i + goal_force * dt
desired_speed = mag(desired_vel)
# Get obstacle (wall) forces
obstacle_force = np.array([0, 0, 0], dtype='float64')
#@TODO
# obstacle_force = wall_force()
# Get predictive steering forces
steering_force = predictive_force(rr_i, desired_vel, desired_speed, pn_rr, pn_vv, pn_r, vv_i)
# Add noise for reducing deadlocks adding naturalness
if Parameters.noise:
steering_force = add_noise(steering_force)
# Clamp driving force
if mag(steering_force) > Parameters.force_clamp:
steering_force = norm(steering_force) * Parameters.force_clamp
return goal_force + obstacle_force + steering_force | 8,170 | Python | 32.080972 | 123 | 0.599143 |
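# A minimal call sketch for pam.compute_force (an illustration; the numbers are
# made up). The signature mirrors socialforces.compute_force, so the two models
# are drop-in swappable:
#   f = compute_force(rr_i=np.zeros(3), ri=0.5, vv_i=np.zeros(3), mass=2.0,
#                     goal=np.array([10.0, 0.0, 0.0]),
#                     pn_rr=[np.array([2.0, 0.0, 0.0])],
#                     pn_vv=[np.zeros(3)], pn_r=[0.5], dt=1.0/60.0)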
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/models/socialforces.py | from dataclasses import dataclass
import numpy as np
from scipy.spatial import distance
# zero_vec = np.array([0,0,0], dtype='float64')
@dataclass
class Parameters:
# names from https://www.sciencedirect.com/science/article/pii/S0378437120306853
Tau = 0.5 #(s)
A = 2000.0
B = 0.08
kn = 1.2 * 100_000 # Kgs^-2
kt = 2.4 * 100_000 # Kg m^-1 s^-1
max_speed = 10
v_desired = 3.5
def calc_wall_force():
# TODO add wall and geometry recognition
force = np.array([0,0,0], dtype='float64')
return force
def calc_agent_force(rr_i, ri, vv_i, pn_rr, pn_vv, pn_r):
# Sum the forces of neighboring agents
force = np.array([0,0,0], dtype='float64')
# Set the total force of the other agents to zero
ff_ij = np.array([0,0,0], dtype='float64')
    rr_j = np.array([0,0,0], dtype='float64')
# Iterate through the neighbors and sum (f_ij)
for j, rr_j in enumerate(pn_rr):
# Get position and velocity of neighbor agent
vv_j = pn_vv[j]
# Get radii of neighbor agent
rj = pn_r[j]
# Pass agent position to AgentForce calculation
ff_ij = neighbor_force(rr_i, ri, vv_i, rr_j, rj, vv_j)
# Sum Forces
force += ff_ij
return force
def neighbor_force(rr_i, ri, vv_i, rr_j, rj, vv_j):
# Calculate the force exerted by another agent
# Take in this agent (i) and a neighbors (j) position and radius
# Sum of radii
rij = ri + rj
# distance between center of mass
d_ij = mag(rr_i - rr_j)
# "n_ij is the normalized vector points from pedestrian j to i"
n_ij = norm(rr_i - rr_j) # Normalized vector pointing from j to i
# t_ij "Vector of tangential relative velocity pointing from i to j."
# A sliding force is applied on agent i in this direction to reduce the relative velocity.
t_ij = np.cross(vv_j - vv_i, [0,0,1] )
dv_ji = np.dot(vv_j - vv_i, t_ij)
# Calculate f_ij
force = repulsion(rij, d_ij, n_ij) + proximity(rij, d_ij, n_ij) + sliding(rij, d_ij, dv_ji, t_ij)
return force
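# For reference, neighbor_force() implements the agent-agent term of the
# Helbing-style social force model referenced in Parameters:
#   f_ij = { A * exp[(r_ij - d_ij)/B] + kn * g(r_ij - d_ij) } * n_ij
#          + kt * g(r_ij - d_ij) * (dv_ji * t_ij)
# where g(x) = x when pedestrians touch (d_ij <= r_ij) and 0 otherwise.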
def calc_goal_force(goal, pos, vel, mass, v_desired, dt):
ee_i = norm(goal - pos)
force = mass * ( ( (v_desired * ee_i) - vel ) / Parameters.Tau )
return force
def G(r_ij, d_ij):
# g(x) is a function that returns zero if pedestrians touch
# otherwise is equal to the argument x
if (d_ij > r_ij): return 0.0
    return r_ij - d_ij
def repulsion(r_ij, d_ij, n_ij):
force = Parameters.A * np.exp( (r_ij - d_ij) / Parameters.B) * n_ij
return force
def proximity(r_ij, d_ij, n_ij):
force = Parameters.kn * G(r_ij, d_ij) * n_ij
return force
def sliding(r_ij, d_ij, dv_ji, t_ij):
force = Parameters.kt * G(r_ij, d_ij) * (dv_ji * t_ij)
return force
def mag(v):
# calc magnitude of vector
v_mag = np.sqrt(v.dot(v))
return v_mag
def norm(v):
    # normalize a vector (guard against zero-length input)
    m = mag(v)
    if m == 0.0:
        return np.zeros_like(v)
    return v / m
def get_neighbors(cur, agents, pn_r):
dist = distance.cdist([cur], agents)
pn = dist < pn_r
# Index to remove is when its zero
pn_self = dist == 0
pn_self = np.nonzero(pn_self)
pn[pn_self] = False
pn = np.nonzero(pn)
return pn
def compute_force(rr_i, ri, vv_i, mass, goal, pn_rr, pn_vv, pn_r, dt):
# Get the force for this agent to the goal
goal = calc_goal_force(goal, rr_i, vv_i, mass, Parameters.v_desired, dt)
agent = calc_agent_force(rr_i, ri, vv_i, pn_rr, pn_vv, pn_r)
wall = calc_wall_force()
force = goal + agent + wall
force = norm(force) * min(mag(force), Parameters.max_speed)
return force
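if __name__ == "__main__":
    # Minimal smoke test (an illustration; the numbers are made up): one agent
    # at the origin with a single neighbor 2 m away, pulled toward a goal.
    rr_i = np.array([0.0, 0.0, 0.0])
    vv_i = np.array([0.0, 0.0, 0.0])
    goal = np.array([10.0, 0.0, 0.0])
    pn_rr = [np.array([2.0, 0.0, 0.0])]  # one neighbor
    pn_vv = [np.array([0.0, 0.0, 0.0])]
    pn_r = [0.5]
    f = compute_force(rr_i, 0.5, vv_i, 2.0, goal, pn_rr, pn_vv, pn_r, 1.0 / 60.0)
    print("net force:", f)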
| 3,633 | Python | 26.323308 | 102 | 0.603909 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/models/socialforces_warp.py | import warp as wp
Tau = wp.constant(0.5) # s (relaxation time)
A = wp.constant(2000.0) # N
B = wp.constant(0.08) # m
kn = wp.constant(1.2 * 100000) # kg s^-2
kt = wp.constant(2.4 * 100000) # kg m^-1 s^-1
max_speed = wp.constant(10.0) # m/s
v_desired = wp.constant(2.5) # m/s
@wp.kernel
def get_forces(positions: wp.array(dtype=wp.vec3),
velocities: wp.array(dtype=wp.vec3),
goals: wp.array(dtype=wp.vec3),
radius: wp.array(dtype=float),
mass: wp.array(dtype=float),
dt: float,
percept : wp.array(dtype=float),
grid : wp.uint64,
mesh: wp.uint64,
inv_up: wp.vec3,
forces: wp.array(dtype=wp.vec3),
):
# thread index
tid = wp.tid()
cur_pos = positions[tid]
cur_rad = radius[tid]
cur_vel = velocities[tid]
cur_mass = mass[tid]
goal = goals[tid]
pn = percept[tid]
_force = compute_force(cur_pos,
cur_rad,
cur_vel,
cur_mass,
goal,
positions,
velocities,
radius,
dt,
pn,
grid,
mesh)
# Clear any vertical forces with Element-wise mul
_force = wp.cw_mul(_force, inv_up)
# compute distance of each point from origin
forces[tid] = _force
@wp.kernel
def integrate(x : wp.array(dtype=wp.vec3),
v : wp.array(dtype=wp.vec3),
f : wp.array(dtype=wp.vec3),
dt: float,
xnew: wp.array(dtype=wp.vec3),
vnew: wp.array(dtype=wp.vec3),
):
tid = wp.tid()
x0 = x[tid]
v0 = v[tid]
f0 = f[tid]
v1 = v0 + (f0*1.0) * dt
x1 = x0 + v1 * dt
xnew[tid] = x1
vnew[tid] = v1
@wp.kernel
def heading(v : wp.array(dtype=wp.vec3),
up : wp.vec3,
forward : wp.vec3,
hdir: wp.array(dtype=wp.vec4),
):
tid = wp.tid()
v0 = v[tid]
vnorm = wp.normalize(v0)
hdir[tid] = velocity_to_quaternion(up, forward, vnorm)
@wp.func
def velocity_to_quaternion(up : wp.vec3,
forward : wp.vec3,
velocity: wp.vec3):
# Construct a quaternion that rotates the agent's forward direction to align with the velocity vector
if wp.length(forward) > 0: forward = wp.normalize(forward)
if wp.length(velocity) > 0: velocity = wp.normalize(velocity)
else:
velocity = forward
    dot = wp.clamp(wp.dot(forward, velocity), -1.0, 1.0) # Clamp the dot product so acos below cannot NaN
if dot == 1.0:
# If the forward and velocity vectors are already aligned, return the identity quaternion
return wp.vec4(0.0, 0.0, 0.0, 1.0)
else:
axis = wp.cross(forward, velocity)
axis = up * wp.sign(wp.dot(axis, up)) # Project the axis onto the up plane
if wp.length(axis) > 0.0: axis = wp.normalize(axis) # Normalize the axis of rotation
        else: axis = up # Use a default axis of rotation if the input is a zero vector
        angle = wp.acos(dot) # Calculate the angle of rotation (dot clamped above)
qw = wp.cos(angle/2.0) # Calculate the scalar component of the quaternion
qx = wp.sin(angle/2.0) * axis[0] # Calculate the vector component of the quaternion
qy = wp.sin(angle/2.0) * axis[1] # Calculate the vector component of the quaternion
qz = wp.sin(angle/2.0) * axis[2] # Calculate the vector component of the quaternion
return wp.vec4(qx, qy, qz, qw)
@wp.func
def calc_goal_force(goal: wp.vec3,
pos: wp.vec3,
vel: wp.vec3,
mass: float,
v_desired: float,
dt: float):
ee_i = wp.normalize(goal - pos)
force = mass * ( ( (v_desired * ee_i) - vel ) / (Tau) )
return force
@wp.func
def calc_wall_force(rr_i: wp.vec3,
ri: float,
vv_i: wp.vec3,
mesh: wp.uint64):
'''
rr_i : position
ri : radius
vv_i : velocity
Computes: (A * exp[(ri-diw)/B] + kn*g(ri-diw))*niw - kt * g(ri-diw)(vi * tiw)tiw
'''
face_index = int(0)
face_u = float(0.0)
face_v = float(0.0)
sign = float(0.0)
force = wp.vec3(0.0,0.0,0.0)
# Define the up direction
up_dir = wp.vec3(0.0, 0.0, 1.0)
max_dist = float(ri * 5.0)
has_point = wp.mesh_query_point(mesh, rr_i, max_dist, sign, face_index, face_u, face_v)
if (not has_point):
return wp.vec3(0.0, 0.0, 0.0)
p = wp.mesh_eval_position(mesh, face_index, face_u, face_v)
# d_iw = distance to wall W
d_iw = wp.length(p - rr_i)
# vector of the wall to the agent
nn_iw = wp.normalize(rr_i - p)
# perpendicular vector of the agent-wall (tangent force)
tt_iw = wp.cross(up_dir, nn_iw)
if wp.dot(vv_i, tt_iw) < 0.0:
tt_iw = -1.0 * tt_iw
# Compute force
# f_iW = { A * exp[(ri-diw)/B] + kn*g(ri-diw) } * niw
# - kt * g(ri-diw)(vi * tiw)tiw
f_rep = ( A * wp.exp((ri-d_iw)/B) + kn * G(ri, d_iw) ) * nn_iw
f_tan = kt * G(ri,d_iw) * wp.dot(vv_i, tt_iw) * tt_iw
force = f_rep - f_tan
return force
@wp.func
def calc_agent_force(rr_i: wp.vec3,
ri: float,
vv_i: wp.vec3,
pn_rr: wp.array(dtype=wp.vec3),
pn_vv: wp.array(dtype=wp.vec3),
pn_r: wp.array(dtype=float),
pn: float,
grid : wp.uint64,
):
'''Sum the forces of neighboring agents'''
# Set the total force of the other agents to zero
force = wp.vec3(0.0, 0.0, 0.0)
ff_ij = wp.vec3(0.0, 0.0, 0.0)
rr_j = wp.vec3(0.0, 0.0, 0.0)
# create grid query around point
query = wp.hash_grid_query(grid, rr_i, pn)
index = int(0)
# Iterate through the neighbors and sum (f_ij)
while(wp.hash_grid_query_next(query, index)):
j = index
neighbor = pn_rr[j]
# compute distance to neighbor point
dist = wp.length(rr_i-neighbor)
if (dist <= pn):
# Get position and velocity of neighbor agent
rr_j = pn_rr[j]
vv_j = pn_vv[j]
# Get radii of neighbor agent
rj = pn_r[j]
# Pass agent position to AgentForce calculation
ff_ij = neighbor_force(rr_i, ri, vv_i, rr_j, rj, vv_j)
# Sum Forces
force += ff_ij
return force
@wp.func
def neighbor_force(rr_i: wp.vec3,
ri: float,
vv_i: wp.vec3,
rr_j: wp.vec3,
rj: float,
vv_j: wp.vec3):
'''Calculate the force exerted by another agent.
Take in this agent (i) and a neighbors (j) position and radius'''
# Sum of radii
rij = ri + rj
# distance between center of mass
d_ij = wp.length(rr_i - rr_j)
# "n_ij is the normalized vector points from pedestrian j to i"
n_ij = wp.normalize(rr_i - rr_j) # Normalized vector pointing from j to i
# t_ij "Vector of tangential relative velocity pointing from i to j."
# A sliding force is applied on agent i in this direction to reduce the relative velocity.
t_ij = vv_j - vv_i
dv_ji = wp.dot(vv_j - vv_i, t_ij)
# Calculate f_ij
force = repulsion(rij, d_ij, n_ij) + proximity(rij, d_ij, n_ij) + sliding(rij, d_ij, dv_ji, t_ij)
return force
@wp.func
def G(r_ij: float,
d_ij: float
):
# g(x) is a function that returns zero if pedestrians touch
# otherwise is equal to the argument x
if (d_ij > r_ij): return 0.0
return r_ij - d_ij
@wp.func
def repulsion(r_ij: float,
d_ij: float,
n_ij: wp.vec3):
force = A * wp.exp( (r_ij - d_ij) / B) * n_ij
return force
@wp.func
def proximity(r_ij: float,
d_ij: float,
n_ij: wp.vec3):
force = (kn * G(r_ij, d_ij)) * n_ij # body force
return force
@wp.func
def sliding(r_ij: float,
d_ij: float,
dv_ji: float,
t_ij: wp.vec3):
force = kt * G(r_ij, d_ij) * (dv_ji * t_ij)
return force
@wp.func
def compute_force(rr_i: wp.vec3,
ri: float,
vv_i: wp.vec3,
mass:float,
goal:wp.vec3,
pn_rr: wp.array(dtype=wp.vec3),
pn_vv: wp.array(dtype=wp.vec3),
pn_r: wp.array(dtype=float),
dt: float,
pn: float,
grid : wp.uint64,
mesh: wp.uint64
):
'''
rr_i : position
ri : radius
vv_i : velocity
pn_rr : List[perceived neighbor positions]
pn_vv : List[perceived neighbor velocities]
pn_r : List[perceived neighbor radius]
'''
# Get the force for this agent to the goal
goal = calc_goal_force(goal, rr_i, vv_i, mass, v_desired, dt)
agent = calc_agent_force(rr_i, ri, vv_i, pn_rr, pn_vv, pn_r, pn, grid)
wall = calc_wall_force(rr_i, ri, vv_i, mesh)
# Sum of forces
force = goal + agent + wall
force = wp.normalize(force) * wp.min(wp.length(force), max_speed)
return force
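# A minimal launch sketch for the get_forces kernel (an assumption for
# illustration -- array sizes, the hash-grid resolution, and the ground mesh are
# placeholders; see the Warp docs for wp.HashGrid and wp.Mesh):
#   import warp as wp
#   wp.init()
#   n, device = 64, "cuda"
#   positions = wp.zeros(n, dtype=wp.vec3, device=device)   # fill with agent data
#   velocities = wp.zeros(n, dtype=wp.vec3, device=device)
#   goals = wp.zeros(n, dtype=wp.vec3, device=device)
#   radius = wp.full(n, 0.5, dtype=float, device=device)
#   mass = wp.full(n, 2.0, dtype=float, device=device)
#   percept = wp.full(n, 1.5, dtype=float, device=device)
#   forces = wp.zeros(n, dtype=wp.vec3, device=device)
#   grid = wp.HashGrid(32, 32, 32, device=device)
#   grid.build(points=positions, radius=1.5)
#   mesh = ...  # a wp.Mesh built from the environment geometry (placeholder)
#   wp.launch(get_forces, dim=n,
#             inputs=[positions, velocities, goals, radius, mass, 1.0/60.0,
#                     percept, grid.id, mesh.id, wp.vec3(1.0, 1.0, 0.0)],
#             outputs=[forces], device=device)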
| 9,633 | Python | 29.200627 | 105 | 0.508876 |
cadop/crowds/exts/siborg.simulate.crowd/siborg/simulate/crowd/tests/__init__.py | from .test_hello_world import * | 31 | Python | 30.999969 | 31 | 0.774194 |
cadop/arduverse/puppet_handle_1.py | from omni.kit.scripting import BehaviorScript
import socket
import numpy as np
import math
from pxr import Gf
class Puppet2(BehaviorScript):
def on_init(self):
print(f"{__class__.__name__}.on_init()->{self.prim_path}")
# Set up the server address and port
UDP_IP = "0.0.0.0"
UDP_PORT = 8881
# Create a UDP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((UDP_IP, UDP_PORT))
self.sock.setblocking(0)
print("Waiting for data...")
def on_destroy(self):
print(f"{__class__.__name__}.on_destroy()->{self.prim_path}")
self.sock = None
rot = [0, 0, 0]
self.prim.GetAttribute('xformOp:rotateXYZ').Set(Gf.Vec3d(rot))
def on_play(self):
print(f"{__class__.__name__}.on_play()->{self.prim_path}")
# Set up the server address and port
UDP_IP = "0.0.0.0"
UDP_PORT = 8881
# Create a UDP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind((UDP_IP, UDP_PORT))
self.sock.setblocking(0)
# Time interval between sensor readings in seconds
self.dt = 0.02
def on_pause(self):
print(f"{__class__.__name__}.on_pause()->{self.prim_path}")
def on_stop(self):
print(f"{__class__.__name__}.on_stop()->{self.prim_path}")
self.on_destroy()
def on_update(self, current_time: float, delta_time: float):
self.get_data()
def get_data(self):
# # Receive data from the Arduino
data = self.clear_socket_buffer()
if data is None: return
# Decode the data and split it into Pitch and Roll
data = data.decode()
device, pitch, roll, yaw = data.split(",")
x,y,z = float(roll), float(yaw), 180-float(pitch)
rot = [x, y, z]
self.prim.GetAttribute('xformOp:rotateXYZ').Set(Gf.Vec3d(rot))
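    # Expected datagram format (an assumption inferred from the split above):
    #   b"<device>,<pitch>,<roll>,<yaw>"   e.g. b"handle1,12.5,-3.2,90.0"
    # with angles in degrees from the handle's IMU.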
def clear_socket_buffer(self):
# Function to clear the socket's buffer
latest_data = None
while True:
try:
# Try to read data from the socket in a non-blocking way
latest_data, addr = self.sock.recvfrom(1024)
except BlockingIOError:
# No more data to read (buffer is empty)
return latest_data | 2,384 | Python | 30.8 | 72 | 0.575084 |
cadop/arduverse/udp.py | import socket
# Set up the server address and port
UDP_IP = "0.0.0.0"
UDP_PORT1 = 8881
UDP_PORT2 = 8882
# Create a UDP socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock1.bind((UDP_IP, UDP_PORT1))
sock2.bind((UDP_IP, UDP_PORT2))
sock1.setblocking(0)
sock2.setblocking(0)
print("Waiting for data...")
while True:
# Receive data from the Arduino
try:
data, addr = sock1.recvfrom(1024)
print("Received message 1:", data.decode())
    except BlockingIOError: pass
try:
data, addr = sock2.recvfrom(1024)
print("Received message 2:", data.decode())
    except BlockingIOError: pass | 667 | Python | 22.857142 | 56 | 0.668666 |
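# A matching test sender (an assumption for local testing; the real packets come
# from the Arduino handles). Run from another shell:
#   import socket, time
#   out = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   while True:
#       out.sendto(b"handle1,12.5,-3.2,90.0", ("127.0.0.1", 8881))
#       time.sleep(0.02)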
jshrake-nvidia/kit-cv-video-example/exts/omni.cv-video.example/omni/cv-video/example/extension.py | """
Omniverse Kit example extension that demonstrates how to stream video (such as RTSP) to a dynamic texture using [OpenCV VideoCapture](https://docs.opencv.org/3.4/dd/d43/tutorial_py_video_display.html)
and [omni.ui.DynamicTextureProvider](https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ByteImageProvider.html#byteimageprovider).
TODO:
- [x] Investigate how to perform the color space conversion and texture updates in a separate thread
- [ ] Investigate how to avoid the color space conversion and instead use the native format of the frame provided by OpenCV
"""
import asyncio
import threading
import time
from typing import List
import carb
import carb.profiler
import cv2 as cv
import numpy as np
import omni.ext
import omni.kit.app
import omni.ui
from pxr import Kind, Sdf, Usd, UsdGeom, UsdShade
DEFAULT_STREAM_URI = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4"
#DEFAULT_STREAM_URI = "C:/Users/jshrake/Downloads/1080p.mp4"
def create_textured_plane_prim(
stage: Usd.Stage, prim_path: str, texture_name: str, width: float, height: float
) -> Usd.Prim:
"""
Creates a plane prim and an OmniPBR material with a dynamic texture for the albedo map
"""
hw = width / 2
hh = height / 2
# This code is mostly copy pasted from https://graphics.pixar.com/usd/release/tut_simple_shading.html
billboard: UsdGeom.Mesh = UsdGeom.Mesh.Define(stage, f"{prim_path}/Mesh")
billboard.CreatePointsAttr([(-hw, -hh, 0), (hw, -hh, 0), (hw, hh, 0), (-hw, hh, 0)])
billboard.CreateFaceVertexCountsAttr([4])
billboard.CreateFaceVertexIndicesAttr([0, 1, 2, 3])
billboard.CreateExtentAttr([(-430, -145, 0), (430, 145, 0)])
texCoords = UsdGeom.PrimvarsAPI(billboard).CreatePrimvar(
"st", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.varying
)
texCoords.Set([(0, 0), (1, 0), (1, 1), (0, 1)])
material_path = f"{prim_path}/Material"
material: UsdShade.Material = UsdShade.Material.Define(stage, material_path)
shader: UsdShade.Shader = UsdShade.Shader.Define(stage, f"{material_path}/Shader")
shader.SetSourceAsset("OmniPBR.mdl", "mdl")
shader.SetSourceAssetSubIdentifier("OmniPBR", "mdl")
shader.CreateIdAttr("OmniPBR")
shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset).Set(f"dynamic://{texture_name}")
material.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")
billboard.GetPrim().ApplyAPI(UsdShade.MaterialBindingAPI)
UsdShade.MaterialBindingAPI(billboard).Bind(material)
return billboard
class OpenCvVideoStream:
"""
A small abstraction around OpenCV VideoCapture and omni.ui.DynamicTextureProvider,
making a one-to-one mapping between the two
Resources:
- https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html
- https://docs.opencv.org/3.4/dd/d43/tutorial_py_video_display.html
- https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ByteImageProvider.html#omni.ui.ByteImageProvider.set_bytes_data_from_gpu
"""
def __init__(self, name: str, stream_uri: str):
self.name = name
self.uri = stream_uri
self.texture_array = None
try:
# Attempt to treat the uri as an int
# https://docs.opencv.org/3.4/d8/dfe/classcv_1_1VideoCapture.html#a5d5f5dacb77bbebdcbfb341e3d4355c1
stream_uri_as_int = int(stream_uri)
self._video_capture = cv.VideoCapture(stream_uri_as_int)
except:
# Otherwise treat the uri as a str
self._video_capture = cv.VideoCapture(stream_uri)
self.fps: float = self._video_capture.get(cv.CAP_PROP_FPS)
self.width: int = self._video_capture.get(cv.CAP_PROP_FRAME_WIDTH)
self.height: int = self._video_capture.get(cv.CAP_PROP_FRAME_HEIGHT)
self._dynamic_texture = omni.ui.DynamicTextureProvider(name)
self._last_read = time.time()
self.is_ok = self._video_capture.isOpened()
# If this FPS is 0, set it to something sensible
if self.fps == 0:
self.fps = 24
@carb.profiler.profile
def update_texture(self):
# Rate limit frame reads to the underlying FPS of the capture stream
now = time.time()
time_delta = now - self._last_read
if time_delta < 1.0 / self.fps:
return
self._last_read = now
# Read the frame
carb.profiler.begin(0, "read")
ret, frame = self._video_capture.read()
carb.profiler.end(0)
# The video may be at the end, loop by setting the frame position back to 0
if not ret:
self._video_capture.set(cv.CAP_PROP_POS_FRAMES, 0)
self._last_read = time.time()
return
# By default, OpenCV converts the frame to BGR
# We need to convert the frame to a texture format suitable for RTX
# In this case, we convert to BGRA, but the full list of texture formats can be found at
# # kit\source\extensions\omni.gpu_foundation\bindings\python\omni.gpu_foundation_factory\GpuFoundationFactoryBindingsPython.cpp
frame: np.ndarray
carb.profiler.begin(0, "color space conversion")
frame = cv.cvtColor(frame, cv.COLOR_BGR2RGBA)
carb.profiler.end(0)
height, width, channels = frame.shape
carb.profiler.begin(0, "set_bytes_data")
self._dynamic_texture.set_data_array(frame, [width, height, channels])
carb.profiler.end(0)
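# A small standalone sketch of the same frame pipeline as update_texture():
# read one frame, convert BGR -> RGBA, and inspect the array that would be
# handed to set_data_array(). Note (assumption): this only executes when the
# module is run directly with an interpreter where the carb/omni imports above
# resolve, e.g. Kit's bundled Python.
if __name__ == "__main__":
    cap = cv.VideoCapture(DEFAULT_STREAM_URI)
    ok, frame = cap.read()
    if ok:
        rgba = cv.cvtColor(frame, cv.COLOR_BGR2RGBA)
        h, w, c = rgba.shape
        print(f"frame {w}x{h}, {c} channels, dtype={rgba.dtype}")  # expect c == 4
    cap.release()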
class OmniRtspExample(omni.ext.IExt):
def on_startup(self, ext_id):
# stream = omni.kit.app.get_app().get_update_event_stream()
# self._sub = stream.create_subscription_to_pop(self._update_streams, name="update")
self._streams: List[OpenCvVideoStream] = []
self._stream_threads: List[threading.Thread] = []
self._stream_uri_model = omni.ui.SimpleStringModel(DEFAULT_STREAM_URI)
self._window = omni.ui.Window("OpenCV Video Streaming Example", width=800, height=200)
with self._window.frame:
with omni.ui.VStack():
omni.ui.StringField(model=self._stream_uri_model)
omni.ui.Button("Create", clicked_fn=self._on_click_create)
@carb.profiler.profile
def _update_stream(self, i):
async def loop():
while self._running:
await asyncio.sleep(0.001)
self._streams[i].update_texture()
asyncio.run(loop())
def _on_click_create(self):
name = f"Video{len(self._streams)}"
image_name = name
usd_context = omni.usd.get_context()
stage: Usd.Stage = usd_context.get_stage()
prim_path = f"/World/{name}"
# If the prim already exists, remove it so we can create it again
try:
stage.RemovePrim(prim_path)
self._streams = [stream for stream in self._streams if stream.name != image_name]
except:
pass
# Create the stream
stream_uri = self._stream_uri_model.get_value_as_string()
video_stream = OpenCvVideoStream(image_name, stream_uri)
if not video_stream.is_ok:
carb.log_error(f"Error opening stream: {stream_uri}")
return
self._streams.append(video_stream)
carb.log_info(f"Creating video steam {stream_uri} {video_stream.width}x{video_stream.height}")
# Create the mesh + material + shader
model_root = UsdGeom.Xform.Define(stage, prim_path)
Usd.ModelAPI(model_root).SetKind(Kind.Tokens.component)
create_textured_plane_prim(stage, prim_path, image_name, video_stream.width, video_stream.height)
# Clear the string model
# self._stream_uri_model.set_value("")
# Create the thread to pump the video stream
self._running = True
i = len(self._streams) - 1
thread = threading.Thread(target=self._update_stream, args=(i, ))
thread.daemon = True
thread.start()
self._stream_threads.append(thread)
def on_shutdown(self):
# self._sub.unsubscribe()
self._running = False
for thread in self._stream_threads:
thread.join()
self._stream_threads = []
self._streams = []
| 8,262 | Python | 43.187166 | 201 | 0.657105 |
jshrake-nvidia/kit-cv-video-example/exts/omni.cv-video.example/omni/cv-video/example/__init__.py | # TODO: Work around OM-108110
# by explicitly adding the python3.dll directory to the DLL search path list.
# cv2.dll fails to load because it can't load the python3.dll dependency
try:
import os
import pathlib
import sys
# The python3.dll lives in the python directory adjacent to the kit executable
# Get the path to the current kit process
exe_path = sys.executable
exe_dir = pathlib.Path(exe_path).parent
python_dir = exe_dir / "python"
print(f"Adding {python_dir} to DLL search path list")
os.add_dll_directory(python_dir)
except Exception as e:
print(f"Error adding python directory to DLL search path list {e}")
from .extension import *
| 690 | Python | 33.549998 | 82 | 0.718841 |
jshrake-nvidia/kit-dynamic-texture-example/exts/omni.dynamic_texture_example/omni/dynamic_texture_example/extension.py | '''
Demonstrates how to programmatically generate a textured quad using the omni.ui.DynamicTextureProvider API.
This is contrived example that reads the image from the local filesystem (cat.jpg). You can imagine
sourcing the image bytes from a network request instead.
Resources:
- https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ByteImageProvider.html
- See the full list of omni.ui.TextureFormat variants at .\app\kit\extscore\omni.gpu_foundation\omni\gpu_foundation_factory\_gpu_foundation_factory.pyi
TODO(jshrake):
- [ ] Currently the dynamic texture name only works with the OmniPBR.mdl material. Need to understand why it doesn't work
with other materials, such as UsdPreviewSurface.
- [ ] Test instantiating and using the DynamicTextureProvider in a separate thread
'''
from typing import Tuple, Union
import pathlib
import omni
import omni.ui as ui
from PIL import Image
from pxr import Kind, Sdf, Usd, UsdGeom, UsdShade
def create_textured_plane_prim(stage: Usd.Stage, prim_path: str, texture_name: str) -> Usd.Prim:
# This code is mostly copy pasted from https://graphics.pixar.com/usd/release/tut_simple_shading.html
billboard: UsdGeom.Mesh = UsdGeom.Mesh.Define(stage, f"{prim_path}/Mesh")
billboard.CreatePointsAttr([(-430, -145, 0), (430, -145, 0), (430, 145, 0), (-430, 145, 0)])
billboard.CreateFaceVertexCountsAttr([4])
billboard.CreateFaceVertexIndicesAttr([0,1,2,3])
billboard.CreateExtentAttr([(-430, -145, 0), (430, 145, 0)])
texCoords = UsdGeom.PrimvarsAPI(billboard).CreatePrimvar("st",
Sdf.ValueTypeNames.TexCoord2fArray,
UsdGeom.Tokens.varying)
texCoords.Set([(0, 0), (1, 0), (1,1), (0, 1)])
material_path = f"{prim_path}/Material"
material = UsdShade.Material.Define(stage, material_path)
shader: UsdShade.Shader = UsdShade.Shader.Define(stage, f"{material_path}/Shader")
shader.SetSourceAsset("OmniPBR.mdl", "mdl")
shader.SetSourceAssetSubIdentifier("OmniPBR", "mdl")
shader.CreateIdAttr("OmniPBR")
shader.CreateInput("diffuse_texture", Sdf.ValueTypeNames.Asset).Set(f"dynamic://{texture_name}")
material.CreateSurfaceOutput().ConnectToSource(shader.ConnectableAPI(), "surface")
billboard.GetPrim().ApplyAPI(UsdShade.MaterialBindingAPI)
UsdShade.MaterialBindingAPI(billboard).Bind(material)
return billboard
def create_dynamic_texture(texture_name: str, bytes: bytes, resolution: Tuple[int, int], format: ui.TextureFormat) -> ui.DynamicTextureProvider:
# See https://docs.omniverse.nvidia.com/kit/docs/omni.ui/latest/omni.ui/omni.ui.ByteImageProvider.html#omni.ui.ByteImageProvider.set_bytes_data_from_gpu
bytes_list = list(bytes)
dtp = ui.DynamicTextureProvider(texture_name)
dtp.set_bytes_data(bytes_list, list(resolution), format)
return dtp
class DynamicTextureProviderExample(omni.ext.IExt):
def on_startup(self, ext_id):
self._texture: Union[None, ui.DynamicTextureProvider] = None
self._window = ui.Window("Create Dynamic Texture Provider Example", width=300, height=300)
with self._window.frame:
ui.Button("Create", clicked_fn=self._on_click_create)
def _on_click_create(self):
usd_context = omni.usd.get_context()
stage: Usd.Stage = usd_context.get_stage()
name = f"TexturePlane"
image_name = name
prim_path = f"/World/{name}"
# If the prim already exists, remove it so we can create it again
try:
stage.RemovePrim(prim_path)
self._texture = None
except:
pass
# Create the prim root
model_root = UsdGeom.Xform.Define(stage, prim_path)
Usd.ModelAPI(model_root).SetKind(Kind.Tokens.component)
# Create the mesh + material + shader
create_textured_plane_prim(stage, prim_path, image_name)
# Open the adjacent cat.jpg file and create the texture
dir = pathlib.Path(__file__).parent.resolve()
image_path = dir.joinpath("cat.jpg")
image: Image.Image = Image.open(image_path, mode='r')
# Ensure the image format is RGBA
image = image.convert('RGBA')
image_bytes = image.tobytes()
image_resolution = (image.width, image.height)
image_format = ui.TextureFormat.RGBA8_UNORM
self._texture = create_dynamic_texture(image_name, image_bytes, image_resolution, image_format)
def on_shutdown(self):
self._texture = None
| 4,533 | Python | 48.282608 | 156 | 0.692698 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/openDogV2/openDogV2-original/Release03/code/Python/camera100.py | import RPi.GPIO as GPIO
import jetson.inference
import jetson.utils
import time
import argparse
import sys
# parse the command line
parser = argparse.ArgumentParser(description="Locate objects in a live camera stream using an object detection DNN.",
formatter_class=argparse.RawTextHelpFormatter, epilog=jetson.inference.detectNet.Usage() +
jetson.utils.videoSource.Usage() + jetson.utils.videoOutput.Usage() + jetson.utils.logUsage())
parser.add_argument("input_URI", type=str, default="", nargs='?', help="URI of the input stream")
parser.add_argument("output_URI", type=str, default="", nargs='?', help="URI of the output stream")
parser.add_argument("--network", type=str, default="ssd-mobilenet-v2", help="pre-trained model to load (see below for options)")
parser.add_argument("--overlay", type=str, default="box,labels,conf", help="detection overlay flags (e.g. --overlay=box,labels,conf)\nvalid combinations are: 'box', 'labels', 'conf', 'none'")
parser.add_argument("--threshold", type=float, default=0.5, help="minimum detection threshold to use")
is_headless = ["--headless"] if sys.argv[0].find('console.py') != -1 else [""]
try:
opt = parser.parse_known_args()[0]
except:
print("")
parser.print_help()
sys.exit(0)
# load the object detection network
net = jetson.inference.detectNet(opt.network, sys.argv, opt.threshold)
# create video sources & outputs
input = jetson.utils.videoSource(opt.input_URI, argv=sys.argv)
output = jetson.utils.videoOutput(opt.output_URI, argv=sys.argv+is_headless)
#setup GPIO pins
GPIO.setmode(GPIO.BCM) # Broadcom (BCM) pin numbering
GPIO.setup(18, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(18, GPIO.HIGH)
GPIO.setup(17, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(17, GPIO.HIGH)
GPIO.setup(16, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(16, GPIO.HIGH)
GPIO.setup(20, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(20, GPIO.HIGH)
GPIO.setup(21, GPIO.OUT, initial=GPIO.HIGH)
GPIO.output(21, GPIO.HIGH)
def back():
GPIO.output(18, GPIO.LOW)
GPIO.output(17, GPIO.HIGH)
GPIO.output(16, GPIO.HIGH)
GPIO.output(20, GPIO.HIGH)
GPIO.output(21, GPIO.HIGH)
print("back")
def forward():
GPIO.output(18, GPIO.HIGH)
GPIO.output(17, GPIO.LOW)
GPIO.output(16, GPIO.HIGH)
GPIO.output(20, GPIO.HIGH)
GPIO.output(21, GPIO.HIGH)
print("forward")
def left():
GPIO.output(18, GPIO.HIGH)
GPIO.output(17, GPIO.HIGH)
GPIO.output(16, GPIO.LOW)
GPIO.output(20, GPIO.HIGH)
GPIO.output(21, GPIO.HIGH)
print("left")
def right():
GPIO.output(18, GPIO.HIGH)
GPIO.output(17, GPIO.HIGH)
GPIO.output(16, GPIO.HIGH)
GPIO.output(20, GPIO.LOW)
GPIO.output(21, GPIO.HIGH)
print("right")
def up():
GPIO.output(18, GPIO.HIGH)
GPIO.output(17, GPIO.HIGH)
GPIO.output(16, GPIO.HIGH)
GPIO.output(20, GPIO.HIGH)
GPIO.output(21, GPIO.LOW)
print("up")
def nothing():
GPIO.output(18, GPIO.HIGH)
GPIO.output(17, GPIO.HIGH)
GPIO.output(16, GPIO.HIGH)
GPIO.output(20, GPIO.HIGH)
GPIO.output(21, GPIO.HIGH)
print("nothing")
# declare detection state at module level
global index
global width
global location
global confidence
index = 0
width = 0
location = 0
confidence = 0
# process frames until the user exits
while True:
# capture the next image
img = input.Capture()
# detect objects in the image (with overlay)
detections = net.Detect(img, overlay=opt.overlay)
# print the detections
#print("detected {:d} objects in image".format(len(detections)))
# check for detections, otherwise nothing
if(len(detections) > 0):
print("object detected")
	if len(detections) > 0:
		# Use the first detection; the original loop re-read detections[0] on every pass
		index = detections[0].ClassID
		confidence = detections[0].Confidence
		width = detections[0].Width
		location = detections[0].Center[0]
# print index of item, width and horizonal location
print(index)
print(width)
print(location)
print(confidence)
# look for detections
if (index == 1 and confidence > 0.9):
back()
elif (index == 2 and confidence > 0.7):
forward()
elif (index == 3 and confidence > 0.7):
left()
elif (index == 4 and confidence > 0.7):
right()
elif (index == 5 and confidence > 0.7):
up()
else:
nothing() # nothing is detected
# render the image
output.Render(img)
# update the title bar
output.SetStatus("{:s} | Network {:.0f} FPS".format(opt.network, net.GetNetworkFPS()))
# print out performance info
#net.PrintProfilerTimes()
# exit on input/output EOS
if not input.IsStreaming() or not output.IsStreaming():
break
| 4,525 | Python | 24.570621 | 192 | 0.698122 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/calibrate_servos.py | from pupper.HardwareInterface import HardwareInterface
from pupper.Config import PWMParams, ServoParams
import numpy as np
import re
def get_motor_name(i, j):
motor_type = {0: "abduction", 1: "inner", 2: "outer"} # Top # Bottom
leg_pos = {0: "front-right", 1: "front-left", 2: "back-right", 3: "back-left"}
final_name = motor_type[i] + " " + leg_pos[j]
return final_name
def get_motor_setpoint(i, j):
data = np.array([[0, 0, 0, 0], [45, 45, 45, 45], [45, 45, 45, 45]])
return data[i, j]
def degrees_to_radians(input_array):
"""Converts degrees to radians.
Parameters
----------
input_array : Numpy array or float
Degrees
Returns
-------
Numpy array or float
Radians
"""
return input_array * np.pi / 180.0
def radians_to_degrees(input_array):
"""Converts degrees to radians.
Parameters
----------
input_array : Numpy array or float
Radians
Returns
-------
Numpy array or float
Degrees
"""
return input_array * 180.0 / np.pi
def step_until(hardware_interface, axis, leg, set_point):
"""Returns the angle offset needed to correct a given link by asking the user for input.
Returns
-------
Float
Angle offset needed to correct the link.
"""
found_position = False
set_names = ["horizontal", "horizontal", "vertical"]
offset = 0
while not found_position:
move_input = str(
input("Enter 'a' or 'b' to move the link until it is **" + set_names[axis] + "**. Enter 'd' when done. Input: "
)
)
if move_input == "a":
offset += 1.0
hardware_interface.set_actuator_position(
degrees_to_radians(set_point + offset),
axis,
leg,
)
elif move_input == "b":
offset -= 1.0
hardware_interface.set_actuator_position(
degrees_to_radians(set_point + offset),
axis,
leg,
)
elif move_input == "d":
found_position = True
print("Offset: ", offset)
return offset
def calibrate_angle_offset(hardware_interface):
"""Calibrate the angle offset for the twelve motors on the robot. Note that servo_params is modified in-place.
Parameters
----------
servo_params : ServoParams
Servo parameters. This variable is updated in-place.
pi_board : Pi
RaspberryPi object.
pwm_params : PWMParams
PWMParams object.
"""
# Found K value of (11.4)
print("The scaling constant for your servo represents how much you have to increase\nthe pwm pulse width (in microseconds) to rotate the servo output 1 degree.")
print("This value is currently set to: {:.3f}".format(degrees_to_radians(hardware_interface.servo_params.micros_per_rad)))
print("For newer CLS6336 and CLS6327 servos the value should be 11.333.")
ks = input("Press <Enter> to keep the current value, or enter a new value: ")
if ks != '':
k = float(ks)
hardware_interface.servo_params.micros_per_rad = k * 180 / np.pi
hardware_interface.servo_params.neutral_angle_degrees = np.zeros((3, 4))
for leg_index in range(4):
for axis in range(3):
# Loop until we're satisfied with the calibration
completed = False
while not completed:
motor_name = get_motor_name(axis, leg_index)
print("\n\nCalibrating the **" + motor_name + " motor **")
set_point = get_motor_setpoint(axis, leg_index)
# Zero out the neutral angle
hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index] = 0
# Move servo to set_point angle
hardware_interface.set_actuator_position(
degrees_to_radians(set_point),
axis,
leg_index,
)
# Adjust the angle using keyboard input until it matches the reference angle
offset = step_until(
hardware_interface, axis, leg_index, set_point
)
print("Final offset: ", offset)
# The upper leg link has a different equation because we're calibrating to make it horizontal, not vertical
if axis == 1:
hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index] = set_point - offset
else:
hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index] = -(set_point + offset)
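                # Worked example (hypothetical user inputs): for the upper link
                # (axis 1) with set_point = 45 and a found offset of -3, the
                # stored neutral angle is 45 - (-3) = 48 deg; for the lower link
                # (axis 2) with offset = 2 it is -(45 + 2) = -47 deg.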
print("Calibrated neutral angle: ", hardware_interface.servo_params.neutral_angle_degrees[axis, leg_index])
# Send the servo command using the new beta value and check that it's ok
hardware_interface.set_actuator_position(
degrees_to_radians([0, 45, -45][axis]),
axis,
leg_index,
)
okay = ""
prompt = "The leg should be at exactly **" + ["horizontal", "45 degrees", "45 degrees"][axis] + "**. Are you satisfied? Enter 'yes' or 'no': "
while okay not in ["y", "n", "yes", "no"]:
okay = str(
input(prompt)
)
completed = okay == "y" or okay == "yes"
def overwrite_ServoCalibration_file(servo_params):
preamble = """# WARNING: This file is machine generated. Edit at your own risk.
import numpy as np
"""
    # Format the array string into valid np.array(...) source text
    p1 = re.compile(r"([0-9]\.) ( *)")  # pattern to replace the space that follows each number with a comma
    partially_formatted_matrix = p1.sub(r"\1,\2", str(servo_params.neutral_angle_degrees))
    p2 = re.compile(r"(\]\n)")  # pattern to add a comma at the end of each matrix row
    formatted_matrix_with_required_commas = p2.sub("],\n", partially_formatted_matrix)
# Overwrite pupper/ServoCalibration.py file with modified values
with open("pupper/ServoCalibration.py", "w") as f:
print(preamble, file = f)
print("MICROS_PER_RAD = {:.3f} * 180.0 / np.pi".format(degrees_to_radians(servo_params.micros_per_rad)), file = f)
print("NEUTRAL_ANGLE_DEGREES = np.array(", file = f)
print(formatted_matrix_with_required_commas, file = f)
print(")", file = f)
def main():
"""Main program
"""
hardware_interface = HardwareInterface()
calibrate_angle_offset(hardware_interface)
overwrite_ServoCalibration_file(hardware_interface.servo_params)
print("\n\n CALIBRATION COMPLETE!\n")
print("Calibrated neutral angles:")
print(hardware_interface.servo_params.neutral_angle_degrees)
if __name__ == "__main__":
    main()
| 6,862 | Python | 34.376288 | 165 | 0.581609 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/run_robot.py | import numpy as np
import time
from src.IMU import IMU
from src.Controller import Controller
from src.JoystickInterface import JoystickInterface
from src.State import State
from pupper.HardwareInterface import HardwareInterface
from pupper.Config import Configuration
from pupper.Kinematics import four_legs_inverse_kinematics
def main(use_imu=False):
"""Main program
"""
# Create config
config = Configuration()
hardware_interface = HardwareInterface()
# Create imu handle
if use_imu:
imu = IMU(port="/dev/ttyACM0")
imu.flush_buffer()
# Create controller and user input handles
controller = Controller(
config,
four_legs_inverse_kinematics,
)
state = State()
print("Creating joystick listener...")
joystick_interface = JoystickInterface(config)
print("Done.")
last_loop = time.time()
print("Summary of gait parameters:")
print("overlap time: ", config.overlap_time)
print("swing time: ", config.swing_time)
print("z clearance: ", config.z_clearance)
print("x shift: ", config.x_shift)
# Wait until the activate button has been pressed
while True:
print("Waiting for L1 to activate robot.")
while True:
command = joystick_interface.get_command(state)
joystick_interface.set_color(config.ps4_deactivated_color)
if command.activate_event == 1:
break
time.sleep(0.1)
print("Robot activated.")
joystick_interface.set_color(config.ps4_color)
while True:
now = time.time()
if now - last_loop < config.dt:
continue
last_loop = time.time()
# Parse the udp joystick commands and then update the robot controller's parameters
command = joystick_interface.get_command(state)
if command.activate_event == 1:
print("Deactivating Robot")
break
# Read imu data. Orientation will be None if no data was available
quat_orientation = (
imu.read_orientation() if use_imu else np.array([1, 0, 0, 0])
)
state.quat_orientation = quat_orientation
# Step the controller forward by dt
controller.run(state, command)
# Update the pwm widths going to the servos
hardware_interface.set_actuator_postions(state.joint_angles)
if __name__ == "__main__":
    main()
| 2,473 | Python | 29.925 | 95 | 0.627982 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/Gaits.py | class GaitController:
def __init__(self, config):
self.config = config
def phase_index(self, ticks):
"""Calculates which part of the gait cycle the robot should be in given the time in ticks.
Parameters
----------
ticks : int
Number of timesteps since the program started
gaitparams : GaitParams
GaitParams object
Returns
-------
Int
The index of the gait phase that the robot should be in.
"""
phase_time = ticks % self.config.phase_length
phase_sum = 0
for i in range(self.config.num_phases):
phase_sum += self.config.phase_ticks[i]
if phase_time < phase_sum:
return i
        assert False, "phase_time should always fall within one of the gait phases"
def subphase_ticks(self, ticks):
"""Calculates the number of ticks (timesteps) since the start of the current phase.
Parameters
----------
ticks : Int
Number of timesteps since the program started
gaitparams : GaitParams
GaitParams object
Returns
-------
Int
Number of ticks since the start of the current phase.
"""
phase_time = ticks % self.config.phase_length
phase_sum = 0
subphase_ticks = 0
for i in range(self.config.num_phases):
phase_sum += self.config.phase_ticks[i]
if phase_time < phase_sum:
subphase_ticks = phase_time - phase_sum + self.config.phase_ticks[i]
return subphase_ticks
        assert False, "phase_time should always fall within one of the gait phases"
def contacts(self, ticks):
"""Calculates which feet should be in contact at the given number of ticks
Parameters
----------
ticks : Int
Number of timesteps since the program started.
gaitparams : GaitParams
GaitParams object
Returns
-------
numpy array (4,)
Numpy vector with 0 indicating flight and 1 indicating stance.
"""
return self.config.contact_phases[:, self.phase_index(ticks)]
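# Usage sketch with the default Configuration (dt = 0.01 s, 10 overlap ticks,
# 15 swing ticks per phase): phase_length = 50 ticks, a 0.5 s gait cycle, and
# GaitController(config).contacts(0) returns column 0 of config.contact_phases,
# i.e. all four feet in stance.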
| 2,154 | Python | 28.930555 | 98 | 0.545032 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/Command.py | import numpy as np
class Command:
"""Stores movement command
"""
def __init__(self):
self.horizontal_velocity = np.array([0, 0])
self.yaw_rate = 0.0
self.height = -0.16
self.pitch = 0.0
self.roll = 0.0
self.activation = 0
self.hop_event = False
self.trot_event = False
self.activate_event = False | 392 | Python | 20.833332 | 51 | 0.533163 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/SwingLegController.py | import numpy as np
from transforms3d.euler import euler2mat
class SwingController:
def __init__(self, config):
self.config = config
def raibert_touchdown_location(
self, leg_index, command
):
delta_p_2d = (
self.config.alpha
* self.config.stance_ticks
* self.config.dt
* command.horizontal_velocity
)
delta_p = np.array([delta_p_2d[0], delta_p_2d[1], 0])
theta = (
self.config.beta
* self.config.stance_ticks
* self.config.dt
* command.yaw_rate
)
R = euler2mat(0, 0, theta)
return R @ self.config.default_stance[:, leg_index] + delta_p
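    # Raibert heuristic in brief: the touchdown point leads the default stance
    # by alpha * stance_time * v_xy and is rotated by beta * stance_time *
    # yaw_rate. With the default Configuration (alpha = 0.5, stance time =
    # 0.35 s) and v = 0.2 m/s, the foot lands 0.035 m ahead of default stance.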
    def swing_height(self, swing_phase, triangular=True):
        # Triangular height profile: rises linearly to z_clearance at mid-swing,
        # then falls back to zero. Only this profile is implemented, so fail
        # loudly instead of returning an undefined value.
        assert triangular, "only the triangular swing height profile is implemented"
        if swing_phase < 0.5:
            swing_height_ = swing_phase / 0.5 * self.config.z_clearance
        else:
            swing_height_ = self.config.z_clearance * (1 - (swing_phase - 0.5) / 0.5)
        return swing_height_
def next_foot_location(
self,
swing_prop,
leg_index,
state,
command,
):
assert swing_prop >= 0 and swing_prop <= 1
foot_location = state.foot_locations[:, leg_index]
swing_height_ = self.swing_height(swing_prop)
touchdown_location = self.raibert_touchdown_location(leg_index, command)
time_left = self.config.dt * self.config.swing_ticks * (1.0 - swing_prop)
v = (touchdown_location - foot_location) / time_left * np.array([1, 1, 0])
delta_foot_location = v * self.config.dt
z_vector = np.array([0, 0, swing_height_ + command.height])
return foot_location * np.array([1, 1, 0]) + z_vector + delta_foot_location
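# Note: the xy components step toward the Raibert touchdown target at constant
# velocity over the remaining swing time, while z is set directly from the
# triangular height profile plus the commanded body height.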
| 1,781 | Python | 32.622641 | 89 | 0.563167 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/State.py | import numpy as np
from enum import Enum
class State:
def __init__(self):
self.horizontal_velocity = np.array([0.0, 0.0])
self.yaw_rate = 0.0
self.height = -0.16
self.pitch = 0.0
self.roll = 0.0
self.activation = 0
self.behavior_state = BehaviorState.REST
self.ticks = 0
self.foot_locations = np.zeros((3, 4))
self.joint_angles = np.zeros((3, 4))
class BehaviorState(Enum):
DEACTIVATED = -1
REST = 0
TROT = 1
HOP = 2
FINISHHOP = 3 | 589 | Python | 20.851851 | 55 | 0.568761 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/StanceController.py | import numpy as np
from transforms3d.euler import euler2mat
class StanceController:
def __init__(self, config):
self.config = config
def position_delta(self, leg_index, state, command):
"""Calculate the difference between the next desired body location and the current body location
Parameters
----------
z_measured : float
Z coordinate of the feet relative to the body.
stance_params : StanceParams
Stance parameters object.
movement_reference : MovementReference
Movement reference object.
gait_params : GaitParams
Gait parameters object.
Returns
-------
(Numpy array (3), Numpy array (3, 3))
(Position increment, rotation matrix increment)
"""
z = state.foot_locations[2, leg_index]
v_xy = np.array(
[
-command.horizontal_velocity[0],
-command.horizontal_velocity[1],
1.0
/ self.config.z_time_constant
* (state.height - z),
]
)
delta_p = v_xy * self.config.dt
delta_R = euler2mat(0, 0, -command.yaw_rate * self.config.dt)
return (delta_p, delta_R)
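    # The z component acts as a proportional controller: stance feet are driven
    # toward the commanded body height with time constant z_time_constant
    # (0.02 s in the default Configuration).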
# TODO: put current foot location into state
def next_foot_location(self, leg_index, state, command):
foot_location = state.foot_locations[:, leg_index]
(delta_p, delta_R) = self.position_delta(leg_index, state, command)
incremented_location = delta_R @ foot_location + delta_p
return incremented_location
| 1,628 | Python | 32.244897 | 104 | 0.57801 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/IMU.py | import serial
import numpy as np
import time
class IMU:
def __init__(self, port, baudrate=500000):
self.serial_handle = serial.Serial(
port=port,
baudrate=baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=0,
)
self.last_quat = np.array([1, 0, 0, 0])
self.start_time = time.time()
def flush_buffer(self):
self.serial_handle.reset_input_buffer()
def read_orientation(self):
"""Reads quaternion measurements from the Teensy until none are left. Returns the last read quaternion.
Parameters
----------
serial_handle : Serial object
Handle to the pyserial Serial object
Returns
-------
np array (4,)
If there was quaternion data to read on the serial port returns the quaternion as a numpy array, otherwise returns the last read quaternion.
"""
while True:
x = self.serial_handle.readline().decode("utf").strip()
if x is "" or x is None:
return self.last_quat
else:
parsed = x.split(",")
if len(parsed) == 4:
self.last_quat = np.array(parsed, dtype=np.float64)
else:
print("Did not receive 4-vector from imu")
| 1,440 | Python | 30.326086 | 152 | 0.543056 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/Tests.py | # using LinearAlgebra
# using Profile
# using StaticArrays
# using Plots
# using BenchmarkTools
# include("Kinematics.jl")
# include("PupperConfig.jl")
# include("Gait.jl")
# include("StanceController.jl")
# include("SwingLegController.jl")
# include("Types.jl")
# include("Controller.jl")
import numpy as np
import matplotlib.pyplot as plt
from Kinematics import leg_explicit_inverse_kinematics
from PupperConfig import *
from Gaits import *
from StanceController import position_delta, stance_foot_location
from SwingLegController import *
from Types import MovementReference, GaitParams, StanceParams, SwingParams
from Controller import *
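# NOTE: these tests target an older module layout (PupperConfig, Types, and
# module-level position_delta / stance_foot_location / run); they will not run
# unmodified against the class-based modules under src/ and pupper/.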
# function round_(a, dec)
# return map(x -> round(x, digits=dec), a)
# end
# function testInverseKinematicsExplicit!()
# println("\n-------------- Testing Inverse Kinematics -----------")
# config = PupperConfig()
# println("\nTesting Inverse Kinematics")
# function testHelper(r, alpha_true, i; do_assert=true)
# eps = 1e-6
# @time α = leg_explicitinversekinematics_prismatic(r, i, config)
# println("Leg ", i, ": r: ", r, " -> α: ", α)
# if do_assert
# @assert norm(α - alpha_true) < eps
# end
# end
# c = config.LEG_L/sqrt(2)
# offset = config.ABDUCTION_OFFSET
# testHelper(SVector(0, offset, -0.125), SVector(0, 0, 0), 2)
# testHelper(SVector(c, offset, -c), SVector(0, -pi/4, 0), 2)
# testHelper(SVector(-c, offset, -c), SVector(0, pi/4, 0), 2)
# testHelper(SVector(0, c, -c), missing, 2, do_assert=false)
# testHelper(SVector(-c, -offset, -c), [0, pi/4, 0], 1)
# testHelper(SVector(config.LEG_L * sqrt(3)/2, offset, -config.LEG_L / 2), SVector(0, -pi/3, 0), 2)
# end
def test_inverse_kinematics_linkage():
print("\n-------------- Testing Five-bar Linkage Inverse Kinematics -----------")
config = PupperConfig()
print("\nTesting Inverse Kinematics")
def testHelper(r, alpha_true, i, do_assert=True):
eps = 1e-6
alpha = leg_explicit_inverse_kinematics(r, i, config)
print("Leg ", i, ": r: ", r, " -> α: ", alpha)
if do_assert:
assert np.linalg.norm(alpha - alpha_true) < eps
c = config.LEG_L / (2 ** 0.5)
offset = config.ABDUCTION_OFFSET
testHelper(np.array([0, offset, -0.125]), None, 1, do_assert=False)
testHelper(np.array([c, offset, -c]), None, 1, do_assert=False)
testHelper(np.array([-c, offset, -c]), None, 1, do_assert=False)
testHelper(np.array([0, c, -c]), None, 1, do_assert=False)
testHelper(np.array([-c, -offset, -c]), None, 0, do_assert=False)
testHelper(
np.array([config.LEG_L * (3 ** 0.5) / 2, offset, -config.LEG_L / 2]),
None,
1,
do_assert=False,
)
# function testForwardKinematics!()
# println("\n-------------- Testing Forward Kinematics -----------")
# config = PupperConfig()
# println("\nTesting Forward Kinematics")
# function testHelper(alpha, r_true, i; do_assert=true)
# eps = 1e-6
# r = zeros(3)
# println("Vectors")
# a = [alpha.data...]
# @time legForwardKinematics!(r, a, i, config)
# println("SVectors")
# @time r = legForwardKinematics(alpha, i, config)
# println("Leg ", i, ": α: ", alpha, " -> r: ", r)
# if do_assert
# @assert norm(r_true - r) < eps
# end
# end
# l = config.LEG_L
# offset = config.ABDUCTION_OFFSET
# testHelper(SVector{3}([0.0, 0.0, 0.0]), SVector{3}([0, offset, -l]), 2)
# testHelper(SVector{3}([0.0, pi/4, 0.0]), missing, 2, do_assert=false)
# # testHelper([0.0, 0.0, 0.0], [0, offset, -l], 2)
# # testHelper([0.0, pi/4, 0.0], missing, 2, do_assert=false)
# end
# function testForwardInverseAgreeance()
# println("\n-------------- Testing Forward/Inverse Consistency -----------")
# config = PupperConfig()
# println("\nTest forward/inverse consistency")
# eps = 1e-6
# for i in 1:10
# alpha = SVector(rand()-0.5, rand()-0.5, (rand()-0.5)*0.05)
# leg = rand(1:4)
# @time r = legForwardKinematics(alpha, leg, config)
# # @code_warntype legForwardKinematics!(r, alpha, leg, config)
# @time alpha_prime = leg_explicitinversekinematics_prismatic(r, leg, config)
# # @code_warntype inverseKinematicsExplicit!(alpha_prime, r, leg, config)
# println("Leg ", leg, ": α: ", round_(alpha, 3), " -> r_body_foot: ", round_(r, 3), " -> α': ", round_(alpha_prime, 3))
# @assert norm(alpha_prime - alpha) < eps
# end
# end
# function testAllInverseKinematics()
# println("\n-------------- Testing Four Leg Inverse Kinematics -----------")
# function helper(r_body, alpha_true; do_assert=true)
# println("Timing for fourlegs_inversekinematics")
# config = PupperConfig()
# @time alpha = fourlegs_inversekinematics(SMatrix(r_body), config)
# @code_warntype fourlegs_inversekinematics(SMatrix(r_body), config)
# println("r: ", r_body, " -> α: ", alpha)
# if do_assert
# @assert norm(alpha - alpha_true) < 1e-10
# end
# end
# config = PupperConfig()
# f = config.LEG_FB
# l = config.LEG_LR
# s = -0.125
# o = config.ABDUCTION_OFFSET
# r_body = MMatrix{3,4}(zeros(3,4))
# r_body[:,1] = [f, -l-o, s]
# r_body[:,2] = [f, l+o, s]
# r_body[:,3] = [-f, -l-o, s]
# r_body[:,4] = [-f, l+o, s]
# helper(r_body, zeros(3,4))
# helper(SMatrix{3,4}(zeros(3,4)), missing, do_assert=false)
# end
# function testKinematics()
# testInverseKinematicsExplicit!()
# testForwardKinematics!()
# testForwardInverseAgreeance()
# testAllInverseKinematics()
# end
# function testGait()
# println("\n-------------- Testing Gait -----------")
# p = GaitParams()
# # println("Gait params=",p)
# t = 680
# println("Timing for phaseindex")
# @time ph = phaseindex(t, p)
# # @code_warntype phaseindex(t, p)
# println("t=",t," phase=",ph)
# @assert ph == 4
# @assert phaseindex(0, p) == 1
# println("Timing for contacts")
# @time c = contacts(t, p)
# # @code_warntype contacts(t, p)
# @assert typeof(c) == SArray{Tuple{4},Int64,1,4}
# println("t=", t, " contacts=", c)
# end
def test_stance_controller():
print("\n-------------- Testing Stance Controller -----------")
stanceparams = StanceParams()
gaitparams = GaitParams()
zmeas = -0.20
mvref = MovementReference()
dp, dR = position_delta(zmeas, stanceparams, mvref, gaitparams)
assert np.linalg.norm(dR - np.eye(3)) < 1e-10
assert np.linalg.norm(dp - np.array([0, 0, gaitparams.dt * 0.04])) < 1e-10
zmeas = -0.18
mvref = MovementReference()
mvref.v_xy_ref = np.array([1.0, 0.0])
mvref.z_ref = -0.18
dp, dR = position_delta(zmeas, stanceparams, mvref, gaitparams)
zmeas = -0.20
mvref = MovementReference()
mvref.wz_ref = 1.0
mvref.z_ref = -0.20
dp, dR = position_delta(zmeas, stanceparams, mvref, gaitparams)
assert np.linalg.norm(dp - np.array([0, 0, 0])) < 1e-10
assert np.linalg.norm(dR[0, 1] - (gaitparams.dt)) < 1e-6
stancefootloc = np.zeros(3)
sloc = stance_foot_location(stancefootloc, stanceparams, gaitparams, mvref)
# function typeswinglegcontroller()
# println("\n--------------- Code warn type for raibert_tdlocation[s] ----------")
# swp = SwingParams()
# stp = StanceParams()
# gp = GaitParams()
# mvref = MovementReference(SVector(1.0, 0.0), 0, -0.18)
# raibert_tdlocations(swp, stp, gp, mvref)
# mvref = MovementReference(SVector(1.0, 0.0), 0, -0.18)
# raibert_tdlocation(1, swp, stp, gp, mvref)
# end
# function TestSwingLegController()
# println("\n-------------- Testing Swing Leg Controller -----------")
# swp = SwingParams()
# stp = StanceParams()
# gp = GaitParams()
# p = ControllerParams()
# println("Timing for swingheight:")
# @time z = swingheight(0.5, swp)
# println("z clearance at t=1/2swingtime =>",z)
# @assert abs(z - swp.zclearance) < 1e-10
# println("Timing for swingheight:")
# @time z = swingheight(0, swp)
# println("Z clearance at t=0 =>",z)
# @assert abs(z) < 1e-10
# mvref = MovementReference(SVector(1.0, 0.0), 0, -0.18)
# println("Timing for raibert tdlocation*s*:")
# @time l = raibert_tdlocations(swp, stp, gp, mvref)
# target = stp.defaultstance .+ [gp.stanceticks*gp.dt*0.5*1, 0, 0]
# println("Touchdown locations =>", l, " <?=> ", target)
# @assert norm(l - target) <= 1e-10
# mvref = MovementReference(SVector(1.0, 0.0), 0, -0.18)
# println("Timing for raibert tdlocation:")
# @time l = raibert_tdlocation(1, swp, stp, gp, mvref)
# fcurrent = SMatrix{3, 4, Float64}(stp.defaultstance)
# mvref = MovementReference()
# tswing = 0.125
# println("Timing for swingfootlocation*s* increment")
# @time l = swingfootlocations(tswing, fcurrent, swp, stp, gp, mvref)
# println(l)
# fcurrent = SVector{3, Float64}(0.0, 0.0, 0.0)
# println("Timing for swingfootlocation")
# @time swingfootlocation(tswing, fcurrent, 1, swp, stp, gp, mvref)
# typeswinglegcontroller()
# return nothing
# end
def test_run():
print("Run timing")
foot_loc_history, joint_angle_history = run()
plt.subplot(211)
x = plt.plot(foot_loc_history[0, :, :].T, label="x")
y = plt.plot(foot_loc_history[1, :, :].T, label="y")
z = plt.plot(foot_loc_history[2, :, :].T, label="z")
plt.subplot(212)
alpha = plt.plot(joint_angle_history[0, :, :].T, label="alpha")
beta = plt.plot(joint_angle_history[1, :, :].T, label="beta")
gamma = plt.plot(joint_angle_history[2, :, :].T, label="gamma")
plt.show()
# plot(x, β, y, α, z, γ, layout=(3,2), legend=false))
# function teststep()
# swingparams = SwingParams()
# stanceparams = StanceParams()
# gaitparams = GaitParams()
# mvref = MovementReference(vxyref=SVector{2}(0.2, 0.0), wzref=0.0)
# conparams = ControllerParams()
# robotconfig = PupperConfig()
# footlocations::SMatrix{3, 4, Float64, 12} = stanceparams.defaultstance .+ SVector{3, Float64}(0, 0, mvref.zref)
# ticks = 1
# println("Timing for step!")
# @btime step($ticks, $footlocations, $swingparams, $stanceparams, $gaitparams, $mvref, $conparams)
# @code_warntype step(ticks, footlocations, swingparams, stanceparams, gaitparams, mvref, conparams)
# end
# # testGait()
# # testKinematics()
# # TestStanceController()
# # testStaticArrays()
# # TestSwingLegController()
# test_inversekinematics_linkage()
# # teststep()
# # testrun()
test_inverse_kinematics_linkage()
test_stance_controller()
test_run()
| 10,778 | Python | 33.548077 | 128 | 0.59705 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/JoystickInterface.py | import UDPComms
import numpy as np
import time
from src.State import BehaviorState, State
from src.Command import Command
from src.Utilities import deadband, clipped_first_order_filter
class JoystickInterface:
def __init__(
        self, config, udp_port=8830, udp_publisher_port=8840,
):
self.config = config
self.previous_gait_toggle = 0
self.previous_state = BehaviorState.REST
self.previous_hop_toggle = 0
self.previous_activate_toggle = 0
self.message_rate = 50
self.udp_handle = UDPComms.Subscriber(udp_port, timeout=0.3)
self.udp_publisher = UDPComms.Publisher(udp_publisher_port)
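    # Expected keys in the incoming UDP joystick message (read in get_command):
    # "R1", "x", "L1", "ly", "lx", "rx", "ry", "dpady", "dpadx", "message_rate".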
def get_command(self, state, do_print=False):
try:
msg = self.udp_handle.get()
command = Command()
####### Handle discrete commands ########
# Check if requesting a state transition to trotting, or from trotting to resting
gait_toggle = msg["R1"]
command.trot_event = (gait_toggle == 1 and self.previous_gait_toggle == 0)
# Check if requesting a state transition to hopping, from trotting or resting
hop_toggle = msg["x"]
command.hop_event = (hop_toggle == 1 and self.previous_hop_toggle == 0)
activate_toggle = msg["L1"]
command.activate_event = (activate_toggle == 1 and self.previous_activate_toggle == 0)
# Update previous values for toggles and state
self.previous_gait_toggle = gait_toggle
self.previous_hop_toggle = hop_toggle
self.previous_activate_toggle = activate_toggle
####### Handle continuous commands ########
x_vel = msg["ly"] * self.config.max_x_velocity
y_vel = msg["lx"] * -self.config.max_y_velocity
command.horizontal_velocity = np.array([x_vel, y_vel])
command.yaw_rate = msg["rx"] * -self.config.max_yaw_rate
message_rate = msg["message_rate"]
message_dt = 1.0 / message_rate
pitch = msg["ry"] * self.config.max_pitch
deadbanded_pitch = deadband(
pitch, self.config.pitch_deadband
)
pitch_rate = clipped_first_order_filter(
state.pitch,
deadbanded_pitch,
self.config.max_pitch_rate,
self.config.pitch_time_constant,
)
command.pitch = state.pitch + message_dt * pitch_rate
height_movement = msg["dpady"]
command.height = state.height - message_dt * self.config.z_speed * height_movement
roll_movement = - msg["dpadx"]
command.roll = state.roll + message_dt * self.config.roll_speed * roll_movement
return command
except UDPComms.timeout:
if do_print:
print("UDP Timed out")
return Command()
def set_color(self, color):
joystick_msg = {"ps4_color": color}
self.udp_publisher.send(joystick_msg) | 3,099 | Python | 36.349397 | 98 | 0.575992 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/Utilities.py | import numpy as np
def deadband(value, band_radius):
return max(value - band_radius, 0) + min(value + band_radius, 0)
def clipped_first_order_filter(input, target, max_rate, tau):
rate = (target - input) / tau
return np.clip(rate, -max_rate, max_rate)
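# Examples: deadband(0.01, 0.02) == 0.0 (inside the band) and
# deadband(0.05, 0.02) == 0.03 (shifted toward zero);
# clipped_first_order_filter(0.0, 1.0, max_rate=0.5, tau=4.0) == 0.25.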
| 268 | Python | 23.454543 | 68 | 0.671642 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/src/Controller.py | from src.Gaits import GaitController
from src.StanceController import StanceController
from src.SwingLegController import SwingController
from src.Utilities import clipped_first_order_filter
from src.State import BehaviorState, State
import numpy as np
from transforms3d.euler import euler2mat, quat2euler
class Controller:
"""Controller and planner object
"""
def __init__(
self,
config,
inverse_kinematics,
):
self.config = config
self.smoothed_yaw = 0.0 # for REST mode only
self.inverse_kinematics = inverse_kinematics
self.contact_modes = np.zeros(4)
self.gait_controller = GaitController(self.config)
self.swing_controller = SwingController(self.config)
self.stance_controller = StanceController(self.config)
self.hop_transition_mapping = {BehaviorState.REST: BehaviorState.HOP, BehaviorState.HOP: BehaviorState.FINISHHOP, BehaviorState.FINISHHOP: BehaviorState.REST, BehaviorState.TROT: BehaviorState.HOP}
self.trot_transition_mapping = {BehaviorState.REST: BehaviorState.TROT, BehaviorState.TROT: BehaviorState.REST, BehaviorState.HOP: BehaviorState.TROT, BehaviorState.FINISHHOP: BehaviorState.TROT}
self.activate_transition_mapping = {BehaviorState.DEACTIVATED: BehaviorState.REST, BehaviorState.REST: BehaviorState.DEACTIVATED}
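        # Event-driven behavior FSM: trot_event toggles REST <-> TROT (HOP and
        # FINISHHOP also exit to TROT), hop_event steps REST -> HOP ->
        # FINISHHOP -> REST, and activate_event toggles DEACTIVATED <-> REST.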
def step_gait(self, state, command):
"""Calculate the desired foot locations for the next timestep
Returns
-------
Numpy array (3, 4)
Matrix of new foot locations.
"""
contact_modes = self.gait_controller.contacts(state.ticks)
new_foot_locations = np.zeros((3, 4))
for leg_index in range(4):
contact_mode = contact_modes[leg_index]
foot_location = state.foot_locations[:, leg_index]
if contact_mode == 1:
new_location = self.stance_controller.next_foot_location(leg_index, state, command)
else:
swing_proportion = (
self.gait_controller.subphase_ticks(state.ticks) / self.config.swing_ticks
)
new_location = self.swing_controller.next_foot_location(
swing_proportion,
leg_index,
state,
command
)
new_foot_locations[:, leg_index] = new_location
return new_foot_locations, contact_modes
def run(self, state, command):
"""Steps the controller forward one timestep
Parameters
----------
controller : Controller
Robot controller object.
"""
########## Update operating state based on command ######
if command.activate_event:
state.behavior_state = self.activate_transition_mapping[state.behavior_state]
elif command.trot_event:
state.behavior_state = self.trot_transition_mapping[state.behavior_state]
elif command.hop_event:
state.behavior_state = self.hop_transition_mapping[state.behavior_state]
if state.behavior_state == BehaviorState.TROT:
state.foot_locations, contact_modes = self.step_gait(
state,
command,
)
# Apply the desired body rotation
rotated_foot_locations = (
euler2mat(
command.roll, command.pitch, 0.0
)
@ state.foot_locations
)
# Construct foot rotation matrix to compensate for body tilt
(roll, pitch, yaw) = quat2euler(state.quat_orientation)
correction_factor = 0.8
max_tilt = 0.4
roll_compensation = correction_factor * np.clip(roll, -max_tilt, max_tilt)
pitch_compensation = correction_factor * np.clip(pitch, -max_tilt, max_tilt)
rmat = euler2mat(roll_compensation, pitch_compensation, 0)
rotated_foot_locations = rmat.T @ rotated_foot_locations
state.joint_angles = self.inverse_kinematics(
rotated_foot_locations, self.config
)
elif state.behavior_state == BehaviorState.HOP:
state.foot_locations = (
self.config.default_stance
+ np.array([0, 0, -0.09])[:, np.newaxis]
)
state.joint_angles = self.inverse_kinematics(
state.foot_locations, self.config
)
elif state.behavior_state == BehaviorState.FINISHHOP:
state.foot_locations = (
self.config.default_stance
+ np.array([0, 0, -0.22])[:, np.newaxis]
)
state.joint_angles = self.inverse_kinematics(
state.foot_locations, self.config
)
elif state.behavior_state == BehaviorState.REST:
yaw_proportion = command.yaw_rate / self.config.max_yaw_rate
self.smoothed_yaw += (
self.config.dt
* clipped_first_order_filter(
self.smoothed_yaw,
yaw_proportion * -self.config.max_stance_yaw,
self.config.max_stance_yaw_rate,
self.config.yaw_time_constant,
)
)
# Set the foot locations to the default stance plus the standard height
state.foot_locations = (
self.config.default_stance
+ np.array([0, 0, command.height])[:, np.newaxis]
)
# Apply the desired body rotation
rotated_foot_locations = (
euler2mat(
command.roll,
command.pitch,
self.smoothed_yaw,
)
@ state.foot_locations
)
state.joint_angles = self.inverse_kinematics(
rotated_foot_locations, self.config
)
state.ticks += 1
state.pitch = command.pitch
state.roll = command.roll
state.height = command.height
    def set_pose_to_default(self, state):
        state.foot_locations = (
            self.config.default_stance
            + np.array([0, 0, self.config.default_z_ref])[:, np.newaxis]
        )
        state.joint_angles = self.inverse_kinematics(
            state.foot_locations, self.config
        )
| 6,547 | Python | 37.292397 | 205 | 0.583168 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/pupper/ServoCalibration.py | # WARNING: This file is machine generated. Edit at your own risk.
import numpy as np
MICROS_PER_RAD = 11.333 * 180.0 / np.pi
NEUTRAL_ANGLE_DEGREES = np.array(
[[ 0., 0., 0., 0.],
[ 45., 45., 45., 45.],
[-45.,-45.,-45.,-45.]]
)
| 236 | Python | 18.749998 | 65 | 0.576271 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/pupper/HardwareInterface.py | import pigpio
from pupper.Config import ServoParams, PWMParams
class HardwareInterface:
def __init__(self):
self.pi = pigpio.pi()
self.pwm_params = PWMParams()
self.servo_params = ServoParams()
initialize_pwm(self.pi, self.pwm_params)
def set_actuator_postions(self, joint_angles):
send_servo_commands(self.pi, self.pwm_params, self.servo_params, joint_angles)
def set_actuator_position(self, joint_angle, axis, leg):
send_servo_command(self.pi, self.pwm_params, self.servo_params, joint_angle, axis, leg)
def pwm_to_duty_cycle(pulsewidth_micros, pwm_params):
"""Converts a pwm signal (measured in microseconds) to a corresponding duty cycle on the gpio pwm pin
Parameters
----------
pulsewidth_micros : float
Width of the pwm signal in microseconds
pwm_params : PWMParams
PWMParams object
Returns
-------
    int
PWM duty cycle corresponding to the pulse width
"""
return int(pulsewidth_micros / 1e6 * pwm_params.freq * pwm_params.range)
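# Note: with the default PWMParams (freq = 250 Hz, range = 4000) the product
# freq * range equals 1e6, so the duty-cycle value is numerically equal to the
# pulse width in microseconds, e.g. pwm_to_duty_cycle(1500, PWMParams()) -> 1500.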
def angle_to_pwm(angle, servo_params, axis_index, leg_index):
"""Converts a desired servo angle into the corresponding PWM command
Parameters
----------
angle : float
Desired servo angle, relative to the vertical (z) axis
servo_params : ServoParams
ServoParams object
axis_index : int
Specifies which joint of leg to control. 0 is abduction servo, 1 is inner hip servo, 2 is outer hip servo.
leg_index : int
Specifies which leg to control. 0 is front-right, 1 is front-left, 2 is back-right, 3 is back-left.
Returns
-------
float
PWM width in microseconds
"""
angle_deviation = (
angle - servo_params.neutral_angles[axis_index, leg_index]
) * servo_params.servo_multipliers[axis_index, leg_index]
pulse_width_micros = (
servo_params.neutral_position_pwm
+ servo_params.micros_per_rad * angle_deviation
)
return pulse_width_micros
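# Worked example with the shipped calibration (MICROS_PER_RAD = 11.333 * 180/pi
# ≈ 649.3 us/rad), a zero neutral angle, and a +1 multiplier: a command of
# 0.1 rad maps to roughly 1500 + 65 ≈ 1565 us.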
def angle_to_duty_cycle(angle, pwm_params, servo_params, axis_index, leg_index):
return pwm_to_duty_cycle(
angle_to_pwm(angle, servo_params, axis_index, leg_index), pwm_params
)
def initialize_pwm(pi, pwm_params):
for leg_index in range(4):
for axis_index in range(3):
pi.set_PWM_frequency(
pwm_params.pins[axis_index, leg_index], pwm_params.freq
)
pi.set_PWM_range(pwm_params.pins[axis_index, leg_index], pwm_params.range)
def send_servo_commands(pi, pwm_params, servo_params, joint_angles):
for leg_index in range(4):
for axis_index in range(3):
duty_cycle = angle_to_duty_cycle(
joint_angles[axis_index, leg_index],
pwm_params,
servo_params,
axis_index,
leg_index,
)
pi.set_PWM_dutycycle(pwm_params.pins[axis_index, leg_index], duty_cycle)
def send_servo_command(pi, pwm_params, servo_params, joint_angle, axis, leg):
duty_cycle = angle_to_duty_cycle(joint_angle, pwm_params, servo_params, axis, leg)
pi.set_PWM_dutycycle(pwm_params.pins[axis, leg], duty_cycle)
def deactivate_servos(pi, pwm_params):
for leg_index in range(4):
for axis_index in range(3):
pi.set_PWM_dutycycle(pwm_params.pins[axis_index, leg_index], 0)
| 3,408 | Python | 32.097087 | 114 | 0.639085 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/pupper/Kinematics.py | import numpy as np
from transforms3d.euler import euler2mat
def leg_explicit_inverse_kinematics(r_body_foot, leg_index, config):
"""Find the joint angles corresponding to the given body-relative foot position for a given leg and configuration
Parameters
----------
    r_body_foot : numpy array (3,)
        Foot position relative to the leg origin, in the body frame.
    leg_index : int
        Index of the leg (0 is front-right, 1 is front-left, 2 is back-right, 3 is back-left).
    config : Config object
        Object of robot configuration parameters.
Returns
-------
numpy array (3)
Array of corresponding joint angles.
"""
(x, y, z) = r_body_foot
# Distance from the leg origin to the foot, projected into the y-z plane
R_body_foot_yz = (y ** 2 + z ** 2) ** 0.5
# Distance from the leg's forward/back point of rotation to the foot
R_hip_foot_yz = (R_body_foot_yz ** 2 - config.ABDUCTION_OFFSET ** 2) ** 0.5
# Interior angle of the right triangle formed in the y-z plane by the leg that is coincident to the ab/adduction axis
    # For legs 1 (front-left) and 3 (back-left) the abduction offset is positive; for the right legs it is negative.
arccos_argument = config.ABDUCTION_OFFSETS[leg_index] / R_body_foot_yz
arccos_argument = np.clip(arccos_argument, -0.99, 0.99)
phi = np.arccos(arccos_argument)
# Angle of the y-z projection of the hip-to-foot vector, relative to the positive y-axis
hip_foot_angle = np.arctan2(z, y)
# Ab/adduction angle, relative to the positive y-axis
abduction_angle = phi + hip_foot_angle
# theta: Angle between the tilted negative z-axis and the hip-to-foot vector
theta = np.arctan2(-x, R_hip_foot_yz)
# Distance between the hip and foot
R_hip_foot = (R_hip_foot_yz ** 2 + x ** 2) ** 0.5
# Angle between the line going from hip to foot and the link L1
arccos_argument = (config.LEG_L1 ** 2 + R_hip_foot ** 2 - config.LEG_L2 ** 2) / (
2 * config.LEG_L1 * R_hip_foot
)
arccos_argument = np.clip(arccos_argument, -0.99, 0.99)
trident = np.arccos(arccos_argument)
# Angle of the first link relative to the tilted negative z axis
hip_angle = theta + trident
# Angle between the leg links L1 and L2
arccos_argument = (config.LEG_L1 ** 2 + config.LEG_L2 ** 2 - R_hip_foot ** 2) / (
2 * config.LEG_L1 * config.LEG_L2
)
arccos_argument = np.clip(arccos_argument, -0.99, 0.99)
beta = np.arccos(arccos_argument)
# Angle of the second link relative to the tilted negative z axis
knee_angle = hip_angle - (np.pi - beta)
return np.array([abduction_angle, hip_angle, knee_angle])
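# Sanity check: a foot directly below the hip in the leg plane, e.g.
# r = (0, ABDUCTION_OFFSET, -0.125) for a left leg, gives abduction_angle ≈ 0
# because phi and hip_foot_angle cancel.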
def four_legs_inverse_kinematics(r_body_foot, config):
"""Find the joint angles for all twelve DOF correspoinding to the given matrix of body-relative foot positions.
Parameters
----------
r_body_foot : numpy array (3,4)
Matrix of the body-frame foot positions. Each column corresponds to a separate foot.
config : Config object
Object of robot configuration parameters.
Returns
-------
numpy array (3,4)
Matrix of corresponding joint angles.
"""
alpha = np.zeros((3, 4))
for i in range(4):
body_offset = config.LEG_ORIGINS[:, i]
alpha[:, i] = leg_explicit_inverse_kinematics(
r_body_foot[:, i] - body_offset, i, config
)
return alpha
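# Usage sketch (mirrors Controller.run): given a Configuration instance
# `config`, the joint angles for the default stance at the standard body height:
#   angles = four_legs_inverse_kinematics(
#       config.default_stance + np.array([0, 0, -0.16])[:, np.newaxis], config
#   )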
| 3,324 | Python | 34.752688 | 136 | 0.639892 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/pupper/HardwareConfig.py | """
Per-robot configuration file that is particular to each individual robot, not just the type of robot.
"""
PS4_COLOR = {"red": 0, "blue": 0, "green": 255}
PS4_DEACTIVATED_COLOR = {"red": 0, "blue": 0, "green": 50} | 217 | Python | 35.333327 | 101 | 0.658986 |
renanmb/Omniverse_legged_robotics/URDF-Descriptions/Mini_pupper/StanfordQuadruped-pupper/pupper/Config.py | import numpy as np
from pupper.ServoCalibration import MICROS_PER_RAD, NEUTRAL_ANGLE_DEGREES
from pupper.HardwareConfig import PS4_COLOR, PS4_DEACTIVATED_COLOR
from enum import Enum
# TODO: put these somewhere else
class PWMParams:
def __init__(self):
self.pins = np.array([[2, 14, 18, 23], [3, 15, 27, 24], [4, 17, 22, 25]])
self.range = 4000
self.freq = 250
class ServoParams:
def __init__(self):
self.neutral_position_pwm = 1500 # Middle position
self.micros_per_rad = MICROS_PER_RAD # Must be calibrated
# The neutral angle of the joint relative to the modeled zero-angle in degrees, for each joint
self.neutral_angle_degrees = NEUTRAL_ANGLE_DEGREES
self.servo_multipliers = np.array(
[[1, 1, 1, 1], [-1, 1, -1, 1], [1, -1, 1, -1]]
)
@property
def neutral_angles(self):
return self.neutral_angle_degrees * np.pi / 180.0 # Convert to radians
class Configuration:
def __init__(self):
################# CONTROLLER BASE COLOR ##############
self.ps4_color = PS4_COLOR
self.ps4_deactivated_color = PS4_DEACTIVATED_COLOR
#################### COMMANDS ####################
self.max_x_velocity = 0.4
self.max_y_velocity = 0.3
self.max_yaw_rate = 2.0
self.max_pitch = 30.0 * np.pi / 180.0
#################### MOVEMENT PARAMS ####################
self.z_time_constant = 0.02
self.z_speed = 0.03 # maximum speed [m/s]
self.pitch_deadband = 0.02
self.pitch_time_constant = 0.25
self.max_pitch_rate = 0.15
self.roll_speed = 0.16 # maximum roll rate [rad/s]
self.yaw_time_constant = 0.3
self.max_stance_yaw = 1.2
self.max_stance_yaw_rate = 2.0
#################### STANCE ####################
self.delta_x = 0.1
self.delta_y = 0.09
self.x_shift = 0.0
self.default_z_ref = -0.16
#################### SWING ######################
self.z_coeffs = None
self.z_clearance = 0.07
self.alpha = (
0.5 # Ratio between touchdown distance and total horizontal stance movement
)
        self.beta = (
            0.5  # Ratio between touchdown yaw correction and total stance yaw movement
        )
#################### GAIT #######################
self.dt = 0.01
self.num_phases = 4
self.contact_phases = np.array(
[[1, 1, 1, 0], [1, 0, 1, 1], [1, 0, 1, 1], [1, 1, 1, 0]]
)
self.overlap_time = (
0.10 # duration of the phase where all four feet are on the ground
)
self.swing_time = (
0.15 # duration of the phase when only two feet are on the ground
)
######################## GEOMETRY ######################
self.LEG_FB = 0.10 # front-back distance from center line to leg axis
self.LEG_LR = 0.04 # left-right distance from center line to leg plane
self.LEG_L2 = 0.115
self.LEG_L1 = 0.1235
self.ABDUCTION_OFFSET = 0.03 # distance from abduction axis to leg
self.FOOT_RADIUS = 0.01
self.HIP_L = 0.0394
self.HIP_W = 0.0744
self.HIP_T = 0.0214
self.HIP_OFFSET = 0.0132
self.L = 0.276
self.W = 0.100
self.T = 0.050
self.LEG_ORIGINS = np.array(
[
[self.LEG_FB, self.LEG_FB, -self.LEG_FB, -self.LEG_FB],
[-self.LEG_LR, self.LEG_LR, -self.LEG_LR, self.LEG_LR],
[0, 0, 0, 0],
]
)
self.ABDUCTION_OFFSETS = np.array(
[
-self.ABDUCTION_OFFSET,
self.ABDUCTION_OFFSET,
-self.ABDUCTION_OFFSET,
self.ABDUCTION_OFFSET,
]
)
################### INERTIAL ####################
self.FRAME_MASS = 0.560 # kg
self.MODULE_MASS = 0.080 # kg
self.LEG_MASS = 0.030 # kg
self.MASS = self.FRAME_MASS + (self.MODULE_MASS + self.LEG_MASS) * 4
# Compensation factor of 3 because the inertia measurement was just
# of the carbon fiber and plastic parts of the frame and did not
# include the hip servos and electronics
self.FRAME_INERTIA = tuple(
map(lambda x: 3.0 * x, (1.844e-4, 1.254e-3, 1.337e-3))
)
self.MODULE_INERTIA = (3.698e-5, 7.127e-6, 4.075e-5)
leg_z = 1e-6
leg_mass = 0.010
leg_x = 1 / 12 * self.LEG_L1 ** 2 * leg_mass
leg_y = leg_x
self.LEG_INERTIA = (leg_x, leg_y, leg_z)
@property
def default_stance(self):
return np.array(
[
[
self.delta_x + self.x_shift,
self.delta_x + self.x_shift,
-self.delta_x + self.x_shift,
-self.delta_x + self.x_shift,
],
[-self.delta_y, self.delta_y, -self.delta_y, self.delta_y],
[0, 0, 0, 0],
]
)
################## SWING ###########################
@property
def z_clearance(self):
return self.__z_clearance
@z_clearance.setter
def z_clearance(self, z):
self.__z_clearance = z
# b_z = np.array([0, 0, 0, 0, self.__z_clearance])
# A_z = np.array(
# [
# [0, 0, 0, 0, 1],
# [1, 1, 1, 1, 1],
# [0, 0, 0, 1, 0],
# [4, 3, 2, 1, 0],
# [0.5 ** 4, 0.5 ** 3, 0.5 ** 2, 0.5 ** 1, 0.5 ** 0],
# ]
# )
# self.z_coeffs = solve(A_z, b_z)
########################### GAIT ####################
@property
def overlap_ticks(self):
return int(self.overlap_time / self.dt)
@property
def swing_ticks(self):
return int(self.swing_time / self.dt)
@property
def stance_ticks(self):
return 2 * self.overlap_ticks + self.swing_ticks
@property
def phase_ticks(self):
return np.array(
[self.overlap_ticks, self.swing_ticks, self.overlap_ticks, self.swing_ticks]
)
@property
def phase_length(self):
return 2 * self.overlap_ticks + 2 * self.swing_ticks
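    # With the defaults above (dt = 0.01 s, overlap_time = 0.10 s,
    # swing_time = 0.15 s): overlap_ticks = 10, swing_ticks = 15,
    # stance_ticks = 35, and phase_length = 50 ticks, i.e. a 0.5 s gait cycle.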
class SimulationConfig:
def __init__(self):
self.XML_IN = "pupper.xml"
self.XML_OUT = "pupper_out.xml"
self.START_HEIGHT = 0.3
self.MU = 1.5 # coeff friction
self.DT = 0.001 # seconds between simulation steps
self.JOINT_SOLREF = "0.001 1" # time constant and damping ratio for joints
self.JOINT_SOLIMP = "0.9 0.95 0.001" # joint constraint parameters
self.GEOM_SOLREF = "0.01 1" # time constant and damping ratio for geom contacts
self.GEOM_SOLIMP = "0.9 0.95 0.001" # geometry contact parameters
# Joint params
G = 220 # Servo gear ratio
m_rotor = 0.016 # Servo rotor mass
r_rotor = 0.005 # Rotor radius
self.ARMATURE = G ** 2 * m_rotor * r_rotor ** 2 # Inertia of rotational joints
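        # = 220**2 * 0.016 * 0.005**2 ≈ 0.0194 kg*m^2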
# print("Servo armature", self.ARMATURE)
NATURAL_DAMPING = 1.0 # Damping resulting from friction
ELECTRICAL_DAMPING = 0.049 # Damping resulting from back-EMF
self.REV_DAMPING = (
NATURAL_DAMPING + ELECTRICAL_DAMPING
) # Damping torque on the revolute joints
# Servo params
self.SERVO_REV_KP = 300 # Position gain [Nm/rad]
# Force limits
self.MAX_JOINT_TORQUE = 3.0
self.REVOLUTE_RANGE = 1.57
| 7,666 | Python | 32.480349 | 102 | 0.501435 |