code stringlengths 17 6.64M |
|---|
class FurthestPointSampling(Function):
'Uses iterative furthest point sampling to select a set of features whose\n corresponding points have the furthest distance.'
@staticmethod
def forward(ctx, points_xyz: torch.Tensor, num_points: int) -> torch.Tensor:
'\n Args:\n points_x... |
class FurthestPointSamplingWithDist(Function):
'Uses iterative furthest point sampling to select a set of features whose\n corresponding points have the furthest distance.'
@staticmethod
def forward(ctx, points_dist: torch.Tensor, num_points: int) -> torch.Tensor:
'\n Args:\n ... |
class FusedBiasLeakyReLUFunctionBackward(Function):
'Calculate second order deviation.\n\n This function is to compute the second order deviation for the fused leaky\n relu operation.\n '
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
... |
class FusedBiasLeakyReLUFunction(Function):
@staticmethod
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = ext_module.fused_bias_leakyrelu(input, bias, empty, act=3, grad=0, alpha=negative_slope, scale=scale)
ctx.save_for_backward(out)
ctx... |
class FusedBiasLeakyReLU(nn.Module):
'Fused bias leaky ReLU.\n\n This function is introduced in the StyleGAN2:\n `Analyzing and Improving the Image Quality of StyleGAN\n <http://arxiv.org/abs/1912.04958>`_\n\n The bias term comes from the convolution operation. In addition, to keep\n the variance o... |
def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=(2 ** 0.5)):
'Fused bias leaky ReLU function.\n\n This function is introduced in the StyleGAN2:\n `Analyzing and Improving the Image Quality of StyleGAN\n <http://arxiv.org/abs/1912.04958>`_\n\n The bias term comes from the convolution op... |
def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=(2 ** 0.5)):
if (bias is not None):
assert (bias.ndim == 1)
assert (bias.shape[0] == x.shape[1])
x = (x + bias.reshape([((- 1) if (i == 1) else 1) for i in range(x.ndim)]))
x = F.leaky_relu(x, negative_slope)
if (scale != 1)... |
class GatherPoints(Function):
'Gather points with given index.'
@staticmethod
def forward(ctx, features: torch.Tensor, indices: torch.Tensor) -> torch.Tensor:
'\n Args:\n features (torch.Tensor): (B, C, N) features to gather.\n indices (torch.Tensor): (B, M) where M i... |
def get_onnxruntime_op_path():
    """Return the path of the compiled ONNX Runtime custom-op library.

    Looks in the package root (two directories above this file) for a
    shared library matching ``_ext_ort.*.so``.

    Returns:
        str: Path of the first matching library, or an empty string if
        no library is found.
    """
    pkg_root = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
    matches = glob.glob(os.path.join(pkg_root, '_ext_ort.*.so'))
    return matches[0] if matches else ''
|
def boxes_iou_bev(boxes_a, boxes_b):
"Calculate boxes IoU in the Bird's Eye View.\n\n Args:\n boxes_a (torch.Tensor): Input boxes a with shape (M, 5).\n boxes_b (torch.Tensor): Input boxes b with shape (N, 5).\n\n Returns:\n torch.Tensor: IoU result with shape (M, N).\n "
ans_iou... |
def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
'NMS function GPU implementation (for BEV boxes). The overlap of two\n boxes for IoU calculation is defined as the exact overlapping area of the\n two boxes. In this function, one can also set ``pre_max_size`` and\n ``post_max_siz... |
def nms_normal_bev(boxes, scores, thresh):
'Normal NMS function GPU implementation (for BEV boxes). The overlap of\n two boxes for IoU calculation is defined as the exact overlapping area of\n the two boxes WITH their yaw angle set to 0.\n\n Args:\n boxes (torch.Tensor): Input boxes with shape (N,... |
class KNN(Function):
'KNN (CUDA) based on heap data structure.\n\n Modified from `PAConv <https://github.com/CVMI-Lab/PAConv/tree/main/\n scene_seg/lib/pointops/src/knnquery_heap>`_.\n\n Find k-nearest points.\n '
@staticmethod
def forward(ctx, k: int, xyz: torch.Tensor, center_xyz: torch.Ten... |
class BaseMergeCell(nn.Module):
'The basic class for cells used in NAS-FPN and NAS-FCOS.\n\n BaseMergeCell takes 2 inputs. After applying convolution\n on them, they are resized to the target size. Then,\n they go through binary_op, which depends on the type of cell.\n If with_out_conv is True, the re... |
class SumCell(BaseMergeCell):
    """Merge cell that fuses its two inputs by element-wise addition."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(SumCell, self).__init__(in_channels, out_channels, **kwargs)

    def _binary_op(self, x1, x2):
        # Element-wise sum of the two (already resized) inputs.
        return x1 + x2
|
class ConcatCell(BaseMergeCell):
    """Merge cell that fuses its two inputs by channel-wise concatenation.

    The channel count handed to the parent is doubled because the two
    feature maps are concatenated along the channel dimension.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(ConcatCell, self).__init__(in_channels * 2, out_channels,
                                         **kwargs)

    def _binary_op(self, x1, x2):
        return torch.cat([x1, x2], dim=1)
|
class GlobalPoolingCell(BaseMergeCell):
def __init__(self, in_channels=None, out_channels=None, **kwargs):
super().__init__(in_channels, out_channels, **kwargs)
self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
def _binary_op(self, x1, x2):
x2_att = self.global_pool(x2).sigmoid()
... |
def min_area_polygons(pointsets):
'Find the smallest polygons that surrounds all points in the point sets.\n\n Args:\n pointsets (Tensor): point sets with shape (N, 18).\n\n Returns:\n torch.Tensor: Return the smallest polygons with shape (N, 8).\n '
polygons = pointsets.new_zeros((poi... |
class ModulatedDeformConv2dFunction(Function):
@staticmethod
def symbolic(g, input, offset, mask, weight, bias, stride, padding, dilation, groups, deform_groups):
input_tensors = [input, offset, mask, weight]
if (bias is not None):
input_tensors.append(bias)
return g.op('m... |
class ModulatedDeformConv2d(nn.Module):
@deprecated_api_warning({'deformable_groups': 'deform_groups'}, cls_name='ModulatedDeformConv2d')
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, deform_groups=1, bias=True):
super(ModulatedDeformConv2d, sel... |
@CONV_LAYERS.register_module('DCNv2')
class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
'A ModulatedDeformable Conv Encapsulation that acts as normal Conv\n layers.\n\n Args:\n in_channels (int): Same as nn.Conv2d.\n out_channels (int): Same as nn.Conv2d.\n kernel_size (int or tup... |
def pixel_group(score, mask, embedding, kernel_label, kernel_contour, kernel_region_num, distance_threshold):
'Group pixels into text instances, which is widely used text detection\n methods.\n\n Arguments:\n score (np.array or torch.Tensor): The foreground score with size hxw.\n mask (np.arra... |
def points_in_boxes_part(points, boxes):
'Find the box in which each point is (CUDA).\n\n Args:\n points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate.\n boxes (torch.Tensor): [B, T, 7],\n num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in\n LiDA... |
def points_in_boxes_cpu(points, boxes):
'Find all boxes in which each point is (CPU). The CPU version of\n :meth:`points_in_boxes_all`.\n\n Args:\n points (torch.Tensor): [B, M, 3], [x, y, z] in\n LiDAR/DEPTH coordinate\n boxes (torch.Tensor): [B, T, 7],\n num_valid_boxes... |
def points_in_boxes_all(points, boxes):
'Find all boxes in which each point is (CUDA).\n\n Args:\n points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate\n boxes (torch.Tensor): [B, T, 7],\n num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],\n (x, y,... |
def points_in_polygons(points, polygons):
'Judging whether points are inside polygons, which is used in the ATSS\n assignment for the rotated boxes.\n\n It should be noted that when the point is just at the polygon boundary, the\n judgment will be inaccurate, but the effect on assignment is limited.\n\n ... |
class PSAMaskFunction(Function):
@staticmethod
def symbolic(g, input, psa_type, mask_size):
return g.op('mmcv::MMCVPSAMask', input, psa_type_i=psa_type, mask_size_i=mask_size)
@staticmethod
def forward(ctx, input, psa_type, mask_size):
ctx.psa_type = psa_type
ctx.mask_size = ... |
class PSAMask(nn.Module):
def __init__(self, psa_type, mask_size=None):
super(PSAMask, self).__init__()
assert (psa_type in ['collect', 'distribute'])
if (psa_type == 'collect'):
psa_type_enum = 0
else:
psa_type_enum = 1
self.psa_type_enum = psa_typ... |
class RiRoIAlignRotatedFunction(Function):
@staticmethod
def forward(ctx, features, rois, out_size, spatial_scale, num_samples=0, num_orientations=8, clockwise=False):
if isinstance(out_size, int):
out_h = out_size
out_w = out_size
elif is_tuple_of(out_size, int):
... |
class RiRoIAlignRotated(nn.Module):
'Rotation-invariant RoI align pooling layer for rotated proposals.\n\n It accepts a feature map of shape (N, C, H, W) and rois with shape\n (n, 6) with each roi decoded as (batch_index, center_x, center_y,\n w, h, angle). The angle is in radian.\n\n The details are ... |
class RoIAlignFunction(Function):
@staticmethod
def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, pool_mode, aligned):
from ..onnx import is_custom_op_loaded
has_custom_op = is_custom_op_loaded()
if has_custom_op:
return g.op('mmcv::MMCVRoiAlign', in... |
class RoIAlign(nn.Module):
"RoI align pooling layer.\n\n Args:\n output_size (tuple): h, w\n spatial_scale (float): scale the input boxes by this number\n sampling_ratio (int): number of inputs samples to take for each\n output sample. 0 to take samples densely for current model... |
class RoIAlignRotatedFunction(Function):
@staticmethod
def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio, aligned, clockwise):
if isinstance(output_size, int):
out_h = output_size
out_w = output_size
elif isinstance(output_size, tuple):
... |
class RoIAlignRotated(nn.Module):
"RoI align pooling layer for rotated proposals.\n\n It accepts a feature map of shape (N, C, H, W) and rois with shape\n (n, 6) with each roi decoded as (batch_index, center_x, center_y,\n w, h, angle). The angle is in radian.\n\n Args:\n output_size (tuple): h... |
class RoIPoolFunction(Function):
@staticmethod
def symbolic(g, input, rois, output_size, spatial_scale):
return g.op('MaxRoiPool', input, rois, pooled_shape_i=output_size, spatial_scale_f=spatial_scale)
@staticmethod
def forward(ctx, input, rois, output_size, spatial_scale=1.0):
ctx.... |
class RoIPool(nn.Module):
def __init__(self, output_size, spatial_scale=1.0):
super(RoIPool, self).__init__()
self.output_size = _pair(output_size)
self.spatial_scale = float(spatial_scale)
def forward(self, input, rois):
return roi_pool(input, rois, self.output_size, self.sp... |
class RoIAwarePool3d(nn.Module):
"Encode the geometry-specific features of each 3D proposal.\n\n Please refer to `PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_ for more\n details.\n\n Args:\n out_size (int or tuple): The size of output features. n or\n [n1, n2, n3].\n max_pts_p... |
class RoIAwarePool3dFunction(Function):
@staticmethod
def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel, mode):
'\n Args:\n rois (torch.Tensor): [N, 7], in LiDAR coordinate,\n (x, y, z) is the bottom center of rois.\n pts (torch.Tensor): ... |
class RoIPointPool3d(nn.Module):
'Encode the geometry-specific features of each 3D proposal.\n\n Please refer to `Paper of PartA2 <https://arxiv.org/pdf/1907.03670.pdf>`_\n for more details.\n\n Args:\n num_sampled_points (int, optional): Number of samples in each roi.\n Default: 512.\n... |
class RoIPointPool3dFunction(Function):
@staticmethod
def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
'\n Args:\n points (torch.Tensor): Input points whose shape is (B, N, C).\n point_features (torch.Tensor): Features of input points whose shape... |
class RotatedFeatureAlignFunction(Function):
'Using the feature interpolation to obtain the position information\n correspond to the refined rotate anchors and reconstruct the feature maps\n in pixel-wise manner to achieve feature alignment.\n\n The details are described in the paper\n `R3Det: Refined... |
def rotated_feature_align(features, best_rbboxes, spatial_scale=(1 / 8), points=1):
    """Align feature maps with refined rotated anchors via interpolation.

    Thin functional wrapper around
    :class:`RotatedFeatureAlignFunction`.

    Args:
        features (torch.Tensor): Input feature map.
        best_rbboxes (torch.Tensor): Refined rotated anchors to align to.
        spatial_scale (float): Scale factor from input coordinates to the
            feature map. Default: 1/8.
        points (int): Number of sampling points. Default: 1.

    Returns:
        torch.Tensor: The aligned feature map.
    """
    aligned = RotatedFeatureAlignFunction.apply(features, best_rbboxes,
                                                spatial_scale, points)
    return aligned
|
@CONV_LAYERS.register_module(name='SAC')
class SAConv2d(ConvAWS2d):
"SAC (Switchable Atrous Convolution)\n\n This is an implementation of `DetectoRS: Detecting Objects with Recursive\n Feature Pyramid and Switchable Atrous Convolution\n <https://arxiv.org/abs/2006.02334>`_.\n\n Args:\n in_chann... |
def _calculate_fan_in_and_fan_out_hwio(tensor):
dimensions = tensor.ndimension()
if (dimensions < 2):
raise ValueError('fan in and fan out can not be computed for tensorwith fewer than 2 dimensions')
if (dimensions == 2):
fan_in = tensor.size((- 2))
fan_out = tensor.size((- 1))
... |
class SparseConvolution(SparseModule):
def __init__(self, ndim, in_channels, out_channels, kernel_size=3, stride=1, padding=0, dilation=1, groups=1, bias=True, subm=False, output_padding=0, transposed=False, inverse=False, indice_key=None, fused_bn=False):
super(SparseConvolution, self).__init__()
... |
@CONV_LAYERS.register_module()
class SparseConv2d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SparseConv2d, self).__init__(2, in_channels, out_channels, kernel_size, stride, padding, dilation,... |
@CONV_LAYERS.register_module()
class SparseConv3d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SparseConv3d, self).__init__(3, in_channels, out_channels, kernel_size, stride, padding, dilation,... |
@CONV_LAYERS.register_module()
class SparseConv4d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SparseConv4d, self).__init__(4, in_channels, out_channels, kernel_size, stride, padding, dilation,... |
@CONV_LAYERS.register_module()
class SparseConvTranspose2d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SparseConvTranspose2d, self).__init__(2, in_channels, out_channels, kernel_size, stride, ... |
@CONV_LAYERS.register_module()
class SparseConvTranspose3d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SparseConvTranspose3d, self).__init__(3, in_channels, out_channels, kernel_size, stride, ... |
@CONV_LAYERS.register_module()
class SparseInverseConv2d(SparseConvolution):
    """2D sparse inverse convolution layer (``inverse=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 indice_key=None, bias=True):
        super().__init__(
            2,
            in_channels,
            out_channels,
            kernel_size,
            bias=bias,
            inverse=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SparseInverseConv3d(SparseConvolution):
    """3D sparse inverse convolution layer (``inverse=True``)."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 indice_key=None, bias=True):
        super().__init__(
            3,
            in_channels,
            out_channels,
            kernel_size,
            bias=bias,
            inverse=True,
            indice_key=indice_key)
|
@CONV_LAYERS.register_module()
class SubMConv2d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SubMConv2d, self).__init__(2, in_channels, out_channels, kernel_size, stride, padding, dilation, gro... |
@CONV_LAYERS.register_module()
class SubMConv3d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SubMConv3d, self).__init__(3, in_channels, out_channels, kernel_size, stride, padding, dilation, gro... |
@CONV_LAYERS.register_module()
class SubMConv4d(SparseConvolution):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, indice_key=None):
super(SubMConv4d, self).__init__(4, in_channels, out_channels, kernel_size, stride, padding, dilation, gro... |
class SparseConvFunction(Function):
'Sparse Convolution.\n\n Please refer to `SECOND <https://www.mdpi.com/1424-8220/18/10/3337>`_ for\n more details.\n '
@staticmethod
def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out):
'\n Args:\n fea... |
class SparseInverseConvFunction(Function):
@staticmethod
def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out):
'\n Args:\n features (torch.Tensor): Features that needs to convolute.\n filters (torch.nn.parameter.Parameter): Convolution filt... |
class SubMConvFunction(Function):
@staticmethod
def forward(ctx, features, filters, indice_pairs, indice_pair_num, num_activate_out):
'\n Args:\n features (torch.Tensor): Features that needs to convolute.\n filters (torch.nn.parameter.Parameter): Convolution filters.\n ... |
class SparseMaxPoolFunction(Function):
@staticmethod
def forward(ctx, features, indice_pairs, indice_pair_num, num_activate_out):
'\n Args:\n features (torch.Tensor): Features that needs to convolute.\n indice_pairs (torch.Tensor): Indice pairs between inputs locations\n ... |
def is_spconv_module(module):
    """Return True if ``module`` is a sparse-conv module.

    A sparse-conv module is any instance of :class:`SparseModule`
    (including its subclasses).
    """
    return isinstance(module, SparseModule)
|
def is_sparse_conv(module):
    """Return True if ``module`` is a :class:`SparseConvolution` instance."""
    # Imported at call time rather than module level — presumably to
    # avoid a circular dependency with .sparse_conv; verify before moving.
    from .sparse_conv import SparseConvolution
    return isinstance(module, SparseConvolution)
|
def _mean_update(vals, m_vals, t):
outputs = []
if (not isinstance(vals, list)):
vals = [vals]
if (not isinstance(m_vals, list)):
m_vals = [m_vals]
for (val, m_val) in zip(vals, m_vals):
output = (((t / float((t + 1))) * m_val) + ((1 / float((t + 1))) * val))
outputs.ap... |
class SparseModule(nn.Module):
    """Placeholder base class for sparse modules.

    Every module that subclasses this will take a sparse tensor as input
    when used inside ``SparseSequential``.
    """

    pass
|
class SparseSequential(SparseModule):
"A sequential container.\n Modules will be added to it in the order they are passed in the\n constructor.\n Alternatively, an ordered dict of modules can also be passed in.\n\n To make it easier to understand, given is a small example::\n\n Example:\n >>... |
class ToDense(SparseModule):
    """Convert a ``SparseConvTensor`` into a dense NCHW tensor."""

    def forward(self, x: SparseConvTensor):
        # Densification is delegated to the sparse tensor itself.
        return x.dense()
|
class RemoveGrid(SparseModule):
    """Remove the pre-allocated grid buffer from a sparse tensor."""

    def forward(self, x: SparseConvTensor):
        # Clearing the reference lets the buffer be reclaimed.
        x.grid = None
        return x
|
def get_conv_output_size(input_size, kernel_size, stride, padding, dilation):
ndim = len(input_size)
output_size = []
for i in range(ndim):
size = (((((input_size[i] + (2 * padding[i])) - (dilation[i] * (kernel_size[i] - 1))) - 1) // stride[i]) + 1)
if (kernel_size[i] == (- 1)):
... |
def get_deconv_output_size(input_size, kernel_size, stride, padding, dilation, output_padding):
ndim = len(input_size)
output_size = []
for i in range(ndim):
if (kernel_size[i] == (- 1)):
raise ValueError("deconv don't support kernel_size < 0")
size = (((((input_size[i] - 1) * ... |
def get_indice_pairs(indices, batch_size, spatial_shape, ksize=3, stride=1, padding=0, dilation=1, out_padding=0, subm=False, transpose=False, grid=None):
ndim = (indices.shape[1] - 1)
if (not isinstance(ksize, (list, tuple))):
ksize = ([ksize] * ndim)
if (not isinstance(stride, (list, tuple))):
... |
def indice_conv(features, filters, indice_pairs, indice_pair_num, num_activate_out, inverse=False, subm=False):
if ((filters.dtype == torch.float32) or (filters.dtype == torch.half)):
return ext_module.indice_conv_forward(features, filters, indice_pairs, indice_pair_num, num_activate_out, int(inverse), in... |
def fused_indice_conv(features, filters, bias, indice_pairs, indice_pair_num, num_activate_out, inverse, subm):
if ((features.dtype == torch.half) or (filters.dtypes == torch.float32)):
func = ext_module.fused_indice_conv_forward
else:
raise NotImplementedError
return func(features, filter... |
def indice_conv_backward(features, filters, out_bp, indice_pairs, indice_pair_num, inverse=False, subm=False):
if ((filters.dtype == torch.float32) or (filters.dtype == torch.half)):
return ext_module.indice_conv_backward(features, filters, out_bp, indice_pairs, indice_pair_num, int(inverse), int(subm))
... |
def indice_maxpool(features, indice_pairs, indice_pair_num, num_activate_out):
    """Sparse max-pooling forward pass via the compiled extension.

    Only ``float32`` and ``float16`` feature tensors are supported.

    Raises:
        NotImplementedError: If ``features`` has any other dtype.
    """
    if features.dtype not in (torch.float32, torch.half):
        raise NotImplementedError
    return ext_module.indice_maxpool_forward(features, indice_pairs,
                                             indice_pair_num,
                                             num_activate_out)
|
def indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num):
if ((features.dtype == torch.float32) or (features.dtype == torch.half)):
return ext_module.indice_maxpool_backward(features, out_features, out_bp, indice_pairs, indice_pair_num)
else:
raise NotImplemen... |
class SparseMaxPool(SparseModule):
def __init__(self, ndim, kernel_size, stride=1, padding=0, dilation=1, subm=False):
super(SparseMaxPool, self).__init__()
if (not isinstance(kernel_size, (list, tuple))):
kernel_size = ([kernel_size] * ndim)
if (not isinstance(stride, (list, ... |
class SparseMaxPool2d(SparseMaxPool):
    """2D sparse max-pooling layer."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        super().__init__(2, kernel_size, stride, padding, dilation)
|
class SparseMaxPool3d(SparseMaxPool):
    """3D sparse max-pooling layer."""

    def __init__(self, kernel_size, stride=1, padding=0, dilation=1):
        super().__init__(3, kernel_size, stride, padding, dilation)
|
class SyncBatchNormFunction(Function):
@staticmethod
def symbolic(g, input, running_mean, running_var, weight, bias, momentum, eps, group, group_size, stats_mode):
return g.op('mmcv::MMCVSyncBatchNorm', input, running_mean, running_var, weight, bias, momentum_f=momentum, eps_f=eps, group_i=group, gro... |
@NORM_LAYERS.register_module(name='MMSyncBN')
class SyncBatchNorm(Module):
"Synchronized Batch Normalization.\n\n Args:\n num_features (int): number of features/chennels in input tensor\n eps (float, optional): a value added to the denominator for numerical\n stability. Defaults to 1e-... |
class ThreeInterpolate(Function):
'Performs weighted linear interpolation on 3 features.\n\n Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_\n for more details.\n '
@staticmethod
def forward(ctx, features: torch.Tensor, indices: torch.Tensor, weight: torch.Tensor) -> to... |
class ThreeNN(Function):
'Find the top-3 nearest neighbors of the target set from the source set.\n\n Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_\n for more details.\n '
@staticmethod
def forward(ctx, target: torch.Tensor, source: torch.Tensor) -> Tuple[(torch.Tenso... |
class TINShiftFunction(Function):
@staticmethod
def forward(ctx, input, shift):
C = input.size(2)
num_segments = shift.size(1)
if (((C // num_segments) <= 0) or ((C % num_segments) != 0)):
raise ValueError(f'C should be a multiple of num_segments, but got C={C} and num_seg... |
class TINShift(nn.Module):
'Temporal Interlace Shift.\n\n Temporal Interlace shift is a differentiable temporal-wise frame shifting\n which is proposed in "Temporal Interlacing Network"\n\n Please refer to `Temporal Interlacing Network\n <https://arxiv.org/abs/2001.06499>`_ for more details.\n\n Co... |
class _Voxelization(Function):
@staticmethod
def forward(ctx, points, voxel_size, coors_range, max_points=35, max_voxels=20000, deterministic=True):
'Convert kitti points(N, >=3) to voxels.\n\n Args:\n points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points\n ... |
class Voxelization(nn.Module):
'Convert kitti points(N, >=3) to voxels.\n\n Please refer to `Point-Voxel CNN for Efficient 3D Deep Learning\n <https://arxiv.org/abs/1907.03739>`_ for more details.\n\n Args:\n voxel_size (tuple or float): The size of voxel with the shape of [3].\n point_clou... |
def scatter(input, devices, streams=None):
'Scatters tensor across multiple GPUs.'
if (streams is None):
streams = ([None] * len(devices))
if isinstance(input, list):
chunk_size = (((len(input) - 1) // len(devices)) + 1)
outputs = [scatter(input[i], [devices[(i // chunk_size)]], [s... |
def synchronize_stream(output, devices, streams):
if isinstance(output, list):
chunk_size = (len(output) // len(devices))
for i in range(len(devices)):
for j in range(chunk_size):
synchronize_stream(output[((i * chunk_size) + j)], [devices[i]], [streams[i]])
elif is... |
def get_input_device(input):
if isinstance(input, list):
for item in input:
input_device = get_input_device(item)
if (input_device != (- 1)):
return input_device
return (- 1)
elif isinstance(input, torch.Tensor):
return (input.get_device() if inp... |
class Scatter():
@staticmethod
def forward(target_gpus, input):
input_device = get_input_device(input)
streams = None
if ((input_device == (- 1)) and (target_gpus != [(- 1)])):
streams = [_get_stream(device) for device in target_gpus]
outputs = scatter(input, targe... |
def collate(batch, samples_per_gpu=1):
'Puts each data field into a tensor/DataContainer with outer dimension\n batch size.\n\n Extend default_collate to add support for\n :type:`~mmcv.parallel.DataContainer`. There are 3 cases.\n\n 1. cpu_only = True, e.g., meta data\n 2. cpu_only = False, stack =... |
def assert_tensor_type(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if (not isinstance(args[0].data, torch.Tensor)):
raise AttributeError(f'{args[0].__class__.__name__} has no attribute {func.__name__} for type {args[0].datatype}')
return func(*args, **kwargs)
r... |
class DataContainer():
'A container for any type of objects.\n\n Typically tensors will be stacked in the collate function and sliced along\n some dimension in the scatter function. This behavior has some limitations.\n 1. All tensors have to be the same size.\n 2. Types are limited (numpy array or Te... |
class MMDataParallel(DataParallel):
'The DataParallel module that supports DataContainer.\n\n MMDataParallel has two main differences with PyTorch DataParallel:\n\n - It supports a custom type :class:`DataContainer` which allows more\n flexible control of input data during both GPU and CPU inference.\n... |
class MMDistributedDataParallel(DistributedDataParallel):
'The DDP module that supports DataContainer.\n\n MMDDP has two main differences with PyTorch DDP:\n\n - It supports a custom type :class:`DataContainer` which allows more\n flexible control of input data.\n - It implement two APIs ``train_ste... |
@MODULE_WRAPPERS.register_module()
class MMDistributedDataParallel(nn.Module):
def __init__(self, module, dim=0, broadcast_buffers=True, bucket_cap_mb=25):
super(MMDistributedDataParallel, self).__init__()
self.module = module
self.dim = dim
self.broadcast_buffers = broadcast_buff... |
def scatter(inputs, target_gpus, dim=0):
'Scatter inputs to target gpus.\n\n The only difference from original :func:`scatter` is to add support for\n :type:`~mmcv.parallel.DataContainer`.\n '
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
if (target_gpus != [(- 1)]):
... |
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
'Scatter with support for kwargs dictionary.'
inputs = (scatter(inputs, target_gpus, dim) if inputs else [])
kwargs = (scatter(kwargs, target_gpus, dim) if kwargs else [])
if (len(inputs) < len(kwargs)):
inputs.extend([() for _ in range((... |
def is_module_wrapper(module):
'Check if a module is a module wrapper.\n\n The following 3 modules in MMCV (and their subclasses) are regarded as\n module wrappers: DataParallel, DistributedDataParallel,\n MMDistributedDataParallel (the deprecated version). You may add you own\n module wrapper by regi... |
class BaseModule(nn.Module, metaclass=ABCMeta):
'Base module for all modules in openmmlab.\n\n ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional\n functionality of parameter initialization. Compared with\n ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.\n\n - ``init_... |
class Sequential(BaseModule, nn.Sequential):
    """Sequential module in openmmlab.

    Args:
        init_cfg (dict, optional): Initialization config dict.
    """

    def __init__(self, *args, init_cfg=None):
        # Both bases are initialized explicitly: BaseModule takes the
        # init config, nn.Sequential consumes the child modules.
        BaseModule.__init__(self, init_cfg)
        nn.Sequential.__init__(self, *args)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.