code stringlengths 17 6.64M |
|---|
def obj2tensor(pyobj, device='cuda'):
    """Serialize a picklable Python object into a 1-D uint8 tensor.

    Args:
        pyobj: Any picklable Python object.
        device (str): Device to place the resulting tensor on.
            Defaults to ``'cuda'``.

    Returns:
        torch.Tensor: Byte tensor holding the pickled payload.
    """
    serialized = pickle.dumps(pyobj)
    byte_storage = torch.ByteStorage.from_buffer(serialized)
    return torch.ByteTensor(byte_storage).to(device=device)
|
def tensor2obj(tensor):
    """Deserialize a byte tensor back into the original Python object.

    Inverse of ``obj2tensor``: the tensor is moved to CPU, its raw bytes
    are extracted, and the pickled payload is loaded.

    Args:
        tensor (torch.Tensor): Byte tensor produced by serialization.

    Returns:
        The unpickled Python object.
    """
    raw = tensor.cpu().numpy().tobytes()
    return pickle.loads(raw)
|
@functools.lru_cache()
def _get_global_gloo_group():
    """Return a gloo-backend process group containing all ranks.

    The result is cached by ``lru_cache``, so the group is created at
    most once per process. When the default backend is already
    gloo-compatible (anything other than nccl), the global WORLD group
    is reused instead of creating a new one.
    """
    backend = dist.get_backend()
    if backend != 'nccl':
        return dist.group.WORLD
    return dist.new_group(backend='gloo')
|
def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
"Apply all reduce function for python dict object.\n\n The code is modified from https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.\n\n NOTE: make sure that py_dict in different ranks has the same ke... |
def palette_val(palette):
'Convert palette to matplotlib palette.\n\n Args:\n palette List[tuple]: A list of color tuples.\n\n Returns:\n List[tuple[float]]: A list of RGB matplotlib color tuples.\n '
new_palette = []
for color in palette:
color = [(c / 255) for c in color]
... |
def get_palette(palette, num_classes):
'Get palette from various inputs.\n\n Args:\n palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.\n num_classes (int): the number of classes.\n\n Returns:\n list[tuple[int]]: A list of color tuples.\n '
assert isinstance(num_... |
class COCO(_COCO):
'This class is almost the same as official pycocotools package.\n\n It implements some snake case function aliases. So that the COCO class has\n the same interface as LVIS class.\n '
def __init__(self, annotation_file=None):
if (getattr(pycocotools, '__version__', '0') >= ... |
def pq_compute_single_core(proc_id, annotation_set, gt_folder, pred_folder, categories, file_client=None):
'The single core function to evaluate the metric of Panoptic\n Segmentation.\n\n Same as the function with the same name in `panopticapi`. Only the function\n to load the images is changed to use th... |
def pq_compute_multi_core(matched_annotations_list, gt_folder, pred_folder, categories, file_client=None):
'Evaluate the metrics of Panoptic Segmentation with multithreading.\n\n Same as the function with the same name in `panopticapi`.\n\n Args:\n matched_annotations_list (list): The matched annotat... |
def _concat_dataset(cfg, default_args=None):
from .dataset_wrappers import ConcatDataset
ann_files = cfg['ann_file']
img_prefixes = cfg.get('img_prefix', None)
seg_prefixes = cfg.get('seg_prefix', None)
proposal_files = cfg.get('proposal_file', None)
separate_eval = cfg.get('separate_eval', Tr... |
def build_dataset(cfg, default_args=None):
from .dataset_wrappers import ClassBalancedDataset, ConcatDataset, MultiImageMixDataset, RepeatDataset
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif (cfg['type'] == 'ConcatDataset'):
... |
def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, runner_type='EpochBasedRunner', persistent_workers=False, **kwargs):
'Build PyTorch DataLoader.\n\n In distributed training, each GPU/process has a dataloader.\n In non-distributed training, there... |
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed ``numpy`` and ``random`` for a dataloader worker.

    Derives a distinct, reproducible seed per (rank, worker) pair so
    different workers produce different augmentation streams while runs
    with the same base seed stay deterministic.

    Args:
        worker_id (int): Id of this worker within its process.
        num_workers (int): Total workers per process.
        rank (int): Distributed rank of the parent process.
        seed (int): Base random seed.
    """
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
|
@DATASETS.register_module()
class CityscapesDataset(CocoDataset):
CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle')
PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]
def _filter_imgs(self, min_size=... |
@DATASETS.register_module()
class CustomDataset(Dataset):
"Custom dataset for detection.\n\n The annotation format is shown as follows. The `ann` field is optional for\n testing.\n\n .. code-block:: none\n\n [\n {\n 'filename': 'a.jpg',\n 'width': 1280,\n ... |
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
'A wrapper of concatenated dataset.\n\n Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but\n concat the group flag for image aspect ratio.\n\n Args:\n datasets (list[:obj:`Dataset`]): A list of datasets.\n separate_eva... |
@DATASETS.register_module()
class RepeatDataset():
'A wrapper of repeated dataset.\n\n The length of repeated dataset will be `times` larger than the original\n dataset. This is useful when the data loading time is long but the dataset\n is small. Using RepeatDataset can reduce the data loading time betw... |
@DATASETS.register_module()
class ClassBalancedDataset():
'A wrapper of repeated dataset with repeat factor.\n\n Suitable for training on class imbalanced datasets like LVIS. Following\n the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,\n in each epoch, an image may appear multipl... |
@DATASETS.register_module()
class MultiImageMixDataset():
'A wrapper of multiple images mixed dataset.\n\n Suitable for training on multiple images mixed data augmentation like\n mosaic and mixup. For the augmentation pipeline of mixed image data,\n the `get_indexes` method needs to be provided to obtain... |
@DATASETS.register_module()
class DeepFashionDataset(CocoDataset):
CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face')
PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), (0, 192, 224), (0, 192... |
@PIPELINES.register_module()
class Compose():
'Compose multiple transforms sequentially.\n\n Args:\n transforms (Sequence[dict | callable]): Sequence of transform object or\n config dict to be composed.\n '
def __init__(self, transforms):
assert isinstance(transforms, collecti... |
def to_tensor(data):
'Convert objects of various python types to :obj:`torch.Tensor`.\n\n Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n :class:`Sequence`, :class:`int` and :class:`float`.\n\n Args:\n data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n ... |
@PIPELINES.register_module()
class ToTensor():
'Convert some results to :obj:`torch.Tensor` by given keys.\n\n Args:\n keys (Sequence[str]): Keys that need to be converted to Tensor.\n '
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
'Call function t... |
@PIPELINES.register_module()
class ImageToTensor():
'Convert image to :obj:`torch.Tensor` by given keys.\n\n The dimension order of input image is (H, W, C). The pipeline will convert\n it to (C, H, W). If only 2 dimension (H, W) is given, the output would be\n (1, H, W).\n\n Args:\n keys (Sequ... |
@PIPELINES.register_module()
class Transpose():
'Transpose some results by given keys.\n\n Args:\n keys (Sequence[str]): Keys of results to be transposed.\n order (Sequence[int]): Order of transpose.\n '
def __init__(self, keys, order):
self.keys = keys
self.order = order
... |
@PIPELINES.register_module()
class ToDataContainer():
"Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n Args:\n fields (Sequence[dict]): Each field is a dict like\n ``dict(key='xxx', **kwargs)``. The ``key`` in result will\n be converted to :obj:`mmcv.DataContainer... |
@PIPELINES.register_module()
class DefaultFormatBundle():
'Default formatting bundle.\n\n It simplifies the pipeline of formatting common fields, including "img",\n "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".\n These fields are formatted as follows.\n\n - img: (1)transpose... |
@PIPELINES.register_module()
class DefaultFormatBundleFlickr(DefaultFormatBundle):
def __call__(self, results):
'Call function to transform and format common fields in results.\n Args:\n results (dict): Result dict contains the data to convert.\n Returns:\n dict: The r... |
@PIPELINES.register_module()
class Collect():
'Collect data from the loader relevant to the specific task.\n\n This is usually the last stage of the data loader pipeline. Typically keys\n is set to some subset of "img", "proposals", "gt_bboxes",\n "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".\n\n ... |
@PIPELINES.register_module()
class WrapFieldsToLists():
"Wrap fields of the data dictionary into lists for evaluation.\n\n This class can be used as a last step of a test or validation\n pipeline for single image evaluation or inference.\n\n Example:\n >>> test_pipeline = [\n >>> dict(ty... |
@PIPELINES.register_module()
class InstaBoost():
'Data augmentation method in `InstaBoost: Boosting Instance\n Segmentation Via Probability Map Guided Copy-Pasting\n <https://arxiv.org/abs/1908.07801>`_.\n\n Refer to https://github.com/GothicAi/Instaboost for implementation details.\n\n Args:\n ... |
@PIPELINES.register_module()
class MultiScaleFlipAug():
'Test-time augmentation with multiple scales and flipping.\n\n An example configuration is as followed:\n\n .. code-block::\n\n img_scale=[(1333, 400), (1333, 800)],\n flip=True,\n transforms=[\n dict(type=\'Resize\', ke... |
class DistributedSampler(_DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, seed=0):
super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
self.seed = (seed if (seed is not None) else 0)
def __iter__(self):
if self... |
class GroupSampler(Sampler):
def __init__(self, dataset, samples_per_gpu=1):
assert hasattr(dataset, 'flag')
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.flag = dataset.flag.astype(np.int64)
self.group_sizes = np.bincount(self.flag)
self.num_s... |
class DistributedGroupSampler(Sampler):
'Sampler that restricts data loading to a subset of the dataset.\n\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n ... |
class InfiniteGroupBatchSampler(Sampler):
'Similar to `BatchSampler` warping a `GroupSampler. It is designed for\n iteration-based runners like `IterBasedRunner` and yields a mini-batch\n indices each time, all indices in a batch should be in the same group.\n\n The implementation logic is referred to\n ... |
class InfiniteBatchSampler(Sampler):
'Similar to `BatchSampler` warping a `DistributedSampler. It is designed\n iteration-based runners like `IterBasedRunner` and yields a mini-batch\n indices each time.\n\n The implementation logic is referred to\n https://github.com/facebookresearch/detectron2/blob/... |
def replace_ImageToTensor(pipelines):
"Replace the ImageToTensor transform in a data pipeline to\n DefaultFormatBundle, which is normally useful in batch inference.\n\n Args:\n pipelines (list[dict]): Data pipeline configs.\n\n Returns:\n list: The new pipeline list with all ImageToTensor r... |
def get_loading_pipeline(pipeline):
"Only keep loading image and annotations related configuration.\n\n Args:\n pipeline (list[dict]): Data pipeline configs.\n\n Returns:\n list[dict]: The new pipeline list with only keep\n loading image and annotations related configuration.\n\n ... |
@HOOKS.register_module()
class NumClassCheckHook(Hook):
def _check_head(self, runner):
'Check whether the `num_classes` in head matches the length of\n `CLASSES` in `dataset`.\n\n Args:\n runner (obj:`EpochBasedRunner`): Epoch based Runner.\n '
model = runner.model... |
@DATASETS.register_module()
class VOCDataset(XMLDataset):
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')
PALETTE = [(106, 0, 228), (119, 11, 32), (165,... |
@DATASETS.register_module()
class WIDERFaceDataset(XMLDataset):
'Reader for the WIDER Face dataset in PASCAL VOC format.\n\n Conversion scripts can be found in\n https://github.com/sovrasov/wider-face-pascal-voc-annotations\n '
CLASSES = ('face',)
PALETTE = [(0, 255, 0)]
def __init__(self, *... |
@DATASETS.register_module()
class XMLDataset(CustomDataset):
'XML dataset for detection.\n\n Args:\n min_size (int | float, optional): The minimum size of bounding\n boxes in the images. If the size of a bounding box is less than\n ``min_size``, it would be add to ignored field.\n ... |
class ResBlock(BaseModule):
"The basic residual block used in Darknet. Each ResBlock consists of two\n ConvModules and the input is added to the final output. Each ConvModule is\n composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer\n has half of the number of the filters as much as ... |
@BACKBONES.register_module()
class Darknet(BaseModule):
"Darknet backbone.\n\n Args:\n depth (int): Depth of Darknet. Currently only support 53.\n out_indices (Sequence[int]): Output from which stages.\n frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n -... |
class Bottleneck(_Bottleneck):
'Bottleneck for the ResNet backbone in `DetectoRS\n <https://arxiv.org/pdf/2006.02334.pdf>`_.\n\n This bottleneck allows the users to specify whether to use\n SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).\n\n Args:\n inplanes (int): The... |
class ResLayer(Sequential):
"ResLayer to build ResNet style backbone for RPF in detectoRS.\n\n The difference between this module and base class is that we pass\n ``rfp_inplanes`` to the first block.\n\n Args:\n block (nn.Module): block used to build ResLayer.\n inplanes (int): inplanes of ... |
@BACKBONES.register_module()
class DetectoRS_ResNet(ResNet):
'ResNet backbone for DetectoRS.\n\n Args:\n sac (dict, optional): Dictionary to construct SAC (Switchable Atrous\n Convolution). Default: None.\n stage_with_sac (list): Which stage to use sac. Default: (False, False,\n ... |
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
'Bottleneck block for ResNeXt.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is the fi... |
@BACKBONES.register_module()
class DetectoRS_ResNeXt(DetectoRS_ResNet):
'ResNeXt backbone for DetectoRS.\n\n Args:\n groups (int): The number of groups in ResNeXt.\n base_width (int): The base width of ResNeXt.\n '
arch_settings = {50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 2... |
class HourglassModule(BaseModule):
"Hourglass Module for HourglassNet backbone.\n\n Generate module recursively and use BasicBlock as the base unit.\n\n Args:\n depth (int): Depth of current HourglassModule.\n stage_channels (list[int]): Feature channels of sub-modules in current\n ... |
@BACKBONES.register_module()
class HourglassNet(BaseModule):
'HourglassNet backbone.\n\n Stacked Hourglass Networks for Human Pose Estimation.\n More details can be found in the `paper\n <https://arxiv.org/abs/1603.06937>`_ .\n\n Args:\n downsample_times (int): Downsample times in a HourglassMo... |
class HRModule(BaseModule):
'High-Resolution Module for HRNet.\n\n In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange\n is in this module.\n '
def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None... |
@BACKBONES.register_module()
class HRNet(BaseModule):
"HRNet backbone.\n\n `High-Resolution Representations for Labeling Pixels and Regions\n arXiv: <https://arxiv.org/abs/1904.04514>`_.\n\n Args:\n extra (dict): Detailed configuration for each stage of HRNet.\n There must be 4 stages, ... |
@BACKBONES.register_module()
class MobileNetV2(BaseModule):
"MobileNetV2 backbone.\n\n Args:\n widen_factor (float): Width multiplier, multiply number of\n channels in each layer by this amount. Default: 1.0.\n out_indices (Sequence[int], optional): Output from which stages.\n ... |
@BACKBONES.register_module()
class RegNet(ResNet):
'RegNet backbone.\n\n More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .\n\n Args:\n arch (dict): The parameter of RegNets.\n\n - w0 (int): initial width\n - wa (float): slope of width\n - wm (... |
class Bottle2neck(_Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, scales=4, base_width=26, base_channels=64, stage_type='normal', **kwargs):
'Bottle2neck block for Res2Net.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the st... |
class Res2Layer(Sequential):
"Res2Layer to build Res2Net style backbone.\n\n Args:\n block (nn.Module): block used to build ResLayer.\n inplanes (int): inplanes of block.\n planes (int): planes of block.\n num_blocks (int): number of blocks.\n stride (int): stride of the firs... |
@BACKBONES.register_module()
class Res2Net(ResNet):
'Res2Net backbone.\n\n Args:\n scales (int): Scales used in Res2Net. Default: 4\n base_width (int): Basic width of each scale. Default: 26\n depth (int): Depth of res2net, from {50, 101, 152}.\n in_channels (int): Number of input i... |
class RSoftmax(nn.Module):
'Radix Softmax module in ``SplitAttentionConv2d``.\n\n Args:\n radix (int): Radix of input.\n groups (int): Groups of input.\n '
def __init__(self, radix, groups):
super().__init__()
self.radix = radix
self.groups = groups
def forwar... |
class SplitAttentionConv2d(BaseModule):
'Split-Attention Conv2d in ResNeSt.\n\n Args:\n in_channels (int): Number of channels in the input feature map.\n channels (int): Number of intermediate channels.\n kernel_size (int | tuple[int]): Size of the convolution kernel.\n stride (int ... |
class Bottleneck(_Bottleneck):
'Bottleneck block for ResNeSt.\n\n Args:\n inplane (int): Input planes of this block.\n planes (int): Middle planes of this block.\n groups (int): Groups of conv2.\n base_width (int): Base of width in terms of base channels. Default: 4.\n base_c... |
@BACKBONES.register_module()
class ResNeSt(ResNetV1d):
'ResNeSt backbone.\n\n Args:\n groups (int): Number of groups of Bottleneck. Default: 1\n base_width (int): Base width of Bottleneck. Default: 4\n radix (int): Radix of SplitAttentionConv2d. Default: 2\n reduction_factor (int): ... |
class BasicBlock(BaseModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None):
super(BasicBlock, self).__init__(init_cfg)
assert (dcn is No... |
class Bottleneck(BaseModule):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, init_cfg=None):
'Bottleneck block for ResNet.\n\n If style is "pytorch", the ... |
@BACKBONES.register_module()
class ResNet(BaseModule):
'ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n stem_channels (int | None): Number of stem channels. If not specified,\n it will be the same as `base_channels`. Default: None.\n bas... |
@BACKBONES.register_module()
class ResNetV1d(ResNet):
'ResNetV1d variant described in `Bag of Tricks\n <https://arxiv.org/pdf/1812.01187.pdf>`_.\n\n Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in\n the input stem with three 3x3 convs. And in the downsampling block, a 2x2\n ... |
class Bottleneck(_Bottleneck):
expansion = 4
def __init__(self, inplanes, planes, groups=1, base_width=4, base_channels=64, **kwargs):
'Bottleneck block for ResNeXt.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is the fi... |
@BACKBONES.register_module()
class ResNeXt(ResNet):
'ResNeXt backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n in_channels (int): Number of input image channels. Default: 3.\n num_stages (int): Resnet stages. Default: 4.\n groups (int): Group of resn... |
@BACKBONES.register_module()
class SSDVGG(VGG, BaseModule):
'VGG Backbone network for single-shot-detection.\n\n Args:\n depth (int): Depth of vgg, from {11, 13, 16, 19}.\n with_last_pool (bool): Whether to add a pooling layer at the last\n of the model\n ceil_mode (bool): When ... |
class L2Norm(ssd_neck.L2Norm):
    """Deprecated alias for the canonical ``L2Norm``.

    Kept only for backward compatibility; construction is delegated to
    the implementation in ``mmdet/models/necks/ssd_neck.py`` and a
    deprecation warning is emitted.
    """

    def __init__(self, **kwargs):
        # Forward everything to the real implementation, then warn.
        super(L2Norm, self).__init__(**kwargs)
        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py is deprecated, please use L2Norm in mmdet/models/necks/ssd_neck.py instead')
|
class TridentConv(BaseModule):
'Trident Convolution Module.\n\n Args:\n in_channels (int): Number of channels in input.\n out_channels (int): Number of channels in output.\n kernel_size (int): Size of convolution kernel.\n stride (int, optional): Convolution stride. Default: 1.\n ... |
class TridentBottleneck(Bottleneck):
'BottleBlock for TridentResNet.\n\n Args:\n trident_dilations (tuple[int, int, int]): Dilations of different\n trident branch.\n test_branch_idx (int): In inference, all 3 branches will be used\n if `test_branch_idx==-1`, otherwise only b... |
def make_trident_res_layer(block, inplanes, planes, num_blocks, stride=1, trident_dilations=(1, 2, 3), style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None, test_branch_idx=(- 1)):
'Build Trident Res Layers.'
downsample = None
if ((stride != 1) or (inplanes != (p... |
@BACKBONES.register_module()
class TridentResNet(ResNet):
'The stem layer, stage 1 and stage 2 in Trident ResNet are identical to\n ResNet, while in stage 3, Trident BottleBlock is utilized to replace the\n normal BottleBlock to yield trident output. Different branch shares the\n convolution weight but u... |
def build_backbone(cfg):
    """Build a backbone from its config via the BACKBONES registry.

    Args:
        cfg (dict): Config dict; its ``type`` key selects the registered
            backbone class, remaining keys are constructor kwargs.

    Returns:
        The constructed backbone instance.
    """
    return BACKBONES.build(cfg)
|
def build_neck(cfg):
    """Build a neck from its config via the NECKS registry.

    Args:
        cfg (dict): Config dict; its ``type`` key selects the registered
            neck class, remaining keys are constructor kwargs.

    Returns:
        The constructed neck instance.
    """
    return NECKS.build(cfg)
|
def build_roi_extractor(cfg):
    """Build a RoI extractor from its config via the ROI_EXTRACTORS registry.

    Args:
        cfg (dict): Config dict; its ``type`` key selects the registered
            RoI extractor class, remaining keys are constructor kwargs.

    Returns:
        The constructed RoI extractor instance.
    """
    return ROI_EXTRACTORS.build(cfg)
|
def build_shared_head(cfg):
    """Build a shared head from its config via the SHARED_HEADS registry.

    Args:
        cfg (dict): Config dict; its ``type`` key selects the registered
            shared-head class, remaining keys are constructor kwargs.

    Returns:
        The constructed shared head instance.
    """
    return SHARED_HEADS.build(cfg)
|
def build_head(cfg):
    """Build a head from its config via the HEADS registry.

    Args:
        cfg (dict): Config dict; its ``type`` key selects the registered
            head class, remaining keys are constructor kwargs.

    Returns:
        The constructed head instance.
    """
    return HEADS.build(cfg)
|
def build_loss(cfg):
    """Build a loss module from its config via the LOSSES registry.

    Args:
        cfg (dict): Config dict; its ``type`` key selects the registered
            loss class, remaining keys are constructor kwargs.

    Returns:
        The constructed loss instance.
    """
    return LOSSES.build(cfg)
|
def build_detector(cfg, train_cfg=None, test_cfg=None):
'Build detector.'
if ((train_cfg is not None) or (test_cfg is not None)):
warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
assert ((cfg.get('train_cfg') is None) or (train_cfg is None)), 'train_... |
@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
'Anchor-free head (FCOS, Fovea, RepPoints, etc.).\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n ... |
class BaseMaskHead(BaseModule, metaclass=ABCMeta):
'Base class for mask heads used in One-Stage Instance Segmentation.'
def __init__(self, init_cfg):
super(BaseMaskHead, self).__init__(init_cfg)
@abstractmethod
def loss(self, **kwargs):
pass
@abstractmethod
def get_results(s... |
@HEADS.register_module()
class GARetinaHead(GuidedAnchorHead):
'Guided-Anchor-based RetinaNet head.'
def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, init_cfg=None, **kwargs):
if (init_cfg is None):
init_cfg = dict(type='Normal', layer='Conv2d', ... |
@HEADS.register_module()
class LADHead(PAAHead):
'Label Assignment Head from the paper: `Improving Object Detection by\n Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_'
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
def get_label_assignment(self, cls_scores, bb... |
@HEADS.register_module()
class NASFCOSHead(FCOSHead):
'Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.\n\n It is quite similar with FCOS head, except for the searched structure of\n classification branch and bbox regression branch, where a structure of\n "dconv3x3, conv3x3, dconv3... |
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
'PISA Retinanet Head.\n\n The head owns the same structure with Retinanet Head, but differs in two\n aspects:\n 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to\n change the positive loss weights.\n ... |
@HEADS.register_module()
class PISASSDHead(SSDHead):
def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
'Compute losses of the head.\n\n Args:\n cls_scores (list[Tensor]): Box scores for each scale level\n Has shape (N, num_anc... |
@HEADS.register_module()
class RetinaHead(AnchorHead):
'An anchor-based head used in `RetinaNet\n <https://arxiv.org/pdf/1708.02002.pdf>`_.\n\n The head contains two subnetworks. The first classifies anchor boxes and\n the second regresses deltas for the anchors.\n\n Example:\n >>> import torch... |
@HEADS.register_module()
class RetinaSepBNHead(AnchorHead):
'"RetinaHead with separate BN.\n\n In RetinaHead, conv/norm layers are shared across different FPN levels,\n while in RetinaSepBNHead, conv layers are shared across different FPN\n levels, but BN layers are separated.\n '
def __init__(se... |
@HEADS.register_module()
class SSDHead(AnchorHead):
'SSD head used in https://arxiv.org/abs/1512.02325.\n\n Args:\n num_classes (int): Number of categories excluding the background\n category.\n in_channels (int): Number of channels in the input feature map.\n stacked_convs (int... |
@DETECTORS.register_module()
class ATSS(SingleStageDetector):
'Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_.'
def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg, ... |
@DETECTORS.register_module()
class AutoAssign(SingleStageDetector):
'Implementation of `AutoAssign: Differentiable Label Assignment for Dense\n Object Detection <https://arxiv.org/abs/2007.03496>`_.'
def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None):
sup... |
class BaseDetector(BaseModule, metaclass=ABCMeta):
'Base class for detectors.'
def __init__(self, init_cfg=None):
super(BaseDetector, self).__init__(init_cfg)
self.fp16_enabled = False
@property
def with_neck(self):
'bool: whether the detector has a neck'
return (hasa... |
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
'Implementation of `Cascade R-CNN: Delving into High Quality Object\n Detection <https://arxiv.org/abs/1906.09756>`_'
def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init... |
@DETECTORS.register_module()
class CenterNet(SingleStageDetector):
'Implementation of CenterNet(Objects as Points)\n\n <https://arxiv.org/abs/1904.07850>.\n '
def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
super(CenterNet, self).__i... |
@DETECTORS.register_module()
class DeformableDETR(DETR):
    """Deformable DETR detector, registered in the DETECTORS registry."""

    def __init__(self, *args, **kwargs):
        # NOTE: ``super(DETR, self)`` is intentional, not a typo. It starts
        # the MRO lookup *after* DETR, so ``DETR.__init__`` is skipped and
        # DETR's own parent class is initialized directly with the same
        # arguments.
        super(DETR, self).__init__(*args, **kwargs)
|
@DETECTORS.register_module()
class DETR(SingleStageDetector):
'Implementation of `DETR: End-to-End Object Detection with\n Transformers <https://arxiv.org/pdf/2005.12872>`_'
def __init__(self, backbone, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
super(DETR, self).__... |
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
'Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_'
def __init__(self, backbone, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None):
super(FastRCNN, self).__init__(backbone=backbone, neck=neck, roi_... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.