code
stringlengths
17
6.64M
@DETECTORS.register_module() class FasterRCNN(TwoStageDetector): 'Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_' def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(FasterRCNN, self).__init__(backbone=backbone,...
@DETECTORS.register_module() class FCOS(SingleStageDetector): 'Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg, t...
@DETECTORS.register_module() class FOVEA(SingleStageDetector): 'Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(FOVEA, self).__init__(backbone, neck, bbox_head, train_...
@DETECTORS.register_module() class FSAF(SingleStageDetector): 'Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg, t...
@DETECTORS.register_module()
class GFL(SingleStageDetector):
    """Implementation of `GFL <https://arxiv.org/abs/2006.04388>`_

    Thin registration wrapper: all detection logic lives in
    ``SingleStageDetector``; this class only forwards the configuration.
    """

    def __init__(self,
                 backbone,
                 neck,
                 bbox_head,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        # Pure pass-through: every argument is handed unchanged to the
        # SingleStageDetector base constructor.
        super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
                                  test_cfg, pretrained, init_cfg)
@DETECTORS.register_module() class GridRCNN(TwoStageDetector): 'Grid R-CNN.\n\n This detector is the implementation of:\n - Grid R-CNN (https://arxiv.org/abs/1811.12030)\n - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)\n ' def __init__(self, backbone, rpn_head, roi_head, ...
@DETECTORS.register_module() class HybridTaskCascade(CascadeRCNN): 'Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_' def __init__(self, **kwargs): super(HybridTaskCascade, self).__init__(**kwargs) @property def with_semantic(self): 'bool: whether the detector has a semant...
@DETECTORS.register_module() class KnowledgeDistillationSingleStageDetector(SingleStageDetector): 'Implementation of `Distilling the Knowledge in a Neural Network.\n <https://arxiv.org/abs/1503.02531>`_.\n\n Args:\n teacher_config (str | dict): Config file path\n or the config object of te...
@DETECTORS.register_module() class LAD(KnowledgeDistillationSingleStageDetector): 'Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_.' def __init__(self, backbone, neck, bbox_head, teacher_backbone, teacher_neck, teacher_bbox_head, teacher_ckpt, eval_teacher=True, train_cfg=None, test_cfg=None,...
@DETECTORS.register_module() class MaskRCNN(TwoStageDetector): 'Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_' def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(MaskRCNN, self).__init__(backbone=backbone, neck=...
@DETECTORS.register_module() class MaskScoringRCNN(TwoStageDetector): 'Mask Scoring RCNN.\n\n https://arxiv.org/abs/1903.00241\n ' def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(MaskScoringRCNN, self).__init__(backbone=bac...
@DETECTORS.register_module() class NASFCOS(SingleStageDetector): 'NAS-FCOS: Fast Neural Architecture Search for Object Detection.\n\n https://arxiv.org/abs/1906.0442\n ' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(NASFCOS, ...
@DETECTORS.register_module() class PAA(SingleStageDetector): 'Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_.' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,...
@DETECTORS.register_module() class PanopticFPN(TwoStagePanopticSegmentor): 'Implementation of `Panoptic feature pyramid\n networks <https://arxiv.org/pdf/1901.02446>`_' def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None, sema...
@DETECTORS.register_module() class PointRend(TwoStageDetector): 'PointRend: Image Segmentation as Rendering\n\n This detector is the implementation of\n `PointRend <https://arxiv.org/abs/1912.08193>`_.\n\n ' def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=...
@DETECTORS.register_module() class QueryInst(SparseRCNN): 'Implementation of\n `Instances as Queries <http://arxiv.org/abs/2105.01928>`_' def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(QueryInst, self).__init__(backbone=backb...
@DETECTORS.register_module() class RepPointsDetector(SingleStageDetector): 'RepPoints: Point Set Representation for Object Detection.\n\n This detector is the implementation of:\n - RepPoints detector (https://arxiv.org/pdf/1904.11490)\n ' def __init__(self, backbone, neck, bbox_head, train_...
@DETECTORS.register_module() class RetinaNet(SingleStageDetector): 'Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(RetinaNet, self).__init__(backbone, neck, bbox_hea...
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
    """Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""

    def __init__(self, **kwargs):
        # SCNet reuses the CascadeRCNN pipeline unchanged; every keyword
        # argument is forwarded verbatim to the parent constructor.
        super(SCNet, self).__init__(**kwargs)
@DETECTORS.register_module() class SingleStageDetector(BaseDetector): 'Base class for single-stage detectors.\n\n Single-stage detectors directly and densely predict bounding boxes on the\n output features of the backbone+neck.\n ' def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=No...
@DETECTORS.register_module() class SOLO(SingleStageInstanceSegmentor): '`SOLO: Segmenting Objects by Locations\n <https://arxiv.org/abs/1912.04488>`_\n\n ' def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, init_cfg=None, pretrained=None): sup...
@DETECTORS.register_module() class SparseRCNN(TwoStageDetector): 'Implementation of `Sparse R-CNN: End-to-End Object Detection with\n Learnable Proposals <https://arxiv.org/abs/2011.12450>`_' def __init__(self, *args, **kwargs): super(SparseRCNN, self).__init__(*args, **kwargs) assert self...
@DETECTORS.register_module() class TOOD(SingleStageDetector): 'Implementation of `TOOD: Task-aligned One-stage Object Detection.\n <https://arxiv.org/abs/2108.07755>`_.' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(TOOD, self)....
@DETECTORS.register_module() class TridentFasterRCNN(FasterRCNN): 'Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_' def __init__(self, backbone, rpn_head, roi_head, train_cfg, test_cfg, neck=None, pretrained=None, init_cfg=None): super(TridentFasterRCNN, self).__init__(backbone=bac...
@DETECTORS.register_module() class TwoStageDetector(BaseDetector): 'Base class for two-stage detectors.\n\n Two-stage detectors typically consisting of a region proposal network and a\n task-specific regression head.\n ' def __init__(self, backbone, neck=None, rpn_head=None, roi_head=None, train_cfg...
@DETECTORS.register_module() class VFNet(SingleStageDetector): 'Implementation of `VarifocalNet\n (VFNet).<https://arxiv.org/abs/2008.13367>`_' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(VFNet, self).__init__(backbone, neck, ...
@DETECTORS.register_module() class YOLACT(SingleStageDetector): 'Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_' def __init__(self, backbone, neck, bbox_head, segm_head, mask_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(YOLACT, self).__init__(backbone, n...
@DETECTORS.register_module() class YOLOV3(SingleStageDetector): def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained, init_cfg) def onnx_export(self, img, ...
@DETECTORS.register_module() class YOLOF(SingleStageDetector): 'Implementation of `You Only Look One-level Feature\n <https://arxiv.org/abs/2103.09460>`_' def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None): super(YOLOF, self).__init__(backbone, neck, bbox...
@mmcv.jit(coderize=True) def accuracy(pred, target, topk=1, thresh=None): 'Calculate accuracy according to the prediction and target.\n\n Args:\n pred (torch.Tensor): The model prediction, shape (N, num_class)\n target (torch.Tensor): The target of each prediction, shape (N, )\n topk (int ...
class Accuracy(nn.Module): def __init__(self, topk=(1,), thresh=None): 'Module to calculate the accuracy.\n\n Args:\n topk (tuple, optional): The criterion used to calculate the\n accuracy. Defaults to (1,).\n thresh (float, optional): If not None, predictions ...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'): 'Calculate balanced L1 loss.\n\n Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_\n\n Args:\n pred (torch.Tensor): The prediction with shape...
@LOSSES.register_module() class BalancedL1Loss(nn.Module): 'Balanced L1 Loss.\n\n arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n Args:\n alpha (float): The denominator ``alpha`` in the balanced L1 loss.\n Defaults to 0.5.\n gamma (float): The ``gamma`` in the balanced L...
def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=(- 100)): 'Calculate the CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the number\n of classes.\n label (torch.Tensor): The learni...
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index): 'Expand onehot labels to match the size of prediction.' bin_labels = labels.new_full((labels.size(0), label_channels), 0) valid_mask = ((labels >= 0) & (labels != ignore_index)) inds = torch.nonzero((valid_mask & (labels <...
def binary_cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None, class_weight=None, ignore_index=(- 100)): 'Calculate the binary CrossEntropy loss.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, 1).\n label (torch.Tensor): The learning label of the prediction....
def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None, class_weight=None, ignore_index=None): 'Calculate the CrossEntropy loss for masks.\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C, *), C is the\n number of classes. The trailing * indicates arbitr...
@LOSSES.register_module() class CrossEntropyLoss(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', class_weight=None, ignore_index=None, loss_weight=1.0): 'CrossEntropyLoss.\n\n Args:\n use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n...
def dice_loss(pred, target, weight=None, eps=0.001, reduction='mean', naive_dice=False, avg_factor=None): 'Calculate dice loss, there are two forms of dice loss is supported:\n\n - the one proposed in `V-Net: Fully Convolutional Neural\n Networks for Volumetric Medical Image Segmentation\n ...
@LOSSES.register_module() class DiceLoss(nn.Module): def __init__(self, use_sigmoid=True, activate=True, reduction='mean', naive_dice=False, loss_weight=1.0, eps=0.001): 'Compute dice loss.\n\n Args:\n use_sigmoid (bool, optional): Whether to the prediction is\n used for ...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0): '`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian\n distribution.\n\n Args:\n pred (torch.Tensor): The prediction.\n gaussian_target (torch.Tenso...
@LOSSES.register_module() class GaussianFocalLoss(nn.Module): 'GaussianFocalLoss is a variant of focal loss.\n\n More details can be found in the `paper\n <https://arxiv.org/abs/1808.01244>`_\n Code is modified from `kp_utils.py\n <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def quality_focal_loss(pred, target, beta=2.0): 'Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning\n Qualified and Distributed Bounding Boxes for Dense Object Detection\n <https://arxiv.org/abs/2006.04388>`_.\n\n Args:\n pred ...
@weighted_loss def quality_focal_loss_with_prob(pred, target, beta=2.0): 'Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning\n Qualified and Distributed Bounding Boxes for Dense Object Detection\n <https://arxiv.org/abs/2006.04388>`_.\n Different from `quality_focal_loss`, this function a...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def distribution_focal_loss(pred, label): 'Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning\n Qualified and Distributed Bounding Boxes for Dense Object Detection\n <https://arxiv.org/abs/2006.04388>`_.\n\n Args:\n pred (...
@LOSSES.register_module() class QualityFocalLoss(nn.Module): 'Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:\n Learning Qualified and Distributed Bounding Boxes for Dense Object\n Detection <https://arxiv.org/abs/2006.04388>`_.\n\n Args:\n use_sigmoid (bool): Whether sigmoid ope...
@LOSSES.register_module() class DistributionFocalLoss(nn.Module): "Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:\n Learning Qualified and Distributed Bounding Boxes for Dense Object\n Detection <https://arxiv.org/abs/2006.04388>`_.\n\n Args:\n reduction (str): Options are ...
def _expand_onehot_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(((labels >= 0) & (labels < label_channels)), as_tuple=False).squeeze() if (inds.numel() > 0): bin_labels[(inds, labels[inds])] = 1 bin_label_...
@LOSSES.register_module() class GHMC(nn.Module): 'GHM Classification Loss.\n\n Details of the theorem can be viewed in the paper\n `Gradient Harmonized Single-stage Detector\n <https://arxiv.org/abs/1811.05181>`_.\n\n Args:\n bins (int): Number of the unit regions for distribution calculation.\...
@LOSSES.register_module() class GHMR(nn.Module): 'GHM Regression Loss.\n\n Details of the theorem can be viewed in the paper\n `Gradient Harmonized Single-stage Detector\n <https://arxiv.org/abs/1811.05181>`_.\n\n Args:\n mu (float): The parameter for the Authentic Smooth L1 loss.\n bins...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def knowledge_distillation_kl_div_loss(pred, soft_label, T, detach_target=True): 'Loss function for knowledge distilling using KL divergence.\n\n Args:\n pred (Tensor): Predicted logits with shape (N, n + 1).\n soft_label (Tensor): Target log...
@LOSSES.register_module() class KnowledgeDistillationKLDivLoss(nn.Module): "Loss function for knowledge distilling using KL divergence.\n\n Args:\n reduction (str): Options are `'none'`, `'mean'` and `'sum'`.\n loss_weight (float): Loss weight of current loss.\n T (int): Temperature for di...
@weighted_loss
def mse_loss(pred, target):
    """Wrapper of mse loss.

    Computes the element-wise mean squared error without reduction; the
    ``weighted_loss`` decorator applies per-element weighting and the
    requested reduction afterwards.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Calculated element-wise loss, same shape as ``pred``.
    """
    return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module() class MSELoss(nn.Module): 'MSELoss.\n\n Args:\n reduction (str, optional): The method that reduces the loss to a\n scalar. Options are "none", "mean" and "sum".\n loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n ' def __init__(s...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def smooth_l1_loss(pred, target, beta=1.0): 'Smooth L1 loss.\n\n Args:\n pred (torch.Tensor): The prediction.\n target (torch.Tensor): The learning target of the prediction.\n beta (float, optional): The threshold in the piecewise func...
@mmcv.jit(derivate=True, coderize=True) @weighted_loss def l1_loss(pred, target): 'L1 loss.\n\n Args:\n pred (torch.Tensor): The prediction.\n target (torch.Tensor): The learning target of the prediction.\n\n Returns:\n torch.Tensor: Calculated loss\n ' if (target.numel() == 0): ...
@LOSSES.register_module() class SmoothL1Loss(nn.Module): 'Smooth L1 loss.\n\n Args:\n beta (float, optional): The threshold in the piecewise function.\n Defaults to 1.0.\n reduction (str, optional): The method to reduce the loss.\n Options are "none", "mean" and "sum". Defau...
@LOSSES.register_module() class L1Loss(nn.Module): 'L1 loss.\n\n Args:\n reduction (str, optional): The method to reduce the loss.\n Options are "none", "mean" and "sum".\n loss_weight (float, optional): The weight of loss.\n ' def __init__(self, reduction='mean', loss_weight=1...
def reduce_loss(loss, reduction): 'Reduce loss as specified.\n\n Args:\n loss (Tensor): Elementwise loss tensor.\n reduction (str): Options are "none", "mean" and "sum".\n\n Return:\n Tensor: Reduced loss tensor.\n ' reduction_enum = F._Reduction.get_enum(reduction) if (reduc...
@mmcv.jit(derivate=True, coderize=True) def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): 'Apply element-wise weight and reduce loss.\n\n Args:\n loss (Tensor): Element-wise loss.\n weight (Tensor): Element-wise weights.\n reduction (str): Same as built-in losse...
def weighted_loss(loss_func): "Create a weighted version of a given loss function.\n\n To use this decorator, the loss function must have the signature like\n `loss_func(pred, target, **kwargs)`. The function only needs to compute\n element-wise loss without any reduction. This decorator will add weight\...
@mmcv.jit(derivate=True, coderize=True) def varifocal_loss(pred, target, weight=None, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', avg_factor=None): '`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_\n\n Args:\n pred (torch.Tensor): The prediction with shape (N, C), C is the\n ...
@LOSSES.register_module() class VarifocalLoss(nn.Module): def __init__(self, use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='mean', loss_weight=1.0): '`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_\n\n Args:\n use_sigmoid (bool, optional): Whether the predic...
@NECKS.register_module() class BFP(BaseModule): "BFP (Balanced Feature Pyramids)\n\n BFP takes multi-level features as inputs and gather them into a single one,\n then refine the gathered feature and scatter the refined results to\n multi-level features. This module is used in Libra R-CNN (CVPR 2019), se...
@NECKS.register_module() class ChannelMapper(BaseModule): "Channel Mapper to reduce/increase channels of backbone features.\n\n This is used to reduce/increase channels of backbone features.\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (int): Number o...
class Bottleneck(nn.Module): 'Bottleneck block for DilatedEncoder used in `YOLOF.\n\n <https://arxiv.org/abs/2103.09460>`.\n\n The Bottleneck contains three ConvLayers and one residual connection.\n\n Args:\n in_channels (int): The number of input channels.\n mid_channels (int): The number ...
@NECKS.register_module() class DilatedEncoder(nn.Module): 'Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.\n\n This module contains two types of components:\n - the original FPN lateral convolution layer and fpn convolution layer,\n which are 1x1 conv + 3x3 conv\n - th...
class Transition(BaseModule): 'Base class for transition.\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of output channels.\n ' def __init__(self, in_channels, out_channels, init_cfg=None): super().__init__(init_cfg) self.in_channels...
class UpInterpolationConv(Transition): 'A transition used for up-sampling.\n\n Up-sample the input by interpolation then refines the feature by\n a convolution layer.\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of output channels.\n scale_fac...
class LastConv(Transition): 'A transition used for refining the output of the last stage.\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of output channels.\n num_inputs (int): Number of inputs of the FPN features.\n kernel_size (int): Kernel s...
@NECKS.register_module() class FPG(BaseModule): "FPG.\n\n Implementation of `Feature Pyramid Grids (FPG)\n <https://arxiv.org/abs/2004.03580>`_.\n This implementation only gives the basic structure stated in the paper.\n But users can implement different type of transitions to fully explore the\n t...
@NECKS.register_module() class FPN(BaseModule): "Feature Pyramid Network.\n\n This is an implementation of paper `Feature Pyramid Networks for Object\n Detection <https://arxiv.org/abs/1612.03144>`_.\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (in...
@NECKS.register_module() class HRFPN(BaseModule): 'HRFPN (High Resolution Feature Pyramids)\n\n paper: `High-Resolution Representations for Labeling Pixels and Regions\n <https://arxiv.org/abs/1904.04514>`_.\n\n Args:\n in_channels (list): number of channels for each branch.\n out_channels ...
@NECKS.register_module() class NASFPN(BaseModule): 'NAS-FPN.\n\n Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture\n for Object Detection <https://arxiv.org/abs/1904.07392>`_\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (in...
@NECKS.register_module() class NASFCOS_FPN(BaseModule): 'FPN structure in NASFPN.\n\n Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for\n Object Detection <https://arxiv.org/abs/1906.04423>`_\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out...
@NECKS.register_module() class PAFPN(FPN): "Path Aggregation Network for Instance Segmentation.\n\n This is an implementation of the `PAFPN in Path Aggregation Network\n <https://arxiv.org/abs/1803.01534>`_.\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_chan...
class ASPP(BaseModule): 'ASPP (Atrous Spatial Pyramid Pooling)\n\n This is an implementation of the ASPP module used in DetectoRS\n (https://arxiv.org/pdf/2006.02334.pdf)\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of channels produced by this modul...
@NECKS.register_module() class RFP(FPN): 'RFP (Recursive Feature Pyramid)\n\n This is an implementation of RFP in `DetectoRS\n <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the\n input of RFP should be multi level features along with origin input image\n of backbone.\n\n Ar...
class DetectionBlock(BaseModule): "Detection block in YOLO neck.\n\n Let out_channels = n, the DetectionBlock contains:\n Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.\n The first 6 ConvLayers are formed the following way:\n 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.\n The Conv2D layer is 1...
@NECKS.register_module() class YOLOV3Neck(BaseModule): "The neck of YOLOV3.\n\n It can be treated as a simplified version of FPN. It\n will take the result from Darknet backbone and do some upsampling and\n concatenation. It will finally output the detection result.\n\n Note:\n The input feats ...
@NECKS.register_module() class YOLOXPAFPN(BaseModule): "Path Aggregation Network used in YOLOX.\n\n Args:\n in_channels (List[int]): Number of input channels per scale.\n out_channels (int): Number of output channels (used at each scale)\n num_csp_blocks (int): Number of bottlenecks in CSP...
@PLUGIN_LAYERS.register_module() class DropBlock(nn.Module): 'Randomly drop some regions of feature maps.\n\n Please refer to the method proposed in `DropBlock\n <https://arxiv.org/abs/1810.12890>`_ for details.\n\n Args:\n drop_prob (float): The probability of dropping each block.\n bloc...
class BaseRoIHead(BaseModule, metaclass=ABCMeta): 'Base class for RoIHeads.' def __init__(self, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, shared_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): super(BaseRoIHead, self).__init__(init_cf...
@HEADS.register_module() class ConvFCBBoxHead(BBoxHead): 'More general bbox head, with shared conv and fc layers and two optional\n separated branches.\n\n .. code-block:: none\n\n /-> cls convs -> cls fcs -> cls\n shared convs -> shared fcs\n ...
@HEADS.register_module() class Shared2FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared2FCBBoxHead, self).__init__(*args, num_shared_convs=0, num_shared_fcs=2, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels...
@HEADS.register_module() class Shared4Conv1FCBBoxHead(ConvFCBBoxHead): def __init__(self, fc_out_channels=1024, *args, **kwargs): super(Shared4Conv1FCBBoxHead, self).__init__(*args, num_shared_convs=4, num_shared_fcs=1, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_ou...
class BasicResBlock(BaseModule): 'Basic residual block.\n\n This block is a little different from the block in the ResNet backbone.\n The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.\n\n Args:\n in_channels (int): Channels of the input feature map.\n out_channels (i...
@HEADS.register_module() class DoubleConvFCBBoxHead(BBoxHead): 'Bbox head used in Double-Head R-CNN\n\n .. code-block:: none\n\n /-> cls\n /-> shared convs ->\n \\-> reg\n roi features\n ...
@HEADS.register_module() class SCNetBBoxHead(ConvFCBBoxHead): 'BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us\n to get intermediate shared feature.\n ' def _forward_shared(self, x): 'Forward function ...
@HEADS.register_module() class DoubleHeadRoIHead(StandardRoIHead): 'RoI head for Double Head RCNN.\n\n https://arxiv.org/abs/1904.06493\n ' def __init__(self, reg_roi_scale_factor, **kwargs): super(DoubleHeadRoIHead, self).__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_fac...
@HEADS.register_module() class CoarseMaskHead(FCNMaskHead): 'Coarse mask head used in PointRend.\n\n Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample\n the input feature map instead of upsample it.\n\n Args:\n num_convs (int): Number of conv layers in the head. Default: 0...
@HEADS.register_module() class DynamicMaskHead(FCNMaskHead): 'Dynamic Mask Head for\n `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\n\n Args:\n num_convs (int): Number of convolution layer.\n Defaults to 4.\n roi_feat_size (int): The output size of RoI extractor,\n ...
@HEADS.register_module() class FeatureRelayHead(BaseModule): 'Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n Args:\n in_channels (int, optional): number of input channels. Default: 256.\n conv_out_channels (int, optional): number of output channels before\n ...
@HEADS.register_module() class FusedSemanticHead(BaseModule): 'Multi-level fused semantic segmentation head.\n\n .. code-block:: none\n\n in_1 -> 1x1 conv ---\n |\n in_2 -> 1x1 conv -- |\n ||\n in_3 -> 1x1 conv - ||\n ...
@HEADS.register_module() class GlobalContextHead(BaseModule): 'Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n Args:\n num_convs (int, optional): number of convolutional layer in GlbCtxHead.\n Default: 4.\n in_channels (int, optional): number of input chann...
@HEADS.register_module() class HTCMaskHead(FCNMaskHead): def __init__(self, with_conv_res=True, *args, **kwargs): super(HTCMaskHead, self).__init__(*args, **kwargs) self.with_conv_res = with_conv_res if self.with_conv_res: self.conv_res = ConvModule(self.conv_out_channels, sel...
@HEADS.register_module() class SCNetMaskHead(FCNMaskHead): 'Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n Args:\n conv_to_res (bool, optional): if True, change the conv layers to\n ``SimplifiedBasicBlock``.\n ' def __init__(self, conv_to_res=True, **kwargs): s...
@HEADS.register_module() class SCNetSemanticHead(FusedSemanticHead): 'Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n Args:\n conv_to_res (bool, optional): if True, change the conv layers to\n ``SimplifiedBasicBlock``.\n ' def __init__(self, conv_to_res=True, **kwargs):...
@HEADS.register_module() class PISARoIHead(StandardRoIHead): 'The RoI head for `Prime Sample Attention in Object Detection\n <https://arxiv.org/abs/1904.04821>`_.' def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): "Forward function fo...
@SHARED_HEADS.register_module() class ResLayer(BaseModule): def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None, pretrained=None, init_cfg=None): super(ResLayer, self).__init__(init_cfg) se...