code stringlengths 17 6.64M |
|---|
def test_is_filepath():
    """``mmcv.is_filepath`` accepts str and Path inputs, rejects other types."""
    for valid in (__file__, 'abc', Path('/etc')):
        assert mmcv.is_filepath(valid)
    # A non-path value such as an int is rejected.
    assert not mmcv.is_filepath(0)
|
def test_fopen():
    """``mmcv.fopen`` returns a readable object for both str and Path inputs."""
    for path in (__file__, Path(__file__)):
        assert hasattr(mmcv.fopen(path), 'read')
|
def test_check_file_exist():
    """Existing files pass silently; a missing path raises FileNotFoundError."""
    # Should not raise for a file that exists.
    mmcv.check_file_exist(__file__)
    missing = 'no_such_file.txt'
    with pytest.raises(FileNotFoundError):
        mmcv.check_file_exist(missing)
|
def test_scandir():
folder = osp.join(osp.dirname(osp.dirname(__file__)), 'data/for_scan')
filenames = ['a.bin', '1.txt', '2.txt', '1.json', '2.json', '3.TXT']
assert (set(mmcv.scandir(folder)) == set(filenames))
assert (set(mmcv.scandir(Path(folder))) == set(filenames))
assert (set(mmcv.scandir(f... |
def reset_string_io(io):
    """Empty *io* in place so it behaves like a freshly constructed StringIO."""
    # Rewind first, then truncate at the current (zero) position: the buffer
    # ends up empty with the cursor at the start.
    io.seek(0)
    io.truncate()
|
class TestProgressBar():
def test_start(self):
out = StringIO()
bar_width = 20
prog_bar = mmcv.ProgressBar(bar_width=bar_width, file=out)
assert (out.getvalue() == 'completed: 0, elapsed: 0s')
reset_string_io(out)
prog_bar = mmcv.ProgressBar(bar_width=bar_width, st... |
def sleep_1s(num):
    """Simulate a one-second task: sleep, then echo *num* back unchanged."""
    time.sleep(1.0)
    return num
|
def test_track_progress_list():
out = StringIO()
ret = mmcv.track_progress(sleep_1s, [1, 2, 3], bar_width=3, file=out)
assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3, 1.0 task/s, elapsed: ... |
def test_track_progress_iterator():
out = StringIO()
ret = mmcv.track_progress(sleep_1s, ((i for i in [1, 2, 3]), 3), bar_width=3, file=out)
assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2s, ETA: 1s\r[>>>] 3/3,... |
def test_track_iter_progress():
out = StringIO()
ret = []
for num in mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out):
ret.append(sleep_1s(num))
assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elapsed: 1s, ETA: 2s\r[>> ] 2/3, 1.0 task/s, elapsed: 2... |
def test_track_enum_progress():
out = StringIO()
ret = []
count = []
for (i, num) in enumerate(mmcv.track_iter_progress([1, 2, 3], bar_width=3, file=out)):
ret.append(sleep_1s(num))
count.append(i)
assert (out.getvalue() == '[ ] 0/3, elapsed: 0s, ETA:\r[> ] 1/3, 1.0 task/s, elap... |
def test_track_parallel_progress_list():
    """Parallel tracking over a list returns results in input order."""
    out = StringIO()
    tasks = [1, 2, 3, 4]
    results = mmcv.track_parallel_progress(
        sleep_1s, tasks, 2, bar_width=4, file=out)
    assert results == tasks
|
def test_track_parallel_progress_iterator():
    """Parallel tracking also accepts a (generator, length) pair as tasks."""
    out = StringIO()
    task_gen = (i for i in [1, 2, 3, 4])
    results = mmcv.track_parallel_progress(
        sleep_1s, (task_gen, 4), 2, bar_width=4, file=out)
    assert results == [1, 2, 3, 4]
|
def test_registry():
CATS = mmcv.Registry('cat')
assert (CATS.name == 'cat')
assert (CATS.module_dict == {})
assert (len(CATS) == 0)
@CATS.register_module()
class BritishShorthair():
pass
assert (len(CATS) == 1)
assert (CATS.get('BritishShorthair') is BritishShorthair)
cl... |
def test_multi_scope_registry():
DOGS = mmcv.Registry('dogs')
assert (DOGS.name == 'dogs')
assert (DOGS.scope == 'test_registry')
assert (DOGS.module_dict == {})
assert (len(DOGS) == 0)
@DOGS.register_module()
class GoldenRetriever():
pass
assert (len(DOGS) == 1)
assert (D... |
def test_build_from_cfg():
BACKBONES = mmcv.Registry('backbone')
@BACKBONES.register_module()
class ResNet():
def __init__(self, depth, stages=4):
self.depth = depth
self.stages = stages
@BACKBONES.register_module()
class ResNeXt():
def __init__(self, de... |
def test_assert_dict_contains_subset():
dict_obj = {'a': 'test1', 'b': 2, 'c': (4, 6)}
expected_subset = {'a': 'test1', 'b': 2, 'c': (4, 6)}
assert mmcv.assert_dict_contains_subset(dict_obj, expected_subset)
expected_subset = {'a': 'test1', 'b': 2, 'c': (6, 4)}
assert (not mmcv.assert_dict_contain... |
def test_assert_attrs_equal():
class TestExample(object):
(a, b, c) = (1, ('wvi', 3), [4.5, 3.14])
def test_func(self):
return self.b
assert mmcv.assert_attrs_equal(TestExample, {'a': 1, 'b': ('wvi', 3), 'c': [4.5, 3.14]})
assert (not mmcv.assert_attrs_equal(TestExample, {'a'... |
@pytest.mark.parametrize('obj', assert_dict_has_keys_data_1)
@pytest.mark.parametrize('expected_keys, ret_value', assert_dict_has_keys_data_2)
def test_assert_dict_has_keys(obj, expected_keys, ret_value):
    """assert_dict_has_keys reports whether every expected key is present."""
    actual = mmcv.assert_dict_has_keys(obj, expected_keys)
    assert actual == ret_value
|
@pytest.mark.parametrize('result_keys', assert_keys_equal_data_1)
@pytest.mark.parametrize('target_keys, ret_value', assert_keys_equal_data_2)
def test_assert_keys_equal(result_keys, target_keys, ret_value):
    """assert_keys_equal reports whether the two key collections match."""
    actual = mmcv.assert_keys_equal(result_keys, target_keys)
    assert actual == ret_value
|
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_is_norm_layer():
assert (not mmcv.assert_is_norm_layer(nn.Conv3d(3, 64, 3)))
assert mmcv.assert_is_norm_layer(nn.BatchNorm3d(128))
assert mmcv.assert_is_norm_layer(nn.GroupNorm(8, 64))
assert (not mmcv.assert_is_norm... |
@pytest.mark.skipif((torch is None), reason='requires torch library')
def test_assert_params_all_zeros():
demo_module = nn.Conv2d(3, 64, 3)
nn.init.constant_(demo_module.weight, 0)
nn.init.constant_(demo_module.bias, 0)
assert mmcv.assert_params_all_zeros(demo_module)
nn.init.xavier_normal_(demo_m... |
def test_check_python_script(capsys):
mmcv.utils.check_python_script('./tests/data/scripts/hello.py zz')
captured = capsys.readouterr().out
assert (captured == 'hello zz!\n')
mmcv.utils.check_python_script('./tests/data/scripts/hello.py agent')
captured = capsys.readouterr().out
assert (captur... |
def test_timer_init():
    """A Timer runs immediately by default, or only after .start() with start=False."""
    # Deferred start: not running until started explicitly.
    timer = mmcv.Timer(start=False)
    assert not timer.is_running
    timer.start()
    assert timer.is_running
    # Default construction starts the timer right away.
    timer = mmcv.Timer()
    assert timer.is_running
|
def test_timer_run():
timer = mmcv.Timer()
time.sleep(1)
assert (abs((timer.since_start() - 1)) < 0.01)
time.sleep(1)
assert (abs((timer.since_last_check() - 1)) < 0.01)
assert (abs((timer.since_start() - 2)) < 0.01)
timer = mmcv.Timer(False)
with pytest.raises(mmcv.TimerError):
... |
def test_timer_context(capsys):
    """Timer used as a context manager prints the elapsed time on exit."""
    with mmcv.Timer():
        time.sleep(1)
    printed, _ = capsys.readouterr()
    # Default output is just the number of seconds.
    assert abs(float(printed) - 1) < 0.01
    with mmcv.Timer(print_tmpl='time: {:.1f}s'):
        time.sleep(1)
    printed, _ = capsys.readouterr()
    assert printed == 'time: 1.0s\n'
|
@pytest.mark.skipif((digit_version(torch.__version__) < digit_version('1.6.0')), reason='torch.jit.is_tracing is not available before 1.6.0')
def test_is_jit_tracing():
def foo(x):
if is_jit_tracing():
return x
else:
return x.tolist()
x = torch.rand(3)
assert isins... |
def test_digit_version():
assert (digit_version('0.2.16') == (0, 2, 16, 0, 0, 0))
assert (digit_version('1.2.3') == (1, 2, 3, 0, 0, 0))
assert (digit_version('1.2.3rc0') == (1, 2, 3, 0, (- 1), 0))
assert (digit_version('1.2.3rc1') == (1, 2, 3, 0, (- 1), 1))
assert (digit_version('1.0rc0') == (1, 0... |
def test_parse_version_info():
assert (parse_version_info('0.2.16') == (0, 2, 16, 0, 0, 0))
assert (parse_version_info('1.2.3') == (1, 2, 3, 0, 0, 0))
assert (parse_version_info('1.2.3rc0') == (1, 2, 3, 0, 'rc', 0))
assert (parse_version_info('1.2.3rc1') == (1, 2, 3, 0, 'rc', 1))
assert (parse_ver... |
def _mock_cmd_success(cmd):
return '3b46d33e90c397869ad5103075838fdfc9812aa0'.encode('ascii')
|
def _mock_cmd_fail(cmd):
raise OSError
|
def test_get_git_hash():
with patch('mmcv.utils.version_utils._minimal_ext_cmd', _mock_cmd_success):
assert (get_git_hash() == '3b46d33e90c397869ad5103075838fdfc9812aa0')
assert (get_git_hash(digits=6) == '3b46d3')
assert (get_git_hash(digits=100) == get_git_hash())
with patch('mmcv.ut... |
class TestVideoEditor():
@classmethod
def setup_class(cls):
cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
cls.num_frames = 168
@pytest.mark.skipif((platform.system() == 'Windows'), reason='skip windows')
def test_cut_concat_video(self):
part1_file = osp... |
class TestCache():
def test_init(self):
with pytest.raises(ValueError):
mmcv.Cache(0)
cache = mmcv.Cache(100)
assert (cache.capacity == 100)
assert (cache.size == 0)
def test_put(self):
cache = mmcv.Cache(3)
for i in range(1, 4):
cache.... |
class TestVideoReader():
@classmethod
def setup_class(cls):
cls.video_path = osp.join(osp.dirname(__file__), '../data/test.mp4')
cls.num_frames = 168
cls.video_url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'
def test_load(self):
v ... |
def test_color():
assert (mmcv.color_val(mmcv.Color.blue) == (255, 0, 0))
assert (mmcv.color_val('green') == (0, 255, 0))
assert (mmcv.color_val((1, 2, 3)) == (1, 2, 3))
assert (mmcv.color_val(100) == (100, 100, 100))
assert (mmcv.color_val(np.zeros(3, dtype=int)) == (0, 0, 0))
with pytest.rai... |
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif (x.find('rc') != (- 1)):
patch_version = x.split('rc')
digit_version.append((int(patch_version[0]) - 1))
digit_v... |
def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
'Initialize a detector from config file.\n\n Args:\n config (str or :obj:`mmcv.Config`): Config file path or the config\n object.\n checkpoint (str, optional): Checkpoint path. If left as None, the model\n ... |
class LoadImage():
'Deprecated.\n\n A simple pipeline to load image.\n '
def __call__(self, results):
'Call function to load images into results.\n\n Args:\n results (dict): A result dict contains the file name\n of the image to be read.\n Returns:\n ... |
def inference_detector(model, imgs):
'Inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):\n Either image files or loaded images.\n\n Returns:\n If imgs is a list or tuple, th... |
def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None):
'Visualize the detection results on the image.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str or np.ndarray): Image filename or loaded image.\n result (tuple[list] or list)... |
def init_random_seed(seed=None, device='cuda'):
"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): ... |
def set_random_seed(seed, deterministic=False):
'Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.... |
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
logger = get_root_logger(log_level=cfg.log_level)
dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
if ('imgs_per_gpu' in cfg.data):
logger.warning('"imgs_per_gpu" is depre... |
def build_prior_generator(cfg, default_args=None):
    """Build a prior generator from ``cfg`` via the PRIOR_GENERATORS registry."""
    return build_from_cfg(cfg, PRIOR_GENERATORS, default_args=default_args)
|
def build_anchor_generator(cfg, default_args=None):
    """Deprecated alias of :func:`build_prior_generator`."""
    warnings.warn('``build_anchor_generator`` would be deprecated soon, please use ``build_prior_generator`` ')
    return build_prior_generator(cfg, default_args)
|
@PRIOR_GENERATORS.register_module()
class PointGenerator():
def _meshgrid(self, x, y, row_major=True):
xx = x.repeat(len(y))
yy = y.view((- 1), 1).repeat(1, len(x)).view((- 1))
if row_major:
return (xx, yy)
else:
return (yy, xx)
def grid_points(self, f... |
@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator():
'Standard points generator for multi-level (Mlvl) feature maps in 2D\n points-based detectors.\n\n Args:\n strides (list[int] | list[tuple[int, int]]): Strides of anchors\n in multiple feature levels in order (w, h).\n ... |
class AssignResult(util_mixins.NiceRepr):
'Stores assignments between predicted and truth boxes.\n\n Attributes:\n num_gts (int): the number of truth boxes considered when computing this\n assignment\n\n gt_inds (LongTensor): for each predicted box indicates the 1-based\n in... |
class BaseAssigner(metaclass=ABCMeta):
    """Abstract base class for box assigners.

    Subclasses implement :meth:`assign`, which matches each predicted box to
    a ground-truth box (positive) or marks it as negative.
    """
    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign each box to either a ground truth box or the negative set.

        Args:
            bboxes: Predicted boxes to be assigned.
            gt_bboxes: Ground-truth boxes.
            gt_bboxes_ignore: Ground-truth boxes to ignore, if any.
            gt_labels: Labels of the ground-truth boxes, if available.
        """
|
@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
'Computes one-to-one matching between predictions and ground truth.\n\n This class computes an assignment between the targets and the predictions\n based on the costs. The costs are weighted sum of three components:\n classification... |
@BBOX_ASSIGNERS.register_module()
class MaskHungarianAssigner(BaseAssigner):
"Computes one-to-one matching between predictions and ground truth for\n mask.\n\n This class computes an assignment between the targets and the predictions\n based on the costs. The costs are weighted sum of three components:\n... |
@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
'Uniform Matching between the anchors and gt boxes, which can achieve\n balance in positive anchors, and gt_bboxes_ignore was not considered for\n now.\n\n Args:\n pos_ignore_thr (float): the threshold to ignore positive anchor... |
def build_assigner(cfg, **default_args):
    """Build a box assigner from ``cfg`` via the BBOX_ASSIGNERS registry."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args=default_args)
|
def build_sampler(cfg, **default_args):
    """Build a box sampler from ``cfg`` via the BBOX_SAMPLERS registry."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args=default_args)
|
def build_bbox_coder(cfg, **default_args):
    """Build a box coder from ``cfg`` via the BBOX_CODERS registry."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args=default_args)
|
class BaseBBoxCoder(metaclass=ABCMeta):
'Base bounding box coder.'
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
'Encode deltas between bboxes and ground truth boxes.'
@abstractmethod
def decode(self, bboxes, bboxes_pred):
'D... |
@BBOX_CODERS.register_module()
class DistancePointBBoxCoder(BaseBBoxCoder):
'Distance Point BBox coder.\n\n This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,\n right) and decode it back to the original.\n\n Args:\n clip_border (bool, optional): Whether clip the objects outside... |
@BBOX_CODERS.register_module()
class PseudoBBoxCoder(BaseBBoxCoder):
'Pseudo bounding box coder.'
def __init__(self, **kwargs):
super(BaseBBoxCoder, self).__init__(**kwargs)
def encode(self, bboxes, gt_bboxes):
'torch.Tensor: return the given ``bboxes``'
return gt_bboxes
def... |
def build_iou_calculator(cfg, default_args=None):
    """Build an IoU calculator from ``cfg`` via the IOU_CALCULATORS registry."""
    return build_from_cfg(cfg, IOU_CALCULATORS, default_args=default_args)
|
def build_match_cost(cfg, default_args=None):
    """Build a matching cost from ``cfg`` via the MATCH_COST registry.

    (The previous docstring said "Builder of IoU calculator", copy-pasted
    from the neighboring builder; this builder constructs match costs.)

    Args:
        cfg: Registry config dict describing the match cost to build.
        default_args: Optional default arguments forwarded to the constructor.
    """
    return build_from_cfg(cfg, MATCH_COST, default_args)
|
@BBOX_SAMPLERS.register_module()
class CombinedSampler(BaseSampler):
'A sampler that combines positive sampler and negative sampler.'
def __init__(self, pos_sampler, neg_sampler, **kwargs):
super(CombinedSampler, self).__init__(**kwargs)
self.pos_sampler = build_sampler(pos_sampler, **kwargs)... |
@BBOX_SAMPLERS.register_module()
class InstanceBalancedPosSampler(RandomSampler):
'Instance balanced sampler that samples equal number of positive samples\n for each instance.'
def _sample_pos(self, assign_result, num_expected, **kwargs):
'Sample positive boxes.\n\n Args:\n assig... |
@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
'IoU Balanced Sampling.\n\n arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n Sampling proposals according to their IoU. `floor_fraction` of needed RoIs\n are sampled from proposals whose IoU are lower than `floor_thr`... |
@BBOX_SAMPLERS.register_module()
class MaskPseudoSampler(BaseSampler):
'A pseudo sampler that does not do sampling actually.'
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
'Sample positive samples.'
raise NotImplementedError
def _sample_neg(self, **kwar... |
@BBOX_SAMPLERS.register_module()
class OHEMSampler(BaseSampler):
'Online Hard Example Mining Sampler described in `Training Region-based\n Object Detectors with Online Hard Example Mining\n <https://arxiv.org/abs/1604.03540>`_.\n '
def __init__(self, num, pos_fraction, context, neg_pos_ub=(- 1), add... |
@BBOX_SAMPLERS.register_module()
class PseudoSampler(BaseSampler):
'A pseudo sampler that does not do sampling actually.'
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
'Sample positive samples.'
raise NotImplementedError
def _sample_neg(self, **kwargs):... |
@BBOX_SAMPLERS.register_module()
class RandomSampler(BaseSampler):
'Random sampler.\n\n Args:\n num (int): Number of samples\n pos_fraction (float): Fraction of positive samples\n neg_pos_up (int, optional): Upper bound number of negative and\n positive samples. Defaults to -1.\... |
class GeneralData(NiceRepr):
'A general data structure of OpenMMlab.\n\n A data structure that stores the meta information,\n the annotations of the images or the model predictions,\n which can be used in communication between components.\n\n The attributes in `GeneralData` are divided into two parts,... |
class InstanceData(GeneralData):
'Data structure for instance-level annnotations or predictions.\n\n Subclass of :class:`GeneralData`. All value in `data_fields`\n should have the same length. This design refer to\n https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instance... |
def wider_face_classes():
    """Return the single class name used for face detection."""
    return ['face']
|
def voc_classes():
    """Return the 20 Pascal VOC object class names, in canonical order."""
    return [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
        'bus', 'car', 'cat', 'chair', 'cow',
        'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
    ]
|
def imagenet_det_classes():
return ['accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', '... |
def imagenet_vid_classes():
return ['airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', 'wa... |
def coco_classes():
return ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie'... |
def cityscapes_classes():
    """Return the 8 Cityscapes object class names, in canonical order."""
    return [
        'person', 'rider', 'car', 'truck',
        'bus', 'train', 'motorcycle', 'bicycle',
    ]
|
def oid_challenge_classes():
return ['Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', 'Flower', ... |
def oid_v6_classes():
return ['Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', 'Bird', 'Parking met... |
def get_classes(dataset):
'Get class names of a dataset.'
alias2name = {}
for (name, aliases) in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if (dataset in alias2name):
labels = eval((alias2name[dataset] + '_c... |
def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
assert mmcv.is_list_of(dynamic_interval_list, tuple)
dynamic_milestones = [0]
dynamic_milestones.extend([dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
dynamic_intervals = [start_interval]
dynamic_intervals.ex... |
class EvalHook(BaseEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(EvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = (dynamic_intervals is not None)
if self.use_dynamic_intervals:
(self.dynamic_milestones, self.dynamic_interva... |
class DistEvalHook(BaseDistEvalHook):
def __init__(self, *args, dynamic_intervals=None, **kwargs):
super(DistEvalHook, self).__init__(*args, **kwargs)
self.use_dynamic_intervals = (dynamic_intervals is not None)
if self.use_dynamic_intervals:
(self.dynamic_milestones, self.dyn... |
def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config, cfg_options=None):
"Prepare sample input and wrap model for ONNX export.\n\n The ONNX export API only accept args, and all inputs should be\n torch.Tensor or corresponding types (such as tuple of tensor).\n So we should call t... |
def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
'Build a model from config and load the given checkpoint.\n\n Args:\n config_path (str): the OpenMMLab config for the model we want to\n export to ONNX\n checkpoint_path (str): Path to the corresponding checkpoin... |
def preprocess_example_input(input_config):
"Prepare an example input image for ``generate_inputs_and_wrap_model``.\n\n Args:\n input_config (dict): customized config describing the example input.\n\n Returns:\n tuple: (one_img, one_meta), tensor of the example input image and meta... |
@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
'Check invalid loss hook.\n\n This hook will regularly check whether the loss is valid\n during training.\n\n Args:\n interval (int): Checking interval (every k iterations).\n Default: 50.\n '
def __init__(self, interval... |
class BaseEMAHook(Hook):
"Exponential Moving Average Hook.\n\n Use Exponential Moving Average on all parameters of model in training\n process. All parameters have a ema backup, which update by the formula\n as below. EMAHook takes priority over EvalHook and CheckpointHook. Note,\n the original model ... |
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
'EMAHook using exponential momentum strategy.\n\n Args:\n total_iter (int): The total number of iterations of EMA momentum.\n Defaults to 2000.\n '
def __init__(self, total_iter=2000, **kwargs):
super(ExpMomentumEM... |
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
'EMAHook using linear momentum strategy.\n\n Args:\n warm_up (int): During first warm_up steps, we may use smaller decay\n to update ema parameters more slowly. Defaults to 100.\n '
def __init__(self, warm_up=100, **kw... |
@HOOKS.register_module()
class SetEpochInfoHook(Hook):
    """Propagate the runner's current epoch number to the model."""

    def before_train_epoch(self, runner):
        # Unwrap wrapped models so ``set_epoch`` reaches the underlying module.
        model = runner.model
        if is_module_wrapper(model):
            model = model.module
        model.set_epoch(runner.epoch)
|
def get_norm_states(module):
async_norm_states = OrderedDict()
for (name, child) in module.named_modules():
if isinstance(child, nn.modules.batchnorm._NormBase):
for (k, v) in child.state_dict().items():
async_norm_states['.'.join([name, k])] = v
return async_norm_state... |
@HOOKS.register_module()
class SyncNormHook(Hook):
'Synchronize Norm states after training epoch, currently used in YOLOX.\n\n Args:\n num_last_epochs (int): The number of latter epochs in the end of the\n training to switch to synchronizing norm interval. Default: 15.\n interval (int)... |
@HOOKS.register_module()
class SyncRandomSizeHook(Hook):
"Change and synchronize the random image size across ranks.\n SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve\n similar functions. Such as `dict(type='Resize', img_scale=[(448, 448),\n (832, 832)], multiscale_mode='range', k... |
@HOOKS.register_module()
class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
'YOLOX learning rate scheme.\n\n There are two main differences between YOLOXLrUpdaterHook\n and CosineAnnealingLrUpdaterHook.\n\n 1. When the current running epoch is greater than\n `max_epoch-last_epoch`, a fi... |
@HOOKS.register_module()
class YOLOXModeSwitchHook(Hook):
"Switch the mode of YOLOX during training.\n\n This hook turns off the mosaic and mixup data augmentation and switches\n to use L1 loss in bbox_head.\n\n Args:\n num_last_epochs (int): The number of latter epochs in the end of the\n ... |
def mask_matrix_nms(masks, labels, scores, filter_thr=(- 1), nms_pre=(- 1), max_num=(- 1), kernel='gaussian', sigma=2.0, mask_area=None):
"Matrix NMS for multi-class masks.\n\n Args:\n masks (Tensor): Has shape (num_instances, h, w)\n labels (Tensor): Labels of corresponding masks,\n h... |
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=(- 1)):
if (bucket_size_mb > 0):
bucket_size_bytes = ((bucket_size_mb * 1024) * 1024)
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.ty... |
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)):
'Allreduce gradients.\n\n Args:\n params (list[torch.Parameters]): List of parameters of a model\n coalesce (bool, optional): Whether allreduce parameters as a whole.\n Defaults to True.\n bucket_size_mb (int, opt... |
class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training.

    Kept only for backward compatibility; use ``mmcv.runner.OptimizerHook``
    directly instead.
    """

    def __init__(self, *args, **kwargs):
        # The original message read 'switch to"mmcv.runner.OptimizerHook"'
        # (missing space); fixed here.
        warnings.warn('"DistOptimizerHook" is deprecated, please switch to '
                      '"mmcv.runner.OptimizerHook".')
        super().__init__(*args, **kwargs)
|
def reduce_mean(tensor):
    """Obtain the mean of ``tensor`` across all distributed processes.

    Returns the input unchanged when torch.distributed is unavailable or
    not initialized (i.e. single-process runs).
    """
    # Nothing to average outside a distributed run.
    if (not (dist.is_available() and dist.is_initialized())):
        return tensor
    # Clone so the in-place div_/all_reduce below do not mutate the caller's tensor.
    tensor = tensor.clone()
    # Dividing by world size before the SUM all-reduce yields the mean.
    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
    return tensor
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.