@patch('torch.cuda.device_count', return_value=1)
@patch('torch.cuda.set_device')
@patch('torch.distributed.init_process_group')
@patch('subprocess.getoutput', return_value='127.0.0.1')
def test_init_dist(mock_getoutput, mock_dist_init, mock_set_device,
                   mock_device_count):
    with pytest.raises(ValueError):
        ...
class ExampleDataset(Dataset):

    def __init__(self):
        self.index = 0
        self.eval_result = [1, 4, 3, 7, 2, -3, 4, 6]

    def __getitem__(self, idx):
        results = dict(x=torch.tensor([1]))
        return results

    def __len__(self):
        return 1

    @mock.create_autospec
    def evaluate(self, results, logger=None):
        ...
class EvalDataset(ExampleDataset):

    def evaluate(self, results, logger=None):
        acc = self.eval_result[self.index]
        output = OrderedDict(
            acc=acc, index=self.index, score=acc, loss_top=acc)
        self.index += 1
        return output
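# The two dataset classes above drive the EvalHook tests below. As a rough
# sketch of what a best-checkpoint rule does with the OrderedDict returned by
# `evaluate`, consider the hypothetical helper here; `is_better` and its key
# lists are illustrative assumptions, not mmcv's API (the real EvalHook applies
# this comparison internally through its `save_best` option).
def is_better(key, new, best, greater_keys=('acc', 'top'), less_keys=('loss', )):
    # 'acc'-like keys improve when they grow, 'loss'-like keys when they shrink
    if any(key == k or key.endswith(f'_{k}') for k in greater_keys):
        return new > best
    if any(key == k or key.endswith(f'_{k}') for k in less_keys):
        return new < best
    return new > best  # assume greater-is-better by default


best_acc = -float('inf')
dataset = EvalDataset()
for _ in range(3):
    output = dataset.evaluate(results=None)
    if is_better('acc', output['acc'], best_acc):
        best_acc = output['acc']  # a real hook would save a checkpoint here
assert best_acc == 4  # eval_result starts [1, 4, 3, ...]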
class Model(nn.Module):

    def __init__(self):
        super().__init__()
        self.param = nn.Parameter(torch.tensor([1.0]))

    def forward(self, x, **kwargs):
        return self.param * x

    def train_step(self, data_batch, optimizer, **kwargs):
        return {'loss': torch.sum(self(data_batch['x']))}

    ...
def _build_epoch_runner():
    model = Model()
    tmp_dir = tempfile.mkdtemp()
    runner = EpochBasedRunner(
        model=model, work_dir=tmp_dir, logger=get_logger('demo'))
    return runner

def _build_iter_runner():
    model = Model()
    tmp_dir = tempfile.mkdtemp()
    runner = IterBasedRunner(
        model=model, work_dir=tmp_dir, logger=get_logger('demo'))
    return runner

class EvalHook(BaseEvalHook):

    _default_greater_keys = ['acc', 'top']
    _default_less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

class DistEvalHook(BaseDistEvalHook):

    greater_keys = ['acc', 'top']
    less_keys = ['loss', 'loss_top']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
def test_eval_hook():
    with pytest.raises(AssertionError):
        # `save_best` should be a str, so passing True raises
        test_dataset = Model()
        data_loader = DataLoader(test_dataset)
        EvalHook(data_loader, save_best=True)

    with pytest.raises(TypeError):
        # dataloader must be a pytorch DataLoader, not a list of them
        test_dataset = Model()
        data_loader = [DataLoader(test_dataset)]
        EvalHook(data_loader)
    ...
@patch('mmcv.engine.single_gpu_test', MagicMock)
@patch('mmcv.engine.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', [EvalHook, DistEvalHook])
@pytest.mark.parametrize('_build_demo_runner,by_epoch',
                         [(_build_epoch_runner, True),
                          (_build_iter_runner, False)])
def test_start_param(EvalHookParam, _build_demo_runner, by_epoch):
    ...
@pytest.mark.parametrize('runner,by_epoch,eval_hook_priority',
                         [(EpochBasedRunner, True, 'NORMAL'),
                          (EpochBasedRunner, True, 'LOW'),
                          (IterBasedRunner, False, 'LOW')])
def test_logger(runner, by_epoch, eval_hook_priority):
    loader = DataLoader(EvalDataset())
    model = Model()
    data_loader = DataLoader(EvalDataset())
    ...
def test_cast_tensor_type():
    inputs = torch.FloatTensor([5.0])
    src_type = torch.float32
    dst_type = torch.int32
    outputs = cast_tensor_type(inputs, src_type, dst_type)
    assert isinstance(outputs, torch.Tensor)
    assert outputs.dtype == dst_type

    inputs = torch.FloatTensor([5.0])
    src_type = ...
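# `cast_tensor_type` above comes from mmcv's fp16 utilities. As a mental model
# only, a recursive caster can be sketched as below; this is an illustrative
# reimplementation under assumed semantics (cast tensors, recurse into
# containers, pass everything else through), not mmcv's exact source.
from collections import abc


def cast_tensor_type_sketch(inputs, src_type, dst_type):
    if isinstance(inputs, torch.Tensor):
        return inputs.to(dst_type)
    if isinstance(inputs, abc.Mapping):
        return type(inputs)({
            k: cast_tensor_type_sketch(v, src_type, dst_type)
            for k, v in inputs.items()
        })
    if isinstance(inputs, (list, tuple)):
        return type(inputs)(
            cast_tensor_type_sketch(v, src_type, dst_type) for v in inputs)
    return inputs  # strings, numbers, ndarrays, ... pass through unchanged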
def test_auto_fp16():
    with pytest.raises(TypeError):
        # auto_fp16 can only decorate methods of nn.Module subclasses
        class ExampleObject(object):

            @auto_fp16()
            def __call__(self, x):
                return x

        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)

    class ExampleModule(nn.Module):
        ...
def test_force_fp32():
    with pytest.raises(TypeError):
        # force_fp32 can only decorate methods of nn.Module subclasses
        class ExampleObject(object):

            @force_fp32()
            def __call__(self, x):
                return x

        model = ExampleObject()
        input_x = torch.ones(1, dtype=torch.float32)
        model(input_x)

    class ExampleModule(nn.Module):
        ...
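# Both decorators only take effect on nn.Module subclasses, which is why the
# plain-object cases above raise TypeError. A typical usage pattern, as an
# illustrative module that is not part of these tests: the forward pass may
# run in half precision while a numerically sensitive method is forced back
# to fp32.
class ToyFp16Module(nn.Module):

    def __init__(self):
        super().__init__()
        self.fp16_enabled = False  # the decorators check this flag at runtime

    @auto_fp16(apply_to=('x', ))  # cast the listed float args to half
    def forward(self, x):
        return self.loss(x)

    @force_fp32(apply_to=('x', ))  # keep the loss computation in fp32
    def loss(self, x):
        return x.sum()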
def test_optimizerhook():

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(
                in_channels=1,
                out_channels=2,
                kernel_size=3,
                stride=1,
                padding=1,
                dilation=1)
            self.conv2 = nn.Conv2d(
                in_channels=2,
                out_channels=2,
                kernel_size=3,
                stride=1,
                padding=1,
                dilation=1)
            ...
def test_checkpoint_hook(tmp_path):
    """xdoctest -m tests/test_runner/test_hooks.py test_checkpoint_hook."""
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner('EpochBasedRunner', max_epochs=1)
    runner.meta = dict()
    checkpointhook = CheckpointHook(interval=1, by_epoch=True)
    runner.register_hook(checkpointhook)
    ...
def test_ema_hook():
    """xdoctest -m tests/test_hooks.py test_ema_hook."""

    class DemoModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(
                in_channels=1,
                out_channels=2,
                kernel_size=1,
                padding=1,
                bias=True)
            self._init_weight()

        def _init_weight(self):
            ...
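# For orientation: EMAHook keeps an exponential moving average of the model
# parameters alongside the live ones. The usual in-place update rule, written
# as a standalone sketch (the exact role of `momentum` in mmcv's hook should
# be checked against its docstring):
def ema_update(ema_param, param, momentum=0.0002):
    # ema <- (1 - momentum) * ema + momentum * param
    ema_param.mul_(1 - momentum).add_(param, alpha=momentum)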
def test_custom_hook():

    @HOOKS.register_module()
    class ToyHook(Hook):

        def __init__(self, info, *args, **kwargs):
            super().__init__()
            self.info = info

    runner = _build_demo_runner_without_hook('EpochBasedRunner', max_epochs=1)
    # registering no custom hooks should leave the hook list empty
    runner.register_custom_hooks(None)
    assert len(runner.hooks) == 0
    ...
def test_pavi_hook():
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.meta = dict(config_dict=dict(lr=0.02, gpu_ids=range(1)))
    hook = PaviLoggerHook(add_graph=False, add_last_ckpt=True)
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    ...
def test_sync_buffers_hook():
    loader = DataLoader(torch.ones((5, 2)))
    runner = _build_demo_runner()
    runner.register_hook_from_cfg(dict(type='SyncBuffersHook'))
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times',
                         [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_momentum_runner_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """xdoctest -m tests/test_hooks.py test_momentum_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    ...
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_runner_hook(multi_optimizers):
    """xdoctest -m tests/test_hooks.py test_cosine_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
    ...
@pytest.mark.parametrize('multi_optimizers, by_epoch', [(False, False),
                                                        (True, False),
                                                        (False, True),
                                                        (True, True)])
def test_flat_cosine_runner_hook(multi_optimizers, by_epoch):
    """xdoctest -m tests/test_hooks.py test_flat_cosine_runner_hook."""
    sys.modules['pavi'] = MagicMock()
    loader = DataLoader(torch.ones((10, 2)))
    ...
@pytest.mark.parametrize('multi_optimizers, max_iters', [(True, 10), (True, 2),
                                                         (False, 10),
                                                         (False, 2)])
def test_one_cycle_runner_hook(multi_optimizers, max_iters):
    """Test OneCycleLrUpdaterHook and OneCycleMomentumUpdaterHook."""
    with pytest.raises(AssertionError):
        # by_epoch should be False for the one-cycle policy
        OneCycleLrUpdaterHook(max_lr=0.1, by_epoch=True)
    ...
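# The cosine-flavoured LR hooks exercised here all interpolate between a start
# and an end value along a cosine curve. The standard annealing formula, as a
# generic sketch (mmcv ships a helper of this shape, but the sketch below is
# written from the formula, not copied from its source):
import math


def annealing_cos_sketch(start, end, factor):
    """Interpolate from start to end on a cosine curve; factor runs 0 -> 1."""
    cos_out = math.cos(math.pi * factor) + 1  # 2 at factor=0, 0 at factor=1
    return end + 0.5 * (start - end) * cos_out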
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_cosine_restart_lr_update_hook(multi_optimizers):
    """Test CosineRestartLrUpdaterHook."""
    with pytest.raises(AssertionError):
        # `min_lr` and `min_lr_ratio` cannot both be given
        CosineRestartLrUpdaterHook(
            by_epoch=False,
            periods=[2, 10],
            restart_weights=[0.5, 0.5],
            min_lr=0.1,
            min_lr_ratio=...)
    ...
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_step_runner_hook(multi_optimizers):
    """Test StepLrUpdaterHook."""
    with pytest.raises(TypeError):
        # `step` is a required argument
        StepLrUpdaterHook()
    with pytest.raises(AssertionError):
        # `step` should be a positive value
        StepLrUpdaterHook(-10)
    with pytest.raises(AssertionError):
        ...
@pytest.mark.parametrize('multi_optimizers, max_iters, gamma, cyclic_times',
                         [(True, 8, 1, 1), (False, 8, 0.5, 2)])
def test_cyclic_lr_update_hook(multi_optimizers, max_iters, gamma, cyclic_times):
    """Test CyclicLrUpdateHook."""
    with pytest.raises(AssertionError):
        # by_epoch should be False for the cyclic policy
        CyclicLrUpdaterHook(by_epoch=True)
    wi...
@pytest.mark.parametrize('log_model', (True, False))
def test_mlflow_hook(log_model):
    sys.modules['mlflow'] = MagicMock()
    sys.modules['mlflow.pytorch'] = MagicMock()
    runner = _build_demo_runner()
    loader = DataLoader(torch.ones((5, 2)))
    hook = MlflowLoggerHook(exp_name='test', log_model=log_model)
    ...
def test_segmind_hook():
    sys.modules['segmind'] = MagicMock()
    runner = _build_demo_runner()
    hook = SegmindLoggerHook()
    loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    hook.mlflow_...
def test_wandb_hook():
    sys.modules['wandb'] = MagicMock()
    runner = _build_demo_runner()
    hook = WandbLoggerHook(log_artifact=True)
    loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    h...
def test_neptune_hook():
    sys.modules['neptune'] = MagicMock()
    sys.modules['neptune.new'] = MagicMock()
    runner = _build_demo_runner()
    hook = NeptuneLoggerHook()
    loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    ...
def test_dvclive_hook():
    sys.modules['dvclive'] = MagicMock()
    runner = _build_demo_runner()
    hook = DvcliveLoggerHook()
    dvclive_mock = hook.dvclive
    loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    ...
def test_dvclive_hook_model_file(tmp_path):
    sys.modules['dvclive'] = MagicMock()
    runner = _build_demo_runner()

    hook = DvcliveLoggerHook(model_file=osp.join(runner.work_dir, 'model.pth'))
    runner.register_hook(hook)

    loader = torch.utils.data.DataLoader(torch.ones((5, 2)))
    loader = DataLoader(tor...
def _build_demo_runner_without_hook(runner_type='EpochBasedRunner',
                                    max_epochs=1,
                                    max_iters=None,
                                    multi_optimizers=False):

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.linear = nn.Linear(2, 1)
            self.conv = nn.Conv2d(3, 3, 3)

        def forward(self, x):
            ...
def _build_demo_runner(runner_type='EpochBasedRunner',
                       max_epochs=1,
                       max_iters=None,
                       multi_optimizers=False):
    log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook')])
    runner = _build_demo_runner_without_hook(runner_type, max_epochs, max_iters,
                                             multi_optimizers)
    runner.register_checkpoint_hook(dict(interval=1))
    runner.register_logger_hooks(log_config)
    return runner
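# Typical use of the two builders above, distilled from the tests that follow:
# build a runner, drive it with a trivial DataLoader for one train and one val
# epoch, then remove the temporary work dir.
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner()
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)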
def test_runner_with_revise_keys():
    import os

    class Model(nn.Module):

        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 3, 1)

    class PrefixModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.backbone = Model()

    p...
def test_get_triggered_stages():

    class ToyHook(Hook):

        def before_run():
            pass

        def after_epoch():
            pass

    hook = ToyHook()
    # after_epoch is expanded into both the train and the val epoch stages
    expected_stages = ['before_run', 'after_train_epoch', 'after_val_epoch']
    assert hook.get_triggered_stages() == expected_stages
def test_gradient_cumulative_optimizer_hook():

    class ToyModel(nn.Module):

        def __init__(self, with_norm=False):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            nn.init.constant_(self.fc.bias, 1.0)
            ...
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA support')
def test_gradient_cumulative_fp16_optimizer_hook():

    class ToyModel(nn.Module):

        def __init__(self):
            super().__init__()
            self.fp16_enabled = False
            self.fc = nn.Linear(3, 2)
            nn.init.constant_(self.fc.weight, 1.0)
            ...
class SubModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(2, 2, kernel_size=1, groups=2)
        self.gn = nn.GroupNorm(2, 2)
        self.param1 = nn.Parameter(torch.ones(1))

    def forward(self, x):
        return x

class ExampleModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Conv2d(3, 4, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(4, 2, kernel_size=1)
        self.bn = nn.BatchNorm2d(2)
        self.sub = SubModel()
        ...
class ExampleDuplicateModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.param1 = nn.Parameter(torch.ones(1))
        self.conv1 = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=False))
        self.conv2 = nn.Sequential(nn.Conv2d(4, 2, kernel_size=1))
        self.bn = nn.BatchNorm2d(2)
        ...
class PseudoDataParallel(nn.Module):

    def __init__(self):
        super().__init__()
        self.module = ExampleModel()

    def forward(self, x):
        return x

def check_default_optimizer(optimizer, model, prefix=''):
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    assert optimizer.defaults['momentum'] == momentum
    assert optimizer.defaults['weight_decay'] == base_wd
    param_groups = optimizer.param_groups[0]
    ...
def check_sgd_optimizer(optimizer,
                        model,
                        prefix='',
                        bias_lr_mult=1,
                        bias_decay_mult=1,
                        norm_decay_mult=1,
                        dwconv_decay_mult=1,
                        dcn_offset_lr_mult=1,
                        bypass_duplicate=False):
    param_groups = optimizer.param_groups
    assert isinstance(optimizer, torch.optim.SGD)
    assert optimizer.defaults['lr'] == base_lr
    ...
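# Note on the *_mult arguments above: they mirror DefaultOptimizerConstructor's
# paramwise_cfg options, where each multiplier scales the base hyper-parameter
# for one parameter class (biases, norm layers, depthwise convs, DCN offsets).
# Illustrative arithmetic with assumed base values, not taken from the tests:
demo_base_lr, demo_base_wd = 0.01, 0.0001
assert demo_base_lr * 2 == 0.02      # bias_lr_mult=2 -> bias lr
assert demo_base_wd * 0.5 == 5e-05   # bias_decay_mult=0.5 -> bias weight decay
assert demo_base_wd * 0 == 0         # norm_decay_mult=0 -> no decay on norms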
def test_default_optimizer_constructor():
    model = ExampleModel()

    with pytest.raises(TypeError):
        # optimizer_cfg must be a dict
        optimizer_cfg = []
        optim_constructor = DefaultOptimizerConstructor(optimizer_cfg)
        optim_constructor(model)

    with pytest.raises(TypeError):
        optimizer_cfg = dict(lr=0.0001)
        ...
def test_torch_optimizers():
    torch_optimizers = [
        'ASGD', 'Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'LBFGS',
        'Optimizer', 'RMSprop', 'Rprop', 'SGD', 'SparseAdam'
    ]
    assert set(torch_optimizers).issubset(set(TORCH_OPTIMIZERS))
def test_build_optimizer_constructor():
    model = ExampleModel()
    optimizer_cfg = dict(
        type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    paramwise_cfg = dict(
        bias_lr_mult=2,
        bias_decay_mult=0.5,
        norm_decay_mult=0,
        dwconv_decay_mult=0.1,
        dcn_offset_lr_mult=0.1)
    optim_constructor_cfg = dict(
        type='DefaultOptimizerConstructor',
        optimizer_cfg=optimizer_cfg,
        paramwise_cfg=paramwise_cfg)
    ...
def test_build_optimizer():
    model = ExampleModel()
    optimizer_cfg = dict(
        type='SGD', lr=base_lr, weight_decay=base_wd, momentum=momentum)
    optimizer = build_optimizer(model, optimizer_cfg)
    check_default_optimizer(optimizer, model)

    model = ExampleModel()
    optimizer_cfg = dict(type='SGD', lr=base_l...
class OldStyleModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, 1)

class Model(OldStyleModel):

    def train_step(self):
        pass

    def val_step(self):
        pass
def test_build_runner():
    temp_root = tempfile.gettempdir()
    dir_name = ''.join([random.choice(string.ascii_letters) for _ in range(10)])
    default_args = dict(
        model=Model(),
        work_dir=osp.join(temp_root, dir_name),
        logger=logging.getLogger())
    cfg = dict(type='EpochBasedRunner', max_epochs=1)
    runner = build_runner(cfg, default_args=default_args)
    ...
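# build_runner is registry-driven: `type` selects the class registered in
# RUNNERS and the remaining keys, merged with default_args, become constructor
# kwargs. The IterBasedRunner variant, reusing default_args from the test
# above as a sketch:
cfg = dict(type='IterBasedRunner', max_iters=1)
runner = build_runner(cfg, default_args=default_args)
assert runner.max_iters == 1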
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_epoch_based_runner(runner_class):
    with pytest.warns(DeprecationWarning):
        # batch_processor is deprecated
        model = OldStyleModel()

        def batch_processor():
            pass

        _ = runner_class(model, batch_processor, logger=logging.getLogger())
    ...
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_runner_with_parallel(runner_class):

    def batch_processor():
        pass

    model = MMDataParallel(OldStyleModel())
    _ = runner_class(model, batch_processor, logger=logging.getLogger())

    model = MMDataParallel(Model())
    _ = ...
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_save_checkpoint(runner_class):
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())

    with pytest.raises(TypeError):
        # meta should be None or a dict
        runner.save_checkpoint('.', meta=list())

    with tempfile.TemporaryDirectory...
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_build_lr_momentum_hook(runner_class):
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())
    lr_config = dict(
        policy='CosineAnnealing',
        by_epoch=False,
        min_lr_ratio=0,
        warmup_iters=2,
        warmup_ratio=0.9)
    ...
@pytest.mark.parametrize('runner_class', RUNNERS.module_dict.values())
def test_register_timer_hook(runner_class):
    model = Model()
    runner = runner_class(model=model, logger=logging.getLogger())

    # a None config registers nothing
    timer_config = None
    runner.register_timer_hook(timer_config)
    assert len(runner.hooks) == 0

    timer_config = dict(type='IterTimerHook')
    ...
def test_set_random_seed():
    set_random_seed(0)
    a_random = random.randint(0, 10)
    a_np_random = np.random.rand(2, 2)
    a_torch_random = torch.rand(2, 2)
    assert torch.backends.cudnn.deterministic is False
    assert torch.backends.cudnn.benchmark is False
    assert os.environ['PYTHONHASHSEED'] == str(0)
    ...
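# set_random_seed seeds every RNG the tests rely on in one call. A sketch of
# the core logic under the assumptions encoded by the assertions above (the
# cudnn flags stay untouched unless a `deterministic` switch is set):
def set_random_seed_sketch(seed, deterministic=False):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False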
def test_construct():
    cfg = Config()
    assert cfg.filename is None
    assert cfg.text == ''
    assert len(cfg) == 0
    assert cfg._cfg_dict == {}

    with pytest.raises(TypeError):
        # the argument of Config should be dict-like
        Config([0, 1])

    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    cfg_file = osp.join(data_path, 'config/a.py')
    ...
def test_fromfile():
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert cfg.filename == cfg_file
        assert cfg.text == (osp.abspath(osp.expanduser(cfg_file)) + '\n' +
                            open(cfg_file).read())
        ...
def test_fromstring():
    for filename in ['a.py', 'a.b.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        file_format = osp.splitext(filename)[-1]
        in_cfg = Config.fromfile(cfg_file)
        out_cfg = Config.fromstring(in_cfg.pretty_text, '.py')
        assert in_cfg._cfg_dict == out_cfg._cfg_dict
        ...
def test_merge_from_base():
    cfg_file = osp.join(data_path, 'config/d.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert cfg.filename == cfg_file
    base_cfg_file = osp.join(data_path, 'config/base.py')
    merge_text = (osp.abspath(osp.expanduser(base_cfg_file)) + '\n' +
                  open(base_cfg_file).read())
    ...
def test_merge_from_multiple_bases():
    cfg_file = osp.join(data_path, 'config/l.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert cfg.filename == cfg_file
    assert cfg.item1 == [1, 2]
    assert cfg.item2.a == 0
    assert cfg.item3 is False
    assert cfg.item4 == 'test'
    ...
def test_base_variables():
    for file in ['t.py', 't.json', 't.yaml']:
        cfg_file = osp.join(data_path, f'config/{file}')
        cfg = Config.fromfile(cfg_file)
        assert isinstance(cfg, Config)
        assert cfg.filename == cfg_file
        assert cfg.item1 == [1, 2]
        assert cfg.item2.a == 0
        ...
def test_merge_recursive_bases():
    cfg_file = osp.join(data_path, 'config/f.py')
    cfg = Config.fromfile(cfg_file)
    assert isinstance(cfg, Config)
    assert cfg.filename == cfg_file
    assert cfg.item1 == [2, 3]
    assert cfg.item2.a == 1
    assert cfg.item3 is False
    assert cfg.item4 == 'test'
    ...
def test_merge_from_dict():
    cfg_file = osp.join(data_path, 'config/a.py')
    cfg = Config.fromfile(cfg_file)
    input_options = {'item2.a': 1, 'item2.b': 0.1, 'item3': False}
    cfg.merge_from_dict(input_options)
    assert cfg.item2 == dict(a=1, b=0.1)
    assert cfg.item3 is False

    cfg_file = osp.join...
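# merge_from_dict treats dotted keys as nested paths, which is how
# 'item2.a': 1 above lands in cfg.item2['a']. The same mechanism on a fresh
# Config, as a small self-contained example (the model/backbone keys are made
# up for illustration):
cfg_demo = Config(dict(model=dict(backbone=dict(depth=50))))
cfg_demo.merge_from_dict({'model.backbone.depth': 101})
assert cfg_demo.model.backbone.depth == 101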
def test_merge_delete():
    cfg_file = osp.join(data_path, 'config/delete.py')
    cfg = Config.fromfile(cfg_file)
    assert cfg.item1 == dict(a=0)
    assert cfg.item2 == dict(a=0, b=0)
    assert cfg.item3 is True
    assert cfg.item4 == 'test'
    assert '_delete_' not in cfg.item2
    assert type(cfg...
def test_merge_intermediate_variable():
    cfg_file = osp.join(data_path, 'config/i_child.py')
    cfg = Config.fromfile(cfg_file)
    assert cfg.item1 == [1, 2]
    assert cfg.item2 == dict(a=0)
    assert cfg.item3 is True
    assert cfg.item4 == 'test'
    assert cfg.item_cfg == dict(b=2)
    assert cf...
def test_fromfile_in_config():
    cfg_file = osp.join(data_path, 'config/code.py')
    cfg = Config.fromfile(cfg_file)
    assert cfg.cfg.item1 == [1, 2]
    assert cfg.cfg.item2 == dict(a=0)
    assert cfg.cfg.item3 is True
    assert cfg.cfg.item4 == 'test'
    assert cfg.item5 == 1
def test_dict():
    cfg_dict = dict(item1=[1, 2], item2=dict(a=0), item3=True, item4='test')
    for filename in ['a.py', 'b.json', 'c.yaml']:
        cfg_file = osp.join(data_path, 'config', filename)
        cfg = Config.fromfile(cfg_file)
        assert len(cfg) == 4
        assert set(cfg.keys()) == set(cfg_dict.keys())
        ...
def test_setattr():
    cfg = Config()
    cfg.item1 = [1, 2]
    cfg.item2 = {'a': 0}
    cfg['item5'] = {'a': {'b': None}}
    assert cfg._cfg_dict['item1'] == [1, 2]
    assert cfg.item1 == [1, 2]
    assert cfg._cfg_dict['item2'] == {'a': 0}
    assert cfg.item2.a == 0
    assert cfg._cfg_dict['item5'] == {'a': {'b': None}}
    ...
def test_pretty_text():
    cfg_file = osp.join(data_path, 'config/l.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        text_cfg_filename = osp.join(temp_config_dir, '_text_config.py')
        with open(text_cfg_filename, 'w') as f:
            f.write(cfg.pretty_text)
        ...
def test_dict_action():
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument(
        '--options', nargs='+', action=DictAction, help='custom options')
    args = parser.parse_args(
        ['--options', 'item2.a=a,b', 'item2.b=[(a,b), [1,2], false]'])
    out_dict = {'item2.a': ['a', 'b'], 'item2.b': [('a', 'b'), [1, 2], False]}
    ...
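# DictAction parses each key=value token, converting values into ints, floats
# and bools and turning comma/bracket syntax into lists and tuples; that is
# why 'item2.b=[(a,b), [1,2], false]' becomes [('a', 'b'), [1, 2], False].
# A second, self-contained invocation showing the scalar conversions:
parser_demo = argparse.ArgumentParser()
parser_demo.add_argument('--options', nargs='+', action=DictAction)
args_demo = parser_demo.parse_args(['--options', 'k1=1', 'k2=0.5', 'k3=true'])
assert args_demo.options == {'k1': 1, 'k2': 0.5, 'k3': True}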
def test_dump_mapping():
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        text_cfg_filename = osp.join(temp_config_dir, '_text_config.py')
        cfg.dump(text_cfg_filename)
        text_cfg = Config.fromfile(text_cfg_filename)
        ...
def test_reserved_key():
    cfg_file = osp.join(data_path, 'config/g.py')
    with pytest.raises(KeyError):
        Config.fromfile(cfg_file)
def test_syntax_error():
    temp_cfg_file = tempfile.NamedTemporaryFile(suffix='.py', delete=False)
    temp_cfg_path = temp_cfg_file.name
    # write a config file with a deliberate syntax error
    with open(temp_cfg_path, 'w') as f:
        f.write('a=0b=dict(c=1)')
    with pytest.raises(SyntaxError,
                       match='There are syntax errors in config file'):
        Config.fromfile(temp_cfg_path)
    ...
def test_pickle_support():
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    with tempfile.TemporaryDirectory() as temp_config_dir:
        pkl_cfg_filename = osp.join(temp_config_dir, '_pickle.pkl')
        dump(cfg, pkl_cfg_filename)
        pkl_cfg = load(pkl_cfg_filename)
    ...
def test_deprecation():
    deprecated_cfg_files = [
        osp.join(data_path, 'config/deprecated.py'),
        osp.join(data_path, 'config/deprecated_as_base.py')
    ]
    for cfg_file in deprecated_cfg_files:
        with pytest.warns(DeprecationWarning):
            cfg = Config.fromfile(cfg_file)
        assert cfg.item1 == 'expected'
        ...
def test_deepcopy():
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    new_cfg = copy.deepcopy(cfg)
    assert isinstance(new_cfg, Config)
    assert new_cfg._cfg_dict == cfg._cfg_dict
    assert new_cfg._cfg_dict is not cfg._cfg_dict
    assert new_cfg._filename == cfg._filename
    ...
def test_copy():
    cfg_file = osp.join(data_path, 'config/n.py')
    cfg = Config.fromfile(cfg_file)
    new_cfg = copy.copy(cfg)
    assert isinstance(new_cfg, Config)
    assert new_cfg is not cfg
    assert new_cfg._cfg_dict is cfg._cfg_dict
    assert new_cfg._filename == cfg._filename
    assert new_cfg...
def test_collect_env():
    try:
        import torch  # noqa: F401
    except ModuleNotFoundError:
        pytest.skip('skipping tests that require PyTorch')
    from mmcv.utils import collect_env
    env_info = collect_env()
    expected_keys = [
        'sys.platform', 'Python', 'CUDA available', 'PyTorch',
        'PyTorch compiling details', ...
def test_load_url():
    url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
    url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'

    # older PyTorch cannot load checkpoints saved in the newer zipfile format
    if digit_version(TORCH_VERSION) < digit_version('1.7.0'):
        model_zoo.load_url(url1)
        with pytest.raises(RuntimeError):
            ...
@patch('torch.distributed.get_rank', lambda: 0)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank0():
    logger = get_logger('rank0.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == ...
@patch('torch.distributed.get_rank', lambda: 1)
@patch('torch.distributed.is_initialized', lambda: True)
@patch('torch.distributed.is_available', lambda: True)
def test_get_logger_rank1():
    logger = get_logger('rank1.pkg1')
    assert isinstance(logger, logging.Logger)
    assert len(logger.handlers) == ...
def test_print_log_print(capsys):
    print_log('welcome', logger=None)
    out, _ = capsys.readouterr()
    assert out == 'welcome\n'

def test_print_log_silent(capsys, caplog):
    print_log('welcome', logger='silent')
    out, _ = capsys.readouterr()
    assert out == ''
    assert len(caplog.records) == 0
def test_print_log_logger(caplog):
    print_log('welcome', logger='mmcv')
    assert caplog.record_tuples[-1] == ('mmcv', logging.INFO, 'welcome')
    print_log('welcome', logger='mmcv', level=logging.ERROR)
    assert caplog.record_tuples[-1] == ('mmcv', logging.ERROR, 'welcome')
    with tempfile.NamedTe...
def test_print_log_exception():
    with pytest.raises(TypeError):
        print_log('welcome', logger=0)
def test_to_ntuple():
    single_number = 2
    assert mmcv.utils.to_1tuple(single_number) == (single_number, )
    assert mmcv.utils.to_2tuple(single_number) == (single_number, single_number)
    assert mmcv.utils.to_3tuple(single_number) == (single_number, single_number,
                                                  single_number)
    assert mmcv.utils.to_4tuple(single_number) == (single_number, single_number,
                                                    single_number, single_number)
    ...
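# The to_Ntuple helpers broadcast a scalar to an N-tuple while passing real
# iterables through. They follow the well-known torch.nn.modules.utils._ntuple
# factory pattern; the sketch below mirrors that pattern and may differ from
# mmcv's exact source.
import collections.abc
from itertools import repeat


def to_ntuple_sketch(n):

    def parse(x):
        if isinstance(x, collections.abc.Iterable):
            return tuple(x)
        return tuple(repeat(x, n))

    return parse


assert to_ntuple_sketch(2)(7) == (7, 7)
assert to_ntuple_sketch(2)([3, 4]) == (3, 4)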
def test_iter_cast():
    assert mmcv.list_cast([1, 2, 3], int) == [1, 2, 3]
    assert mmcv.list_cast(['1.1', 2, '3'], float) == [1.1, 2.0, 3.0]
    assert mmcv.list_cast([1, 2, 3], str) == ['1', '2', '3']
    assert mmcv.tuple_cast((1, 2, 3), str) == ('1', '2', '3')
    assert next(mmcv.iter_cast([1, 2, 3], str)) == '1'
    ...
def test_is_seq_of():
    assert mmcv.is_seq_of([1.0, 2.0, 3.0], float)
    assert mmcv.is_seq_of([(1, ), (2, ), (3, )], tuple)
    assert mmcv.is_seq_of((1.0, 2.0, 3.0), float)
    assert mmcv.is_list_of([1.0, 2.0, 3.0], float)
    assert not mmcv.is_seq_of((1.0, 2.0, 3.0), float, seq_type=list)
    assert not mmcv....
def test_slice_list():
    in_list = [1, 2, 3, 4, 5, 6]
    assert mmcv.slice_list(in_list, [1, 2, 3]) == [[1], [2, 3], [4, 5, 6]]
    assert mmcv.slice_list(in_list, [len(in_list)]) == [in_list]
    with pytest.raises(TypeError):
        # lens must be an int or a list of ints
        mmcv.slice_list(in_list, 2.0)
    with pytest.raises(ValueError):
        m...
def test_concat_list():
    assert mmcv.concat_list([[1, 2]]) == [1, 2]
    assert mmcv.concat_list([[1, 2], [3, 4, 5], [6]]) == [1, 2, 3, 4, 5, 6]
def test_requires_package(capsys):

    @mmcv.requires_package('nnn')
    def func_a():
        pass

    @mmcv.requires_package(['numpy', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_package('numpy')
    def func_c():
        return 1

    with pytest.raises(RuntimeError):
        func_a()
    out, _ = capsys.readouterr()
    ...
def test_requires_executable(capsys):

    @mmcv.requires_executable('nnn')
    def func_a():
        pass

    @mmcv.requires_executable(['ls', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_executable('mv')
    def func_c():
        return 1

    with pytest.raises(RuntimeError):
        func_a()
    ...
def test_import_modules_from_strings():
    # import multiple modules from a list of strings
    import os.path as osp_
    import sys as sys_
    osp, sys = mmcv.import_modules_from_strings(['os.path', 'sys'])
    assert osp == osp_
    assert sys == sys_

    # import a single module from a string
    osp = mmcv.import_modules_from_strings('os.path')
    assert osp == osp_
    assert mmcv.import_modul...
def test_is_method_overridden():

    class Base():

        def foo1():
            pass

        def foo2():
            pass

    class Sub(Base):

        def foo1():
            pass

    assert mmcv.is_method_overridden('foo1', Base, Sub)
    assert not mmcv.is_method_overridden('foo2', Base, Sub)

    sub_inst...
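# is_method_overridden can be understood as comparing the function objects
# that the base and derived classes resolve for the same name; a sketch under
# that assumption (the instance-to-class fallback mirrors how the test above
# apparently passes an instance as well):
def is_method_overridden_sketch(method, base_class, derived_class):
    if not isinstance(derived_class, type):
        derived_class = derived_class.__class__
    return getattr(derived_class, method) is not getattr(base_class, method)


class _B:

    def f(self):
        pass


class _S(_B):

    def f(self):
        pass


assert is_method_overridden_sketch('f', _B, _S)
assert not is_method_overridden_sketch('f', _B, _B())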
def test_has_method():

    class Foo():

        def __init__(self, name):
            self.name = name

        def print_name(self):
            print(self.name)

    foo = Foo('foo')
    # `name` is a plain attribute, not a method
    assert not has_method(foo, 'name')
    assert has_method(foo, 'print_name')
def test_deprecated_api_warning():

    @deprecated_api_warning(name_dict=dict(old_key='new_key'))
    def dummy_func(new_key=1):
        return new_key

    # the deprecated `old_key` is remapped to `new_key`
    assert dummy_func(old_key=2) == 2

    # passing both the old and the new key is not allowed
    with pytest.raises(AssertionError):
        dummy_func(old_key=1, new_key=2)
class TestJit(object):

    def test_add_dict(self):

        @mmcv.jit
        def add_dict(oper):
            rets = oper['x'] + oper['y']
            return {'result': rets}

        def add_dict_pyfunc(oper):
            rets = oper['x'] + oper['y']
            return {'result': rets}

        a = torch.rand((...