code
stringlengths
17
6.64M
class ModuleList(BaseModule, nn.ModuleList): 'ModuleList in openmmlab.\n\n Args:\n modules (iterable, optional): an iterable of modules to add.\n init_cfg (dict, optional): Initialization config dict.\n ' def __init__(self, modules=None, init_cfg=None): BaseModule.__init__(self, i...
class ModuleDict(BaseModule, nn.ModuleDict): 'ModuleDict in openmmlab.\n\n Args:\n modules (dict, optional): a mapping (dictionary) of (string: module)\n or an iterable of key-value pairs of type (string, module).\n init_cfg (dict, optional): Initialization config dict.\n ' def...
class BaseRunner(metaclass=ABCMeta): 'The base class of Runner, a training helper for PyTorch.\n\n All subclasses should implement the following APIs:\n\n - ``run()``\n - ``train()``\n - ``val()``\n - ``save_checkpoint()``\n\n Args:\n model (:obj:`torch.nn.Module`): The model to be run.\n...
def build_runner_constructor(cfg):
    """Construct a runner-constructor object from a registry config.

    Args:
        cfg (dict): Config dict; must contain a ``type`` key naming a
            constructor registered in ``RUNNER_BUILDERS``.

    Returns:
        The built runner-constructor instance.
    """
    constructor = RUNNER_BUILDERS.build(cfg)
    return constructor
def build_runner(cfg, default_args=None): runner_cfg = copy.deepcopy(cfg) constructor_type = runner_cfg.pop('constructor', 'DefaultRunnerConstructor') runner_constructor = build_runner_constructor(dict(type=constructor_type, runner_cfg=runner_cfg, default_args=default_args)) runner = runner_constructo...
def _get_mmcv_home():
    """Return the mmcv cache directory, creating it if it does not exist.

    The path is resolved from the ``ENV_MMCV_HOME`` environment variable
    first; otherwise it falls back to ``<XDG cache dir or default>/mmcv``.
    """
    cache_root = os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR)
    fallback = os.path.join(cache_root, 'mmcv')
    mmcv_home = os.path.expanduser(os.getenv(ENV_MMCV_HOME, fallback))
    mkdir_or_exist(mmcv_home)
    return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None): "Load state_dict to a module.\n\n This method is modified from :meth:`torch.nn.Module.load_state_dict`.\n Default value for ``strict`` is set to ``False`` and the message for\n param mismatch will be shown even if strict is False.\n\n ...
def get_torchvision_models(): model_urls = dict() for (_, name, ispkg) in pkgutil.walk_packages(torchvision.models.__path__): if ispkg: continue _zoo = import_module(f'torchvision.models.{name}') if hasattr(_zoo, 'model_urls'): _urls = getattr(_zoo, 'model_urls'...
def get_external_models(): mmcv_home = _get_mmcv_home() default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json') default_urls = load_file(default_json_path) assert isinstance(default_urls, dict) external_json_path = osp.join(mmcv_home, 'open_mmlab.json') if osp.exists(extern...
def get_mmcls_models():
    """Load the mmcls model-zoo url mapping shipped with mmcv.

    Returns:
        The object parsed from ``model_zoo/mmcls.json``.
    """
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
    return load_file(json_path)
def get_deprecated_model_names():
    """Load the deprecated-model-name mapping shipped with mmcv.

    Returns:
        dict: Mapping parsed from ``model_zoo/deprecated.json``.
    """
    json_path = osp.join(mmcv.__path__[0], 'model_zoo/deprecated.json')
    name_map = load_file(json_path)
    assert isinstance(name_map, dict)
    return name_map
def _process_mmcls_checkpoint(checkpoint): if ('state_dict' in checkpoint): state_dict = checkpoint['state_dict'] else: state_dict = checkpoint new_state_dict = OrderedDict() for (k, v) in state_dict.items(): if k.startswith('backbone.'): new_state_dict[k[9:]] = v ...
class CheckpointLoader(): 'A general checkpoint loader to manage all schemes.' _schemes = {} @classmethod def _register_scheme(cls, prefixes, loader, force=False): if isinstance(prefixes, str): prefixes = [prefixes] else: assert isinstance(prefixes, (list, tupl...
@CheckpointLoader.register_scheme(prefixes='') def load_from_local(filename, map_location): 'load checkpoint by local file path.\n\n Args:\n filename (str): local checkpoint file path\n map_location (str, optional): Same as :func:`torch.load`.\n\n Returns:\n dict or OrderedDict: The loa...
@CheckpointLoader.register_scheme(prefixes=('http://', 'https://')) def load_from_http(filename, map_location=None, model_dir=None): 'load checkpoint through HTTP or HTTPS scheme path. In distributed\n setting, this function only download checkpoint at local rank 0.\n\n Args:\n filename (str): checkp...
@CheckpointLoader.register_scheme(prefixes='pavi://') def load_from_pavi(filename, map_location=None): 'load checkpoint through the file path prefixed with pavi. In distributed\n setting, this function download ckpt at all ranks to different temporary\n directories.\n\n Args:\n filename (str): che...
@CheckpointLoader.register_scheme(prefixes='(\\S+\\:)?s3://') def load_from_ceph(filename, map_location=None, backend='petrel'): "load checkpoint through the file path prefixed with s3. In distributed\n setting, this function download ckpt at all ranks to different temporary\n directories.\n\n Note:\n ...
@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://')) def load_from_torchvision(filename, map_location=None): 'load checkpoint through the file path prefixed with modelzoo or\n torchvision.\n\n Args:\n filename (str): checkpoint file path with modelzoo or\n torchvis...
@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://')) def load_from_openmmlab(filename, map_location=None): 'load checkpoint through the file path prefixed with open-mmlab or\n openmmlab.\n\n Args:\n filename (str): checkpoint file path with open-mmlab or\n openmmlab pr...
@CheckpointLoader.register_scheme(prefixes='mmcls://') def load_from_mmcls(filename, map_location=None): 'load checkpoint through the file path prefixed with mmcls.\n\n Args:\n filename (str): checkpoint file path with mmcls prefix\n map_location (str, optional): Same as :func:`torch.load`.\n\n ...
def _load_checkpoint(filename, map_location=None, logger=None): 'Load checkpoint from somewhere (modelzoo, file, url).\n\n Args:\n filename (str): Accept local filepath, URL, ``torchvision://xxx``,\n ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for\n details.\n ...
def _load_checkpoint_with_prefix(prefix, filename, map_location=None): 'Load partial pretrained model with specific prefix.\n\n Args:\n prefix (str): The prefix of sub-module.\n filename (str): Accept local filepath, URL, ``torchvision://xxx``,\n ``open-mmlab://xxx``. Please refer to `...
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None, revise_keys=[('^module\\.', '')]): "Load checkpoint from a file or URI.\n\n Args:\n model (Module): Module to load checkpoint.\n filename (str): Accept local filepath, URL, ``torchvision://xxx``,\n ``ope...
def weights_to_cpu(state_dict): 'Copy a model state_dict to cpu.\n\n Args:\n state_dict (OrderedDict): Model weights on GPU.\n\n Returns:\n OrderedDict: Model weights on GPU.\n ' state_dict_cpu = OrderedDict() for (key, val) in state_dict.items(): state_dict_cpu[key] = val.c...
def _save_to_state_dict(module, destination, prefix, keep_vars): 'Saves module state to `destination` dictionary.\n\n This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.\n\n Args:\n module (nn.Module): The module to generate state_dict.\n destination (dict): A dict where ...
def get_state_dict(module, destination=None, prefix='', keep_vars=False): 'Returns a dictionary containing a whole state of the module.\n\n Both parameters and persistent buffers (e.g. running averages) are\n included. Keys are corresponding parameter and buffer names.\n\n This method is modified from :m...
def save_checkpoint(model, filename, optimizer=None, meta=None, file_client_args=None): 'Save checkpoint to file.\n\n The checkpoint will have 3 fields: ``meta``, ``state_dict`` and\n ``optimizer``. By default ``meta`` will contain version and time info.\n\n Args:\n model (Module): Module whose pa...
@RUNNER_BUILDERS.register_module() class DefaultRunnerConstructor(): "Default constructor for runners.\n\n Custom existing `Runner` like `EpocBasedRunner` though `RunnerConstructor`.\n For example, We can inject some new properties and functions for `Runner`.\n\n Example:\n >>> from mmcv.runner im...
def init_dist(launcher, backend='nccl', **kwargs): if (mp.get_start_method(allow_none=True) is None): mp.set_start_method('spawn') if (launcher == 'pytorch'): _init_dist_pytorch(backend, **kwargs) elif (launcher == 'mpi'): _init_dist_mpi(backend, **kwargs) elif (launcher == 'sl...
def _init_dist_pytorch(backend, **kwargs):
    """Initialize distributed training launched by the PyTorch launcher.

    Reads the process rank from the ``RANK`` environment variable and pins
    the current process to GPU ``rank % num_gpus`` before creating the
    process group.
    """
    proc_rank = int(os.environ['RANK'])
    gpu_count = torch.cuda.device_count()
    torch.cuda.set_device(proc_rank % gpu_count)
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_mpi(backend, **kwargs):
    """Initialize distributed training launched via MPI (OpenMPI).

    Reads the rank from ``OMPI_COMM_WORLD_RANK`` and pins the process to
    GPU ``rank % num_gpus`` before creating the process group.
    """
    proc_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
    gpu_count = torch.cuda.device_count()
    torch.cuda.set_device(proc_rank % gpu_count)
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None): 'Initialize slurm distributed training environment.\n\n If argument ``port`` is not specified, then the master port will be system\n environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system\n environment variable, then a default port ``29500`` wi...
def get_dist_info():
    """Return ``(rank, world_size)`` of the current process.

    Outside an initialized distributed environment this is ``(0, 1)``.
    """
    if not (dist.is_available() and dist.is_initialized()):
        return (0, 1)
    return (dist.get_rank(), dist.get_world_size())
def master_only(func):
    """Decorator that executes ``func`` only on the master (rank 0) process.

    On every other rank the wrapped call is a no-op that returns ``None``.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank != 0:
            return None
        return func(*args, **kwargs)

    return wrapper
def allreduce_params(params, coalesce=True, bucket_size_mb=(- 1)): 'Allreduce parameters.\n\n Args:\n params (list[torch.Parameters]): List of parameters or buffers of a\n model.\n coalesce (bool, optional): Whether allreduce parameters as a whole.\n Defaults to True.\n ...
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)): 'Allreduce gradients.\n\n Args:\n params (list[torch.Parameters]): List of parameters of a model\n coalesce (bool, optional): Whether allreduce parameters as a whole.\n Defaults to True.\n bucket_size_mb (int, opt...
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=(- 1)): if (bucket_size_mb > 0): bucket_size_bytes = ((bucket_size_mb * 1024) * 1024) buckets = _take_tensors(tensors, bucket_size_bytes) else: buckets = OrderedDict() for tensor in tensors: tp = tensor.ty...
@RUNNERS.register_module() class EpochBasedRunner(BaseRunner): 'Epoch-based Runner.\n\n This runner train models epoch by epoch.\n ' def run_iter(self, data_batch, train_mode, **kwargs): if (self.batch_processor is not None): outputs = self.batch_processor(self.model, data_batch, tr...
@RUNNERS.register_module()
class Runner(EpochBasedRunner):
    """Deprecated alias of :class:`EpochBasedRunner`."""

    def __init__(self, *args, **kwargs):
        # Kept only for backward compatibility; warn users toward the new name.
        warnings.warn(
            'Runner was deprecated, please use EpochBasedRunner instead',
            DeprecationWarning)
        super().__init__(*args, **kwargs)
def cast_tensor_type(inputs, src_type, dst_type): 'Recursively convert Tensor in inputs from src_type to dst_type.\n\n Note:\n In v1.4.4 and later, ``cast_tersor_type`` will only convert the\n torch.Tensor which is consistent with ``src_type`` to the ``dst_type``.\n Before v1.4.4, it ignor...
def auto_fp16(apply_to=None, out_fp32=False): "Decorator to enable fp16 training automatically.\n\n This decorator is useful when you write custom modules and want to support\n mixed precision training. If inputs arguments are fp32 tensors, they will\n be converted to fp16 automatically. Arguments other ...
def force_fp32(apply_to=None, out_fp16=False): "Decorator to convert input arguments to fp32 in force.\n\n This decorator is useful when you write custom modules and want to support\n mixed precision training. If there are some inputs that must be processed\n in fp32 mode, then this decorator can handle ...
def allreduce_grads(params, coalesce=True, bucket_size_mb=(- 1)): warnings.warning('"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be removed in v2.8. Please switch to "mmcv.runner.allreduce_grads', DeprecationWarning) _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb...
def wrap_fp16_model(model): 'Wrap the FP32 model to FP16.\n\n If you are using PyTorch >= 1.6, torch.cuda.amp is used as the\n backend, otherwise, original mmcv implementation will be adopted.\n\n For PyTorch >= 1.6, this function will\n 1. Set fp16 flag inside the model to True.\n\n Otherwise:\n ...
def patch_norm_fp32(module): 'Recursively convert normalization layers from FP16 to FP32.\n\n Args:\n module (nn.Module): The modules to be converted in FP16.\n\n Returns:\n nn.Module: The converted module, the normalization layers have been\n converted to FP32.\n ' if isinst...
def patch_forward_method(func, src_type, dst_type, convert_output=True): 'Patch the forward method of a module.\n\n Args:\n func (callable): The original forward method.\n src_type (torch.dtype): Type of input arguments to be converted from.\n dst_type (torch.dtype): Type of input argument...
class LossScaler(): 'Class that manages loss scaling in mixed precision training which\n supports both dynamic or static mode.\n\n The implementation refers to\n https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py.\n Indirectly, by supplying ``mode=\'dynamic\'`` for dynamic loss ...
@HOOKS.register_module() class CheckpointHook(Hook): 'Save checkpoints periodically.\n\n Args:\n interval (int): The saving period. If ``by_epoch=True``, interval\n indicates epochs, otherwise it indicates iterations.\n Default: -1, which means "never".\n by_epoch (bool): Sa...
@HOOKS.register_module()
class ClosureHook(Hook):
    """Hook whose stage behavior is supplied as a callable at construction.

    Args:
        fn_name (str): Name of an attribute already defined on the hook
            (e.g. a stage method such as ``'after_train_iter'``) to override.
        fn (callable): The callable to bind under ``fn_name``.
    """

    def __init__(self, fn_name, fn):
        # Validate before binding: the target must exist and be callable.
        assert hasattr(self, fn_name)
        assert callable(fn)
        setattr(self, fn_name, fn)
@HOOKS.register_module() class EMAHook(Hook): 'Exponential Moving Average Hook.\n\n Use Exponential Moving Average on all parameters of model in training\n process. All parameters have a ema backup, which update by the formula\n as below. EMAHook takes priority over EvalHook and CheckpointSaverHook.\n\n ...
class EvalHook(Hook): "Non-Distributed evaluation hook.\n\n This hook will regularly perform evaluation in a given interval when\n performing in non-distributed environment.\n\n Args:\n dataloader (DataLoader): A PyTorch dataloader, whose dataset has\n implemented ``evaluate`` function....
class DistEvalHook(EvalHook): "Distributed evaluation hook.\n\n This hook will regularly perform evaluation in a given interval when\n performing in distributed environment.\n\n Args:\n dataloader (DataLoader): A PyTorch dataloader, whose dataset has\n implemented ``evaluate`` function....
class Hook(): stages = ('before_run', 'before_train_epoch', 'before_train_iter', 'after_train_iter', 'after_train_epoch', 'before_val_epoch', 'before_val_iter', 'after_val_iter', 'after_val_epoch', 'after_run') def before_run(self, runner): pass def after_run(self, runner): pass def...
@HOOKS.register_module() class IterTimerHook(Hook): def before_epoch(self, runner): self.t = time.time() def before_iter(self, runner): runner.log_buffer.update({'data_time': (time.time() - self.t)}) def after_iter(self, runner): runner.log_buffer.update({'time': (time.time() - ...
class LoggerHook(Hook): 'Base class for logger hooks.\n\n Args:\n interval (int): Logging interval (every k iterations). Default 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n if less than `interval`. Default True.\n reset_flag (bool): Whether to cle...
@HOOKS.register_module() class DvcliveLoggerHook(LoggerHook): 'Class to log metrics with dvclive.\n\n It requires `dvclive`_ to be installed.\n\n Args:\n model_file (str): Default None. If not None, after each epoch the\n model will be saved to {model_file}.\n interval (int): Loggin...
@HOOKS.register_module() class MlflowLoggerHook(LoggerHook): 'Class to log metrics and (optionally) a trained model to MLflow.\n\n It requires `MLflow`_ to be installed.\n\n Args:\n exp_name (str, optional): Name of the experiment to be used.\n Default None. If not None, set the active exp...
@HOOKS.register_module() class NeptuneLoggerHook(LoggerHook): "Class to log metrics to NeptuneAI.\n\n It requires `Neptune`_ to be installed.\n\n Args:\n init_kwargs (dict): a dict contains the initialization keys as below:\n\n - project (str): Name of a project in a form of\n ...
@HOOKS.register_module() class PaviLoggerHook(LoggerHook): "Class to visual model, log metrics (for internal use).\n\n Args:\n init_kwargs (dict): A dict contains the initialization keys.\n add_graph (bool): Whether to visual model. Default: False.\n add_last_ckpt (bool): Whether to save c...
@HOOKS.register_module() class SegmindLoggerHook(LoggerHook): 'Class to log metrics to Segmind.\n\n It requires `Segmind`_ to be installed.\n\n Args:\n interval (int): Logging interval (every k iterations). Default: 10.\n ignore_last (bool): Ignore the log of last iterations in each epoch\n ...
@HOOKS.register_module() class TensorboardLoggerHook(LoggerHook): 'Class to log metrics to Tensorboard.\n\n Args:\n log_dir (string): Save directory location. Default: None. If default\n values are used, directory location is ``runner.work_dir``/tf_logs.\n interval (int): Logging inter...
@HOOKS.register_module() class TextLoggerHook(LoggerHook): "Logger hook in text.\n\n In this logger hook, the information will be printed on terminal and\n saved in json file.\n\n Args:\n by_epoch (bool, optional): Whether EpochBasedRunner is used.\n Default: True.\n interval (in...
@HOOKS.register_module() class WandbLoggerHook(LoggerHook): "Class to log metrics with wandb.\n\n It requires `wandb`_ to be installed.\n\n\n Args:\n init_kwargs (dict): A dict contains the initialization keys. Check\n https://docs.wandb.ai/ref/python/init for more init arguments.\n ...
class LrUpdaterHook(Hook): "LR Scheduler in MMCV.\n\n Args:\n by_epoch (bool): LR changes epoch by epoch\n warmup (string): Type of warmup used. It can be None(use no warmup),\n 'constant', 'linear' or 'exp'\n warmup_iters (int): The number of iterations or epochs that warmup\n ...
@HOOKS.register_module()
class FixedLrUpdaterHook(LrUpdaterHook):
    """LR updater that keeps the learning rate fixed at its base value."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_lr(self, runner, base_lr):
        # A fixed schedule never modifies the base learning rate.
        return base_lr
@HOOKS.register_module() class StepLrUpdaterHook(LrUpdaterHook): "Step LR scheduler with min_lr clipping.\n\n Args:\n step (int | list[int]): Step to decay the LR. If an int value is given,\n regard it as the decay interval. If a list is given, decay LR at\n these steps.\n g...
@HOOKS.register_module() class ExpLrUpdaterHook(LrUpdaterHook): def __init__(self, gamma, **kwargs): self.gamma = gamma super(ExpLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = (runner.epoch if self.by_epoch else runner.iter) return (base...
@HOOKS.register_module() class PolyLrUpdaterHook(LrUpdaterHook): def __init__(self, power=1.0, min_lr=0.0, **kwargs): self.power = power self.min_lr = min_lr super(PolyLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): if self.by_epoch: pro...
@HOOKS.register_module() class InvLrUpdaterHook(LrUpdaterHook): def __init__(self, gamma, power=1.0, **kwargs): self.gamma = gamma self.power = power super(InvLrUpdaterHook, self).__init__(**kwargs) def get_lr(self, runner, base_lr): progress = (runner.epoch if self.by_epoch ...
@HOOKS.register_module() class CosineAnnealingLrUpdaterHook(LrUpdaterHook): def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs): assert ((min_lr is None) ^ (min_lr_ratio is None)) self.min_lr = min_lr self.min_lr_ratio = min_lr_ratio super(CosineAnnealingLrUpdaterHook, se...
@HOOKS.register_module() class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook): 'Flat + Cosine lr schedule.\n\n Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501\n\n Args:\n start_percent (float): When to start annealing the learning rate\n ...
@HOOKS.register_module() class CosineRestartLrUpdaterHook(LrUpdaterHook): 'Cosine annealing with restarts learning rate scheme.\n\n Args:\n periods (list[int]): Periods for each cosine anneling cycle.\n restart_weights (list[float], optional): Restart weights at each\n restart iteratio...
def get_position_from_periods(iteration, cumulative_periods): 'Get the position from a period list.\n\n It will return the index of the right-closest number in the period list.\n For example, the cumulative_periods = [100, 200, 300, 400],\n if iteration == 50, return 0;\n if iteration == 210, return 2...
@HOOKS.register_module() class CyclicLrUpdaterHook(LrUpdaterHook): "Cyclic LR Scheduler.\n\n Implement the cyclical learning rate policy (CLR) described in\n https://arxiv.org/pdf/1506.01186.pdf\n\n Different from the original paper, we use cosine annealing rather than\n triangular policy inside a cyc...
@HOOKS.register_module() class OneCycleLrUpdaterHook(LrUpdaterHook): "One Cycle LR Scheduler.\n\n The 1cycle learning rate policy changes the learning rate after every\n batch. The one cycle learning rate policy is described in\n https://arxiv.org/pdf/1708.07120.pdf\n\n Args:\n max_lr (float or...
def annealing_cos(start, end, factor, weight=1): 'Calculate annealing cos learning rate.\n\n Cosine anneal from `weight * start + (1 - weight) * end` to `end` as\n percentage goes from 0.0 to 1.0.\n\n Args:\n start (float): The starting learning rate of the cosine annealing.\n end (float): ...
def annealing_linear(start, end, factor): 'Calculate annealing linear learning rate.\n\n Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.\n\n Args:\n start (float): The starting learning rate of the linear annealing.\n end (float): The ending learing rate of the linear a...
def format_param(name, optim, param): if isinstance(param, numbers.Number): return ([param] * len(optim.param_groups)) elif isinstance(param, (list, tuple)): if (len(param) != len(optim.param_groups)): raise ValueError(f'expected {len(optim.param_groups)} values for {name}, got {le...
@HOOKS.register_module() class EmptyCacheHook(Hook): def __init__(self, before_epoch=False, after_epoch=True, after_iter=False): self._before_epoch = before_epoch self._after_epoch = after_epoch self._after_iter = after_iter def after_iter(self, runner): if self._after_iter: ...
class MomentumUpdaterHook(Hook): def __init__(self, by_epoch=True, warmup=None, warmup_iters=0, warmup_ratio=0.9): if (warmup is not None): if (warmup not in ['constant', 'linear', 'exp']): raise ValueError(f'"{warmup}" is not a supported type for warming up, valid types are "...
@HOOKS.register_module() class StepMomentumUpdaterHook(MomentumUpdaterHook): "Step momentum scheduler with min value clipping.\n\n Args:\n step (int | list[int]): Step to decay the momentum. If an int value is\n given, regard it as the decay interval. If a list is given, decay\n mo...
@HOOKS.register_module() class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook): def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs): assert ((min_momentum is None) ^ (min_momentum_ratio is None)) self.min_momentum = min_momentum self.min_momentum_ratio = min_m...
@HOOKS.register_module() class CyclicMomentumUpdaterHook(MomentumUpdaterHook): "Cyclic momentum Scheduler.\n\n Implement the cyclical momentum scheduler policy described in\n https://arxiv.org/pdf/1708.07120.pdf\n\n This momentum scheduler usually used together with the CyclicLRUpdater\n to improve th...
@HOOKS.register_module() class OneCycleMomentumUpdaterHook(MomentumUpdaterHook): "OneCycle momentum Scheduler.\n\n This momentum scheduler usually used together with the OneCycleLrUpdater\n to improve the performance.\n\n Args:\n base_momentum (float or list): Lower momentum boundaries in the cycl...
@HOOKS.register_module() class OptimizerHook(Hook): 'A hook contains custom operations for the optimizer.\n\n Args:\n grad_clip (dict, optional): A config dict to control the clip_grad.\n Default: None.\n detect_anomalous_params (bool): This option is only used for\n debuggi...
@HOOKS.register_module() class GradientCumulativeOptimizerHook(OptimizerHook): 'Optimizer Hook implements multi-iters gradient cumulating.\n\n Args:\n cumulative_iters (int, optional): Num of gradient cumulative iters.\n The optimizer will step every `cumulative_iters` iters.\n Def...
@HOOKS.register_module() class ProfilerHook(Hook): "Profiler to analyze performance during training.\n\n PyTorch Profiler is a tool that allows the collection of the performance\n metrics during the training. More details on Profiler can be found at\n https://pytorch.org/docs/1.8.1/profiler.html#torch.pr...
@HOOKS.register_module() class DistSamplerSeedHook(Hook): 'Data-loading sampler for distributed training.\n\n When distributed training, it is only useful in conjunction with\n :obj:`EpochBasedRunner`, while :obj:`IterBasedRunner` achieves the same\n purpose with :obj:`IterLoader`.\n ' def before...
@HOOKS.register_module() class SyncBuffersHook(Hook): 'Synchronize model buffers such as running_mean and running_var in BN at\n the end of each epoch.\n\n Args:\n distributed (bool): Whether distributed training is used. It is\n effective only for distributed training. Defaults to True.\n ...
class IterLoader(): def __init__(self, dataloader): self._dataloader = dataloader self.iter_loader = iter(self._dataloader) self._epoch = 0 @property def epoch(self): return self._epoch def __next__(self): try: data = next(self.iter_loader) ...
@RUNNERS.register_module() class IterBasedRunner(BaseRunner): 'Iteration-based Runner.\n\n This runner train models iteration by iteration.\n ' def train(self, data_loader, **kwargs): self.model.train() self.mode = 'train' self.data_loader = data_loader self._epoch = dat...
class LogBuffer(): def __init__(self): self.val_history = OrderedDict() self.n_history = OrderedDict() self.output = OrderedDict() self.ready = False def clear(self): self.val_history.clear() self.n_history.clear() self.clear_output() def clear_ou...
def register_torch_optimizers(): torch_optimizers = [] for module_name in dir(torch.optim): if module_name.startswith('__'): continue _optim = getattr(torch.optim, module_name) if (inspect.isclass(_optim) and issubclass(_optim, torch.optim.Optimizer)): OPTIMIZER...
def build_optimizer_constructor(cfg):
    """Build an optimizer constructor from the ``OPTIMIZER_BUILDERS`` registry.

    Args:
        cfg (dict): Config dict naming a registered optimizer constructor.

    Returns:
        The built optimizer-constructor instance.
    """
    constructor = build_from_cfg(cfg, OPTIMIZER_BUILDERS)
    return constructor
def build_optimizer(model, cfg): optimizer_cfg = copy.deepcopy(cfg) constructor_type = optimizer_cfg.pop('constructor', 'DefaultOptimizerConstructor') paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) optim_constructor = build_optimizer_constructor(dict(type=constructor_type, optimizer_cfg=opti...
@OPTIMIZER_BUILDERS.register_module() class DefaultOptimizerConstructor(): "Default constructor for optimizers.\n\n By default each parameter share the same optimizer settings, and we\n provide an argument ``paramwise_cfg`` to specify parameter-wise settings.\n It is a dict and may contain the following ...
class Priority(Enum): 'Hook priority levels.\n\n +--------------+------------+\n | Level | Value |\n +==============+============+\n | HIGHEST | 0 |\n +--------------+------------+\n | VERY_HIGH | 10 |\n +--------------+------------+\n | HIGH | ...
def get_priority(priority): 'Get priority value.\n\n Args:\n priority (int or str or :obj:`Priority`): Priority.\n\n Returns:\n int: The priority value.\n ' if isinstance(priority, int): if ((priority < 0) or (priority > 100)): raise ValueError('priority must be betw...
def get_host_info(): 'Get hostname and username.\n\n Return empty string if exception raised, e.g. ``getpass.getuser()`` will\n lead to error in docker container\n ' host = '' try: host = f'{getuser()}@{gethostname()}' except Exception as e: warnings.warn(f'Host or user not fo...
def get_time_str():
    """Return the current local time formatted as ``YYYYmmdd_HHMMSS``."""
    now = time.localtime()
    return time.strftime('%Y%m%d_%H%M%S', now)