Dataset schema (each record below lists these six fields in this order, one per line; the two code fields are shown flattened to a single line):
- repo_name: string, 4 distinct values
- method_name: string, 3–72 characters
- method_code: string, 87–3.59k characters
- method_summary: string, 12–196 characters
- original_method_code: string, 129–8.98k characters
- method_path: string, 15–136 characters
open-mmlab/mmcv
imresize_like
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'): h, w = dst_img.shape[:2] return imresize(img, (w, h), return_scale, interpolation)
Resize image to the same size as a given image.
def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'): """Resize image to the same size as a given image. Args: img (ndarray): The input image. dst_img (ndarray): The target image. return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Same as :func:`resize`. Returns: tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = dst_img.shape[:2] return imresize(img, (w, h), return_scale, interpolation)
mmcv/image/transforms/resize.py
open-mmlab/mmcv
imrescale
def imrescale(img, scale, return_scale=False, interpolation='bilinear'): h, w = img.shape[:2] if isinstance(scale, (float, int)): if scale <= 0: raise ValueError( 'Invalid scale {}, must be positive.'.format(scale)) scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( 'Scale must be a number or tuple of int, but got {}'.format( type(scale))) new_size = _scale_size((w, h), scale_factor) rescaled_img = imresize(img, new_size, interpolation=interpolation) if return_scale: return rescaled_img, scale_factor else: return rescaled_img
Resize image while keeping the aspect ratio.
def imrescale(img, scale, return_scale=False, interpolation='bilinear'): """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float or tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. Returns: ndarray: The rescaled image. """ h, w = img.shape[:2] if isinstance(scale, (float, int)): if scale <= 0: raise ValueError( 'Invalid scale {}, must be positive.'.format(scale)) scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( 'Scale must be a number or tuple of int, but got {}'.format( type(scale))) new_size = _scale_size((w, h), scale_factor) rescaled_img = imresize(img, new_size, interpolation=interpolation) if return_scale: return rescaled_img, scale_factor else: return rescaled_img
mmcv/image/transforms/resize.py
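A minimal usage sketch for the record above showing the two `scale` modes of `imrescale`, assuming this mmcv version is importable; the input array is synthetic.
>>> import numpy as np, mmcv
>>> img = np.zeros((100, 200, 3), dtype=np.uint8)           # (h, w, c) input
>>> mmcv.imrescale(img, 0.5).shape                           # float scale: multiply both edges
(50, 100, 3)
>>> out, scale = mmcv.imrescale(img, (400, 300), return_scale=True)
>>> out.shape, scale                                         # tuple scale: fit within max long/short edges
((200, 400, 3), 2.0)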
open-mmlab/mmcv
_register_handler
def _register_handler(handler, file_formats): if not isinstance(handler, BaseFileHandler): raise TypeError( 'handler must be a child of BaseFileHandler, not {}'.format( type(handler))) if isinstance(file_formats, str): file_formats = [file_formats] if not is_list_of(file_formats, str): raise TypeError('file_formats must be a str or a list of str') for ext in file_formats: file_handlers[ext] = handler
Register a handler for some file extensions.
def _register_handler(handler, file_formats): """Register a handler for some file extensions. Args: handler (:obj:`BaseFileHandler`): Handler to be registered. file_formats (str or list[str]): File formats to be handled by this handler. """ if not isinstance(handler, BaseFileHandler): raise TypeError( 'handler must be a child of BaseFileHandler, not {}'.format( type(handler))) if isinstance(file_formats, str): file_formats = [file_formats] if not is_list_of(file_formats, str): raise TypeError('file_formats must be a str or a list of str') for ext in file_formats: file_handlers[ext] = handler
mmcv/fileio/io.py
open-mmlab/mmcv
get_priority
def get_priority(priority): if isinstance(priority, int): if priority < 0 or priority > 100: raise ValueError('priority must be between 0 and 100') return priority elif isinstance(priority, Priority): return priority.value elif isinstance(priority, str): return Priority[priority.upper()].value else: raise TypeError('priority must be an integer or Priority enum value')
Get priority value.
def get_priority(priority): """Get priority value. Args: priority (int or str or :obj:`Priority`): Priority. Returns: int: The priority value. """ if isinstance(priority, int): if priority < 0 or priority > 100: raise ValueError('priority must be between 0 and 100') return priority elif isinstance(priority, Priority): return priority.value elif isinstance(priority, str): return Priority[priority.upper()].value else: raise TypeError('priority must be an integer or Priority enum value')
mmcv/runner/priority.py
open-mmlab/mmcv
dequantize
def dequantize(arr, min_val, max_val, levels, dtype=np.float64): if not (isinstance(levels, int) and levels > 1): raise ValueError( 'levels must be a positive integer, but got {}'.format(levels)) if min_val >= max_val: raise ValueError( 'min_val ({}) must be smaller than max_val ({})'.format( min_val, max_val)) dequantized_arr = (arr + 0.5).astype(dtype) * ( max_val - min_val) / levels + min_val return dequantized_arr
Dequantize an array.
def dequantize(arr, min_val, max_val, levels, dtype=np.float64): """Dequantize an array. Args: arr (ndarray): Input array. min_val (scalar): Minimum value of the original (unquantized) range. max_val (scalar): Maximum value of the original (unquantized) range. levels (int): Quantization levels. dtype (np.type): The type of the dequantized array. Returns: ndarray: Dequantized array. """ if not (isinstance(levels, int) and levels > 1): raise ValueError( 'levels must be a positive integer, but got {}'.format(levels)) if min_val >= max_val: raise ValueError( 'min_val ({}) must be smaller than max_val ({})'.format( min_val, max_val)) dequantized_arr = (arr + 0.5).astype(dtype) * ( max_val - min_val) / levels + min_val return dequantized_arr
mmcv/arraymisc/quantization.py
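A quick sketch of what dequantize computes, assuming mmcv is importable: each quantization bin index is mapped back to the centre of its bin inside [min_val, max_val).
>>> import numpy as np, mmcv
>>> q = np.array([0, 127, 255])                        # bin indices produced by quantization
>>> x = mmcv.dequantize(q, -1.0, 1.0, 256)             # bin centres in [-1, 1)
>>> bool(np.allclose(x, [-0.99609375, -0.00390625, 0.99609375]))
True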
open-mmlab/mmcv
imshow
def imshow(img, win_name='', wait_time=0): cv2.imshow(win_name, imread(img)) cv2.waitKey(wait_time)
Show an image.
def imshow(img, win_name='', wait_time=0): """Show an image. Args: img (str or ndarray): The image to be displayed. win_name (str): The window name. wait_time (int): Value of waitKey param. """ cv2.imshow(win_name, imread(img)) cv2.waitKey(wait_time)
mmcv/visualization/image.py
open-mmlab/mmcv
imshow_bboxes
def imshow_bboxes(img, bboxes, colors='green', top_k=-1, thickness=1, show=True, win_name='', wait_time=0, out_file=None): img = imread(img) if isinstance(bboxes, np.ndarray): bboxes = [bboxes] if not isinstance(colors, list): colors = [colors for _ in range(len(bboxes))] colors = [color_val(c) for c in colors] assert len(bboxes) == len(colors) for i, _bboxes in enumerate(bboxes): _bboxes = _bboxes.astype(np.int32) if top_k <= 0: _top_k = _bboxes.shape[0] else: _top_k = min(top_k, _bboxes.shape[0]) for j in range(_top_k): left_top = (_bboxes[j, 0], _bboxes[j, 1]) right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) cv2.rectangle( img, left_top, right_bottom, colors[i], thickness=thickness) if show: imshow(img, win_name, wait_time) if out_file is not None: imwrite(img, out_file)
Draw bboxes on an image.
def imshow_bboxes(img, bboxes, colors='green', top_k=-1, thickness=1, show=True, win_name='', wait_time=0, out_file=None): """Draw bboxes on an image. Args: img (str or ndarray): The image to be displayed. bboxes (list or ndarray): A list of ndarray of shape (k, 4). colors (list[str or tuple or Color]): A list of colors. top_k (int): Plot the first k bboxes only if set positive. thickness (int): Thickness of lines. show (bool): Whether to show the image. win_name (str): The window name. wait_time (int): Value of waitKey param. out_file (str, optional): The filename to write the image. """ img = imread(img) if isinstance(bboxes, np.ndarray): bboxes = [bboxes] if not isinstance(colors, list): colors = [colors for _ in range(len(bboxes))] colors = [color_val(c) for c in colors] assert len(bboxes) == len(colors) for i, _bboxes in enumerate(bboxes): _bboxes = _bboxes.astype(np.int32) if top_k <= 0: _top_k = _bboxes.shape[0] else: _top_k = min(top_k, _bboxes.shape[0]) for j in range(_top_k): left_top = (_bboxes[j, 0], _bboxes[j, 1]) right_bottom = (_bboxes[j, 2], _bboxes[j, 3]) cv2.rectangle( img, left_top, right_bottom, colors[i], thickness=thickness) if show: imshow(img, win_name, wait_time) if out_file is not None: imwrite(img, out_file)
mmcv/visualization/image.py
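A minimal sketch of drawing boxes without opening a window, assuming mmcv and OpenCV are installed; the image, boxes and output path are hypothetical.
>>> import numpy as np, mmcv
>>> img = np.zeros((240, 320, 3), dtype=np.uint8)
>>> bboxes = np.array([[30, 40, 100, 120], [150, 60, 300, 200]], dtype=np.float32)
>>> mmcv.imshow_bboxes(img, bboxes, colors='red', thickness=2,
...                    show=False, out_file='/tmp/bboxes.png')   # hypothetical path
A single ndarray is wrapped into a one-element list, so one color is applied to all of its boxes; pass a list of arrays with a matching list of colors to draw groups in different colors.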
open-mmlab/mmcv
flowread
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs): if isinstance(flow_or_path, np.ndarray): if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2): raise ValueError('Invalid flow with shape {}'.format( flow_or_path.shape)) return flow_or_path elif not is_str(flow_or_path): raise TypeError( '"flow_or_path" must be a filename or numpy array, not {}'.format( type(flow_or_path))) if not quantize: with open(flow_or_path, 'rb') as f: try: header = f.read(4).decode('utf-8') except Exception: raise IOError('Invalid flow file: {}'.format(flow_or_path)) else: if header != 'PIEH': raise IOError( 'Invalid flow file: {}, header does not contain PIEH'. format(flow_or_path)) w = np.fromfile(f, np.int32, 1).squeeze() h = np.fromfile(f, np.int32, 1).squeeze() flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2)) else: assert concat_axis in [0, 1] cat_flow = imread(flow_or_path, flag='unchanged') if cat_flow.ndim != 2: raise IOError( '{} is not a valid quantized flow file, its dimension is {}.'. format(flow_or_path, cat_flow.ndim)) assert cat_flow.shape[concat_axis] % 2 == 0 dx, dy = np.split(cat_flow, 2, axis=concat_axis) flow = dequantize_flow(dx, dy, *args, **kwargs) return flow.astype(np.float32)
Read an optical flow map.
def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs): """Read an optical flow map. Args: flow_or_path (ndarray or str): A flow map or filepath. quantize (bool): whether to read quantized pair, if set to True, remaining args will be passed to :func:`dequantize_flow`. concat_axis (int): The axis that dx and dy are concatenated, can be either 0 or 1. Ignored if quantize is False. Returns: ndarray: Optical flow represented as a (h, w, 2) numpy array """ if isinstance(flow_or_path, np.ndarray): if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2): raise ValueError('Invalid flow with shape {}'.format( flow_or_path.shape)) return flow_or_path elif not is_str(flow_or_path): raise TypeError( '"flow_or_path" must be a filename or numpy array, not {}'.format( type(flow_or_path))) if not quantize: with open(flow_or_path, 'rb') as f: try: header = f.read(4).decode('utf-8') except Exception: raise IOError('Invalid flow file: {}'.format(flow_or_path)) else: if header != 'PIEH': raise IOError( 'Invalid flow file: {}, header does not contain PIEH'. format(flow_or_path)) w = np.fromfile(f, np.int32, 1).squeeze() h = np.fromfile(f, np.int32, 1).squeeze() flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2)) else: assert concat_axis in [0, 1] cat_flow = imread(flow_or_path, flag='unchanged') if cat_flow.ndim != 2: raise IOError( '{} is not a valid quantized flow file, its dimension is {}.'. format(flow_or_path, cat_flow.ndim)) assert cat_flow.shape[concat_axis] % 2 == 0 dx, dy = np.split(cat_flow, 2, axis=concat_axis) flow = dequantize_flow(dx, dy, *args, **kwargs) return flow.astype(np.float32)
mmcv/video/optflow.py
open-mmlab/mmcv
dequantize_flow
def dequantize_flow(dx, dy, max_val=0.02, denorm=True): assert dx.shape == dy.shape assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1) dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]] if denorm: dx *= dx.shape[1] dy *= dx.shape[0] flow = np.dstack((dx, dy)) return flow
Recover from quantized flow.
def dequantize_flow(dx, dy, max_val=0.02, denorm=True): """Recover from quantized flow. Args: dx (ndarray): Quantized dx. dy (ndarray): Quantized dy. max_val (float): Maximum value used when quantizing. denorm (bool): Whether to multiply flow values with width/height. Returns: ndarray: Dequantized flow. """ assert dx.shape == dy.shape assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1) dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]] if denorm: dx *= dx.shape[1] dy *= dx.shape[0] flow = np.dstack((dx, dy)) return flow
mmcv/video/optflow.py
open-mmlab/mmcv
load_checkpoint
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None): if filename.startswith('modelzoo://'): import torchvision model_urls = dict() for _, name, ispkg in pkgutil.walk_packages( torchvision.models.__path__): if not ispkg: _zoo = import_module('torchvision.models.{}'.format(name)) _urls = getattr(_zoo, 'model_urls') model_urls.update(_urls) model_name = filename[11:] checkpoint = model_zoo.load_url(model_urls[model_name]) elif filename.startswith('open-mmlab://'): model_name = filename[13:] checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name]) elif filename.startswith(('http://', 'https://')): checkpoint = model_zoo.load_url(filename) else: if not osp.isfile(filename): raise IOError('{} is not a checkpoint file'.format(filename)) checkpoint = torch.load(filename, map_location=map_location) if isinstance(checkpoint, OrderedDict): state_dict = checkpoint elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: raise RuntimeError( 'No state_dict found in checkpoint file {}'.format(filename)) if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} if hasattr(model, 'module'): load_state_dict(model.module, state_dict, strict, logger) else: load_state_dict(model, state_dict, strict, logger) return checkpoint
Load checkpoint from a file or URI.
def load_checkpoint(model, filename, map_location=None, strict=False, logger=None): """Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Either a filepath or URL or modelzoo://xxxxxxx. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. Returns: dict or OrderedDict: The loaded checkpoint. """ # load checkpoint from modelzoo or file or url if filename.startswith('modelzoo://'): import torchvision model_urls = dict() for _, name, ispkg in pkgutil.walk_packages( torchvision.models.__path__): if not ispkg: _zoo = import_module('torchvision.models.{}'.format(name)) _urls = getattr(_zoo, 'model_urls') model_urls.update(_urls) model_name = filename[11:] checkpoint = model_zoo.load_url(model_urls[model_name]) elif filename.startswith('open-mmlab://'): model_name = filename[13:] checkpoint = model_zoo.load_url(open_mmlab_model_urls[model_name]) elif filename.startswith(('http://', 'https://')): checkpoint = model_zoo.load_url(filename) else: if not osp.isfile(filename): raise IOError('{} is not a checkpoint file'.format(filename)) checkpoint = torch.load(filename, map_location=map_location) # get state_dict from checkpoint if isinstance(checkpoint, OrderedDict): state_dict = checkpoint elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: raise RuntimeError( 'No state_dict found in checkpoint file {}'.format(filename)) # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in state_dict.items()} # load state_dict if hasattr(model, 'module'): load_state_dict(model.module, state_dict, strict, logger) else: load_state_dict(model, state_dict, strict, logger) return checkpoint
mmcv/runner/checkpoint.py
open-mmlab/mmcv
weights_to_cpu
def weights_to_cpu(state_dict): state_dict_cpu = OrderedDict() for key, val in state_dict.items(): state_dict_cpu[key] = val.cpu() return state_dict_cpu
Copy a model state_dict to cpu.
def weights_to_cpu(state_dict): """Copy a model state_dict to cpu. Args: state_dict (OrderedDict): Model weights on GPU. Returns: OrderedDict: Model weights on CPU. """ state_dict_cpu = OrderedDict() for key, val in state_dict.items(): state_dict_cpu[key] = val.cpu() return state_dict_cpu
mmcv/runner/checkpoint.py
open-mmlab/mmcv
save_checkpoint
def save_checkpoint(model, filename, optimizer=None, meta=None): if meta is None: meta = {} elif not isinstance(meta, dict): raise TypeError('meta must be a dict or None, but got {}'.format( type(meta))) meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) mmcv.mkdir_or_exist(osp.dirname(filename)) if hasattr(model, 'module'): model = model.module checkpoint = { 'meta': meta, 'state_dict': weights_to_cpu(model.state_dict()) } if optimizer is not None: checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, filename)
Save checkpoint to file. The checkpoint will have 3 fields: ``meta``, ``state_dict`` and ``optimizer``.
def save_checkpoint(model, filename, optimizer=None, meta=None): """Save checkpoint to file. The checkpoint will have 3 fields: ``meta``, ``state_dict`` and ``optimizer``. By default ``meta`` will contain version and time info. Args: model (Module): Module whose params are to be saved. filename (str): Checkpoint filename. optimizer (:obj:`Optimizer`, optional): Optimizer to be saved. meta (dict, optional): Metadata to be saved in checkpoint. """ if meta is None: meta = {} elif not isinstance(meta, dict): raise TypeError('meta must be a dict or None, but got {}'.format( type(meta))) meta.update(mmcv_version=mmcv.__version__, time=time.asctime()) mmcv.mkdir_or_exist(osp.dirname(filename)) if hasattr(model, 'module'): model = model.module checkpoint = { 'meta': meta, 'state_dict': weights_to_cpu(model.state_dict()) } if optimizer is not None: checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, filename)
mmcv/runner/checkpoint.py
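A round-trip sketch tying save_checkpoint and load_checkpoint together, assuming this mmcv version and PyTorch are installed; the checkpoint path is hypothetical.
>>> import torch.nn as nn
>>> from mmcv.runner import save_checkpoint, load_checkpoint
>>> model = nn.Linear(4, 2)
>>> save_checkpoint(model, '/tmp/demo.pth', meta=dict(epoch=1))   # mmcv_version/time are added to meta
>>> ckpt = load_checkpoint(model, '/tmp/demo.pth')                # reloads the weights in place
>>> sorted(ckpt.keys())                                           # no optimizer was passed
['meta', 'state_dict']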
open-mmlab/mmcv
Runner.init_optimizer
def init_optimizer(self, optimizer): if isinstance(optimizer, dict): optimizer = obj_from_dict( optimizer, torch.optim, dict(params=self.model.parameters())) elif not isinstance(optimizer, torch.optim.Optimizer): raise TypeError( 'optimizer must be either an Optimizer object or a dict, ' 'but got {}'.format(type(optimizer))) return optimizer
Init the optimizer.
def init_optimizer(self, optimizer): """Init the optimizer. Args: optimizer (dict or :obj:`~torch.optim.Optimizer`): Either an optimizer object or a dict used for constructing the optimizer. Returns: :obj:`~torch.optim.Optimizer`: An optimizer object. Examples: >>> optimizer = dict(type='SGD', lr=0.01, momentum=0.9) >>> type(runner.init_optimizer(optimizer)) <class 'torch.optim.sgd.SGD'> """ if isinstance(optimizer, dict): optimizer = obj_from_dict( optimizer, torch.optim, dict(params=self.model.parameters())) elif not isinstance(optimizer, torch.optim.Optimizer): raise TypeError( 'optimizer must be either an Optimizer object or a dict, ' 'but got {}'.format(type(optimizer))) return optimizer
mmcv/runner/runner.py
open-mmlab/mmcv
Runner.init_logger
def init_logger(self, log_dir=None, level=logging.INFO): logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=level) logger = logging.getLogger(__name__) if log_dir and self.rank == 0: filename = '{}.log'.format(self.timestamp) log_file = osp.join(log_dir, filename) self._add_file_handler(logger, log_file, level=level) return logger
Init the logger.
def init_logger(self, log_dir=None, level=logging.INFO): """Init the logger. Args: log_dir(str, optional): Log file directory. If not specified, no log file will be used. level (int or str): See the built-in python logging module. Returns: :obj:`~logging.Logger`: Python logger. """ logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=level) logger = logging.getLogger(__name__) if log_dir and self.rank == 0: filename = '{}.log'.format(self.timestamp) log_file = osp.join(log_dir, filename) self._add_file_handler(logger, log_file, level=level) return logger
mmcv/runner/runner.py
open-mmlab/mmcv
Runner.current_lr
def current_lr(self): if self.optimizer is None: raise RuntimeError( 'lr is not applicable because optimizer does not exist.') return [group['lr'] for group in self.optimizer.param_groups]
Get current learning rates.
def current_lr(self): """Get current learning rates. Returns: list: Current learning rate of all param groups. """ if self.optimizer is None: raise RuntimeError( 'lr is not applicable because optimizer does not exist.') return [group['lr'] for group in self.optimizer.param_groups]
mmcv/runner/runner.py
open-mmlab/mmcv
Runner.register_hook
def register_hook(self, hook, priority='NORMAL'): assert isinstance(hook, Hook) if hasattr(hook, 'priority'): raise ValueError('"priority" is a reserved attribute for hooks') priority = get_priority(priority) hook.priority = priority inserted = False for i in range(len(self._hooks) - 1, -1, -1): if priority >= self._hooks[i].priority: self._hooks.insert(i + 1, hook) inserted = True break if not inserted: self._hooks.insert(0, hook)
Register a hook into the hook list.
def register_hook(self, hook, priority='NORMAL'): """Register a hook into the hook list. Args: hook (:obj:`Hook`): The hook to be registered. priority (int or str or :obj:`Priority`): Hook priority. Lower value means higher priority. """ assert isinstance(hook, Hook) if hasattr(hook, 'priority'): raise ValueError('"priority" is a reserved attribute for hooks') priority = get_priority(priority) hook.priority = priority # insert the hook to a sorted list inserted = False for i in range(len(self._hooks) - 1, -1, -1): if priority >= self._hooks[i].priority: self._hooks.insert(i + 1, hook) inserted = True break if not inserted: self._hooks.insert(0, hook)
mmcv/runner/runner.py
open-mmlab/mmcv
Runner.run
def run(self, data_loaders, workflow, max_epochs, **kwargs): assert isinstance(data_loaders, list) assert mmcv.is_list_of(workflow, tuple) assert len(data_loaders) == len(workflow) self._max_epochs = max_epochs work_dir = self.work_dir if self.work_dir is not None else 'NONE' self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir) self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs) self.call_hook('before_run') while self.epoch < max_epochs: for i, flow in enumerate(workflow): mode, epochs = flow if isinstance(mode, str): if not hasattr(self, mode): raise ValueError( 'runner has no method named "{}" to run an epoch'. format(mode)) epoch_runner = getattr(self, mode) elif callable(mode): epoch_runner = mode else: raise TypeError('mode in workflow must be a str or ' 'callable function, not {}'.format( type(mode))) for _ in range(epochs): if mode == 'train' and self.epoch >= max_epochs: return epoch_runner(data_loaders[i], **kwargs) time.sleep(1) self.call_hook('after_run')
Start running.
def run(self, data_loaders, workflow, max_epochs, **kwargs): """Start running. Args: data_loaders (list[:obj:`DataLoader`]): Dataloaders for training and validation. workflow (list[tuple]): A list of (phase, epochs) to specify the running order and epochs. E.g, [('train', 2), ('val', 1)] means running 2 epochs for training and 1 epoch for validation, iteratively. max_epochs (int): Total training epochs. """ assert isinstance(data_loaders, list) assert mmcv.is_list_of(workflow, tuple) assert len(data_loaders) == len(workflow) self._max_epochs = max_epochs work_dir = self.work_dir if self.work_dir is not None else 'NONE' self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir) self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs) self.call_hook('before_run') while self.epoch < max_epochs: for i, flow in enumerate(workflow): mode, epochs = flow if isinstance(mode, str): # self.train() if not hasattr(self, mode): raise ValueError( 'runner has no method named "{}" to run an epoch'. format(mode)) epoch_runner = getattr(self, mode) elif callable(mode): # custom train() epoch_runner = mode else: raise TypeError('mode in workflow must be a str or ' 'callable function, not {}'.format( type(mode))) for _ in range(epochs): if mode == 'train' and self.epoch >= max_epochs: return epoch_runner(data_loaders[i], **kwargs) time.sleep(1) # wait for some hooks like loggers to finish self.call_hook('after_run')
mmcv/runner/runner.py
open-mmlab/mmcv
Runner.register_training_hooks
def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None): if optimizer_config is None: optimizer_config = {} if checkpoint_config is None: checkpoint_config = {} self.register_lr_hooks(lr_config) self.register_hook(self.build_hook(optimizer_config, OptimizerHook)) self.register_hook(self.build_hook(checkpoint_config, CheckpointHook)) self.register_hook(IterTimerHook()) if log_config is not None: self.register_logger_hooks(log_config)
Register default hooks for training. Default hooks include: LrUpdaterHook, OptimizerHook, CheckpointHook, IterTimerHook and LoggerHook(s).
def register_training_hooks(self, lr_config, optimizer_config=None, checkpoint_config=None, log_config=None): """Register default hooks for training. Default hooks include: - LrUpdaterHook - OptimizerHook - CheckpointHook - IterTimerHook - LoggerHook(s) """ if optimizer_config is None: optimizer_config = {} if checkpoint_config is None: checkpoint_config = {} self.register_lr_hooks(lr_config) self.register_hook(self.build_hook(optimizer_config, OptimizerHook)) self.register_hook(self.build_hook(checkpoint_config, CheckpointHook)) self.register_hook(IterTimerHook()) if log_config is not None: self.register_logger_hooks(log_config)
mmcv/runner/runner.py
open-mmlab/mmcv
convert_video
def convert_video(in_file, out_file, print_cmd=False, pre_options='', **kwargs): options = [] for k, v in kwargs.items(): if isinstance(v, bool): if v: options.append('-{}'.format(k)) elif k == 'log_level': assert v in [ 'quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace' ] options.append('-loglevel {}'.format(v)) else: options.append('-{} {}'.format(k, v)) cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file, ' '.join(options), out_file) if print_cmd: print(cmd) subprocess.call(cmd, shell=True)
Convert a video with ffmpeg. This provides a general api to ffmpeg; the executed command is ``ffmpeg -y <pre_options> -i <in_file> <options> <out_file>``.
def convert_video(in_file, out_file, print_cmd=False, pre_options='', **kwargs): """Convert a video with ffmpeg. This provides a general api to ffmpeg, the executed command is:: `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>` Options(kwargs) are mapped to ffmpeg commands with the following rules: - key=val: "-key val" - key=True: "-key" - key=False: "" Args: in_file (str): Input video filename. out_file (str): Output video filename. pre_options (str): Options appears before "-i <in_file>". print_cmd (bool): Whether to print the final ffmpeg command. """ options = [] for k, v in kwargs.items(): if isinstance(v, bool): if v: options.append('-{}'.format(k)) elif k == 'log_level': assert v in [ 'quiet', 'panic', 'fatal', 'error', 'warning', 'info', 'verbose', 'debug', 'trace' ] options.append('-loglevel {}'.format(v)) else: options.append('-{} {}'.format(k, v)) cmd = 'ffmpeg -y {} -i {} {} {}'.format(pre_options, in_file, ' '.join(options), out_file) if print_cmd: print(cmd) subprocess.call(cmd, shell=True)
mmcv/video/processing.py
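A sketch of how keyword arguments are turned into ffmpeg flags, assuming ffmpeg is on PATH and mmcv is importable; the filenames are hypothetical placeholders (ffmpeg itself will report an error if in.mp4 does not exist, but the command is still printed).
>>> import mmcv
>>> # vcodec='libx264' -> '-vcodec libx264', an=True -> '-an', log_level -> '-loglevel error'
>>> mmcv.convert_video('in.mp4', 'out.mp4', print_cmd=True,
...                    vcodec='libx264', an=True, log_level='error')
The printed command has the form ``ffmpeg -y -i in.mp4 -vcodec libx264 -an -loglevel error out.mp4``; keyword order is preserved, and a False value drops the flag entirely.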
open-mmlab/mmcv
resize_video
def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False, **kwargs): if size is None and ratio is None: raise ValueError('expected size or ratio must be specified') elif size is not None and ratio is not None: raise ValueError('size and ratio cannot be specified at the same time') options = {'log_level': log_level} if size: if not keep_ar: options['vf'] = 'scale={}:{}'.format(size[0], size[1]) else: options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio' '=decrease'.format(size[0], size[1])) else: if not isinstance(ratio, tuple): ratio = (ratio, ratio) options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format( ratio[0], ratio[1]) convert_video(in_file, out_file, print_cmd, **options)
Resize a video.
def resize_video(in_file, out_file, size=None, ratio=None, keep_ar=False, log_level='info', print_cmd=False, **kwargs): """Resize a video. Args: in_file (str): Input video filename. out_file (str): Output video filename. size (tuple): Expected size (w, h), eg, (320, 240) or (320, -1). ratio (tuple or float): Expected resize ratio, (2, 0.5) means (w*2, h*0.5). keep_ar (bool): Whether to keep original aspect ratio. log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command. """ if size is None and ratio is None: raise ValueError('expected size or ratio must be specified') elif size is not None and ratio is not None: raise ValueError('size and ratio cannot be specified at the same time') options = {'log_level': log_level} if size: if not keep_ar: options['vf'] = 'scale={}:{}'.format(size[0], size[1]) else: options['vf'] = ('scale=w={}:h={}:force_original_aspect_ratio' '=decrease'.format(size[0], size[1])) else: if not isinstance(ratio, tuple): ratio = (ratio, ratio) options['vf'] = 'scale="trunc(iw*{}):trunc(ih*{})"'.format( ratio[0], ratio[1]) convert_video(in_file, out_file, print_cmd, **options)
mmcv/video/processing.py
open-mmlab/mmcv
cut_video
def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs): options = {'log_level': log_level} if vcodec is None: options['vcodec'] = 'copy' if acodec is None: options['acodec'] = 'copy' if start: options['ss'] = start else: start = 0 if end: options['t'] = end - start convert_video(in_file, out_file, print_cmd, **options)
Cut a clip from a video.
def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs): """Cut a clip from a video. Args: in_file (str): Input video filename. out_file (str): Output video filename. start (None or float): Start time (in seconds). end (None or float): End time (in seconds). vcodec (None or str): Output video codec, None for unchanged. acodec (None or str): Output audio codec, None for unchanged. log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command. """ options = {'log_level': log_level} if vcodec is None: options['vcodec'] = 'copy' if acodec is None: options['acodec'] = 'copy' if start: options['ss'] = start else: start = 0 if end: options['t'] = end - start convert_video(in_file, out_file, print_cmd, **options)
mmcv/video/processing.py
open-mmlab/mmcv
concat_video
def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs): _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True) with open(tmp_filename, 'w') as f: for filename in video_list: f.write('file {}\n'.format(osp.abspath(filename))) options = {'log_level': log_level} if vcodec is None: options['vcodec'] = 'copy' if acodec is None: options['acodec'] = 'copy' convert_video( tmp_filename, out_file, print_cmd, pre_options='-f concat -safe 0', **options) os.remove(tmp_filename)
Concatenate multiple videos into a single one.
def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs): """Concatenate multiple videos into a single one. Args: video_list (list): A list of video filenames out_file (str): Output video filename vcodec (None or str): Output video codec, None for unchanged acodec (None or str): Output audio codec, None for unchanged log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command. """ _, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True) with open(tmp_filename, 'w') as f: for filename in video_list: f.write('file {}\n'.format(osp.abspath(filename))) options = {'log_level': log_level} if vcodec is None: options['vcodec'] = 'copy' if acodec is None: options['acodec'] = 'copy' convert_video( tmp_filename, out_file, print_cmd, pre_options='-f concat -safe 0', **options) os.remove(tmp_filename)
mmcv/video/processing.py
open-mmlab/mmcv
list_from_file
def list_from_file(filename, prefix='', offset=0, max_num=0): cnt = 0 item_list = [] with open(filename, 'r') as f: for _ in range(offset): f.readline() for line in f: if max_num > 0 and cnt >= max_num: break item_list.append(prefix + line.rstrip('\n')) cnt += 1 return item_list
Load a text file and parse the content as a list of strings.
def list_from_file(filename, prefix='', offset=0, max_num=0): """Load a text file and parse the content as a list of strings. Args: filename (str): Filename. prefix (str): The prefix to be inserted at the beginning of each item. offset (int): The offset of lines. max_num (int): The maximum number of lines to be read, zero and negative values mean no limitation. Returns: list[str]: A list of strings. """ cnt = 0 item_list = [] with open(filename, 'r') as f: for _ in range(offset): f.readline() for line in f: if max_num > 0 and cnt >= max_num: break item_list.append(prefix + line.rstrip('\n')) cnt += 1 return item_list
mmcv/fileio/parse.py
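A small sketch of list_from_file with its prefix/offset/max_num options, assuming mmcv is importable; the file path is hypothetical.
>>> import mmcv
>>> with open('/tmp/names.txt', 'w') as f:
...     _ = f.write('cat\ndog\nbird\n')
>>> mmcv.list_from_file('/tmp/names.txt')
['cat', 'dog', 'bird']
>>> mmcv.list_from_file('/tmp/names.txt', prefix='cls_', offset=1, max_num=1)
['cls_dog']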
open-mmlab/mmcv
conv3x3
def conv3x3(in_planes, out_planes, dilation=1): "3x3 convolution with padding" return nn.Conv2d( in_planes, out_planes, kernel_size=3, padding=dilation, dilation=dilation)
3x3 convolution with padding
def conv3x3(in_planes, out_planes, dilation=1): "3x3 convolution with padding" return nn.Conv2d( in_planes, out_planes, kernel_size=3, padding=dilation, dilation=dilation)
mmcv/cnn/vgg.py
open-mmlab/mmcv
imread
def imread(img_or_path, flag='color'): if isinstance(img_or_path, np.ndarray): return img_or_path elif is_str(img_or_path): flag = imread_flags[flag] if is_str(flag) else flag check_file_exist(img_or_path, 'img file does not exist: {}'.format(img_or_path)) return cv2.imread(img_or_path, flag) else: raise TypeError('"img" must be a numpy array or a filename')
Read an image.
def imread(img_or_path, flag='color'): """Read an image. Args: img_or_path (ndarray or str): Either a numpy array or image path. If it is a numpy array (loaded image), then it will be returned as is. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. Returns: ndarray: Loaded image array. """ if isinstance(img_or_path, np.ndarray): return img_or_path elif is_str(img_or_path): flag = imread_flags[flag] if is_str(flag) else flag check_file_exist(img_or_path, 'img file does not exist: {}'.format(img_or_path)) return cv2.imread(img_or_path, flag) else: raise TypeError('"img" must be a numpy array or a filename')
mmcv/image/io.py
open-mmlab/mmcv
imfrombytes
def imfrombytes(content, flag='color'): img_np = np.frombuffer(content, np.uint8) flag = imread_flags[flag] if is_str(flag) else flag img = cv2.imdecode(img_np, flag) return img
Read an image from bytes.
def imfrombytes(content, flag='color'): """Read an image from bytes. Args: content (bytes): Image bytes got from files or other streams. flag (str): Same as :func:`imread`. Returns: ndarray: Loaded image array. """ img_np = np.frombuffer(content, np.uint8) flag = imread_flags[flag] if is_str(flag) else flag img = cv2.imdecode(img_np, flag) return img
mmcv/image/io.py
open-mmlab/mmcv
imwrite
def imwrite(img, file_path, params=None, auto_mkdir=True): if auto_mkdir: dir_name = osp.abspath(osp.dirname(file_path)) mkdir_or_exist(dir_name) return cv2.imwrite(file_path, img, params)
Write image to file
def imwrite(img, file_path, params=None, auto_mkdir=True): """Write image to file Args: img (ndarray): Image array to be written. file_path (str): Image file path. params (None or list): Same as opencv's :func:`imwrite` interface. auto_mkdir (bool): If the parent folder of `file_path` does not exist, whether to create it automatically. Returns: bool: Successful or not. """ if auto_mkdir: dir_name = osp.abspath(osp.dirname(file_path)) mkdir_or_exist(dir_name) return cv2.imwrite(file_path, img, params)
mmcv/image/io.py
open-mmlab/mmcv
bgr2gray
def bgr2gray(img, keepdim=False): out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if keepdim: out_img = out_img[..., None] return out_img
Convert a BGR image to grayscale image.
def bgr2gray(img, keepdim=False): """Convert a BGR image to grayscale image. Args: img (ndarray): The input image. keepdim (bool): If False (by default), then return the grayscale image with 2 dims, otherwise 3 dims. Returns: ndarray: The converted grayscale image. """ out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if keepdim: out_img = out_img[..., None] return out_img
mmcv/image/transforms/colorspace.py
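A short pipeline sketch combining the I/O and colorspace helpers above, assuming mmcv and OpenCV are installed; the output directory is hypothetical and is created automatically by imwrite.
>>> import numpy as np, mmcv
>>> img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # synthetic BGR image
>>> mmcv.imwrite(img, '/tmp/mmcv_demo/img.png')                 # parent dir auto-created
True
>>> gray = mmcv.bgr2gray(mmcv.imread('/tmp/mmcv_demo/img.png'), keepdim=True)
>>> gray.shape                                                  # keepdim=True keeps a channel axis
(64, 64, 1)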
open-mmlab/mmcv
gray2bgr
def gray2bgr(img): img = img[..., None] if img.ndim == 2 else img out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) return out_img
Convert a grayscale image to BGR image.
def gray2bgr(img): """Convert a grayscale image to BGR image. Args: img (ndarray or str): The input image. Returns: ndarray: The converted BGR image. """ img = img[..., None] if img.ndim == 2 else img out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) return out_img
mmcv/image/transforms/colorspace.py
open-mmlab/mmcv
iter_cast
def iter_cast(inputs, dst_type, return_type=None): if not isinstance(inputs, collections_abc.Iterable): raise TypeError('inputs must be an iterable object') if not isinstance(dst_type, type): raise TypeError('"dst_type" must be a valid type') out_iterable = six.moves.map(dst_type, inputs) if return_type is None: return out_iterable else: return return_type(out_iterable)
Cast elements of an iterable object into some type.
def iter_cast(inputs, dst_type, return_type=None): """Cast elements of an iterable object into some type. Args: inputs (Iterable): The input object. dst_type (type): Destination type. return_type (type, optional): If specified, the output object will be converted to this type, otherwise an iterator. Returns: iterator or specified type: The converted object. """ if not isinstance(inputs, collections_abc.Iterable): raise TypeError('inputs must be an iterable object') if not isinstance(dst_type, type): raise TypeError('"dst_type" must be a valid type') out_iterable = six.moves.map(dst_type, inputs) if return_type is None: return out_iterable else: return return_type(out_iterable)
mmcv/utils/misc.py
open-mmlab/mmcv
is_seq_of
def is_seq_of(seq, expected_type, seq_type=None): if seq_type is None: exp_seq_type = collections_abc.Sequence else: assert isinstance(seq_type, type) exp_seq_type = seq_type if not isinstance(seq, exp_seq_type): return False for item in seq: if not isinstance(item, expected_type): return False return True
Check whether it is a sequence of some type.
def is_seq_of(seq, expected_type, seq_type=None): """Check whether it is a sequence of some type. Args: seq (Sequence): The sequence to be checked. expected_type (type): Expected type of sequence items. seq_type (type, optional): Expected sequence type. Returns: bool: Whether the sequence is valid. """ if seq_type is None: exp_seq_type = collections_abc.Sequence else: assert isinstance(seq_type, type) exp_seq_type = seq_type if not isinstance(seq, exp_seq_type): return False for item in seq: if not isinstance(item, expected_type): return False return True
mmcv/utils/misc.py
open-mmlab/mmcv
slice_list
def slice_list(in_list, lens): if not isinstance(lens, list): raise TypeError('"lens" must be a list of integers') elif sum(lens) != len(in_list): raise ValueError( 'sum of lens and list length does not match: {} != {}'.format( sum(lens), len(in_list))) out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:idx + lens[i]]) idx += lens[i] return out_list
Slice a list into several sub lists by a list of given length.
def slice_list(in_list, lens): """Slice a list into several sub lists by a list of given length. Args: in_list (list): The list to be sliced. lens (list[int]): The expected length of each output list. Returns: list: A list of sliced lists. """ if not isinstance(lens, list): raise TypeError('"lens" must be a list of integers') elif sum(lens) != len(in_list): raise ValueError( 'sum of lens and list length does not match: {} != {}'.format( sum(lens), len(in_list))) out_list = [] idx = 0 for i in range(len(lens)): out_list.append(in_list[idx:idx + lens[i]]) idx += lens[i] return out_list
mmcv/utils/misc.py
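A one-liner sketch of slice_list, assuming mmcv is importable; the given lengths must sum to the list length or a ValueError is raised.
>>> import mmcv
>>> mmcv.slice_list([1, 2, 3, 4, 5], [2, 3])
[[1, 2], [3, 4, 5]]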
open-mmlab/mmcv
check_prerequisites
def check_prerequisites( prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' 'found, please install them first.'): def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = [prerequisites] if isinstance( prerequisites, str) else prerequisites missing = [] for item in requirements: if not checker(item): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not met.') else: return func(*args, **kwargs) return wrapped_func return wrap
A decorator factory to check if prerequisites are satisfied.
def check_prerequisites( prerequisites, checker, msg_tmpl='Prerequisites "{}" are required in method "{}" but not ' 'found, please install them first.'): """A decorator factory to check if prerequisites are satisfied. Args: prerequisites (str or list[str]): Prerequisites to be checked. checker (callable): The checker method that returns True if a prerequisite is met, False otherwise. msg_tmpl (str): The message template with two variables. Returns: decorator: A specific decorator. """ def wrap(func): @functools.wraps(func) def wrapped_func(*args, **kwargs): requirements = [prerequisites] if isinstance( prerequisites, str) else prerequisites missing = [] for item in requirements: if not checker(item): missing.append(item) if missing: print(msg_tmpl.format(', '.join(missing), func.__name__)) raise RuntimeError('Prerequisites not met.') else: return func(*args, **kwargs) return wrapped_func return wrap
mmcv/utils/misc.py
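A sketch of the decorator factory with a custom checker, assuming the module path shown above is importable; 'foo' is a deliberately missing, hypothetical prerequisite.
>>> from mmcv.utils.misc import check_prerequisites
>>> @check_prerequisites('numpy', checker=lambda name: True)
... def ok():
...     return 'ran'
>>> ok()
'ran'
>>> @check_prerequisites('foo', checker=lambda name: False)
... def blocked():
...     return 'never reached'
>>> blocked()     # prints the msg_tmpl message listing 'foo', then raises RuntimeError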
open-mmlab/mmcv
LogBuffer.average
def average(self, n=0): assert n >= 0 for key in self.val_history: values = np.array(self.val_history[key][-n:]) nums = np.array(self.n_history[key][-n:]) avg = np.sum(values * nums) / np.sum(nums) self.output[key] = avg self.ready = True
Average latest n values or all values
def average(self, n=0): """Average latest n values or all values""" assert n >= 0 for key in self.val_history: values = np.array(self.val_history[key][-n:]) nums = np.array(self.n_history[key][-n:]) avg = np.sum(values * nums) / np.sum(nums) self.output[key] = avg self.ready = True
mmcv/runner/log_buffer.py
open-mmlab/mmcv
scatter
def scatter(input, devices, streams=None): if streams is None: streams = [None] * len(devices) if isinstance(input, list): chunk_size = (len(input) - 1) // len(devices) + 1 outputs = [ scatter(input[i], [devices[i // chunk_size]], [streams[i // chunk_size]]) for i in range(len(input)) ] return outputs elif isinstance(input, torch.Tensor): output = input.contiguous() stream = streams[0] if output.numel() > 0 else None with torch.cuda.device(devices[0]), torch.cuda.stream(stream): output = output.cuda(devices[0], non_blocking=True) return output else: raise Exception('Unknown type {}.'.format(type(input)))
Scatters tensor across multiple GPUs.
def scatter(input, devices, streams=None): """Scatters tensor across multiple GPUs. """ if streams is None: streams = [None] * len(devices) if isinstance(input, list): chunk_size = (len(input) - 1) // len(devices) + 1 outputs = [ scatter(input[i], [devices[i // chunk_size]], [streams[i // chunk_size]]) for i in range(len(input)) ] return outputs elif isinstance(input, torch.Tensor): output = input.contiguous() # TODO: copy to a pinned buffer first (if copying from CPU) stream = streams[0] if output.numel() > 0 else None with torch.cuda.device(devices[0]), torch.cuda.stream(stream): output = output.cuda(devices[0], non_blocking=True) return output else: raise Exception('Unknown type {}.'.format(type(input)))
mmcv/parallel/_functions.py
open-mmlab/mmcv
color_val
def color_val(color): if is_str(color): return Color[color].value elif isinstance(color, Color): return color.value elif isinstance(color, tuple): assert len(color) == 3 for channel in color: assert channel >= 0 and channel <= 255 return color elif isinstance(color, int): assert color >= 0 and color <= 255 return color, color, color elif isinstance(color, np.ndarray): assert color.ndim == 1 and color.size == 3 assert np.all((color >= 0) & (color <= 255)) color = color.astype(np.uint8) return tuple(color) else: raise TypeError('Invalid type for color: {}'.format(type(color)))
Convert various input to color tuples.
def color_val(color): """Convert various input to color tuples. Args: color (:obj:`Color`/str/tuple/int/ndarray): Color inputs Returns: tuple[int]: A tuple of 3 integers indicating BGR channels. """ if is_str(color): return Color[color].value elif isinstance(color, Color): return color.value elif isinstance(color, tuple): assert len(color) == 3 for channel in color: assert channel >= 0 and channel <= 255 return color elif isinstance(color, int): assert color >= 0 and color <= 255 return color, color, color elif isinstance(color, np.ndarray): assert color.ndim == 1 and color.size == 3 assert np.all((color >= 0) & (color <= 255)) color = color.astype(np.uint8) return tuple(color) else: raise TypeError('Invalid type for color: {}'.format(type(color)))
mmcv/visualization/color.py
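A short sketch of the accepted inputs, assuming mmcv is importable; the named-color value comes from mmcv's Color enum (BGR order), which is not shown in this record.
>>> import mmcv
>>> mmcv.color_val('green')        # resolved through the Color enum
(0, 255, 0)
>>> mmcv.color_val(128)            # an int is expanded to a grey (B, G, R) triple
(128, 128, 128)
>>> mmcv.color_val((255, 0, 0))    # a valid 3-tuple is returned unchanged
(255, 0, 0)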
open-mmlab/mmcv
check_time
def check_time(timer_id): if timer_id not in _g_timers: _g_timers[timer_id] = Timer() return 0 else: return _g_timers[timer_id].since_last_check()
Add check points in a single line. This method is suitable for running a task on a list of items. A timer will be registered when the method is called for the first time.
def check_time(timer_id): """Add check points in a single line. This method is suitable for running a task on a list of items. A timer will be registered when the method is called for the first time. :Example: >>> import time >>> import mmcv >>> for i in range(1, 6): >>> # simulate a code block >>> time.sleep(i) >>> mmcv.check_time('task1') 2.000 3.000 4.000 5.000 Args: timer_id (str): Timer identifier. """ if timer_id not in _g_timers: _g_timers[timer_id] = Timer() return 0 else: return _g_timers[timer_id].since_last_check()
mmcv/utils/timer.py
open-mmlab/mmcv
Timer.start
def start(self): if not self._is_running: self._t_start = time() self._is_running = True self._t_last = time()
Start the timer.
def start(self): """Start the timer.""" if not self._is_running: self._t_start = time() self._is_running = True self._t_last = time()
mmcv/utils/timer.py
open-mmlab/mmcv
Timer.since_start
def since_start(self): if not self._is_running: raise TimerError('timer is not running') self._t_last = time() return self._t_last - self._t_start
Total time since the timer is started.
def since_start(self): """Total time since the timer is started. Returns (float): Time in seconds. """ if not self._is_running: raise TimerError('timer is not running') self._t_last = time() return self._t_last - self._t_start
mmcv/utils/timer.py
open-mmlab/mmcv
Timer.since_last_check
def since_last_check(self): if not self._is_running: raise TimerError('timer is not running') dur = time() - self._t_last self._t_last = time() return dur
Time since the last checking. Either :func:`since_start` or :func:`since_last_check` is a checking operation.
def since_last_check(self): """Time since the last checking. Either :func:`since_start` or :func:`since_last_check` is a checking operation. Returns (float): Time in seconds. """ if not self._is_running: raise TimerError('timer is not running') dur = time() - self._t_last self._t_last = time() return dur
mmcv/utils/timer.py
open-mmlab/mmcv
flowshow
def flowshow(flow, win_name='', wait_time=0): flow = flowread(flow) flow_img = flow2rgb(flow) imshow(rgb2bgr(flow_img), win_name, wait_time)
Show optical flow.
def flowshow(flow, win_name='', wait_time=0): """Show optical flow. Args: flow (ndarray or str): The optical flow to be displayed. win_name (str): The window name. wait_time (int): Value of waitKey param. """ flow = flowread(flow) flow_img = flow2rgb(flow) imshow(rgb2bgr(flow_img), win_name, wait_time)
mmcv/visualization/optflow.py
open-mmlab/mmcv
flow2rgb
def flow2rgb(flow, color_wheel=None, unknown_thr=1e6): assert flow.ndim == 3 and flow.shape[-1] == 2 if color_wheel is None: color_wheel = make_color_wheel() assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3 num_bins = color_wheel.shape[0] dx = flow[:, :, 0].copy() dy = flow[:, :, 1].copy() ignore_inds = (np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) | (np.abs(dy) > unknown_thr)) dx[ignore_inds] = 0 dy[ignore_inds] = 0 rad = np.sqrt(dx**2 + dy**2) if np.any(rad > np.finfo(float).eps): max_rad = np.max(rad) dx /= max_rad dy /= max_rad [h, w] = dx.shape rad = np.sqrt(dx**2 + dy**2) angle = np.arctan2(-dy, -dx) / np.pi bin_real = (angle + 1) / 2 * (num_bins - 1) bin_left = np.floor(bin_real).astype(int) bin_right = (bin_left + 1) % num_bins w = (bin_real - bin_left.astype(np.float32))[..., None] flow_img = ( 1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :] small_ind = rad <= 1 flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind]) flow_img[np.logical_not(small_ind)] *= 0.75 flow_img[ignore_inds, :] = 0 return flow_img
Convert flow map to RGB image.
def flow2rgb(flow, color_wheel=None, unknown_thr=1e6): """Convert flow map to RGB image. Args: flow (ndarray): Array of optical flow. color_wheel (ndarray or None): Color wheel used to map flow field to RGB colorspace. Default color wheel will be used if not specified. unknown_thr (str): Values above this threshold will be marked as unknown and thus ignored. Returns: ndarray: RGB image that can be visualized. """ assert flow.ndim == 3 and flow.shape[-1] == 2 if color_wheel is None: color_wheel = make_color_wheel() assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3 num_bins = color_wheel.shape[0] dx = flow[:, :, 0].copy() dy = flow[:, :, 1].copy() ignore_inds = (np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) | (np.abs(dy) > unknown_thr)) dx[ignore_inds] = 0 dy[ignore_inds] = 0 rad = np.sqrt(dx**2 + dy**2) if np.any(rad > np.finfo(float).eps): max_rad = np.max(rad) dx /= max_rad dy /= max_rad [h, w] = dx.shape rad = np.sqrt(dx**2 + dy**2) angle = np.arctan2(-dy, -dx) / np.pi bin_real = (angle + 1) / 2 * (num_bins - 1) bin_left = np.floor(bin_real).astype(int) bin_right = (bin_left + 1) % num_bins w = (bin_real - bin_left.astype(np.float32))[..., None] flow_img = ( 1 - w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :] small_ind = rad <= 1 flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind]) flow_img[np.logical_not(small_ind)] *= 0.75 flow_img[ignore_inds, :] = 0 return flow_img
mmcv/visualization/optflow.py
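A sketch of visualizing a synthetic flow field, assuming mmcv is importable; the field is random noise, so the example only demonstrates shapes and value range.
>>> import numpy as np, mmcv
>>> flow = np.random.randn(48, 64, 2).astype(np.float32)   # (h, w, 2) flow field
>>> rgb = mmcv.flow2rgb(flow)                               # float image with values in [0, 1]
>>> rgb.shape
(48, 64, 3)
To display or save the result with the BGR-based helpers above, convert it with mmcv.rgb2bgr first, as flowshow does.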
open-mmlab/mmcv
make_color_wheel
def make_color_wheel(bins=None): if bins is None: bins = [15, 6, 4, 11, 13, 6] assert len(bins) == 6 RY, YG, GC, CB, BM, MR = tuple(bins) ry = [1, np.arange(RY) / RY, 0] yg = [1 - np.arange(YG) / YG, 1, 0] gc = [0, 1, np.arange(GC) / GC] cb = [0, 1 - np.arange(CB) / CB, 1] bm = [np.arange(BM) / BM, 0, 1] mr = [1, 0, 1 - np.arange(MR) / MR] num_bins = RY + YG + GC + CB + BM + MR color_wheel = np.zeros((3, num_bins), dtype=np.float32) col = 0 for i, color in enumerate([ry, yg, gc, cb, bm, mr]): for j in range(3): color_wheel[j, col:col + bins[i]] = color[j] col += bins[i] return color_wheel.T
Build a color wheel.
def make_color_wheel(bins=None): """Build a color wheel. Args: bins(list or tuple, optional): Specify the number of bins for each color range, corresponding to six ranges: red -> yellow, yellow -> green, green -> cyan, cyan -> blue, blue -> magenta, magenta -> red. [15, 6, 4, 11, 13, 6] is used for default (see Middlebury). Returns: ndarray: Color wheel of shape (total_bins, 3). """ if bins is None: bins = [15, 6, 4, 11, 13, 6] assert len(bins) == 6 RY, YG, GC, CB, BM, MR = tuple(bins) ry = [1, np.arange(RY) / RY, 0] yg = [1 - np.arange(YG) / YG, 1, 0] gc = [0, 1, np.arange(GC) / GC] cb = [0, 1 - np.arange(CB) / CB, 1] bm = [np.arange(BM) / BM, 0, 1] mr = [1, 0, 1 - np.arange(MR) / MR] num_bins = RY + YG + GC + CB + BM + MR color_wheel = np.zeros((3, num_bins), dtype=np.float32) col = 0 for i, color in enumerate([ry, yg, gc, cb, bm, mr]): for j in range(3): color_wheel[j, col:col + bins[i]] = color[j] col += bins[i] return color_wheel.T
mmcv/visualization/optflow.py
open-mmlab/mmcv
accuracy
def accuracy(output, target, topk=(1, )): with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
Computes the precision@k for the specified values of k
def accuracy(output, target, topk=(1, )): """Computes the precision@k for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
examples/train_cifar10.py
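A worked sketch of the script-local accuracy helper above (it is defined in examples/train_cifar10.py, not exported by mmcv), assuming it is in scope and PyTorch is installed.
>>> import torch
>>> logits = torch.tensor([[0.1, 0.9, 0.0],     # sample 0: predicts class 1
...                        [0.7, 0.2, 0.1]])    # sample 1: predicts class 0
>>> target = torch.tensor([1, 1])
>>> accuracy(logits, target, topk=(1, 2))       # top-1: 1/2 correct; top-2: 2/2 correct
[tensor([50.]), tensor([100.])]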
open-mmlab/mmcv
scatter
def scatter(inputs, target_gpus, dim=0): def scatter_map(obj): if isinstance(obj, torch.Tensor): return OrigScatter.apply(target_gpus, None, dim, obj) if isinstance(obj, DataContainer): if obj.cpu_only: return obj.data else: return Scatter.forward(target_gpus, obj.data) if isinstance(obj, tuple) and len(obj) > 0: return list(zip(*map(scatter_map, obj))) if isinstance(obj, list) and len(obj) > 0: out = list(map(list, zip(*map(scatter_map, obj)))) return out if isinstance(obj, dict) and len(obj) > 0: out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) return out return [obj for targets in target_gpus] try: return scatter_map(inputs) finally: scatter_map = None
Scatter inputs to target gpus. The only difference from original :func:`scatter` is to add support for :type:`~mmcv.parallel.DataContainer`.
def scatter(inputs, target_gpus, dim=0): """Scatter inputs to target gpus. The only difference from original :func:`scatter` is to add support for :type:`~mmcv.parallel.DataContainer`. """ def scatter_map(obj): if isinstance(obj, torch.Tensor): return OrigScatter.apply(target_gpus, None, dim, obj) if isinstance(obj, DataContainer): if obj.cpu_only: return obj.data else: return Scatter.forward(target_gpus, obj.data) if isinstance(obj, tuple) and len(obj) > 0: return list(zip(*map(scatter_map, obj))) if isinstance(obj, list) and len(obj) > 0: out = list(map(list, zip(*map(scatter_map, obj)))) return out if isinstance(obj, dict) and len(obj) > 0: out = list(map(type(obj), zip(*map(scatter_map, obj.items())))) return out return [obj for targets in target_gpus] # After scatter_map is called, a scatter_map cell will exist. This cell # has a reference to the actual function scatter_map, which has references # to a closure that has a reference to the scatter_map cell (because the # fn is recursive). To avoid this reference cycle, we set the function to # None, clearing the cell try: return scatter_map(inputs) finally: scatter_map = None
mmcv/parallel/scatter_gather.py
open-mmlab/mmcv
scatter_kwargs
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0): inputs = scatter(inputs, target_gpus, dim) if inputs else [] kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) return inputs, kwargs
Scatter with support for kwargs dictionary
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0): """Scatter with support for kwargs dictionary""" inputs = scatter(inputs, target_gpus, dim) if inputs else [] kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) return inputs, kwargs
mmcv/parallel/scatter_gather.py