code stringlengths 17 6.64M |
|---|
@ACTIVATION_LAYERS.register_module()
class Swish(nn.Module):
'Swish Module.\n\n This module applies the swish function:\n\n .. math::\n Swish(x) = x * Sigmoid(x)\n\n Returns:\n Tensor: The output tensor.\n '
def __init__(self):
super(Swish, self).__init__()
def forward(... |
def build_positional_encoding(cfg, default_args=None):
    """Construct a positional-encoding module from a config dict.

    Args:
        cfg (dict): Config specifying the module type and its arguments.
        default_args (dict, optional): Fallback arguments merged into
            ``cfg`` by the registry builder. Default: None.

    Returns:
        The instantiated positional-encoding module.
    """
    return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
|
def build_attention(cfg, default_args=None):
    """Construct an attention module from a config dict.

    Args:
        cfg (dict): Config specifying the module type and its arguments.
        default_args (dict, optional): Fallback arguments merged into
            ``cfg`` by the registry builder. Default: None.

    Returns:
        The instantiated attention module.
    """
    return build_from_cfg(cfg, ATTENTION, default_args)
|
def build_feedforward_network(cfg, default_args=None):
    """Construct a feed-forward network (FFN) from a config dict.

    Args:
        cfg (dict): Config specifying the module type and its arguments.
        default_args (dict, optional): Fallback arguments merged into
            ``cfg`` by the registry builder. Default: None.

    Returns:
        The instantiated feed-forward network.
    """
    return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)
|
def build_transformer_layer(cfg, default_args=None):
    """Construct a transformer layer from a config dict.

    Args:
        cfg (dict): Config specifying the module type and its arguments.
        default_args (dict, optional): Fallback arguments merged into
            ``cfg`` by the registry builder. Default: None.

    Returns:
        The instantiated transformer layer.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)
|
def build_transformer_layer_sequence(cfg, default_args=None):
    """Construct a transformer encoder/decoder stack from a config dict.

    Args:
        cfg (dict): Config specifying the module type and its arguments.
        default_args (dict, optional): Fallback arguments merged into
            ``cfg`` by the registry builder. Default: None.

    Returns:
        The instantiated transformer layer sequence.
    """
    return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
|
class AdaptivePadding(nn.Module):
'Applies padding adaptively to the input.\n\n This module can make input get fully covered by filter\n you specified. It support two modes "same" and "corner". The\n "same" mode is same with "SAME" padding mode in TensorFlow, pad\n zero around input. The "corner" mod... |
class PatchEmbed(BaseModule):
'Image to Patch Embedding.\n\n We use a conv layer to implement PatchEmbed.\n\n Args:\n in_channels (int): The num of input channels. Default: 3\n embed_dims (int): The dimensions of embedding. Default: 768\n conv_type (str): The type of convolution\n ... |
class PatchMerging(BaseModule):
'Merge patch feature map.\n\n This layer groups feature map by kernel_size, and applies norm and linear\n layers to the grouped feature map ((used in Swin Transformer)).\n Our implementation uses `nn.Unfold` to\n merge patches, which is about 25% faster than the origina... |
@ATTENTION.register_module()
class MultiheadAttention(BaseModule):
'A wrapper for ``torch.nn.MultiheadAttention``.\n\n This module implements MultiheadAttention with identity connection,\n and positional encoding is also passed as input.\n\n Args:\n embed_dims (int): The embedding dimension.\n ... |
@FEEDFORWARD_NETWORK.register_module()
class FFN(BaseModule):
"Implements feed-forward networks (FFNs) with identity connection.\n\n Args:\n embed_dims (int): The feature dimension. Same as\n `MultiheadAttention`. Defaults: 256.\n feedforward_channels (int): The hidden dimension of FFN... |
@TRANSFORMER_LAYER.register_module()
class BaseTransformerLayer(BaseModule):
"Base `TransformerLayer` for vision transformer.\n\n It can be built from `mmcv.ConfigDict` and support more flexible\n customization, for example, using any number of `FFN or LN ` and\n use different kinds of `attention` by spe... |
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class TransformerLayerSequence(BaseModule):
'Base class for TransformerEncoder and TransformerDecoder in vision\n transformer.\n\n As base-class of Encoder and Decoder in vision transformer.\n Support customization such as specifying different kind\n of `t... |
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
'Pixel Shuffle upsample layer.\n\n This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to\n achieve a simple upsampling with pixel shuffle.\n\n Args:\n in_channels (int): Number of input cha... |
def build_upsample_layer(cfg, *args, **kwargs):
'Build upsample layer.\n\n Args:\n cfg (dict): The upsample layer config, which should contain:\n\n - type (str): Layer type.\n - scale_factor (int): Upsample ratio, which is not applicable to\n deconv.\n - lay... |
def obsolete_torch_version(torch_version, version_threshold):
    """Check whether the torch version is parrots or not newer than a threshold.

    Args:
        torch_version: Either the string ``'parrots'`` or a comparable
            version value (e.g. a tuple such as ``(1, 4)``).
        version_threshold: Version value to compare against.

    Returns:
        bool: True for parrots, or when
        ``torch_version <= version_threshold``.
    """
    if torch_version == 'parrots':
        return True
    return torch_version <= version_threshold
|
class NewEmptyTensorOp(torch.autograd.Function):
    """Autograd function creating an empty tensor of a requested shape.

    The input tensor's shape is stashed on the context so the backward
    pass can hand back an (empty) gradient of the original shape.
    """

    @staticmethod
    def forward(ctx, x, new_shape):
        # Remember the incoming shape for use in backward.
        ctx.shape = x.shape
        return x.new_empty(new_shape)

    @staticmethod
    def backward(ctx, grad):
        # Gradient w.r.t. ``x`` is an empty tensor of the original shape;
        # ``new_shape`` is not differentiable, hence the trailing None.
        original_shape = ctx.shape
        return NewEmptyTensorOp.apply(grad, original_shape), None
|
@CONV_LAYERS.register_module('Conv', force=True)
class Conv2d(nn.Conv2d):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
out_shape = [x.shape[0], self.out_channels]
for (i, k, p, s, d) in zip(x.shape[(- 2):], self.kernel_size, self.p... |
@CONV_LAYERS.register_module('Conv3d', force=True)
class Conv3d(nn.Conv3d):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
out_shape = [x.shape[0], self.out_channels]
for (i, k, p, s, d) in zip(x.shape[(- 3):], self.kernel_size, self... |
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv')
@UPSAMPLE_LAYERS.register_module('deconv', force=True)
class ConvTranspose2d(nn.ConvTranspose2d):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
out_shape = [x.shape[0], self.... |
@CONV_LAYERS.register_module()
@CONV_LAYERS.register_module('deconv3d')
@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
class ConvTranspose3d(nn.ConvTranspose3d):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 4))):
out_shape = [x.shape[0], s... |
class MaxPool2d(nn.MaxPool2d):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))):
out_shape = list(x.shape[:2])
for (i, k, p, s, d) in zip(x.shape[(- 2):], _pair(self.kernel_size), _pair(self.padding), _pair(self.stride), _pair(self.dila... |
class MaxPool3d(nn.MaxPool3d):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 9))):
out_shape = list(x.shape[:2])
for (i, k, p, s, d) in zip(x.shape[(- 3):], _triple(self.kernel_size), _triple(self.padding), _triple(self.stride), _triple(s... |
class Linear(torch.nn.Linear):
def forward(self, x):
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 5))):
out_shape = [x.shape[0], self.out_features]
empty = NewEmptyTensorOp.apply(x, out_shape)
if self.training:
dummy = (sum((x.view... |
def build_model_from_cfg(cfg, registry, default_args=None):
'Build a PyTorch model from config dict(s). Different from\n ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.\n\n Args:\n cfg (dict, list[dict]): The config of modules, is is either a config\n dict or a lis... |
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution with padding.

    Args:
        in_planes (int): Number of input channels.
        out_planes (int): Number of output channels.
        stride (int): Convolution stride. Default: 1.
        dilation (int): Convolution dilation. Default: 1.

    Returns:
        nn.Conv2d: A bias-free 3x3 convolution layer.
    """
    # padding == dilation keeps the spatial size unchanged for stride 1.
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
super(BasicBlock, self).__init__()
assert (style in ['pytorch', 'caffe'])
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
... |
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
'Bottleneck block.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer, if\n it is "caffe", the stride-two layer is t... |
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False):
downsample = None
if ((stride != 1) or (inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=Fal... |
class ResNet(nn.Module):
'ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\... |
def get_model_complexity_info(model, input_shape, print_per_layer_stat=True, as_strings=True, input_constructor=None, flush=False, ost=sys.stdout):
'Get complexity information of a model.\n\n This method can calculate FLOPs and parameter counts of a model with\n corresponding input shape. It can also print ... |
def flops_to_string(flops, units='GFLOPs', precision=2):
"Convert FLOPs number into a string.\n\n Note that Here we take a multiply-add counts as one FLOP.\n\n Args:\n flops (float): FLOPs number to be converted.\n units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',\n ... |
def params_to_string(num_params, units=None, precision=2):
"Convert parameter number into a string.\n\n Args:\n num_params (float): Parameter number to be converted.\n units (str | None): Converted FLOPs units. Options are None, 'M',\n 'K' and ''. If set to None, it will automatically ... |
def print_model_with_flops(model, total_flops, total_params, units='GFLOPs', precision=3, ost=sys.stdout, flush=False):
"Print a model with FLOPs for each layer.\n\n Args:\n model (nn.Module): The model to be printed.\n total_flops (float): Total FLOPs of the model.\n total_params (float):... |
def get_model_parameters_number(model):
'Calculate parameter number of a model.\n\n Args:\n model (nn.module): The model for parameter number calculation.\n\n Returns:\n float: Parameter number of the model.\n '
num_params = sum((p.numel() for p in model.parameters() if p.requires_grad)... |
def add_flops_counting_methods(net_main_module):
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.co... |
def compute_average_flops_cost(self):
'Compute average FLOPs cost.\n\n A method to compute average FLOPs cost, which will be available after\n `add_flops_counting_methods()` is called on a desired net object.\n\n Returns:\n float: Current mean flops consumption per image.\n '
batches_count ... |
def start_flops_count(self):
'Activate the computation of mean flops consumption per image.\n\n A method to activate the computation of mean flops consumption per image.\n which will be available after ``add_flops_counting_methods()`` is called on\n a desired net object. It should be called before runnin... |
def stop_flops_count(self):
'Stop computing the mean flops consumption per image.\n\n A method to stop computing the mean flops consumption per image, which will\n be available after ``add_flops_counting_methods()`` is called on a desired\n net object. It can be called to pause the computation whenever.\... |
def reset_flops_count(self):
'Reset statistics computed so far.\n\n A method to Reset computed statistics, which will be available after\n `add_flops_counting_methods()` is called on a desired net object.\n '
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_rese... |
def empty_flops_counter_hook(module, input, output):
    """Forward hook for modules that contribute no FLOPs.

    Adds zero to the ``__flops__`` accumulator so every hooked module
    exposes the same counter interface.
    """
    module.__flops__ += 0
|
def upsample_flops_counter_hook(module, input, output):
    """Count one FLOP per element of the upsampled output tensor."""
    out = output[0]
    # batch size times the product of all remaining output dimensions.
    count = out.shape[0]
    for extent in out.shape[1:]:
        count *= extent
    module.__flops__ += int(count)
|
def relu_flops_counter_hook(module, input, output):
    """Count one FLOP per element of the activation output."""
    module.__flops__ += int(output.numel())
|
def linear_flops_counter_hook(module, input, output):
    """Count FLOPs for a linear layer.

    Each element of the first positional input is combined with every one
    of the ``output.shape[-1]`` output features (multiply-add counted as
    one FLOP).
    """
    in_tensor = input[0]
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(in_tensor.shape) * out_features)
|
def pool_flops_counter_hook(module, input, output):
    """Count one FLOP per element of the pooling layer's input."""
    in_tensor = input[0]
    module.__flops__ += int(np.prod(in_tensor.shape))
|
def norm_flops_counter_hook(module, input, output):
    """Count FLOPs for a normalization layer.

    One FLOP per input element, doubled when the module reports a
    learnable affine transform (``affine`` or ``elementwise_affine``).
    """
    in_tensor = input[0]
    flops = np.prod(in_tensor.shape)
    has_affine = getattr(module, 'affine', False) or getattr(
        module, 'elementwise_affine', False)
    if has_affine:
        # Scale and shift add a second pass over the elements.
        flops *= 2
    module.__flops__ += int(flops)
|
def deconv_flops_counter_hook(conv_module, input, output):
input = input[0]
batch_size = input.shape[0]
(input_height, input_width) = input.shape[2:]
(kernel_height, kernel_width) = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups... |
def conv_flops_counter_hook(conv_module, input, output):
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
... |
def batch_counter_hook(module, input, output):
    """Accumulate the batch size seen by ``module``.

    The batch size is taken as ``len`` of the first positional input;
    with no positional inputs, a batch size of 1 is assumed and a
    warning is emitted.
    """
    if len(input) > 0:
        batch_size = len(input[0])
    else:
        warnings.warn('No positional inputs found for a module, assuming batch size is 1.')
        batch_size = 1
    module.__batch_counter__ += batch_size
|
def add_batch_counter_variables_or_reset(module):
    """Create or reset the module's accumulated batch counter to zero."""
    module.__batch_counter__ = 0
|
def add_batch_counter_hook_function(module):
    """Attach the batch-counting forward hook to ``module`` (idempotent)."""
    if hasattr(module, '__batch_counter_handle__'):
        # A handle is already stored: never register the hook twice.
        return
    module.__batch_counter_handle__ = module.register_forward_hook(
        batch_counter_hook)
|
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting forward hook from ``module``, if present."""
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__
|
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
if (hasattr(module, '__flops__') or hasattr(module, '__params__')):
warnings.warn((('variables __flops__ or __params__ are already defined for the module' + type(module).__name__) + ' ptflops can affect your cod... |
def is_supported_instance(module):
    """Return True when a FLOPs-counting hook exists for this exact type.

    An exact ``type`` lookup is used (not ``isinstance``), so subclasses
    of supported modules are not matched.
    """
    return type(module) in get_modules_mapping()
|
def remove_flops_counter_hook_function(module):
    """Detach the FLOPs-counting hook from ``module``, if one is attached."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
|
def get_modules_mapping():
return {nn.Conv1d: conv_flops_counter_hook, nn.Conv2d: conv_flops_counter_hook, mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook, nn.Conv3d: conv_flops_counter_hook, mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook, nn.ReLU: relu_flops_counter_hook, nn.PReLU: relu_flops_counter_hook, nn.E... |
def _fuse_conv_bn(conv, bn):
'Fuse conv and bn into one module.\n\n Args:\n conv (nn.Module): Conv to be fused.\n bn (nn.Module): BN to be fused.\n\n Returns:\n nn.Module: Fused module.\n '
conv_w = conv.weight
conv_b = (conv.bias if (conv.bias is not None) else torch.zeros_l... |
def fuse_conv_bn(module):
'Recursively fuse conv and bn in a module.\n\n During inference, the functionary of batch norm layers is turned off\n but only the mean and var alone channels are used, which exposes the\n chance to fuse it with the preceding conv layers to save computations and\n simplify ne... |
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
"A general BatchNorm layer without input dimension check.\n\n Reproduced from @kapily's work:\n (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)\n The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc\n ... |
def revert_sync_batchnorm(module):
"Helper function to convert all `SyncBatchNorm` (SyncBN) and\n `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to\n `BatchNormXd` layers.\n\n Adapted from @kapily's work:\n (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)\n\n ... |
def update_init_info(module, init_info):
'Update the `_params_init_info` in the module if the value of parameters\n are changed.\n\n Args:\n module (obj:`nn.Module`): The module of PyTorch with a user-defined\n attribute `_params_init_info` which records the initialization\n inf... |
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``.

    Either attribute is skipped when it is absent or ``None``.

    Args:
        module (nn.Module): Module whose parameters are initialized.
        val (int | float): Constant value for the weight.
        bias (int | float): Constant value for the bias. Default: 0.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
|
def xavier_init(module, gain=1, bias=0, distribution='normal'):
assert (distribution in ['uniform', 'normal'])
if (hasattr(module, 'weight') and (module.weight is not None)):
if (distribution == 'uniform'):
nn.init.xavier_uniform_(module.weight, gain=gain)
else:
nn.init... |
def normal_init(module, mean=0, std=1, bias=0):
    """Initialize weights from N(mean, std^2) and set the bias constant.

    Either attribute is skipped when it is absent or ``None``.

    Args:
        module (nn.Module): Module whose parameters are initialized.
        mean (int | float): Mean of the normal distribution. Default: 0.
        std (int | float): Std of the normal distribution. Default: 1.
        bias (int | float): Constant value for the bias. Default: 0.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.normal_(weight, mean, std)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
|
def trunc_normal_init(module: nn.Module, mean: float=0, std: float=1, a: float=(- 2), b: float=2, bias: float=0) -> None:
if (hasattr(module, 'weight') and (module.weight is not None)):
trunc_normal_(module.weight, mean, std, a, b)
if (hasattr(module, 'bias') and (module.bias is not None)):
nn... |
def uniform_init(module, a=0, b=1, bias=0):
    """Initialize weights from U(a, b) and set the bias constant.

    Either attribute is skipped when it is absent or ``None``.

    Args:
        module (nn.Module): Module whose parameters are initialized.
        a (int | float): Lower bound of the uniform distribution. Default: 0.
        b (int | float): Upper bound of the uniform distribution. Default: 1.
        bias (int | float): Constant value for the bias. Default: 0.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.uniform_(weight, a, b)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
|
def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'):
assert (distribution in ['uniform', 'normal'])
if (hasattr(module, 'weight') and (module.weight is not None)):
if (distribution == 'uniform'):
nn.init.kaiming_uniform_(module.weight, a=a, mod... |
def caffe2_xavier_init(module, bias=0):
    """Initialize ``module`` in the Caffe2-Xavier style.

    Delegates to a Kaiming init with fan-in mode and a uniform
    distribution; only the bias value is configurable here.
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
|
def bias_init_with_prob(prior_prob):
    """initialize conv/fc bias value according to a given probability value."""
    # Inverse sigmoid: choosing b = -log((1 - p) / p) makes sigmoid(b) == p.
    return float(-np.log((1 - prior_prob) / prior_prob))
|
def _get_bases_name(m):
return [b.__name__ for b in m.__class__.__bases__]
|
class BaseInit(object):
def __init__(self, *, bias=0, bias_prob=None, layer=None):
self.wholemodule = False
if (not isinstance(bias, (int, float))):
raise TypeError(f'bias must be a number, but got a {type(bias)}')
if (bias_prob is not None):
if (not isinstance(bia... |
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
'Initialize module parameters with constant values.\n\n Args:\n val (int | float): the value to fill the weights in the module with\n bias (int | float): the value to fill the bias. Defaults to 0.\n bias_prob (flo... |
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
"Initialize module parameters with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks - Glorot, X. & Bengio, Y. (2010).\n <http://proceedings.mlr.press/v9/gloro... |
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
'Initialize module parameters with the values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`.\n\n Args:\n mean (int | float):the mean of the normal distribution. Defaults to 0.\n st... |
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
'Initialize module parameters with the values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values\n outside :math:`[a, b]`.\n\n Args:\n mean (float): the mean of the no... |
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
'Initialize module parameters with values drawn from the uniform\n distribution :math:`\\mathcal{U}(a, b)`.\n\n Args:\n a (int | float): the lower bound of the uniform distribution.\n Defaults to 0.\n b (int ... |
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
"Initialize module parameters with the values according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification - He, K. et al. (2015).\n <https://www.cv-foundati... |
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-Xavier initializer.

    A Kaiming init pinned to fan-in mode with a uniform distribution;
    remaining keyword arguments are forwarded to the parent.
    """

    def __init__(self, **kwargs):
        super().__init__(
            a=1,
            mode='fan_in',
            nonlinearity='leaky_relu',
            distribution='uniform',
            **kwargs)

    def __call__(self, module):
        # Delegates entirely to the parent initializer.
        super().__call__(module)
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
"Initialize module by loading a pretrained model.\n\n Args:\n checkpoint (str): the checkpoint file of the pretrained model should\n be load.\n prefix (str, optional): the prefix of a sub-module in the pretr... |
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``.

    The ``wholemodule`` flag is forwarded to the built initializer as an
    attribute (presumably consumed when it traverses the module — not
    visible from here; confirm against the initializer classes).
    """
    initializer = build_from_cfg(cfg, INITIALIZERS)
    initializer.wholemodule = wholemodule
    initializer(module)
|
def _initialize_override(module, override, cfg):
if (not isinstance(override, (dict, list))):
raise TypeError(f'override must be a dict or a list of dict, but got {type(override)}')
override = ([override] if isinstance(override, dict) else override)
for override_ in override:
... |
def initialize(module, init_cfg):
'Initialize a module.\n\n Args:\n module (``torch.nn.Module``): the module will be initialized.\n init_cfg (dict | list[dict]): initialization configuration dict to\n define initializer. OpenMMLab has implemented 6 initializers\n including `... |
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor:
def norm_cdf(x):
return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0)
if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))):
warnings.warn('mean is more than 2 std from [a, b] in nn.init.tr... |
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor:
'Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n ... |
def conv3x3(in_planes, out_planes, dilation=1):
    """3x3 convolution with padding.

    Args:
        in_planes (int): Number of input channels.
        out_planes (int): Number of output channels.
        dilation (int): Convolution dilation. Default: 1.

    Returns:
        nn.Conv2d: A 3x3 convolution layer (with bias).
    """
    # padding == dilation keeps the spatial resolution unchanged.
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=dilation,
        dilation=dilation)
|
def make_vgg_layer(inplanes, planes, num_blocks, dilation=1, with_bn=False, ceil_mode=False):
layers = []
for _ in range(num_blocks):
layers.append(conv3x3(inplanes, planes, dilation))
if with_bn:
layers.append(nn.BatchNorm2d(planes))
layers.append(nn.ReLU(inplace=True))
... |
class VGG(nn.Module):
'VGG backbone.\n\n Args:\n depth (int): Depth of vgg, from {11, 13, 16, 19}.\n with_bn (bool): Use BatchNorm or not.\n num_classes (int): number of classes for classification.\n num_stages (int): VGG stages, normally 5.\n dilations (Sequence[int]): Dilat... |
def single_gpu_test(model, data_loader):
'Test model with a single gpu.\n\n This method tests model with a single gpu and displays test progress bar.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n\n Returns:\n list: The predic... |
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
'Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting\n ``gpu_collect=True``, it encodes results to gpu tensors and use gpu\... |
def collect_results_cpu(result_part, size, tmpdir=None):
'Collect results under cpu mode.\n\n On cpu mode, this function will save the results on different gpus to\n ``tmpdir`` and collect them by the rank 0 worker.\n\n Args:\n result_part (list): Result list containing result parts\n t... |
def collect_results_gpu(result_part, size):
'Collect results under gpu mode.\n\n On gpu mode, this function will encode results to gpu tensors and use gpu\n communication for results collection.\n\n Args:\n result_part (list): Result list containing result parts\n to be collected.\n ... |
class BaseStorageBackend(metaclass=ABCMeta):
'Abstract class of storage backends.\n\n All backends need to implement two apis: ``get()`` and ``get_text()``.\n ``get()`` reads the file as a byte stream and ``get_text()`` reads the file\n as texts.\n '
_allow_symlink = False
@property
def n... |
class CephBackend(BaseStorageBackend):
"Ceph storage backend (for internal use).\n\n Args:\n path_mapping (dict|None): path mapping dict from local path to Petrel\n path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``\n will be replaced by ``dst``. Default: None.\n\n... |
class PetrelBackend(BaseStorageBackend):
"Petrel storage backend (for internal use).\n\n PetrelBackend supports reading and writing data to multiple clusters.\n If the file path contains the cluster name, PetrelBackend will read data\n from specified cluster or write data to it. Otherwise, PetrelBackend ... |
class MemcachedBackend(BaseStorageBackend):
'Memcached storage backend.\n\n Attributes:\n server_list_cfg (str): Config file for memcached server list.\n client_cfg (str): Config file for memcached client.\n sys_path (str | None): Additional path to be appended to `sys.path`.\n ... |
class LmdbBackend(BaseStorageBackend):
'Lmdb storage backend.\n\n Args:\n db_path (str): Lmdb database path.\n readonly (bool, optional): Lmdb environment parameter. If True,\n disallow any write operations. Default: True.\n lock (bool, optional): Lmdb environment parameter. If ... |
class HardDiskBackend(BaseStorageBackend):
'Raw hard disks storage backend.'
_allow_symlink = True
def get(self, filepath: Union[(str, Path)]) -> bytes:
"Read data from a given ``filepath`` with 'rb' mode.\n\n Args:\n filepath (str or Path): Path to read data.\n\n Returns... |
class HTTPBackend(BaseStorageBackend):
'HTTP and HTTPS storage bachend.'
def get(self, filepath):
value_buf = urlopen(filepath).read()
return value_buf
def get_text(self, filepath, encoding='utf-8'):
value_buf = urlopen(filepath).read()
return value_buf.decode(encoding)
... |
class FileClient():
'A general file client to access files in different backends.\n\n The client loads a file or text in a specified backend from its path\n and returns it as a binary or text file. There are two ways to choose a\n backend, the name of backend and the prefix of path. Although both of them... |
class BaseFileHandler(metaclass=ABCMeta):
str_like = True
@abstractmethod
def load_from_fileobj(self, file, **kwargs):
pass
@abstractmethod
def dump_to_fileobj(self, obj, file, **kwargs):
pass
@abstractmethod
def dump_to_str(self, obj, **kwargs):
pass
def lo... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.