code
stringlengths
17
6.64M
def upsample_flops_counter_hook(module, input, output):
    """Accumulate FLOPs for an upsample layer: one op per output element.

    NOTE(review): ``output[0]`` is indexed before counting, so for a plain
    tensor output the count covers a single sample rather than the whole
    batch — this mirrors the upstream ptflops hook; confirm it is intended.
    """
    sample = output[0]
    count = sample.shape[0]
    for extent in sample.shape[1:]:
        count *= extent
    module.__flops__ += int(count)
def relu_flops_counter_hook(module, input, output):
    """Accumulate FLOPs for an activation layer: one op per output element."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Accumulate FLOPs for a linear layer.

    Counts one multiply-accumulate per input element per output feature,
    i.e. ``prod(input.shape) * output.shape[-1]``.
    """
    first_input = input[0]
    out_features = output.shape[-1]
    module.__flops__ += int(np.prod(first_input.shape) * out_features)
def pool_flops_counter_hook(module, input, output):
    """Accumulate FLOPs for a pooling layer: one op per input element."""
    module.__flops__ += int(np.prod(input[0].shape))
def norm_flops_counter_hook(module, input, output):
    """Accumulate FLOPs for a normalisation layer.

    One op per input element; doubled when the layer carries learnable
    affine parameters (``affine`` for BatchNorm-style modules,
    ``elementwise_affine`` for LayerNorm-style modules).
    """
    elements = np.prod(input[0].shape)
    has_affine = (getattr(module, 'affine', False)
                  or getattr(module, 'elementwise_affine', False))
    if has_affine:
        elements *= 2
    module.__flops__ += int(elements)
def deconv_flops_counter_hook(conv_module, input, output): input = input[0] batch_size = input.shape[0] (input_height, input_width) = input.shape[2:] (kernel_height, kernel_width) = conv_module.kernel_size in_channels = conv_module.in_channels out_channels = conv_module.out_channels groups...
def conv_flops_counter_hook(conv_module, input, output): input = input[0] batch_size = input.shape[0] output_dims = list(output.shape[2:]) kernel_dims = list(conv_module.kernel_size) in_channels = conv_module.in_channels out_channels = conv_module.out_channels groups = conv_module.groups ...
def batch_counter_hook(module, input, output):
    """Accumulate the batch size seen by ``module``.

    Falls back to a batch size of 1 (with a warning) when the module
    received no positional inputs.
    """
    if len(input) > 0:
        size = len(input[0])
    else:
        size = 1
        warnings.warn('No positional inputs found for a module, '
                      'assuming batch size is 1.')
    module.__batch_counter__ += size
def add_batch_counter_variables_or_reset(module):
    """Initialise (or reset) the module's batch counter to zero."""
    setattr(module, '__batch_counter__', 0)
def add_batch_counter_hook_function(module):
    """Register ``batch_counter_hook`` on ``module`` exactly once.

    The forward-hook handle is stashed on the module so repeated calls
    are no-ops and the hook can be removed later.
    """
    if hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__ = module.register_forward_hook(
        batch_counter_hook)
def remove_batch_counter_hook_function(module):
    """Detach a previously registered batch-counter hook, if any."""
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module): if is_supported_instance(module): if (hasattr(module, '__flops__') or hasattr(module, '__params__')): warnings.warn((('variables __flops__ or __params__ are already defined for the module' + type(module).__name__) + ' ptflops can affect your cod...
def is_supported_instance(module):
    """Return True if a FLOPs-counting hook exists for this module's type.

    Uses an exact ``type`` match against the hook mapping, so subclasses
    of supported modules are NOT considered supported.
    """
    return type(module) in get_modules_mapping()
def remove_flops_counter_hook_function(module):
    """Detach the FLOPs-counting hook from a supported ``module``."""
    if not is_supported_instance(module):
        return
    if hasattr(module, '__flops_handle__'):
        module.__flops_handle__.remove()
        del module.__flops_handle__
def get_modules_mapping(): return {nn.Conv1d: conv_flops_counter_hook, nn.Conv2d: conv_flops_counter_hook, mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook, nn.Conv3d: conv_flops_counter_hook, mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook, nn.ReLU: relu_flops_counter_hook, nn.PReLU: relu_flops_counter_hook, nn.E...
def _fuse_conv_bn(conv, bn): 'Fuse conv and bn into one module.\n\n Args:\n conv (nn.Module): Conv to be fused.\n bn (nn.Module): BN to be fused.\n\n Returns:\n nn.Module: Fused module.\n ' conv_w = conv.weight conv_b = (conv.bias if (conv.bias is not None) else torch.zeros_l...
def fuse_conv_bn(module): 'Recursively fuse conv and bn in a module.\n\n During inference, the functionary of batch norm layers is turned off\n but only the mean and var alone channels are used, which exposes the\n chance to fuse it with the preceding conv layers to save computations and\n simplify ne...
class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): "A general BatchNorm layer without input dimension check.\n\n Reproduced from @kapily's work:\n (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)\n The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc\n ...
def revert_sync_batchnorm(module): "Helper function to convert all `SyncBatchNorm` (SyncBN) and\n `mmcv.ops.sync_bn.SyncBatchNorm`(MMSyncBN) layers in the model to\n `BatchNormXd` layers.\n\n Adapted from @kapily's work:\n (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)\n\n ...
def update_init_info(module, init_info): 'Update the `_params_init_info` in the module if the value of parameters\n are changed.\n\n Args:\n module (obj:`nn.Module`): The module of PyTorch with a user-defined\n attribute `_params_init_info` which records the initialization\n inf...
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``.

    Parameters that are missing or ``None`` are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'): assert (distribution in ['uniform', 'normal']) if (hasattr(module, 'weight') and (module.weight is not None)): if (distribution == 'uniform'): nn.init.xavier_uniform_(module.weight, gain=gain) else: nn.init...
def normal_init(module, mean=0, std=1, bias=0):
    """Draw weights from N(mean, std**2) and set the bias to a constant.

    Parameters that are missing or ``None`` are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.normal_(weight, mean, std)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def trunc_normal_init(module: nn.Module, mean: float=0, std: float=1, a: float=(- 2), b: float=2, bias: float=0) -> None: if (hasattr(module, 'weight') and (module.weight is not None)): trunc_normal_(module.weight, mean, std, a, b) if (hasattr(module, 'bias') and (module.bias is not None)): nn...
def uniform_init(module, a=0, b=1, bias=0):
    """Draw weights from U(a, b) and set the bias to a constant.

    Parameters that are missing or ``None`` are silently skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.uniform_(weight, a, b)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def kaiming_init(module, a=0, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'): assert (distribution in ['uniform', 'normal']) if (hasattr(module, 'weight') and (module.weight is not None)): if (distribution == 'uniform'): nn.init.kaiming_uniform_(module.weight, a=a, mod...
def caffe2_xavier_init(module, bias=0):
    """Initialise ``module`` like Caffe2's "XavierFill".

    Caffe2's XavierFill corresponds to Kaiming uniform initialisation
    with ``a=1`` and fan-in scaling, not true Xavier.
    """
    kaiming_init(
        module,
        a=1,
        mode='fan_in',
        nonlinearity='leaky_relu',
        bias=bias,
        distribution='uniform')
def bias_init_with_prob(prior_prob):
    """Initialize conv/fc bias value according to a given probability value.

    Returns the bias ``-log((1 - p) / p)`` such that a sigmoid over the
    initialised logits yields ``prior_prob``.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
def _get_bases_name(m): return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object): def __init__(self, *, bias=0, bias_prob=None, layer=None): self.wholemodule = False if (not isinstance(bias, (int, float))): raise TypeError(f'bias must be a number, but got a {type(bias)}') if (bias_prob is not None): if (not isinstance(bia...
@INITIALIZERS.register_module(name='Constant') class ConstantInit(BaseInit): 'Initialize module parameters with constant values.\n\n Args:\n val (int | float): the value to fill the weights in the module with\n bias (int | float): the value to fill the bias. Defaults to 0.\n bias_prob (flo...
@INITIALIZERS.register_module(name='Xavier') class XavierInit(BaseInit): "Initialize module parameters with values according to the method\n described in `Understanding the difficulty of training deep feedforward\n neural networks - Glorot, X. & Bengio, Y. (2010).\n <http://proceedings.mlr.press/v9/gloro...
@INITIALIZERS.register_module(name='Normal') class NormalInit(BaseInit): 'Initialize module parameters with the values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`.\n\n Args:\n mean (int | float):the mean of the normal distribution. Defaults to 0.\n st...
@INITIALIZERS.register_module(name='TruncNormal') class TruncNormalInit(BaseInit): 'Initialize module parameters with the values drawn from the normal\n distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)` with values\n outside :math:`[a, b]`.\n\n Args:\n mean (float): the mean of the no...
@INITIALIZERS.register_module(name='Uniform') class UniformInit(BaseInit): 'Initialize module parameters with values drawn from the uniform\n distribution :math:`\\mathcal{U}(a, b)`.\n\n Args:\n a (int | float): the lower bound of the uniform distribution.\n Defaults to 0.\n b (int ...
@INITIALIZERS.register_module(name='Kaiming') class KaimingInit(BaseInit): "Initialize module parameters with the values according to the method\n described in `Delving deep into rectifiers: Surpassing human-level\n performance on ImageNet classification - He, K. et al. (2015).\n <https://www.cv-foundati...
@INITIALIZERS.register_module(name='Caffe2Xavier') class Caffe2XavierInit(KaimingInit): def __init__(self, **kwargs): super().__init__(a=1, mode='fan_in', nonlinearity='leaky_relu', distribution='uniform', **kwargs) def __call__(self, module): super().__call__(module)
@INITIALIZERS.register_module(name='Pretrained') class PretrainedInit(object): "Initialize module by loading a pretrained model.\n\n Args:\n checkpoint (str): the checkpoint file of the pretrained model should\n be load.\n prefix (str, optional): the prefix of a sub-module in the pretr...
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``.

    ``wholemodule`` is forwarded to the initializer to flag that its
    ``layer`` filter should be ignored and the whole module touched.
    """
    initializer = build_from_cfg(cfg, INITIALIZERS)
    initializer.wholemodule = wholemodule
    initializer(module)
def _initialize_override(module, override, cfg): if (not isinstance(override, (dict, list))): raise TypeError(f'override must be a dict or a list of dict, but got {type(override)}') override = ([override] if isinstance(override, dict) else override) for override_ in override: ...
def initialize(module, init_cfg): 'Initialize a module.\n\n Args:\n module (``torch.nn.Module``): the module will be initialized.\n init_cfg (dict | list[dict]): initialization configuration dict to\n define initializer. OpenMMLab has implemented 6 initializers\n including `...
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float, b: float) -> Tensor: def norm_cdf(x): return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0) if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))): warnings.warn('mean is more than 2 std from [a, b] in nn.init.tr...
def trunc_normal_(tensor: Tensor, mean: float=0.0, std: float=1.0, a: float=(- 2.0), b: float=2.0) -> Tensor: 'Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n ...
def conv3x3(in_planes, out_planes, dilation=1):
    """3x3 convolution whose padding equals the dilation (preserves H x W)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        padding=dilation,
        dilation=dilation)
def make_vgg_layer(inplanes, planes, num_blocks, dilation=1, with_bn=False, ceil_mode=False): layers = [] for _ in range(num_blocks): layers.append(conv3x3(inplanes, planes, dilation)) if with_bn: layers.append(nn.BatchNorm2d(planes)) layers.append(nn.ReLU(inplace=True)) ...
class VGG(nn.Module): 'VGG backbone.\n\n Args:\n depth (int): Depth of vgg, from {11, 13, 16, 19}.\n with_bn (bool): Use BatchNorm or not.\n num_classes (int): number of classes for classification.\n num_stages (int): VGG stages, normally 5.\n dilations (Sequence[int]): Dilat...
def single_gpu_test(model, data_loader): 'Test model with a single gpu.\n\n This method tests model with a single gpu and displays test progress bar.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n\n Returns:\n list: The predic...
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): 'Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting\n ``gpu_collect=True``, it encodes results to gpu tensors and use gpu\...
def collect_results_cpu(result_part, size, tmpdir=None): 'Collect results under cpu mode.\n\n On cpu mode, this function will save the results on different gpus to\n ``tmpdir`` and collect them by the rank 0 worker.\n\n Args:\n result_part (list): Result list containing result parts\n t...
def collect_results_gpu(result_part, size): 'Collect results under gpu mode.\n\n On gpu mode, this function will encode results to gpu tensors and use gpu\n communication for results collection.\n\n Args:\n result_part (list): Result list containing result parts\n to be collected.\n ...
class BaseStorageBackend(metaclass=ABCMeta): 'Abstract class of storage backends.\n\n All backends need to implement two apis: ``get()`` and ``get_text()``.\n ``get()`` reads the file as a byte stream and ``get_text()`` reads the file\n as texts.\n ' _allow_symlink = False @property def n...
class CephBackend(BaseStorageBackend): "Ceph storage backend (for internal use).\n\n Args:\n path_mapping (dict|None): path mapping dict from local path to Petrel\n path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``\n will be replaced by ``dst``. Default: None.\n\n...
class PetrelBackend(BaseStorageBackend): "Petrel storage backend (for internal use).\n\n PetrelBackend supports reading and writing data to multiple clusters.\n If the file path contains the cluster name, PetrelBackend will read data\n from specified cluster or write data to it. Otherwise, PetrelBackend ...
class MemcachedBackend(BaseStorageBackend): 'Memcached storage backend.\n\n Attributes:\n server_list_cfg (str): Config file for memcached server list.\n client_cfg (str): Config file for memcached client.\n sys_path (str | None): Additional path to be appended to `sys.path`.\n ...
class LmdbBackend(BaseStorageBackend): 'Lmdb storage backend.\n\n Args:\n db_path (str): Lmdb database path.\n readonly (bool, optional): Lmdb environment parameter. If True,\n disallow any write operations. Default: True.\n lock (bool, optional): Lmdb environment parameter. If ...
class HardDiskBackend(BaseStorageBackend): 'Raw hard disks storage backend.' _allow_symlink = True def get(self, filepath: Union[(str, Path)]) -> bytes: "Read data from a given ``filepath`` with 'rb' mode.\n\n Args:\n filepath (str or Path): Path to read data.\n\n Returns...
class HTTPBackend(BaseStorageBackend): 'HTTP and HTTPS storage bachend.' def get(self, filepath): value_buf = urlopen(filepath).read() return value_buf def get_text(self, filepath, encoding='utf-8'): value_buf = urlopen(filepath).read() return value_buf.decode(encoding) ...
class FileClient(): 'A general file client to access files in different backends.\n\n The client loads a file or text in a specified backend from its path\n and returns it as a binary or text file. There are two ways to choose a\n backend, the name of backend and the prefix of path. Although both of them...
class BaseFileHandler(metaclass=ABCMeta): str_like = True @abstractmethod def load_from_fileobj(self, file, **kwargs): pass @abstractmethod def dump_to_fileobj(self, obj, file, **kwargs): pass @abstractmethod def dump_to_str(self, obj, **kwargs): pass def lo...
def set_default(obj): 'Set default json values for non-serializable values.\n\n It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list.\n It also converts ``np.generic`` (including ``np.int32``, ``np.float32``,\n etc.) into plain numbers of plain python built-in types.\n ' if is...
class JsonHandler(BaseFileHandler): def load_from_fileobj(self, file): return json.load(file) def dump_to_fileobj(self, obj, file, **kwargs): kwargs.setdefault('default', set_default) json.dump(obj, file, **kwargs) def dump_to_str(self, obj, **kwargs): kwargs.setdefault(...
class PickleHandler(BaseFileHandler): str_like = False def load_from_fileobj(self, file, **kwargs): return pickle.load(file, **kwargs) def load_from_path(self, filepath, **kwargs): return super(PickleHandler, self).load_from_path(filepath, mode='rb', **kwargs) def dump_to_str(self, ...
class YamlHandler(BaseFileHandler): def load_from_fileobj(self, file, **kwargs): kwargs.setdefault('Loader', Loader) return yaml.load(file, **kwargs) def dump_to_fileobj(self, obj, file, **kwargs): kwargs.setdefault('Dumper', Dumper) yaml.dump(obj, file, **kwargs) def du...
def load(file, file_format=None, file_client_args=None, **kwargs): 'Load data from json/yaml/pickle files.\n\n This method provides a unified api for loading data from serialized files.\n\n Note:\n In v1.3.16 and later, ``load`` supports loading data from serialized\n files those can be storag...
def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs): "Dump data to json/yaml/pickle strings or files.\n\n This method provides a unified api for dumping data as strings or to files,\n and also supports custom arguments for each file format.\n\n Note:\n In v1.3.16 and later,...
def _register_handler(handler, file_formats): 'Register a handler for some file extensions.\n\n Args:\n handler (:obj:`BaseFileHandler`): Handler to be registered.\n file_formats (str or list[str]): File formats to be handled by this\n handler.\n ' if (not isinstance(handler, Ba...
def register_handler(file_formats, **kwargs): def wrap(cls): _register_handler(cls(**kwargs), file_formats) return cls return wrap
def list_from_file(filename, prefix='', offset=0, max_num=0, encoding='utf-8', file_client_args=None): "Load a text file and parse the content as a list of strings.\n\n Note:\n In v1.3.16 and later, ``list_from_file`` supports loading a text file\n which can be storaged in different backends and ...
def dict_from_file(filename, key_type=str, encoding='utf-8', file_client_args=None): "Load a text file and parse the content as a dict.\n\n Each line of the text file will be two or more columns split by\n whitespaces or tabs. The first column will be parsed as dict keys, and\n the following columns will...
def imconvert(img, src, dst): "Convert an image from the src colorspace to dst colorspace.\n\n Args:\n img (ndarray): The input image.\n src (str): The source colorspace, e.g., 'rgb', 'hsv'.\n dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.\n\n Returns:\n ndarray: The ...
def bgr2gray(img, keepdim=False): 'Convert a BGR image to grayscale image.\n\n Args:\n img (ndarray): The input image.\n keepdim (bool): If False (by default), then return the grayscale image\n with 2 dims, otherwise 3 dims.\n\n Returns:\n ndarray: The converted grayscale ima...
def rgb2gray(img, keepdim=False): 'Convert a RGB image to grayscale image.\n\n Args:\n img (ndarray): The input image.\n keepdim (bool): If False (by default), then return the grayscale image\n with 2 dims, otherwise 3 dims.\n\n Returns:\n ndarray: The converted grayscale ima...
def gray2bgr(img):
    """Convert a grayscale image to BGR image.

    Args:
        img (ndarray): The input image, 2-D or with an explicit channel dim.

    Returns:
        ndarray: The converted BGR image.
    """
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
def gray2rgb(img):
    """Convert a grayscale image to RGB image.

    Args:
        img (ndarray): The input image, 2-D or with an explicit channel dim.

    Returns:
        ndarray: The converted RGB image.
    """
    if img.ndim == 2:
        img = img[..., None]
    return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
def _convert_input_type_range(img): 'Convert the type and range of the input image.\n\n It converts the input image to np.float32 type and range of [0, 1].\n It is mainly used for pre-processing the input image in colorspace\n conversion functions such as rgb2ycbcr and ycbcr2rgb.\n\n Args:\n im...
def _convert_output_type_range(img, dst_type): 'Convert the type and range of the image according to dst_type.\n\n It converts the image to desired type and range. If `dst_type` is np.uint8,\n images will be converted to np.uint8 type with range [0, 255]. If\n `dst_type` is np.float32, it converts the im...
def rgb2ycbcr(img, y_only=False): "Convert a RGB image to YCbCr image.\n\n This function produces the same results as Matlab's `rgb2ycbcr` function.\n It implements the ITU-R BT.601 conversion for standard-definition\n television. See more details in\n https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_...
def bgr2ycbcr(img, y_only=False): 'Convert a BGR image to YCbCr image.\n\n The bgr version of rgb2ycbcr.\n It implements the ITU-R BT.601 conversion for standard-definition\n television. See more details in\n https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.\n\n It differs from a similar...
def ycbcr2rgb(img): "Convert a YCbCr image to RGB image.\n\n This function produces the same results as Matlab's ycbcr2rgb function.\n It implements the ITU-R BT.601 conversion for standard-definition\n television. See more details in\n https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.\n\n ...
def ycbcr2bgr(img): 'Convert a YCbCr image to BGR image.\n\n The bgr version of ycbcr2rgb.\n It implements the ITU-R BT.601 conversion for standard-definition\n television. See more details in\n https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.\n\n It differs from a similar function in c...
def convert_color_factory(src, dst): code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}') def convert_color(img): out_img = cv2.cvtColor(img, code) return out_img convert_color.__doc__ = f'''Convert a {src.upper()} image to {dst.upper()} image. Args: img (ndarray...
def tensor2imgs(tensor, mean=None, std=None, to_rgb=True): 'Convert tensor to 3-channel images or 1-channel gray images.\n\n Args:\n tensor (torch.Tensor): Tensor that contains multiple images, shape (\n N, C, H, W). :math:`C` can be either 3 or 1.\n mean (tuple[float], optional): Mean...
def is_custom_op_loaded(): (bright_style, reset_style) = ('\x1b[1m', '\x1b[0m') (red_text, blue_text) = ('\x1b[31m', '\x1b[34m') white_background = '\x1b[107m' msg = ((white_background + bright_style) + red_text) msg += 'DeprecationWarning: This function will be deprecated in future. ' msg += ...
def _parse_arg(value, desc): if (desc == 'none'): return value if ((desc == 'v') or (not _is_value(value))): return value if value.node().mustBeNone(): return None if (value.node().kind() == 'onnx::Constant'): tval = value.node()['value'] if (desc == 'i'): ...
def _maybe_get_const(value, desc):
    """Parse ``value`` as a constant of kind ``desc`` when it is an ONNX
    Constant node; otherwise return it untouched."""
    if _is_value(value) and value.node().kind() == 'onnx::Constant':
        return _parse_arg(value, desc)
    return value
def _maybe_get_scalar(value):
    """Reduce ``value`` to a 0-dim tensor when it is a scalar constant;
    otherwise return it untouched."""
    const = _maybe_get_const(value, 't')
    if isinstance(const, torch.Tensor) and const.shape == ():
        return const
    return value
def _get_const(value, desc, arg_name):
    """Parse a required constant argument.

    Raises:
        RuntimeError: if ``value`` is a graph value that is not a constant.
    """
    non_const = _is_value(value) and value.node().kind() not in (
        'onnx::Constant', 'prim::Constant')
    if non_const:
        raise RuntimeError(
            'ONNX symbolic expected a constant value of the {} argument, '
            'got `{}`'.format(arg_name, value))
    return _parse_arg(value, desc)
def _unpack_list(list_value): list_node = list_value.node() assert (list_node.kind() == 'prim::ListConstruct') return list(list_node.inputs())
def _is_packed_list(list_value):
    """True when ``list_value`` is a graph value built by ListConstruct."""
    if not _is_value(list_value):
        return False
    return list_value.node().kind() == 'prim::ListConstruct'
def parse_args(*arg_descriptors): def decorator(fn): fn._arg_descriptors = arg_descriptors def wrapper(g, *args): assert (len(arg_descriptors) >= len(args)) args = [_parse_arg(arg, arg_desc) for (arg, arg_desc) in zip(args, arg_descriptors)] return fn(g, *args...
def _scalar(x): 'Convert a scalar tensor into a Python value.' assert (x.numel() == 1) return x.item()
def _if_scalar_type_as(g, self, tensor): 'Convert self into the same type of tensor, as necessary.' if isinstance(self, torch._C.Value): return self scalar_type = tensor.type().scalarType() if scalar_type: ty = scalar_type.lower() return getattr(self, ty)() return self
def _is_none(x): return x.node().mustBeNone()
def _is_value(x): return isinstance(x, torch._C.Value)
def _is_tensor_list(x): return x.type().isSubtypeOf(ListType.ofTensors())
def _unimplemented(op, msg): warnings.warn((((('ONNX export failed on ' + op) + ' because ') + msg) + ' not supported'))
def _try_get_scalar_type(*args): for arg in args: try: return arg.type().scalarType() except RuntimeError: pass return None
def _topk_helper(g, input, k, dim, largest=True, sorted=False, out=None): if (out is not None): _unimplemented('TopK', 'Out parameter is not supported') if (not _is_value(k)): k = g.op('Constant', value_t=torch.tensor([k], dtype=torch.int64)) else: k = g.op('Reshape', k, g.op('Cons...
def _slice_helper(g, input, axes, starts, ends, steps=None,
                  dynamic_slice=False):
    """Thin wrapper dispatching to opset-10's ``_slice`` implementation."""
    from torch.onnx.symbolic_opset10 import _slice
    return _slice(g, input, axes, starts, ends, steps, dynamic_slice)
def _unsqueeze_helper(g, input, dim):
    """Thin wrapper dispatching to opset-9's ``unsqueeze`` implementation."""
    from torch.onnx.symbolic_opset9 import unsqueeze
    return unsqueeze(g, input, dim)